Upload folder using huggingface_hub
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- .gitattributes +12 -0
- code/batch_test_views.sh +34 -0
- code/batch_test_views2.sh +33 -0
- code/batch_test_views3.sh +34 -0
- code/eval.sh +8 -0
- code/eval_real.py +214 -0
- code/eval_real_ws.py +188 -0
- code/eval_sim.py +129 -0
- code/msgpack_numpy.py +62 -0
- code/nohup.out +3 -0
- code/run.sh +33 -0
- code/run0.sh +41 -0
- code/serve_policy.py +297 -0
- code/setup.py +6 -0
- code/train.py +68 -0
- code/umi/asset/mask.json +17 -0
- code/umi/common/__pycache__/cv_util.cpython-310.pyc +0 -0
- code/umi/common/__pycache__/cv_util.cpython-39.pyc +0 -0
- code/umi/common/__pycache__/pose_util.cpython-310.pyc +0 -0
- code/umi/common/__pycache__/pose_util.cpython-39.pyc +0 -0
- code/umi/common/cv_util.py +461 -0
- code/umi/common/exiftool_util.py +14 -0
- code/umi/common/interpolation_util.py +49 -0
- code/umi/common/k3d_util.py +33 -0
- code/umi/common/latency_util.py +58 -0
- code/umi/common/mocap_util.py +48 -0
- code/umi/common/nested_dict_util.py +34 -0
- code/umi/common/orb_slam_util.py +46 -0
- code/umi/common/pose_trajectory_interpolator.py +207 -0
- code/umi/common/pose_util.py +132 -0
- code/umi/common/precise_sleep.py +27 -0
- code/umi/common/timecode_util.py +56 -0
- code/umi/common/timestamp_accumulator.py +218 -0
- code/umi/common/usb_util.py +101 -0
- code/umi/pipeline/aruco_detection.py +0 -0
- code/umi/real_world/bimanual_umi_env.py +695 -0
- code/umi/real_world/cmd_measure.lua +76 -0
- code/umi/real_world/franka_interpolation_controller.py +376 -0
- code/umi/real_world/keystroke_counter.py +48 -0
- code/umi/real_world/multi_camera_visualizer.py +85 -0
- code/umi/real_world/multi_uvc_camera.py +184 -0
- code/umi/real_world/real_env.py +488 -0
- code/umi/real_world/real_inference_util.py +236 -0
- code/umi/real_world/rtde_interpolation_controller.py +376 -0
- code/umi/real_world/spacemouse_shared_memory.py +167 -0
- code/umi/real_world/umi_env.py +603 -0
- code/umi/real_world/uvc_camera.py +330 -0
- code/umi/real_world/video_recorder.py +286 -0
- code/umi/real_world/wsg_binary_driver.py +631 -0
- code/umi/real_world/wsg_controller.py +241 -0
.gitattributes
CHANGED
|
@@ -223,3 +223,15 @@ task1_hdf5/20251107_multiview_task1_S1_43_1_episode_4_merged.hdf5 filter=lfs dif
|
|
| 223 |
task1_hdf5/20251107_multiview_task1_S1_43_1_episode_2_merged.hdf5 filter=lfs diff=lfs merge=lfs -text
|
| 224 |
task1_hdf5/20251107_multiview_task1_S1_43_1_episode_1_merged.hdf5 filter=lfs diff=lfs merge=lfs -text
|
| 225 |
task1_hdf5/20251107_multiview_task1_S1_43_1_episode_3_merged.hdf5 filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 223 |
task1_hdf5/20251107_multiview_task1_S1_43_1_episode_2_merged.hdf5 filter=lfs diff=lfs merge=lfs -text
|
| 224 |
task1_hdf5/20251107_multiview_task1_S1_43_1_episode_1_merged.hdf5 filter=lfs diff=lfs merge=lfs -text
|
| 225 |
task1_hdf5/20251107_multiview_task1_S1_43_1_episode_3_merged.hdf5 filter=lfs diff=lfs merge=lfs -text
|
| 226 |
+
code/nohup.out filter=lfs diff=lfs merge=lfs -text
|
| 227 |
+
code/unified_video_action/env/libero/assets/scenes/coffee_table_seats/collision/coffee_table_seats_ch.stl filter=lfs diff=lfs merge=lfs -text
|
| 228 |
+
code/unified_video_action/env/libero/assets/scenes/floor_lamp/collision/floor_lamp_ch.stl filter=lfs diff=lfs merge=lfs -text
|
| 229 |
+
code/unified_video_action/env/libero/assets/scenes/kitchen_background_pot/collision/kitchen_background_pot_ch.stl filter=lfs diff=lfs merge=lfs -text
|
| 230 |
+
code/unified_video_action/env/libero/assets/scenes/living_room_table/collision/living_room_table_ch.stl filter=lfs diff=lfs merge=lfs -text
|
| 231 |
+
code/unified_video_action/env/libero/init_files/libero_10/LIVING_ROOM_SCENE2_put_both_the_alphabet_soup_and_the_tomato_sauce_in_the_basket.init filter=lfs diff=lfs merge=lfs -text
|
| 232 |
+
code/unified_video_action/env/libero/init_files/libero_10/LIVING_ROOM_SCENE2_put_both_the_cream_cheese_box_and_the_butter_in_the_basket.init filter=lfs diff=lfs merge=lfs -text
|
| 233 |
+
code/unified_video_action/env/libero/init_files/libero_90/LIVING_ROOM_SCENE2_pick_up_the_alphabet_soup_and_put_it_in_the_basket.init filter=lfs diff=lfs merge=lfs -text
|
| 234 |
+
code/unified_video_action/env/libero/init_files/libero_90/LIVING_ROOM_SCENE2_pick_up_the_butter_and_put_it_in_the_basket.init filter=lfs diff=lfs merge=lfs -text
|
| 235 |
+
code/unified_video_action/env/libero/init_files/libero_90/LIVING_ROOM_SCENE2_pick_up_the_milk_and_put_it_in_the_basket.init filter=lfs diff=lfs merge=lfs -text
|
| 236 |
+
code/unified_video_action/env/libero/init_files/libero_90/LIVING_ROOM_SCENE2_pick_up_the_orange_juice_and_put_it_in_the_basket.init filter=lfs diff=lfs merge=lfs -text
|
| 237 |
+
code/unified_video_action/env/libero/init_files/libero_90/LIVING_ROOM_SCENE2_pick_up_the_tomato_sauce_and_put_it_in_the_basket.init filter=lfs diff=lfs merge=lfs -text
|
code/batch_test_views.sh
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
|
| 2 |
+
|
| 3 |
+
# 批量测试六个视角的shell脚本
|
| 4 |
+
# 基于eval.sh,只是加了循环和view_key参数
|
| 5 |
+
|
| 6 |
+
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/lib/nvidia
|
| 7 |
+
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/user/jfeng644/.mujoco/mujoco210/bin
|
| 8 |
+
export PYTHONLOGLEVEL=ERROR
|
| 9 |
+
export ACCELERATE_LOG_LEVEL=error
|
| 10 |
+
export TRANSFORMERS_VERBOSITY=error
|
| 11 |
+
export HF_DATASETS_VERBOSITY=error
|
| 12 |
+
|
| 13 |
+
# export STAGE1_MODEL_PATH=/data/user/jfeng644/code/vpp/output/svd3/livingellipse115move_studymove/mvwan5frame/train_2025-10-30T18-50-53/checkpoint-30000
|
| 14 |
+
# export STAGE1_MODEL_PATH=/data/user/jfeng644/code/vpp/output/svd3/livingellipse115move_studymove/mvwan/train_2025-10-30T18-21-29/checkpoint-20000
|
| 15 |
+
# export STAGE1_MODEL_PATH=/data/user/jfeng644/code/vpp/output/svd3/livingellipse115move_studymove/singleview/train_2025-10-29T04-42-47/checkpoint-20000
|
| 16 |
+
# export STAGE1_MODEL_PATH=/data/user/jfeng644/code/vpp/output/svd3/livingellipse115move_studymove/mvwan/train_2025-10-30T18-21-29/checkpoint-40000
|
| 17 |
+
export STAGE1_MODEL_PATH=/data/user/jfeng644/code/vpp/output/svd3/livingellipse115move_studymove/mvwan13frame_continue/train_2025-11-02T19-43-17/checkpoint-60000
|
| 18 |
+
|
| 19 |
+
export GLOBAL_FRAME_NUM=13
|
| 20 |
+
# 定义要测试的视角列表
|
| 21 |
+
VIEW_KEYS=( "agentview_330_image" "agentview_350_image" "agentview_30_image" "agentview_10_image")
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
# 遍历每个视角进行测试
|
| 25 |
+
for view_key in "${VIEW_KEYS[@]}"; do
|
| 26 |
+
echo "正在测试视角: ${view_key}"
|
| 27 |
+
CUDA_VISIBLE_DEVICES=1 python eval_sim.py \
|
| 28 |
+
--checkpoint /data/user/jfeng644/code/manires/final_continue13frame_ckpt60_areg/checkpoints/epoch=0022-test_mean_score=0.840.ckpt \
|
| 29 |
+
--output_dir /data/user/jfeng644/code/manires/final_continue13frame_ckpt60_areg/eval_epoch22/${view_key} \
|
| 30 |
+
--view-key ${view_key}
|
| 31 |
+
echo "------------------------------------------"
|
| 32 |
+
done
|
| 33 |
+
|
| 34 |
+
|
code/batch_test_views2.sh
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
|
| 2 |
+
|
| 3 |
+
# 批量测试六个视角的shell脚本
|
| 4 |
+
# 基于eval.sh,只是加了循环和view_key参数
|
| 5 |
+
|
| 6 |
+
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/lib/nvidia
|
| 7 |
+
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/user/jfeng644/.mujoco/mujoco210/bin
|
| 8 |
+
export PYTHONLOGLEVEL=ERROR
|
| 9 |
+
export ACCELERATE_LOG_LEVEL=error
|
| 10 |
+
export TRANSFORMERS_VERBOSITY=error
|
| 11 |
+
export HF_DATASETS_VERBOSITY=error
|
| 12 |
+
|
| 13 |
+
# export STAGE1_MODEL_PATH=/data/user/jfeng644/code/vpp/output/svd3/livingellipse115move_studymove/mvwan5frame/train_2025-10-30T18-50-53/checkpoint-30000
|
| 14 |
+
# export STAGE1_MODEL_PATH=/data/user/jfeng644/code/vpp/output/svd3/livingellipse115move_studymove/mvwan/train_2025-10-30T18-21-29/checkpoint-20000
|
| 15 |
+
# export STAGE1_MODEL_PATH=/data/user/jfeng644/code/vpp/output/svd3/livingellipse115move_studymove/singleview/train_2025-10-29T04-42-47/checkpoint-20000
|
| 16 |
+
# export STAGE1_MODEL_PATH=/data/user/jfeng644/code/vpp/output/svd3/livingellipse115move_studymove/mvwan/train_2025-10-30T18-21-29/checkpoint-40000
|
| 17 |
+
export STAGE1_MODEL_PATH=/data/user/jfeng644/code/vpp/output/svd3/livingellipse115move_studymove/mvwan13frame_continue/train_2025-11-02T19-43-17/checkpoint-60000
|
| 18 |
+
export GLOBAL_FRAME_NUM=13
|
| 19 |
+
# 定义要测试的视角列表
|
| 20 |
+
VIEW_KEYS=( "agentview_320_image" "agentview_340_image" "agentview_40_image" "agentview_20_image" "agentview_image")
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
# 遍历每个视角进行测试
|
| 24 |
+
for view_key in "${VIEW_KEYS[@]}"; do
|
| 25 |
+
echo "正在测试视角: ${view_key}"
|
| 26 |
+
CUDA_VISIBLE_DEVICES=1 python eval_sim.py \
|
| 27 |
+
--checkpoint /data/user/jfeng644/code/manires/final_continue13frame_ckpt60_areg/checkpoints/epoch=0022-test_mean_score=0.840.ckpt\
|
| 28 |
+
--output_dir /data/user/jfeng644/code/manires/final_continue13frame_ckpt60_areg/eval_epoch22/${view_key} \
|
| 29 |
+
--view-key ${view_key}
|
| 30 |
+
echo "------------------------------------------"
|
| 31 |
+
done
|
| 32 |
+
|
| 33 |
+
|
code/batch_test_views3.sh
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
|
| 2 |
+
|
| 3 |
+
# 批量测试六个视角的shell脚本
|
| 4 |
+
# 基于eval.sh,只是加了循环和view_key参数
|
| 5 |
+
|
| 6 |
+
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/lib/nvidia
|
| 7 |
+
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/user/jfeng644/.mujoco/mujoco210/bin
|
| 8 |
+
export PYTHONLOGLEVEL=ERROR
|
| 9 |
+
export ACCELERATE_LOG_LEVEL=error
|
| 10 |
+
export TRANSFORMERS_VERBOSITY=error
|
| 11 |
+
export HF_DATASETS_VERBOSITY=error
|
| 12 |
+
|
| 13 |
+
# export STAGE1_MODEL_PATH=/data/user/jfeng644/code/vpp/output/svd3/livingellipse115move_studymove/mvwan5frame/train_2025-10-30T18-50-53/checkpoint-30000
|
| 14 |
+
# export STAGE1_MODEL_PATH=/data/user/jfeng644/code/vpp/output/svd3/livingellipse115move_studymove/mvwan/train_2025-10-30T18-21-29/checkpoint-20000
|
| 15 |
+
# export STAGE1_MODEL_PATH=/data/user/jfeng644/code/vpp/output/svd3/livingellipse115move_studymove/singleview/train_2025-10-29T04-42-47/checkpoint-20000
|
| 16 |
+
# export STAGE1_MODEL_PATH=/data/user/jfeng644/code/vpp/output/svd3/livingellipse115move_studymove/mvwan/train_2025-10-30T18-21-29/checkpoint-40000
|
| 17 |
+
export STAGE1_MODEL_PATH=/data/user/jfeng644/code/vpp/output/svd3/livingellipse115move_studymove/mvwan13frame_continue/train_2025-11-02T19-43-17/checkpoint-60000
|
| 18 |
+
|
| 19 |
+
export GLOBAL_FRAME_NUM=13
|
| 20 |
+
# 定义要测试的视角列表
|
| 21 |
+
VIEW_KEYS=( "agentview_320_image" "agentview_340_image" "agentview_40_image" "agentview_20_image" "agentview_image" )
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
# 遍历每个视角进行测试
|
| 25 |
+
for view_key in "${VIEW_KEYS[@]}"; do
|
| 26 |
+
echo "正在测试视角: ${view_key}"
|
| 27 |
+
CUDA_VISIBLE_DEVICES=0 python eval_sim.py \
|
| 28 |
+
--checkpoint /data/user/jfeng644/code/manires/final_continue13frame_ckpt60_areg/checkpoints/epoch=0022-test_mean_score=0.840.ckpt \
|
| 29 |
+
--output_dir /data/user/jfeng644/code/manires/final_continue13frame_ckpt60_areg/eval_epoch22/${view_key} \
|
| 30 |
+
--view-key ${view_key}
|
| 31 |
+
echo "------------------------------------------"
|
| 32 |
+
done
|
| 33 |
+
|
| 34 |
+
|
code/eval.sh
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/lib/nvidia
|
| 2 |
+
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/user/jfeng644/.mujoco/mujoco210/bin
|
| 3 |
+
export PYTHONLOGLEVEL=ERROR
|
| 4 |
+
export ACCELERATE_LOG_LEVEL=error
|
| 5 |
+
export TRANSFORMERS_VERBOSITY=error
|
| 6 |
+
export HF_DATASETS_VERBOSITY=error
|
| 7 |
+
# python /hpc2hdd/home/jfeng644/anaconda3/envs/uva/lib/python3.9/site-packages/robosuite/scripts/setup_macros.py
|
| 8 |
+
CUDA_VISIBLE_DEVICES=0 python eval_sim.py --checkpoint /data/user/jfeng644/code/unified_video_action/vpp_multiviewtrain_ell_4gpu/checkpoints/epoch=0024-test_mean_score=0.667.ckpt --output_dir /data/user/jfeng644/code/unified_video_action/vpp_multiviewtrain_ell_4gpu/eval_epoch24
|
code/eval_real.py
ADDED
|
@@ -0,0 +1,214 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
import os
|
| 3 |
+
import time
|
| 4 |
+
import click
|
| 5 |
+
import numpy as np
|
| 6 |
+
import torch
|
| 7 |
+
import dill
|
| 8 |
+
import hydra
|
| 9 |
+
import zmq
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
from unified_video_action.policy.base_image_policy import BaseImagePolicy
|
| 13 |
+
from unified_video_action.workspace.base_workspace import BaseWorkspace
|
| 14 |
+
from umi.real_world.real_inference_util import (
|
| 15 |
+
get_real_obs_resolution,
|
| 16 |
+
get_real_umi_action,
|
| 17 |
+
)
|
| 18 |
+
from unified_video_action.common.pytorch_util import dict_apply
|
| 19 |
+
import omegaconf
|
| 20 |
+
import traceback
|
| 21 |
+
import pickle
|
| 22 |
+
from omegaconf import open_dict
|
| 23 |
+
|
| 24 |
+
language_latents = pickle.load(open("prepared_data/language_latents.pkl", "rb"))
|
| 25 |
+
import torch
|
| 26 |
+
import torch.nn.functional as F
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def echo_exception():
|
| 30 |
+
exc_type, exc_value, exc_traceback = sys.exc_info()
|
| 31 |
+
# Extract unformatted traceback
|
| 32 |
+
tb_lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
|
| 33 |
+
# Print line of code where the exception occurred
|
| 34 |
+
|
| 35 |
+
return "".join(tb_lines)
|
| 36 |
+
|
| 37 |
+
def smooth_action(act_out, window_size=3, pad_size=1):
|
| 38 |
+
# Define the moving average kernel
|
| 39 |
+
kernel = torch.ones(1, 1, window_size) / window_size # 1x1x3 kernel
|
| 40 |
+
kernel = kernel.to(act_out.device) # Match device of the input tensor
|
| 41 |
+
|
| 42 |
+
# Apply convolution with padding to preserve the sequence length
|
| 43 |
+
# Unsqueeze the last dimension for convolution along the time axis
|
| 44 |
+
print(act_out.shape)
|
| 45 |
+
act_out_padded = F.pad(act_out, (0, 0, pad_size, pad_size), mode="replicate")
|
| 46 |
+
|
| 47 |
+
batch_size, timesteps, action_dim = act_out_padded.shape
|
| 48 |
+
act_out_padded = act_out_padded.permute(
|
| 49 |
+
0, 2, 1
|
| 50 |
+
) # Shape: [batch_size, action_dim, timesteps]
|
| 51 |
+
act_out_padded = act_out_padded.reshape(
|
| 52 |
+
-1, 1, timesteps
|
| 53 |
+
) # Combine batch and action_dim
|
| 54 |
+
|
| 55 |
+
smoothed_act_out = F.conv1d(act_out_padded, kernel, padding=0)
|
| 56 |
+
|
| 57 |
+
smoothed_act_out = smoothed_act_out.reshape(
|
| 58 |
+
batch_size, action_dim, timesteps - 2 * pad_size
|
| 59 |
+
)
|
| 60 |
+
smoothed_act_out = smoothed_act_out.permute(
|
| 61 |
+
0, 2, 1
|
| 62 |
+
) # Shape: [batch_size, timesteps, action_dim]
|
| 63 |
+
|
| 64 |
+
return smoothed_act_out
|
| 65 |
+
|
| 66 |
+
class PolicyInferenceNode:
|
| 67 |
+
def __init__(
|
| 68 |
+
self, ckpt_path: str, ip: str, port: int, device: str, output_dir: str
|
| 69 |
+
):
|
| 70 |
+
self.ckpt_path = ckpt_path
|
| 71 |
+
if not self.ckpt_path.endswith(".ckpt"):
|
| 72 |
+
self.ckpt_path = os.path.join(self.ckpt_path, "checkpoints", "latest.ckpt")
|
| 73 |
+
payload = torch.load(
|
| 74 |
+
open(self.ckpt_path, "rb"), map_location="cpu", pickle_module=dill
|
| 75 |
+
)
|
| 76 |
+
self.cfg = payload["cfg"]
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
with open_dict(self.cfg):
|
| 80 |
+
if "autoregressive_model_params" in self.cfg.model.policy:
|
| 81 |
+
self.cfg.model.policy.autoregressive_model_params.num_sampling_steps = (
|
| 82 |
+
"100"
|
| 83 |
+
)
|
| 84 |
+
print("-----------------------------------------------")
|
| 85 |
+
print(
|
| 86 |
+
"num_sampling_steps",
|
| 87 |
+
self.cfg.model.policy.autoregressive_model_params.num_sampling_steps,
|
| 88 |
+
)
|
| 89 |
+
print("-----------------------------------------------")
|
| 90 |
+
|
| 91 |
+
# export cfg to yaml
|
| 92 |
+
cfg_path = self.ckpt_path.replace(".ckpt", ".yaml")
|
| 93 |
+
with open(cfg_path, "w") as f:
|
| 94 |
+
f.write(omegaconf.OmegaConf.to_yaml(self.cfg))
|
| 95 |
+
print(f"Exported config to {cfg_path}")
|
| 96 |
+
# print(f"Loading configure: {self.cfg.name}, workspace: {self.cfg._target_}, policy: {self.cfg.policy._target_}, model_name: {self.cfg.policy.obs_encoder.model_name}")
|
| 97 |
+
print(
|
| 98 |
+
f"Loading configure: {self.cfg.task.name}, workspace: {self.cfg.model._target_}, policy: {self.cfg.model.policy._target_}"
|
| 99 |
+
)
|
| 100 |
+
|
| 101 |
+
self.obs_res = get_real_obs_resolution(self.cfg.task.shape_meta)
|
| 102 |
+
self.get_class_start_time = time.monotonic()
|
| 103 |
+
|
| 104 |
+
cls = hydra.utils.get_class(self.cfg.model._target_)
|
| 105 |
+
self.workspace = cls(self.cfg, output_dir=output_dir)
|
| 106 |
+
self.workspace: BaseWorkspace
|
| 107 |
+
self.workspace.load_payload(payload, exclude_keys=None, include_keys=None)
|
| 108 |
+
|
| 109 |
+
self.policy: BaseImagePolicy = self.workspace.model
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
if self.cfg.training.use_ema:
|
| 113 |
+
self.policy = self.workspace.ema_model
|
| 114 |
+
print("Using EMA model")
|
| 115 |
+
|
| 116 |
+
self.device = torch.device(device)
|
| 117 |
+
self.policy.eval().to(self.device)
|
| 118 |
+
self.policy.reset()
|
| 119 |
+
self.ip = ip
|
| 120 |
+
self.port = port
|
| 121 |
+
|
| 122 |
+
def predict_action(self, obs_dict_np: dict, past_action_list=[]):
|
| 123 |
+
|
| 124 |
+
if "task_name" in obs_dict_np:
|
| 125 |
+
task_name = obs_dict_np["task_name"]
|
| 126 |
+
print("task_name", obs_dict_np["task_name"])
|
| 127 |
+
del obs_dict_np["task_name"]
|
| 128 |
+
|
| 129 |
+
if self.cfg.task.dataset.language_emb_model is not None:
|
| 130 |
+
if "cup" in task_name:
|
| 131 |
+
language_goal = language_latents["cup"]
|
| 132 |
+
elif "towel" in task_name:
|
| 133 |
+
language_goal = language_latents["towel"]
|
| 134 |
+
elif "mouse" in task_name:
|
| 135 |
+
language_goal = language_latents["mouse"]
|
| 136 |
+
language_goal = torch.tensor(language_goal).to(self.device)
|
| 137 |
+
language_goal = language_goal.unsqueeze(0)
|
| 138 |
+
print("task_name", task_name)
|
| 139 |
+
else:
|
| 140 |
+
language_goal = None
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
with torch.no_grad():
|
| 144 |
+
obs_dict = dict_apply(
|
| 145 |
+
obs_dict_np, lambda x: torch.from_numpy(x).unsqueeze(0).to(self.device)
|
| 146 |
+
)
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
if self.cfg.name == "uva":
|
| 150 |
+
result = self.policy.predict_action(
|
| 151 |
+
obs_dict=obs_dict, language_goal=language_goal
|
| 152 |
+
)
|
| 153 |
+
|
| 154 |
+
past_action_list.append(np.array(result["action"][0].cpu()))
|
| 155 |
+
if len(past_action_list) > 2:
|
| 156 |
+
past_action_list.pop(0)
|
| 157 |
+
action = smooth_action(result["action_pred"].detach().to("cpu")).numpy()[0]
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
else:
|
| 161 |
+
result = self.policy.predict_action(
|
| 162 |
+
obs_dict, language_goal=language_goal
|
| 163 |
+
)
|
| 164 |
+
action = result["action_pred"][0].detach().to("cpu").numpy()
|
| 165 |
+
print("action")
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
del result
|
| 170 |
+
del obs_dict
|
| 171 |
+
|
| 172 |
+
return action, past_action_list
|
| 173 |
+
|
| 174 |
+
def run_node(self):
|
| 175 |
+
context = zmq.Context()
|
| 176 |
+
socket = context.socket(zmq.REP)
|
| 177 |
+
socket.bind(f"tcp://{self.ip}:{self.port}")
|
| 178 |
+
print(f"PolicyInferenceNode is listening on {self.ip}:{self.port}")
|
| 179 |
+
|
| 180 |
+
past_action_list = []
|
| 181 |
+
while True:
|
| 182 |
+
obs_dict_np = socket.recv_pyobj()
|
| 183 |
+
|
| 184 |
+
try:
|
| 185 |
+
start_time = time.monotonic()
|
| 186 |
+
action, past_action_list = self.predict_action(
|
| 187 |
+
obs_dict_np, past_action_list
|
| 188 |
+
)
|
| 189 |
+
print(f"Inference time: {time.monotonic() - start_time:.3f} s")
|
| 190 |
+
|
| 191 |
+
except Exception as e:
|
| 192 |
+
err_str = echo_exception()
|
| 193 |
+
print(f"Error: {err_str}")
|
| 194 |
+
action = err_str
|
| 195 |
+
send_start_time = time.monotonic()
|
| 196 |
+
# time.sleep(0.1)
|
| 197 |
+
socket.send_pyobj(action)
|
| 198 |
+
print(f"Send time: {time.monotonic() - send_start_time:.3f} s")
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
@click.command()
|
| 202 |
+
@click.option("--input", "-i", required=True, help="Path to checkpoint")
|
| 203 |
+
@click.option("--ip", default="0.0.0.0")
|
| 204 |
+
@click.option("--port", default=8766, help="Port to listen on")
|
| 205 |
+
@click.option("--device", default="cuda", help="Device to run on")
|
| 206 |
+
@click.option("--output_dir", required=True)
|
| 207 |
+
def main(input, ip, port, device, output_dir):
|
| 208 |
+
|
| 209 |
+
node = PolicyInferenceNode(input, ip, port, device, output_dir)
|
| 210 |
+
node.run_node()
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
if __name__ == "__main__":
|
| 214 |
+
main()
|
code/eval_real_ws.py
ADDED
|
@@ -0,0 +1,188 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
import os
|
| 3 |
+
import time
|
| 4 |
+
import json
|
| 5 |
+
import asyncio
|
| 6 |
+
import pickle
|
| 7 |
+
|
| 8 |
+
import click
|
| 9 |
+
import numpy as np
|
| 10 |
+
import torch
|
| 11 |
+
import dill
|
| 12 |
+
import hydra
|
| 13 |
+
import omegaconf
|
| 14 |
+
import traceback
|
| 15 |
+
from omegaconf import open_dict
|
| 16 |
+
|
| 17 |
+
from unified_video_action.policy.base_image_policy import BaseImagePolicy
|
| 18 |
+
from unified_video_action.workspace.base_workspace import BaseWorkspace
|
| 19 |
+
from unified_video_action.common.pytorch_util import dict_apply
|
| 20 |
+
from umi.real_world.real_inference_util import get_real_obs_resolution
|
| 21 |
+
|
| 22 |
+
import torch.nn.functional as F
|
| 23 |
+
import websockets
|
| 24 |
+
|
| 25 |
+
language_latents = pickle.load(open("prepared_data/language_latents.pkl", "rb"))
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def echo_exception():
|
| 29 |
+
exc_type, exc_value, exc_traceback = sys.exc_info()
|
| 30 |
+
tb_lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
|
| 31 |
+
return "".join(tb_lines)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def smooth_action(act_out, window_size=3, pad_size=1):
|
| 35 |
+
kernel = torch.ones(1, 1, window_size) / window_size
|
| 36 |
+
kernel = kernel.to(act_out.device)
|
| 37 |
+
|
| 38 |
+
act_out_padded = F.pad(act_out, (0, 0, pad_size, pad_size), mode="replicate")
|
| 39 |
+
|
| 40 |
+
batch_size, timesteps, action_dim = act_out_padded.shape
|
| 41 |
+
act_out_padded = act_out_padded.permute(0, 2, 1)
|
| 42 |
+
act_out_padded = act_out_padded.reshape(-1, 1, timesteps)
|
| 43 |
+
|
| 44 |
+
smoothed_act_out = F.conv1d(act_out_padded, kernel, padding=0)
|
| 45 |
+
|
| 46 |
+
smoothed_act_out = smoothed_act_out.reshape(batch_size, action_dim, timesteps - 2 * pad_size)
|
| 47 |
+
smoothed_act_out = smoothed_act_out.permute(0, 2, 1)
|
| 48 |
+
|
| 49 |
+
return smoothed_act_out
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
class PolicyInferenceNode:
|
| 53 |
+
def __init__(self, ckpt_path: str, ip: str, port: int, device: str, output_dir: str):
|
| 54 |
+
self.ckpt_path = ckpt_path
|
| 55 |
+
if not self.ckpt_path.endswith(".ckpt"):
|
| 56 |
+
self.ckpt_path = os.path.join(self.ckpt_path, "checkpoints", "latest.ckpt")
|
| 57 |
+
payload = torch.load(open(self.ckpt_path, "rb"), map_location="cpu", pickle_module=dill)
|
| 58 |
+
self.cfg = payload["cfg"]
|
| 59 |
+
|
| 60 |
+
with open_dict(self.cfg):
|
| 61 |
+
if "autoregressive_model_params" in self.cfg.model.policy:
|
| 62 |
+
self.cfg.model.policy.autoregressive_model_params.num_sampling_steps = "100"
|
| 63 |
+
print("-----------------------------------------------")
|
| 64 |
+
print(
|
| 65 |
+
"num_sampling_steps",
|
| 66 |
+
self.cfg.model.policy.autoregressive_model_params.num_sampling_steps,
|
| 67 |
+
)
|
| 68 |
+
print("-----------------------------------------------")
|
| 69 |
+
|
| 70 |
+
cfg_path = self.ckpt_path.replace(".ckpt", ".yaml")
|
| 71 |
+
with open(cfg_path, "w") as f:
|
| 72 |
+
f.write(omegaconf.OmegaConf.to_yaml(self.cfg))
|
| 73 |
+
print(f"Exported config to {cfg_path}")
|
| 74 |
+
print(
|
| 75 |
+
f"Loading configure: {self.cfg.task.name}, workspace: {self.cfg.model._target_}, policy: {self.cfg.model.policy._target_}"
|
| 76 |
+
)
|
| 77 |
+
|
| 78 |
+
self.obs_res = get_real_obs_resolution(self.cfg.task.shape_meta)
|
| 79 |
+
|
| 80 |
+
cls = hydra.utils.get_class(self.cfg.model._target_)
|
| 81 |
+
self.workspace = cls(self.cfg, output_dir=output_dir)
|
| 82 |
+
self.workspace: BaseWorkspace
|
| 83 |
+
self.workspace.load_payload(payload, exclude_keys=None, include_keys=None)
|
| 84 |
+
|
| 85 |
+
self.policy: BaseImagePolicy = self.workspace.model
|
| 86 |
+
|
| 87 |
+
if self.cfg.training.use_ema:
|
| 88 |
+
self.policy = self.workspace.ema_model
|
| 89 |
+
print("Using EMA model")
|
| 90 |
+
|
| 91 |
+
self.device = torch.device(device)
|
| 92 |
+
self.policy.eval().to(self.device)
|
| 93 |
+
self.policy.reset()
|
| 94 |
+
self.ip = ip
|
| 95 |
+
self.port = port
|
| 96 |
+
|
| 97 |
+
def _prepare_language_goal(self, task_name: str):
|
| 98 |
+
if self.cfg.task.dataset.language_emb_model is None:
|
| 99 |
+
return None
|
| 100 |
+
|
| 101 |
+
key = None
|
| 102 |
+
if task_name is None:
|
| 103 |
+
return None
|
| 104 |
+
for candidate in ["cup", "towel", "mouse"]:
|
| 105 |
+
if candidate in task_name:
|
| 106 |
+
key = candidate
|
| 107 |
+
break
|
| 108 |
+
if key is None:
|
| 109 |
+
return None
|
| 110 |
+
language_goal = language_latents.get(key)
|
| 111 |
+
if language_goal is None:
|
| 112 |
+
return None
|
| 113 |
+
language_goal = torch.tensor(language_goal).to(self.device).unsqueeze(0)
|
| 114 |
+
return language_goal
|
| 115 |
+
|
| 116 |
+
def predict_action(self, obs_dict_np: dict, past_action_list=None):
|
| 117 |
+
if past_action_list is None:
|
| 118 |
+
past_action_list = []
|
| 119 |
+
|
| 120 |
+
task_name = obs_dict_np.pop("task_name", None)
|
| 121 |
+
language_goal = self._prepare_language_goal(task_name)
|
| 122 |
+
|
| 123 |
+
with torch.no_grad():
|
| 124 |
+
obs_dict = dict_apply(
|
| 125 |
+
obs_dict_np, lambda x: torch.from_numpy(x).unsqueeze(0).to(self.device)
|
| 126 |
+
)
|
| 127 |
+
|
| 128 |
+
if self.cfg.name == "uva":
|
| 129 |
+
result = self.policy.predict_action(obs_dict=obs_dict, language_goal=language_goal)
|
| 130 |
+
past_action_list.append(np.array(result["action"][0].cpu()))
|
| 131 |
+
if len(past_action_list) > 2:
|
| 132 |
+
past_action_list.pop(0)
|
| 133 |
+
action = smooth_action(result["action_pred"].detach().to("cpu")).numpy()[0]
|
| 134 |
+
else:
|
| 135 |
+
result = self.policy.predict_action(obs_dict, language_goal=language_goal)
|
| 136 |
+
action = result["action_pred"][0].detach().to("cpu").numpy()
|
| 137 |
+
|
| 138 |
+
del result
|
| 139 |
+
del obs_dict
|
| 140 |
+
|
| 141 |
+
return action, past_action_list
|
| 142 |
+
|
| 143 |
+
async def _handle_connection(self, websocket):
|
| 144 |
+
past_action_list = []
|
| 145 |
+
async for message in websocket:
|
| 146 |
+
try:
|
| 147 |
+
request = json.loads(message)
|
| 148 |
+
payload = request.get("body", request.get("data", request))
|
| 149 |
+
if isinstance(payload, str):
|
| 150 |
+
payload = json.loads(payload)
|
| 151 |
+
if not isinstance(payload, dict):
|
| 152 |
+
raise ValueError("Parsed payload is not a dict")
|
| 153 |
+
|
| 154 |
+
start_time = time.monotonic()
|
| 155 |
+
action, past_action_list = self.predict_action(payload, past_action_list)
|
| 156 |
+
elapsed = time.monotonic() - start_time
|
| 157 |
+
|
| 158 |
+
response = {
|
| 159 |
+
"status": "ok",
|
| 160 |
+
"action": action.tolist(),
|
| 161 |
+
"inference_time": elapsed,
|
| 162 |
+
}
|
| 163 |
+
except Exception:
|
| 164 |
+
err_str = echo_exception()
|
| 165 |
+
response = {"status": "error", "error": err_str}
|
| 166 |
+
|
| 167 |
+
await websocket.send(json.dumps(response))
|
| 168 |
+
|
| 169 |
+
async def run_node(self):
|
| 170 |
+
print(f"PolicyInferenceNode WebSocket listening on {self.ip}:{self.port}")
|
| 171 |
+
async with websockets.serve(self._handle_connection, self.ip, self.port):
|
| 172 |
+
await asyncio.Future() # run forever
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
@click.command()
|
| 176 |
+
@click.option("--input", "-i", required=True, help="Path to checkpoint")
|
| 177 |
+
@click.option("--ip", default="0.0.0.0")
|
| 178 |
+
@click.option("--port", default=8766, help="Port to listen on")
|
| 179 |
+
@click.option("--device", default="cuda", help="Device to run on")
|
| 180 |
+
@click.option("--output_dir", required=True)
|
| 181 |
+
def main(input, ip, port, device, output_dir):
|
| 182 |
+
node = PolicyInferenceNode(input, ip, port, device, output_dir)
|
| 183 |
+
asyncio.run(node.run_node())
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
if __name__ == "__main__":
|
| 187 |
+
main()
|
| 188 |
+
|
code/eval_sim.py
ADDED
|
@@ -0,0 +1,129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys

# Rebind stdout/stderr as line-buffered so log lines flush promptly even when
# output is redirected to a file (e.g. under nohup).
sys.stdout = open(sys.stdout.fileno(), mode="w", buffering=1)
sys.stderr = open(sys.stderr.fileno(), mode="w", buffering=1)
|
| 5 |
+
import numpy as np
|
| 6 |
+
import os
|
| 7 |
+
import pathlib
|
| 8 |
+
import click
|
| 9 |
+
import hydra
|
| 10 |
+
import torch
|
| 11 |
+
import dill
|
| 12 |
+
import wandb
|
| 13 |
+
import json
|
| 14 |
+
import random
|
| 15 |
+
from omegaconf import open_dict, OmegaConf
|
| 16 |
+
# os.environ["LD_LIBRARY_PATH"] = "/hpc2hdd/home/jfeng644/.mujoco/mujoco210/bin:/usr/lib/nvidia:" + os.environ.get("LD_LIBRARY_PATH", "")
|
| 17 |
+
from unified_video_action.workspace.base_workspace import BaseWorkspace
|
| 18 |
+
from unified_video_action.utils.load_env import load_env_runner
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
@click.command()
@click.option("-c", "--checkpoint", required=True)
@click.option("-o", "--output_dir", required=True)
@click.option("-d", "--device", default="cuda:0")
@click.option("--config-override", default='unified_video_action/config/task/libero10.yaml', help="Path to additional config file to override settings")
@click.option("--view-key", default="agentview_340_image", help="View key for testing")
def main(checkpoint, output_dir, device, config_override, view_key):
    """Evaluate a trained checkpoint in simulation and dump the log as JSON.

    Loads the checkpoint (config + weights), seeds all RNGs from the training
    config, runs the task's environment runner(s) with the EMA policy, and
    writes an `eval_log_<ckpt>.json` file into `output_dir`.
    """
    pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)

    # Load checkpoint. NOTE: dill/pickle deserialization executes code — only
    # ever point this at trusted checkpoints.
    # FIX: use a context manager so the file handle is always closed (the
    # original passed a bare open() into torch.load and leaked it).
    with open(checkpoint, "rb") as f:
        payload = torch.load(f, pickle_module=dill)
    cfg = payload["cfg"]

    # NOTE: `config_override` is currently unused — the config-merging logic
    # that consumed it was removed as dead (commented-out) code. The CLI option
    # is kept for backward compatibility with existing launch scripts.

    # Seed every RNG for reproducible rollouts.
    seed = cfg.training.seed
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)

    with open_dict(cfg):
        cfg.output_dir = output_dir
        # Select which camera view the env runner feeds to the policy.
        cfg.task.env_runner.view_key = view_key

    # Instantiate the workspace class named by the config and restore weights.
    cls = hydra.utils.get_class(cfg.model._target_)
    workspace = cls(cfg, output_dir=output_dir)
    workspace: BaseWorkspace

    print("Loaded checkpoint from %s" % checkpoint)
    workspace.load_payload(payload, exclude_keys=None, include_keys=None)

    # Evaluate with the EMA weights (smoother than raw training weights).
    policy = workspace.ema_model
    policy.to(device)
    policy.eval()

    env_runners = load_env_runner(cfg, output_dir)

    if "libero" in cfg.task.name:
        # LIBERO yields one runner per task suite; aggregate all their logs
        # and add an overall mean across the per-task mean scores.
        step_log = {}
        for env_runner in env_runners:
            runner_log = env_runner.run(policy)
            step_log.update(runner_log)
            print(step_log)

        assert "test_mean_score" not in step_log
        all_test_mean_score = {
            k: v for k, v in step_log.items() if "test/" in k and "_mean_score" in k
        }
        step_log["test_mean_score"] = np.mean(list(all_test_mean_score.values()))

        runner_log = step_log
    else:
        # Non-LIBERO tasks: load_env_runner returns a single runner.
        env_runner = env_runners
        runner_log = env_runner.run(policy)

    # Dump the log to JSON, replacing wandb Video objects with their file path
    # (Video objects are not JSON-serializable).
    json_log = dict()
    for key, value in runner_log.items():
        if isinstance(value, wandb.sdk.data_types.video.Video):
            json_log[key] = value._path
        else:
            json_log[key] = value

    for k, v in json_log.items():
        print(k, v)

    out_path = os.path.join(output_dir, f'eval_log_{checkpoint.split("/")[-1]}.json')
    print("Saving log to %s" % out_path)
    # FIX: context manager instead of a leaked open() handle.
    with open(out_path, "w") as f:
        json.dump(json_log, f, indent=2, sort_keys=True)


if __name__ == "__main__":
    main()
|
code/msgpack_numpy.py
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
|
| 3 |
+
"""Adds NumPy array support to msgpack.
|
| 4 |
+
|
| 5 |
+
msgpack is good for (de)serializing data over a network for multiple reasons:
|
| 6 |
+
- msgpack is secure (as opposed to pickle/dill/etc which allow for arbitrary code execution)
|
| 7 |
+
- msgpack is widely used and has good cross-language support
|
| 8 |
+
- msgpack does not require a schema (as opposed to protobuf/flatbuffers/etc) which is convenient in dynamically typed
|
| 9 |
+
languages like Python and JavaScript
|
| 10 |
+
- msgpack is fast and efficient (as opposed to readable formats like JSON/YAML/etc); I found that msgpack was ~4x faster
|
| 11 |
+
than pickle for serializing large arrays using the below strategy
|
| 12 |
+
|
| 13 |
+
The code below is adapted from GitHub - lebedov/msgpack-numpy: Serialize numpy arrays using msgpack. The reason not to use that library directly is
|
| 14 |
+
that it falls back to pickle for object arrays.
|
| 15 |
+
"""
|
| 16 |
+
|
| 17 |
+
import functools
|
| 18 |
+
|
| 19 |
+
import msgpack
|
| 20 |
+
import numpy as np
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def pack_array(obj):
    """msgpack `default` hook: encode numpy arrays/scalars as tagged dicts.

    Non-numpy objects pass through unchanged. Void ("V"), object ("O") and
    complex ("c") dtypes are rejected because they cannot be serialized
    portably from a raw byte buffer.
    """
    is_numpy = isinstance(obj, (np.ndarray, np.generic))
    if is_numpy and obj.dtype.kind in ("V", "O", "c"):
        raise ValueError(f"Unsupported dtype: {obj.dtype}")

    if isinstance(obj, np.ndarray):
        # Full array: raw buffer plus the metadata needed to rebuild it.
        encoded = {b"__ndarray__": True}
        encoded[b"data"] = obj.tobytes()
        encoded[b"dtype"] = obj.dtype.str
        encoded[b"shape"] = obj.shape
        return encoded

    if isinstance(obj, np.generic):
        # Numpy scalar: store the native Python value plus its dtype tag.
        return {b"__npgeneric__": True, b"data": obj.item(), b"dtype": obj.dtype.str}

    return obj
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def unpack_array(obj):
    """msgpack `object_hook`: rebuild numpy values encoded by `pack_array`.

    Dicts without either tag key are returned untouched.
    """
    if b"__ndarray__" in obj:
        dtype = np.dtype(obj[b"dtype"])
        # Reconstruct the array as a view over the received byte buffer.
        return np.ndarray(buffer=obj[b"data"], dtype=dtype, shape=obj[b"shape"])

    if b"__npgeneric__" in obj:
        # Scalar round-trip: coerce the stored Python value back to its dtype.
        return np.dtype(obj[b"dtype"]).type(obj[b"data"])

    return obj
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
# Drop-in replacements for the msgpack entry points with numpy support wired
# in via the hooks above. Callers can use these exactly like msgpack's own
# Packer/packb/Unpacker/unpackb.
Packer = functools.partial(msgpack.Packer, default=pack_array)
packb = functools.partial(msgpack.packb, default=pack_array)

Unpacker = functools.partial(msgpack.Unpacker, object_hook=unpack_array)
unpackb = functools.partial(msgpack.unpackb, object_hook=unpack_array)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
|
code/nohup.out
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:eaeca2f04bc98f1ee2b23467f1b027a9966e60021632905465d36f03d4a0c4e1
|
| 3 |
+
size 38751283
|
code/run.sh
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Runtime library paths for NVIDIA drivers and MuJoCo 2.1.0.
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/lib/nvidia
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/user/jfeng644/.mujoco/mujoco210/bin
# Quiet down Python / HF / accelerate logging.
export PYTHONLOGLEVEL=ERROR
export ACCELERATE_LOG_LEVEL=error
export TRANSFORMERS_VERBOSITY=error
export HF_DATASETS_VERBOSITY=error
# Headless GPU rendering for MuJoCo via EGL.
export MUJOCO_GL=egl
export PYOPENGL_PLATFORM=egl


# Stage-1 (video model) checkpoint used by training; alternatives kept below.
# export STAGE1_MODEL_PATH=/data/user/jfeng644/code/vpp/output/svd3/train_2025-10-21T03-58-20/checkpoint-64000
export STAGE1_MODEL_PATH=/data/user/jfeng644/code/vpp/output/svd3/livingellipse115move_studymove/singleview/train_2025-10-29T04-42-47/checkpoint-30000
# export STAGE1_MODEL_PATH=/data/user/jfeng644/code/vpp/output/svd/train_2025-10-03T03-11-11/checkpoint-80000
# export STAGE1_MODEL_PATH=/data/user/jfeng644/code/vpp/output/svd/vppmvlibero/checkpoint-80000
export GLOBAL_FRAME_NUM=13
# Map EGL rendering to the visible GPU: pick the physical GPU index from
# CUDA_VISIBLE_DEVICES and LOCAL_RANK. (Translated from the original Chinese.)
PREPARE_GPU=1,0
if [ -n "$PREPARE_GPU" ]; then
    IFS=',' read -r -a __CUDA_DEV_ARR <<< "$PREPARE_GPU"
    __LOCAL_IDX=${LOCAL_RANK:-0}
    export MUJOCO_EGL_DEVICE_ID=${__CUDA_DEV_ARR[$__LOCAL_IDX]}
fi
# python /hpc2hdd/home/jfeng644/anaconda3/envs/uva/lib/python3.9/site-packages/robosuite/scripts/setup_macros.py
# model.policy.autoregressive_model_params.pretrained_model_path=checkpoints/libero10.ckpt \
# python train.py \
# Launch 2-GPU training with accelerate; hydra overrides follow.
CUDA_VISIBLE_DEVICES=1,0 NCCL_ASYNC_ERROR_HANDLING=1 accelerate launch --num_processes=2 --main_process_port=29512 train.py \
    --config-dir=. \
    --config-name=vpp_xc.yaml \
    model.policy.action_model_params.predict_action=True \
    model.policy.optimizer.learning_rate=1e-4 \
    logging.project=vpp \
    hydra.run.dir="../xcres/siglip" \
# model.policy.autoregressive_model_params.pretrained_model_path=checkpoints/libero10.ckpt \
|
code/run0.sh
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Runtime library paths for NVIDIA drivers and MuJoCo 2.1.0.
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/lib/nvidia
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/user/jfeng644/.mujoco/mujoco210/bin
# Quiet down Python / HF / accelerate logging.
export PYTHONLOGLEVEL=ERROR
export ACCELERATE_LOG_LEVEL=error
export TRANSFORMERS_VERBOSITY=error
export HF_DATASETS_VERBOSITY=error
# Headless GPU rendering for MuJoCo via EGL.
export MUJOCO_GL=egl
export PYOPENGL_PLATFORM=egl


# Stage-1 (video model) checkpoint; previously-tried checkpoints kept below.
# export STAGE1_MODEL_PATH=/data/user/jfeng644/code/vpp/output/svd3/train_2025-10-21T03-58-20/checkpoint-64000
# export STAGE1_MODEL_PATH=/data/user/jfeng644/code/vpp/output/svd3/livingellipse115/train_2025-10-24T01-59-16/checkpoint-48000
# export STAGE1_MODEL_PATH=/data/user/jfeng644/code/vpp/output/svd/train_2025-10-03T03-11-11/checkpoint-80000
# export STAGE1_MODEL_PATH=/data/user/jfeng644/code/vpp/output/svd3/livingellipse115move_studymove/multiview/train_2025-10-29T04-05-39/checkpoint-50000
# export STAGE1_MODEL_PATH=/data/user/jfeng644/code/vpp/output/svd3/livingellipse115move_studymove/mvwan5frame/train_2025-10-30T18-50-53/checkpoint-30000
# export STAGE1_MODEL_PATH=/data/user/jfeng644/code/vpp/output/svd3/livingellipse115move_studymove/mvwan/train_2025-10-30T18-21-29/checkpoint-40000
# export STAGE1_MODEL_PATH=/data/user/jfeng644/code/vpp/output/svd3/livingellipse115move_studymove/mvwan13frame_continue/train_2025-11-02T19-43-17/checkpoint-60000
export STAGE1_MODEL_PATH=/data/user/jfeng644/code/vpp/output/svd3/livingellipse115/5view/checkpoint-56000
# export STAGE1_MODEL_PATH=/data/user/jfeng644/code/vpp/output/svd/vppmvlibero/checkpoint-80000
# export STAGE1_MODEL_PATH=/data/user/jfeng644/code/vpp/output/svd3/livingellipse115move_studymove/singleview/train_2025-10-29T04-42-47/checkpoint-20000

export GLOBAL_FRAME_NUM=5
# Map EGL rendering to the visible GPU: pick the physical GPU index from
# CUDA_VISIBLE_DEVICES and LOCAL_RANK. (Translated from the original Chinese.)
PREPARE_GPU=1,0
if [ -n "$PREPARE_GPU" ]; then
    IFS=',' read -r -a __CUDA_DEV_ARR <<< "$PREPARE_GPU"
    __LOCAL_IDX=${LOCAL_RANK:-0}
    export MUJOCO_EGL_DEVICE_ID=${__CUDA_DEV_ARR[$__LOCAL_IDX]}
fi
# python /hpc2hdd/home/jfeng644/anaconda3/envs/uva/lib/python3.9/site-packages/robosuite/scripts/setup_macros.py
# model.policy.autoregressive_model_params.pretrained_model_path=checkpoints/libero10.ckpt \
# python train.py \
# CUDA_VISIBLE_DEVICES=1,0 NCCL_ASYNC_ERROR_HANDLING=1 accelerate launch --num_processes=2 --main_process_port=29514
# Single-process training run; hydra overrides follow.
CUDA_VISIBLE_DEVICES=1,0 python train.py \
    --config-dir=. \
    --config-name=vpp_libero10.yaml \
    model.policy.action_model_params.predict_action=True \
    model.policy.optimizer.learning_rate=1e-4 \
    logging.project=vpp \
    hydra.run.dir="../manires/results/5frame_debug115ok_ckpt56" \
# model.policy.autoregressive_model_params.pretrained_model_path=checkpoints/libero10.ckpt \
|
code/serve_policy.py
ADDED
|
@@ -0,0 +1,297 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import dataclasses
|
| 2 |
+
import enum
|
| 3 |
+
import logging
|
| 4 |
+
import os
|
| 5 |
+
import pickle
|
| 6 |
+
import socket
|
| 7 |
+
import sys
|
| 8 |
+
import time
|
| 9 |
+
import traceback
|
| 10 |
+
|
| 11 |
+
import dill
|
| 12 |
+
import hydra
|
| 13 |
+
import numpy as np
|
| 14 |
+
import omegaconf
|
| 15 |
+
import torch
|
| 16 |
+
import torch.nn.functional as F
|
| 17 |
+
import tyro
|
| 18 |
+
|
| 19 |
+
from omegaconf import open_dict
|
| 20 |
+
from openpi.policies import policy as _policy
|
| 21 |
+
from openpi.policies import policy_config as _policy_config
|
| 22 |
+
from openpi.serving import websocket_policy_server
|
| 23 |
+
from openpi.training import config as _config
|
| 24 |
+
from openpi.training.config import get_data_config
|
| 25 |
+
|
| 26 |
+
from unified_video_action.common.pytorch_util import dict_apply
|
| 27 |
+
from unified_video_action.policy.base_image_policy import BaseImagePolicy
|
| 28 |
+
from unified_video_action.workspace.base_workspace import BaseWorkspace
|
| 29 |
+
from umi.real_world.real_inference_util import get_real_obs_resolution
|
| 30 |
+
|
| 31 |
+
# Pre-computed language embeddings keyed by task keyword (e.g. "cup", "towel",
# "mouse"), loaded once at import time. NOTE: pickle executes arbitrary code —
# this file must only ever come from our own data-preparation pipeline.
# FIX: use a context manager so the file handle is closed (the original
# passed a bare open() into pickle.load and leaked it).
with open("prepared_data/language_latents.pkl", "rb") as _latents_file:
    language_latents = pickle.load(_latents_file)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def echo_exception():
    """Return the currently-handled exception's traceback as a single string.

    Must be called from inside an `except` block; otherwise sys.exc_info()
    yields (None, None, None) and the result is empty.
    """
    exc_info = sys.exc_info()
    # format_exception yields newline-terminated fragments; join them whole.
    return "".join(traceback.format_exception(*exc_info))
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def smooth_action(act_out, window_size=3, pad_size=1):
    """Moving-average smoothing along the time axis of a (B, T, D) tensor.

    Replicate-pads `pad_size` steps at each end of the time dimension, then
    convolves each action channel with a uniform kernel of length
    `window_size`. Output shape equals the input shape when
    window_size == 2 * pad_size + 1 (the defaults).
    """
    kernel = (torch.ones(1, 1, window_size) / window_size).to(act_out.device)

    # Pad the time dimension (second-to-last) on both sides.
    padded = F.pad(act_out, (0, 0, pad_size, pad_size), mode="replicate")
    batch, t_padded, dim = padded.shape

    # Fold batch and action dims together so conv1d sees (B*D, 1, T).
    flat = padded.permute(0, 2, 1).reshape(-1, 1, t_padded)
    smoothed = F.conv1d(flat, kernel, padding=0)

    # Restore the (B, T, D) layout.
    smoothed = smoothed.reshape(batch, dim, t_padded - 2 * pad_size)
    return smoothed.permute(0, 2, 1)
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
class EvalRealPolicyAdapter:
    """Adapter to wrap eval_real.py PolicyInferenceNode as a Policy interface.

    Loads a UVA-style checkpoint (config + weights), instantiates the
    workspace named by the config, and exposes the openpi-style
    `metadata` / `infer(obs)` interface expected by the WebSocket server.
    """

    def __init__(self, ckpt_path: str, device: str, output_dir: str):
        # Accept either a run directory or a direct .ckpt file path.
        self.ckpt_path = ckpt_path
        if not self.ckpt_path.endswith(".ckpt"):
            self.ckpt_path = os.path.join(self.ckpt_path, "checkpoints", "latest.ckpt")

        # NOTE: pickle/dill deserialization executes code — checkpoints must
        # be trusted.
        payload = torch.load(open(self.ckpt_path, "rb"), map_location="cpu", pickle_module=dill)
        self.cfg = payload["cfg"]

        with open_dict(self.cfg):
            # Force a fixed number of diffusion sampling steps at inference.
            if "autoregressive_model_params" in self.cfg.model.policy:
                self.cfg.model.policy.autoregressive_model_params.num_sampling_steps = "100"
                print("-----------------------------------------------")
                print("num_sampling_steps", self.cfg.model.policy.autoregressive_model_params.num_sampling_steps)
                print("-----------------------------------------------")

        # Export the resolved config next to the checkpoint for inspection.
        cfg_path = self.ckpt_path.replace(".ckpt", ".yaml")
        with open(cfg_path, "w") as f:
            f.write(omegaconf.OmegaConf.to_yaml(self.cfg))
        print(f"Exported config to {cfg_path}")

        print(f"Loading configure: {self.cfg.task.name}, workspace: {self.cfg.model._target_}, policy: {self.cfg.model.policy._target_}")

        self.obs_res = get_real_obs_resolution(self.cfg.task.shape_meta)
        self.device = torch.device(device)

        # Instantiate the workspace class named by the config and restore weights.
        cls = hydra.utils.get_class(self.cfg.model._target_)
        self.workspace = cls(self.cfg, output_dir=output_dir)
        self.workspace: BaseWorkspace
        self.workspace.load_payload(payload, exclude_keys=None, include_keys=None)

        self.policy: BaseImagePolicy = self.workspace.model

        # Prefer EMA weights when the run trained with EMA enabled.
        if self.cfg.training.use_ema:
            self.policy = self.workspace.ema_model
            print("Using EMA model")

        self.policy.eval().to(self.device)
        self.policy.reset()

        # Note: past_action_list is shared across connections, but since WebSocket
        # processing is sequential per connection, this should work in practice
        self.past_action_list = []
        self._metadata = {"obs_resolution": self.obs_res}

    @property
    def metadata(self):
        # Static server metadata sent to clients on connect.
        return self._metadata

    def infer(self, obs: dict) -> dict:
        """Infer action from observation. Returns dict with 'actions' key."""
        # assumes obs maps observation keys to numpy arrays — TODO confirm
        # against the client-side encoding.
        obs_dict_np = obs.copy()
        task_name = None

        # Pop the task name out of the observation dict before tensor conversion.
        if "task_name" in obs_dict_np:
            task_name = obs_dict_np["task_name"]
            print("task_name", obs_dict_np["task_name"])
            del obs_dict_np["task_name"]

        # Map the task name to a precomputed language embedding (keyword match).
        if self.cfg.task.dataset.language_emb_model is not None and task_name:
            if "cup" in task_name:
                language_goal = language_latents["cup"]
            elif "towel" in task_name:
                language_goal = language_latents["towel"]
            elif "mouse" in task_name:
                language_goal = language_latents["mouse"]
            else:
                language_goal = None
            if language_goal is not None:
                language_goal = torch.tensor(language_goal).to(self.device)
                # Add a batch dimension.
                language_goal = language_goal.unsqueeze(0)
            print("task_name", task_name)
        else:
            language_goal = None

        with torch.no_grad():
            # Batch each observation array and move it to the policy device.
            obs_dict = dict_apply(
                obs_dict_np, lambda x: torch.from_numpy(x).unsqueeze(0).to(self.device)
            )

            if self.cfg.name == "uva":
                result = self.policy.predict_action(
                    obs_dict=obs_dict, language_goal=language_goal
                )

                # Keep a short rolling history of executed actions (max 2).
                self.past_action_list.append(np.array(result["action"][0].cpu()))
                if len(self.past_action_list) > 2:
                    self.past_action_list.pop(0)
                # Temporal smoothing of the full predicted action sequence.
                action = smooth_action(result["action_pred"].detach().to("cpu")).numpy()[0]
            else:
                result = self.policy.predict_action(
                    obs_dict, language_goal=language_goal
                )
                action = result["action_pred"][0].detach().to("cpu").numpy()
                print("action")

        # Drop references promptly to release GPU memory.
        del result
        del obs_dict

        return {"actions": action}
|
| 160 |
+
|
| 161 |
+
class EnvMode(enum.Enum):
    """Supported environments."""

    ALOHA = "aloha"
    ALOHA_SIM = "aloha_sim"
    DROID = "droid"
    LIBERO = "libero"
    # Real-robot deployment (served via EvalRealCheckpoint).
    REAL = "real"


@dataclasses.dataclass
class Checkpoint:
    """Load a policy from a trained checkpoint."""

    # Training config name (e.g., "pi0_aloha_sim").
    data_config: str
    # Checkpoint directory (e.g., "checkpoints/pi0_aloha_sim/exp/10000").
    dir: str | None = None


@dataclasses.dataclass
class EvalRealCheckpoint:
    """Load a policy from eval_real.py style checkpoint."""

    # Checkpoint path (directory or .ckpt file).
    dir: str
    # Device to run on.
    device: str = "cuda"
    # Output directory.
    output_dir: str = "."


@dataclasses.dataclass
class Default:
    """Use the default policy for the given environment."""


@dataclasses.dataclass
class Args:
    """Arguments for the serve_policy script."""

    # Environment to serve the policy for. This is only used when serving default policies.
    env: EnvMode = EnvMode.ALOHA_SIM

    # If provided, will be used in case the "prompt" key is not present in the data, or if the model doesn't have a default
    # prompt.
    default_prompt: str | None = None

    # Port to serve the policy on.
    port: int = 8012
    # Record the policy's behavior for debugging.
    record: bool = False

    # Specifies how to load the policy. If not provided, the default policy for the environment will be used.
    # tyro resolves this union from the CLI subcommand.
    policy: Checkpoint | EvalRealCheckpoint | Default = dataclasses.field(default_factory=Default)

    # use_vllm: bool = False
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
# Default checkpoints that should be used for each environment.
|
| 222 |
+
# DEFAULT_CHECKPOINT: dict[EnvMode, Checkpoint] = {
|
| 223 |
+
# EnvMode.ALOHA: Checkpoint(
|
| 224 |
+
# config="pi0_aloha",
|
| 225 |
+
# dir="s3://openpi-assets/checkpoints/pi0_base",
|
| 226 |
+
# ),
|
| 227 |
+
# EnvMode.ALOHA_SIM: Checkpoint(
|
| 228 |
+
# config="pi0_aloha_sim",
|
| 229 |
+
# dir="s3://openpi-assets/checkpoints/pi0_aloha_sim",
|
| 230 |
+
# ),
|
| 231 |
+
# EnvMode.DROID: Checkpoint(
|
| 232 |
+
# config="pi0_fast_droid",
|
| 233 |
+
# dir="s3://openpi-assets/checkpoints/pi0_fast_droid",
|
| 234 |
+
# ),
|
| 235 |
+
# EnvMode.LIBERO: Checkpoint(
|
| 236 |
+
# config="pi0_fast_libero",
|
| 237 |
+
# dir="s3://openpi-assets/checkpoints/pi0_fast_libero",
|
| 238 |
+
# ),
|
| 239 |
+
# }
|
| 240 |
+
|
| 241 |
+
# def create_default_policy(env: EnvMode, *, default_prompt: str | None = None) -> _policy.Policy:
|
| 242 |
+
# """Create a default policy for the given environment."""
|
| 243 |
+
# if checkpoint := DEFAULT_CHECKPOINT.get(env):
|
| 244 |
+
# return _policy_config.create_trained_policy(
|
| 245 |
+
# _config.get_config(checkpoint.config), checkpoint.dir, default_prompt=default_prompt
|
| 246 |
+
# )
|
| 247 |
+
# raise ValueError(f"Unsupported environment mode: {env}")
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
def create_policy(args: Args):
|
| 251 |
+
"""Create a policy from the given arguments."""
|
| 252 |
+
match args.policy:
|
| 253 |
+
case EvalRealCheckpoint():
|
| 254 |
+
return EvalRealPolicyAdapter(
|
| 255 |
+
ckpt_path=args.policy.dir,
|
| 256 |
+
device=args.policy.device,
|
| 257 |
+
output_dir=args.policy.output_dir,
|
| 258 |
+
)
|
| 259 |
+
case Checkpoint():
|
| 260 |
+
import pathlib
|
| 261 |
+
import openpi.shared.normalize as _normalize
|
| 262 |
+
# _train_config = _config.get_config(args.policy.config)
|
| 263 |
+
_data_config: _config.DataConfig = get_data_config(args.policy.data_config)
|
| 264 |
+
norm_stats = _data_config.norm_stats
|
| 265 |
+
return _policy_config.create_trained_policy(
|
| 266 |
+
_data_config, args.policy.dir, default_prompt=args.default_prompt, norm_stats=norm_stats, use_vllm=_data_config.inference_use_vllm
|
| 267 |
+
)
|
| 268 |
+
case Default():
|
| 269 |
+
raise NotImplementedError("Default policies are not yet supported.")
|
| 270 |
+
# return create_default_policy(args.env, default_prompt=args.default_prompt)
|
| 271 |
+
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
def main(args: Args) -> None:
    """Build the policy selected by `args` and serve it over WebSocket."""
    policy = create_policy(args)
    policy_metadata = policy.metadata

    # Record the policy's behavior.
    if args.record:
        policy = _policy.PolicyRecorder(policy, "policy_records")

    # hostname = socket.gethostname()
    # local_ip = socket.gethostbyname(hostname)
    # logging.info("Creating server (host: %s, ip: %s)", hostname, local_ip)

    # Bind on all interfaces; blocks until killed.
    server = websocket_policy_server.WebsocketPolicyServer(
        policy=policy,
        host="0.0.0.0",
        port=args.port,
        metadata=policy_metadata,
    )
    server.serve_forever()


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO, force=True)
    main(tyro.cli(Args))
|
code/setup.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from setuptools import setup, find_packages

# Minimal packaging stub: exposes the `unified_video_action` package
# (e.g. for `pip install -e .`); no further metadata is declared here.
setup(
    name="unified_video_action",
    packages=find_packages(),
)
|
code/train.py
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Usage:
|
| 3 |
+
Training:
|
| 4 |
+
python train.py --config-name=train_diffusion_lowdim_workspace
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
import os
|
| 9 |
+
import sys
|
| 10 |
+
import hydra
|
| 11 |
+
from omegaconf import OmegaConf
|
| 12 |
+
import pathlib
|
| 13 |
+
from unified_video_action.workspace.base_workspace import BaseWorkspace
|
| 14 |
+
from omegaconf import open_dict
|
| 15 |
+
|
| 16 |
+
# allows arbitrary python code execution in configs using the ${eval:''} resolver
# NOTE(review): this is an eval-on-config-text hook — configs must be trusted.
OmegaConf.register_new_resolver("eval", eval, replace=True)


import wandb

# Authenticate to Weights & Biases non-interactively when a key is provided
# via the environment (e.g. on cluster jobs without a cached login).
if "WANDB_API_KEY" in os.environ:
    wandb.login(key=os.environ["WANDB_API_KEY"])
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
@hydra.main(
    version_base=None,
    config_path=str(
        pathlib.Path(__file__).parent.joinpath("unified_video_action", "config")
    ),
)
def main(cfg: OmegaConf):
    """Hydra entry point: finalize the config and run the training workspace."""
    # Resolve immediately so remaining overrides work on concrete values.
    OmegaConf.resolve(cfg)

    # Video-only training (no action head): checkpoint on FVD instead of score.
    if cfg.model.policy.action_model_params.predict_action == False:
        cfg.checkpoint.topk.monitor_key = "video_fvd"
        cfg.checkpoint.topk.format_str = (
            "epoch={epoch:04d}-video_fvd={video_fvd:.3f}.ckpt"
        )
        cfg.checkpoint.topk.mode = "min"

    with open_dict(cfg):
        cfg.n_gpus = torch.cuda.device_count()
        cfg.model.policy.debug = cfg.training.debug

        # Debug mode: shrink batches, disable shuffling, shorten rollouts so a
        # full train/eval cycle runs quickly and deterministically.
        if cfg.training.debug:
            cfg.dataloader.batch_size = 2
            cfg.val_dataloader.batch_size = 2
            cfg.dataloader.shuffle = False
            cfg.val_dataloader.shuffle = False

            if "env_runner" in cfg.task:
                cfg.task.env_runner.max_steps = 20

            if "dataloader_cfg" in cfg.task.dataset:
                cfg.task.dataset.dataloader_cfg.batch_size = 2

    # Instantiate the workspace class named by the config and start training.
    cls = hydra.utils.get_class(cfg.model._target_)
    workspace: BaseWorkspace = cls(cfg)
    workspace.run()
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
if __name__ == "__main__":
    print(sys.argv)
    # Strip deepspeed's injected --local_rank argument before hydra parses argv.
    # FIX: the original removed elements from sys.argv while iterating over it,
    # which skips the element following each removal; rebuild the list instead.
    sys.argv = [arg for arg in sys.argv if "local_rank" not in arg]  # For deepspeed compatibility
    main()
|
code/umi/asset/mask.json
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"mirror_mask_pts":[
|
| 3 |
+
[540, 1700],
|
| 4 |
+
[680, 1450],
|
| 5 |
+
[590, 1070],
|
| 6 |
+
[290, 1130],
|
| 7 |
+
[290, 1770],
|
| 8 |
+
[550, 1770]
|
| 9 |
+
],
|
| 10 |
+
"gripper_mask_pts": [
|
| 11 |
+
[1100, 1700],
|
| 12 |
+
[650, 1500],
|
| 13 |
+
[0, 1350],
|
| 14 |
+
[520, 2028]
|
| 15 |
+
],
|
| 16 |
+
"resolution": [2028, 2704]
|
| 17 |
+
}
|
code/umi/common/__pycache__/cv_util.cpython-310.pyc
ADDED
|
Binary file (11.6 kB). View file
|
|
|
code/umi/common/__pycache__/cv_util.cpython-39.pyc
ADDED
|
Binary file (11.6 kB). View file
|
|
|
code/umi/common/__pycache__/pose_util.cpython-310.pyc
ADDED
|
Binary file (4.12 kB). View file
|
|
|
code/umi/common/__pycache__/pose_util.cpython-39.pyc
ADDED
|
Binary file (4.11 kB). View file
|
|
|
code/umi/common/cv_util.py
ADDED
|
@@ -0,0 +1,461 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict, Tuple
|
| 2 |
+
|
| 3 |
+
import json
|
| 4 |
+
import pathlib
|
| 5 |
+
import math
|
| 6 |
+
import copy
|
| 7 |
+
import numpy as np
|
| 8 |
+
import cv2
|
| 9 |
+
import scipy.interpolate as si
|
| 10 |
+
|
| 11 |
+
# =================== intrinsics ===================
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def parse_fisheye_intrinsics(json_data: dict) -> Dict[str, np.ndarray]:
    """
    Convert camera intrinsics produced by OpenCameraImuCalibration into the
    OpenCV fisheye convention: image size ``DIM`` as (width, height), camera
    matrix ``K`` (3x3) and Kannala-Brandt distortion coefficients ``D``
    with shape (4, 1).

    Example input:
    {
        "final_reproj_error": 0.17053819312281043,
        "fps": 60.0,
        "image_height": 1080,
        "image_width": 1920,
        "intrinsic_type": "FISHEYE",
        "intrinsics": {
            "aspect_ratio": 1.0026582765352035,
            "focal_length": 420.56809123853304,
            "principal_pt_x": 959.857586309181,
            "principal_pt_y": 542.8155851051391,
            "radial_distortion_1": -0.011968137016185161,
            "radial_distortion_2": -0.03929790706019372,
            "radial_distortion_3": 0.018577224235396064,
            "radial_distortion_4": -0.005075629959840777,
            "skew": 0.0
        },
        "nr_calib_images": 129,
        "stabelized": false
    }
    """
    assert json_data["intrinsic_type"] == "FISHEYE"
    intr = json_data["intrinsics"]

    # image size
    width = json_data["image_width"]
    height = json_data["image_height"]

    # pinhole parameters
    focal = intr["focal_length"]
    cx = intr["principal_pt_x"]
    cy = intr["principal_pt_y"]

    # Kannala-Brandt (kb8) radial distortion terms as a column vector,
    # which is the shape OpenCV's fisheye API expects
    dist = np.array(
        [[intr[f"radial_distortion_{i}"]] for i in range(1, 5)],
        dtype=np.float64,
    )

    camera_matrix = np.zeros((3, 3), dtype=np.float64)
    camera_matrix[0, 0] = focal
    camera_matrix[1, 1] = focal
    camera_matrix[0, 2] = cx
    camera_matrix[1, 2] = cy
    camera_matrix[2, 2] = 1.0

    return {
        "DIM": np.array([width, height], dtype=np.int64),
        "K": camera_matrix,
        "D": dist,
    }
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def convert_fisheye_intrinsics_resolution(
    opencv_intr_dict: Dict[str, np.ndarray], target_resolution: Tuple[int, int]
) -> Dict[str, np.ndarray]:
    """
    Rescale fisheye intrinsics to a different resolution, assuming the image
    is never cropped vertically and only symmetrically cropped/padded
    horizontally.
    """
    in_w, in_h = opencv_intr_dict["DIM"]
    in_K = opencv_intr_dict["K"]

    out_w, out_h = target_resolution
    # the vertical dimension is uncropped, so it defines the scale factor
    scale = out_h / in_h

    out_K = np.array(
        [
            [in_K[0, 0] * scale, 0, (in_K[0, 2] - in_w / 2) * scale + out_w / 2],
            [0, in_K[1, 1] * scale, in_K[1, 2] * scale],
            [0, 0, 1],
        ],
        dtype=np.float64,
    )

    result = copy.deepcopy(opencv_intr_dict)
    result["DIM"] = np.array([out_w, out_h], dtype=np.int64)
    result["K"] = out_K
    return result
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
class FisheyeRectConverter:
    """Undistorts fisheye images into a rectilinear (pinhole) view.

    The remap tables are precomputed once in __init__, so per-frame
    conversion via forward() is a single cv2.remap call.
    """

    def __init__(self, K, D, DIM, out_size, out_fov):
        # K, D: fisheye camera matrix and Kannala-Brandt distortion (OpenCV
        #       fisheye convention).
        # DIM: input image size — NOTE(review): not used here; presumably
        #      kept for caller-side symmetry with the intrinsics dict.
        # out_size: (width, height) of the rectified output image.
        # out_fov: vertical field of view of the output, in degrees.
        out_size = np.array(out_size)
        # vertical fov
        # focal length that realizes out_fov over the output height
        out_f = (out_size[1] / 2) / np.tan(out_fov / 180 * np.pi / 2)
        out_K = np.array(
            [[out_f, 0, out_size[0] / 2], [0, out_f, out_size[1] / 2], [0, 0, 1]],
            dtype=np.float32,
        )
        # precompute the fisheye -> pinhole remap tables (identity rotation)
        map1, map2 = cv2.fisheye.initUndistortRectifyMap(
            K, D, np.eye(3), out_K, out_size, cv2.CV_16SC2
        )

        self.map1 = map1
        self.map2 = map2

    def forward(self, img):
        """Rectify one fisheye image; returns the undistorted image."""
        rect_img = cv2.remap(
            img,
            self.map1,
            self.map2,
            interpolation=cv2.INTER_AREA,
            borderMode=cv2.BORDER_CONSTANT,
        )
        return rect_img
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
# ================= ArUcO tag =====================
|
| 123 |
+
def parse_aruco_config(aruco_config_dict: dict):
    """
    Build an aruco dictionary plus a per-marker physical size map from config.

    example:
        aruco_dict:
            predefined: DICT_4X4_50
        marker_size_map: # all unit in meters
            default: 0.15
            12: 0.2
    """
    aruco_dict = get_aruco_dict(**aruco_config_dict["aruco_dict"])

    marker_size_map = aruco_config_dict["marker_size_map"]
    default_size = marker_size_map.get("default", None)

    # every marker id defined by the dictionary gets an entry; ids without an
    # explicit size fall back to the default
    out_marker_size_map = {
        marker_id: marker_size_map.get(marker_id, default_size)
        for marker_id in range(len(aruco_dict.bytesList))
    }

    return {"aruco_dict": aruco_dict, "marker_size_map": out_marker_size_map}
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
def get_aruco_dict(predefined: str) -> cv2.aruco.Dictionary:
    # Look up one of OpenCV's predefined aruco dictionaries by its constant
    # name, e.g. "DICT_4X4_50".
    return cv2.aruco.getPredefinedDictionary(getattr(cv2.aruco, predefined))
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
def detect_localize_aruco_tags(
    img: np.ndarray,
    aruco_dict: cv2.aruco.Dictionary,
    marker_size_map: Dict[int, float],
    fisheye_intr_dict: Dict[str, np.ndarray],
    refine_subpix: bool = True,
):
    """Detect aruco tags in a fisheye image and estimate each tag's 6-DoF pose.

    Corners are first undistorted with the fisheye model, then the pose is
    estimated with zero distortion (the fisheye distortion has already been
    removed at that point).

    Args:
        img: input image (fisheye, not rectified).
        aruco_dict: dictionary of tags to search for.
        marker_size_map: tag id -> physical edge length in meters; ids not in
            this map are skipped.
        fisheye_intr_dict: intrinsics dict with "K" and "D" (see
            parse_fisheye_intrinsics).
        refine_subpix: enable subpixel corner refinement.

    Returns:
        dict mapping tag id -> {"rvec", "tvec", "corners"}; empty when no
        tag was detected.
    """
    K = fisheye_intr_dict["K"]
    D = fisheye_intr_dict["D"]
    param = cv2.aruco.DetectorParameters()
    if refine_subpix:
        param.cornerRefinementMethod = cv2.aruco.CORNER_REFINE_SUBPIX
    corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
        image=img, dictionary=aruco_dict, parameters=param
    )
    if len(corners) == 0:
        return dict()

    tag_dict = dict()
    for this_id, this_corners in zip(ids, corners):
        this_id = int(this_id[0])
        # ignore tags whose physical size is unknown
        if this_id not in marker_size_map:
            continue

        marker_size_m = marker_size_map[this_id]
        # remove fisheye distortion; P=K keeps points in pixel coordinates
        undistorted = cv2.fisheye.undistortPoints(this_corners, K, D, P=K)
        # distortion already removed above, hence the zero distortion vector
        rvec, tvec, markerPoints = cv2.aruco.estimatePoseSingleMarkers(
            undistorted, marker_size_m, K, np.zeros((1, 5))
        )
        tag_dict[this_id] = {
            "rvec": rvec.squeeze(),
            "tvec": tvec.squeeze(),
            "corners": this_corners.squeeze(),
        }
    return tag_dict
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
def get_charuco_board(
    aruco_dict=cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_100),
    tag_id_offset=50,
    grid_size=(8, 5),
    square_length_mm=50,
    tag_length_mm=30,
):
    """Build a charuco calibration board.

    tag_id_offset skips the first ids of the dictionary so the board's tags
    do not collide with ids used elsewhere. NOTE(review): the default
    aruco_dict argument is evaluated once at import time, so all callers
    using the default share one dictionary object — confirm this is intended.
    """

    # re-index the dictionary so the board's tag ids start at tag_id_offset
    aruco_dict = cv2.aruco.Dictionary(
        aruco_dict.bytesList[tag_id_offset:], aruco_dict.markerSize
    )
    board = cv2.aruco.CharucoBoard(
        size=grid_size,
        squareLength=square_length_mm / 1000,  # OpenCV expects meters
        markerLength=tag_length_mm / 1000,
        dictionary=aruco_dict,
    )
    return board
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
def draw_charuco_board(board, dpi=300, padding_mm=15):
    """Render a charuco board image sized for printing at the given dpi.

    The pixel size is chosen so that, printed at `dpi`, squares come out at
    their physical size with `padding_mm` of margin on each side.
    """
    grid_size = np.array(board.getChessboardSize())
    square_length_mm = board.getSquareLength() * 1000  # board API is in meters

    mm_per_inch = 25.4
    board_size_pixel = (
        (grid_size * square_length_mm + padding_mm * 2) / mm_per_inch * dpi
    )
    board_size_pixel = board_size_pixel.round().astype(np.int64)
    padding_pixel = int(padding_mm / mm_per_inch * dpi)
    board_img = board.generateImage(outSize=board_size_pixel, marginSize=padding_pixel)
    return board_img
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
def get_gripper_width(tag_dict, left_id, right_id, nominal_z=0.072, z_tolerance=0.008):
    """
    Estimate the gripper opening width from the finger tags' x offsets.

    A detection is used only when its depth (tvec z) lies within
    z_tolerance of nominal_z, which filters pose-estimation outliers.
    When only one finger tag is visible, the width is inferred by symmetry
    about the camera axis. Returns None when neither tag is usable.
    """

    def _valid_x(tag_id):
        # x coordinate of a tag, or None when missing / depth out of range
        if tag_id not in tag_dict:
            return None
        tvec = tag_dict[tag_id]["tvec"]
        if not (nominal_z - z_tolerance < tvec[-1] < nominal_z + z_tolerance):
            return None
        return tvec[0]

    left_x = _valid_x(left_id)
    right_x = _valid_x(right_id)

    if left_x is not None and right_x is not None:
        return right_x - left_x
    if left_x is not None:
        return abs(left_x) * 2
    if right_x is not None:
        return abs(right_x) * 2
    return None
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
# =========== image mask ====================
|
| 252 |
+
def canonical_to_pixel_coords(coords, img_shape=(2028, 2704)):
    """
    Map resolution-independent canonical coordinates to pixel coordinates.

    Canonical coordinates are centered on the image center and scaled by the
    image height (img_shape[0]), making them comparable across resolutions.
    """
    center = np.array(img_shape[::-1]) * 0.5  # (cx, cy) in pixels
    return np.asarray(coords) * img_shape[0] + center
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
def pixel_coords_to_canonical(pts, img_shape=(2028, 2704)):
    """
    Inverse of canonical_to_pixel_coords: pixel coordinates to canonical
    (center-origin, height-normalized) coordinates.
    """
    center = np.array(img_shape[::-1]) * 0.5
    return (np.asarray(pts) - center) / img_shape[0]
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
def draw_canonical_polygon(img: np.ndarray, coords: np.ndarray, color: tuple):
    """
    Fill polygon(s) given in canonical coordinates onto img (in place).

    Args:
        img: image to draw on (modified in place).
        coords: a single polygon of shape (N, 2) or a batch of polygons of
            shape (M, N, 2) in canonical coordinates.
        color: fill color.

    Returns:
        the (modified) image.
    """
    pts = canonical_to_pixel_coords(coords, img.shape[:2])
    pts = np.round(pts).astype(np.int32)
    if pts.ndim == 2:
        # cv2.fillPoly expects a sequence of polygons; the original call
        # passed a bare (N, 2) array, which OpenCV rejects — wrap it.
        pts = pts[None, ...]
    cv2.fillPoly(img, list(pts), color=color)
    return img
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
def get_mirror_canonical_polygon():
    """
    Canonical-coordinate polygons covering the two side mirrors.

    The polygon is authored in pixels for a 2028x2704 reference frame (left
    mirror); the right mirror is its horizontal reflection.
    Returns an array of shape (2, 6, 2).
    """
    reference_resolution = [2028, 2704]
    left_pixels = [
        [540, 1700],
        [680, 1450],
        [590, 1070],
        [290, 1130],
        [290, 1770],
        [550, 1770],
    ]
    left = pixel_coords_to_canonical(left_pixels, reference_resolution)
    # mirror horizontally about the image center for the right side
    right = left.copy()
    right[:, 0] = -right[:, 0]
    return np.stack([left, right])
|
| 284 |
+
|
| 285 |
+
|
| 286 |
+
def get_mirror_crop_slices(img_shape=(1080, 1920), left=True):
    """
    Axis-aligned crop (row_slice, col_slice) around one mirror region,
    rescaled from the 2028x2704 reference frame to img_shape.
    """
    reference_resolution = [2028, 2704]
    corner_pixels = [[290, 1120], [650, 1480]]
    coords = pixel_coords_to_canonical(corner_pixels, reference_resolution)
    if not left:
        # reflect horizontally for the right-hand mirror
        coords[:, 0] = -coords[:, 0]
    pts = canonical_to_pixel_coords(coords, img_shape=img_shape)
    pts = np.round(pts).astype(np.int32)
    row_slice = slice(np.min(pts[:, 1]), np.max(pts[:, 1]))
    col_slice = slice(np.min(pts[:, 0]), np.max(pts[:, 0]))
    return row_slice, col_slice
|
| 299 |
+
|
| 300 |
+
|
| 301 |
+
def get_gripper_canonical_polygon():
    """
    Canonical-coordinate polygons covering the gripper body on both image
    sides. Authored in pixels for a 2028x2704 reference frame (left side);
    the right side is its horizontal reflection. Returns (2, 6, 2).
    """
    reference_resolution = [2028, 2704]
    left_pixels = [
        [1352, 1730],
        [1100, 1700],
        [650, 1500],
        [0, 1350],
        [0, 2028],
        [1352, 2704],
    ]
    left = pixel_coords_to_canonical(left_pixels, reference_resolution)
    # mirror horizontally for the right side
    right = left.copy()
    right[:, 0] = -right[:, 0]
    return np.stack([left, right])
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
def get_finger_canonical_polygon(height=0.37, top_width=0.25, bottom_width=1.4):
    """
    Canonical-coordinate trapezoid covering the gripper fingers at the
    bottom of the image.

    All parameters are fractions of the image height. Returns (1, 4, 2).
    """
    resolution = [2028, 2704]
    img_h, img_w = resolution

    # trapezoid in height-normalized units, centered horizontally,
    # then scaled to reference-frame pixels (x is also scaled by height,
    # matching the canonical-coordinate convention)
    center_x = (img_w / img_h) / 2.0
    top_y = (1.0 - height) * img_h
    bottom_y = 1.0 * img_h
    top_left = (center_x - top_width / 2.0) * img_h
    top_right = (center_x + top_width / 2.0) * img_h
    bottom_left = (center_x - bottom_width / 2.0) * img_h
    bottom_right = (center_x + bottom_width / 2.0) * img_h

    # single polygon, wrapped for the opencv fillPoly convention
    points = [
        [
            [bottom_left, bottom_y],
            [top_left, top_y],
            [top_right, top_y],
            [bottom_right, bottom_y],
        ]
    ]
    return pixel_coords_to_canonical(points, img_shape=resolution)
|
| 351 |
+
|
| 352 |
+
|
| 353 |
+
def draw_predefined_mask(
    img, color=(0, 0, 0), mirror=True, gripper=True, finger=True, use_aa=False
):
    """Paint the fixed mirror/gripper/finger occlusion regions onto img.

    Args:
        img: image to draw on (modified in place).
        color: fill color for the masked regions.
        mirror, gripper, finger: which predefined regions to mask.
        use_aa: anti-aliased polygon edges.

    Returns:
        the (modified) image.
    """
    all_coords = list()
    if mirror:
        all_coords.extend(get_mirror_canonical_polygon())
    if gripper:
        all_coords.extend(get_gripper_canonical_polygon())
    if finger:
        all_coords.extend(get_finger_canonical_polygon())

    for coords in all_coords:
        # canonical coords scale to whatever resolution img actually has
        pts = canonical_to_pixel_coords(coords, img.shape[:2])
        pts = np.round(pts).astype(np.int32)
        flag = cv2.LINE_AA if use_aa else cv2.LINE_8
        cv2.fillPoly(img, [pts], color=color, lineType=flag)
    return img
|
| 370 |
+
|
| 371 |
+
|
| 372 |
+
def get_gripper_with_finger_mask(
    img, height=0.37, top_width=0.25, bottom_width=1.4, color=(0, 0, 0)
):
    """
    Paint a trapezoidal mask over the gripper fingers at the bottom of img.

    height/top_width/bottom_width are fractions of the image height.
    Modifies img in place and returns it.
    """
    img_h = img.shape[0]
    img_w = img.shape[1]

    # trapezoid in height-normalized units, centered horizontally,
    # then scaled to pixels (x is also scaled by height, matching the
    # canonical-coordinate convention used elsewhere in this module)
    center_x = (img_w / img_h) / 2.0
    top_y = (1.0 - height) * img_h
    bottom_y = 1.0 * img_h
    top_left = (center_x - top_width / 2.0) * img_h
    top_right = (center_x + top_width / 2.0) * img_h
    bottom_left = (center_x - bottom_width / 2.0) * img_h
    bottom_right = (center_x + bottom_width / 2.0) * img_h

    # single polygon, wrapped for the opencv fillPoly convention
    polygon = np.array(
        [
            [
                [bottom_left, bottom_y],
                [top_left, top_y],
                [top_right, top_y],
                [bottom_right, bottom_y],
            ]
        ],
        dtype=np.int32,
    )

    img = cv2.fillPoly(img, polygon, color=color, lineType=cv2.LINE_AA)
    return img
|
| 411 |
+
|
| 412 |
+
|
| 413 |
+
def inpaint_tag(img, corners, tag_scale=1.4, n_samples=16):
    """Paint over a detected tag with the median color of its surroundings.

    The corner quad is enlarged by tag_scale about its center, the median
    color is sampled along the enlarged boundary, and the enlarged quad is
    filled with that color (modifies img in place).

    Args:
        img: image containing the tag.
        corners: (4, 2) pixel coordinates of the tag corners.
        tag_scale: enlargement factor so the fill also covers the tag border.
        n_samples: number of boundary pixels used for the median.
    """
    # scale corners with respect to geometric center
    center = np.mean(corners, axis=0)
    scaled_corners = tag_scale * (corners - center) + center

    # sample pixels on the boundary to obtain median color
    # (the closed quad is parameterized on [0, 4] — one unit per edge —
    # and sampled at n_samples evenly spaced points)
    sample_points = si.interp1d(
        [0, 1, 2, 3, 4], list(scaled_corners) + [scaled_corners[0]], axis=0
    )(np.linspace(0, 4, n_samples)).astype(np.int32)
    # clip so samples near the image border stay in bounds
    sample_colors = img[
        np.clip(sample_points[:, 1], 0, img.shape[0] - 1),
        np.clip(sample_points[:, 0], 0, img.shape[1] - 1),
    ]
    median_color = np.median(sample_colors, axis=0).astype(img.dtype)

    # draw tag with median color
    img = cv2.fillPoly(
        img, scaled_corners[None, ...].astype(np.int32), color=median_color.tolist()
    )
    return img
|
| 433 |
+
|
| 434 |
+
|
| 435 |
+
# =========== other utils ====================
|
| 436 |
+
def get_image_transform(
    in_res, out_res, crop_ratio: float = 1.0, bgr_to_rgb: bool = False
):
    """
    Build a function that center-crops and resizes images.

    The crop height is crop_ratio of the input height; the crop width is
    chosen so the crop already has the output aspect ratio. Resolutions are
    given as (width, height).

    Args:
        in_res: expected input resolution (w, h).
        out_res: output resolution (w, h).
        crop_ratio: fraction of the input height kept by the center crop.
        bgr_to_rgb: reverse the channel order while cropping.

    Returns:
        transform(img) -> cropped and resized image.
    """
    in_w, in_h = in_res
    out_w, out_h = out_res
    crop_h = round(in_h * crop_ratio)
    # width such that crop_w / crop_h == out_w / out_h
    crop_w = round(in_h * crop_ratio / out_h * out_w)

    w_start = (in_w - crop_w) // 2
    h_start = (in_h - crop_h) // 2
    w_slice = slice(w_start, w_start + crop_w)
    h_slice = slice(h_start, h_start + crop_h)
    # channel reversal implements BGR <-> RGB without a copy
    c_slice = slice(None, None, -1) if bgr_to_rgb else slice(None)

    def transform(img: np.ndarray):
        assert img.shape == (in_h, in_w, 3)
        cropped = img[h_slice, w_slice, c_slice]
        return cv2.resize(cropped, out_res, interpolation=cv2.INTER_AREA)

    return transform
|
code/umi/common/exiftool_util.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from exiftool import ExifToolHelper
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def get_videos_metadata(
    video_paths, keys=("QuickTime:CameraSerialNumber", "QuickTime:Model")
):
    """Read selected metadata tags from video files via exiftool.

    Args:
        video_paths: iterable of video file paths.
        keys: metadata tag names to extract for every video. (Changed from a
            mutable list default to a tuple — best practice; read-only use,
            so behavior is unchanged.)

    Returns:
        dict mapping source file path -> {tag name: value}.

    Raises:
        KeyError: when a requested tag is missing from a video's metadata.
    """
    results = dict()
    with ExifToolHelper() as et:
        for meta in et.get_metadata(video_paths):
            results[meta["SourceFile"]] = {key: meta[key] for key in keys}
    return results
|
code/umi/common/interpolation_util.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import scipy.interpolate as si
|
| 3 |
+
import scipy.spatial.transform as st
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def get_interp1d(t, x):
    """
    Linear interpolator over time t for samples x (interpolated along
    axis 0). Queries outside [t[0], t[-1]] are clamped to the endpoint
    values instead of raising.
    """
    return si.interp1d(
        t, x, axis=0, bounds_error=False, fill_value=(x[0], x[-1])
    )
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class PoseInterpolator:
    """
    Interpolates 6-DoF poses (position + rotation-vector) over time:
    linear interpolation for position, slerp for rotation. Query times
    outside the sample range are clamped to the endpoints.
    """

    def __init__(self, t, x):
        # x: (N, 6) array — columns 0:3 position, 3:6 axis-angle rotation
        self.pos_interp = get_interp1d(t, x[:, :3])
        self.rot_interp = st.Slerp(t, st.Rotation.from_rotvec(x[:, 3:]))

    @property
    def x(self):
        # timestamps of the underlying samples
        return self.pos_interp.x

    def __call__(self, t):
        # clamp queries into the supported range (Slerp raises otherwise)
        knots = self.pos_interp.x
        t = np.clip(t, knots[0], knots[-1])

        position = self.pos_interp(t)
        rotvec = self.rot_interp(t).as_rotvec()
        return np.concatenate([position, rotvec], axis=-1)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def get_gripper_calibration_interpolator(aruco_measured_width, aruco_actual_width):
    """
    Build an interpolator mapping aruco-measured width to actual gripper
    opening.

    Assumes the minimum value in aruco_actual_width was measured with the
    gripper fully closed (so it maps to an opening of zero) and the maximum
    with the gripper fully open.
    """
    aruco_measured_width = np.array(aruco_measured_width)
    aruco_actual_width = np.array(aruco_actual_width)
    assert len(aruco_measured_width) == len(aruco_actual_width)
    assert len(aruco_actual_width) >= 2
    # shift so the fully-closed measurement corresponds to zero opening
    gripper_actual_width = aruco_actual_width - np.min(aruco_actual_width)
    return get_interp1d(aruco_measured_width, gripper_actual_width)
|
code/umi/common/k3d_util.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import numba
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
@numba.jit()
def k3d_get_pose_axis(poses, axis_size_m=0.1):
    """Build k3d line-segment data visualizing a batch of poses as RGB axes.

    Args:
        poses: (N, 4, 4) array of homogeneous transforms.
        axis_size_m: axis length in meters.

    Returns:
        (verts, idxs, colors): per pose, 4 vertices (origin + x/y/z tips),
        3 line segments, and 4 vertex colors (origin white, x red, y green,
        z blue).
    """
    # points in camera frame
    points = np.zeros((4, 3), dtype=poses.dtype)
    points[1, 0] = axis_size_m
    points[2, 1] = axis_size_m
    points[3, 2] = axis_size_m

    n_poses = poses.shape[0]
    out_verts = np.zeros((n_poses * 4, 3), dtype=poses.dtype)
    out_idxs = np.zeros((n_poses * 3, 2), dtype=np.int64)
    out_colors = np.zeros((n_poses * 4,), dtype=np.int64)
    for i in range(n_poses):
        this_pose = poses[i]
        # convert points to world frame
        this_verts = points @ this_pose[:3, :3].T + this_pose[:3, 3]
        # fill in vert array
        vert_idx_start = i * 4
        out_verts[vert_idx_start : vert_idx_start + 4] = this_verts
        # draw 3 lines for x,y,z axis
        this_idxs = out_idxs[i * 3 : (i + 1) * 3]
        this_idxs[0] = [0, 1]
        this_idxs[1] = [0, 2]
        this_idxs[2] = [0, 3]
        # offset segment endpoints into this pose's vertex range
        this_idxs += vert_idx_start
        # fill out vertex colors, rgb for xyz
        out_colors[i * 4 : (i + 1) * 4] = [0xFFFFFF, 0xFF0000, 0x00FF00, 0x0000FF]

    return out_verts, out_idxs, out_colors
|
code/umi/common/latency_util.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from scipy.interpolate import interp1d
|
| 3 |
+
import scipy.signal as ss
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def regular_sample(x, t, t_samples):
    """
    Resample signal x(t) at times t_samples via linear interpolation,
    clamping out-of-range queries to the endpoint values.
    """
    spline = interp1d(x=t, y=x, bounds_error=False, fill_value=(x[0], x[-1]))
    return spline(t_samples)
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def get_latency(
    x_target,
    t_target,
    x_actual,
    t_actual,
    t_start=None,
    t_end=None,
    resample_dt=1 / 1000,
    force_positive=False,
):
    """Estimate the latency between a commanded and a measured signal.

    Both signals are resampled onto a common regular grid, normalized with
    shared mean/std, then cross-correlated; the lag of the correlation peak
    is returned in seconds (positive = actual lags target).

    Args:
        x_target, t_target: command signal and its timestamps.
        x_actual, t_actual: measured signal and its timestamps.
        t_start, t_end: analysis window; defaults to the overlap of both.
        resample_dt: resampling period in seconds.
        force_positive: restrict the peak search to non-negative lags.

    Returns:
        (latency, info) where info carries the resampled signals, the
        correlation, and the lag axis for plotting/debugging.
    """
    assert len(x_target) == len(t_target)
    assert len(x_actual) == len(t_actual)
    if t_start is None:
        t_start = max(t_target[0], t_actual[0])
    if t_end is None:
        t_end = min(t_target[-1], t_actual[-1])
    n_samples = int((t_end - t_start) / resample_dt)
    t_samples = np.arange(n_samples) * resample_dt + t_start
    target_samples = regular_sample(x_target, t_target, t_samples)
    actual_samples = regular_sample(x_actual, t_actual, t_samples)

    # normalize samples to zero mean unit std (shared statistics so the two
    # signals stay on the same scale)
    mean = np.mean(np.concatenate([target_samples, actual_samples]))
    std = np.std(np.concatenate([target_samples, actual_samples]))
    target_samples = (target_samples - mean) / std
    actual_samples = (actual_samples - mean) / std

    # cross correlation
    correlation = ss.correlate(actual_samples, target_samples)
    lags = ss.correlation_lags(len(actual_samples), len(target_samples))
    t_lags = lags * resample_dt

    if force_positive:
        # BUG FIX: the argmax over the masked correlation indexes into the
        # masked array, so the lag must be read from the masked lag axis as
        # well; the original indexed the full t_lags with it and returned a
        # lag from the negative end of the axis.
        mask = t_lags >= 0
        latency = t_lags[mask][np.argmax(correlation[mask])]
    else:
        latency = t_lags[np.argmax(correlation)]

    info = {
        "t_samples": t_samples,
        "x_target": target_samples,
        "x_actual": actual_samples,
        "correlation": correlation,
        "lags": t_lags,
    }

    return latency, info
|
code/umi/common/mocap_util.py
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from datetime import datetime
|
| 2 |
+
import pandas as pd
|
| 3 |
+
import numpy as np
|
| 4 |
+
from scipy.spatial.transform import Rotation
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def get_mocap_start_datetime(csv_path):
    """
    Parse the capture start time from the metadata header (first row) of an
    OptiTrack Motive csv export.

    The first row alternates key,value,key,value,...; the value stored under
    "Capture Start Time" is parsed into a datetime.
    """
    with open(csv_path, "r") as f:
        fields = f.readline().split(",")
    # pair up consecutive fields into a key -> value mapping
    meta = dict(zip(fields[0::2], fields[1::2]))
    return datetime.strptime(
        meta["Capture Start Time"], r"%Y-%m-%d %I.%M.%S.%f %p"
    )
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def get_mocap_data(csv_path, rigid_body_name):
    """Load one rigid body's trajectory from an OptiTrack Motive csv export.

    Relies on Motive's fixed export layout: two metadata rows are skipped,
    a 3-level column header (rigid body name / Position-Rotation / axis) is
    used, and the first data column is time since capture start.

    Args:
        csv_path: path to the Motive csv export.
        rigid_body_name: top-level column name of the rigid body to extract.

    Returns:
        dict with "time_since_start" (N,) in seconds and "pose" (N, 4, 4)
        homogeneous transforms.
    """
    mocap_df = pd.read_csv(csv_path, skiprows=2, index_col=0, header=[1, 3, 4])
    # sanity-check the expected Motive export layout before trusting columns
    assert mocap_df.index[0] == 0
    assert mocap_df.index[-1] == (len(mocap_df) - 1)
    assert mocap_df.columns[0][-1] == "Time (Seconds)"

    time_since_start = mocap_df.iloc[:, 0].to_numpy()
    pos = np.zeros((len(mocap_df), 3))
    pos[:, 0] = mocap_df[(rigid_body_name, "Position", "X")]
    pos[:, 1] = mocap_df[(rigid_body_name, "Position", "Y")]
    pos[:, 2] = mocap_df[(rigid_body_name, "Position", "Z")]

    # quaternion components ordered x, y, z, w — scipy's from_quat convention
    rot_quat = np.zeros((len(mocap_df), 4))
    rot_quat[:, 0] = mocap_df[(rigid_body_name, "Rotation", "X")]
    rot_quat[:, 1] = mocap_df[(rigid_body_name, "Rotation", "Y")]
    rot_quat[:, 2] = mocap_df[(rigid_body_name, "Rotation", "Z")]
    rot_quat[:, 3] = mocap_df[(rigid_body_name, "Rotation", "W")]
    rot = Rotation.from_quat(rot_quat)

    # assemble homogeneous 4x4 pose matrices
    pose = np.zeros((pos.shape[0], 4, 4), dtype=pos.dtype)
    pose[:, 3, 3] = 1
    pose[:, :3, :3] = rot.as_matrix()
    pose[:, :3, 3] = pos

    result = {
        "time_since_start": time_since_start,
        "pose": pose,
    }
    return result
|
code/umi/common/nested_dict_util.py
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def nested_dict_map(f, x):
    """
    Apply f to every leaf (non-dict value) of the nested dict x, returning
    a new nested dict of the same structure. A non-dict x yields f(x).
    """
    if isinstance(x, dict):
        return {key: nested_dict_map(f, value) for key, value in x.items()}
    return f(x)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def nested_dict_reduce(f, x):
    """
    Reduce all leaf values of the nested dict x into a single value using
    the binary function f. A non-dict x is returned unchanged.
    """
    if not isinstance(x, dict):
        return x
    return functools.reduce(
        f, (nested_dict_reduce(f, value) for value in x.values())
    )
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def nested_dict_check(f, x):
    """True iff predicate f holds for every leaf of the nested dict x."""
    checks = nested_dict_map(f, x)
    return nested_dict_reduce(lambda a, b: a and b, checks)
|
code/umi/common/orb_slam_util.py
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pandas as pd
|
| 3 |
+
from scipy.spatial.transform import Rotation
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def load_tum_trajectory(tum_txt_path):
    """Load a TUM-format trajectory file into timestamps and 4x4 poses.

    Each row is "t x y z qx qy qz qw". Returns a dict with
    "timestamp" (N,) seconds and "pose" (N,4,4) homogeneous matrices.
    """
    raw = np.loadtxt(tum_txt_path, delimiter=" ", dtype=np.float32)
    if len(raw) == 0:
        # empty trajectory file
        return {
            "timestamp": np.array([]),
            "pose": np.array([]),
        }

    timestamp_sec = raw[:, 0]
    positions = raw[:, 1:4]
    rotations = Rotation.from_quat(raw[:, 4:8])

    n = positions.shape[0]
    poses = np.zeros((n, 4, 4), dtype=np.float32)
    poses[:, 3, 3] = 1
    poses[:, :3, 3] = positions
    poses[:, :3, :3] = rotations.as_matrix()

    return {"timestamp": timestamp_sec, "pose": poses}
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def load_csv_trajectory(csv_path):
    """Load a SLAM trajectory CSV into timestamps and 4x4 poses.

    Rows flagged is_lost are excluded from the pose output. The full
    dataframe is always returned under "raw_data"; when tracking never
    succeeded, only "raw_data" is returned.
    """
    df = pd.read_csv(csv_path)
    if (~df.is_lost).sum() == 0:
        # no valid (tracked) rows at all
        return {"raw_data": df}

    tracked = df.loc[~df.is_lost]

    timestamp_sec = tracked["timestamp"].to_numpy()
    positions = tracked[["x", "y", "z"]].to_numpy()
    rotations = Rotation.from_quat(tracked[["q_x", "q_y", "q_z", "q_w"]].to_numpy())

    n = positions.shape[0]
    poses = np.zeros((n, 4, 4), dtype=np.float32)
    poses[:, 3, 3] = 1
    poses[:, :3, 3] = positions
    poses[:, :3, :3] = rotations.as_matrix()

    return {"timestamp": timestamp_sec, "pose": poses, "raw_data": df}
|
code/umi/common/pose_trajectory_interpolator.py
ADDED
|
@@ -0,0 +1,207 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Union
|
| 2 |
+
import numbers
|
| 3 |
+
import numpy as np
|
| 4 |
+
import scipy.interpolate as si
|
| 5 |
+
import scipy.spatial.transform as st
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def rotation_distance(a: st.Rotation, b: st.Rotation) -> float:
    """Geodesic angle (radians) between rotations a and b."""
    relative = b * a.inv()
    return relative.magnitude()
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def pose_distance(start_pose, end_pose):
    """Return (position_distance, rotation_angle) between two 6D poses.

    A pose is [x, y, z, rx, ry, rz] with the rotation as a rotation vector.
    """
    p0 = np.array(start_pose)
    p1 = np.array(end_pose)
    pos_dist = np.linalg.norm(p1[:3] - p0[:3])
    # geodesic angle of the relative rotation
    r0 = st.Rotation.from_rotvec(p0[3:])
    r1 = st.Rotation.from_rotvec(p1[3:])
    rot_dist = (r1 * r0.inv()).magnitude()
    return pos_dist, rot_dist
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class PoseTrajectoryInterpolator:
    """Interpolator over a 6D pose trajectory (position + rotation-vector).

    Positions are linearly interpolated; rotations use Slerp. Query times
    outside the stored range are clamped to the endpoints (see __call__).
    A single-waypoint trajectory is held constant for all query times.
    """

    def __init__(self, times: np.ndarray, poses: np.ndarray):
        # times: (N,) non-decreasing; poses: (N, 6) as [pos(3), rotvec(3)]
        assert len(times) >= 1
        assert len(poses) == len(times)
        if not isinstance(times, np.ndarray):
            times = np.array(times)
        if not isinstance(poses, np.ndarray):
            poses = np.array(poses)

        if len(times) == 1:
            # special treatment for single step interpolation
            self.single_step = True
            self._times = times
            self._poses = poses
        else:
            self.single_step = False
            assert np.all(times[1:] >= times[:-1])

            pos = poses[:, :3]
            rot = st.Rotation.from_rotvec(poses[:, 3:])

            self.pos_interp = si.interp1d(times, pos, axis=0, assume_sorted=True)
            self.rot_interp = st.Slerp(times, rot)

    @property
    def times(self) -> np.ndarray:
        # knot times; stored on the interp1d object in the multi-step case
        if self.single_step:
            return self._times
        else:
            return self.pos_interp.x

    @property
    def poses(self) -> np.ndarray:
        # (N, 6) poses at the knot times, reconstructed from the interpolators
        if self.single_step:
            return self._poses
        else:
            n = len(self.times)
            poses = np.zeros((n, 6))
            poses[:, :3] = self.pos_interp.y
            poses[:, 3:] = self.rot_interp(self.times).as_rotvec()
            return poses

    def trim(self, start_t: float, end_t: float) -> "PoseTrajectoryInterpolator":
        """Return a new interpolator restricted to [start_t, end_t].

        Interior waypoints are kept; the endpoints are sampled from self.
        """
        assert start_t <= end_t
        times = self.times
        # strictly-interior knots only; endpoints are re-sampled below
        should_keep = (start_t < times) & (times < end_t)
        keep_times = times[should_keep]
        all_times = np.concatenate([[start_t], keep_times, [end_t]])
        # remove duplicates, Slerp requires strictly increasing x
        all_times = np.unique(all_times)
        # interpolate
        all_poses = self(all_times)
        return PoseTrajectoryInterpolator(times=all_times, poses=all_poses)

    def drive_to_waypoint(
        self, pose, time, curr_time, max_pos_speed=np.inf, max_rot_speed=np.inf
    ) -> "PoseTrajectoryInterpolator":
        """Return a new trajectory from the pose at curr_time to `pose`.

        The arrival time is pushed later than `time` if needed so that
        max_pos_speed / max_rot_speed are never exceeded.
        """
        assert max_pos_speed > 0
        assert max_rot_speed > 0
        time = max(time, curr_time)

        curr_pose = self(curr_time)
        pos_dist, rot_dist = pose_distance(curr_pose, pose)
        # minimum durations imposed by the speed limits
        pos_min_duration = pos_dist / max_pos_speed
        rot_min_duration = rot_dist / max_rot_speed
        duration = time - curr_time
        duration = max(duration, max(pos_min_duration, rot_min_duration))
        assert duration >= 0
        last_waypoint_time = curr_time + duration

        # insert new pose
        # trim(curr_time, curr_time) collapses history to the current pose
        trimmed_interp = self.trim(curr_time, curr_time)
        times = np.append(trimmed_interp.times, [last_waypoint_time], axis=0)
        poses = np.append(trimmed_interp.poses, [pose], axis=0)

        # create new interpolator
        final_interp = PoseTrajectoryInterpolator(times, poses)
        return final_interp

    def schedule_waypoint(
        self,
        pose,
        time,
        max_pos_speed=np.inf,
        max_rot_speed=np.inf,
        curr_time=None,
        last_waypoint_time=None,
    ) -> "PoseTrajectoryInterpolator":
        """Insert `pose` as a future waypoint at (approximately) `time`.

        Earlier waypoints between curr_time and last_waypoint_time are kept;
        anything after the insertion point is discarded. The arrival time is
        delayed past `time` if the speed limits require it. Returns self
        unchanged when `time` is not in the future relative to curr_time.
        """
        assert max_pos_speed > 0
        assert max_rot_speed > 0
        if last_waypoint_time is not None:
            assert curr_time is not None

        # trim current interpolator to between curr_time and last_waypoint_time
        start_time = self.times[0]
        end_time = self.times[-1]
        assert start_time <= end_time

        if curr_time is not None:
            if time <= curr_time:
                # if insert time is earlier than current time
                # no effect should be done to the interpolator
                return self
            # now, curr_time < time
            start_time = max(curr_time, start_time)

            if last_waypoint_time is not None:
                # if last_waypoint_time is earlier than start_time
                # use start_time
                if time <= last_waypoint_time:
                    end_time = curr_time
                else:
                    end_time = max(last_waypoint_time, curr_time)
            else:
                end_time = curr_time

        end_time = min(end_time, time)
        start_time = min(start_time, end_time)
        # end time should be the latest of all times except time
        # after this we can assume order (proven by zhenjia, due to the 2 min operations)

        # Constraints:
        # start_time <= end_time <= time (proven by zhenjia)
        # curr_time <= start_time (proven by zhenjia)
        # curr_time <= time (proven by zhenjia)

        # time can't change
        # last_waypoint_time can't change
        # curr_time can't change
        assert start_time <= end_time
        assert end_time <= time
        if last_waypoint_time is not None:
            if time <= last_waypoint_time:
                assert end_time == curr_time
            else:
                assert end_time == max(last_waypoint_time, curr_time)

        if curr_time is not None:
            assert curr_time <= start_time
            assert curr_time <= time

        trimmed_interp = self.trim(start_time, end_time)
        # after this, all waypoints in trimmed_interp is within start_time and end_time
        # and is earlier than time

        # determine speed
        duration = time - end_time
        end_pose = trimmed_interp(end_time)
        pos_dist, rot_dist = pose_distance(pose, end_pose)
        # stretch the segment if the speed limits demand more time
        pos_min_duration = pos_dist / max_pos_speed
        rot_min_duration = rot_dist / max_rot_speed
        duration = max(duration, max(pos_min_duration, rot_min_duration))
        assert duration >= 0
        last_waypoint_time = end_time + duration

        # insert new pose
        times = np.append(trimmed_interp.times, [last_waypoint_time], axis=0)
        poses = np.append(trimmed_interp.poses, [pose], axis=0)

        # create new interpolator
        final_interp = PoseTrajectoryInterpolator(times, poses)
        return final_interp

    def __call__(self, t: Union[numbers.Number, np.ndarray]) -> np.ndarray:
        """Sample the trajectory at time(s) t.

        Scalar t returns a (6,) pose; array t returns (len(t), 6).
        Times are clamped to the trajectory's [first, last] knot times.
        """
        is_single = False
        if isinstance(t, numbers.Number):
            is_single = True
            t = np.array([t])

        pose = np.zeros((len(t), 6))
        if self.single_step:
            # constant trajectory: the single stored pose for all queries
            pose[:] = self._poses[0]
        else:
            start_time = self.times[0]
            end_time = self.times[-1]
            # clamp instead of extrapolating
            t = np.clip(t, start_time, end_time)

            pose = np.zeros((len(t), 6))
            pose[:, :3] = self.pos_interp(t)
            pose[:, 3:] = self.rot_interp(t).as_rotvec()

        if is_single:
            pose = pose[0]
        return pose
|
code/umi/common/pose_util.py
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import scipy.spatial.transform as st
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def pos_rot_to_mat(pos, rot):
    """Build (...,4,4) homogeneous transforms from positions and Rotations."""
    batch_shape = pos.shape[:-1]
    out = np.zeros(batch_shape + (4, 4), dtype=pos.dtype)
    out[..., :3, :3] = rot.as_matrix()
    out[..., :3, 3] = pos
    out[..., 3, 3] = 1
    return out
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def mat_to_pos_rot(mat):
    """Split (...,4,4) homogeneous transforms into positions and Rotations."""
    # de-homogenize the translation by the scalar in the bottom-right corner
    w = mat[..., 3, 3]
    pos = (mat[..., :3, 3].T / w.T).T
    rot = st.Rotation.from_matrix(mat[..., :3, :3])
    return pos, rot
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def pos_rot_to_pose(pos, rot):
    """Pack positions and Rotations into (...,6) [pos, rotvec] poses."""
    out = np.zeros(pos.shape[:-1] + (6,), dtype=pos.dtype)
    out[..., :3] = pos
    out[..., 3:] = rot.as_rotvec()
    return out
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def pose_to_pos_rot(pose):
    """Unpack (...,6) [pos, rotvec] poses into positions and Rotations."""
    return pose[..., :3], st.Rotation.from_rotvec(pose[..., 3:])
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def pose_to_mat(pose):
    """Convert a (...,6) [pos, rotvec] pose to a (...,4,4) homogeneous matrix."""
    pos, rot = pose_to_pos_rot(pose)
    return pos_rot_to_mat(pos, rot)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def mat_to_pose(mat):
    """Convert a (...,4,4) homogeneous matrix to a (...,6) [pos, rotvec] pose."""
    pos, rot = mat_to_pos_rot(mat)
    return pos_rot_to_pose(pos, rot)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def transform_pose(tx, pose):
    """
    tx: tx_new_old (4x4 homogeneous transform)
    pose: tx_old_obj as a 6D [pos, rotvec] pose
    result: tx_new_obj as a 6D [pos, rotvec] pose
    """
    return mat_to_pose(tx @ pose_to_mat(pose))
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def transform_point(tx, point):
    """Apply the 4x4 homogeneous transform tx to (...,3) points."""
    rotated = point @ tx[:3, :3].T
    return rotated + tx[:3, 3]
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def project_point(k, point):
    """Project (...,3) camera-frame points through intrinsics k to pixel uv."""
    homog = point @ k.T
    # perspective divide by the depth component
    return homog[..., :2] / homog[..., [2]]
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def apply_delta_pose(pose, delta_pose):
    """Compose a delta 6D pose onto a 6D pose.

    Positions add component-wise; rotations compose by left-multiplying
    the delta rotation onto the base rotation.
    """
    result = np.zeros_like(pose)
    result[:3] = pose[:3] + delta_pose[:3]
    base_rot = st.Rotation.from_rotvec(pose[3:])
    delta_rot = st.Rotation.from_rotvec(delta_pose[3:])
    result[3:] = (delta_rot * base_rot).as_rotvec()
    return result
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def normalize(vec, tol=1e-7):
    # Normalize a vector by its L2 norm, clamping the divisor at tol.
    # NOTE(review): this definition is shadowed by the later
    # `normalize(vec, eps=1e-12)` defined further down in this module;
    # after import only that later definition is reachable.
    return vec / np.maximum(np.linalg.norm(vec), tol)
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def rot_from_directions(from_vec, to_vec):
    """Rotation taking direction from_vec onto to_vec (axis-angle construction)."""
    from_vec = normalize(from_vec)
    to_vec = normalize(to_vec)
    # rotation axis is perpendicular to both input directions
    axis = np.cross(from_vec, to_vec)
    axis = normalize(axis)
    # NOTE(review): for parallel or anti-parallel inputs the cross product is
    # zero, so the axis is ill-defined here — confirm callers avoid that case.
    angle = np.arccos(np.dot(from_vec, to_vec))
    rotvec = axis * angle
    rot = st.Rotation.from_rotvec(rotvec)
    return rot
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def normalize(vec, eps=1e-12):
    """Normalize vectors along the last axis, guarding against divide-by-zero."""
    lengths = np.maximum(np.linalg.norm(vec, axis=-1), eps)
    return (vec.T / lengths).T
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def rot6d_to_mat(d6):
    """Convert a (...,6) continuous 6D rotation representation to (...,3,3).

    Gram-Schmidt: the two 3-vectors in d6 are orthonormalized into the
    first two rows of the matrix; the third row is their cross product.
    """
    a1, a2 = d6[..., :3], d6[..., 3:]
    b1 = normalize(a1)
    # remove the b1 component from a2, then renormalize
    b2 = a2 - np.sum(b1 * a2, axis=-1, keepdims=True) * b1
    b2 = normalize(b2)
    b3 = np.cross(b1, b2, axis=-1)
    # stacking along axis=-2 makes b1/b2/b3 the ROWS of the matrix
    out = np.stack((b1, b2, b3), axis=-2)
    return out
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
def mat_to_rot6d(mat):
    """Flatten the first two rows of (...,3,3) matrices into a (...,6) rep."""
    leading = mat.shape[:-2]
    return mat[..., :2, :].copy().reshape(leading + (6,))
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def mat_to_pose10d(mat):
    """Convert (...,4,4) transforms to (...,10) position + 6D-rotation vectors."""
    d6 = mat_to_rot6d(mat[..., :3, :3])
    return np.concatenate([mat[..., :3, 3], d6], axis=-1)
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
def pose10d_to_mat(d10):
    """Convert (...,10) position + 6D-rotation vectors to (...,4,4) transforms."""
    mat = np.zeros(d10.shape[:-1] + (4, 4), dtype=d10.dtype)
    mat[..., :3, :3] = rot6d_to_mat(d10[..., 3:])
    mat[..., :3, 3] = d10[..., :3]
    mat[..., 3, 3] = 1
    return mat
|
code/umi/common/precise_sleep.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import time
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def precise_sleep(dt: float, slack_time: float = 0.001, time_func=time.monotonic):
    """
    Sleep for dt seconds with minimal jitter.

    A coarse time.sleep covers all but slack_time of the interval; a
    busy-wait spin covers the remainder for precision.
    """
    deadline = time_func() + dt
    if dt > slack_time:
        time.sleep(dt - slack_time)
    # spin out the remaining slack
    while time_func() < deadline:
        pass
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def precise_wait(t_end: float, slack_time: float = 0.001, time_func=time.monotonic):
    """Block until time_func() reaches t_end: coarse sleep, then spin."""
    remaining = t_end - time_func()
    if remaining > 0:
        coarse = remaining - slack_time
        if coarse > 0:
            time.sleep(coarse)
        # spin for the final slack_time to hit t_end precisely
        while time_func() < t_end:
            pass
|
code/umi/common/timecode_util.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Union
|
| 2 |
+
from fractions import Fraction
|
| 3 |
+
import datetime
|
| 4 |
+
import av
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def timecode_to_seconds(
    timecode: str, frame_rate: Union[int, float, Fraction]
) -> Union[float, Fraction]:
    """
    Convert a non-drop-frame timecode "HH:MM:SS:FF" into seconds since
    midnight. Returns a Fraction when frame_rate is a Fraction.
    """
    # nominal whole-number rate: 29.97 -> 30, 59.94 -> 60
    nominal_rate = round(frame_rate)

    hours, minutes, seconds, frames = map(int, timecode.split(":"))

    # total frame count at the nominal (non-drop) rate
    total_frames = (hours * 3600 + minutes * 60 + seconds) * nominal_rate + frames

    # dividing by the true rate converts the frame count to elapsed seconds
    return total_frames / frame_rate
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def stream_get_start_datetime(stream: av.stream.Stream) -> datetime.datetime:
    """
    Combines creation time and timecode to get high-precision
    time for the first frame of a video.

    The timecode supplies the (sub-second) time of day; the stream's
    creation_time metadata supplies only the calendar date.
    """
    # read metadata
    frame_rate = stream.average_rate
    tc = stream.metadata["timecode"]
    creation_time = stream.metadata["creation_time"]

    # get time within the day
    seconds_since_midnight = float(
        timecode_to_seconds(timecode=tc, frame_rate=frame_rate)
    )
    delta = datetime.timedelta(seconds=seconds_since_midnight)

    # get dates
    # NOTE(review): assumes creation_time is formatted "%Y-%m-%dT%H:%M:%S.%fZ";
    # strptime raises ValueError for any other format — confirm upstream source.
    create_datetime = datetime.datetime.strptime(
        creation_time, r"%Y-%m-%dT%H:%M:%S.%fZ"
    )
    # keep only the date component; time of day comes from the timecode
    create_datetime = create_datetime.replace(hour=0, minute=0, second=0, microsecond=0)
    start_datetime = create_datetime + delta
    return start_datetime
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def mp4_get_start_datetime(mp4_path: str) -> datetime.datetime:
    """Return the first-frame datetime of an MP4 file.

    Opens the container and delegates to stream_get_start_datetime on the
    first video stream's metadata (timecode + creation_time).
    """
    with av.open(mp4_path) as container:
        stream = container.streams.video[0]
        return stream_get_start_datetime(stream=stream)
|
code/umi/common/timestamp_accumulator.py
ADDED
|
@@ -0,0 +1,218 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List, Tuple, Optional, Dict
|
| 2 |
+
import math
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def get_accumulate_timestamp_idxs(
    timestamps: List[float],
    start_time: float,
    dt: float,
    eps: float = 1e-5,
    next_global_idx: Optional[int] = 0,
    allow_negative=False,
) -> Tuple[List[int], List[int], int]:
    """
    Assign sorted timestamps to fixed-dt windows starting at start_time.

    For each window the first timestamp landing in it is chosen; when
    frames are dropped, a timestamp is repeated to fill the gap. Feed the
    returned next_global_idx back in on the next call to continue the
    sequence; pass next_global_idx=None to allow overwriting history.

    Returns:
        local_idxs: indices into `timestamps` of the chosen samples
        global_idxs: the global window index of each chosen sample
        next_global_idx: cursor value for the next call
    """
    local_idxs: List[int] = []
    global_idxs: List[int] = []
    for local_idx, ts in enumerate(timestamps):
        # eps absorbs float error so ts == start_time + k * dt lands in window k
        window = math.floor((ts - start_time) / dt + eps)
        if window < 0 and not allow_negative:
            continue
        if next_global_idx is None:
            # overwrite mode: start the cursor at the first valid window
            next_global_idx = window
        # fill every window from the cursor up to this one with this sample
        repeats = max(0, window - next_global_idx + 1)
        local_idxs.extend([local_idx] * repeats)
        global_idxs.extend(range(next_global_idx, next_global_idx + repeats))
        next_global_idx += repeats
    return local_idxs, global_idxs, next_global_idx
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def align_timestamps(
    timestamps: List[float],
    target_global_idxs: List[int],
    start_time: float,
    dt: float,
    eps: float = 1e-5,
):
    """
    For each target global window index, pick the local index in
    `timestamps` whose sample falls in (or is carried forward into)
    that dt window.

    Returns a list of local indices aligned 1:1 with target_global_idxs.
    Surplus windows are truncated; missing trailing windows are filled by
    repeating the last timestamp.

    Raises:
        ValueError: if no timestamp maps to the requested window range.
    """
    if isinstance(target_global_idxs, np.ndarray):
        target_global_idxs = target_global_idxs.tolist()
    assert len(target_global_idxs) > 0

    local_idxs, global_idxs, _ = get_accumulate_timestamp_idxs(
        timestamps=timestamps,
        start_time=start_time,
        dt=dt,
        eps=eps,
        next_global_idx=target_global_idxs[0],
        allow_negative=True,
    )
    if len(global_idxs) > len(target_global_idxs):
        # if more steps available, truncate
        global_idxs = global_idxs[: len(target_global_idxs)]
        local_idxs = local_idxs[: len(target_global_idxs)]

    if len(global_idxs) == 0:
        # BUG FIX: the original dropped into an interactive debugger here
        # (import pdb; pdb.set_trace()) — fail loudly instead.
        raise ValueError(
            "align_timestamps: no timestamps map to the requested "
            "target_global_idxs; cannot align."
        )

    for _ in range(len(target_global_idxs) - len(global_idxs)):
        # if missing, repeat the last available timestamp
        local_idxs.append(len(timestamps) - 1)
        global_idxs.append(global_idxs[-1] + 1)
    assert global_idxs == target_global_idxs
    assert len(local_idxs) == len(global_idxs)
    return local_idxs
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
class TimestampObsAccumulator:
    """Accumulates keyed observation arrays onto a fixed-dt time grid.

    Each incoming (data, timestamps) batch is snapped to windows of width
    dt starting at start_time; windows are filled strictly forward (no
    overwriting of earlier windows).
    """

    def __init__(self, start_time: float, dt: float, eps: float = 1e-5):
        self.start_time = start_time
        self.dt = dt
        # tolerance for snapping timestamps to window boundaries
        self.eps = eps
        # key -> (capacity, *feature_shape) ring of accumulated observations
        self.obs_buffer = dict()
        # raw source timestamps for each filled window; None until first put()
        self.timestamp_buffer = None
        # next unfilled global window index == number of valid entries
        self.next_global_idx = 0

    def __len__(self):
        return self.next_global_idx

    @property
    def data(self):
        # accumulated observations truncated to the filled region
        if self.timestamp_buffer is None:
            return dict()
        result = dict()
        for key, value in self.obs_buffer.items():
            result[key] = value[: len(self)]
        return result

    @property
    def actual_timestamps(self):
        # the raw source timestamp chosen for each window
        if self.timestamp_buffer is None:
            return np.array([])
        return self.timestamp_buffer[: len(self)]

    @property
    def timestamps(self):
        # the idealized grid timestamps: start_time + k * dt
        if self.timestamp_buffer is None:
            return np.array([])
        return self.start_time + np.arange(len(self)) * self.dt

    def put(self, data: Dict[str, np.ndarray], timestamps: np.ndarray):
        """
        data:
            key: T,*
        Appends a batch of T observations; all keys must share leading dim T.
        """
        local_idxs, global_idxs, self.next_global_idx = get_accumulate_timestamp_idxs(
            timestamps=timestamps,
            start_time=self.start_time,
            dt=self.dt,
            eps=self.eps,
            next_global_idx=self.next_global_idx,
        )

        if len(global_idxs) > 0:
            if self.timestamp_buffer is None:
                # first allocation
                self.obs_buffer = dict()
                for key, value in data.items():
                    self.obs_buffer[key] = np.zeros_like(value)
                self.timestamp_buffer = np.zeros((len(timestamps),), dtype=np.float64)

            this_max_size = global_idxs[-1] + 1
            if this_max_size > len(self.timestamp_buffer):
                # reallocate with doubling growth to amortize copies
                # NOTE(review): np.resize fills new slots by repeating existing
                # data rather than zeros; safe here because only slots listed in
                # global_idxs are read back via the truncating properties above.
                new_size = max(this_max_size, len(self.timestamp_buffer) * 2)
                for key in list(self.obs_buffer.keys()):
                    new_shape = (new_size,) + self.obs_buffer[key].shape[1:]
                    self.obs_buffer[key] = np.resize(self.obs_buffer[key], new_shape)
                self.timestamp_buffer = np.resize(self.timestamp_buffer, (new_size))

            # write data
            for key, value in self.obs_buffer.items():
                value[global_idxs] = data[key][local_idxs]
            self.timestamp_buffer[global_idxs] = timestamps[local_idxs]
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
class TimestampActionAccumulator:
    """Accumulates an action array onto a fixed-dt time grid.

    Unlike TimestampObsAccumulator, later put() calls may overwrite
    previously filled windows (next_global_idx=None in the index mapping).
    """

    def __init__(self, start_time: float, dt: float, eps: float = 1e-5):
        """
        Different from Obs accumulator, the action accumulator
        allows overwriting previous values.
        """
        self.start_time = start_time
        self.dt = dt
        # tolerance for snapping timestamps to window boundaries
        self.eps = eps
        # (capacity, *action_shape) buffer; None until first put()
        self.action_buffer = None
        # raw source timestamp for each filled window
        self.timestamp_buffer = None
        # highest filled window index + 1 (monotonically non-decreasing)
        self.size = 0

    def __len__(self):
        return self.size

    @property
    def actions(self):
        # accumulated actions truncated to the filled region
        if self.action_buffer is None:
            return np.array([])
        return self.action_buffer[: len(self)]

    @property
    def actual_timestamps(self):
        # the raw source timestamp chosen for each window
        if self.timestamp_buffer is None:
            return np.array([])
        return self.timestamp_buffer[: len(self)]

    @property
    def timestamps(self):
        # the idealized grid timestamps: start_time + k * dt
        if self.timestamp_buffer is None:
            return np.array([])
        return self.start_time + np.arange(len(self)) * self.dt

    def put(self, actions: np.ndarray, timestamps: np.ndarray):
        """
        Note: timestamps is the time when the action will be issued,
        not when the action will be completed (target_timestamp)
        """
        local_idxs, global_idxs, _ = get_accumulate_timestamp_idxs(
            timestamps=timestamps,
            start_time=self.start_time,
            dt=self.dt,
            eps=self.eps,
            # allows overwriting previous actions
            next_global_idx=None,
        )

        if len(global_idxs) > 0:
            if self.timestamp_buffer is None:
                # first allocation
                self.action_buffer = np.zeros_like(actions)
                self.timestamp_buffer = np.zeros((len(actions),), dtype=np.float64)

            this_max_size = global_idxs[-1] + 1
            if this_max_size > len(self.timestamp_buffer):
                # reallocate with doubling growth to amortize copies
                # NOTE(review): np.resize repeats existing data into new slots;
                # safe here because self.size only grows past written windows.
                new_size = max(this_max_size, len(self.timestamp_buffer) * 2)
                new_shape = (new_size,) + self.action_buffer.shape[1:]
                self.action_buffer = np.resize(self.action_buffer, new_shape)
                self.timestamp_buffer = np.resize(self.timestamp_buffer, (new_size,))

            # potentially rewrite old data (as expected)
            self.action_buffer[global_idxs] = actions[local_idxs]
            self.timestamp_buffer[global_idxs] = timestamps[local_idxs]
            self.size = max(self.size, this_max_size)
|
code/umi/common/usb_util.py
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from subprocess import Popen, PIPE, DEVNULL
|
| 3 |
+
import fcntl
|
| 4 |
+
import pathlib
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def create_usb_list():
    """Enumerate USB devices by parsing `lsusb -v` output.

    Returns a list of dicts with keys: bus, device, description, path,
    and (when present in the verbose output) manufacturer.
    Linux-only; requires lsusb to be installed.
    """
    device_list = list()
    # verbose lsusb output; stderr suppressed because -v warns without root
    lsusb_out = (
        Popen(
            "lsusb -v",
            shell=True,
            bufsize=64,
            stdin=PIPE,
            stdout=PIPE,
            stderr=DEVNULL,
            close_fds=True,
        )
        .stdout.read()
        .strip()
        .decode("utf-8")
    )
    # devices are separated by blank lines
    usb_devices = lsusb_out.split("%s%s" % (os.linesep, os.linesep))
    for device_categories in usb_devices:
        if not device_categories:
            continue
        categories = device_categories.split(os.linesep)
        # header line: "Bus BBB Device DDD: ID vvvv:pppp description..."
        device_stuff = categories[0].strip().split()
        bus = device_stuff[1]
        device = device_stuff[3][:-1]  # strip trailing ":"
        device_dict = {"bus": bus, "device": device}
        device_info = " ".join(device_stuff[6:])
        device_dict["description"] = device_info
        for category in categories:
            if not category:
                continue
            categoryinfo = category.strip().split()
            if categoryinfo[0] == "iManufacturer":
                manufacturer_info = " ".join(categoryinfo[2:])
                device_dict["manufacturer"] = manufacturer_info
            if categoryinfo[0] == "iProduct":
                # NOTE(review): this overwrites the numeric "device" field set
                # above with the product name — confirm that is intentional.
                device_info = " ".join(categoryinfo[2:])
                device_dict["device"] = device_info
            # loop-invariant: same path recomputed every line of the section
            path = "/dev/bus/usb/%s/%s" % (bus, device)
            device_dict["path"] = path

        device_list.append(device_dict)
    return device_list
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def reset_usb_device(dev_path):
    """Issue a USBDEVFS_RESET ioctl to the USB device node at ``dev_path``.

    Parameters
    ----------
    dev_path : str
        Device node path, e.g. ``"/dev/bus/usb/001/004"``.

    Raises
    ------
    PermissionError
        If the current user lacks write access to the device node; the
        message suggests the chmod workaround.
    """
    # ioctl request code for USBDEVFS_RESET (0x5514 == 21780),
    # defined in <linux/usbdevice_fs.h>
    USBDEVFS_RESET = 21780
    try:
        # BUGFIX: the original called open(dev_path, "w", os.O_WRONLY),
        # but open()'s third positional argument is `buffering`, not an
        # OS flag — os.O_WRONLY (== 1) silently selected line buffering.
        # It also never closed the file. Open plainly for write and let
        # the context manager release the fd deterministically.
        with open(dev_path, "w") as f:
            fcntl.ioctl(f, USBDEVFS_RESET, 0)
        print("Successfully reset %s" % dev_path)
    except PermissionError as ex:
        # preserve the original cause for debugging
        raise PermissionError(
            'Try running "sudo chmod 777 {}"'.format(dev_path)
        ) from ex
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def reset_all_elgato_devices():
    """
    Find and reset all Elgato capture cards.
    Required to workaround a firmware bug.
    """
    # Enumerate USB devices, then reset every one whose lsusb
    # description mentions Elgato.
    for dev in create_usb_list():
        if "Elgato" in dev["description"]:
            reset_usb_device(dev["path"])
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def get_sorted_v4l_paths(by_id=True):
    """
    If by_id, sort devices by device name + serial number (preserves device order)
    else, sort devices by usb bus id (preserves usb port order)
    """
    subdir = "by-id" if by_id else "by-path"
    v4l_dir = pathlib.Path("/dev/v4l") / subdir

    result = list()
    for dev_path in sorted(v4l_dir.glob("*video*")):
        # Only keep device nodes whose name ends with "index0"; higher
        # indices are secondary nodes of the same physical camera and
        # are not valid video capture devices.
        suffix = dev_path.name.split("-")[-1]
        assert suffix.startswith("index")
        if int(suffix[len("index"):]) == 0:
            result.append(str(dev_path.absolute()))

    return result
|
code/umi/pipeline/aruco_detection.py
ADDED
|
File without changes
|
code/umi/real_world/bimanual_umi_env.py
ADDED
|
@@ -0,0 +1,695 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional, List
|
| 2 |
+
import pathlib
|
| 3 |
+
import numpy as np
|
| 4 |
+
import time
|
| 5 |
+
import shutil
|
| 6 |
+
import math
|
| 7 |
+
from multiprocessing.managers import SharedMemoryManager
|
| 8 |
+
from umi.real_world.rtde_interpolation_controller import RTDEInterpolationController
|
| 9 |
+
from umi.real_world.wsg_controller import WSGController
|
| 10 |
+
from umi.real_world.franka_interpolation_controller import FrankaInterpolationController
|
| 11 |
+
from umi.real_world.multi_uvc_camera import MultiUvcCamera, VideoRecorder
|
| 12 |
+
from diffusion_policy.common.timestamp_accumulator import (
|
| 13 |
+
TimestampActionAccumulator,
|
| 14 |
+
ObsAccumulator,
|
| 15 |
+
)
|
| 16 |
+
from umi.common.cv_util import draw_predefined_mask
|
| 17 |
+
from umi.real_world.multi_camera_visualizer import MultiCameraVisualizer
|
| 18 |
+
from diffusion_policy.common.replay_buffer import ReplayBuffer
|
| 19 |
+
from diffusion_policy.common.cv2_util import get_image_transform, optimal_row_cols
|
| 20 |
+
from umi.common.usb_util import reset_all_elgato_devices, get_sorted_v4l_paths
|
| 21 |
+
from umi.common.pose_util import pose_to_pos_rot
|
| 22 |
+
from umi.common.interpolation_util import get_interp1d, PoseInterpolator
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class BimanualUmiEnv:
|
| 26 |
+
def __init__(
|
| 27 |
+
self,
|
| 28 |
+
# required params
|
| 29 |
+
output_dir,
|
| 30 |
+
robots_config, # list of dict[{robot_type: 'ur5', robot_ip: XXX, obs_latency: 0.0001, action_latency: 0.1, tcp_offset: 0.21}]
|
| 31 |
+
grippers_config, # list of dict[{gripper_ip: XXX, gripper_port: 1000, obs_latency: 0.01, , action_latency: 0.1}]
|
| 32 |
+
# env params
|
| 33 |
+
frequency=20,
|
| 34 |
+
# obs
|
| 35 |
+
obs_image_resolution=(224, 224),
|
| 36 |
+
max_obs_buffer_size=60,
|
| 37 |
+
obs_float32=False,
|
| 38 |
+
camera_reorder=None,
|
| 39 |
+
no_mirror=False,
|
| 40 |
+
fisheye_converter=None,
|
| 41 |
+
mirror_swap=False,
|
| 42 |
+
# this latency compensates receive_timestamp
|
| 43 |
+
# all in seconds
|
| 44 |
+
camera_obs_latency=0.125,
|
| 45 |
+
# all in steps (relative to frequency)
|
| 46 |
+
camera_down_sample_steps=1,
|
| 47 |
+
robot_down_sample_steps=1,
|
| 48 |
+
gripper_down_sample_steps=1,
|
| 49 |
+
# all in steps (relative to frequency)
|
| 50 |
+
camera_obs_horizon=2,
|
| 51 |
+
robot_obs_horizon=2,
|
| 52 |
+
gripper_obs_horizon=2,
|
| 53 |
+
# action
|
| 54 |
+
max_pos_speed=0.25,
|
| 55 |
+
max_rot_speed=0.6,
|
| 56 |
+
init_joints=False,
|
| 57 |
+
# vis params
|
| 58 |
+
enable_multi_cam_vis=True,
|
| 59 |
+
multi_cam_vis_resolution=(960, 960),
|
| 60 |
+
# shared memory
|
| 61 |
+
shm_manager=None,
|
| 62 |
+
):
|
| 63 |
+
output_dir = pathlib.Path(output_dir)
|
| 64 |
+
assert output_dir.parent.is_dir()
|
| 65 |
+
video_dir = output_dir.joinpath("videos")
|
| 66 |
+
video_dir.mkdir(parents=True, exist_ok=True)
|
| 67 |
+
zarr_path = str(output_dir.joinpath("replay_buffer.zarr").absolute())
|
| 68 |
+
replay_buffer = ReplayBuffer.create_from_path(zarr_path=zarr_path, mode="a")
|
| 69 |
+
|
| 70 |
+
if shm_manager is None:
|
| 71 |
+
shm_manager = SharedMemoryManager()
|
| 72 |
+
shm_manager.start()
|
| 73 |
+
|
| 74 |
+
# Find and reset all Elgato capture cards.
|
| 75 |
+
# Required to workaround a firmware bug.
|
| 76 |
+
reset_all_elgato_devices()
|
| 77 |
+
|
| 78 |
+
# Wait for all v4l cameras to be back online
|
| 79 |
+
time.sleep(0.1)
|
| 80 |
+
v4l_paths = get_sorted_v4l_paths()
|
| 81 |
+
if camera_reorder is not None:
|
| 82 |
+
paths = [v4l_paths[i] for i in camera_reorder]
|
| 83 |
+
v4l_paths = paths
|
| 84 |
+
|
| 85 |
+
# compute resolution for vis
|
| 86 |
+
rw, rh, col, row = optimal_row_cols(
|
| 87 |
+
n_cameras=len(v4l_paths),
|
| 88 |
+
in_wh_ratio=4 / 3,
|
| 89 |
+
max_resolution=multi_cam_vis_resolution,
|
| 90 |
+
)
|
| 91 |
+
|
| 92 |
+
# HACK: Separate video setting for each camera
|
| 93 |
+
# Elagto Cam Link 4k records at 4k 30fps
|
| 94 |
+
# Other capture card records at 720p 60fps
|
| 95 |
+
resolution = list()
|
| 96 |
+
capture_fps = list()
|
| 97 |
+
cap_buffer_size = list()
|
| 98 |
+
video_recorder = list()
|
| 99 |
+
transform = list()
|
| 100 |
+
vis_transform = list()
|
| 101 |
+
for path in v4l_paths:
|
| 102 |
+
if "Cam_Link_4K" in path:
|
| 103 |
+
res = (3840, 2160)
|
| 104 |
+
fps = 30
|
| 105 |
+
buf = 3
|
| 106 |
+
bit_rate = 6000 * 1000
|
| 107 |
+
|
| 108 |
+
def tf4k(data, input_res=res):
|
| 109 |
+
img = data["color"]
|
| 110 |
+
f = get_image_transform(
|
| 111 |
+
input_res=input_res,
|
| 112 |
+
output_res=obs_image_resolution,
|
| 113 |
+
# obs output rgb
|
| 114 |
+
bgr_to_rgb=True,
|
| 115 |
+
)
|
| 116 |
+
img = f(img)
|
| 117 |
+
if obs_float32:
|
| 118 |
+
img = img.astype(np.float32) / 255
|
| 119 |
+
data["color"] = img
|
| 120 |
+
return data
|
| 121 |
+
|
| 122 |
+
transform.append(tf4k)
|
| 123 |
+
else:
|
| 124 |
+
res = (1920, 1080)
|
| 125 |
+
fps = 60
|
| 126 |
+
buf = 1
|
| 127 |
+
bit_rate = 3000 * 1000
|
| 128 |
+
|
| 129 |
+
is_mirror = None
|
| 130 |
+
if mirror_swap:
|
| 131 |
+
mirror_mask = np.ones((224, 224, 3), dtype=np.uint8)
|
| 132 |
+
mirror_mask = draw_predefined_mask(
|
| 133 |
+
mirror_mask,
|
| 134 |
+
color=(0, 0, 0),
|
| 135 |
+
mirror=True,
|
| 136 |
+
gripper=False,
|
| 137 |
+
finger=False,
|
| 138 |
+
)
|
| 139 |
+
is_mirror = mirror_mask[..., 0] == 0
|
| 140 |
+
|
| 141 |
+
def tf(data, input_res=res):
|
| 142 |
+
img = data["color"]
|
| 143 |
+
if fisheye_converter is None:
|
| 144 |
+
f = get_image_transform(
|
| 145 |
+
input_res=input_res,
|
| 146 |
+
output_res=obs_image_resolution,
|
| 147 |
+
# obs output rgb
|
| 148 |
+
bgr_to_rgb=True,
|
| 149 |
+
)
|
| 150 |
+
img = np.ascontiguousarray(f(img))
|
| 151 |
+
if is_mirror is not None:
|
| 152 |
+
img[is_mirror] = img[:, ::-1, :][is_mirror]
|
| 153 |
+
img = draw_predefined_mask(
|
| 154 |
+
img,
|
| 155 |
+
color=(0, 0, 0),
|
| 156 |
+
mirror=no_mirror,
|
| 157 |
+
gripper=True,
|
| 158 |
+
finger=False,
|
| 159 |
+
use_aa=True,
|
| 160 |
+
)
|
| 161 |
+
else:
|
| 162 |
+
img = fisheye_converter.forward(img)
|
| 163 |
+
img = img[..., ::-1]
|
| 164 |
+
if obs_float32:
|
| 165 |
+
img = img.astype(np.float32) / 255
|
| 166 |
+
data["color"] = img
|
| 167 |
+
return data
|
| 168 |
+
|
| 169 |
+
transform.append(tf)
|
| 170 |
+
|
| 171 |
+
resolution.append(res)
|
| 172 |
+
capture_fps.append(fps)
|
| 173 |
+
cap_buffer_size.append(buf)
|
| 174 |
+
video_recorder.append(
|
| 175 |
+
VideoRecorder.create_hevc_nvenc(
|
| 176 |
+
fps=fps, input_pix_fmt="bgr24", bit_rate=bit_rate
|
| 177 |
+
)
|
| 178 |
+
)
|
| 179 |
+
|
| 180 |
+
def vis_tf(data, input_res=res):
|
| 181 |
+
img = data["color"]
|
| 182 |
+
f = get_image_transform(
|
| 183 |
+
input_res=input_res, output_res=(rw, rh), bgr_to_rgb=False
|
| 184 |
+
)
|
| 185 |
+
img = f(img)
|
| 186 |
+
data["color"] = img
|
| 187 |
+
return data
|
| 188 |
+
|
| 189 |
+
vis_transform.append(vis_tf)
|
| 190 |
+
|
| 191 |
+
camera = MultiUvcCamera(
|
| 192 |
+
dev_video_paths=v4l_paths,
|
| 193 |
+
shm_manager=shm_manager,
|
| 194 |
+
resolution=resolution,
|
| 195 |
+
capture_fps=capture_fps,
|
| 196 |
+
# send every frame immediately after arrival
|
| 197 |
+
# ignores put_fps
|
| 198 |
+
put_downsample=False,
|
| 199 |
+
get_max_k=max_obs_buffer_size,
|
| 200 |
+
receive_latency=camera_obs_latency,
|
| 201 |
+
cap_buffer_size=cap_buffer_size,
|
| 202 |
+
transform=transform,
|
| 203 |
+
vis_transform=vis_transform,
|
| 204 |
+
video_recorder=video_recorder,
|
| 205 |
+
verbose=False,
|
| 206 |
+
)
|
| 207 |
+
|
| 208 |
+
multi_cam_vis = None
|
| 209 |
+
if enable_multi_cam_vis:
|
| 210 |
+
multi_cam_vis = MultiCameraVisualizer(
|
| 211 |
+
camera=camera, row=row, col=col, rgb_to_bgr=False
|
| 212 |
+
)
|
| 213 |
+
|
| 214 |
+
cube_diag = np.linalg.norm([1, 1, 1])
|
| 215 |
+
j_init = np.array([0, -90, -90, -90, 90, 0]) / 180 * np.pi
|
| 216 |
+
if not init_joints:
|
| 217 |
+
j_init = None
|
| 218 |
+
|
| 219 |
+
assert len(robots_config) == len(grippers_config)
|
| 220 |
+
robots: List[RTDEInterpolationController] = list()
|
| 221 |
+
grippers: List[WSGController] = list()
|
| 222 |
+
for rc in robots_config:
|
| 223 |
+
if rc["robot_type"].startswith("ur5"):
|
| 224 |
+
assert rc["robot_type"] in ["ur5", "ur5e"]
|
| 225 |
+
this_robot = RTDEInterpolationController(
|
| 226 |
+
shm_manager=shm_manager,
|
| 227 |
+
robot_ip=rc["robot_ip"],
|
| 228 |
+
frequency=500 if rc["robot_type"] == "ur5e" else 125,
|
| 229 |
+
lookahead_time=0.1,
|
| 230 |
+
gain=300,
|
| 231 |
+
max_pos_speed=max_pos_speed * cube_diag,
|
| 232 |
+
max_rot_speed=max_rot_speed * cube_diag,
|
| 233 |
+
launch_timeout=3,
|
| 234 |
+
tcp_offset_pose=[0, 0, rc["tcp_offset"], 0, 0, 0],
|
| 235 |
+
payload_mass=None,
|
| 236 |
+
payload_cog=None,
|
| 237 |
+
joints_init=j_init,
|
| 238 |
+
joints_init_speed=1.05,
|
| 239 |
+
soft_real_time=False,
|
| 240 |
+
verbose=False,
|
| 241 |
+
receive_keys=None,
|
| 242 |
+
receive_latency=rc["robot_obs_latency"],
|
| 243 |
+
)
|
| 244 |
+
elif rc["robot_type"].startswith("franka"):
|
| 245 |
+
this_robot = FrankaInterpolationController(
|
| 246 |
+
shm_manager=shm_manager,
|
| 247 |
+
robot_ip=rc["robot_ip"],
|
| 248 |
+
frequency=200,
|
| 249 |
+
Kx_scale=1.0,
|
| 250 |
+
Kxd_scale=np.array([2.0, 1.5, 2.0, 1.0, 1.0, 1.0]),
|
| 251 |
+
verbose=False,
|
| 252 |
+
receive_latency=rc["robot_obs_latency"],
|
| 253 |
+
)
|
| 254 |
+
else:
|
| 255 |
+
raise NotImplementedError()
|
| 256 |
+
robots.append(this_robot)
|
| 257 |
+
|
| 258 |
+
for gc in grippers_config:
|
| 259 |
+
this_gripper = WSGController(
|
| 260 |
+
shm_manager=shm_manager,
|
| 261 |
+
hostname=gc["gripper_ip"],
|
| 262 |
+
port=gc["gripper_port"],
|
| 263 |
+
receive_latency=gc["gripper_obs_latency"],
|
| 264 |
+
use_meters=True,
|
| 265 |
+
)
|
| 266 |
+
|
| 267 |
+
grippers.append(this_gripper)
|
| 268 |
+
|
| 269 |
+
self.camera = camera
|
| 270 |
+
|
| 271 |
+
self.robots = robots
|
| 272 |
+
self.robots_config = robots_config
|
| 273 |
+
self.grippers = grippers
|
| 274 |
+
self.grippers_config = grippers_config
|
| 275 |
+
|
| 276 |
+
self.multi_cam_vis = multi_cam_vis
|
| 277 |
+
self.frequency = frequency
|
| 278 |
+
self.max_obs_buffer_size = max_obs_buffer_size
|
| 279 |
+
self.max_pos_speed = max_pos_speed
|
| 280 |
+
self.max_rot_speed = max_rot_speed
|
| 281 |
+
# timing
|
| 282 |
+
self.camera_obs_latency = camera_obs_latency
|
| 283 |
+
self.camera_down_sample_steps = camera_down_sample_steps
|
| 284 |
+
self.robot_down_sample_steps = robot_down_sample_steps
|
| 285 |
+
self.gripper_down_sample_steps = gripper_down_sample_steps
|
| 286 |
+
self.camera_obs_horizon = camera_obs_horizon
|
| 287 |
+
self.robot_obs_horizon = robot_obs_horizon
|
| 288 |
+
self.gripper_obs_horizon = gripper_obs_horizon
|
| 289 |
+
# recording
|
| 290 |
+
self.output_dir = output_dir
|
| 291 |
+
self.video_dir = video_dir
|
| 292 |
+
self.replay_buffer = replay_buffer
|
| 293 |
+
# temp memory buffers
|
| 294 |
+
self.last_camera_data = None
|
| 295 |
+
# recording buffers
|
| 296 |
+
self.obs_accumulator = None
|
| 297 |
+
self.action_accumulator = None
|
| 298 |
+
|
| 299 |
+
self.start_time = None
|
| 300 |
+
self.last_time_step = 0
|
| 301 |
+
|
| 302 |
+
# ======== start-stop API =============
|
| 303 |
+
@property
|
| 304 |
+
def is_ready(self):
|
| 305 |
+
ready_flag = self.camera.is_ready
|
| 306 |
+
for robot in self.robots:
|
| 307 |
+
ready_flag = ready_flag and robot.is_ready
|
| 308 |
+
for gripper in self.grippers:
|
| 309 |
+
ready_flag = ready_flag and gripper.is_ready
|
| 310 |
+
return ready_flag
|
| 311 |
+
|
| 312 |
+
def start(self, wait=True):
|
| 313 |
+
self.camera.start(wait=False)
|
| 314 |
+
for robot in self.robots:
|
| 315 |
+
robot.start(wait=False)
|
| 316 |
+
for gripper in self.grippers:
|
| 317 |
+
gripper.start(wait=False)
|
| 318 |
+
|
| 319 |
+
if self.multi_cam_vis is not None:
|
| 320 |
+
self.multi_cam_vis.start(wait=False)
|
| 321 |
+
if wait:
|
| 322 |
+
self.start_wait()
|
| 323 |
+
|
| 324 |
+
def stop(self, wait=True):
|
| 325 |
+
self.end_episode()
|
| 326 |
+
if self.multi_cam_vis is not None:
|
| 327 |
+
self.multi_cam_vis.stop(wait=False)
|
| 328 |
+
for robot in self.robots:
|
| 329 |
+
robot.stop(wait=False)
|
| 330 |
+
for gripper in self.grippers:
|
| 331 |
+
gripper.stop(wait=False)
|
| 332 |
+
self.camera.stop(wait=False)
|
| 333 |
+
if wait:
|
| 334 |
+
self.stop_wait()
|
| 335 |
+
|
| 336 |
+
def start_wait(self):
|
| 337 |
+
self.camera.start_wait()
|
| 338 |
+
for robot in self.robots:
|
| 339 |
+
robot.start_wait()
|
| 340 |
+
for gripper in self.grippers:
|
| 341 |
+
gripper.start_wait()
|
| 342 |
+
if self.multi_cam_vis is not None:
|
| 343 |
+
self.multi_cam_vis.start_wait()
|
| 344 |
+
|
| 345 |
+
def stop_wait(self):
|
| 346 |
+
for robot in self.robots:
|
| 347 |
+
robot.stop_wait()
|
| 348 |
+
for gripper in self.grippers:
|
| 349 |
+
gripper.stop_wait()
|
| 350 |
+
self.camera.stop_wait()
|
| 351 |
+
if self.multi_cam_vis is not None:
|
| 352 |
+
self.multi_cam_vis.stop_wait()
|
| 353 |
+
|
| 354 |
+
# ========= context manager ===========
|
| 355 |
+
def __enter__(self):
|
| 356 |
+
self.start()
|
| 357 |
+
return self
|
| 358 |
+
|
| 359 |
+
def __exit__(self, exc_type, exc_val, exc_tb):
|
| 360 |
+
self.stop()
|
| 361 |
+
|
| 362 |
+
# ========= async env API ===========
|
| 363 |
+
def get_obs(self) -> dict:
|
| 364 |
+
"""
|
| 365 |
+
Timestamp alignment policy
|
| 366 |
+
We assume the cameras used for obs are always [0, k - 1], where k is the number of robots
|
| 367 |
+
All other cameras, find corresponding frame with the nearest timestamp
|
| 368 |
+
All low-dim observations, interpolate with respect to 'current' time
|
| 369 |
+
"""
|
| 370 |
+
|
| 371 |
+
"observation dict"
|
| 372 |
+
assert self.is_ready
|
| 373 |
+
|
| 374 |
+
# get data
|
| 375 |
+
# 60 Hz, camera_calibrated_timestamp
|
| 376 |
+
k = (
|
| 377 |
+
math.ceil(
|
| 378 |
+
self.camera_obs_horizon
|
| 379 |
+
* self.camera_down_sample_steps
|
| 380 |
+
* (60 / self.frequency)
|
| 381 |
+
)
|
| 382 |
+
+ 2
|
| 383 |
+
) # here 2 is adjustable, typically 1 should be enough
|
| 384 |
+
# print('==>k ', k, self.camera_obs_horizon, self.camera_down_sample_steps, self.frequency)
|
| 385 |
+
self.last_camera_data = self.camera.get(k=k, out=self.last_camera_data)
|
| 386 |
+
|
| 387 |
+
# both have more than n_obs_steps data
|
| 388 |
+
last_robots_data = list()
|
| 389 |
+
last_grippers_data = list()
|
| 390 |
+
# 125/500 hz, robot_receive_timestamp
|
| 391 |
+
for robot in self.robots:
|
| 392 |
+
last_robots_data.append(robot.get_all_state())
|
| 393 |
+
# 30 hz, gripper_receive_timestamp
|
| 394 |
+
for gripper in self.grippers:
|
| 395 |
+
last_grippers_data.append(gripper.get_all_state())
|
| 396 |
+
|
| 397 |
+
# select align_camera_idx
|
| 398 |
+
num_obs_cameras = len(self.robots)
|
| 399 |
+
align_camera_idx = None
|
| 400 |
+
running_best_error = np.inf
|
| 401 |
+
|
| 402 |
+
for camera_idx in range(num_obs_cameras):
|
| 403 |
+
this_error = 0
|
| 404 |
+
this_timestamp = self.last_camera_data[camera_idx]["timestamp"][-1]
|
| 405 |
+
for other_camera_idx in range(num_obs_cameras):
|
| 406 |
+
if other_camera_idx == camera_idx:
|
| 407 |
+
continue
|
| 408 |
+
other_timestep_idx = -1
|
| 409 |
+
while True:
|
| 410 |
+
if (
|
| 411 |
+
self.last_camera_data[other_camera_idx]["timestamp"][
|
| 412 |
+
other_timestep_idx
|
| 413 |
+
]
|
| 414 |
+
< this_timestamp
|
| 415 |
+
):
|
| 416 |
+
this_error += (
|
| 417 |
+
this_timestamp
|
| 418 |
+
- self.last_camera_data[other_camera_idx]["timestamp"][
|
| 419 |
+
other_timestep_idx
|
| 420 |
+
]
|
| 421 |
+
)
|
| 422 |
+
break
|
| 423 |
+
other_timestep_idx -= 1
|
| 424 |
+
if align_camera_idx is None or this_error < running_best_error:
|
| 425 |
+
running_best_error = this_error
|
| 426 |
+
align_camera_idx = camera_idx
|
| 427 |
+
|
| 428 |
+
last_timestamp = self.last_camera_data[align_camera_idx]["timestamp"][-1]
|
| 429 |
+
dt = 1 / self.frequency
|
| 430 |
+
|
| 431 |
+
# align camera obs timestamps
|
| 432 |
+
camera_obs_timestamps = last_timestamp - (
|
| 433 |
+
np.arange(self.camera_obs_horizon)[::-1]
|
| 434 |
+
* self.camera_down_sample_steps
|
| 435 |
+
* dt
|
| 436 |
+
)
|
| 437 |
+
camera_obs = dict()
|
| 438 |
+
for camera_idx, value in self.last_camera_data.items():
|
| 439 |
+
this_timestamps = value["timestamp"]
|
| 440 |
+
this_idxs = list()
|
| 441 |
+
for t in camera_obs_timestamps:
|
| 442 |
+
nn_idx = np.argmin(np.abs(this_timestamps - t))
|
| 443 |
+
# if np.abs(this_timestamps - t)[nn_idx] > 1.0 / 120 and camera_idx != 3:
|
| 444 |
+
# print('ERROR!!! ', camera_idx, len(this_timestamps), nn_idx, (this_timestamps - t)[nn_idx-1: nn_idx+2])
|
| 445 |
+
this_idxs.append(nn_idx)
|
| 446 |
+
# remap key
|
| 447 |
+
camera_obs[f"camera{camera_idx}_rgb"] = value["color"][this_idxs]
|
| 448 |
+
|
| 449 |
+
# obs_data to return (it only includes camera data at this stage)
|
| 450 |
+
obs_data = dict(camera_obs)
|
| 451 |
+
|
| 452 |
+
# include camera timesteps
|
| 453 |
+
obs_data["timestamp"] = camera_obs_timestamps
|
| 454 |
+
|
| 455 |
+
# align robot obs
|
| 456 |
+
robot_obs_timestamps = last_timestamp - (
|
| 457 |
+
np.arange(self.robot_obs_horizon)[::-1] * self.robot_down_sample_steps * dt
|
| 458 |
+
)
|
| 459 |
+
for robot_idx, last_robot_data in enumerate(last_robots_data):
|
| 460 |
+
robot_pose_interpolator = PoseInterpolator(
|
| 461 |
+
t=last_robot_data["robot_timestamp"], x=last_robot_data["ActualTCPPose"]
|
| 462 |
+
)
|
| 463 |
+
robot_pose = robot_pose_interpolator(robot_obs_timestamps)
|
| 464 |
+
robot_obs = {
|
| 465 |
+
f"robot{robot_idx}_eef_pos": robot_pose[..., :3],
|
| 466 |
+
f"robot{robot_idx}_eef_rot_axis_angle": robot_pose[..., 3:],
|
| 467 |
+
}
|
| 468 |
+
# update obs_data
|
| 469 |
+
obs_data.update(robot_obs)
|
| 470 |
+
|
| 471 |
+
# align gripper obs
|
| 472 |
+
gripper_obs_timestamps = last_timestamp - (
|
| 473 |
+
np.arange(self.gripper_obs_horizon)[::-1]
|
| 474 |
+
* self.gripper_down_sample_steps
|
| 475 |
+
* dt
|
| 476 |
+
)
|
| 477 |
+
for robot_idx, last_gripper_data in enumerate(last_grippers_data):
|
| 478 |
+
# align gripper obs
|
| 479 |
+
gripper_interpolator = get_interp1d(
|
| 480 |
+
t=last_gripper_data["gripper_timestamp"],
|
| 481 |
+
x=last_gripper_data["gripper_position"][..., None],
|
| 482 |
+
)
|
| 483 |
+
gripper_obs = {
|
| 484 |
+
f"robot{robot_idx}_gripper_width": gripper_interpolator(
|
| 485 |
+
gripper_obs_timestamps
|
| 486 |
+
)
|
| 487 |
+
}
|
| 488 |
+
|
| 489 |
+
# update obs_data
|
| 490 |
+
obs_data.update(gripper_obs)
|
| 491 |
+
|
| 492 |
+
# accumulate obs
|
| 493 |
+
if self.obs_accumulator is not None:
|
| 494 |
+
for robot_idx, last_robot_data in enumerate(last_robots_data):
|
| 495 |
+
self.obs_accumulator.put(
|
| 496 |
+
data={
|
| 497 |
+
f"robot{robot_idx}_eef_pose": last_robot_data["ActualTCPPose"],
|
| 498 |
+
f"robot{robot_idx}_joint_pos": last_robot_data["ActualQ"],
|
| 499 |
+
f"robot{robot_idx}_joint_vel": last_robot_data["ActualQd"],
|
| 500 |
+
},
|
| 501 |
+
timestamps=last_robot_data["robot_timestamp"],
|
| 502 |
+
)
|
| 503 |
+
|
| 504 |
+
for robot_idx, last_gripper_data in enumerate(last_grippers_data):
|
| 505 |
+
self.obs_accumulator.put(
|
| 506 |
+
data={
|
| 507 |
+
f"robot{robot_idx}_gripper_width": last_gripper_data[
|
| 508 |
+
"gripper_position"
|
| 509 |
+
][..., None]
|
| 510 |
+
},
|
| 511 |
+
timestamps=last_gripper_data["gripper_timestamp"],
|
| 512 |
+
)
|
| 513 |
+
|
| 514 |
+
return obs_data
|
| 515 |
+
|
| 516 |
+
def exec_actions(
|
| 517 |
+
self, actions: np.ndarray, timestamps: np.ndarray, compensate_latency=False
|
| 518 |
+
):
|
| 519 |
+
assert self.is_ready
|
| 520 |
+
if not isinstance(actions, np.ndarray):
|
| 521 |
+
actions = np.array(actions)
|
| 522 |
+
if not isinstance(timestamps, np.ndarray):
|
| 523 |
+
timestamps = np.array(timestamps)
|
| 524 |
+
|
| 525 |
+
# convert action to pose
|
| 526 |
+
receive_time = time.time()
|
| 527 |
+
is_new = timestamps > receive_time
|
| 528 |
+
new_actions = actions[is_new]
|
| 529 |
+
new_timestamps = timestamps[is_new]
|
| 530 |
+
|
| 531 |
+
assert new_actions.shape[1] // len(self.robots) == 7
|
| 532 |
+
assert new_actions.shape[1] % len(self.robots) == 0
|
| 533 |
+
|
| 534 |
+
# schedule waypoints
|
| 535 |
+
for i in range(len(new_actions)):
|
| 536 |
+
for robot_idx, (robot, gripper, rc, gc) in enumerate(
|
| 537 |
+
zip(
|
| 538 |
+
self.robots, self.grippers, self.robots_config, self.grippers_config
|
| 539 |
+
)
|
| 540 |
+
):
|
| 541 |
+
r_latency = rc["robot_action_latency"] if compensate_latency else 0.0
|
| 542 |
+
g_latency = gc["gripper_action_latency"] if compensate_latency else 0.0
|
| 543 |
+
r_actions = new_actions[i, 7 * robot_idx + 0 : 7 * robot_idx + 6]
|
| 544 |
+
g_actions = new_actions[i, 7 * robot_idx + 6]
|
| 545 |
+
robot.schedule_waypoint(
|
| 546 |
+
pose=r_actions, target_time=new_timestamps[i] - r_latency
|
| 547 |
+
)
|
| 548 |
+
gripper.schedule_waypoint(
|
| 549 |
+
pos=g_actions, target_time=new_timestamps[i] - g_latency
|
| 550 |
+
)
|
| 551 |
+
|
| 552 |
+
# record actions
|
| 553 |
+
if self.action_accumulator is not None:
|
| 554 |
+
self.action_accumulator.put(new_actions, new_timestamps)
|
| 555 |
+
|
| 556 |
+
def get_robot_state(self):
|
| 557 |
+
return [robot.get_state() for robot in self.robots]
|
| 558 |
+
|
| 559 |
+
def get_gripper_state(self):
|
| 560 |
+
return [gripper.get_state() for gripper in self.grippers]
|
| 561 |
+
|
| 562 |
+
# recording API
|
| 563 |
+
def start_episode(self, start_time=None):
|
| 564 |
+
"Start recording and return first obs"
|
| 565 |
+
if start_time is None:
|
| 566 |
+
start_time = time.time()
|
| 567 |
+
self.start_time = start_time
|
| 568 |
+
|
| 569 |
+
assert self.is_ready
|
| 570 |
+
|
| 571 |
+
# prepare recording stuff
|
| 572 |
+
episode_id = self.replay_buffer.n_episodes
|
| 573 |
+
this_video_dir = self.video_dir.joinpath(str(episode_id))
|
| 574 |
+
this_video_dir.mkdir(parents=True, exist_ok=True)
|
| 575 |
+
n_cameras = self.camera.n_cameras
|
| 576 |
+
video_paths = list()
|
| 577 |
+
for i in range(n_cameras):
|
| 578 |
+
video_paths.append(str(this_video_dir.joinpath(f"{i}.mp4").absolute()))
|
| 579 |
+
|
| 580 |
+
# start recording on camera
|
| 581 |
+
self.camera.restart_put(start_time=start_time)
|
| 582 |
+
self.camera.start_recording(video_path=video_paths, start_time=start_time)
|
| 583 |
+
|
| 584 |
+
# create accumulators
|
| 585 |
+
self.obs_accumulator = ObsAccumulator()
|
| 586 |
+
self.action_accumulator = TimestampActionAccumulator(
|
| 587 |
+
start_time=start_time, dt=1 / self.frequency
|
| 588 |
+
)
|
| 589 |
+
print(f"Episode {episode_id} started!")
|
| 590 |
+
|
| 591 |
+
def end_episode(self):
|
| 592 |
+
"Stop recording"
|
| 593 |
+
assert self.is_ready
|
| 594 |
+
|
| 595 |
+
# stop video recorder
|
| 596 |
+
self.camera.stop_recording()
|
| 597 |
+
|
| 598 |
+
# TODO
|
| 599 |
+
if self.obs_accumulator is not None:
|
| 600 |
+
# recording
|
| 601 |
+
assert self.action_accumulator is not None
|
| 602 |
+
|
| 603 |
+
# Since the only way to accumulate obs and action is by calling
|
| 604 |
+
# get_obs and exec_actions, which will be in the same thread.
|
| 605 |
+
# We don't need to worry new data come in here.
|
| 606 |
+
end_time = float("inf")
|
| 607 |
+
for key, value in self.obs_accumulator.timestamps.items():
|
| 608 |
+
end_time = min(end_time, value[-1])
|
| 609 |
+
end_time = min(end_time, self.action_accumulator.timestamps[-1])
|
| 610 |
+
|
| 611 |
+
actions = self.action_accumulator.actions
|
| 612 |
+
action_timestamps = self.action_accumulator.timestamps
|
| 613 |
+
n_steps = 0
|
| 614 |
+
if np.sum(self.action_accumulator.timestamps <= end_time) > 0:
|
| 615 |
+
n_steps = (
|
| 616 |
+
np.nonzero(self.action_accumulator.timestamps <= end_time)[0][-1]
|
| 617 |
+
+ 1
|
| 618 |
+
)
|
| 619 |
+
|
| 620 |
+
if n_steps > 0:
|
| 621 |
+
timestamps = action_timestamps[:n_steps]
|
| 622 |
+
episode = {
|
| 623 |
+
"timestamp": timestamps,
|
| 624 |
+
"action": actions[:n_steps],
|
| 625 |
+
}
|
| 626 |
+
for robot_idx in range(len(self.robots)):
|
| 627 |
+
robot_pose_interpolator = PoseInterpolator(
|
| 628 |
+
t=np.array(
|
| 629 |
+
self.obs_accumulator.timestamps[
|
| 630 |
+
f"robot{robot_idx}_eef_pose"
|
| 631 |
+
]
|
| 632 |
+
),
|
| 633 |
+
x=np.array(
|
| 634 |
+
self.obs_accumulator.data[f"robot{robot_idx}_eef_pose"]
|
| 635 |
+
),
|
| 636 |
+
)
|
| 637 |
+
robot_pose = robot_pose_interpolator(timestamps)
|
| 638 |
+
episode[f"robot{robot_idx}_eef_pos"] = robot_pose[:, :3]
|
| 639 |
+
episode[f"robot{robot_idx}_eef_rot_axis_angle"] = robot_pose[:, 3:]
|
| 640 |
+
joint_pos_interpolator = get_interp1d(
|
| 641 |
+
np.array(
|
| 642 |
+
self.obs_accumulator.timestamps[
|
| 643 |
+
f"robot{robot_idx}_joint_pos"
|
| 644 |
+
]
|
| 645 |
+
),
|
| 646 |
+
np.array(
|
| 647 |
+
self.obs_accumulator.data[f"robot{robot_idx}_joint_pos"]
|
| 648 |
+
),
|
| 649 |
+
)
|
| 650 |
+
joint_vel_interpolator = get_interp1d(
|
| 651 |
+
np.array(
|
| 652 |
+
self.obs_accumulator.timestamps[
|
| 653 |
+
f"robot{robot_idx}_joint_vel"
|
| 654 |
+
]
|
| 655 |
+
),
|
| 656 |
+
np.array(
|
| 657 |
+
self.obs_accumulator.data[f"robot{robot_idx}_joint_vel"]
|
| 658 |
+
),
|
| 659 |
+
)
|
| 660 |
+
episode[f"robot{robot_idx}_joint_pos"] = joint_pos_interpolator(
|
| 661 |
+
timestamps
|
| 662 |
+
)
|
| 663 |
+
episode[f"robot{robot_idx}_joint_vel"] = joint_vel_interpolator(
|
| 664 |
+
timestamps
|
| 665 |
+
)
|
| 666 |
+
|
| 667 |
+
gripper_interpolator = get_interp1d(
|
| 668 |
+
t=np.array(
|
| 669 |
+
self.obs_accumulator.timestamps[
|
| 670 |
+
f"robot{robot_idx}_gripper_width"
|
| 671 |
+
]
|
| 672 |
+
),
|
| 673 |
+
x=np.array(
|
| 674 |
+
self.obs_accumulator.data[f"robot{robot_idx}_gripper_width"]
|
| 675 |
+
),
|
| 676 |
+
)
|
| 677 |
+
episode[f"robot{robot_idx}_gripper_width"] = gripper_interpolator(
|
| 678 |
+
timestamps
|
| 679 |
+
)
|
| 680 |
+
|
| 681 |
+
self.replay_buffer.add_episode(episode, compressors="disk")
|
| 682 |
+
episode_id = self.replay_buffer.n_episodes - 1
|
| 683 |
+
print(f"Episode {episode_id} saved!")
|
| 684 |
+
|
| 685 |
+
self.obs_accumulator = None
|
| 686 |
+
self.action_accumulator = None
|
| 687 |
+
|
| 688 |
+
    def drop_episode(self):
        """Finalize the in-flight episode, then delete it from the replay buffer and disk.

        end_episode() is called first so the episode is fully flushed into the
        replay buffer before drop_episode() removes it again.
        """
        self.end_episode()
        self.replay_buffer.drop_episode()
        # n_episodes already reflects the drop, so this index names the
        # directory of the episode that was just removed.
        episode_id = self.replay_buffer.n_episodes
        this_video_dir = self.video_dir.joinpath(str(episode_id))
        if this_video_dir.exists():
            # remove the recorded videos belonging to the dropped episode
            shutil.rmtree(str(this_video_dir))
        print(f"Episode {episode_id} dropped!")
|
code/umi/real_world/cmd_measure.lua
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-- Nicolas Alt, 2014-09-04
-- Cheng Chi, 2023-07-27
-- Command-and-measure script
-- Tests showed about 30Hz rate
--
-- Runs on the gripper's onboard Lua interpreter. Registers two custom
-- command ids and services them in an endless loop:
--   0xB0: measure only (reply with current state)
--   0xB1: position PD control (one PD step, then reply with state)
require "socket"
cmd.register(0xB0); -- Measure only
cmd.register(0xB1); -- Position PD

-- Return true when bit p (a power of two) is set in integer x.
function hasbit(x, p)
    return x % (p + p) >= p
end

-- Reply to the host with the current gripper measurements.
function send_state()
    -- ==== Get measurements ====
    state = gripper.state();
    pos = mc.position();
    speed = mc.speed();
    force = mc.aforce();
    time = socket.gettime();

    if cmd.online() then
        -- Only the lowest byte of state is sent!
        cmd.send(id, etob(E_SUCCESS), state % 256, ntob(pos), ntob(speed), ntob(force), ntob(time));
    end
end

-- Read one command packet, optionally run one PD control step, and reply.
function process()
    id, payload = cmd.read();

    -- Position control
    if id == 0xB1 then
        -- get args: each value is decoded from 4 payload bytes via bton
        cmd_pos = bton({payload[2],payload[3],payload[4],payload[5]});
        cmd_vel = bton({payload[6],payload[7],payload[8],payload[9]});
        cmd_kp = bton({payload[10],payload[11],payload[12],payload[13]});
        cmd_kd = bton({payload[14],payload[15],payload[16],payload[17]});
        cmd_travel_force_limit = bton({payload[18],payload[19],payload[20],payload[21]});
        cmd_blocked_force_limit = bton({payload[22],payload[23],payload[24],payload[25]});

        -- get state
        pos = mc.position();
        vel = mc.speed();

        -- pd controller: velocity command from position and velocity error
        e = cmd_pos - pos;
        de = cmd_vel - vel;
        act_vel = cmd_kp * e + cmd_kd * de;

        -- command
        mc.speed(act_vel);

        -- force limit: use the blocked limit while the finger is blocked
        if mc.blocked() then
            mc.force(cmd_blocked_force_limit);
        else
            mc.force(cmd_travel_force_limit);
        end
    end

    --t_start = socket.gettime();
    send_state();
    --print(socket.gettime() - t_start);

end

-- Main loop: service commands while a host is connected, otherwise idle.
-- process() runs under pcall so a malformed packet does not kill the script.
while true do
    if cmd.online() then
        -- process()
        if not pcall(process) then
            print("Error occured")
            sleep(100)
        end
    else
        sleep(100)
    end
end
|
code/umi/real_world/franka_interpolation_controller.py
ADDED
|
@@ -0,0 +1,376 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import time
|
| 3 |
+
import enum
|
| 4 |
+
import multiprocessing as mp
|
| 5 |
+
from multiprocessing.managers import SharedMemoryManager
|
| 6 |
+
import scipy.interpolate as si
|
| 7 |
+
import scipy.spatial.transform as st
|
| 8 |
+
import numpy as np
|
| 9 |
+
|
| 10 |
+
from umi.shared_memory.shared_memory_queue import SharedMemoryQueue, Empty
|
| 11 |
+
from umi.shared_memory.shared_memory_ring_buffer import SharedMemoryRingBuffer
|
| 12 |
+
from umi.common.pose_trajectory_interpolator import PoseTrajectoryInterpolator
|
| 13 |
+
from unified_video_action.common.precise_sleep import precise_wait
|
| 14 |
+
import torch
|
| 15 |
+
from umi.common.pose_util import pose_to_mat, mat_to_pose
|
| 16 |
+
import zerorpc
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class Command(enum.Enum):
    """Opcodes for messages placed on the controller's shared-memory input queue."""

    STOP = 0  # shut down the control loop
    SERVOL = 1  # drive to a pose over a given duration
    SCHEDULE_WAYPOINT = 2  # insert a pose at an absolute wall-clock target time
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
# Fixed homogeneous transforms between the Franka flange frame and the
# gripper-tip frame. Naming: tx_a_b is the 4x4 transform of frame b
# expressed in frame a, so chains compose left to right.

# tip offset relative to the 90-degree-rotated flange frame
tx_flangerot90_tip = np.identity(4)
tx_flangerot90_tip[:3, 3] = np.array([-0.0336, 0, 0.247])

# quarter-turn about x between the two intermediate flange frames
tx_flangerot45_flangerot90 = np.identity(4)
tx_flangerot45_flangerot90[:3, :3] = st.Rotation.from_euler("x", np.pi / 2).as_matrix()

# eighth-turn about z from the raw flange frame
tx_flange_flangerot45 = np.identity(4)
tx_flange_flangerot45[:3, :3] = st.Rotation.from_euler("z", np.pi / 4).as_matrix()

# full chain flange -> tip, and its inverse for going back
tx_flange_tip = tx_flange_flangerot45 @ tx_flangerot45_flangerot90 @ tx_flangerot90_tip
tx_tip_flange = np.linalg.inv(tx_flange_tip)
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
class FrankaInterface:
    """Thin zerorpc client for the Franka middle-layer control server.

    Poses exchanged with the server are flange poses as 6-vectors
    (3 position + 3 axis-angle rotation); get_ee_pose converts the flange
    pose to the gripper-tip frame via tx_flange_tip.
    """

    def __init__(self, ip="172.16.0.3", port=4242):
        # heartbeat keeps the zerorpc connection alive across slow calls
        self.server = zerorpc.Client(heartbeat=20)
        self.server.connect(f"tcp://{ip}:{port}")

    def get_ee_pose(self):
        """Return the current gripper-tip pose as a 6-vector (pos + axis-angle)."""
        flange_pose = np.array(self.server.get_ee_pose())
        tip_pose = mat_to_pose(pose_to_mat(flange_pose) @ tx_flange_tip)
        return tip_pose

    def get_joint_positions(self):
        """Return the joint positions reported by the server as a numpy array."""
        return np.array(self.server.get_joint_positions())

    def get_joint_velocities(self):
        """Return the joint velocities reported by the server as a numpy array."""
        return np.array(self.server.get_joint_velocities())

    def move_to_joint_positions(self, positions: np.ndarray, time_to_go: float):
        """Ask the server to move to the given joint positions over time_to_go seconds."""
        self.server.move_to_joint_positions(positions.tolist(), time_to_go)

    def start_cartesian_impedance(self, Kx: np.ndarray, Kxd: np.ndarray):
        """Start the server-side cartesian impedance policy with the given gains."""
        self.server.start_cartesian_impedance(Kx.tolist(), Kxd.tolist())

    def update_desired_ee_pose(self, pose: np.ndarray):
        """Stream a new desired flange pose (6-vector) to the running policy."""
        self.server.update_desired_ee_pose(pose.tolist())

    def terminate_current_policy(self):
        # stop whatever policy is currently running on the server
        self.server.terminate_current_policy()

    def close(self):
        # tear down the zerorpc connection
        self.server.close()
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
class FrankaInterpolationController(mp.Process):
    """
    To ensure sending command to the robot with predictable latency
    this controller need its separate process (due to python GIL).

    Commands arrive through a shared-memory queue (servoL /
    schedule_waypoint / stop); robot state is published through a
    shared-memory ring buffer read via get_state / get_all_state.
    """

    def __init__(
        self,
        shm_manager: SharedMemoryManager,
        robot_ip,
        robot_port=4242,
        frequency=1000,
        Kx_scale=1.0,
        Kxd_scale=1.0,
        launch_timeout=3,
        joints_init=None,
        joints_init_duration=None,
        soft_real_time=False,
        verbose=False,
        get_max_k=None,
        receive_latency=0.0,
    ):
        """
        robot_ip: the ip of the middle-layer controller (NUC)
        frequency: 1000 for franka
        Kx_scale: the scale of position gains
        Kxd_scale: the scale of velocity gains
        launch_timeout: seconds to wait for the control process to come up
        joints_init: optional (7,) joint configuration to move to on startup
        joints_init_duration: time allotted to reach joints_init
        soft_real_time: enables round-robin scheduling and real-time priority
            requires running scripts/rtprio_setup.sh before hand.
        get_max_k: ring buffer depth; defaults to 5 seconds of samples
        receive_latency: subtracted from receive time to estimate sample time
        """

        if joints_init is not None:
            joints_init = np.array(joints_init)
            assert joints_init.shape == (7,)

        super().__init__(name="FrankaPositionalController")
        self.robot_ip = robot_ip
        self.robot_port = robot_port
        self.frequency = frequency
        # default impedance gains, scaled by caller-provided factors
        self.Kx = np.array([750.0, 750.0, 750.0, 15.0, 15.0, 15.0]) * Kx_scale
        self.Kxd = np.array([37.0, 37.0, 37.0, 2.0, 2.0, 2.0]) * Kxd_scale
        self.launch_timeout = launch_timeout
        self.joints_init = joints_init
        self.joints_init_duration = joints_init_duration
        self.soft_real_time = soft_real_time
        self.receive_latency = receive_latency
        self.verbose = verbose

        if get_max_k is None:
            # keep roughly 5 seconds of history
            get_max_k = int(frequency * 5)

        # build input queue
        example = {
            "cmd": Command.SERVOL.value,
            "target_pose": np.zeros((6,), dtype=np.float64),
            "duration": 0.0,
            "target_time": 0.0,
        }
        input_queue = SharedMemoryQueue.create_from_examples(
            shm_manager=shm_manager, examples=example, buffer_size=256
        )

        # build ring buffer: (published key, FrankaInterface getter name)
        receive_keys = [
            ("ActualTCPPose", "get_ee_pose"),
            ("ActualQ", "get_joint_positions"),
            ("ActualQd", "get_joint_velocities"),
        ]
        example = dict()
        for key, func_name in receive_keys:
            if "joint" in func_name:
                example[key] = np.zeros(7)
            elif "ee_pose" in func_name:
                example[key] = np.zeros(6)

        example["robot_receive_timestamp"] = time.time()
        example["robot_timestamp"] = time.time()
        ring_buffer = SharedMemoryRingBuffer.create_from_examples(
            shm_manager=shm_manager,
            examples=example,
            get_max_k=get_max_k,
            get_time_budget=0.2,
            put_desired_frequency=frequency,
        )

        self.ready_event = mp.Event()
        self.input_queue = input_queue
        self.ring_buffer = ring_buffer
        self.receive_keys = receive_keys

    # ========= launch method ===========
    def start(self, wait=True):
        """Spawn the control process; optionally block until its first loop ran."""
        super().start()
        if wait:
            self.start_wait()
        if self.verbose:
            print(
                f"[FrankaPositionalController] Controller process spawned at {self.pid}"
            )

    def stop(self, wait=True):
        """Enqueue a STOP command; optionally block until the process exits."""
        message = {"cmd": Command.STOP.value}
        self.input_queue.put(message)
        if wait:
            self.stop_wait()

    def start_wait(self):
        # ready_event is set after the first successful control-loop iteration
        self.ready_event.wait(self.launch_timeout)
        assert self.is_alive()

    def stop_wait(self):
        self.join()

    @property
    def is_ready(self):
        return self.ready_event.is_set()

    # ========= context manager ===========
    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()

    # ========= command methods ============
    def servoL(self, pose, duration=0.1):
        """
        Drive to `pose` (6-vector, tip frame) over `duration` seconds.

        duration: desired time to reach pose; must be at least one control
            period.
        """
        assert self.is_alive()
        assert duration >= (1 / self.frequency)
        pose = np.array(pose)
        assert pose.shape == (6,)

        message = {
            "cmd": Command.SERVOL.value,
            "target_pose": pose,
            "duration": duration,
        }
        self.input_queue.put(message)

    def schedule_waypoint(self, pose, target_time):
        """Schedule `pose` (6-vector, tip frame) at absolute wall-clock `target_time`."""
        pose = np.array(pose)
        assert pose.shape == (6,)

        message = {
            "cmd": Command.SCHEDULE_WAYPOINT.value,
            "target_pose": pose,
            "target_time": target_time,
        }
        self.input_queue.put(message)

    # ========= receive APIs =============
    def get_state(self, k=None, out=None):
        """Return the latest state sample, or the last k samples when k is given."""
        if k is None:
            return self.ring_buffer.get(out=out)
        else:
            return self.ring_buffer.get_last_k(k=k, out=out)

    def get_all_state(self):
        """Return every sample currently held in the ring buffer."""
        return self.ring_buffer.get_all()

    # ========= main loop in process ============
    def run(self):
        """Control-process entry point: stream interpolated poses at `frequency`."""
        # enable soft real-time
        if self.soft_real_time:
            os.sched_setscheduler(0, os.SCHED_RR, os.sched_param(20))

        # start polymetis interface
        robot = FrankaInterface(self.robot_ip, self.robot_port)

        try:
            if self.verbose:
                print(f"[FrankaPositionalController] Connect to robot: {self.robot_ip}")

            # init pose
            if self.joints_init is not None:
                robot.move_to_joint_positions(
                    positions=np.asarray(self.joints_init),
                    time_to_go=self.joints_init_duration,
                )

            # main loop
            dt = 1.0 / self.frequency
            curr_pose = robot.get_ee_pose()

            # use monotonic time to make sure the control loop never go backward
            curr_t = time.monotonic()
            last_waypoint_time = curr_t
            pose_interp = PoseTrajectoryInterpolator(times=[curr_t], poses=[curr_pose])

            # start franka cartesian impedance policy
            robot.start_cartesian_impedance(Kx=self.Kx, Kxd=self.Kxd)

            t_start = time.monotonic()
            iter_idx = 0
            keep_running = True
            while keep_running:
                # send command to robot
                t_now = time.monotonic()
                # diff = t_now - pose_interp.times[-1]
                # if diff > 0:
                #     print('extrapolate', diff)
                tip_pose = pose_interp(t_now)
                # interpolation is done in the tip frame; convert back to
                # the flange frame the server expects
                flange_pose = mat_to_pose(pose_to_mat(tip_pose) @ tx_tip_flange)

                # send command to robot
                robot.update_desired_ee_pose(flange_pose)

                # update robot state
                state = dict()
                for key, func_name in self.receive_keys:
                    state[key] = getattr(robot, func_name)()

                t_recv = time.time()
                state["robot_receive_timestamp"] = t_recv
                # estimated sample time after accounting for transport latency
                state["robot_timestamp"] = t_recv - self.receive_latency
                self.ring_buffer.put(state)

                # fetch command from queue
                try:
                    # commands = self.input_queue.get_all()
                    # n_cmd = len(commands['cmd'])
                    # process at most 1 command per cycle to maintain frequency
                    commands = self.input_queue.get_k(1)
                    n_cmd = len(commands["cmd"])
                except Empty:
                    n_cmd = 0

                # execute commands
                for i in range(n_cmd):
                    command = dict()
                    for key, value in commands.items():
                        command[key] = value[i]
                    cmd = command["cmd"]

                    if cmd == Command.STOP.value:
                        keep_running = False
                        # stop immediately, ignore later commands
                        break
                    elif cmd == Command.SERVOL.value:
                        # since curr_pose always lag behind curr_target_pose
                        # if we start the next interpolation with curr_pose
                        # the command robot receive will have discontinouity
                        # and cause jittery robot behavior.
                        target_pose = command["target_pose"]
                        duration = float(command["duration"])
                        curr_time = t_now + dt
                        t_insert = curr_time + duration
                        pose_interp = pose_interp.drive_to_waypoint(
                            pose=target_pose,
                            time=t_insert,
                            curr_time=curr_time,
                        )
                        last_waypoint_time = t_insert
                        if self.verbose:
                            print(
                                "[FrankaPositionalController] New pose target:{} duration:{}s".format(
                                    target_pose, duration
                                )
                            )
                    elif cmd == Command.SCHEDULE_WAYPOINT.value:
                        target_pose = command["target_pose"]
                        target_time = float(command["target_time"])
                        # translate global time to monotonic time
                        target_time = time.monotonic() - time.time() + target_time
                        curr_time = t_now + dt
                        pose_interp = pose_interp.schedule_waypoint(
                            pose=target_pose,
                            time=target_time,
                            curr_time=curr_time,
                            last_waypoint_time=last_waypoint_time,
                        )
                        last_waypoint_time = target_time
                    else:
                        # unknown opcode: bail out of the control loop
                        keep_running = False
                        break

                # regulate frequency
                t_wait_util = t_start + (iter_idx + 1) * dt
                precise_wait(t_wait_util, time_func=time.monotonic)

                # first loop successful, ready to receive command
                if iter_idx == 0:
                    self.ready_event.set()
                iter_idx += 1

                if self.verbose:
                    print(
                        f"[FrankaPositionalController] Actual frequency {1/(time.monotonic() - t_now)}"
                    )

        finally:
            # mandatory cleanup
            # terminate
            print("\n\n\n\nterminate_current_policy\n\n\n\n\n")
            robot.terminate_current_policy()
            del robot
            # unblock a parent waiting in start_wait even if startup failed
            self.ready_event.set()

            if self.verbose:
                print(
                    f"[FrankaPositionalController] Disconnected from robot: {self.robot_ip}"
                )
|
code/umi/real_world/keystroke_counter.py
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pynput.keyboard import Key, KeyCode, Listener
|
| 2 |
+
from collections import defaultdict
|
| 3 |
+
from threading import Lock
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class KeystrokeCounter(Listener):
    """Keyboard listener that tallies presses in a thread-safe way.

    Keeps a per-key press count and an ordered list of press events.
    Both structures are guarded by a lock, since pynput invokes the
    callbacks on its own listener thread.
    """

    def __init__(self):
        # state shared between the listener thread and reader threads
        self.key_count_map = defaultdict(lambda: 0)
        self.key_press_list = []
        self.lock = Lock()
        super().__init__(on_press=self.on_press, on_release=self.on_release)

    def on_press(self, key):
        """Listener-thread callback: record a single press of ``key``."""
        with self.lock:
            self.key_press_list.append(key)
            self.key_count_map[key] += 1

    def on_release(self, key):
        """Key releases are intentionally ignored."""
        pass

    def clear(self):
        """Reset both the per-key counts and the pending press events."""
        with self.lock:
            self.key_count_map = defaultdict(lambda: 0)
            self.key_press_list = []

    def __getitem__(self, key):
        """Return how many times ``key`` was pressed since the last clear()."""
        with self.lock:
            return self.key_count_map[key]

    def get_press_events(self):
        """Drain and return the presses accumulated since the last call."""
        with self.lock:
            pending, self.key_press_list = self.key_press_list, []
        return pending
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
if __name__ == "__main__":
    import time

    # Demo: print live counts for Space and 'q' until interrupted, then
    # dump the raw press events collected so far.
    with KeystrokeCounter() as counter:
        try:
            while True:
                print("Space:", counter[Key.space])
                print("q:", counter[KeyCode(char="q")])
                time.sleep(1 / 60)
        except KeyboardInterrupt:
            print(counter.get_press_events())
|
code/umi/real_world/multi_camera_visualizer.py
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import time
|
| 2 |
+
import multiprocessing as mp
|
| 3 |
+
import numpy as np
|
| 4 |
+
import cv2
|
| 5 |
+
from threadpoolctl import threadpool_limits
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class MultiCameraVisualizer(mp.Process):
    """Helper process that tiles frames from a multi-camera source into one
    OpenCV window.

    camera: object exposing get_vis(out=...) (e.g. MultiUvcCamera) returning
        a dict whose 'color' entry is an (N, H, W, 3) array.
    row, col: mosaic layout; cameras beyond row*col are not drawn and unused
        cells keep fill_value.
    rgb_to_bgr: reverse the channel axis so RGB sources display correctly
        in OpenCV's BGR window.
    """

    def __init__(
        self,
        camera,
        row,
        col,
        window_name="Multi Cam Vis",
        vis_fps=60,
        fill_value=0,
        rgb_to_bgr=True,
    ):
        super().__init__()
        self.row = row
        self.col = col
        self.window_name = window_name
        self.vis_fps = vis_fps
        self.fill_value = fill_value
        self.rgb_to_bgr = rgb_to_bgr
        self.camera = camera
        # shared variables
        self.stop_event = mp.Event()

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()

    def start(self, wait=False):
        super().start()

    def stop(self, wait=False):
        # signal the loop in run(); by default does not block on process exit
        self.stop_event.set()
        if wait:
            self.stop_wait()

    def start_wait(self):
        pass

    def stop_wait(self):
        self.join()

    def run(self):
        """Process entry point: poll frames and redraw at roughly vis_fps."""
        # keep OpenCV / BLAS single-threaded inside this helper process
        cv2.setNumThreads(1)
        threadpool_limits(1)
        # channel reversal implements RGB -> BGR when requested
        channel_slice = slice(None)
        if self.rgb_to_bgr:
            channel_slice = slice(None, None, -1)

        vis_data = None
        vis_img = None
        while not self.stop_event.is_set():
            # reuse the output buffers across iterations to avoid reallocation
            vis_data = self.camera.get_vis(out=vis_data)
            color = vis_data["color"]
            N, H, W, C = color.shape
            assert C == 3
            oh = H * self.row
            ow = W * self.col
            if vis_img is None:
                # allocate the mosaic once; empty cells keep fill_value
                vis_img = np.full(
                    (oh, ow, 3), fill_value=self.fill_value, dtype=np.uint8
                )
            for row in range(self.row):
                for col in range(self.col):
                    idx = col + row * self.col
                    h_start = H * row
                    h_end = h_start + H
                    w_start = W * col
                    w_end = w_start + W
                    if idx < N:
                        # opencv uses bgr
                        vis_img[h_start:h_end, w_start:w_end] = color[
                            idx, :, :, channel_slice
                        ]
            cv2.imshow(self.window_name, vis_img)
            # pollKey pumps the GUI event loop without blocking
            cv2.pollKey()
            time.sleep(1 / self.vis_fps)
|
code/umi/real_world/multi_uvc_camera.py
ADDED
|
@@ -0,0 +1,184 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List, Optional, Union, Dict, Callable
|
| 2 |
+
import numbers
|
| 3 |
+
import copy
|
| 4 |
+
import time
|
| 5 |
+
import pathlib
|
| 6 |
+
from multiprocessing.managers import SharedMemoryManager
|
| 7 |
+
import numpy as np
|
| 8 |
+
from umi.real_world.uvc_camera import UvcCamera
|
| 9 |
+
from umi.real_world.video_recorder import VideoRecorder
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class MultiUvcCamera:
|
| 13 |
+
def __init__(
|
| 14 |
+
self,
|
| 15 |
+
# v4l2 device file path
|
| 16 |
+
# e.g. /dev/video0
|
| 17 |
+
# or /dev/v4l/by-id/usb-Elgato_Elgato_HD60_X_A00XB320216MTR-video-index0
|
| 18 |
+
dev_video_paths: List[str],
|
| 19 |
+
shm_manager: Optional[SharedMemoryManager] = None,
|
| 20 |
+
resolution=(1280, 720),
|
| 21 |
+
capture_fps=60,
|
| 22 |
+
put_fps=None,
|
| 23 |
+
put_downsample=True,
|
| 24 |
+
get_max_k=30,
|
| 25 |
+
receive_latency=0.0,
|
| 26 |
+
cap_buffer_size=1,
|
| 27 |
+
transform: Optional[Union[Callable[[Dict], Dict], List[Callable]]] = None,
|
| 28 |
+
vis_transform: Optional[Union[Callable[[Dict], Dict], List[Callable]]] = None,
|
| 29 |
+
recording_transform: Optional[
|
| 30 |
+
Union[Callable[[Dict], Dict], List[Callable]]
|
| 31 |
+
] = None,
|
| 32 |
+
video_recorder: Optional[Union[VideoRecorder, List[VideoRecorder]]] = None,
|
| 33 |
+
verbose=False,
|
| 34 |
+
):
|
| 35 |
+
super().__init__()
|
| 36 |
+
|
| 37 |
+
if shm_manager is None:
|
| 38 |
+
shm_manager = SharedMemoryManager()
|
| 39 |
+
shm_manager.start()
|
| 40 |
+
n_cameras = len(dev_video_paths)
|
| 41 |
+
|
| 42 |
+
resolution = repeat_to_list(resolution, n_cameras, tuple)
|
| 43 |
+
capture_fps = repeat_to_list(capture_fps, n_cameras, (int, float))
|
| 44 |
+
cap_buffer_size = repeat_to_list(cap_buffer_size, n_cameras, int)
|
| 45 |
+
transform = repeat_to_list(transform, n_cameras, Callable)
|
| 46 |
+
vis_transform = repeat_to_list(vis_transform, n_cameras, Callable)
|
| 47 |
+
recording_transform = repeat_to_list(recording_transform, n_cameras, Callable)
|
| 48 |
+
video_recorder = repeat_to_list(video_recorder, n_cameras, VideoRecorder)
|
| 49 |
+
|
| 50 |
+
cameras = dict()
|
| 51 |
+
for i, path in enumerate(dev_video_paths):
|
| 52 |
+
cameras[path] = UvcCamera(
|
| 53 |
+
shm_manager=shm_manager,
|
| 54 |
+
dev_video_path=path,
|
| 55 |
+
resolution=resolution[i],
|
| 56 |
+
capture_fps=capture_fps[i],
|
| 57 |
+
put_fps=put_fps,
|
| 58 |
+
put_downsample=put_downsample,
|
| 59 |
+
get_max_k=get_max_k,
|
| 60 |
+
receive_latency=receive_latency,
|
| 61 |
+
cap_buffer_size=cap_buffer_size[i],
|
| 62 |
+
transform=transform[i],
|
| 63 |
+
vis_transform=vis_transform[i],
|
| 64 |
+
recording_transform=recording_transform[i],
|
| 65 |
+
video_recorder=video_recorder[i],
|
| 66 |
+
verbose=verbose,
|
| 67 |
+
)
|
| 68 |
+
|
| 69 |
+
self.cameras = cameras
|
| 70 |
+
self.shm_manager = shm_manager
|
| 71 |
+
|
| 72 |
+
def __enter__(self):
|
| 73 |
+
self.start()
|
| 74 |
+
return self
|
| 75 |
+
|
| 76 |
+
def __exit__(self, exc_type, exc_val, exc_tb):
|
| 77 |
+
self.stop()
|
| 78 |
+
|
| 79 |
+
@property
|
| 80 |
+
def n_cameras(self):
|
| 81 |
+
return len(self.cameras)
|
| 82 |
+
|
| 83 |
+
@property
|
| 84 |
+
def is_ready(self):
|
| 85 |
+
is_ready = True
|
| 86 |
+
for camera in self.cameras.values():
|
| 87 |
+
if not camera.is_ready:
|
| 88 |
+
is_ready = False
|
| 89 |
+
return is_ready
|
| 90 |
+
|
| 91 |
+
def start(self, wait=True, put_start_time=None):
|
| 92 |
+
if put_start_time is None:
|
| 93 |
+
put_start_time = time.time()
|
| 94 |
+
for camera in self.cameras.values():
|
| 95 |
+
camera.start(wait=False, put_start_time=put_start_time)
|
| 96 |
+
|
| 97 |
+
if wait:
|
| 98 |
+
self.start_wait()
|
| 99 |
+
|
| 100 |
+
def stop(self, wait=True):
|
| 101 |
+
for camera in self.cameras.values():
|
| 102 |
+
camera.stop(wait=False)
|
| 103 |
+
|
| 104 |
+
if wait:
|
| 105 |
+
self.stop_wait()
|
| 106 |
+
|
| 107 |
+
def start_wait(self):
|
| 108 |
+
for camera in self.cameras.values():
|
| 109 |
+
camera.start_wait()
|
| 110 |
+
|
| 111 |
+
def stop_wait(self):
|
| 112 |
+
for camera in self.cameras.values():
|
| 113 |
+
camera.join()
|
| 114 |
+
|
| 115 |
+
def get(self, k=None, out=None) -> Dict[int, Dict[str, np.ndarray]]:
|
| 116 |
+
"""
|
| 117 |
+
Return order T,H,W,C
|
| 118 |
+
{
|
| 119 |
+
0: {
|
| 120 |
+
'rgb': (T,H,W,C),
|
| 121 |
+
'timestamp': (T,)
|
| 122 |
+
},
|
| 123 |
+
1: ...
|
| 124 |
+
}
|
| 125 |
+
"""
|
| 126 |
+
if out is None:
|
| 127 |
+
out = dict()
|
| 128 |
+
for i, camera in enumerate(self.cameras.values()):
|
| 129 |
+
this_out = None
|
| 130 |
+
if i in out:
|
| 131 |
+
this_out = out[i]
|
| 132 |
+
this_out = camera.get(k=k, out=this_out)
|
| 133 |
+
out[i] = this_out
|
| 134 |
+
return out
|
| 135 |
+
|
| 136 |
+
def get_vis(self, out=None):
|
| 137 |
+
results = list()
|
| 138 |
+
for i, camera in enumerate(self.cameras.values()):
|
| 139 |
+
this_out = None
|
| 140 |
+
if out is not None:
|
| 141 |
+
this_out = dict()
|
| 142 |
+
for key, v in out.items():
|
| 143 |
+
# use the slicing trick to maintain the array
|
| 144 |
+
# when v is 1D
|
| 145 |
+
this_out[key] = v[i : i + 1].reshape(v.shape[1:])
|
| 146 |
+
this_out = camera.get_vis(out=this_out)
|
| 147 |
+
if out is None:
|
| 148 |
+
results.append(this_out)
|
| 149 |
+
if out is None:
|
| 150 |
+
out = dict()
|
| 151 |
+
for key in results[0].keys():
|
| 152 |
+
out[key] = np.stack([x[key] for x in results])
|
| 153 |
+
return out
|
| 154 |
+
|
| 155 |
+
def start_recording(self, video_path: Union[str, List[str]], start_time: float):
|
| 156 |
+
if isinstance(video_path, str):
|
| 157 |
+
# directory
|
| 158 |
+
video_dir = pathlib.Path(video_path)
|
| 159 |
+
assert video_dir.parent.is_dir()
|
| 160 |
+
video_dir.mkdir(parents=True, exist_ok=True)
|
| 161 |
+
video_path = list()
|
| 162 |
+
for i in range(self.n_cameras):
|
| 163 |
+
video_path.append(str(video_dir.joinpath(f"{i}.mp4").absolute()))
|
| 164 |
+
assert len(video_path) == self.n_cameras
|
| 165 |
+
|
| 166 |
+
for i, camera in enumerate(self.cameras.values()):
|
| 167 |
+
camera.start_recording(video_path[i], start_time)
|
| 168 |
+
|
| 169 |
+
def stop_recording(self):
    """Stop recording on every camera."""
    # The index from the original enumerate() was unused; iterate
    # directly (consistent with restart_put below).
    for camera in self.cameras.values():
        camera.stop_recording()
|
| 172 |
+
|
| 173 |
+
def restart_put(self, start_time):
    """Restart frame publishing on every camera at *start_time*."""
    for cam in self.cameras.values():
        cam.restart_put(start_time)
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
def repeat_to_list(x, n: int, cls):
    """Normalize *x* into a list of exactly *n* items.

    None becomes ``[None] * n``; a single *cls* instance becomes *n*
    independent deep copies; anything else must already have length *n*.
    """
    result = [None] * n if x is None else x
    if isinstance(result, cls):
        result = [copy.deepcopy(result) for _ in range(n)]
    assert len(result) == n
    return result
|
code/umi/real_world/real_env.py
ADDED
|
@@ -0,0 +1,488 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional
|
| 2 |
+
import pathlib
|
| 3 |
+
import numpy as np
|
| 4 |
+
import time
|
| 5 |
+
import shutil
|
| 6 |
+
import math
|
| 7 |
+
from multiprocessing.managers import SharedMemoryManager
|
| 8 |
+
from umi.real_world.rtde_interpolation_controller import RTDEInterpolationController
|
| 9 |
+
from umi.real_world.wsg_controller import WSGController
|
| 10 |
+
from umi.real_world.multi_uvc_camera import MultiUvcCamera
|
| 11 |
+
from umi.real_world.video_recorder import VideoRecorder
|
| 12 |
+
from unified_video_action.common.timestamp_accumulator import (
|
| 13 |
+
TimestampObsAccumulator,
|
| 14 |
+
TimestampActionAccumulator,
|
| 15 |
+
align_timestamps,
|
| 16 |
+
)
|
| 17 |
+
from umi.real_world.multi_camera_visualizer import MultiCameraVisualizer
|
| 18 |
+
from unified_video_action.common.replay_buffer import ReplayBuffer
|
| 19 |
+
from unified_video_action.common.cv2_util import get_image_transform, optimal_row_cols
|
| 20 |
+
from umi.common.usb_util import reset_all_elgato_devices, get_sorted_v4l_paths
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
# Maps raw state keys reported by the robot / gripper controllers to the
# observation names exposed by RealEnv.get_obs(). Keys absent from this
# map are dropped from the observation dict.
DEFAULT_OBS_KEY_MAP = {
    # robot (RTDE state names)
    "ActualTCPPose": "robot_eef_pose",
    "ActualTCPSpeed": "robot_eef_pose_vel",
    "ActualQ": "robot_joint",
    "ActualQd": "robot_joint_vel",
    # gripper
    "gripper_position": "gripper_position",
    "gripper_velocity": "gripper_velocity",
    "gripper_force": "gripper_force",
    # timestamps
    "step_idx": "step_idx",
    "timestamp": "timestamp",
}
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class RealEnv:
    """Single-arm real-robot environment (UR5 via RTDE + WSG gripper + UVC cameras).

    Aggregates camera capture, robot control and gripper control behind an
    async gym-like API (get_obs / exec_actions) and records synchronized
    episodes: per-camera mp4 videos plus a zarr-backed ReplayBuffer of
    low-dim state and actions.
    """

    def __init__(
        self,
        # required params
        output_dir,
        robot_ip,
        gripper_ip,
        gripper_port=1000,
        # env params
        frequency=10,
        n_obs_steps=2,
        # obs
        obs_image_resolution=(256, 256),
        max_obs_buffer_size=30,
        # NOTE(review): shared mutable default (module-level dict) —
        # callers must not mutate the map they receive.
        obs_key_map=DEFAULT_OBS_KEY_MAP,
        obs_float32=False,
        # action
        max_pos_speed=0.25,
        max_rot_speed=0.6,
        # robot
        tcp_offset=0.13,
        init_joints=False,
        # video capture params
        video_capture_fps=60,
        video_capture_resolution=(1280, 720),
        # saving params
        record_raw_video=True,
        thread_per_video=4,
        video_crf=21,
        # vis params
        enable_multi_cam_vis=True,
        multi_cam_vis_resolution=(1280, 720),
        # shared memory
        shm_manager=None,
    ):
        """Build all sub-controllers; nothing starts until start() is called.

        frequency is the control/observation rate in Hz; it must not
        exceed the camera capture rate.
        """
        assert frequency <= video_capture_fps
        output_dir = pathlib.Path(output_dir)
        # parent must exist so we fail early on a bad path
        assert output_dir.parent.is_dir()
        video_dir = output_dir.joinpath("videos")
        video_dir.mkdir(parents=True, exist_ok=True)
        zarr_path = str(output_dir.joinpath("replay_buffer.zarr").absolute())
        # mode="a": append to an existing buffer across sessions
        replay_buffer = ReplayBuffer.create_from_path(zarr_path=zarr_path, mode="a")

        if shm_manager is None:
            shm_manager = SharedMemoryManager()
            shm_manager.start()

        # Find and reset all Elgato capture cards.
        # Required to workaround a firmware bug.
        reset_all_elgato_devices()

        # Wait for all v4l cameras to be back online
        time.sleep(0.1)
        v4l_paths = get_sorted_v4l_paths()

        # observation transform: resize + BGR->RGB (and optional float32 [0,1])
        color_tf = get_image_transform(
            input_res=video_capture_resolution,
            output_res=obs_image_resolution,
            # obs output rgb
            bgr_to_rgb=True,
        )
        color_transform = color_tf
        if obs_float32:
            color_transform = lambda x: color_tf(x).astype(np.float32) / 255

        def transform(data):
            data["color"] = color_transform(data["color"])
            return data

        # tile layout for the on-screen multi-camera visualization
        rw, rh, col, row = optimal_row_cols(
            n_cameras=len(v4l_paths),
            in_wh_ratio=obs_image_resolution[0] / obs_image_resolution[1],
            max_resolution=multi_cam_vis_resolution,
        )
        vis_color_transform = get_image_transform(
            input_res=video_capture_resolution, output_res=(rw, rh), bgr_to_rgb=False
        )

        def vis_transform(data):
            data["color"] = vis_color_transform(data["color"])
            return data

        # raw recording: full-fps BGR frames straight off the capture card;
        # otherwise record the (downsampled, RGB) observation stream
        recording_transfrom = None
        recording_fps = video_capture_fps
        recording_pix_fmt = "bgr24"
        if not record_raw_video:
            recording_transfrom = transform
            recording_fps = frequency
            recording_pix_fmt = "rgb24"

        video_recorder = VideoRecorder.create_h264(
            shm_manager=shm_manager,
            fps=recording_fps,
            codec="h264",
            input_pix_fmt=recording_pix_fmt,
            crf=video_crf,
            thread_type="FRAME",
            thread_count=thread_per_video,
        )

        camera = MultiUvcCamera(
            dev_video_paths=v4l_paths,
            shm_manager=shm_manager,
            resolution=video_capture_resolution,
            capture_fps=video_capture_fps,
            put_fps=video_capture_fps,
            # send every frame immediately after arrival
            # ignores put_fps
            put_downsample=False,
            record_fps=recording_fps,
            get_max_k=max_obs_buffer_size,
            transform=transform,
            vis_transform=vis_transform,
            recording_transform=recording_transfrom,
            video_recorder=video_recorder,
            verbose=False,
        )

        multi_cam_vis = None
        if enable_multi_cam_vis:
            multi_cam_vis = MultiCameraVisualizer(
                camera=camera, row=row, col=col, rgb_to_bgr=False
            )

        # speed limits are specified per-axis; scale by the unit-cube
        # diagonal so the combined limit matches the scalar arguments
        cube_diag = np.linalg.norm([1, 1, 1])
        j_init = np.array([0, -90, -90, -90, 90, 0]) / 180 * np.pi
        if not init_joints:
            j_init = None

        robot = RTDEInterpolationController(
            shm_manager=shm_manager,
            robot_ip=robot_ip,
            frequency=500,  # UR5 CB3 RTDE
            lookahead_time=0.1,
            gain=300,
            max_pos_speed=max_pos_speed * cube_diag,
            max_rot_speed=max_rot_speed * cube_diag,
            launch_timeout=3,
            tcp_offset_pose=[0, 0, tcp_offset, 0, 0, 0],
            payload_mass=None,
            payload_cog=None,
            joints_init=j_init,
            joints_init_speed=1.05,
            soft_real_time=False,
            verbose=False,
            receive_keys=None,
            get_max_k=max_obs_buffer_size,
        )

        gripper = WSGController(
            shm_manager=shm_manager,
            hostname=gripper_ip,
            port=gripper_port,
        )

        self.camera = camera
        self.robot = robot
        self.gripper = gripper
        self.multi_cam_vis = multi_cam_vis
        self.video_capture_fps = video_capture_fps
        self.frequency = frequency
        self.n_obs_steps = n_obs_steps
        self.max_obs_buffer_size = max_obs_buffer_size
        self.max_pos_speed = max_pos_speed
        self.max_rot_speed = max_rot_speed
        self.obs_key_map = obs_key_map
        # recording
        self.output_dir = output_dir
        self.video_dir = video_dir
        self.replay_buffer = replay_buffer
        # temp memory buffers (reused across get_obs calls)
        self.last_camera_data = None
        # recording buffers; non-None only while an episode is active
        self.robot_obs_accumulator = None
        self.gripper_obs_accumulator = None
        self.action_accumulator = None
        self.stage_accumulator = None

        self.start_time = None

    # ======== start-stop API =============
    @property
    def is_ready(self):
        # ready only once every sub-process has come up
        return self.camera.is_ready and self.robot.is_ready and self.gripper.is_ready

    def start(self, wait=True):
        """Launch all sub-processes; optionally block until ready."""
        self.camera.start(wait=False)
        self.gripper.start(wait=False)
        self.robot.start(wait=False)
        if self.multi_cam_vis is not None:
            self.multi_cam_vis.start(wait=False)
        if wait:
            self.start_wait()

    def stop(self, wait=True):
        """Finalize any active episode, then tear everything down."""
        self.end_episode()
        if self.multi_cam_vis is not None:
            self.multi_cam_vis.stop(wait=False)
        self.robot.stop(wait=False)
        self.gripper.stop(wait=False)
        self.camera.stop(wait=False)
        if wait:
            self.stop_wait()

    def start_wait(self):
        # block until each sub-process reports ready
        self.camera.start_wait()
        self.gripper.start_wait()
        self.robot.start_wait()
        if self.multi_cam_vis is not None:
            self.multi_cam_vis.start_wait()

    def stop_wait(self):
        # block until each sub-process has exited
        self.robot.stop_wait()
        self.gripper.stop_wait()
        self.camera.stop_wait()
        if self.multi_cam_vis is not None:
            self.multi_cam_vis.stop_wait()

    # ========= context manager ===========
    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()

    # ========= async env API ===========
    def get_obs(self) -> dict:
        """Return the latest n_obs_steps observations, time-aligned across
        cameras, robot and gripper at self.frequency.

        Keys: camera_<i> images, remapped robot/gripper state (see
        obs_key_map) and a 'timestamp' array of the aligned times.
        """
        assert self.is_ready

        # get data
        # 30 Hz, camera_receive_timestamp
        # fetch enough camera frames to cover n_obs_steps at env frequency
        k = math.ceil(self.n_obs_steps * (self.video_capture_fps / self.frequency))
        self.last_camera_data = self.camera.get(k=k, out=self.last_camera_data)

        # 125 hz, robot_receive_timestamp
        last_robot_data = self.robot.get_all_state()
        # both have more than n_obs_steps data

        # 30 hz, gripper_receive_timestamp
        last_gripper_data = self.gripper.get_all_state()

        # align camera obs timestamps: build an evenly-spaced grid ending
        # at the newest frame seen by any camera
        dt = 1 / self.frequency
        last_timestamp = np.max(
            [x["timestamp"][-1] for x in self.last_camera_data.values()]
        )
        obs_align_timestamps = last_timestamp - (np.arange(self.n_obs_steps)[::-1] * dt)

        camera_obs = dict()
        for camera_idx, value in self.last_camera_data.items():
            this_timestamps = value["timestamp"]
            this_idxs = list()
            for t in obs_align_timestamps:
                # pick the newest sample strictly before each grid time
                # (falls back to index 0 when none exists)
                is_before_idxs = np.nonzero(this_timestamps < t)[0]
                this_idx = 0
                if len(is_before_idxs) > 0:
                    this_idx = is_before_idxs[-1]
                this_idxs.append(this_idx)
            # remap key
            camera_obs[f"camera_{camera_idx}"] = value["color"][this_idxs]

        # align robot obs (same newest-before-t selection as cameras)
        robot_timestamps = last_robot_data["robot_receive_timestamp"]
        this_timestamps = robot_timestamps
        this_idxs = list()
        for t in obs_align_timestamps:
            is_before_idxs = np.nonzero(this_timestamps < t)[0]
            this_idx = 0
            if len(is_before_idxs) > 0:
                this_idx = is_before_idxs[-1]
            this_idxs.append(this_idx)

        # NOTE: the loop variable k below reuses the name of the camera
        # sample count computed above (harmless — k is not used again).
        robot_obs_raw = dict()
        for k, v in last_robot_data.items():
            if k in self.obs_key_map:
                robot_obs_raw[self.obs_key_map[k]] = v

        robot_obs = dict()
        for k, v in robot_obs_raw.items():
            robot_obs[k] = v[this_idxs]

        # align gripper obs
        gripper_timestamps = last_gripper_data["gripper_receive_timestamp"]
        this_timestamps = gripper_timestamps
        this_idxs = list()
        for t in obs_align_timestamps:
            is_before_idxs = np.nonzero(this_timestamps < t)[0]
            this_idx = 0
            if len(is_before_idxs) > 0:
                this_idx = is_before_idxs[-1]
            this_idxs.append(this_idx)

        gripper_obs_raw = dict()
        for k, v in last_gripper_data.items():
            if k in self.obs_key_map:
                gripper_obs_raw[self.obs_key_map[k]] = v

        gripper_obs = dict()
        for k, v in gripper_obs_raw.items():
            gripper_obs[k] = v[this_idxs]

        # accumulate obs (only while recording an episode)
        if self.robot_obs_accumulator is not None:
            self.robot_obs_accumulator.put(robot_obs_raw, robot_timestamps)
        if self.gripper_obs_accumulator is not None:
            self.gripper_obs_accumulator.put(gripper_obs_raw, gripper_timestamps)

        # return obs
        obs_data = dict(camera_obs)
        obs_data.update(robot_obs)
        obs_data.update(gripper_obs)
        obs_data["timestamp"] = obs_align_timestamps
        return obs_data

    def exec_actions(
        self,
        actions: np.ndarray,
        timestamps: np.ndarray,
        stages: Optional[np.ndarray] = None,
    ):
        """Schedule timestamped waypoints on robot (dims 0:6) and gripper
        (dims 6:). Actions whose timestamp is already in the past are
        silently dropped.
        """
        assert self.is_ready
        if not isinstance(actions, np.ndarray):
            actions = np.array(actions)
        if not isinstance(timestamps, np.ndarray):
            timestamps = np.array(timestamps)
        if stages is None:
            stages = np.zeros_like(timestamps, dtype=np.int64)
        elif not isinstance(stages, np.ndarray):
            stages = np.array(stages, dtype=np.int64)

        # convert action to pose
        # keep only actions scheduled in the future
        receive_time = time.time()
        is_new = timestamps > receive_time
        new_actions = actions[is_new]
        new_timestamps = timestamps[is_new]
        new_stages = stages[is_new]

        # schedule waypoints
        for i in range(len(new_actions)):
            r_actions = new_actions[i, :6]
            # +1 offset on the gripper command — presumably remaps the
            # policy's output range to WSGController units; TODO confirm
            g_actions = new_actions[i, 6:] + 1
            self.robot.schedule_waypoint(pose=r_actions, target_time=new_timestamps[i])
            # gripper target leads the robot by 20ms — presumably latency
            # compensation; verify against gripper controller
            self.gripper.schedule_waypoint(
                pos=g_actions, target_time=new_timestamps[i] - 0.02
            )

        # record actions (only while recording an episode)
        if self.action_accumulator is not None:
            self.action_accumulator.put(new_actions, new_timestamps)
        if self.stage_accumulator is not None:
            self.stage_accumulator.put(new_stages, new_timestamps)

    def get_robot_state(self):
        return self.robot.get_state()

    # recording API
    def start_episode(self, start_time=None):
        """Start recording a new episode: per-camera videos plus
        time-bucketed obs/action accumulators at self.frequency."""
        if start_time is None:
            start_time = time.time()
        self.start_time = start_time

        assert self.is_ready

        # prepare recording stuff
        episode_id = self.replay_buffer.n_episodes
        this_video_dir = self.video_dir.joinpath(str(episode_id))
        this_video_dir.mkdir(parents=True, exist_ok=True)
        n_cameras = self.camera.n_cameras
        video_paths = list()
        for i in range(n_cameras):
            video_paths.append(str(this_video_dir.joinpath(f"{i}.mp4").absolute()))

        # start recording on camera
        self.camera.restart_put(start_time=start_time)
        self.camera.start_recording(video_path=video_paths, start_time=start_time)

        # create accumulators
        self.robot_obs_accumulator = TimestampObsAccumulator(
            start_time=start_time, dt=1 / self.frequency
        )
        self.gripper_obs_accumulator = TimestampObsAccumulator(
            start_time=start_time, dt=1 / self.frequency
        )
        self.action_accumulator = TimestampActionAccumulator(
            start_time=start_time, dt=1 / self.frequency
        )
        self.stage_accumulator = TimestampActionAccumulator(
            start_time=start_time, dt=1 / self.frequency
        )
        print(f"Episode {episode_id} started!")

    def end_episode(self):
        """Stop recording and flush the accumulated episode (if any) into
        the replay buffer. Safe to call when no episode is active."""
        assert self.is_ready

        # stop video recorder
        self.camera.stop_recording()

        if self.robot_obs_accumulator is not None:
            # recording
            assert self.gripper_obs_accumulator is not None
            assert self.action_accumulator is not None
            assert self.stage_accumulator is not None

            # Since the only way to accumulate obs and action is by calling
            # get_obs and exec_actions, which will be in the same thread.
            # We don't need to worry new data come in here.
            robot_obs_data = self.robot_obs_accumulator.data
            robot_obs_timestamps = self.robot_obs_accumulator.timestamps

            gripper_obs_data = self.gripper_obs_accumulator.data
            gripper_obs_timestamps = self.gripper_obs_accumulator.timestamps

            actions = self.action_accumulator.actions
            action_timestamps = self.action_accumulator.timestamps
            stages = self.stage_accumulator.actions
            # truncate all streams to their common length so every row of
            # the episode has obs, action and stage
            n_steps = min(
                len(robot_obs_timestamps),
                len(gripper_obs_timestamps),
                len(action_timestamps),
            )
            if n_steps > 0:
                episode = dict()
                episode["timestamp"] = robot_obs_timestamps[:n_steps]
                episode["action"] = actions[:n_steps]
                episode["stage"] = stages[:n_steps]
                for key, value in robot_obs_data.items():
                    episode[key] = value[:n_steps]
                for key, value in gripper_obs_data.items():
                    episode[key] = value[:n_steps]
                self.replay_buffer.add_episode(episode, compressors="disk")
                episode_id = self.replay_buffer.n_episodes - 1
                print(f"Episode {episode_id} saved!")

            # clear accumulators so the next end_episode is a no-op
            self.robot_obs_accumulator = None
            self.gripper_obs_accumulator = None
            self.action_accumulator = None
            self.stage_accumulator = None

    def drop_episode(self):
        """Discard the current episode: end it, drop it from the replay
        buffer and delete its video directory."""
        self.end_episode()
        self.replay_buffer.drop_episode()
        # after drop_episode, n_episodes equals the id of the episode just
        # removed, so this targets the dropped episode's video folder
        episode_id = self.replay_buffer.n_episodes
        this_video_dir = self.video_dir.joinpath(str(episode_id))
        if this_video_dir.exists():
            shutil.rmtree(str(this_video_dir))
        print(f"Episode {episode_id} dropped!")
|
code/umi/real_world/real_inference_util.py
ADDED
|
@@ -0,0 +1,236 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict, Callable, Tuple, List
|
| 2 |
+
import numpy as np
|
| 3 |
+
import collections
|
| 4 |
+
from unified_video_action.common.cv2_util import get_image_transform
|
| 5 |
+
from unified_video_action.common.pose_repr_util import (
|
| 6 |
+
compute_relative_pose,
|
| 7 |
+
convert_pose_mat_rep,
|
| 8 |
+
)
|
| 9 |
+
from umi.common.pose_util import (
|
| 10 |
+
pose_to_mat,
|
| 11 |
+
mat_to_pose,
|
| 12 |
+
mat_to_pose10d,
|
| 13 |
+
pose10d_to_mat,
|
| 14 |
+
)
|
| 15 |
+
from unified_video_action.model.common.rotation_transformer import RotationTransformer
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def get_real_obs_resolution(shape_meta: dict) -> Tuple[int, int]:
    """Extract the camera image resolution from *shape_meta*.

    Scans every ``rgb`` entry under ``shape_meta['obs']`` (shapes stored
    as C,H,W) and asserts that all cameras agree on one resolution.

    Returns:
        (width, height), or None when no rgb entry exists.
    """
    out_res = None
    for attr in shape_meta["obs"].values():
        # renamed from `type` to avoid shadowing the builtin
        obs_type = attr.get("type", "low_dim")
        if obs_type == "rgb":
            co, ho, wo = attr.get("shape")
            if out_res is None:
                out_res = (wo, ho)
            # every rgb observation must share the same resolution
            assert out_res == (wo, ho)
    return out_res
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def get_real_obs_dict(
    env_obs: Dict[str, np.ndarray],
    shape_meta: dict,
) -> Dict[str, np.ndarray]:
    """Convert raw environment observations to the model's input format.

    ``rgb`` entries are resized to the resolution declared in
    *shape_meta* (shape stored as C,H,W), normalized to float32 in
    [0, 1] when they arrive as uint8, and transposed THWC -> TCHW.
    ``low_dim`` entries pass through unchanged.
    """
    obs_dict_np = dict()
    obs_shape_meta = shape_meta["obs"]
    for key, attr in obs_shape_meta.items():
        # renamed from `type` to avoid shadowing the builtin
        obs_type = attr.get("type", "low_dim")
        shape = attr.get("shape")
        if obs_type == "rgb":
            this_imgs_in = env_obs[key]
            t, hi, wi, ci = this_imgs_in.shape
            co, ho, wo = shape
            assert ci == co
            out_imgs = this_imgs_in
            # resize when resolution differs; uint8 input always takes the
            # transform path so it ends up float32 in [0, 1]
            if (ho != hi) or (wo != wi) or (this_imgs_in.dtype == np.uint8):
                tf = get_image_transform(
                    input_res=(wi, hi), output_res=(wo, ho), bgr_to_rgb=False
                )
                out_imgs = np.stack([tf(x) for x in this_imgs_in])
                if this_imgs_in.dtype == np.uint8:
                    out_imgs = out_imgs.astype(np.float32) / 255
            # THWC to TCHW
            obs_dict_np[key] = np.moveaxis(out_imgs, -1, 1)
        elif obs_type == "low_dim":
            obs_dict_np[key] = env_obs[key]
    return obs_dict_np
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def get_real_umi_obs_dict(
    env_obs: Dict[str, np.ndarray],
    shape_meta: dict,
    obs_pose_repr: str = "abs",
    tx_robot1_robot0: np.ndarray = None,
    episode_start_pose: List[np.ndarray] = None,
) -> Dict[str, np.ndarray]:
    """Build the UMI policy observation dict from raw env observations.

    Images are resized/normalized and transposed to TCHW; non-eef low-dim
    keys pass through; each robot's eef pose is re-expressed per
    *obs_pose_repr*, plus cross-robot relative poses (bimanual case) and
    a relative-to-episode-start rotation when *episode_start_pose* is given.

    NOTE(review): tx_robot1_robot0 is presumably the 4x4 transform from
    robot0 frame to robot1 frame — only consulted when 2+ robots are
    present; confirm against the caller.
    """
    obs_dict_np = dict()
    # process non-pose
    obs_shape_meta = shape_meta["obs"]
    # robot prefix (e.g. 'robot0') -> all of its low-dim obs keys
    robot_prefix_map = collections.defaultdict(list)
    for key, attr in obs_shape_meta.items():
        type = attr.get("type", "low_dim")
        shape = attr.get("shape")
        if type == "rgb":
            this_imgs_in = env_obs[key]
            t, hi, wi, ci = this_imgs_in.shape
            co, ho, wo = shape
            assert ci == co
            out_imgs = this_imgs_in
            # resize when resolution differs; uint8 always takes the
            # transform path before normalization to float32 [0, 1]
            if (ho != hi) or (wo != wi) or (this_imgs_in.dtype == np.uint8):
                tf = get_image_transform(
                    input_res=(wi, hi), output_res=(wo, ho), bgr_to_rgb=False
                )
                out_imgs = np.stack([tf(x) for x in this_imgs_in])
                if this_imgs_in.dtype == np.uint8:
                    out_imgs = out_imgs.astype(np.float32) / 255
            # THWC to TCHW
            obs_dict_np[key] = np.moveaxis(out_imgs, -1, 1)
        elif type == "low_dim" and ("eef" not in key):
            # eef keys are handled below via the pose-representation logic
            this_data_in = env_obs[key]
            obs_dict_np[key] = this_data_in
        # handle multi-robots
        ks = key.split("_")
        if ks[0].startswith("robot"):
            robot_prefix_map[ks[0]].append(key)

    # generate relative pose: re-express each robot's pose history in the
    # requested representation, using its own latest pose as the base
    for robot_prefix in robot_prefix_map.keys():
        # convert pose to mat
        pose_mat = pose_to_mat(
            np.concatenate(
                [
                    env_obs[robot_prefix + "_eef_pos"],
                    env_obs[robot_prefix + "_eef_rot_axis_angle"],
                ],
                axis=-1,
            )
        )

        # solve relative obs
        obs_pose_mat = convert_pose_mat_rep(
            pose_mat, base_pose_mat=pose_mat[-1], pose_rep=obs_pose_repr, backward=False
        )

        # 10d = 3 position + 6d rotation + (gripper handled elsewhere)
        obs_pose = mat_to_pose10d(obs_pose_mat)
        obs_dict_np[robot_prefix + "_eef_pos"] = obs_pose[..., :3]
        obs_dict_np[robot_prefix + "_eef_rot_axis_angle"] = obs_pose[..., 3:]

    # generate pose relative to other robot (bimanual coordination features)
    n_robots = len(robot_prefix_map)
    for robot_id in range(n_robots):
        # convert pose to mat
        assert f"robot{robot_id}" in robot_prefix_map
        tx_robota_tcpa = pose_to_mat(
            np.concatenate(
                [
                    env_obs[f"robot{robot_id}_eef_pos"],
                    env_obs[f"robot{robot_id}_eef_rot_axis_angle"],
                ],
                axis=-1,
            )
        )
        for other_robot_id in range(n_robots):
            if robot_id == other_robot_id:
                continue
            tx_robotb_tcpb = pose_to_mat(
                np.concatenate(
                    [
                        env_obs[f"robot{other_robot_id}_eef_pos"],
                        env_obs[f"robot{other_robot_id}_eef_rot_axis_angle"],
                    ],
                    axis=-1,
                )
            )
            # bring the other robot's tcp into this robot's base frame
            tx_robota_robotb = tx_robot1_robot0
            if robot_id == 0:
                tx_robota_robotb = np.linalg.inv(tx_robot1_robot0)
            tx_robota_tcpb = tx_robota_robotb @ tx_robotb_tcpb

            # this robot's pose history expressed relative to the other
            # robot's latest tcp pose
            rel_obs_pose_mat = convert_pose_mat_rep(
                tx_robota_tcpa,
                base_pose_mat=tx_robota_tcpb[-1],
                pose_rep="relative",
                backward=False,
            )
            rel_obs_pose = mat_to_pose10d(rel_obs_pose_mat)
            obs_dict_np[f"robot{robot_id}_eef_pos_wrt{other_robot_id}"] = rel_obs_pose[
                :, :3
            ]
            obs_dict_np[f"robot{robot_id}_eef_rot_axis_angle_wrt{other_robot_id}"] = (
                rel_obs_pose[:, 3:]
            )

    # generate relative pose with respect to episode start
    if episode_start_pose is not None:
        for robot_id in range(n_robots):
            # convert pose to mat
            pose_mat = pose_to_mat(
                np.concatenate(
                    [
                        env_obs[f"robot{robot_id}_eef_pos"],
                        env_obs[f"robot{robot_id}_eef_rot_axis_angle"],
                    ],
                    axis=-1,
                )
            )

            # get start pose
            start_pose = episode_start_pose[robot_id]
            start_pose_mat = pose_to_mat(start_pose)
            rel_obs_pose_mat = convert_pose_mat_rep(
                pose_mat,
                base_pose_mat=start_pose_mat,
                pose_rep="relative",
                backward=False,
            )

            rel_obs_pose = mat_to_pose10d(rel_obs_pose_mat)
            # only the rotation component is exposed; the position wrt
            # start is deliberately left out (see commented line)
            # obs_dict_np[f'robot{robot_id}_eef_pos_wrt_start'] = rel_obs_pose[:,:3]
            obs_dict_np[f"robot{robot_id}_eef_rot_axis_angle_wrt_start"] = rel_obs_pose[
                :, 3:
            ]

    return obs_dict_np
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
def get_real_umi_action(
    action: np.ndarray, env_obs: Dict[str, np.ndarray], action_pose_repr: str = "abs"
):
    """Convert a policy action vector into absolute per-robot env actions.

    The policy emits 10 numbers per robot: a 9-D pose (pos + 6-D rotation
    representation) followed by 1 gripper value. Each 9-D pose is decoded,
    re-expressed as an absolute pose using the robot's latest observed EEF
    pose as the base (per ``action_pose_repr``), and re-packed as
    ``[pose6d, grip]`` per robot along the last axis.

    Args:
        action: array whose last dimension is ``10 * n_robots``.
        env_obs: observation dict holding ``robot{i}_eef_pos`` and
            ``robot{i}_eef_rot_axis_angle`` histories (last entry is newest).
        action_pose_repr: pose representation the policy was trained with
            (e.g. "abs" or "relative"); passed to ``convert_pose_mat_rep``.

    Returns:
        np.ndarray with ``7 * n_robots`` values on the last axis
        (6-D pose + gripper per robot).
    """
    n_robots = int(action.shape[-1] // 10)
    parts = []
    for robot_idx in range(n_robots):
        # Latest observed EEF pose serves as the base frame for decoding.
        latest_pose = np.concatenate(
            [
                env_obs[f"robot{robot_idx}_eef_pos"][-1],
                env_obs[f"robot{robot_idx}_eef_rot_axis_angle"][-1],
            ],
            axis=-1,
        )
        base_mat = pose_to_mat(latest_pose)

        # Slice this robot's 9-D pose and 1-D gripper out of the flat action.
        offset = robot_idx * 10
        pose10d = action[..., offset : offset + 9]
        grip = action[..., offset + 9 : offset + 10]

        # Undo the training-time pose representation (backward=True) to
        # recover an absolute pose matrix.
        abs_mat = convert_pose_mat_rep(
            pose10d_to_mat(pose10d),
            base_pose_mat=base_mat,
            pose_rep=action_pose_repr,
            backward=True,
        )

        parts.append(mat_to_pose(abs_mat))
        parts.append(grip)

    return np.concatenate(parts, axis=-1)
|
code/umi/real_world/rtde_interpolation_controller.py
ADDED
|
@@ -0,0 +1,376 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import time
|
| 3 |
+
import enum
|
| 4 |
+
import multiprocessing as mp
|
| 5 |
+
from multiprocessing.managers import SharedMemoryManager
|
| 6 |
+
import scipy.interpolate as si
|
| 7 |
+
import scipy.spatial.transform as st
|
| 8 |
+
import numpy as np
|
| 9 |
+
from rtde_control import RTDEControlInterface
|
| 10 |
+
from rtde_receive import RTDEReceiveInterface
|
| 11 |
+
from umi.shared_memory.shared_memory_queue import SharedMemoryQueue, Empty
|
| 12 |
+
from umi.shared_memory.shared_memory_ring_buffer import SharedMemoryRingBuffer
|
| 13 |
+
from umi.common.pose_trajectory_interpolator import PoseTrajectoryInterpolator
|
| 14 |
+
from unified_video_action.common.precise_sleep import precise_wait
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class Command(enum.Enum):
    """Opcodes sent through the shared-memory queue to the controller process."""

    STOP = 0  # terminate the control loop (ignores any later queued commands)
    SERVOL = 1  # drive to a target pose within a requested duration
    SCHEDULE_WAYPOINT = 2  # insert a time-stamped waypoint into the trajectory
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class RTDEInterpolationController(mp.Process):
    """
    To ensure sending command to the robot with predictable latency
    this controller need its separate process (due to python GIL)

    Runs a fixed-frequency servo loop in a child process: it evaluates a
    pose trajectory interpolator each cycle, sends ``servoL`` commands via
    ur_rtde, publishes robot state into a shared-memory ring buffer, and
    consumes pose commands from a shared-memory queue.
    """

    def __init__(
        self,
        shm_manager: SharedMemoryManager,
        robot_ip,
        frequency=125,
        lookahead_time=0.1,
        gain=300,
        max_pos_speed=0.25,  # 5% of max speed
        max_rot_speed=0.16,  # 5% of max speed
        launch_timeout=3,
        tcp_offset_pose=None,
        payload_mass=None,
        payload_cog=None,
        joints_init=None,
        joints_init_speed=1.05,
        soft_real_time=False,
        verbose=False,
        receive_keys=None,
        get_max_k=None,
        receive_latency=0.0,
    ):
        """
        frequency: CB2=125, UR3e=500
        lookahead_time: [0.03, 0.2]s smoothens the trajectory with this lookahead time
        gain: [100, 2000] proportional gain for following target position
        max_pos_speed: m/s
        max_rot_speed: rad/s
        tcp_offset_pose: 6d pose
        payload_mass: float
        payload_cog: 3d position, center of gravity
        soft_real_time: enables round-robin scheduling and real-time priority
            requires running scripts/rtprio_setup.sh before hand.
        receive_keys: ur_rtde receive-interface attribute names to sample each
            cycle (``get<Key>()`` is called); defaults to the full TCP/joint set.
        get_max_k: ring-buffer depth; defaults to 5 seconds of samples.
        receive_latency: seconds subtracted from receive time to estimate the
            true measurement timestamp ("robot_timestamp").
        """
        # verify
        assert 0 < frequency <= 500
        assert 0.03 <= lookahead_time <= 0.2
        assert 100 <= gain <= 2000
        assert 0 < max_pos_speed
        assert 0 < max_rot_speed
        if tcp_offset_pose is not None:
            tcp_offset_pose = np.array(tcp_offset_pose)
            assert tcp_offset_pose.shape == (6,)
        if payload_mass is not None:
            assert 0 <= payload_mass <= 5
        if payload_cog is not None:
            payload_cog = np.array(payload_cog)
            assert payload_cog.shape == (3,)
            # setting a center of gravity without a mass is meaningless
            assert payload_mass is not None
        if joints_init is not None:
            joints_init = np.array(joints_init)
            assert joints_init.shape == (6,)

        super().__init__(name="RTDEPositionalController")
        self.robot_ip = robot_ip
        self.frequency = frequency
        self.lookahead_time = lookahead_time
        self.gain = gain
        self.max_pos_speed = max_pos_speed
        self.max_rot_speed = max_rot_speed
        self.launch_timeout = launch_timeout
        self.tcp_offset_pose = tcp_offset_pose
        self.payload_mass = payload_mass
        self.payload_cog = payload_cog
        self.joints_init = joints_init
        self.joints_init_speed = joints_init_speed
        self.soft_real_time = soft_real_time
        self.receive_latency = receive_latency
        self.verbose = verbose

        if get_max_k is None:
            # default: keep 5 seconds worth of state samples
            get_max_k = int(frequency * 5)

        # build input queue
        example = {
            "cmd": Command.SERVOL.value,
            "target_pose": np.zeros((6,), dtype=np.float64),
            "duration": 0.0,
            "target_time": 0.0,
        }
        input_queue = SharedMemoryQueue.create_from_examples(
            shm_manager=shm_manager, examples=example, buffer_size=256
        )

        # build ring buffer
        if receive_keys is None:
            receive_keys = [
                "ActualTCPPose",
                "ActualTCPSpeed",
                "ActualQ",
                "ActualQd",
                "TargetTCPPose",
                "TargetTCPSpeed",
                "TargetQ",
                "TargetQd",
            ]
        # NOTE(review): this receive interface is opened in the parent process
        # only to sample an example state for sizing the ring buffer; it is
        # never explicitly disconnected — confirm the library closes it on GC.
        rtde_r = RTDEReceiveInterface(hostname=robot_ip)
        example = dict()
        for key in receive_keys:
            example[key] = np.array(getattr(rtde_r, "get" + key)())
        example["robot_receive_timestamp"] = time.time()
        example["robot_timestamp"] = time.time()
        ring_buffer = SharedMemoryRingBuffer.create_from_examples(
            shm_manager=shm_manager,
            examples=example,
            get_max_k=get_max_k,
            get_time_budget=0.2,
            put_desired_frequency=frequency,
        )

        self.ready_event = mp.Event()
        self.input_queue = input_queue
        self.ring_buffer = ring_buffer
        self.receive_keys = receive_keys

    # ========= launch method ===========
    def start(self, wait=True):
        """Spawn the controller process; optionally block until its first cycle."""
        super().start()
        if wait:
            self.start_wait()
        if self.verbose:
            print(
                f"[RTDEPositionalController] Controller process spawned at {self.pid}"
            )

    def stop(self, wait=True):
        """Queue a STOP command; optionally block until the process exits."""
        message = {"cmd": Command.STOP.value}
        self.input_queue.put(message)
        if wait:
            self.stop_wait()

    def start_wait(self):
        # wait (bounded by launch_timeout) for the first control cycle
        self.ready_event.wait(self.launch_timeout)
        assert self.is_alive()

    def stop_wait(self):
        self.join()

    @property
    def is_ready(self):
        # set after the first successful control iteration (or on shutdown)
        return self.ready_event.is_set()

    # ========= context manager ===========
    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()

    # ========= command methods ============
    def servoL(self, pose, duration=0.1):
        """
        duration: desired time to reach pose

        Asks the controller to drive to `pose` over `duration` seconds,
        replacing the current trajectory.
        """
        assert self.is_alive()
        # a duration shorter than one control cycle cannot be honored
        assert duration >= (1 / self.frequency)
        pose = np.array(pose)
        assert pose.shape == (6,)

        message = {
            "cmd": Command.SERVOL.value,
            "target_pose": pose,
            "duration": duration,
        }
        self.input_queue.put(message)

    def schedule_waypoint(self, pose, target_time):
        """Schedule `pose` to be reached at wall-clock `target_time` (time.time)."""
        pose = np.array(pose)
        assert pose.shape == (6,)

        message = {
            "cmd": Command.SCHEDULE_WAYPOINT.value,
            "target_pose": pose,
            "target_time": target_time,
        }
        self.input_queue.put(message)

    # ========= receive APIs =============
    def get_state(self, k=None, out=None):
        """Return the latest state sample, or the last `k` samples if given."""
        if k is None:
            return self.ring_buffer.get(out=out)
        else:
            return self.ring_buffer.get_last_k(k=k, out=out)

    def get_all_state(self):
        """Return every state sample currently held in the ring buffer."""
        return self.ring_buffer.get_all()

    # ========= main loop in process ============
    def run(self):
        """Child-process entry point: fixed-rate servo loop until STOP."""
        # enable soft real-time
        if self.soft_real_time:
            os.sched_setscheduler(0, os.SCHED_RR, os.sched_param(20))

        # start rtde
        robot_ip = self.robot_ip
        rtde_c = RTDEControlInterface(hostname=robot_ip)
        rtde_r = RTDEReceiveInterface(hostname=robot_ip)

        try:
            if self.verbose:
                print(f"[RTDEPositionalController] Connect to robot: {robot_ip}")

            # set parameters
            if self.tcp_offset_pose is not None:
                rtde_c.setTcp(self.tcp_offset_pose)
            if self.payload_mass is not None:
                if self.payload_cog is not None:
                    assert rtde_c.setPayload(self.payload_mass, self.payload_cog)
                else:
                    assert rtde_c.setPayload(self.payload_mass)

            # init pose
            if self.joints_init is not None:
                assert rtde_c.moveJ(self.joints_init, self.joints_init_speed, 1.4)

            # main loop
            dt = 1.0 / self.frequency
            curr_pose = rtde_r.getActualTCPPose()
            # use monotonic time to make sure the control loop never go backward
            curr_t = time.monotonic()
            last_waypoint_time = curr_t
            pose_interp = PoseTrajectoryInterpolator(times=[curr_t], poses=[curr_pose])

            t_start = time.monotonic()
            iter_idx = 0
            keep_running = True
            while keep_running:
                # send command to robot: evaluate the interpolator at "now"
                t_now = time.monotonic()
                pose_command = pose_interp(t_now)
                vel = 0.5
                acc = 0.5
                assert rtde_c.servoL(
                    pose_command,
                    vel,
                    acc,  # dummy, not used by ur5
                    dt,
                    self.lookahead_time,
                    self.gain,
                )

                # update robot state
                state = dict()
                for key in self.receive_keys:
                    state[key] = np.array(getattr(rtde_r, "get" + key)())
                t_recv = time.time()
                state["robot_receive_timestamp"] = t_recv
                # estimated time the measurement was actually taken
                state["robot_timestamp"] = t_recv - self.receive_latency
                self.ring_buffer.put(state)

                # fetch command from queue
                try:
                    # process at most 1 command per cycle to maintain frequency
                    commands = self.input_queue.get_k(1)
                    n_cmd = len(commands["cmd"])
                except Empty:
                    n_cmd = 0

                # execute commands
                for i in range(n_cmd):
                    command = dict()
                    for key, value in commands.items():
                        command[key] = value[i]
                    cmd = command["cmd"]

                    if cmd == Command.STOP.value:
                        keep_running = False
                        # stop immediately, ignore later commands
                        break
                    elif cmd == Command.SERVOL.value:
                        # since curr_pose always lag behind curr_target_pose
                        # if we start the next interpolation with curr_pose
                        # the command robot receive will have discontinouity
                        # and cause jittery robot behavior.
                        target_pose = command["target_pose"]
                        duration = float(command["duration"])
                        curr_time = t_now + dt
                        t_insert = curr_time + duration
                        pose_interp = pose_interp.drive_to_waypoint(
                            pose=target_pose,
                            time=t_insert,
                            curr_time=curr_time,
                            max_pos_speed=self.max_pos_speed,
                            max_rot_speed=self.max_rot_speed,
                        )
                        last_waypoint_time = t_insert
                        if self.verbose:
                            print(
                                "[RTDEPositionalController] New pose target:{} duration:{}s".format(
                                    target_pose, duration
                                )
                            )
                    elif cmd == Command.SCHEDULE_WAYPOINT.value:
                        target_pose = command["target_pose"]
                        target_time = float(command["target_time"])
                        # translate global time to monotonic time
                        target_time = time.monotonic() - time.time() + target_time
                        curr_time = t_now + dt
                        pose_interp = pose_interp.schedule_waypoint(
                            pose=target_pose,
                            time=target_time,
                            max_pos_speed=self.max_pos_speed,
                            max_rot_speed=self.max_rot_speed,
                            curr_time=curr_time,
                            last_waypoint_time=last_waypoint_time,
                        )
                        last_waypoint_time = target_time
                    else:
                        # unknown opcode: bail out of the loop
                        keep_running = False
                        break

                # regulate frequency: sleep until the next scheduled cycle
                t_wait_util = t_start + (iter_idx + 1) * dt
                precise_wait(t_wait_util, time_func=time.monotonic)

                # first loop successful, ready to receive command
                if iter_idx == 0:
                    self.ready_event.set()
                iter_idx += 1

                if self.verbose:
                    print(
                        f"[RTDEPositionalController] Actual frequency {1/(time.monotonic() - t_now)}"
                    )

        finally:
            # mandatory cleanup
            # decelerate
            rtde_c.servoStop()

            # terminate
            rtde_c.stopScript()
            rtde_c.disconnect()
            rtde_r.disconnect()
            # set even on failure so start_wait() never hangs past timeout
            self.ready_event.set()

            if self.verbose:
                print(f"[RTDEPositionalController] Disconnected from robot: {robot_ip}")
|
code/umi/real_world/spacemouse_shared_memory.py
ADDED
|
@@ -0,0 +1,167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import multiprocessing as mp
|
| 2 |
+
import numpy as np
|
| 3 |
+
import time
|
| 4 |
+
from spnav import (
|
| 5 |
+
spnav_open,
|
| 6 |
+
spnav_poll_event,
|
| 7 |
+
spnav_close,
|
| 8 |
+
SpnavMotionEvent,
|
| 9 |
+
SpnavButtonEvent,
|
| 10 |
+
)
|
| 11 |
+
from umi.shared_memory.shared_memory_ring_buffer import SharedMemoryRingBuffer
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class Spacemouse(mp.Process):
    """Background process that polls a 3Dconnexion SpaceMouse via spnav and
    publishes the integrated motion/button state into a shared-memory ring
    buffer for other processes to read."""

    def __init__(
        self,
        shm_manager,
        get_max_k=30,
        frequency=200,
        max_value=500,
        deadzone=(0, 0, 0, 0, 0, 0),
        dtype=np.float32,
        n_buttons=2,
    ):
        """
        Continuously listen to 3D connection space naviagtor events
        and update the latest state.

        max_value: {300, 500} 300 for wired version and 500 for wireless
        deadzone: [0,1], number or tuple, axis with value lower than this value will stay at 0

        front
        z
        ^   _
        |  (O) space mouse
        |
        *----->x right
        y
        """
        super().__init__()
        # accept either a scalar deadzone (applied to all 6 axes) or a 6-tuple
        if np.issubdtype(type(deadzone), np.number):
            deadzone = np.full(6, fill_value=deadzone, dtype=dtype)
        else:
            deadzone = np.array(deadzone, dtype=dtype)
        assert (deadzone >= 0).all()

        # copied variables
        self.frequency = frequency
        self.max_value = max_value
        self.dtype = dtype
        self.deadzone = deadzone
        self.n_buttons = n_buttons
        # rotation from the spnav device frame to a z-up right-handed frame
        self.tx_zup_spnav = np.array([[0, 0, -1], [1, 0, 0], [0, 1, 0]], dtype=dtype)

        example = {
            # 3 translation, 3 rotation, 1 period
            "motion_event": np.zeros((7,), dtype=np.int64),
            # left and right button
            "button_state": np.zeros((n_buttons,), dtype=bool),
            "receive_timestamp": time.time(),
        }
        ring_buffer = SharedMemoryRingBuffer.create_from_examples(
            shm_manager=shm_manager,
            examples=example,
            get_max_k=get_max_k,
            get_time_budget=0.2,
            put_desired_frequency=frequency,
        )

        # shared variables
        self.ready_event = mp.Event()
        self.stop_event = mp.Event()
        self.ring_buffer = ring_buffer

    # ======= get state APIs ==========

    def get_motion_state(self):
        """Return the latest 6-axis motion, normalized to [-1, 1] with the
        deadzone applied (values inside the deadzone are zeroed)."""
        state = self.ring_buffer.get()
        state = np.array(state["motion_event"][:6], dtype=self.dtype) / self.max_value
        is_dead = (-self.deadzone < state) & (state < self.deadzone)
        state[is_dead] = 0
        return state

    def get_motion_state_transformed(self):
        """
        Return in right-handed coordinate
        z
        *------>y right
        |   _
        |  (O) space mouse
        v
        x
        back

        Same data as get_motion_state() but rotated into the z-up frame.
        """
        state = self.get_motion_state()
        tf_state = np.zeros_like(state)
        tf_state[:3] = self.tx_zup_spnav @ state[:3]
        tf_state[3:] = self.tx_zup_spnav @ state[3:]
        return tf_state

    def get_button_state(self):
        """Return the latest boolean state of all buttons."""
        state = self.ring_buffer.get()
        return state["button_state"]

    def is_button_pressed(self, button_id):
        """True if the given button is currently held down."""
        return self.get_button_state()[button_id]

    # ========== start stop API ===========

    def start(self, wait=True):
        super().start()
        if wait:
            # block until run() has published its first sample
            self.ready_event.wait()

    def stop(self, wait=True):
        self.stop_event.set()
        if wait:
            self.join()

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()

    # ========= main loop ==========
    def run(self):
        """Child-process entry point: poll spnav events and publish state."""
        spnav_open()
        try:
            motion_event = np.zeros((7,), dtype=np.int64)
            button_state = np.zeros((self.n_buttons,), dtype=bool)
            # send one message immediately so client can start reading
            self.ring_buffer.put(
                {
                    "motion_event": motion_event,
                    "button_state": button_state,
                    "receive_timestamp": time.time(),
                }
            )
            self.ready_event.set()

            while not self.stop_event.is_set():
                event = spnav_poll_event()
                receive_timestamp = time.time()
                if isinstance(event, SpnavMotionEvent):
                    motion_event[:3] = event.translation
                    motion_event[3:6] = event.rotation
                    motion_event[6] = event.period
                elif isinstance(event, SpnavButtonEvent):
                    button_state[event.bnum] = event.press
                else:
                    # finish integrating this round of events
                    # before sending over
                    self.ring_buffer.put(
                        {
                            "motion_event": motion_event,
                            "button_state": button_state,
                            "receive_timestamp": receive_timestamp,
                        }
                    )
                    # no pending events: throttle to the target frequency
                    time.sleep(1 / self.frequency)
        finally:
            spnav_close()
|
code/umi/real_world/umi_env.py
ADDED
|
@@ -0,0 +1,603 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional
|
| 2 |
+
import pathlib
|
| 3 |
+
import numpy as np
|
| 4 |
+
import time
|
| 5 |
+
import shutil
|
| 6 |
+
import math
|
| 7 |
+
import cv2
|
| 8 |
+
from multiprocessing.managers import SharedMemoryManager
|
| 9 |
+
from umi.real_world.rtde_interpolation_controller import RTDEInterpolationController
|
| 10 |
+
from umi.real_world.wsg_controller import WSGController
|
| 11 |
+
from umi.real_world.franka_interpolation_controller import FrankaInterpolationController
|
| 12 |
+
from umi.real_world.multi_uvc_camera import MultiUvcCamera, VideoRecorder
|
| 13 |
+
from unified_video_action.common.timestamp_accumulator import (
|
| 14 |
+
TimestampActionAccumulator,
|
| 15 |
+
ObsAccumulator,
|
| 16 |
+
)
|
| 17 |
+
from umi.common.cv_util import draw_predefined_mask, get_mirror_crop_slices
|
| 18 |
+
from umi.real_world.multi_camera_visualizer import MultiCameraVisualizer
|
| 19 |
+
from unified_video_action.common.replay_buffer import ReplayBuffer
|
| 20 |
+
from unified_video_action.common.cv2_util import get_image_transform, optimal_row_cols
|
| 21 |
+
from umi.common.usb_util import reset_all_elgato_devices, get_sorted_v4l_paths
|
| 22 |
+
from umi.common.pose_util import pose_to_pos_rot
|
| 23 |
+
from umi.common.interpolation_util import get_interp1d, PoseInterpolator
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class UmiEnv:
|
| 27 |
+
def __init__(
|
| 28 |
+
self,
|
| 29 |
+
# required params
|
| 30 |
+
output_dir,
|
| 31 |
+
robot_ip,
|
| 32 |
+
gripper_ip,
|
| 33 |
+
gripper_port=1000,
|
| 34 |
+
# env params
|
| 35 |
+
frequency=20,
|
| 36 |
+
robot_type="ur5",
|
| 37 |
+
# obs
|
| 38 |
+
obs_image_resolution=(224, 224),
|
| 39 |
+
max_obs_buffer_size=60,
|
| 40 |
+
obs_float32=False,
|
| 41 |
+
camera_reorder=None,
|
| 42 |
+
no_mirror=False,
|
| 43 |
+
fisheye_converter=None,
|
| 44 |
+
mirror_crop=False,
|
| 45 |
+
mirror_swap=False,
|
| 46 |
+
# timing
|
| 47 |
+
align_camera_idx=0,
|
| 48 |
+
# this latency compensates receive_timestamp
|
| 49 |
+
# all in seconds
|
| 50 |
+
camera_obs_latency=0.125,
|
| 51 |
+
robot_obs_latency=0.0001,
|
| 52 |
+
gripper_obs_latency=0.01,
|
| 53 |
+
robot_action_latency=0.1,
|
| 54 |
+
gripper_action_latency=0.1,
|
| 55 |
+
# all in steps (relative to frequency)
|
| 56 |
+
camera_down_sample_steps=1,
|
| 57 |
+
robot_down_sample_steps=1,
|
| 58 |
+
gripper_down_sample_steps=1,
|
| 59 |
+
# all in steps (relative to frequency)
|
| 60 |
+
camera_obs_horizon=2,
|
| 61 |
+
robot_obs_horizon=2,
|
| 62 |
+
gripper_obs_horizon=2,
|
| 63 |
+
# action
|
| 64 |
+
max_pos_speed=0.25,
|
| 65 |
+
max_rot_speed=0.6,
|
| 66 |
+
# robot
|
| 67 |
+
tcp_offset=0.21,
|
| 68 |
+
init_joints=False,
|
| 69 |
+
# vis params
|
| 70 |
+
enable_multi_cam_vis=True,
|
| 71 |
+
multi_cam_vis_resolution=(960, 960),
|
| 72 |
+
# shared memory
|
| 73 |
+
shm_manager=None,
|
| 74 |
+
):
|
| 75 |
+
output_dir = pathlib.Path(output_dir)
|
| 76 |
+
assert output_dir.parent.is_dir()
|
| 77 |
+
video_dir = output_dir.joinpath("videos")
|
| 78 |
+
video_dir.mkdir(parents=True, exist_ok=True)
|
| 79 |
+
zarr_path = str(output_dir.joinpath("replay_buffer.zarr").absolute())
|
| 80 |
+
replay_buffer = ReplayBuffer.create_from_path(zarr_path=zarr_path, mode="a")
|
| 81 |
+
|
| 82 |
+
if shm_manager is None:
|
| 83 |
+
shm_manager = SharedMemoryManager()
|
| 84 |
+
shm_manager.start()
|
| 85 |
+
|
| 86 |
+
# Find and reset all Elgato capture cards.
|
| 87 |
+
# Required to workaround a firmware bug.
|
| 88 |
+
reset_all_elgato_devices()
|
| 89 |
+
|
| 90 |
+
# Wait for all v4l cameras to be back online
|
| 91 |
+
time.sleep(0.1)
|
| 92 |
+
v4l_paths = get_sorted_v4l_paths()
|
| 93 |
+
if camera_reorder is not None:
|
| 94 |
+
paths = [v4l_paths[i] for i in camera_reorder]
|
| 95 |
+
v4l_paths = paths
|
| 96 |
+
|
| 97 |
+
# compute resolution for vis
|
| 98 |
+
rw, rh, col, row = optimal_row_cols(
|
| 99 |
+
n_cameras=len(v4l_paths),
|
| 100 |
+
in_wh_ratio=4 / 3,
|
| 101 |
+
max_resolution=multi_cam_vis_resolution,
|
| 102 |
+
)
|
| 103 |
+
|
| 104 |
+
# HACK: Separate video setting for each camera
|
| 105 |
+
# Elagto Cam Link 4k records at 4k 30fps
|
| 106 |
+
# Other capture card records at 720p 60fps
|
| 107 |
+
resolution = list()
|
| 108 |
+
capture_fps = list()
|
| 109 |
+
cap_buffer_size = list()
|
| 110 |
+
video_recorder = list()
|
| 111 |
+
transform = list()
|
| 112 |
+
vis_transform = list()
|
| 113 |
+
for idx, path in enumerate(v4l_paths):
|
| 114 |
+
if "Cam_Link_4K" in path:
|
| 115 |
+
res = (3840, 2160)
|
| 116 |
+
fps = 30
|
| 117 |
+
buf = 3
|
| 118 |
+
bit_rate = 6000 * 1000
|
| 119 |
+
|
| 120 |
+
def tf4k(data, input_res=res):
|
| 121 |
+
img = data["color"]
|
| 122 |
+
f = get_image_transform(
|
| 123 |
+
input_res=input_res,
|
| 124 |
+
output_res=obs_image_resolution,
|
| 125 |
+
# obs output rgb
|
| 126 |
+
bgr_to_rgb=True,
|
| 127 |
+
)
|
| 128 |
+
img = f(img)
|
| 129 |
+
if obs_float32:
|
| 130 |
+
img = img.astype(np.float32) / 255
|
| 131 |
+
data["color"] = img
|
| 132 |
+
return data
|
| 133 |
+
|
| 134 |
+
transform.append(tf4k)
|
| 135 |
+
else:
|
| 136 |
+
res = (1920, 1080)
|
| 137 |
+
fps = 60
|
| 138 |
+
buf = 1
|
| 139 |
+
bit_rate = 3000 * 1000
|
| 140 |
+
stack_crop = (idx == 0) and mirror_crop
|
| 141 |
+
is_mirror = None
|
| 142 |
+
if mirror_swap:
|
| 143 |
+
mirror_mask = np.ones((224, 224, 3), dtype=np.uint8)
|
| 144 |
+
mirror_mask = draw_predefined_mask(
|
| 145 |
+
mirror_mask,
|
| 146 |
+
color=(0, 0, 0),
|
| 147 |
+
mirror=True,
|
| 148 |
+
gripper=False,
|
| 149 |
+
finger=False,
|
| 150 |
+
)
|
| 151 |
+
is_mirror = mirror_mask[..., 0] == 0
|
| 152 |
+
|
| 153 |
+
def tf(data, input_res=res, stack_crop=stack_crop, is_mirror=is_mirror):
|
| 154 |
+
img = data["color"]
|
| 155 |
+
if fisheye_converter is None:
|
| 156 |
+
crop_img = None
|
| 157 |
+
if stack_crop:
|
| 158 |
+
slices = get_mirror_crop_slices(img.shape[:2], left=False)
|
| 159 |
+
crop = img[slices]
|
| 160 |
+
crop_img = cv2.resize(crop, obs_image_resolution)
|
| 161 |
+
crop_img = crop_img[:, ::-1, ::-1] # bgr to rgb
|
| 162 |
+
f = get_image_transform(
|
| 163 |
+
input_res=input_res,
|
| 164 |
+
output_res=obs_image_resolution,
|
| 165 |
+
# obs output rgb
|
| 166 |
+
bgr_to_rgb=True,
|
| 167 |
+
)
|
| 168 |
+
img = np.ascontiguousarray(f(img))
|
| 169 |
+
if is_mirror is not None:
|
| 170 |
+
img[is_mirror] = img[:, ::-1, :][is_mirror]
|
| 171 |
+
img = draw_predefined_mask(
|
| 172 |
+
img,
|
| 173 |
+
color=(0, 0, 0),
|
| 174 |
+
mirror=no_mirror,
|
| 175 |
+
gripper=True,
|
| 176 |
+
finger=False,
|
| 177 |
+
use_aa=True,
|
| 178 |
+
)
|
| 179 |
+
if crop_img is not None:
|
| 180 |
+
img = np.concatenate([img, crop_img], axis=-1)
|
| 181 |
+
else:
|
| 182 |
+
img = fisheye_converter.forward(img)
|
| 183 |
+
img = img[..., ::-1]
|
| 184 |
+
if obs_float32:
|
| 185 |
+
img = img.astype(np.float32) / 255
|
| 186 |
+
data["color"] = img
|
| 187 |
+
return data
|
| 188 |
+
|
| 189 |
+
transform.append(tf)
|
| 190 |
+
|
| 191 |
+
resolution.append(res)
|
| 192 |
+
capture_fps.append(fps)
|
| 193 |
+
cap_buffer_size.append(buf)
|
| 194 |
+
video_recorder.append(
|
| 195 |
+
VideoRecorder.create_hevc_nvenc(
|
| 196 |
+
fps=fps, input_pix_fmt="bgr24", bit_rate=bit_rate
|
| 197 |
+
)
|
| 198 |
+
)
|
| 199 |
+
|
| 200 |
+
def vis_tf(data, input_res=res):
|
| 201 |
+
img = data["color"]
|
| 202 |
+
f = get_image_transform(
|
| 203 |
+
input_res=input_res, output_res=(rw, rh), bgr_to_rgb=False
|
| 204 |
+
)
|
| 205 |
+
img = f(img)
|
| 206 |
+
data["color"] = img
|
| 207 |
+
return data
|
| 208 |
+
|
| 209 |
+
vis_transform.append(vis_tf)
|
| 210 |
+
|
| 211 |
+
camera = MultiUvcCamera(
|
| 212 |
+
dev_video_paths=v4l_paths,
|
| 213 |
+
shm_manager=shm_manager,
|
| 214 |
+
resolution=resolution,
|
| 215 |
+
capture_fps=capture_fps,
|
| 216 |
+
# send every frame immediately after arrival
|
| 217 |
+
# ignores put_fps
|
| 218 |
+
put_downsample=False,
|
| 219 |
+
get_max_k=max_obs_buffer_size,
|
| 220 |
+
receive_latency=camera_obs_latency,
|
| 221 |
+
cap_buffer_size=cap_buffer_size,
|
| 222 |
+
transform=transform,
|
| 223 |
+
vis_transform=vis_transform,
|
| 224 |
+
video_recorder=video_recorder,
|
| 225 |
+
verbose=False,
|
| 226 |
+
)
|
| 227 |
+
|
| 228 |
+
multi_cam_vis = None
|
| 229 |
+
if enable_multi_cam_vis:
|
| 230 |
+
multi_cam_vis = MultiCameraVisualizer(
|
| 231 |
+
camera=camera, row=row, col=col, rgb_to_bgr=False
|
| 232 |
+
)
|
| 233 |
+
|
| 234 |
+
cube_diag = np.linalg.norm([1, 1, 1])
|
| 235 |
+
j_init = np.array([0, -90, -90, -90, 90, 0]) / 180 * np.pi
|
| 236 |
+
if not init_joints:
|
| 237 |
+
j_init = None
|
| 238 |
+
|
| 239 |
+
if robot_type.startswith("ur5"):
|
| 240 |
+
robot = RTDEInterpolationController(
|
| 241 |
+
shm_manager=shm_manager,
|
| 242 |
+
robot_ip=robot_ip,
|
| 243 |
+
frequency=500, # UR5 CB3 RTDE
|
| 244 |
+
lookahead_time=0.1,
|
| 245 |
+
gain=300,
|
| 246 |
+
max_pos_speed=max_pos_speed * cube_diag,
|
| 247 |
+
max_rot_speed=max_rot_speed * cube_diag,
|
| 248 |
+
launch_timeout=3,
|
| 249 |
+
tcp_offset_pose=[0, 0, tcp_offset, 0, 0, 0],
|
| 250 |
+
payload_mass=None,
|
| 251 |
+
payload_cog=None,
|
| 252 |
+
joints_init=j_init,
|
| 253 |
+
joints_init_speed=1.05,
|
| 254 |
+
soft_real_time=False,
|
| 255 |
+
verbose=False,
|
| 256 |
+
receive_keys=None,
|
| 257 |
+
receive_latency=robot_obs_latency,
|
| 258 |
+
)
|
| 259 |
+
elif robot_type.startswith("franka"):
|
| 260 |
+
robot = FrankaInterpolationController(
|
| 261 |
+
shm_manager=shm_manager,
|
| 262 |
+
robot_ip=robot_ip,
|
| 263 |
+
frequency=200,
|
| 264 |
+
Kx_scale=1.0,
|
| 265 |
+
Kxd_scale=np.array([2.0, 1.5, 2.0, 1.0, 1.0, 1.0]),
|
| 266 |
+
verbose=False,
|
| 267 |
+
receive_latency=robot_obs_latency,
|
| 268 |
+
)
|
| 269 |
+
|
| 270 |
+
gripper = WSGController(
|
| 271 |
+
shm_manager=shm_manager,
|
| 272 |
+
hostname=gripper_ip,
|
| 273 |
+
port=gripper_port,
|
| 274 |
+
receive_latency=gripper_obs_latency,
|
| 275 |
+
use_meters=True,
|
| 276 |
+
)
|
| 277 |
+
|
| 278 |
+
self.camera = camera
|
| 279 |
+
self.robot = robot
|
| 280 |
+
self.gripper = gripper
|
| 281 |
+
self.multi_cam_vis = multi_cam_vis
|
| 282 |
+
self.frequency = frequency
|
| 283 |
+
self.max_obs_buffer_size = max_obs_buffer_size
|
| 284 |
+
self.max_pos_speed = max_pos_speed
|
| 285 |
+
self.max_rot_speed = max_rot_speed
|
| 286 |
+
self.mirror_crop = mirror_crop
|
| 287 |
+
# timing
|
| 288 |
+
self.align_camera_idx = align_camera_idx
|
| 289 |
+
self.camera_obs_latency = camera_obs_latency
|
| 290 |
+
self.robot_obs_latency = robot_obs_latency
|
| 291 |
+
self.gripper_obs_latency = gripper_obs_latency
|
| 292 |
+
self.robot_action_latency = robot_action_latency
|
| 293 |
+
self.gripper_action_latency = gripper_action_latency
|
| 294 |
+
self.camera_down_sample_steps = camera_down_sample_steps
|
| 295 |
+
self.robot_down_sample_steps = robot_down_sample_steps
|
| 296 |
+
self.gripper_down_sample_steps = gripper_down_sample_steps
|
| 297 |
+
self.camera_obs_horizon = camera_obs_horizon
|
| 298 |
+
self.robot_obs_horizon = robot_obs_horizon
|
| 299 |
+
self.gripper_obs_horizon = gripper_obs_horizon
|
| 300 |
+
# recording
|
| 301 |
+
self.output_dir = output_dir
|
| 302 |
+
self.video_dir = video_dir
|
| 303 |
+
self.replay_buffer = replay_buffer
|
| 304 |
+
# temp memory buffers
|
| 305 |
+
self.last_camera_data = None
|
| 306 |
+
# recording buffers
|
| 307 |
+
self.obs_accumulator = None
|
| 308 |
+
self.action_accumulator = None
|
| 309 |
+
|
| 310 |
+
self.start_time = None
|
| 311 |
+
|
| 312 |
+
# ======== start-stop API =============
|
| 313 |
+
@property
|
| 314 |
+
def is_ready(self):
|
| 315 |
+
return self.camera.is_ready and self.robot.is_ready and self.gripper.is_ready
|
| 316 |
+
|
| 317 |
+
def start(self, wait=True):
|
| 318 |
+
self.camera.start(wait=False)
|
| 319 |
+
self.gripper.start(wait=False)
|
| 320 |
+
self.robot.start(wait=False)
|
| 321 |
+
if self.multi_cam_vis is not None:
|
| 322 |
+
self.multi_cam_vis.start(wait=False)
|
| 323 |
+
if wait:
|
| 324 |
+
self.start_wait()
|
| 325 |
+
|
| 326 |
+
def stop(self, wait=True):
|
| 327 |
+
self.end_episode()
|
| 328 |
+
if self.multi_cam_vis is not None:
|
| 329 |
+
self.multi_cam_vis.stop(wait=False)
|
| 330 |
+
self.robot.stop(wait=False)
|
| 331 |
+
self.gripper.stop(wait=False)
|
| 332 |
+
self.camera.stop(wait=False)
|
| 333 |
+
if wait:
|
| 334 |
+
self.stop_wait()
|
| 335 |
+
|
| 336 |
+
def start_wait(self):
|
| 337 |
+
self.camera.start_wait()
|
| 338 |
+
self.gripper.start_wait()
|
| 339 |
+
self.robot.start_wait()
|
| 340 |
+
if self.multi_cam_vis is not None:
|
| 341 |
+
self.multi_cam_vis.start_wait()
|
| 342 |
+
|
| 343 |
+
def stop_wait(self):
|
| 344 |
+
self.robot.stop_wait()
|
| 345 |
+
self.gripper.stop_wait()
|
| 346 |
+
self.camera.stop_wait()
|
| 347 |
+
if self.multi_cam_vis is not None:
|
| 348 |
+
self.multi_cam_vis.stop_wait()
|
| 349 |
+
|
| 350 |
+
    # ========= context manager ===========
    def __enter__(self):
        """Start the environment when entering a ``with`` block and return it."""
        self.start()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Stop the environment on ``with`` block exit (also on exception)."""
        self.stop()
    # ========= async env API ===========
    def get_obs(self) -> dict:
        """
        Timestamp alignment policy
        'current' time is the last timestamp of align_camera_idx
        All other cameras, find corresponding frame with the nearest timestamp
        All low-dim observations, interpolate with respect to 'current' time

        Returns a dict with per-camera image stacks (``camera{i}_rgb``),
        interpolated robot pose (``robot0_eef_pos`` / ``robot0_eef_rot_axis_angle``),
        gripper width (``robot0_gripper_width``) and the aligned camera
        ``timestamp`` array.
        """

        "observation dict"
        assert self.is_ready

        # get data
        # 60 Hz, camera_calibrated_timestamp
        # Fetch enough raw frames to cover the whole observation horizon at
        # the camera's native rate (assumed 60 Hz here) after downsampling.
        k = math.ceil(
            self.camera_obs_horizon
            * self.camera_down_sample_steps
            * (60 / self.frequency)
        )
        self.last_camera_data = self.camera.get(k=k, out=self.last_camera_data)

        # 125/500 hz, robot_receive_timestamp
        last_robot_data = self.robot.get_all_state()
        # both have more than n_obs_steps data

        # 30 hz, gripper_receive_timestamp
        last_gripper_data = self.gripper.get_all_state()

        # 'current' time = newest frame of the alignment camera
        last_timestamp = self.last_camera_data[self.align_camera_idx]["timestamp"][-1]
        dt = 1 / self.frequency

        # align camera obs timestamps
        # Desired sample times, oldest first, ending exactly at last_timestamp.
        camera_obs_timestamps = last_timestamp - (
            np.arange(self.camera_obs_horizon)[::-1]
            * self.camera_down_sample_steps
            * dt
        )
        camera_obs = dict()
        for camera_idx, value in self.last_camera_data.items():
            this_timestamps = value["timestamp"]
            this_idxs = list()
            # nearest-neighbor frame selection per desired timestamp
            for t in camera_obs_timestamps:
                nn_idx = np.argmin(np.abs(this_timestamps - t))
                this_idxs.append(nn_idx)
            # remap key
            if camera_idx == 0 and self.mirror_crop:
                # camera 0 stacks the mirror crop as extra channels (see the
                # capture transform): [..., :3] main view, [..., 3:] crop.
                camera_obs["camera0_rgb"] = value["color"][..., :3][this_idxs]
                camera_obs["camera0_rgb_mirror_crop"] = value["color"][..., 3:][
                    this_idxs
                ]
            else:
                camera_obs[f"camera{camera_idx}_rgb"] = value["color"][this_idxs]

        # align robot obs
        robot_obs_timestamps = last_timestamp - (
            np.arange(self.robot_obs_horizon)[::-1] * self.robot_down_sample_steps * dt
        )
        robot_pose_interpolator = PoseInterpolator(
            t=last_robot_data["robot_timestamp"], x=last_robot_data["ActualTCPPose"]
        )
        robot_pose = robot_pose_interpolator(robot_obs_timestamps)
        robot_obs = {
            "robot0_eef_pos": robot_pose[..., :3],
            "robot0_eef_rot_axis_angle": robot_pose[..., 3:],
        }

        # align gripper obs
        gripper_obs_timestamps = last_timestamp - (
            np.arange(self.gripper_obs_horizon)[::-1]
            * self.gripper_down_sample_steps
            * dt
        )
        gripper_interpolator = get_interp1d(
            t=last_gripper_data["gripper_timestamp"],
            x=last_gripper_data["gripper_position"][..., None],
        )
        gripper_obs = {
            "robot0_gripper_width": gripper_interpolator(gripper_obs_timestamps)
        }

        # accumulate obs
        # When an episode is being recorded, store the raw (non-aligned)
        # state streams so end_episode can interpolate at action timestamps.
        if self.obs_accumulator is not None:
            self.obs_accumulator.put(
                data={
                    "robot0_eef_pose": last_robot_data["ActualTCPPose"],
                    "robot0_joint_pos": last_robot_data["ActualQ"],
                    "robot0_joint_vel": last_robot_data["ActualQd"],
                },
                timestamps=last_robot_data["robot_timestamp"],
            )
            self.obs_accumulator.put(
                data={
                    "robot0_gripper_width": last_gripper_data["gripper_position"][
                        ..., None
                    ]
                },
                timestamps=last_gripper_data["gripper_timestamp"],
            )

        # return obs
        obs_data = dict(camera_obs)
        obs_data.update(robot_obs)
        obs_data.update(gripper_obs)
        obs_data["timestamp"] = camera_obs_timestamps

        return obs_data
    def exec_actions(
        self, actions: np.ndarray, timestamps: np.ndarray, compensate_latency=False
    ):
        """
        Schedule end-effector pose and gripper waypoints for future execution.

        actions: (N, 7+) array; columns [:6] are the 6-DoF eef pose target,
            columns [6:] the gripper command.
        timestamps: (N,) absolute wall-clock times (``time.time()`` epoch) at
            which each action should take effect.
        compensate_latency: if True, waypoints are sent early by the
            configured robot/gripper action latencies.

        NOTE: actions whose timestamp is already in the past are silently
        dropped rather than executed late.
        """
        assert self.is_ready
        if not isinstance(actions, np.ndarray):
            actions = np.array(actions)
        if not isinstance(timestamps, np.ndarray):
            timestamps = np.array(timestamps)

        # convert action to pose
        receive_time = time.time()
        # keep only actions that are still in the future
        is_new = timestamps > receive_time
        new_actions = actions[is_new]
        new_timestamps = timestamps[is_new]

        r_latency = self.robot_action_latency if compensate_latency else 0.0
        g_latency = self.gripper_action_latency if compensate_latency else 0.0

        # schedule waypoints
        for i in range(len(new_actions)):
            r_actions = new_actions[i, :6]
            g_actions = new_actions[i, 6:]
            self.robot.schedule_waypoint(
                pose=r_actions, target_time=new_timestamps[i] - r_latency
            )
            self.gripper.schedule_waypoint(
                pos=g_actions, target_time=new_timestamps[i] - g_latency
            )

        # record actions (only the ones actually scheduled)
        if self.action_accumulator is not None:
            self.action_accumulator.put(new_actions, new_timestamps)
    def get_robot_state(self):
        """Return the robot controller's latest state sample."""
        return self.robot.get_state()
    # recording API
    def start_episode(self, start_time=None):
        """Start recording a new episode.

        start_time: absolute wall-clock start of the episode; defaults to
            ``time.time()``. Used to align camera frame timestamps and the
            action accumulator grid.
        """
        if start_time is None:
            start_time = time.time()
        self.start_time = start_time

        assert self.is_ready

        # prepare recording stuff
        # Next episode index in the replay buffer names the video folder.
        episode_id = self.replay_buffer.n_episodes
        this_video_dir = self.video_dir.joinpath(str(episode_id))
        this_video_dir.mkdir(parents=True, exist_ok=True)
        n_cameras = self.camera.n_cameras
        video_paths = list()
        for i in range(n_cameras):
            video_paths.append(str(this_video_dir.joinpath(f"{i}.mp4").absolute()))

        # start recording on camera
        # restart_put re-aligns frame step indices to the episode start time
        self.camera.restart_put(start_time=start_time)
        self.camera.start_recording(video_path=video_paths, start_time=start_time)

        # create accumulators
        self.obs_accumulator = ObsAccumulator()
        self.action_accumulator = TimestampActionAccumulator(
            start_time=start_time, dt=1 / self.frequency
        )
        print(f"Episode {episode_id} started!")
    def end_episode(self):
        """Stop recording and, if an episode was in progress, save it.

        Truncates the action stream to the time range covered by every
        observation stream, interpolates the low-dim observations at the
        action timestamps, and writes the episode into the replay buffer.
        No-op (beyond stopping the video recorder) when not recording.
        """
        assert self.is_ready

        # stop video recorder
        self.camera.stop_recording()

        # TODO
        if self.obs_accumulator is not None:
            # recording
            assert self.action_accumulator is not None

            # Since the only way to accumulate obs and action is by calling
            # get_obs and exec_actions, which will be in the same thread.
            # We don't need to worry new data come in here.
            # end_time = earliest last-sample time across all obs streams
            # and the action stream; data past it cannot be interpolated.
            end_time = float("inf")
            for key, value in self.obs_accumulator.timestamps.items():
                end_time = min(end_time, value[-1])
            end_time = min(end_time, self.action_accumulator.timestamps[-1])

            actions = self.action_accumulator.actions
            action_timestamps = self.action_accumulator.timestamps
            n_steps = 0
            if np.sum(self.action_accumulator.timestamps <= end_time) > 0:
                # index of the last action at or before end_time, plus one
                n_steps = (
                    np.nonzero(self.action_accumulator.timestamps <= end_time)[0][-1]
                    + 1
                )

            if n_steps > 0:
                timestamps = action_timestamps[:n_steps]
                episode = {
                    "timestamp": timestamps,
                    "action": actions[:n_steps],
                }
                # resample each obs stream at the retained action timestamps
                robot_pose_interpolator = PoseInterpolator(
                    t=np.array(self.obs_accumulator.timestamps["robot0_eef_pose"]),
                    x=np.array(self.obs_accumulator.data["robot0_eef_pose"]),
                )
                robot_pose = robot_pose_interpolator(timestamps)
                episode["robot0_eef_pos"] = robot_pose[:, :3]
                episode["robot0_eef_rot_axis_angle"] = robot_pose[:, 3:]
                joint_pos_interpolator = get_interp1d(
                    np.array(self.obs_accumulator.timestamps["robot0_joint_pos"]),
                    np.array(self.obs_accumulator.data["robot0_joint_pos"]),
                )
                joint_vel_interpolator = get_interp1d(
                    np.array(self.obs_accumulator.timestamps["robot0_joint_vel"]),
                    np.array(self.obs_accumulator.data["robot0_joint_vel"]),
                )
                episode["robot0_joint_pos"] = joint_pos_interpolator(timestamps)
                episode["robot0_joint_vel"] = joint_vel_interpolator(timestamps)

                gripper_interpolator = get_interp1d(
                    t=np.array(self.obs_accumulator.timestamps["robot0_gripper_width"]),
                    x=np.array(self.obs_accumulator.data["robot0_gripper_width"]),
                )
                episode["robot0_gripper_width"] = gripper_interpolator(timestamps)

                self.replay_buffer.add_episode(episode, compressors="disk")
                episode_id = self.replay_buffer.n_episodes - 1
                print(f"Episode {episode_id} saved!")

            self.obs_accumulator = None
            self.action_accumulator = None
def drop_episode(self):
|
| 597 |
+
self.end_episode()
|
| 598 |
+
self.replay_buffer.drop_episode()
|
| 599 |
+
episode_id = self.replay_buffer.n_episodes
|
| 600 |
+
this_video_dir = self.video_dir.joinpath(str(episode_id))
|
| 601 |
+
if this_video_dir.exists():
|
| 602 |
+
shutil.rmtree(str(this_video_dir))
|
| 603 |
+
print(f"Episode {episode_id} dropped!")
|
code/umi/real_world/uvc_camera.py
ADDED
|
@@ -0,0 +1,330 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional, Callable, Dict
|
| 2 |
+
import enum
|
| 3 |
+
import time
|
| 4 |
+
import cv2
|
| 5 |
+
import numpy as np
|
| 6 |
+
import multiprocessing as mp
|
| 7 |
+
from threadpoolctl import threadpool_limits
|
| 8 |
+
from multiprocessing.managers import SharedMemoryManager
|
| 9 |
+
from umi.common.timestamp_accumulator import get_accumulate_timestamp_idxs
|
| 10 |
+
from umi.shared_memory.shared_memory_ring_buffer import SharedMemoryRingBuffer
|
| 11 |
+
from umi.shared_memory.shared_memory_queue import SharedMemoryQueue, Full, Empty
|
| 12 |
+
from umi.real_world.video_recorder import VideoRecorder
|
| 13 |
+
from umi.common.usb_util import reset_usb_device
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class Command(enum.Enum):
    """Commands sent from the parent process to the UvcCamera worker loop."""

    RESTART_PUT = 0  # reset put-index bookkeeping with a new start time
    START_RECORDING = 1  # begin writing frames to a video file
    STOP_RECORDING = 2  # stop the active video recording
class UvcCamera(mp.Process):
    """
    Capture process for a single UVC (v4l2) camera.

    Grabs frames in a child process, optionally transforms them, and
    publishes them through shared-memory ring buffers (one for consumers,
    one single-slot buffer for visualization). Frames can simultaneously be
    written to a video file via the attached VideoRecorder. Control
    (restart put, start/stop recording) happens through a shared-memory
    command queue.

    Call umi.common.usb_util.reset_all_elgato_devices
    if you are using Elgato capture cards.
    Required to workaround firmware bugs.
    """

    MAX_PATH_LENGTH = 4096  # linux path has a limit of 4096 bytes

    def __init__(
        self,
        shm_manager: SharedMemoryManager,
        # v4l2 device file path
        # e.g. /dev/video0
        # or /dev/v4l/by-id/usb-Elgato_Elgato_HD60_X_A00XB320216MTR-video-index0
        dev_video_path,
        resolution=(1280, 720),
        capture_fps=60,
        put_fps=None,  # defaults to capture_fps
        put_downsample=True,  # regulate ring-buffer put rate to put_fps
        get_max_k=30,  # ring buffer depth available to get()
        receive_latency=0.0,  # subtracted from receive time to calibrate timestamps
        cap_buffer_size=1,  # v4l2 driver buffer depth
        num_threads=2,  # thread cap for cv2/BLAS in the child process
        transform: Optional[Callable[[Dict], Dict]] = None,
        vis_transform: Optional[Callable[[Dict], Dict]] = None,
        recording_transform: Optional[Callable[[Dict], Dict]] = None,
        video_recorder: Optional[VideoRecorder] = None,
        verbose=False,
    ):
        super().__init__()

        if put_fps is None:
            put_fps = capture_fps

        # create ring buffer
        # shape is (H, W, 3); resolution is given as (W, H)
        resolution = tuple(resolution)
        shape = resolution[::-1]
        examples = {"color": np.empty(shape=shape + (3,), dtype=np.uint8)}
        examples["camera_capture_timestamp"] = 0.0
        examples["camera_receive_timestamp"] = 0.0
        examples["timestamp"] = 0.0
        examples["step_idx"] = 0

        # single-slot buffer for the visualizer; sized from the transformed
        # example so the vis output shape may differ from the raw frame
        vis_ring_buffer = SharedMemoryRingBuffer.create_from_examples(
            shm_manager=shm_manager,
            examples=(
                examples if vis_transform is None else vis_transform(dict(examples))
            ),
            get_max_k=1,
            get_time_budget=0.2,
            put_desired_frequency=capture_fps,
        )

        ring_buffer = SharedMemoryRingBuffer.create_from_examples(
            shm_manager=shm_manager,
            examples=examples if transform is None else transform(dict(examples)),
            get_max_k=get_max_k,
            get_time_budget=0.2,
            put_desired_frequency=put_fps,
        )

        # create command queue
        # video_path is stored as a fixed-size numpy string so the queue
        # slots have constant byte size
        examples = {
            "cmd": Command.RESTART_PUT.value,
            "put_start_time": 0.0,
            "video_path": np.array("a" * self.MAX_PATH_LENGTH),
            "recording_start_time": 0.0,
        }

        command_queue = SharedMemoryQueue.create_from_examples(
            shm_manager=shm_manager, examples=examples, buffer_size=128
        )

        # create video recorder
        if video_recorder is None:
            # default to nvenc GPU encoder
            video_recorder = VideoRecorder.create_hevc_nvenc(
                shm_manager=shm_manager,
                fps=capture_fps,
                input_pix_fmt="bgr24",
                bit_rate=6000 * 1000,
            )
        # recorder must consume frames at exactly the capture rate
        assert video_recorder.fps == capture_fps

        # copied variables
        self.shm_manager = shm_manager
        self.dev_video_path = dev_video_path
        self.resolution = resolution
        self.capture_fps = capture_fps
        self.put_fps = put_fps
        self.put_downsample = put_downsample
        self.receive_latency = receive_latency
        self.cap_buffer_size = cap_buffer_size
        self.transform = transform
        self.vis_transform = vis_transform
        # NOTE(review): recording_transform is stored but not applied in
        # run() — frames are recorded untransformed; confirm intended.
        self.recording_transform = recording_transform
        self.video_recorder = video_recorder
        self.verbose = verbose
        self.put_start_time = None
        self.num_threads = num_threads

        # shared variables
        self.stop_event = mp.Event()
        self.ready_event = mp.Event()
        self.ring_buffer = ring_buffer
        self.vis_ring_buffer = vis_ring_buffer
        self.command_queue = command_queue

    # ========= context manager ===========
    def __enter__(self):
        """Start the capture process when entering a ``with`` block."""
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Stop the capture process on ``with`` block exit."""
        self.stop()

    # ========= user API ===========
    def start(self, wait=True, put_start_time=None):
        """Start the video recorder and then the capture subprocess."""
        self.put_start_time = put_start_time
        shape = self.resolution[::-1]
        data_example = np.empty(shape=shape + (3,), dtype=np.uint8)
        self.video_recorder.start(
            shm_manager=self.shm_manager, data_example=data_example
        )
        # must start video recorder first to create share memories
        super().start()
        if wait:
            self.start_wait()

    def stop(self, wait=True):
        """Signal the subprocess and recorder to stop; optionally join."""
        self.video_recorder.stop()
        self.stop_event.set()
        if wait:
            self.end_wait()

    def start_wait(self):
        """Block until the first frame has been captured and the recorder is up."""
        self.ready_event.wait()
        self.video_recorder.start_wait()

    def end_wait(self):
        """Join the capture subprocess and wait for the recorder to finish."""
        self.join()
        self.video_recorder.end_wait()

    @property
    def is_ready(self):
        # set after the first frame is published in run()
        return self.ready_event.is_set()

    def get(self, k=None, out=None):
        """Return the latest sample (k=None) or the last k samples."""
        if k is None:
            return self.ring_buffer.get(out=out)
        else:
            return self.ring_buffer.get_last_k(k, out=out)

    def get_vis(self, out=None):
        """Return the latest visualization frame."""
        return self.vis_ring_buffer.get(out=out)

    def start_recording(self, video_path: str, start_time: float = -1):
        """Ask the worker to start writing frames to ``video_path``.

        start_time < 0 means "start immediately" (mapped to None in run()).
        """
        path_len = len(video_path.encode("utf-8"))
        if path_len > self.MAX_PATH_LENGTH:
            raise RuntimeError("video_path too long.")
        self.command_queue.put(
            {
                "cmd": Command.START_RECORDING.value,
                "video_path": video_path,
                "recording_start_time": start_time,
            }
        )

    def stop_recording(self):
        """Ask the worker to stop writing the current video file."""
        self.command_queue.put({"cmd": Command.STOP_RECORDING.value})

    def restart_put(self, start_time):
        """Ask the worker to re-align put step indices to ``start_time``."""
        self.command_queue.put(
            {"cmd": Command.RESTART_PUT.value, "put_start_time": start_time}
        )

    # ========= interval API ===========
    def run(self):
        """Child-process main loop: grab, transform, publish, record, obey commands."""
        # limit threads
        threadpool_limits(self.num_threads)
        cv2.setNumThreads(self.num_threads)

        # open VideoCapture
        cap = cv2.VideoCapture(self.dev_video_path, cv2.CAP_V4L2)

        try:
            # set resolution and fps
            w, h = self.resolution
            fps = self.capture_fps
            cap.set(cv2.CAP_PROP_FRAME_WIDTH, w)
            cap.set(cv2.CAP_PROP_FRAME_HEIGHT, h)
            # set fps
            cap.set(cv2.CAP_PROP_BUFFERSIZE, self.cap_buffer_size)
            cap.set(cv2.CAP_PROP_FPS, fps)

            # put frequency regulation
            put_idx = None
            put_start_time = self.put_start_time
            if put_start_time is None:
                put_start_time = time.time()

            # reuse frame buffer
            iter_idx = 0
            t_start = time.time()
            while not self.stop_event.is_set():
                ts = time.time()
                ret = cap.grab()
                assert ret

                # directly write into shared memory to avoid copy
                frame = self.video_recorder.get_img_buffer()
                ret, frame = cap.retrieve(frame)
                t_recv = time.time()
                assert ret
                # map the driver's monotonic capture time into wall-clock time
                mt_cap = cap.get(cv2.CAP_PROP_POS_MSEC) / 1000
                t_cap = mt_cap - time.monotonic() + time.time()
                t_cal = t_recv - self.receive_latency  # calibrated latency

                # record frame
                if self.video_recorder.is_ready():
                    self.video_recorder.write_img_buffer(frame, frame_time=t_cal)

                data = dict()
                data["camera_receive_timestamp"] = t_recv
                data["camera_capture_timestamp"] = t_cap
                data["color"] = frame

                # apply transform
                put_data = data
                if self.transform is not None:
                    put_data = self.transform(dict(data))

                if self.put_downsample:
                    # put frequency regulation
                    local_idxs, global_idxs, put_idx = get_accumulate_timestamp_idxs(
                        timestamps=[t_cal],
                        start_time=put_start_time,
                        dt=1 / self.put_fps,
                        # this is None in the first iteration
                        # and then replaced with a concrete number
                        next_global_idx=put_idx,
                        # continue to pump frames even if not started.
                        # start_time is simply used to align timestamps.
                        allow_negative=True,
                    )

                    for step_idx in global_idxs:
                        put_data["step_idx"] = step_idx
                        put_data["timestamp"] = t_cal
                        self.ring_buffer.put(put_data, wait=False)
                else:
                    step_idx = int((t_cal - put_start_time) * self.put_fps)
                    put_data["step_idx"] = step_idx
                    put_data["timestamp"] = t_cal
                    self.ring_buffer.put(put_data, wait=False)

                # signal ready
                if iter_idx == 0:
                    self.ready_event.set()

                # put to vis
                # reuse the already-transformed frame when both transforms
                # are the same callable (or both None)
                vis_data = data
                if self.vis_transform == self.transform:
                    vis_data = put_data
                elif self.vis_transform is not None:
                    vis_data = self.vis_transform(dict(data))
                self.vis_ring_buffer.put(vis_data, wait=False)

                # perf
                t_end = time.time()
                duration = t_end - t_start
                frequency = np.round(1 / duration, 1)
                t_start = t_end
                if self.verbose:
                    print(f"[UvcCamera {self.dev_video_path}] FPS {frequency}")

                # fetch command from queue
                try:
                    commands = self.command_queue.get_all()
                    n_cmd = len(commands["cmd"])
                except Empty:
                    n_cmd = 0

                # execute commands
                for i in range(n_cmd):
                    command = dict()
                    for key, value in commands.items():
                        command[key] = value[i]
                    cmd = command["cmd"]
                    if cmd == Command.RESTART_PUT.value:
                        put_idx = None
                        put_start_time = command["put_start_time"]
                    elif cmd == Command.START_RECORDING.value:
                        video_path = str(command["video_path"])
                        start_time = command["recording_start_time"]
                        if start_time < 0:
                            # negative sentinel from start_recording() means
                            # "no scheduled start"
                            start_time = None
                        self.video_recorder.start_recording(
                            video_path, start_time=start_time
                        )
                    elif cmd == Command.STOP_RECORDING.value:
                        self.video_recorder.stop_recording()

                iter_idx += 1
        finally:
            self.video_recorder.stop()
            # When everything done, release the capture
            cap.release()
code/umi/real_world/video_recorder.py
ADDED
|
@@ -0,0 +1,286 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional, Callable, Generator
|
| 2 |
+
import numpy as np
|
| 3 |
+
import av
|
| 4 |
+
import time
|
| 5 |
+
import enum
|
| 6 |
+
import multiprocessing as mp
|
| 7 |
+
from multiprocessing.managers import SharedMemoryManager
|
| 8 |
+
from unified_video_action.shared_memory.shared_memory_queue import (
|
| 9 |
+
SharedMemoryQueue,
|
| 10 |
+
Full,
|
| 11 |
+
Empty,
|
| 12 |
+
)
|
| 13 |
+
from umi.common.timestamp_accumulator import get_accumulate_timestamp_idxs
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class VideoRecorder(mp.Process):
    """Encode image frames into a video file from a dedicated worker process.

    Frames are handed over through a shared-memory queue (with an optional
    zero-copy path) and encoded with PyAV. Recording sessions are started and
    stopped via a second shared-memory command queue, so one long-lived worker
    can produce many video files back to back.

    Lifecycle: construct -> start(shm_manager, data_example) ->
    start_recording() / write_frame() / stop_recording() -> stop().
    """

    MAX_PATH_LENGTH = 4096  # linux path has a limit of 4096 bytes

    class Command(enum.Enum):
        # control messages sent from the user process to the worker
        START_RECORDING = 0
        STOP_RECORDING = 1

    def __init__(
        self,
        fps,
        codec,
        input_pix_fmt,
        buffer_size=128,
        no_repeat=False,
        # options for codec
        **kwargs
    ):
        """Configure the recorder; no process or queues are created yet.

        Args:
            fps: output stream frame rate.
            codec: PyAV codec name (e.g. "h264", "hevc_nvenc").
            input_pix_fmt: pixel format of the ndarrays passed to
                write_frame (e.g. "rgb24").
            buffer_size: capacity of the shared-memory image/command queues.
            no_repeat: if True, each written frame is encoded exactly once
                instead of being repeated to keep up with the target fps.
            **kwargs: extra attributes set verbatim on the codec context in
                run() (e.g. pix_fmt, bit_rate, options).

        NOTE(review): mp.Process.__init__ is intentionally NOT called here;
        it is deferred to start(), after the shared queues exist.
        """
        self.fps = fps
        self.codec = codec
        self.input_pix_fmt = input_pix_fmt
        self.buffer_size = buffer_size
        self.no_repeat = no_repeat
        self.kwargs = kwargs

        # created lazily in start()
        self.img_queue = None
        self.cmd_queue = None
        self.stop_event = None
        self.ready_event = None
        self.is_started = False
        self.shape = None  # (h, w, c) of the example frame, set in start()

        self._reset_state()

    # ======== custom constructors =======
    @classmethod
    def create_h264(
        cls,
        fps,
        codec="h264",
        input_pix_fmt="rgb24",
        output_pix_fmt="yuv420p",
        crf=18,
        profile="high",
        **kwargs
    ):
        """Alternate constructor preconfigured for software H.264 encoding."""
        obj = cls(
            fps=fps,
            codec=codec,
            input_pix_fmt=input_pix_fmt,
            pix_fmt=output_pix_fmt,
            options={"crf": str(crf), "profile": profile},
            **kwargs
        )
        return obj

    @classmethod
    def create_hevc_nvenc(
        cls,
        fps,
        codec="hevc_nvenc",
        input_pix_fmt="rgb24",
        output_pix_fmt="yuv420p",
        bit_rate=6000 * 1000,
        # NOTE(review): mutable default argument below — shared between calls
        # if ever mutated; kept as-is to preserve the interface.
        options={"tune": "ll", "preset": "p1"},
        **kwargs
    ):
        """Alternate constructor preconfigured for NVENC HEVC encoding."""
        obj = cls(
            fps=fps,
            codec=codec,
            input_pix_fmt=input_pix_fmt,
            pix_fmt=output_pix_fmt,
            bit_rate=bit_rate,
            options=options,
            **kwargs
        )
        return obj

    # ========= context manager ===========
    def __enter__(self):
        # NOTE(review): start() requires shm_manager and data_example, so
        # entering the context manager as written would raise TypeError —
        # confirm intended usage before relying on `with VideoRecorder(...)`.
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()

    # ========= user API ===========
    def start(self, shm_manager: SharedMemoryManager, data_example: np.ndarray):
        """Create the shared queues (sized after *data_example*) and launch
        the worker process."""
        super().__init__()  # deferred mp.Process initialization (see __init__)
        self.ready_event = mp.Event()
        self.stop_event = mp.Event()
        self.img_queue = SharedMemoryQueue.create_from_examples(
            shm_manager=shm_manager,
            examples={"img": data_example, "repeat": 1},
            buffer_size=self.buffer_size,
        )
        self.cmd_queue = SharedMemoryQueue.create_from_examples(
            shm_manager=shm_manager,
            examples={
                "cmd": self.Command.START_RECORDING.value,
                # example string presumably reserves MAX_PATH_LENGTH bytes in
                # shared memory for later video paths — verify queue semantics
                "video_path": np.array("a" * self.MAX_PATH_LENGTH),
            },
            buffer_size=self.buffer_size,
        )
        self.shape = data_example.shape
        self.is_started = True
        super().start()  # spawn the worker process (executes self.run)

    def stop(self):
        """Signal the worker to exit; does not join (see end_wait)."""
        self.stop_event.set()

    def start_wait(self):
        """Block until the worker process has entered its main loop."""
        self.ready_event.wait()

    def end_wait(self):
        """Block until the worker process has terminated."""
        self.join()

    def is_ready(self):
        """Return True when a recording has been started (start_time set),
        the worker is up, and no shutdown has been requested."""
        return (
            (self.start_time is not None)
            and (self.ready_event.is_set())
            and (not self.stop_event.is_set())
        )

    def start_recording(self, video_path: str, start_time: float = -1):
        """Ask the worker to open *video_path* and begin encoding.

        *start_time* anchors the frame-repetition clock used by write_frame;
        raises RuntimeError when the UTF-8 path exceeds MAX_PATH_LENGTH.
        """
        path_len = len(video_path.encode("utf-8"))
        if path_len > self.MAX_PATH_LENGTH:
            raise RuntimeError("video_path too long.")
        self.start_time = start_time
        self.cmd_queue.put(
            {"cmd": self.Command.START_RECORDING.value, "video_path": video_path}
        )

    def stop_recording(self):
        """Ask the worker to finalize the current file and reset the local
        per-recording timing state."""
        self.cmd_queue.put({"cmd": self.Command.STOP_RECORDING.value})
        self._reset_state()

    def write_frame(self, img: np.ndarray, frame_time=None):
        """Queue one frame for encoding (copies *img* into shared memory).

        When repetition is enabled (no_repeat False and start_time set),
        *frame_time* determines how many times this frame must be encoded to
        keep the output at the target fps.
        NOTE(review): assumes frame_time is provided whenever start_time is
        set — confirm with callers.
        """
        if not self.is_ready():
            raise RuntimeError("Must run start() before writing!")

        n_repeats = 1
        if (not self.no_repeat) and (self.start_time is not None):
            local_idxs, global_idxs, self.next_global_idx = (
                get_accumulate_timestamp_idxs(
                    # only one timestamp
                    timestamps=[frame_time],
                    start_time=self.start_time,
                    dt=1 / self.fps,
                    next_global_idx=self.next_global_idx,
                )
            )
            # number of apperance means repeats
            n_repeats = len(local_idxs)

        self.img_queue.put({"img": img, "repeat": n_repeats})

    def get_img_buffer(self):
        """
        Get view to the next img queue memory
        for zero-copy writing
        """
        data = self.img_queue.get_next_view()
        img = data["img"]
        return img

    def write_img_buffer(self, img: np.ndarray, frame_time=None):
        """
        Must be used with the buffer returned by get_img_buffer
        for zero-copy writing

        Same repeat computation as write_frame, but commits the view obtained
        from get_img_buffer instead of copying the image.
        """
        if not self.is_ready():
            raise RuntimeError("Must run start() before writing!")

        n_repeats = 1
        if (not self.no_repeat) and (self.start_time is not None):
            local_idxs, global_idxs, self.next_global_idx = (
                get_accumulate_timestamp_idxs(
                    # only one timestamp
                    timestamps=[frame_time],
                    start_time=self.start_time,
                    dt=1 / self.fps,
                    next_global_idx=self.next_global_idx,
                )
            )
            # number of apperance means repeats
            n_repeats = len(local_idxs)

        self.img_queue.put_next_view({"img": img, "repeat": n_repeats})

    # ========= interval API ===========
    def _reset_state(self):
        # per-recording timing state (lives in the user process)
        self.start_time = None
        self.next_global_idx = 0

    def run(self):
        """Worker-process main loop.

        Alternates between two states until stop_event is set:
        idle  — drain the command queue until START_RECORDING supplies a path;
        record — open the container, apply codec options, then encode queued
                 frames (each repeated `repeat` times) until STOP_RECORDING,
                 finally flushing the image queue and the encoder.
        """
        # I'm sorry it has to be this complicated...
        self.ready_event.set()
        while not self.stop_event.is_set():
            video_path = None
            # ========= stopped state ============
            while (video_path is None) and (not self.stop_event.is_set()):
                try:
                    commands = self.cmd_queue.get_all()
                    for i in range(len(commands["cmd"])):
                        cmd = commands["cmd"][i]
                        if cmd == self.Command.START_RECORDING.value:
                            video_path = str(commands["video_path"][i])
                        elif cmd == self.Command.STOP_RECORDING.value:
                            video_path = None
                        else:
                            raise RuntimeError("Unknown command: ", cmd)
                except Empty:
                    # nothing queued yet; poll at a fraction of the frame period
                    time.sleep(0.1 / self.fps)
            if self.stop_event.is_set():
                break
            assert video_path is not None
            # ========= recording state ==========
            with av.open(video_path, mode="w") as container:
                stream = container.add_stream(self.codec, rate=self.fps)
                h, w, c = self.shape
                stream.width = w
                stream.height = h
                codec_context = stream.codec_context
                # apply pix_fmt / bit_rate / options etc. captured in __init__
                for k, v in self.kwargs.items():
                    setattr(codec_context, k, v)

                # loop
                while not self.stop_event.is_set():
                    try:
                        command = self.cmd_queue.get()
                        cmd = int(command["cmd"])
                        if cmd == self.Command.STOP_RECORDING.value:
                            break
                        elif cmd == self.Command.START_RECORDING.value:
                            # already recording; ignore duplicate starts
                            continue
                        else:
                            raise RuntimeError("Unknown command: ", cmd)
                    except Empty:
                        pass

                    try:
                        with self.img_queue.get_view() as data:
                            img = data["img"]
                            repeat = data["repeat"]
                            frame = av.VideoFrame.from_ndarray(
                                img, format=self.input_pix_fmt
                            )
                            for _ in range(repeat):
                                for packet in stream.encode(frame):
                                    container.mux(packet)
                    except Empty:
                        time.sleep(0.1 / self.fps)

                # Flush queue
                try:
                    while not self.img_queue.empty():
                        with self.img_queue.get_view() as data:
                            img = data["img"]
                            repeat = data["repeat"]
                            frame = av.VideoFrame.from_ndarray(
                                img, format=self.input_pix_fmt
                            )
                            for _ in range(repeat):
                                for packet in stream.encode(frame):
                                    container.mux(packet)
                except Empty:
                    pass

                # Flush stream (drain the encoder's delayed packets)
                for packet in stream.encode():
                    container.mux(packet)
|
code/umi/real_world/wsg_binary_driver.py
ADDED
|
@@ -0,0 +1,631 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Union, Optional
|
| 2 |
+
import socket
|
| 3 |
+
import enum
|
| 4 |
+
import struct
|
| 5 |
+
|
| 6 |
+
def _crc16_ccitt_entry(index: int) -> int:
    """Return the CRC-16/CCITT table entry (poly 0x1021, MSB-first) for one
    byte value *index*."""
    reg = index << 8
    for _ in range(8):
        if reg & 0x8000:
            reg = ((reg << 1) ^ 0x1021) & 0xFFFF
        else:
            reg = (reg << 1) & 0xFFFF
    return reg


# CRC-16/CCITT lookup table (polynomial 0x1021), as used by the WSG binary
# protocol. Generated programmatically; entries are identical to the
# conventional hard-coded 256-entry literal.
CRC_TABLE_CCITT16 = [_crc16_ccitt_entry(i) for i in range(256)]


def checksum_update_crc16(data: bytes, crc: int = 0xFFFF):
    """Fold *data* into the running 16-bit CRC *crc* (initial value 0xFFFF)
    using the byte-wise table-driven update and return the new CRC."""
    for byte in data:
        crc = CRC_TABLE_CCITT16[(crc ^ byte) & 0x00FF] ^ (crc >> 8)
    return crc
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
class StatusCode(enum.IntEnum):
    """Status/error codes carried in every WSG binary-protocol response.

    E_SUCCESS (0) means the command completed; E_CMD_PENDING (26) means the
    gripper is still executing it and further responses will follow (see
    WSGBinaryDriver.cmd_submit).
    """

    E_SUCCESS = 0
    E_NOT_AVAILABLE = 1
    E_NO_SENSOR = 2
    E_NOT_INITIALIZED = 3
    E_ALREADY_RUNNING = 4
    E_FEATURE_NOT_SUPPORTED = 5
    E_INCONSISTENT_DATA = 6
    E_TIMEOUT = 7
    E_READ_ERROR = 8
    E_WRITE_ERROR = 9
    E_INSUFFICIENT_RESOURCES = 10
    E_CHECKSUM_ERROR = 11
    E_NO_PARAM_EXPECTED = 12
    E_NOT_ENOUGH_PARAMS = 13
    E_CMD_UNKNOWN = 14
    E_CMD_FORMAT_ERROR = 15
    E_ACCESS_DENIED = 16
    E_ALREADY_OPEN = 17
    E_CMD_FAILED = 18
    E_CMD_ABORTED = 19
    E_INVALID_HANDLE = 20
    E_NOT_FOUND = 21
    E_NOT_OPEN = 22
    E_IO_ERROR = 23
    E_INVALID_PARAMETER = 24
    E_INDEX_OUT_OF_BOUNDS = 25
    E_CMD_PENDING = 26
    E_OVERRUN = 27
    RANGE_ERROR = 28  # sic: no "E_" prefix; kept as-is for existing callers
    E_AXIS_BLOCKED = 29
    E_FILE_EXIST = 30
|
| 304 |
+
|
| 305 |
+
|
| 306 |
+
class CommandId(enum.IntEnum):
    """Command IDs for the subset of the WSG binary protocol used by this
    driver (names presumably mirror the WSG command-set reference)."""

    Disconnect = 0x07
    Homing = 0x20
    PrePosition = 0x21
    Stop = 0x22
    FastStop = 0x23
    AckFastStop = 0x24
|
| 313 |
+
|
| 314 |
+
|
| 315 |
+
def args_to_bytes(*args, int_bytes=1):
    """Serialize positional arguments into a little-endian payload.

    floats -> 32-bit little-endian IEEE-754; ints -> *int_bytes* bytes,
    little-endian; strings -> ASCII. Raises RuntimeError for any other type.
    """

    def _encode(value):
        # float is tested before int, preserving the original dispatch
        # order; note bool (an int subclass) is encoded as an integer.
        if isinstance(value, float):
            return struct.pack("<f", value)
        if isinstance(value, int):
            return value.to_bytes(length=int_bytes, byteorder="little")
        if isinstance(value, str):
            return value.encode("ascii")
        raise RuntimeError(f"Unsupported type {type(value)}")

    return b"".join(_encode(value) for value in args)
|
| 329 |
+
|
| 330 |
+
|
| 331 |
+
class WSGBinaryDriver:
    """Minimal TCP client for the Schunk WSG gripper binary protocol.

    Implements message framing (3-byte 0xAA preamble, command id, payload
    size, CRC-16 trailer), a pending-aware submit loop, a few standard
    gripper commands, and the custom interface exposed by cmd_measure.lua.
    """

    def __init__(self, hostname="192.168.0.103", port=1000):
        """Store the connection target; no socket is opened until start()."""
        self.hostname = hostname
        self.port = port
        self.tcp_sock = None  # created in start()

    def start(self):
        """Open the TCP connection to the gripper."""
        self.tcp_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.tcp_sock.connect((self.hostname, self.port))
        # self.ack_fast_stop()

    def stop(self):
        """Stop motion, announce disconnect, and close the socket."""
        self.stop_cmd()
        self.disconnect()
        self.tcp_sock.close()
        return

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()

    # ================= low level API ================

    def msg_send(self, cmd_id: int, payload: bytes):
        """Frame *payload* as a WSG binary message and send it.

        Frame layout: 0xAA x3 preamble | 1-byte command id | 2-byte LE
        payload size | payload | 2-byte LE CRC-16 over the whole frame.
        Returns the byte count reported by socket.send.
        NOTE(review): socket.send may transmit fewer bytes than requested;
        sendall would be more robust — confirm before changing.
        """
        preamble_b = 0xAA.to_bytes(1, "little") * 3
        cmd_b = int(cmd_id).to_bytes(1, "little")
        size_b = len(payload).to_bytes(2, "little")
        msg_b = preamble_b + cmd_b + size_b + payload
        checksum_b = checksum_update_crc16(msg_b).to_bytes(2, "little")
        msg_b += checksum_b
        return self.tcp_sock.send(msg_b)

    def msg_receive(self) -> dict:
        """Receive and CRC-check one framed response.

        Returns a dict with keys command_id, status_code and payload_bytes
        (the parameters after the 2-byte status). Raises RuntimeError when
        the checksum does not verify.
        NOTE(review): each recv(n) on a stream socket may return fewer than
        n bytes; a recv-until-complete loop would be more robust.
        """
        # syncing
        # NOTE(review): the counter only increments on 0xAA and is never
        # reset, so any three 0xAA bytes (not necessarily consecutive)
        # satisfy the preamble search.
        sync = 0
        while sync != 3:
            res = self.tcp_sock.recv(1)
            if res == 0xAA.to_bytes(1, "little"):
                sync += 1

        # read header
        cmd_id_b = self.tcp_sock.recv(1)
        cmd_id = int.from_bytes(cmd_id_b, "little")

        # read size
        size_b = self.tcp_sock.recv(2)
        size = int.from_bytes(size_b, "little")

        # read payload (first two bytes are the status code)
        payload_b = self.tcp_sock.recv(size)
        status_code = int.from_bytes(payload_b[:2], "little")

        parameters_b = payload_b[2:]

        # read checksum
        checksum_b = self.tcp_sock.recv(2)

        # correct checksum ends in zero
        # 0x50F5 is presumably the CRC state after folding in the 0xAA x3
        # preamble, so continuing over the rest (trailer included) must
        # produce 0 for an intact frame — verify against the WSG manual.
        header_checksum = 0x50F5
        msg_checksum = checksum_update_crc16(
            cmd_id_b + size_b + payload_b + checksum_b, crc=header_checksum
        )
        if msg_checksum != 0:
            raise RuntimeError("Corrupted packet received from WSG")

        result = {
            "command_id": cmd_id,
            "status_code": status_code,
            "payload_bytes": parameters_b,
        }
        return result

    def cmd_submit(
        self,
        cmd_id: int,
        payload: bytes = b"",
        pending: bool = True,
        ignore_other=False,
    ):
        """Send a command and return its final response message dict.

        pending=True keeps reading while the gripper reports E_CMD_PENDING;
        ignore_other=True silently skips responses to other command ids
        instead of raising.
        """
        res = self.msg_send(cmd_id, payload)
        # socket.send raises OSError on failure in Python; the < 0 check is
        # kept defensively from the C-style API shape.
        if res < 0:
            raise RuntimeError("Message send failed.")

        # receive response, repeat if pending
        msg = None
        keep_running = True
        while keep_running:
            msg = self.msg_receive()
            if ignore_other and msg["command_id"] != cmd_id:
                continue

            if msg["command_id"] != cmd_id:
                raise RuntimeError(
                    "Response ID ({:02X}) does not match submitted command ID ({:02X})\n".format(
                        msg["command_id"], cmd_id
                    )
                )
            if pending:
                status = msg["status_code"]
            # short-circuits when pending is False, so `status` is only read
            # when it has been assigned above
            keep_running = pending and status == StatusCode.E_CMD_PENDING.value
        return msg

    # ============== mid level API ================

    def act(self, cmd: CommandId, *args, wait=True, ignore_other=False):
        """Run *cmd* with serialized *args*, convert command_id/status_code
        to their enum types, and raise RuntimeError unless E_SUCCESS."""
        msg = self.cmd_submit(
            cmd_id=cmd.value,
            payload=args_to_bytes(*args),
            pending=wait,
            ignore_other=ignore_other,
        )
        msg["command_id"] = CommandId(msg["command_id"])
        msg["status_code"] = StatusCode(msg["status_code"])

        status = msg["status_code"]
        if status != StatusCode.E_SUCCESS:
            raise RuntimeError(f"Command {cmd} not successful: {status}")
        return msg

    # =============== high level API ===============

    def disconnect(self):
        """Announce disconnect (fire-and-forget)."""
        # use msg_send to no wait for response
        return self.msg_send(CommandId.Disconnect.value, b"")

    def homing(self, positive_direction=True, wait=True):
        """Reference the fingers.

        Direction argument: None -> 0, True -> 1, False -> 2.
        """
        arg = 0
        if positive_direction is None:
            arg = 0
        elif positive_direction:
            arg = 1
        else:
            arg = 2

        return self.act(CommandId.Homing, arg, wait=wait)

    def pre_position(
        self, width: float, speed: float, clamp_on_block: bool = True, wait=True
    ):
        """Move fingers to *width* at *speed*; flag 0 when clamping on a
        blocking object is desired, flag 1 otherwise."""
        flag = 0
        if clamp_on_block:
            flag = 0
        else:
            flag = 1

        return self.act(
            CommandId.PrePosition, flag, float(width), float(speed), wait=wait
        )

    def ack_fault(self):
        """Acknowledge a fast-stop/fault state (magic "ack" payload)."""
        return self.act(CommandId.AckFastStop, "ack", wait=False, ignore_other=True)

    def stop_cmd(self):
        """Stop the current motion."""
        return self.act(CommandId.Stop, wait=False, ignore_other=True)

    def custom_script(self, cmd_id: int, *args):
        """Invoke a custom Lua command (see cmd_measure.lua) and parse the
        17-byte state/position/velocity/force reply into a dict.

        Raises RuntimeError when the script is not running (E_CMD_UNKNOWN),
        on any other non-success status, or on a malformed reply length.
        """
        # Custom payload format:
        # 0: Unused
        # 1..4 float
        # .... one float each
        payload_args = [0]
        for arg in args:
            payload_args.append(float(arg))
        payload = args_to_bytes(*payload_args, int_bytes=1)

        # send message
        msg = self.cmd_submit(cmd_id=cmd_id, payload=payload, pending=False)
        status = StatusCode(msg["status_code"])
        response_payload = msg["payload_bytes"]
        if status == StatusCode.E_CMD_UNKNOWN:
            raise RuntimeError(
                "Command unknown - make sure script (cmd_measure.lua) is running"
            )
        if status != StatusCode.E_SUCCESS:
            raise RuntimeError("Command failed")
        if len(response_payload) != 17:
            raise RuntimeError(
                "Response payload incorrect (",
                "".join("{:02X}".format(b) for b in response_payload),
                ")",
            )

        # parse payload: 1 state byte followed by four LE float32 values
        state = response_payload[0]
        values = list()
        for i in range(4):
            start = i * 4 + 1
            end = start + 4
            values.append(struct.unpack("<f", response_payload[start:end])[0])

        info = {
            "state": state,
            "position": values[0],
            "velocity": values[1],
            "force_motor": values[2],
            "measure_timestamp": values[3],
            # bit 1 of the state byte flags ongoing motion
            "is_moving": (state & 0x02) != 0,
        }
        # info = {
        #     'state': 0,
        #     'position': 100.,
        #     'velocity': 0.,
        #     'force_motor': 0.,
        #     'is_moving': 0.
        # }
        return info

    def script_query(self):
        """Query current gripper state via the custom script (cmd 0xB0)."""
        return self.custom_script(0xB0)

    def script_position_pd(
        self,
        position: float,
        velocity: float,
        kp: float = 15.0,
        kd: float = 1e-3,
        travel_force_limit: float = 80.0,
        blocked_force_limit: float = None,
    ):
        """Stream a PD position/velocity setpoint via the custom script
        (cmd 0xB1); blocked_force_limit defaults to travel_force_limit."""
        if blocked_force_limit is None:
            blocked_force_limit = travel_force_limit
        assert kp > 0
        assert kd >= 0
        return self.custom_script(
            0xB1, position, velocity, kp, kd, travel_force_limit, blocked_force_limit
        )
|
| 560 |
+
|
| 561 |
+
|
| 562 |
+
def test():
    """Manual smoke test: acknowledge faults, home the gripper, then stream
    a close trajectory (110mm -> 0mm) and an open trajectory (0mm -> 110mm)
    at 30 Hz.

    Requires a reachable WSG gripper running cmd_measure.lua; this is not an
    automated unit test.
    """
    import numpy as np
    import time

    with WSGBinaryDriver(
        hostname="wsg50-00004544.internal.tri.global", port=1000
    ) as wsg:
        # ACK
        # msg = wsg.cmd_submit(0x24, bytearray([0x61, 0x63, 0x6B]))
        msg = wsg.ack_fault()
        print(msg)

        # HOME
        # msg = wsg.cmd_submit(0x20, bytearray([0x01]))
        msg = wsg.homing()
        print(msg)
        # time.sleep(1.0)

        # msg = wsg.pre_position(0, 150)
        # print(msg)
        # time.sleep(1.0)

        # close: 110mm -> 0mm over T seconds
        T = 2
        dt = 1 / 30
        pos = np.linspace(0.0, 110.0, int(T / dt))[::-1]
        vel = np.diff(pos) / dt
        vel = np.append(vel, vel[-1])  # pad so len(vel) == len(pos)

        t_start = time.time()
        for i in range(len(pos)):
            p = pos[i]
            v = vel[i]
            print(p, v)
            # BUG FIX: the driver has no script_position() method and no
            # `dt` parameter; use the PD streaming command with the
            # computed velocity instead.
            info = wsg.script_position_pd(position=p, velocity=v)
            print(info)

            # sleep until the nominal time of the next setpoint
            t_end = t_start + i * dt
            t_sleep = t_end - time.time()
            print(t_sleep)
            if t_sleep > 0:
                time.sleep(t_sleep)
        print(time.time() - t_start)
        # cmd_id_b, payload_b, checksum_b = wsg.msg_receive()
        # cmd_id_b, payload_b, checksum_b = wsg.msg_receive()
        time.sleep(3.0)

        # open: 0mm -> 110mm over T seconds
        T = 2
        dt = 1 / 30
        pos = np.linspace(0.0, 110.0, int(T / dt))
        vel = np.diff(pos) / dt
        vel = np.append(vel, vel[-1])

        t_start = time.time()
        for i in range(len(pos)):
            p = pos[i]
            v = vel[i]
            print(p, v)
            # BUG FIX: same as above — script_position_pd, not script_position.
            info = wsg.script_position_pd(position=p, velocity=v)
            print(info)

            t_end = t_start + i * dt
            t_sleep = t_end - time.time()
            print(t_sleep)
            if t_sleep > 0:
                time.sleep(t_sleep)
        print(time.time() - t_start)

        # wsg.msg_send(0x30, bytearray([0x00, 0x00, 0x00, 0x00, 0x16, 0x43]))
        # cmd_id_b, payload_b, checksum_b = wsg.msg_receive()
        # time.sleep(1.0)
|
code/umi/real_world/wsg_controller.py
ADDED
|
@@ -0,0 +1,241 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import time
|
| 3 |
+
import enum
|
| 4 |
+
import multiprocessing as mp
|
| 5 |
+
from multiprocessing.managers import SharedMemoryManager
|
| 6 |
+
from umi.shared_memory.shared_memory_queue import SharedMemoryQueue, Empty
|
| 7 |
+
from umi.shared_memory.shared_memory_ring_buffer import SharedMemoryRingBuffer
|
| 8 |
+
from umi.common.precise_sleep import precise_wait
|
| 9 |
+
from umi.real_world.wsg_binary_driver import WSGBinaryDriver
|
| 10 |
+
from umi.common.pose_trajectory_interpolator import PoseTrajectoryInterpolator
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class Command(enum.Enum):
    """Opcodes for messages placed on the controller's input queue."""

    SHUTDOWN = 0           # stop the control loop immediately
    SCHEDULE_WAYPOINT = 1  # move gripper to a position at a target time
    RESTART_PUT = 2        # re-anchor the control loop's timing origin
+
class WSGController(mp.Process):
    """Background process driving a WSG gripper over its binary TCP protocol.

    Waypoint commands arrive through a shared-memory queue; gripper state is
    continuously published to a shared-memory ring buffer at ``frequency`` Hz.
    Use as a context manager (``with WSGController(...) as ctrl:``) or call
    ``start()`` / ``stop()`` explicitly.
    """

    def __init__(
        self,
        shm_manager: SharedMemoryManager,
        hostname,
        port=1000,
        frequency=30,
        home_to_open=True,
        move_max_speed=200.0,
        get_max_k=None,
        command_queue_size=1024,
        launch_timeout=3,
        receive_latency=0.0,
        use_meters=False,
        verbose=False,
    ):
        """
        Args:
            shm_manager: manager backing the shared queue and ring buffer.
            hostname: gripper IP address or hostname.
            port: TCP port of the WSG binary command interface.
            frequency: control-loop rate in Hz.
            home_to_open: home in the opening direction on startup.
            move_max_speed: interpolation speed limit passed to the
                trajectory interpolator (gripper-native units per second).
            get_max_k: ring-buffer depth; defaults to 10 seconds of samples.
            command_queue_size: capacity of the input command queue.
            launch_timeout: seconds to wait for the process to become ready.
            receive_latency: estimated transport latency, subtracted from the
                receive time to produce ``gripper_timestamp``.
            use_meters: if True the public API is in meters (internally the
                gripper speaks millimeters).
            verbose: print debug information.
        """
        super().__init__(name="WSGController")
        self.hostname = hostname
        self.port = port
        self.frequency = frequency
        self.home_to_open = home_to_open
        self.move_max_speed = move_max_speed
        self.launch_timeout = launch_timeout
        self.receive_latency = receive_latency
        # Convert meters -> millimeters at the API boundary when requested.
        self.scale = 1000.0 if use_meters else 1.0
        self.verbose = verbose

        if get_max_k is None:
            get_max_k = int(frequency * 10)

        # build input queue
        example = {
            "cmd": Command.SCHEDULE_WAYPOINT.value,
            "target_pos": 0.0,
            "target_time": 0.0,
        }
        input_queue = SharedMemoryQueue.create_from_examples(
            shm_manager=shm_manager, examples=example, buffer_size=command_queue_size
        )

        # build ring buffer
        example = {
            "gripper_state": 0,
            "gripper_position": 0.0,
            "gripper_velocity": 0.0,
            "gripper_force": 0.0,
            "gripper_measure_timestamp": time.time(),
            "gripper_receive_timestamp": time.time(),
            "gripper_timestamp": time.time(),
        }
        ring_buffer = SharedMemoryRingBuffer.create_from_examples(
            shm_manager=shm_manager,
            examples=example,
            get_max_k=get_max_k,
            get_time_budget=0.2,
            put_desired_frequency=frequency,
        )

        self.ready_event = mp.Event()
        self.input_queue = input_queue
        self.ring_buffer = ring_buffer

    # ========= launch method ===========
    def start(self, wait=True):
        """Spawn the controller process; optionally block until it is ready."""
        super().start()
        if wait:
            self.start_wait()
        if self.verbose:
            print(f"[WSGController] Controller process spawned at {self.pid}")

    def stop(self, wait=True):
        """Request shutdown; optionally block until the process exits."""
        message = {"cmd": Command.SHUTDOWN.value}
        self.input_queue.put(message)
        if wait:
            self.stop_wait()

    def start_wait(self):
        """Block until the control loop signals readiness.

        Raises:
            TimeoutError: the controller did not become ready within
                ``launch_timeout`` seconds.
            RuntimeError: the controller process died during startup.
        """
        # BUG FIX: Event.wait() returns False on timeout. The original code
        # discarded that result, so a startup slower than launch_timeout
        # returned silently and the caller proceeded against a not-ready
        # controller. Also replaced `assert self.is_alive()` (stripped under
        # `python -O`) with an explicit runtime check.
        if not self.ready_event.wait(self.launch_timeout):
            raise TimeoutError(
                f"[WSGController] not ready after {self.launch_timeout} seconds"
            )
        if not self.is_alive():
            raise RuntimeError(
                "[WSGController] controller process died during startup"
            )

    def stop_wait(self):
        """Wait for the controller process to terminate."""
        self.join()

    @property
    def is_ready(self):
        """True once the control loop has completed its first iteration."""
        return self.ready_event.is_set()

    # ========= context manager ===========
    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()

    # ========= command methods ============
    def schedule_waypoint(self, pos: float, target_pos_time: float = None, *, target_time: float = None):
        """Schedule the gripper to reach ``pos`` (API units, meters if
        ``use_meters``) at wall-clock ``target_time`` (``time.time()`` epoch
        seconds)."""
        # Accept the original positional signature (pos, target_time).
        if target_time is None:
            target_time = target_pos_time
        message = {
            "cmd": Command.SCHEDULE_WAYPOINT.value,
            "target_pos": pos,
            "target_time": target_time,
        }
        self.input_queue.put(message)

    def restart_put(self, start_time):
        """Re-anchor the control loop's timing origin to ``start_time``
        (epoch seconds)."""
        self.input_queue.put(
            {"cmd": Command.RESTART_PUT.value, "target_time": start_time}
        )

    # ========= receive APIs =============
    def get_state(self, k=None, out=None):
        """Return the latest state sample, or the last ``k`` samples."""
        if k is None:
            return self.ring_buffer.get(out=out)
        else:
            return self.ring_buffer.get_last_k(k=k, out=out)

    def get_all_state(self):
        """Return every sample currently held by the ring buffer."""
        return self.ring_buffer.get_all()

    # ========= main loop in process ============
    def run(self):
        """Process entry point: connect, home the gripper, then run the
        fixed-frequency control loop until shutdown."""
        try:
            with WSGBinaryDriver(hostname=self.hostname, port=self.port) as wsg:

                # home gripper to initialize
                wsg.ack_fault()
                wsg.homing(positive_direction=self.home_to_open, wait=True)

                # Seed the trajectory interpolator with the current width.
                # Only the first pose coordinate is used (gripper position);
                # the remaining five are padding for the 6-DoF interpolator.
                curr_info = wsg.script_query()
                curr_pos = curr_info["position"]
                curr_t = time.monotonic()
                last_waypoint_time = curr_t
                pose_interp = PoseTrajectoryInterpolator(
                    times=[curr_t], poses=[[curr_pos, 0, 0, 0, 0, 0]]
                )

                keep_running = True
                t_start = time.monotonic()
                iter_idx = 0
                dt = 1 / self.frequency  # loop-invariant, hoisted out of the loop
                while keep_running:
                    # command gripper: sample target position/velocity from
                    # the interpolator at "now" and push a PD setpoint
                    t_now = time.monotonic()
                    t_target = t_now
                    target_pos = pose_interp(t_target)[0]
                    # finite-difference velocity over one control period
                    target_vel = (target_pos - pose_interp(t_target - dt)[0]) / dt
                    info = wsg.script_position_pd(
                        position=target_pos, velocity=target_vel
                    )

                    # publish state (converted back to API units)
                    state = {
                        "gripper_state": info["state"],
                        "gripper_position": info["position"] / self.scale,
                        "gripper_velocity": info["velocity"] / self.scale,
                        "gripper_force": info["force_motor"],
                        "gripper_measure_timestamp": info["measure_timestamp"],
                        "gripper_receive_timestamp": time.time(),
                        "gripper_timestamp": time.time() - self.receive_latency,
                    }
                    self.ring_buffer.put(state)

                    # fetch command from queue
                    try:
                        commands = self.input_queue.get_all()
                        n_cmd = len(commands["cmd"])
                    except Empty:
                        n_cmd = 0

                    # execute commands
                    for i in range(n_cmd):
                        command = {key: value[i] for key, value in commands.items()}
                        cmd = command["cmd"]

                        if cmd == Command.SHUTDOWN.value:
                            keep_running = False
                            # stop immediately, ignore later commands
                            break
                        elif cmd == Command.SCHEDULE_WAYPOINT.value:
                            target_pos = command["target_pos"] * self.scale
                            target_time = command["target_time"]
                            # translate global (wall-clock) time to monotonic
                            target_time = time.monotonic() - time.time() + target_time
                            curr_time = t_now
                            pose_interp = pose_interp.schedule_waypoint(
                                pose=[target_pos, 0, 0, 0, 0, 0],
                                time=target_time,
                                max_pos_speed=self.move_max_speed,
                                max_rot_speed=self.move_max_speed,
                                curr_time=curr_time,
                                last_waypoint_time=last_waypoint_time,
                            )
                            last_waypoint_time = target_time
                        elif cmd == Command.RESTART_PUT.value:
                            t_start = (
                                command["target_time"] - time.time() + time.monotonic()
                            )
                            iter_idx = 1
                        else:
                            keep_running = False
                            break

                    # first loop successful, ready to receive command
                    if iter_idx == 0:
                        self.ready_event.set()
                    iter_idx += 1

                    # regulate frequency
                    t_end = t_start + dt * iter_idx
                    precise_wait(t_end=t_end, time_func=time.monotonic)

        finally:
            # always unblock start_wait(), even if the connection failed
            self.ready_event.set()
            if self.verbose:
                print(f"[WSGController] Disconnected from robot: {self.hostname}")