Upload folder using huggingface_hub
Browse files. This view is limited to 50 files because it contains too many changes. See the raw diff.
- code/policy_models/VPP_policy.py +688 -0
- code/policy_models/VPP_policy_xbot.py +941 -0
- code/policy_models/__init__.py +0 -0
- code/policy_models/__pycache__/VPP_policy.cpython-310.pyc +0 -0
- code/policy_models/__pycache__/__init__.cpython-310.pyc +0 -0
- code/policy_models/__pycache__/__init__.cpython-39.pyc +0 -0
- code/policy_models/callbacks/__init__.py +0 -0
- code/policy_models/callbacks/ema.py +211 -0
- code/policy_models/datasets/__pycache__/base_dataset.cpython-310.pyc +0 -0
- code/policy_models/datasets/__pycache__/disk_dataset.cpython-310.pyc +0 -0
- code/policy_models/datasets/__pycache__/hulc_data_module.cpython-310.pyc +0 -0
- code/policy_models/datasets/__pycache__/shm_dataset.cpython-310.pyc +0 -0
- code/policy_models/datasets/base_dataset.py +296 -0
- code/policy_models/datasets/disk_dataset.py +280 -0
- code/policy_models/datasets/hulc_data_module.py +160 -0
- code/policy_models/datasets/mvlibero_dataset.py +230 -0
- code/policy_models/datasets/real_dataset.py +287 -0
- code/policy_models/datasets/shm_dataset.py +176 -0
- code/policy_models/datasets/utils/__init__.py +0 -0
- code/policy_models/datasets/utils/__pycache__/__init__.cpython-310.pyc +0 -0
- code/policy_models/datasets/utils/__pycache__/episode_utils.cpython-310.pyc +0 -0
- code/policy_models/datasets/utils/__pycache__/shared_memory_utils.cpython-310.pyc +0 -0
- code/policy_models/datasets/utils/episode_utils.py +237 -0
- code/policy_models/datasets/utils/shared_memory_utils.py +336 -0
- code/policy_models/datasets/xbot_dataset.py +230 -0
- code/policy_models/edm_diffusion/__init__.py +0 -0
- code/policy_models/edm_diffusion/__pycache__/__init__.cpython-310.pyc +0 -0
- code/policy_models/edm_diffusion/__pycache__/__init__.cpython-39.pyc +0 -0
- code/policy_models/edm_diffusion/__pycache__/gc_sampling.cpython-310.pyc +0 -0
- code/policy_models/edm_diffusion/__pycache__/gc_sampling.cpython-39.pyc +0 -0
- code/policy_models/edm_diffusion/__pycache__/score_wrappers.cpython-310.pyc +0 -0
- code/policy_models/edm_diffusion/__pycache__/score_wrappers.cpython-39.pyc +0 -0
- code/policy_models/edm_diffusion/__pycache__/utils.cpython-310.pyc +0 -0
- code/policy_models/edm_diffusion/__pycache__/utils.cpython-39.pyc +0 -0
- code/policy_models/edm_diffusion/gc_sampling.py +1007 -0
- code/policy_models/edm_diffusion/score_wrappers.py +119 -0
- code/policy_models/edm_diffusion/utils.py +203 -0
- code/policy_models/losses/__pycache__/step_unet_mse.cpython-310.pyc +0 -0
- code/policy_models/losses/__pycache__/step_unet_mse.cpython-39.pyc +0 -0
- code/policy_models/losses/step_unet_mse.py +373 -0
- code/policy_models/module/Video_Former copy 2.py +538 -0
- code/policy_models/module/Video_Former copy.py +554 -0
- code/policy_models/module/Video_Former.py +670 -0
- code/policy_models/module/__init__.py +0 -0
- code/policy_models/module/__pycache__/Video_Former.cpython-310.pyc +0 -0
- code/policy_models/module/__pycache__/Video_Former.cpython-39.pyc +0 -0
- code/policy_models/module/__pycache__/__init__.cpython-310.pyc +0 -0
- code/policy_models/module/__pycache__/__init__.cpython-39.pyc +0 -0
- code/policy_models/module/__pycache__/clip.cpython-310.pyc +0 -0
- code/policy_models/module/__pycache__/clip.cpython-39.pyc +0 -0
code/policy_models/VPP_policy.py
ADDED
|
@@ -0,0 +1,688 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
import math
from functools import partial
from typing import Dict, Optional, Tuple

import einops
import pytorch_lightning as pl
import torch
from diffusers import StableVideoDiffusionPipeline
from einops import rearrange, repeat
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning.utilities import rank_zero_only
from torch import einsum, nn
from transformers import AutoTokenizer, CLIPTextModelWithProjection

from policy_models.edm_diffusion.gc_sampling import *
from policy_models.edm_diffusion.score_wrappers import GCDenoiser
from policy_models.module.clip_lang_encoder import LangClip
from policy_models.module.diffusion_extract import Diffusion_feature_extractor
from policy_models.module.Video_Former import Video_Former_2D, Video_Former_3D
from policy_models.utils.lr_schedulers.tri_stage_scheduler import TriStageLRScheduler


logger = logging.getLogger(__name__)
|
| 22 |
+
|
| 23 |
+
def load_primary_models(pretrained_model_path, eval=False):
    """Load a pretrained Stable Video Diffusion pipeline and expose its parts.

    Args:
        pretrained_model_path: Local path (or hub id) of the SVD checkpoint.
        eval: When True, load weights in fp16 for inference; otherwise keep
            the default precision for training.

    Returns:
        Tuple of (pipeline, tokenizer, feature_extractor, scheduler,
        video_processor, image_encoder, vae, unet). The tokenizer slot is
        always None here; callers supply their own text tokenizer.
    """
    load_kwargs = {"torch_dtype": torch.float16} if eval else {}
    pipeline = StableVideoDiffusionPipeline.from_pretrained(pretrained_model_path, **load_kwargs)
    return (
        pipeline,
        None,  # tokenizer: intentionally not provided by this loader
        pipeline.feature_extractor,
        pipeline.scheduler,
        pipeline.video_processor,
        pipeline.image_encoder,
        pipeline.vae,
        pipeline.unet,
    )
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class VPP_Policy(pl.LightningModule):
|
| 33 |
+
"""
|
| 34 |
+
The lightning module used for training.
|
| 35 |
+
"""
|
| 36 |
+
|
| 37 |
+
    def __init__(
        self,
        optimizer: DictConfig,
        lr_scheduler: DictConfig,
        latent_dim: int = 512,
        multistep: int = 10,
        sampler_type: str = 'ddim',
        num_sampling_steps: int = 10,
        sigma_data: float = 0.5,
        sigma_min: float = 0.001,
        sigma_max: float = 80,
        noise_scheduler: str = 'exponential',
        sigma_sample_density_type: str = 'loglogistic',
        use_lr_scheduler: bool = True,
        act_window_size: int = 10,
        use_text_not_embedding: bool = False,
        seed: int = 42,
        pretrained_model_path: str = '/cephfs/shared/gyj/ckpt/svd_pre/checkpoint-100000',
        text_encoder_path: str = '/home/disk2/gyj/hyc_ckpt/llm/clip-vit-base-patch32',
        use_position_encoding: bool = True,
        Former_depth: int = 3,
        Former_heads: int = 8,
        Former_dim_head: int = 64,
        Former_num_time_embeds: int = 1,
        num_latents: int = 3,
        use_Former: str = '3d',
        timestep: int = 20,
        max_length: int = 20,
        extract_layer_idx: int = 1,
        use_all_layer: bool = False,
        obs_seq_len: int = 1,
        action_dim: int = 7,
        action_seq_len: int = 10,
    ):
        """Build the VPP policy: a frozen video-diffusion feature extractor,
        a (trainable) Video Former that compresses those features, and a
        goal-conditioned EDM denoiser that predicts action sequences.

        Args:
            optimizer: Hydra/OmegaConf optimizer config (needs
                `transformer_weight_decay`, `learning_rate`, `betas`).
            lr_scheduler: Config consumed by TriStageLRScheduler.
            latent_dim: Width of the Video Former output / denoiser obs input.
            multistep: Number of actions executed per rollout step at inference.
            sampler_type: Name of the EDM sampler used by `sample_loop`.
            num_sampling_steps: Denoising steps at inference time.
            sigma_data/sigma_min/sigma_max: EDM noise-scale hyperparameters.
            noise_scheduler: Noise-schedule name for `get_noise_schedule`.
            sigma_sample_density_type: Training-time sigma sampling density.
            use_lr_scheduler: Whether `configure_optimizers` attaches a scheduler.
            act_window_size: Length of the predicted action sequence.
            use_text_not_embedding: If True, encode raw text; else use
                precomputed language embeddings from the batch.
            seed: Stored only; seeding itself is not performed here.
            pretrained_model_path: SVD checkpoint path (hard-coded cluster
                default — override in configs).
            text_encoder_path: CLIP text-encoder checkpoint path.
            use_position_encoding: Forwarded to the diffusion feature extractor.
            Former_*: Video Former depth/heads/head-dim/time-embed settings.
            num_latents: Number of query latents per frame in the Former.
            use_Former: '3d', '2d', or anything else for a plain Linear map.
            timestep: Diffusion timestep at which features are extracted.
            max_length: Max tokenized language length for the extractor.
            extract_layer_idx: Which UNet decoder layer(s) to tap for features.
            use_all_layer: Concatenate features from all tapped layers if True.
            obs_seq_len: Observation sequence length for the denoiser.
            action_dim: Dimensionality of a single action.
            action_seq_len: Action sequence length for the denoiser.
        """
        super(VPP_Policy, self).__init__()
        self.latent_dim = latent_dim
        self.use_all_layer = use_all_layer
        self.use_position_encoding = use_position_encoding

        self.act_window_size = act_window_size
        self.action_dim = action_dim

        self.timestep = timestep
        self.extract_layer_idx = extract_layer_idx
        self.use_Former = use_Former
        self.Former_num_time_embeds = Former_num_time_embeds
        self.max_length = max_length

        # Channel widths of the tapped UNet layers; index 0 is skipped below,
        # so layer i contributes condition_dim_list[i+1] channels.
        condition_dim_list = [1280,1280,1280,640]
        sum_dim = 0
        for i in range(extract_layer_idx+1):
            sum_dim = sum_dim + condition_dim_list[i+1]
        # Either the single tapped layer's width, or the concatenation of all.
        condition_dim = condition_dim_list[extract_layer_idx+1] if not self.use_all_layer else sum_dim

        # Video Former variant: 3D (with temporal attention), 2D, or a plain
        # linear projection as a fallback for any other value.
        if use_Former=='3d':
            self.Video_Former = Video_Former_3D(
                dim=latent_dim,
                depth=Former_depth,
                dim_head=Former_dim_head,
                heads=Former_heads,
                num_time_embeds=Former_num_time_embeds,
                num_latents=num_latents,
                condition_dim=condition_dim,
                use_temporal=True,
            )
        elif use_Former == '2d':
            self.Video_Former = Video_Former_2D(
                dim=latent_dim,
                depth=Former_depth,
                dim_head=Former_dim_head,
                heads=Former_heads,
                num_time_embeds=Former_num_time_embeds,
                num_latents=num_latents,
                condition_dim=condition_dim,
            )
        else:
            self.Video_Former = nn.Linear(condition_dim,latent_dim)

        print('use_Former:', self.use_Former)
        print('use_all_layer',self.use_all_layer)

        self.seed = seed
        self.use_lr_scheduler = use_lr_scheduler
        # goal encoders
        self.language_goal = LangClip(model_name='ViT-B/32').to(self.device)

        # Load the SVD pipeline in fp16 (eval=True); the returned tokenizer is
        # None and is replaced by the CLIP tokenizer below.
        pipeline, tokenizer, feature_extractor, train_scheduler, vae_processor, text_encoder, vae, unet = load_primary_models(
            pretrained_model_path , eval = True)

        #text_encoder = CLIPTextModelWithProjection.from_pretrained("/cephfs/shared/llm/clip-vit-base-patch32")
        #tokenizer = AutoTokenizer.from_pretrained("/cephfs/shared/llm/clip-vit-base-patch32", use_fast=False)
        text_encoder = CLIPTextModelWithProjection.from_pretrained(text_encoder_path)
        tokenizer = AutoTokenizer.from_pretrained(text_encoder_path, use_fast=False)

        text_encoder = text_encoder.to(self.device).eval()

        # Freeze every component of the video/text backbone — only the Video
        # Former and the denoiser's inner model are trained.
        for param in pipeline.image_encoder.parameters():
            param.requires_grad = False
        for param in text_encoder.parameters():
            param.requires_grad = False

        for param in pipeline.vae.parameters():
            param.requires_grad = False
        for param in pipeline.unet.parameters():
            param.requires_grad = False

        pipeline = pipeline.to(self.device)
        pipeline.unet.eval()

        self.TVP_encoder = Diffusion_feature_extractor(pipeline=pipeline,
                                                       tokenizer=tokenizer,
                                                       text_encoder=text_encoder,
                                                       position_encoding = self.use_position_encoding)
        self.TVP_encoder = self.TVP_encoder.to(self.device)
        # policy network
        self.model = GCDenoiser(action_dim = action_dim,
                                obs_dim=latent_dim,
                                goal_dim=512,
                                num_tokens=num_latents,
                                goal_window_size = 1,
                                obs_seq_len = obs_seq_len,
                                act_seq_len = action_seq_len,
                                device=self.device,
                                sigma_data=0.5).to(self.device)

        self.optimizer_config = optimizer
        self.lr_scheduler = lr_scheduler
        self.save_hyperparameters()
        # diffusion stuff
        self.sampler_type = sampler_type
        self.num_sampling_steps = num_sampling_steps
        self.noise_scheduler = noise_scheduler
        self.sigma_data = sigma_data
        self.sigma_min = sigma_min
        self.sigma_max = sigma_max
        self.sigma_sample_density_type = sigma_sample_density_type
        # for inference
        self.rollout_step_counter = 0
        self.multistep = multistep
        self.latent_goal = None
        self.plan = None
        self.use_text_not_embedding = use_text_not_embedding
        # print_model_parameters(self.perceptual_encoder.perceiver_resampler)
        # for clip loss ground truth plot
        self.ema_callback_idx = None

        # Also freeze parts of the denoiser's inner model that this policy
        # variant does not train (proprioception/goal embeddings, pos emb).
        for param in self.model.inner_model.proprio_emb.parameters():
            param.requires_grad = False
        for param in self.model.inner_model.goal_emb.parameters():
            param.requires_grad = False
        self.model.inner_model.pos_emb.requires_grad = False
| 189 |
+
def process_device(self):
|
| 190 |
+
self.TVP_encoder.pipeline = self.TVP_encoder.pipeline.to(self.device)
|
| 191 |
+
self.TVP_encoder.text_encoder = self.TVP_encoder.text_encoder.to(self.device)
|
| 192 |
+
|
| 193 |
+
def configure_optimizers(self):
|
| 194 |
+
"""
|
| 195 |
+
Initialize optimizers and learning rate schedulers based on model configuration.
|
| 196 |
+
"""
|
| 197 |
+
# Configuration for models using transformer weight decay
|
| 198 |
+
'''optim_groups = self.action_decoder.model.inner_model.get_optim_groups(
|
| 199 |
+
weight_decay=self.optimizer_config.transformer_weight_decay
|
| 200 |
+
)'''
|
| 201 |
+
optim_groups = [
|
| 202 |
+
{"params": self.model.inner_model.parameters(),
|
| 203 |
+
"weight_decay": self.optimizer_config.transformer_weight_decay},
|
| 204 |
+
{"params": self.Video_Former.parameters(), "weight_decay": self.optimizer_config.transformer_weight_decay},
|
| 205 |
+
]
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
optimizer = torch.optim.AdamW(optim_groups, lr=self.optimizer_config.learning_rate,
|
| 209 |
+
betas=self.optimizer_config.betas)
|
| 210 |
+
|
| 211 |
+
# Optionally initialize the scheduler
|
| 212 |
+
if self.use_lr_scheduler:
|
| 213 |
+
lr_configs = OmegaConf.create(self.lr_scheduler)
|
| 214 |
+
scheduler = TriStageLRScheduler(optimizer, lr_configs)
|
| 215 |
+
lr_scheduler = {
|
| 216 |
+
"scheduler": scheduler,
|
| 217 |
+
"interval": 'step',
|
| 218 |
+
"frequency": 1,
|
| 219 |
+
}
|
| 220 |
+
return {"optimizer": optimizer, "lr_scheduler": lr_scheduler}
|
| 221 |
+
else:
|
| 222 |
+
return optimizer
|
| 223 |
+
|
| 224 |
+
def on_before_zero_grad(self, optimizer=None):
|
| 225 |
+
total_grad_norm = 0.0
|
| 226 |
+
total_param_norm = 0.0
|
| 227 |
+
for p in self.model.parameters():
|
| 228 |
+
if p.grad is not None:
|
| 229 |
+
total_grad_norm += p.grad.norm().item() ** 2
|
| 230 |
+
total_param_norm += p.norm().item() ** 2
|
| 231 |
+
total_grad_norm = total_grad_norm ** 0.5
|
| 232 |
+
total_param_norm = total_param_norm ** 0.5
|
| 233 |
+
|
| 234 |
+
self.log("train/grad_norm", total_grad_norm, on_step=True, on_epoch=False, sync_dist=True)
|
| 235 |
+
self.log("train/param_norm", total_param_norm, on_step=True, on_epoch=False, sync_dist=True)
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
def training_step(self, dataset_batch: Dict[str, Dict],) -> torch.Tensor: # type: ignore
|
| 239 |
+
"""
|
| 240 |
+
Compute and return the training loss for the MDT Agent.
|
| 241 |
+
The training loss consists of the score matching loss of the diffusion model
|
| 242 |
+
and the contrastive loss of the CLIP model for the multimodal encoder.
|
| 243 |
+
|
| 244 |
+
Args:
|
| 245 |
+
batch: Dictionary containing the batch data for each modality.
|
| 246 |
+
batch_idx: Index of the batch. used for compatibility with pytorch lightning.
|
| 247 |
+
dataloader_idx: Index of the dataloader. used for compatibility with pytorch lightning.
|
| 248 |
+
|
| 249 |
+
Returns:
|
| 250 |
+
loss tensor
|
| 251 |
+
"""
|
| 252 |
+
total_loss, action_loss = (
|
| 253 |
+
torch.tensor(0.0).to(self.device),
|
| 254 |
+
torch.tensor(0.0).to(self.device),
|
| 255 |
+
)
|
| 256 |
+
|
| 257 |
+
predictive_feature, latent_goal= self.extract_predictive_feature(dataset_batch)
|
| 258 |
+
|
| 259 |
+
act_loss, sigmas, noise = self.diffusion_loss(
|
| 260 |
+
predictive_feature,
|
| 261 |
+
latent_goal,
|
| 262 |
+
dataset_batch["actions"],
|
| 263 |
+
)
|
| 264 |
+
|
| 265 |
+
action_loss += act_loss
|
| 266 |
+
total_loss += act_loss
|
| 267 |
+
|
| 268 |
+
total_bs = dataset_batch["actions"].shape[0]
|
| 269 |
+
|
| 270 |
+
self._log_training_metrics(action_loss, total_loss, total_bs)
|
| 271 |
+
return total_loss
|
| 272 |
+
|
| 273 |
+
@torch.no_grad()
|
| 274 |
+
def validation_step(self, dataset_batch: Dict[str, Dict]) -> Dict[
|
| 275 |
+
str, torch.Tensor]: # type: ignore
|
| 276 |
+
"""
|
| 277 |
+
Compute and log the validation losses and additional metrics.
|
| 278 |
+
During the validation step, the diffusion model predicts the next action sequence given the current state
|
| 279 |
+
|
| 280 |
+
Args:
|
| 281 |
+
batch: Dictionary containing the batch data for each modality.
|
| 282 |
+
batch_idx: Index of the batch. used for compatibility with pytorch lightning.
|
| 283 |
+
dataloader_idx: Index of the dataloader. used for compatibility with pytorch lightning.
|
| 284 |
+
|
| 285 |
+
Returns:
|
| 286 |
+
Dictionary containing the sampled plans of plan recognition and plan proposal module, as well as the
|
| 287 |
+
episode indices.
|
| 288 |
+
"""
|
| 289 |
+
output = {}
|
| 290 |
+
val_total_act_loss_pp = torch.tensor(0.0).to(self.device)
|
| 291 |
+
# Compute the required embeddings
|
| 292 |
+
predictive_feature, latent_goal= self.extract_predictive_feature(dataset_batch)
|
| 293 |
+
|
| 294 |
+
# predict the next action sequence
|
| 295 |
+
action_pred = self.denoise_actions(
|
| 296 |
+
torch.zeros_like(latent_goal).to(latent_goal.device),
|
| 297 |
+
predictive_feature,
|
| 298 |
+
latent_goal,
|
| 299 |
+
inference=True,
|
| 300 |
+
)
|
| 301 |
+
dataset_batch["actions"] = dataset_batch["actions"].to(action_pred.device)
|
| 302 |
+
# compute the mse action loss
|
| 303 |
+
pred_loss = torch.nn.functional.mse_loss(action_pred, dataset_batch["actions"])
|
| 304 |
+
val_total_act_loss_pp += pred_loss
|
| 305 |
+
|
| 306 |
+
output[f"idx:"] = dataset_batch["idx"]
|
| 307 |
+
output["validation_loss"] = val_total_act_loss_pp
|
| 308 |
+
return output
|
| 309 |
+
|
| 310 |
+
    def extract_predictive_feature(self, dataset_batch):
        """
        Compute the required embeddings for the visual observations and the latent goal.

        Runs both camera streams through the frozen diffusion feature extractor
        in one forward pass, then compresses the features with the Video Former.

        Args:
            dataset_batch: Batch dict with ``rgb_obs.rgb_static``,
                ``rgb_obs.rgb_gripper``, ``lang_text`` and ``lang``.

        Returns:
            Tuple of (predictive_feature dict with keys ``state_images`` and
            ``modality``, latent language-goal tensor).
        """
        # 1. extract the relevant visual observations
        rgb_static = dataset_batch["rgb_obs"]['rgb_static'].to(self.device)
        rgb_gripper = dataset_batch["rgb_obs"]['rgb_gripper'].to(self.device)
        # 2. compute the language goal, either from raw text or a precomputed embedding
        modality = "lang"
        if self.use_text_not_embedding:
            latent_goal = self.language_goal(dataset_batch["lang_text"]).to(rgb_static.dtype)
        else:
            latent_goal = self.language_goal(dataset_batch["lang"]).to(rgb_static.dtype)

        language = dataset_batch["lang_text"]

        num_frames = self.Former_num_time_embeds
        rgb_static = rgb_static.to(self.device)
        rgb_gripper = rgb_gripper.to(self.device)
        batch = rgb_static.shape[0]

        # The backbone is frozen, so feature extraction runs without gradients.
        with torch.no_grad():
            # Stack both camera views along the batch axis so one extractor
            # call handles them; the language list is doubled to match.
            input_rgb = torch.cat([rgb_static, rgb_gripper], dim=0)
            language = language + language
            perceptual_features = self.TVP_encoder(input_rgb, language, self.timestep,
                                                   self.extract_layer_idx, all_layer=self.use_all_layer,
                                                   step_time=1, max_length=self.max_length)

            # (b, f, c, h, w) -> (b, f, h*w, c): flatten the spatial grid into
            # tokens. Assumes the extractor returns 5-D features — TODO confirm.
            perceptual_features = einops.rearrange(perceptual_features, 'b f c h w-> b f c (h w)')
            perceptual_features = einops.rearrange(perceptual_features, 'b f c l-> b f l c')
            # Keep only the first num_frames predicted frames.
            perceptual_features = perceptual_features[:, :num_frames, :, :]
            #print('perceptual_features_shape:', perceptual_features.shape)

            # Undo the batch-axis stacking and concatenate the two views as
            # extra tokens per frame instead.
            perceptual_features, gripper_feature = torch.split(perceptual_features, [batch, batch], dim=0)
            perceptual_features = torch.cat([perceptual_features, gripper_feature], dim=2)

        # NOTE(review): reconstructed as outside the no_grad block so the
        # Video Former (which configure_optimizers trains) receives gradients —
        # the original indentation is ambiguous in this view; confirm.
        perceptual_features = perceptual_features.to(torch.float32)
        perceptual_features = self.Video_Former(perceptual_features)
        if self.use_Former=='linear':
            # The plain-Linear fallback keeps (b, T, q, d); flatten to (b, T*q, d).
            perceptual_features = rearrange(perceptual_features, 'b T q d -> b (T q) d')
        predictive_feature = {'state_images': perceptual_features}
        predictive_feature['modality'] = modality
        return predictive_feature, latent_goal
|
| 354 |
+
|
| 355 |
+
def _log_training_metrics(self, action_loss, total_loss, total_bs):
|
| 356 |
+
"""
|
| 357 |
+
Log the training metrics.
|
| 358 |
+
"""
|
| 359 |
+
self.log("train/action_loss", action_loss, on_step=False, on_epoch=True, sync_dist=True, batch_size=total_bs)
|
| 360 |
+
self.log("train/total_loss", total_loss, on_step=False, on_epoch=True, sync_dist=True, batch_size=total_bs)
|
| 361 |
+
|
| 362 |
+
def _log_validation_metrics(self, pred_loss, img_gen_loss, val_total_act_loss_pp):
|
| 363 |
+
"""
|
| 364 |
+
Log the validation metrics.
|
| 365 |
+
"""
|
| 366 |
+
self.log(
|
| 367 |
+
"val_act/action_loss",
|
| 368 |
+
val_total_act_loss_pp / len(self.trainer.datamodule.modalities), # type:ignore
|
| 369 |
+
sync_dist=True,
|
| 370 |
+
)
|
| 371 |
+
self.log(f"val_act/img_gen_loss_pp", img_gen_loss, sync_dist=True)
|
| 372 |
+
|
| 373 |
+
def diffusion_loss(
|
| 374 |
+
self,
|
| 375 |
+
perceptual_emb: torch.Tensor,
|
| 376 |
+
latent_goal: torch.Tensor,
|
| 377 |
+
actions: torch.Tensor,
|
| 378 |
+
) -> torch.Tensor:
|
| 379 |
+
"""
|
| 380 |
+
Computes the score matching loss given the perceptual embedding, latent goal, and desired actions.
|
| 381 |
+
"""
|
| 382 |
+
self.model.train()
|
| 383 |
+
sigmas = self.make_sample_density()(shape=(len(actions),), device=self.device).to(self.device)
|
| 384 |
+
noise = torch.randn_like(actions).to(self.device)
|
| 385 |
+
loss, _ = self.model.loss(perceptual_emb, actions, latent_goal, noise, sigmas)
|
| 386 |
+
return loss, sigmas, noise
|
| 387 |
+
|
| 388 |
+
    def denoise_actions(  # type: ignore
        self,
        latent_plan: torch.Tensor,
        perceptual_emb: torch.Tensor,
        latent_goal: torch.Tensor,
        inference: Optional[bool] = False,
        extra_args={}
    ) -> torch.Tensor:
        """
        Denoise the next sequence of actions.

        Starts from Gaussian noise scaled by sigma_max and runs the configured
        sampler down the noise schedule, conditioned on the perceptual
        embedding and the latent goal.

        Args:
            latent_plan: Passed through to the sampler loop (may be a zero
                tensor; the samplers here appear not to consume it directly).
            perceptual_emb: Dict with ``state_images`` (or a plain tensor).
            latent_goal: Language-goal embedding; unsqueezed to add a sequence
                dim when it has fewer dims than the perceptual features.
            inference: Use ``num_sampling_steps`` when True, else 10 steps.
            extra_args: Optional sampler kwargs. NOTE(review): mutable default
                argument — harmless while never mutated, but fragile.

        Returns:
            Denoised action tensor of shape (batch, act_window_size,
            action_dim). (Annotation corrected: the original declared a tuple
            but a single tensor is returned.)
        """
        if inference:
            sampling_steps = self.num_sampling_steps
        else:
            sampling_steps = 10
        self.model.eval()
        # Add a sequence dimension to the goal when it is missing one.
        if len(latent_goal.shape) < len(
                perceptual_emb['state_images'].shape if isinstance(perceptual_emb, dict) else perceptual_emb.shape):
            latent_goal = latent_goal.unsqueeze(1)  # .expand(-1, seq_len, -1)
        input_state = perceptual_emb
        sigmas = self.get_noise_schedule(sampling_steps, self.noise_scheduler)

        # Initial sample: pure noise at the highest noise level.
        x = torch.randn((len(latent_goal), self.act_window_size, self.action_dim), device=self.device) * self.sigma_max

        actions = self.sample_loop(sigmas, x, input_state, latent_goal, latent_plan, self.sampler_type, extra_args)

        return actions
+
|
| 416 |
+
    def make_sample_density(self):
        """
        Generate a sample density function based on the desired type for training the model.
        We mostly use log-logistic as it has no additional hyperparameters to tune.

        Returns:
            A callable ``f(shape, device)`` producing per-example sigmas.

        Raises:
            ValueError: For an unknown ``sigma_sample_density_type``.
        """
        # NOTE(review): sd_config is an empty list, so every `'key' in sd_config`
        # test below is False and the hard-coded defaults are always used; the
        # `sd_config[...]` subscript fallbacks are dead (and would crash on a
        # list if ever reached).
        sd_config = []
        if self.sigma_sample_density_type == 'lognormal':
            # NOTE(review): self.sigma_sample_density_mean/std are never set in
            # __init__ — this branch would raise AttributeError if selected.
            loc = self.sigma_sample_density_mean  # if 'mean' in sd_config else sd_config['loc']
            scale = self.sigma_sample_density_std  # if 'std' in sd_config else sd_config['scale']
            return partial(utils.rand_log_normal, loc=loc, scale=scale)

        if self.sigma_sample_density_type == 'loglogistic':
            # Default density: centered at log(sigma_data), clamped to [sigma_min, sigma_max].
            loc = sd_config['loc'] if 'loc' in sd_config else math.log(self.sigma_data)
            scale = sd_config['scale'] if 'scale' in sd_config else 0.5
            min_value = sd_config['min_value'] if 'min_value' in sd_config else self.sigma_min
            max_value = sd_config['max_value'] if 'max_value' in sd_config else self.sigma_max
            return partial(utils.rand_log_logistic, loc=loc, scale=scale, min_value=min_value, max_value=max_value)

        if self.sigma_sample_density_type == 'loguniform':
            min_value = sd_config['min_value'] if 'min_value' in sd_config else self.sigma_min
            max_value = sd_config['max_value'] if 'max_value' in sd_config else self.sigma_max
            return partial(utils.rand_log_uniform, min_value=min_value, max_value=max_value)

        if self.sigma_sample_density_type == 'uniform':
            return partial(utils.rand_uniform, min_value=self.sigma_min, max_value=self.sigma_max)

        if self.sigma_sample_density_type == 'v-diffusion':
            min_value = self.min_value if 'min_value' in sd_config else self.sigma_min
            max_value = sd_config['max_value'] if 'max_value' in sd_config else self.sigma_max
            return partial(utils.rand_v_diffusion, sigma_data=self.sigma_data, min_value=min_value, max_value=max_value)
        if self.sigma_sample_density_type == 'discrete':
            # Draw from a dense exponential schedule of discrete sigma values.
            sigmas = self.get_noise_schedule(self.num_sampling_steps * 1e5, 'exponential')
            return partial(utils.rand_discrete, values=sigmas)
        if self.sigma_sample_density_type == 'split-lognormal':
            # NOTE(review): with sd_config empty, all three lookups fall through
            # to list subscripts and would raise TypeError if selected.
            loc = sd_config['mean'] if 'mean' in sd_config else sd_config['loc']
            scale_1 = sd_config['std_1'] if 'std_1' in sd_config else sd_config['scale_1']
            scale_2 = sd_config['std_2'] if 'std_2' in sd_config else sd_config['scale_2']
            return partial(utils.rand_split_log_normal, loc=loc, scale_1=scale_1, scale_2=scale_2)
        else:
            raise ValueError('Unknown sample density type')
+
|
| 457 |
+
def sample_loop(
|
| 458 |
+
self,
|
| 459 |
+
sigmas,
|
| 460 |
+
x_t: torch.Tensor,
|
| 461 |
+
state: torch.Tensor,
|
| 462 |
+
goal: torch.Tensor,
|
| 463 |
+
latent_plan: torch.Tensor,
|
| 464 |
+
sampler_type: str,
|
| 465 |
+
extra_args={},
|
| 466 |
+
):
|
| 467 |
+
"""
|
| 468 |
+
Main method to generate samples depending on the chosen sampler type. DDIM is the default as it works well in all settings.
|
| 469 |
+
"""
|
| 470 |
+
s_churn = extra_args['s_churn'] if 's_churn' in extra_args else 0
|
| 471 |
+
s_min = extra_args['s_min'] if 's_min' in extra_args else 0
|
| 472 |
+
use_scaler = extra_args['use_scaler'] if 'use_scaler' in extra_args else False
|
| 473 |
+
keys = ['s_churn', 'keep_last_actions']
|
| 474 |
+
if bool(extra_args):
|
| 475 |
+
reduced_args = {x: extra_args[x] for x in keys}
|
| 476 |
+
else:
|
| 477 |
+
reduced_args = {}
|
| 478 |
+
if use_scaler:
|
| 479 |
+
scaler = self.scaler
|
| 480 |
+
else:
|
| 481 |
+
scaler = None
|
| 482 |
+
# ODE deterministic
|
| 483 |
+
if sampler_type == 'lms':
|
| 484 |
+
x_0 = sample_lms(self.model, state, x_t, goal, sigmas, scaler=scaler, disable=True, extra_args=reduced_args)
|
| 485 |
+
# ODE deterministic can be made stochastic by S_churn != 0
|
| 486 |
+
elif sampler_type == 'heun':
|
| 487 |
+
x_0 = sample_heun(self.model, state, x_t, goal, sigmas, scaler=scaler, s_churn=s_churn, s_tmin=s_min,
|
| 488 |
+
disable=True)
|
| 489 |
+
# ODE deterministic
|
| 490 |
+
elif sampler_type == 'euler':
|
| 491 |
+
x_0 = sample_euler(self.model, state, x_t, goal, sigmas, scaler=scaler, disable=True)
|
| 492 |
+
# SDE stochastic
|
| 493 |
+
elif sampler_type == 'ancestral':
|
| 494 |
+
x_0 = sample_dpm_2_ancestral(self.model, state, x_t, goal, sigmas, scaler=scaler, disable=True)
|
| 495 |
+
# SDE stochastic: combines an ODE euler step with an stochastic noise correcting step
|
| 496 |
+
elif sampler_type == 'euler_ancestral':
|
| 497 |
+
x_0 = sample_euler_ancestral(self.model, state, x_t, goal, sigmas, scaler=scaler, disable=True)
|
| 498 |
+
# ODE deterministic
|
| 499 |
+
elif sampler_type == 'dpm':
|
| 500 |
+
x_0 = sample_dpm_2(self.model, state, x_t, goal, sigmas, disable=True)
|
| 501 |
+
# ODE deterministic
|
| 502 |
+
elif sampler_type == 'dpm_adaptive':
|
| 503 |
+
x_0 = sample_dpm_adaptive(self.model, state, x_t, goal, sigmas[-2].item(), sigmas[0].item(), disable=True)
|
| 504 |
+
# ODE deterministic
|
| 505 |
+
elif sampler_type == 'dpm_fast':
|
| 506 |
+
x_0 = sample_dpm_fast(self.model, state, x_t, goal, sigmas[-2].item(), sigmas[0].item(), len(sigmas),
|
| 507 |
+
disable=True)
|
| 508 |
+
# 2nd order solver
|
| 509 |
+
elif sampler_type == 'dpmpp_2s_ancestral':
|
| 510 |
+
x_0 = sample_dpmpp_2s_ancestral(self.model, state, x_t, goal, sigmas, scaler=scaler, disable=True)
|
| 511 |
+
# 2nd order solver
|
| 512 |
+
elif sampler_type == 'dpmpp_2m':
|
| 513 |
+
x_0 = sample_dpmpp_2m(self.model, state, x_t, goal, sigmas, scaler=scaler, disable=True)
|
| 514 |
+
elif sampler_type == 'dpmpp_2m_sde':
|
| 515 |
+
x_0 = sample_dpmpp_sde(self.model, state, x_t, goal, sigmas, scaler=scaler, disable=True)
|
| 516 |
+
elif sampler_type == 'ddim':
|
| 517 |
+
x_0 = sample_ddim(self.model, state, x_t, goal, sigmas, scaler=scaler, disable=True)
|
| 518 |
+
elif sampler_type == 'dpmpp_2s':
|
| 519 |
+
x_0 = sample_dpmpp_2s(self.model, state, x_t, goal, sigmas, scaler=scaler, disable=True)
|
| 520 |
+
elif sampler_type == 'dpmpp_2_with_lms':
|
| 521 |
+
x_0 = sample_dpmpp_2_with_lms(self.model, state, x_t, goal, sigmas, scaler=scaler, disable=True)
|
| 522 |
+
else:
|
| 523 |
+
raise ValueError('desired sampler type not found!')
|
| 524 |
+
return x_0
|
| 525 |
+
|
| 526 |
+
def get_noise_schedule(self, n_sampling_steps, noise_schedule_type):
    """
    Return the noise schedule for sampling: the distribution of noise levels
    from sigma_min to sigma_max used over the sampling steps.
    """
    # Each schedule builder has a slightly different call signature, so we
    # dispatch through zero-argument closures instead of an if/elif ladder.
    builders = {
        # rho=7 is the default from EDM karras
        'karras': lambda: get_sigmas_karras(n_sampling_steps, self.sigma_min, self.sigma_max, 7,
                                            self.device),
        'exponential': lambda: get_sigmas_exponential(n_sampling_steps, self.sigma_min, self.sigma_max,
                                                      self.device),
        'vp': lambda: get_sigmas_vp(n_sampling_steps, device=self.device),
        'linear': lambda: get_sigmas_linear(n_sampling_steps, self.sigma_min, self.sigma_max,
                                            device=self.device),
        'cosine_beta': lambda: cosine_beta_schedule(n_sampling_steps, device=self.device),
        've': lambda: get_sigmas_ve(n_sampling_steps, self.sigma_min, self.sigma_max,
                                    device=self.device),
        'iddpm': lambda: get_iddpm_sigmas(n_sampling_steps, self.sigma_min, self.sigma_max,
                                          device=self.device),
    }
    try:
        build = builders[noise_schedule_type]
    except KeyError:
        raise ValueError('Unknown noise schedule type') from None
    return build()
|
| 546 |
+
|
| 547 |
+
def reset(self):
    """Reset per-rollout inference state; call once at the start of a new rollout."""
    # Restart the action-chunking counter and drop any cached goal/plan.
    self.rollout_step_counter = 0
    self.latent_goal = None
    self.plan = None
|
| 554 |
+
|
| 555 |
+
def forward(self,batch):
    # Thin alias: calling the module directly (e.g. under a DDP wrapper)
    # delegates to training_step and returns its loss value.
    return self.training_step(batch)
#def training_step(self, batch: Dict[str, Dict], batch_idx: int,
#                  dataloader_idx: int = 0) -> torch.Tensor
|
| 559 |
+
|
| 560 |
+
def eval_forward(self, obs, goal):
    """
    Run one full inference pass: encode the language goal, extract
    video-diffusion features for the static and gripper camera views,
    compress them with the Video Former, and denoise an action sequence
    conditioned on the latent goal.

    Args:
        obs: dict with obs["rgb_obs"]['rgb_static'] and ['rgb_gripper'] image tensors.
        goal: dict with "lang_text" (raw instruction string) and/or "lang"
            (precomputed language embedding).

    Returns:
        Denoised action sequence tensor produced by denoise_actions.
    """
    # NOTE(review): if 'lang_text' is absent from goal, latent_goal is never
    # assigned and the denoise_actions call below raises NameError — confirm
    # callers always provide it.
    if 'lang_text' in goal:
        if self.use_text_not_embedding:
            # print(goal.keys())
            # Encode the raw instruction string with the language goal encoder.
            latent_goal = self.language_goal(goal["lang_text"])
            latent_goal = latent_goal.to(torch.float32)
        else:
            # Use the precomputed language embedding instead of the raw text.
            latent_goal = self.language_goal(goal["lang"]).unsqueeze(0).to(torch.float32).to(
                obs["rgb_obs"]['rgb_static'].device)

    rgb_static = obs["rgb_obs"]['rgb_static']
    # rgb_gripper = dataset_batch["rgb_obs"]['rgb_gripper'][:, :-1]
    rgb_gripper = obs["rgb_obs"]['rgb_gripper']

    language = goal["lang_text"]

    num_frames = self.Former_num_time_embeds
    rgb_static = rgb_static.to(self.device)
    rgb_gripper = rgb_gripper.to(self.device)
    batch = rgb_static.shape[0]

    with torch.no_grad():
        # Stack static and gripper views along the batch axis so both are
        # encoded in a single feature-extraction call; duplicate the
        # instruction so each stacked view has its own prompt.
        input_rgb = torch.cat([rgb_static, rgb_gripper], dim=0)
        language = [language] + [language]
        perceptual_features = self.TVP_encoder(input_rgb, language, self.timestep,
                                               self.extract_layer_idx, all_layer=self.use_all_layer,
                                               step_time=1, max_length=self.max_length)

        # Flatten spatial dims, then move channels last:
        # (b, f, c, h, w) -> (b, f, c, h*w) -> (b, f, h*w, c).
        perceptual_features = einops.rearrange(perceptual_features, 'b f c h w-> b f c (h w)')
        perceptual_features = einops.rearrange(perceptual_features, 'b f c l-> b f l c')
        # Keep only the first num_frames temporal embeddings.
        perceptual_features = perceptual_features[:, :num_frames, :, :]

        # Undo the batch stacking and concatenate the two views token-wise.
        perceptual_features, gripper_feature = torch.split(perceptual_features, [batch, batch], dim=0)
        perceptual_features = torch.cat([perceptual_features, gripper_feature], dim=2)

        perceptual_features = perceptual_features.to(torch.float32)
        perceptual_features = self.Video_Former(perceptual_features)
        if self.use_Former == 'linear':
            # Merge the time and query-token axes into a single token axis.
            perceptual_features = rearrange(perceptual_features, 'b T q d -> b (T q) d')

        perceptual_emb = {'state_images': perceptual_features}

        perceptual_emb['modality'] = "lang"
        #print('latent_goal_shape:',latent_goal.shape)
        #print('perceptual_features_shape:', perceptual_features.shape)

        # Denoise the action sequence; the zero tensor stands in for the
        # (unused) state-goal conditioning slot.
        act_seq = self.denoise_actions(
            torch.zeros_like(latent_goal).to(latent_goal.device),
            perceptual_emb,
            latent_goal,
            inference=True,
        )
    return act_seq
|
| 616 |
+
|
| 617 |
+
def step(self, obs, goal):
    """
    Do one step of inference with the model. This method handles the action chunking case.
    Our model is trained to predict a sequence of actions.
    We only compute the sequence once every self.multistep steps.

    Args:
        obs (dict): Observation from environment.
        goal (dict): Goal as visual observation or embedded language instruction.

    Returns:
        Predicted action.
    """
    # Recompute the action chunk only at the start of each multistep window;
    # in between, replay actions from the cached sequence.
    if self.rollout_step_counter % self.multistep == 0:
        pred_action_seq = self.eval_forward(obs, goal)

        self.pred_action_seq = pred_action_seq

    # Pick the action at the current offset within the cached chunk.
    # NOTE(review): indexing [0, t] assumes batch size 1 — confirm callers.
    current_action = self.pred_action_seq[0, self.rollout_step_counter]
    if len(current_action.shape) == 2:
        current_action = einops.rearrange(current_action, 'b d -> b 1 d')
    self.rollout_step_counter += 1
    # Wrap the counter so the next call triggers a fresh chunk prediction.
    if self.rollout_step_counter == self.multistep:
        self.rollout_step_counter = 0

    return current_action
|
| 643 |
+
|
| 644 |
+
def on_train_start(self) -> None:
    """Cast all trainable/frozen sub-modules to the configured dtype before training."""
    # NOTE: the VAE cast was intentionally disabled in the original code.
    for module in (self.model, self.Video_Former, self.language_goal, self.TVP_encoder):
        module.to(dtype=self.dtype)
|
| 652 |
+
|
| 653 |
+
# FIX: each of these four lifecycle hooks was defined twice with identical
# bodies; the later definitions silently shadowed the earlier ones. They are
# now defined exactly once each — runtime behavior is unchanged.
@rank_zero_only
def on_train_epoch_start(self) -> None:
    """Log the start of a training epoch (rank-0 process only)."""
    logger.info(f"Start training epoch {self.current_epoch}")

@rank_zero_only
def on_train_epoch_end(self, unused: Optional = None) -> None:  # type: ignore
    """Log the end of a training epoch (rank-0 process only)."""
    logger.info(f"Finished training epoch {self.current_epoch}")

@rank_zero_only
def on_validation_epoch_end(self) -> None:
    """Log the end of a validation epoch (rank-0 process only)."""
    logger.info(f"Finished validation epoch {self.current_epoch}")

def on_validation_epoch_start(self) -> None:
    """Log the start of a validation epoch (log_rank_0 restricts to rank 0)."""
    log_rank_0(f"Start validation epoch {self.current_epoch}")
|
| 683 |
+
|
| 684 |
+
|
| 685 |
+
@rank_zero_only
def log_rank_0(*args, **kwargs):
    """Forward *args/**kwargs to logger.info, but only on the rank-0 process."""
    # when using ddp, only log with rank 0 process
    logger.info(*args, **kwargs)
|
code/policy_models/VPP_policy_xbot.py
ADDED
|
@@ -0,0 +1,941 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
from typing import Dict, Optional, Tuple
|
| 3 |
+
from functools import partial
|
| 4 |
+
|
| 5 |
+
from omegaconf import DictConfig, OmegaConf
|
| 6 |
+
import pytorch_lightning as pl
|
| 7 |
+
from pytorch_lightning.utilities import rank_zero_only
|
| 8 |
+
import einops
|
| 9 |
+
from policy_models.edm_diffusion.score_wrappers import GCDenoiser
|
| 10 |
+
|
| 11 |
+
from policy_models.module.clip_lang_encoder import LangClip
|
| 12 |
+
from policy_models.edm_diffusion.gc_sampling import *
|
| 13 |
+
from policy_models.utils.lr_schedulers.tri_stage_scheduler import TriStageLRScheduler
|
| 14 |
+
from policy_models.module.Video_Former import Video_Former_2D,Video_Former_3D
|
| 15 |
+
from diffusers import StableVideoDiffusionPipeline
|
| 16 |
+
from policy_models.module.diffusion_extract import Diffusion_feature_extractor
|
| 17 |
+
from transformers import AutoTokenizer, CLIPTextModelWithProjection
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
logger = logging.getLogger(__name__)
|
| 21 |
+
|
| 22 |
+
def load_primary_models(pretrained_model_path, eval=False):
    """Load the Stable Video Diffusion pipeline from a local checkpoint.

    When ``eval`` is True the pipeline is loaded in fp16 for inference;
    otherwise the default (full) precision is used.
    """
    print('load primary models from:', pretrained_model_path)
    load_kwargs = {'torch_dtype': torch.float16} if eval else {}
    return StableVideoDiffusionPipeline.from_pretrained(pretrained_model_path, **load_kwargs)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class VPP_Policy(nn.Module):#pl.LightningModule):
|
| 32 |
+
"""
|
| 33 |
+
The lightning module used for training.
|
| 34 |
+
"""
|
| 35 |
+
|
| 36 |
+
def __init__(
        self,
        optimizer: DictConfig,
        lr_scheduler: DictConfig,
        latent_dim: int = 512,
        multistep: int = 10,
        sampler_type: str = 'ddim',
        num_sampling_steps: int = 10,
        sigma_data: float = 0.5,
        sigma_min: float = 0.001,
        sigma_max: float = 80,
        noise_scheduler: str = 'exponential',
        sigma_sample_density_type: str = 'loglogistic',
        use_lr_scheduler: bool = True,
        act_window_size: int = 10,
        use_text_not_embedding: bool = False,
        seed: int = 42,
        pretrained_model_path: str = '/cephfs/shared/gyj/ckpt/svd_pre/checkpoint-100000',
        text_encoder_path: str = '/cephfs/shared/llm/clip-vit-base-patch32',
        Former_depth: int = 3,
        Former_heads: int = 8,
        Former_dim_head: int = 64,
        Former_num_time_embeds: int = 1,
        num_latents: int = 3,
        use_3d_Former: bool = False,
        timestep: int = 20,
        extract_layer_idx: int = 1,
        use_all_layer: bool = False,
        obs_seq_len: int = 1,
        action_dim: int = 18,
        action_seq_len: int = 10,
        proprio_dim: int = 19,
        device: str = 'cuda',
):
    """
    Assemble the VPP policy: a frozen Stable Video Diffusion feature
    extractor + frozen CLIP text encoder, a trainable Video Former that
    compresses the diffusion features, and a goal-conditioned denoiser
    (GCDenoiser) that predicts action sequences.

    Args:
        optimizer: optimizer hyper-parameter config (learning rate, betas,
            transformer_weight_decay).
        lr_scheduler: tri-stage LR scheduler config.
        multistep: number of env steps replayed per predicted action chunk.
        sampler_type / num_sampling_steps / noise_scheduler / sigma_*:
            EDM-style diffusion sampling hyper-parameters.
        pretrained_model_path: local path to the SVD checkpoint.
        text_encoder_path: local path to the CLIP text encoder/tokenizer.
        extract_layer_idx: which UNet layer's features to extract; indexes
            condition_dim_list below, so it must be in {0, 1, 2}.
        action_dim / action_seq_len / proprio_dim: policy input/output sizes.
    """
    super(VPP_Policy, self).__init__()
    self.device = device
    self.dtype = torch.float32
    self.latent_dim = latent_dim
    self.use_all_layer = use_all_layer

    self.act_window_size = act_window_size
    self.action_dim = action_dim

    self.timestep = timestep
    self.extract_layer_idx = extract_layer_idx
    self.use_3d_Former = use_3d_Former

    # Channel width of the extracted UNet feature map, per extract_layer_idx.
    condition_dim_list = [1280,1280,640]
    condition_dim = condition_dim_list[extract_layer_idx]

    # Video Former compresses (frames x tokens) diffusion features into
    # num_latents query tokens; the 3D variant adds temporal attention.
    if use_3d_Former:
        self.Video_Former = Video_Former_3D(
            dim=latent_dim,
            depth=Former_depth,
            dim_head=Former_dim_head,
            heads=Former_heads,
            num_time_embeds=Former_num_time_embeds,
            num_latents=num_latents,
            condition_dim=condition_dim,
            use_temporal=True,
        )
    else:
        self.Video_Former = Video_Former_2D(
            dim=latent_dim,
            depth=Former_depth,
            dim_head=Former_dim_head,
            heads=Former_heads,
            num_time_embeds=Former_num_time_embeds,
            num_latents=num_latents,
            condition_dim=condition_dim,
        )

    print('use_3d_Former:', self.use_3d_Former)
    print('use_all_layer',self.use_all_layer)

    self.seed = seed
    self.use_lr_scheduler = use_lr_scheduler
    # goal encoders
    # self.language_goal = LangClip(model_name='ViT-B/32').to(self.device)

    # Load the SVD pipeline in eval mode (fp16) — it is used frozen, as a
    # feature extractor only.
    pipeline = load_primary_models(
        pretrained_model_path , eval = True)

    #text_encoder = CLIPTextModelWithProjection.from_pretrained("/cephfs/shared/llm/clip-vit-base-patch32")
    #tokenizer = AutoTokenizer.from_pretrained("/cephfs/shared/llm/clip-vit-base-patch32", use_fast=False)
    text_encoder = CLIPTextModelWithProjection.from_pretrained(text_encoder_path)
    tokenizer = AutoTokenizer.from_pretrained(text_encoder_path, use_fast=False)

    self.tokenizer = tokenizer
    self.text_encoder = text_encoder

    text_encoder = text_encoder.to(self.device).eval()

    # Freeze every pretrained component: image encoder, text encoder,
    # VAE and UNet. Only the Video Former and the denoiser train.
    for param in pipeline.image_encoder.parameters():
        param.requires_grad = False
    for param in text_encoder.parameters():
        param.requires_grad = False

    for param in pipeline.vae.parameters():
        param.requires_grad = False
    for param in pipeline.unet.parameters():
        param.requires_grad = False

    pipeline = pipeline.to(self.device)
    pipeline.unet.eval()

    self.TVP_encoder = Diffusion_feature_extractor(pipeline=pipeline,
                                                   tokenizer=tokenizer,
                                                   text_encoder=text_encoder, )
    self.TVP_encoder = self.TVP_encoder.to(self.device)
    # policy network
    # NOTE(review): sigma_data is hard-coded to 0.5 here, ignoring the
    # constructor's sigma_data argument — confirm this is intentional.
    self.model = GCDenoiser(action_dim = action_dim,
                            obs_dim=latent_dim,
                            proprio_dim=proprio_dim,
                            goal_dim=512,
                            num_tokens=num_latents,
                            goal_window_size = 1,
                            obs_seq_len = obs_seq_len,
                            act_seq_len = action_seq_len,
                            device=self.device,
                            sigma_data=0.5).to(self.device)

    self.optimizer_config = optimizer
    self.lr_scheduler = lr_scheduler
    # self.save_hyperparameters()
    # diffusion stuff
    self.sampler_type = sampler_type
    self.num_sampling_steps = num_sampling_steps
    self.noise_scheduler = noise_scheduler
    self.sigma_data = sigma_data
    self.sigma_min = sigma_min
    self.sigma_max = sigma_max
    self.sigma_sample_density_type = sigma_sample_density_type
    # for inference
    self.rollout_step_counter = 0
    self.multistep = multistep
    self.latent_goal = None
    self.plan = None
    self.use_text_not_embedding = use_text_not_embedding
    # print_model_parameters(self.perceptual_encoder.perceiver_resampler)
    # for clip loss ground truth plot
    self.ema_callback_idx = None

    # Freeze the denoiser's proprio/goal embeddings and positional
    # embedding as well — only the remaining inner-model weights train.
    for param in self.model.inner_model.proprio_emb.parameters():
        param.requires_grad = False
    for param in self.model.inner_model.goal_emb.parameters():
        param.requires_grad = False
    self.model.inner_model.pos_emb.requires_grad = False
|
| 185 |
+
|
| 186 |
+
def process_device(self):
    """Move the frozen SVD pipeline and text encoder onto ``self.device``."""
    extractor = self.TVP_encoder
    extractor.pipeline = extractor.pipeline.to(self.device)
    extractor.text_encoder = extractor.text_encoder.to(self.device)
|
| 189 |
+
|
| 190 |
+
def configure_optimizers(self):
    """
    Build the AdamW optimizer over the denoiser's inner model and the
    Video Former, and (optionally) a tri-stage LR scheduler stepped once
    per training step.
    """
    weight_decay = self.optimizer_config.transformer_weight_decay
    optim_groups = [
        {"params": self.model.inner_model.parameters(), "weight_decay": weight_decay},
        {"params": self.Video_Former.parameters(), "weight_decay": weight_decay},
    ]

    optimizer = torch.optim.AdamW(
        optim_groups,
        lr=self.optimizer_config.learning_rate,
        betas=self.optimizer_config.betas,
    )

    if not self.use_lr_scheduler:
        return optimizer

    scheduler = TriStageLRScheduler(optimizer, OmegaConf.create(self.lr_scheduler))
    return {
        "optimizer": optimizer,
        "lr_scheduler": {
            "scheduler": scheduler,
            "interval": 'step',
            "frequency": 1,
        },
    }
|
| 220 |
+
|
| 221 |
+
def on_before_zero_grad(self, optimizer=None):
    """Log global L2 grad/param norms over the denoiser's parameters.

    Parameters without gradients are excluded from BOTH norms, matching
    the original accumulation order.
    """
    grad_sq_sum = 0.0
    param_sq_sum = 0.0
    for param in self.model.parameters():
        if param.grad is None:
            continue
        grad_sq_sum += param.grad.norm().item() ** 2
        param_sq_sum += param.norm().item() ** 2

    self.log("train/grad_norm", grad_sq_sum ** 0.5, on_step=True, on_epoch=False, sync_dist=True)
    self.log("train/param_norm", param_sq_sum ** 0.5, on_step=True, on_epoch=False, sync_dist=True)
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
def training_step(self, dataset_batch: Dict[str, Dict],) -> torch.Tensor:  # type: ignore
    """
    Compute and return the training loss for the MDT Agent.
    The training loss is the score-matching loss of the diffusion model
    over the predicted action sequence.

    Args:
        dataset_batch: Dictionary containing the batch data for each modality;
            must provide "actions" of shape (batch, time, action_dim).

    Returns:
        Scalar loss tensor.
    """
    total_loss, action_loss = (
        torch.tensor(0.0).to(self.device),
        torch.tensor(0.0).to(self.device),
    )

    # Encode observations + language into the denoiser's conditioning inputs.
    predictive_feature, latent_goal = self.extract_predictive_feature(dataset_batch)

    target, model_output = self.diffusion_loss(
        predictive_feature,
        latent_goal,
        dataset_batch["actions"],
    )
    # BUG FIX: the original line ended with a stray trailing comma, which
    # wrapped the scalar loss in a 1-tuple and broke the "+=" accumulation
    # below (tensor += tuple raises a TypeError).
    act_loss = (model_output - target).pow(2).flatten(1).mean()

    action_loss += act_loss
    total_loss += act_loss

    total_bs = dataset_batch["actions"].shape[0]

    self._log_training_metrics(action_loss, total_loss, total_bs)
    return total_loss
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
@torch.no_grad()
def validation_step(self, dataset_batch: Dict[str, Dict]) -> Dict[
    str, torch.Tensor]:  # type: ignore
    """
    Compute and log the validation losses and additional metrics.
    During the validation step, the diffusion model predicts the next action sequence given the current state

    Args:
        dataset_batch: Dictionary containing the batch data for each modality.

    Returns:
        Dictionary with the batch indices and the total validation action loss.
    """
    output = {}
    val_total_act_loss_pp = torch.tensor(0.0).to(self.device)
    # Compute the required embeddings
    predictive_feature, latent_goal= self.extract_predictive_feature(dataset_batch)

    # predict the next action sequence
    action_pred = self.denoise_actions(
        torch.zeros_like(latent_goal).to(latent_goal.device),
        predictive_feature,
        latent_goal,
        inference=True,
    )
    dataset_batch["actions"] = dataset_batch["actions"].to(action_pred.device)
    # compute the mse action loss
    pred_loss = torch.nn.functional.mse_loss(action_pred, dataset_batch["actions"])
    val_total_act_loss_pp += pred_loss

    # NOTE(review): the key below is the literal string "idx:" (the f-string
    # has no placeholder) — looks like a leftover of f"idx_{scope}"; kept
    # as-is in case downstream consumers rely on it.
    output[f"idx:"] = dataset_batch["idx"]
    output["validation_loss"] = val_total_act_loss_pp
    return output
|
| 308 |
+
|
| 309 |
+
def training_step_xbot(self, dataset_batch: Dict[str, Dict],):
    """
    Training step for the 38-dim xbot action space: diffusion score-matching
    loss split into xyz / rotation / hand components.

    Returns:
        Tuple (total_loss, loss_xyz, loss_rot, loss_hand).
    """
    total_loss, action_loss, loss_xyz, loss_rot, loss_hand = (
        torch.tensor(0.0).to(self.device),
        torch.tensor(0.0).to(self.device),
        torch.tensor(0.0).to(self.device),
        torch.tensor(0.0).to(self.device),
        torch.tensor(0.0).to(self.device),
    )
    predictive_feature, latent_goal= self.extract_predictive_feature(dataset_batch)

    target, model_output = self.diffusion_loss(
        predictive_feature,
        latent_goal,
        dataset_batch["actions"],
    )
    loss_dict = {}
    # Per-component MSE weighted by component width / 38 so the three terms
    # sum to a full-vector MSE. The paired slices ([0:19] and [19:38])
    # presumably correspond to two arms with 3 xyz + 4 rot + 12 hand dims
    # each — TODO confirm the action layout against the dataset.
    loss_dict['loss_xyz'] = (model_output[:,:,:3] - target[:,:,:3]).pow(2).mean()*3/38+(model_output[:,:,19:22] - target[:,:,19:22]).pow(2).mean()*3/38
    loss_dict['loss_rot'] = (model_output[:,:,3:7] - target[:,:,3:7]).pow(2).mean()*4/38+(model_output[:,:,22:26] - target[:,:,22:26]).pow(2).mean()*4/38
    loss_dict['loss_hand'] = (model_output[:,:,7:19] - target[:,:,7:19]).pow(2).mean()*12/38+(model_output[:,:,26:38] - target[:,:,26:38]).pow(2).mean()*12/38

    act_loss = loss_dict['loss_xyz']+loss_dict['loss_rot']+loss_dict['loss_hand']
    loss_dict['loss'] = act_loss

    action_loss += act_loss
    loss_xyz += loss_dict['loss_xyz']
    loss_rot += loss_dict['loss_rot']
    loss_hand += loss_dict['loss_hand']

    total_loss += act_loss

    return total_loss, loss_xyz, loss_rot, loss_hand
|
| 340 |
+
|
| 341 |
+
@torch.no_grad()
def validation_step_xbot(self, dataset_batch: Dict[str, Dict], print_it=False) -> Dict[
    str, torch.Tensor]:  # type: ignore
    """
    Validation step for the 38-dim xbot action space: denoise an action
    sequence and report the per-component (xyz/rot/hand) MSE against the
    ground-truth actions. With print_it=True, also print per-sample
    prediction vs. ground truth for debugging.
    """
    output = {}
    val_total_act_loss_pp = torch.tensor(0.0).to(self.device)
    # Compute the required embeddings
    # perceptual_emb, latent_goal, image_latent_goal = self.compute_input_embeddings(dataset_batch)
    predictive_feature, latent_goal= self.extract_predictive_feature(dataset_batch)

    # predict the next action sequence
    action_pred = self.denoise_actions(
        torch.zeros_like(latent_goal).to(latent_goal.device),
        predictive_feature,
        latent_goal,
        inference=True,
    )
    dataset_batch["actions"] = dataset_batch["actions"].to(action_pred.device) #(batch, time, dim)
    # loss_xyz = torch.nn.functional.mse_loss(action_pred[:, :, :3], dataset_batch["actions"][:, :, :3])*3/18
    # loss_rot = torch.nn.functional.mse_loss(action_pred[:, :, 3:-12], dataset_batch["actions"][:, :, 3:-12])*3/18
    # loss_hand = torch.nn.functional.mse_loss(action_pred[:, :, -12:], dataset_batch["actions"][:, :, -12:])*12/18
    model_output = action_pred
    target = dataset_batch["actions"]
    # Same weighted per-component split as training_step_xbot (two 19-dim arms).
    loss_xyz = (model_output[:,:,:3] - target[:,:,:3]).pow(2).mean()*3/38+(model_output[:,:,19:22] - target[:,:,19:22]).pow(2).mean()*3/38
    loss_rot = (model_output[:,:,3:7] - target[:,:,3:7]).pow(2).mean()*4/38+(model_output[:,:,22:26] - target[:,:,22:26]).pow(2).mean()*4/38
    loss_hand = (model_output[:,:,7:19] - target[:,:,7:19]).pow(2).mean()*12/38+(model_output[:,:,26:38] - target[:,:,26:38]).pow(2).mean()*12/38
    pred_loss = loss_xyz + loss_rot + loss_hand

    # pred_loss = torch.nn.functional.mse_loss(action_pred, dataset_batch["actions"])
    # loss_xyz, loss_rot, loss_hand = 0, 0, 0

    # NOTE(review): this local is never used afterwards — likely leftover
    # from an embedding-logging path.
    latent_encoder_emb = self.model.inner_model.latent_encoder_emb
    val_total_act_loss_pp += pred_loss


    # output[f"idx_{self.modality_scope}"] = dataset_batch["idx"]
    output["validation_loss"] = val_total_act_loss_pp
    output["loss_xyz"] = loss_xyz
    output["loss_rot"] = loss_rot
    output["loss_hand"] = loss_hand

    if print_it:
        # print(dataset_batch['frame_ids'])
        # print(dataset_batch['ann_file'])
        # print("action_pred_shape:", action_pred.shape)
        # NOTE(review): `np` is not imported at the top of this file — it
        # presumably arrives via the `gc_sampling` star import; verify.
        frame_ids = [idx.cpu().numpy() for idx in dataset_batch['frame_ids']]
        frame_ids = np.array(frame_ids)
        # print("frame_ids:", frame_ids)
        for i in range(action_pred.shape[0]):
            print('data info', dataset_batch['ann_file'][i], frame_ids[:,i])
            # print("true_action:", dataset_batch["actions"][i].flatten()[:19])
            # print("pred_action:", action_pred[i].flatten()[:19])
            print("true_action:", dataset_batch["actions"][i][:4, :7].flatten())
            print("pred_action:", action_pred[i][:4, :7].flatten())

            print("true_hand:", dataset_batch["actions"][i][:1, -12:].flatten())
            print("pred_hand:", action_pred[i][:1, -12:].flatten())
            print('-----------------------------------------------')
    return output
|
| 399 |
+
|
| 400 |
+
def training_step_xhand(self, dataset_batch: Dict[str, Dict],):
    """
    Run one training step for the xhand embodiment.

    The batch is encoded into conditioning features, the diffusion model
    produces score-matching targets/outputs, and the action MSE is split
    into xyz / rotation / hand groups, each weighted by its width over the
    18 action dims so the three terms sum to a plain mean.

    Args:
        dataset_batch: Batch dict; must contain "actions" with layout
            (batch, time, 18) — 3 xyz, then rot dims, then 12 hand dims
            (widths inferred from the slices below; confirm against dataset).

    Returns:
        Tuple (total_loss, loss_xyz, loss_rot, loss_hand) of scalar tensors.
    """
    # Conditioning features + language goal embedding.
    predictive_feature, latent_goal = self.extract_predictive_feature(dataset_batch)

    # Score-matching target / model prediction for the noisy action chunk.
    target, model_output = self.diffusion_loss(
        predictive_feature,
        latent_goal,
        dataset_batch["actions"],
    )

    # Per-group squared error, weighted by group width (num / 18).
    def _group_loss(group_slice, width):
        return (model_output[:, :, group_slice] - target[:, :, group_slice]).pow(2).mean() * width / 18

    part_xyz = _group_loss(slice(None, 3), 3)
    part_rot = _group_loss(slice(3, -12), 3)
    part_hand = _group_loss(slice(-12, None), 12)
    act_loss = part_xyz + part_rot + part_hand

    # Accumulate onto fresh zero tensors on the module's device, mirroring
    # the multi-loss bookkeeping used elsewhere in this class.
    zero = torch.tensor(0.0).to(self.device)
    return zero + act_loss, zero + part_xyz, zero + part_rot, zero + part_hand
|
| 430 |
+
|
| 431 |
+
@torch.no_grad()
def validation_step_xhand(self, dataset_batch: Dict[str, Dict], print_it=False) -> Dict[
    str, torch.Tensor]:  # type: ignore
    """
    Compute the validation losses for the xhand embodiment.

    The diffusion model denoises a full action sequence conditioned on the
    visual features and the language goal; the prediction is compared to the
    ground-truth actions with a width-weighted MSE split into xyz / rotation /
    hand groups (slices assume an 18-dim action: 3 xyz, rot dims in the
    middle, 12 hand dims at the end — TODO confirm against the dataset).

    Args:
        dataset_batch: Batch dict containing at least "rgb_obs", "lang_text"
            and "actions"; "frame_ids" and "ann_file" are additionally read
            when print_it is True.
        print_it: If True, print per-sample ground-truth vs. predicted
            actions for debugging.

    Returns:
        Dict with "validation_loss", "loss_xyz", "loss_rot", "loss_hand".
    """
    output = {}
    val_total_act_loss_pp = torch.tensor(0.0).to(self.device)
    # Compute the required embeddings.
    # perceptual_emb, latent_goal, image_latent_goal = self.compute_input_embeddings(dataset_batch)
    predictive_feature, latent_goal= self.extract_predictive_feature(dataset_batch)

    # Predict the next action sequence. The latent plan argument is a zero
    # tensor — planning is unused on this path.
    action_pred = self.denoise_actions(
        torch.zeros_like(latent_goal).to(latent_goal.device),
        predictive_feature,
        latent_goal,
        inference=True,
    )
    dataset_batch["actions"] = dataset_batch["actions"].to(action_pred.device) #(batch, time, dim)
    # print("action_pred_shape:", action_pred.shape)
    # print("action_truth_shape:", dataset_batch["actions"].shape)

    # Per-group MSE, each weighted by its width so the three terms sum to a
    # plain mean over all 18 action dims.
    loss_xyz = torch.nn.functional.mse_loss(action_pred[:, :, :3], dataset_batch["actions"][:, :, :3])*3/18
    loss_rot = torch.nn.functional.mse_loss(action_pred[:, :, 3:-12], dataset_batch["actions"][:, :, 3:-12])*3/18
    loss_hand = torch.nn.functional.mse_loss(action_pred[:, :, -12:], dataset_batch["actions"][:, :, -12:])*12/18
    pred_loss = loss_xyz + loss_rot + loss_hand

    # pred_loss = torch.nn.functional.mse_loss(action_pred, dataset_batch["actions"])
    # loss_xyz, loss_rot, loss_hand = 0, 0, 0

    # NOTE(review): fetched but never used in this method — confirm whether
    # it can be removed.
    latent_encoder_emb = self.model.inner_model.latent_encoder_emb
    val_total_act_loss_pp += pred_loss

    # output[f"idx_{self.modality_scope}"] = dataset_batch["idx"]
    output["validation_loss"] = val_total_act_loss_pp
    output["loss_xyz"] = loss_xyz
    output["loss_rot"] = loss_rot
    output["loss_hand"] = loss_hand

    if print_it:
        # Debug dump: print short prefixes of the true vs. predicted actions
        # for every sample in the batch.
        # print(dataset_batch['frame_ids'])
        # print(dataset_batch['ann_file'])
        # print("action_pred_shape:", action_pred.shape)
        frame_ids = [idx.cpu().numpy() for idx in dataset_batch['frame_ids']]
        # Presumably (time, batch) after stacking — indexed [:, i] below; verify.
        frame_ids = np.array(frame_ids)
        # print("frame_ids:", frame_ids)
        for i in range(action_pred.shape[0]):
            print('data info', dataset_batch['ann_file'][i], frame_ids[:,i])
            # print("true_action:", dataset_batch["actions"][i].flatten()[:19])
            # print("pred_action:", action_pred[i].flatten()[:19])
            print("true_action:", dataset_batch["actions"][i][:4, :-12].flatten())
            print("pred_action:", action_pred[i][:4, :-12].flatten())

            print("true_hand:", dataset_batch["actions"][i][:1, -12:].flatten())
            print("pred_hand:", action_pred[i][:1, -12:].flatten())
            print('-----------------------------------------------')
    return output
|
| 501 |
+
|
| 502 |
+
def extract_predictive_feature(self, dataset_batch):
    """
    Compute the required embeddings for the visual observations and the latent goal.

    The RGB views are encoded by the video-prediction encoder (under
    no_grad) conditioned on the language instruction, the per-view features
    are concatenated token-wise, and the result is compressed by the
    Video Former. The language goal is embedded with the text encoder.

    Args:
        dataset_batch: Batch dict with "rgb_obs" (keys 'rgb_static',
            'rgb_gripper', optionally 'rgb_gripper2') and "lang_text"
            (presumably a list of instruction strings — the `+` duplication
            below relies on list semantics); "state_obs" is forwarded when
            present.

    Returns:
        Tuple (predictive_feature, latent_goal) where predictive_feature is
        a dict with keys 'state_images', 'modality' and optionally 'state_obs'.
    """
    # 1. extract the relevant visual observations
    rgb_static = dataset_batch["rgb_obs"]['rgb_static'].to(self.device)
    rgb_gripper = dataset_batch["rgb_obs"]['rgb_gripper'].to(self.device)
    if 'rgb_gripper2' in dataset_batch["rgb_obs"]:
        rgb_gripper2 = dataset_batch["rgb_obs"]['rgb_gripper2'].to(self.device)
    # 2. embed the language goal — only the language modality is supported here
    modality = "lang"

    # NOTE(review): config must enable use_text_not_embedding; `assert` is
    # stripped under `python -O`.
    assert self.use_text_not_embedding == True
    if self.use_text_not_embedding:
        inputs = self.tokenizer(text=dataset_batch["lang_text"], padding='max_length', return_tensors="pt",truncation=True).to(self.text_encoder.device)
        outputs = self.text_encoder(**inputs)
        latent_goal = outputs.text_embeds

    language = dataset_batch["lang_text"]

    batch = rgb_static.shape[0]

    if 'rgb_gripper2' not in dataset_batch["rgb_obs"]:
        with torch.no_grad():
            # Stack both camera views along the batch axis so the encoder
            # runs once; duplicate the language list to stay aligned.
            input_rgb = torch.cat([rgb_static, rgb_gripper], dim=0)
            language = language + language
            # print("input_rgb_shape:", input_rgb.shape)
            # print("language_shape:", len(language))
            perceptual_features = self.TVP_encoder(input_rgb, language, self.timestep,
                                                   self.extract_layer_idx)
            # perceptual_features = self.TVP_encoder(input_rgb, language, self.timestep,
            #                                        self.extract_layer_idx, all_layer=self.use_all_layer,
            #                                        step_time=1)

            # Flatten the spatial map into a token sequence: (b f c h w) -> (b f l c).
            perceptual_features = einops.rearrange(perceptual_features, 'b f c h w-> b f c (h w)')
            perceptual_features = einops.rearrange(perceptual_features, 'b f c l-> b f l c')

            # Undo the view stacking and concatenate views along the token axis.
            perceptual_features, gripper_feature = torch.split(perceptual_features, [batch, batch], dim=0)
            perceptual_features = torch.cat([perceptual_features, gripper_feature], dim=2)

    else:
        with torch.no_grad():
            # Same as above with three camera views.
            input_rgb = torch.cat([rgb_static, rgb_gripper, rgb_gripper2], dim=0)
            language = language + language + language
            perceptual_features = self.TVP_encoder(input_rgb, language, self.timestep,
                                                   self.extract_layer_idx)

            perceptual_features = einops.rearrange(perceptual_features, 'b f c h w-> b f c (h w)')
            perceptual_features = einops.rearrange(perceptual_features, 'b f c l-> b f l c')

            perceptual_features, gripper_feature1, gripper_feature2 = torch.split(perceptual_features, [batch, batch, batch], dim=0)
            perceptual_features = torch.cat([perceptual_features, gripper_feature1, gripper_feature2], dim=2)


    # NOTE(review): Video_Former runs outside the no_grad blocks above —
    # confirm this is intended so that it receives gradients.
    perceptual_features = perceptual_features.to(torch.float32)
    perceptual_features = self.Video_Former(perceptual_features)

    predictive_feature = {'state_images': perceptual_features}
    predictive_feature['modality'] = modality
    if 'state_obs' in dataset_batch.keys():
        predictive_feature['state_obs'] = dataset_batch['state_obs'].to(self.device)

    return predictive_feature, latent_goal
|
| 565 |
+
|
| 566 |
+
|
| 567 |
+
def _log_training_metrics(self, action_loss, total_loss, total_bs):
|
| 568 |
+
"""
|
| 569 |
+
Log the training metrics.
|
| 570 |
+
"""
|
| 571 |
+
self.log("train/action_loss", action_loss, on_step=False, on_epoch=True, sync_dist=True, batch_size=total_bs)
|
| 572 |
+
self.log("train/total_loss", total_loss, on_step=False, on_epoch=True, sync_dist=True, batch_size=total_bs)
|
| 573 |
+
|
| 574 |
+
def _log_validation_metrics(self, pred_loss, img_gen_loss, val_total_act_loss_pp):
|
| 575 |
+
"""
|
| 576 |
+
Log the validation metrics.
|
| 577 |
+
"""
|
| 578 |
+
self.log(
|
| 579 |
+
"val_act/action_loss",
|
| 580 |
+
val_total_act_loss_pp / len(self.trainer.datamodule.modalities), # type:ignore
|
| 581 |
+
sync_dist=True,
|
| 582 |
+
)
|
| 583 |
+
self.log(f"val_act/img_gen_loss_pp", img_gen_loss, sync_dist=True)
|
| 584 |
+
|
| 585 |
+
def diffusion_loss(
    self,
    perceptual_emb: torch.Tensor,
    latent_goal: torch.Tensor,
    actions: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Compute the score-matching targets and model outputs for a batch.

    Args:
        perceptual_emb: Conditioning state (tensor or feature dict,
            forwarded as-is to the inner model).
        latent_goal: Latent goal embedding.
        actions: Ground-truth action chunk, shape (batch, time, dim).

    Returns:
        (target, model_output) as produced by ``self.model.loss``.
        Fix: the annotation previously claimed a single ``torch.Tensor``
        even though a 2-tuple is returned.
    """
    self.model.train()
    # One noise level per batch element, drawn from the configured density.
    sigmas = self.make_sample_density()(shape=(len(actions),), device=self.device).to(self.device)
    noise = torch.randn_like(actions).to(self.device)
    target, model_output = self.model.loss(perceptual_emb, actions, latent_goal, noise, sigmas)
    return target, model_output
|
| 599 |
+
|
| 600 |
+
def denoise_actions(  # type: ignore
    self,
    latent_plan: torch.Tensor,
    perceptual_emb: torch.Tensor,
    latent_goal: torch.Tensor,
    inference: Optional[bool] = False,
    extra_args=None,
) -> torch.Tensor:
    """
    Denoise the next sequence of actions.

    Args:
        latent_plan: Forwarded to the sampler loop (unused by the samplers).
        perceptual_emb: Conditioning state — either a tensor or a dict with
            a 'state_images' tensor.
        latent_goal: Latent goal embedding; a sequence axis is inserted when
            it has fewer dims than the state.
        inference: Use the configured ``num_sampling_steps`` when True,
            otherwise a fixed 10 steps.
        extra_args: Optional sampler kwargs (see ``sample_loop``).
            Fix: previously a mutable ``{}`` default argument.

    Returns:
        Denoised action tensor of shape (batch, act_window_size, action_dim).
        Fix: the annotation previously claimed a 2-tuple.
    """
    if extra_args is None:
        extra_args = {}
    if inference:
        sampling_steps = self.num_sampling_steps
    else:
        sampling_steps = 10
    self.model.eval()
    # Give the goal a sequence axis when it is lower-rank than the state.
    if len(latent_goal.shape) < len(
            perceptual_emb['state_images'].shape if isinstance(perceptual_emb, dict) else perceptual_emb.shape):
        latent_goal = latent_goal.unsqueeze(1)  # .expand(-1, seq_len, -1)
    input_state = perceptual_emb
    sigmas = self.get_noise_schedule(sampling_steps, self.noise_scheduler)

    # Start from pure Gaussian noise scaled to sigma_max.
    x = torch.randn((len(latent_goal), self.act_window_size, self.action_dim), device=self.device) * self.sigma_max

    actions = self.sample_loop(sigmas, x, input_state, latent_goal, latent_plan, self.sampler_type, extra_args)

    return actions
|
| 627 |
+
|
| 628 |
+
def make_sample_density(self):
    """
    Generate a sample density function based on the desired type for training the model.
    We mostly use log-logistic as it has no additional hyperparameters to tune.

    Returns:
        A callable ``(shape, device, ...) -> sigmas`` drawing noise levels
        from the distribution selected by ``self.sigma_sample_density_type``.

    Raises:
        ValueError: If the configured density type is unknown.
        KeyError: For 'split-lognormal', which requires ``sd_config`` keys
            that are never populated here.
    """
    # Fix: sd_config is indexed with string keys below, so it must be a
    # dict — it was previously an empty *list*, which made the `'key' in
    # sd_config` fallbacks work only by accident and the 'split-lognormal'
    # lookups crash with TypeError. It is still empty, so every branch
    # falls back to the instance-attribute defaults.
    sd_config = {}

    if self.sigma_sample_density_type == 'lognormal':
        loc = self.sigma_sample_density_mean  # if 'mean' in sd_config else sd_config['loc']
        scale = self.sigma_sample_density_std  # if 'std' in sd_config else sd_config['scale']
        return partial(utils.rand_log_normal, loc=loc, scale=scale)

    if self.sigma_sample_density_type == 'loglogistic':
        loc = sd_config['loc'] if 'loc' in sd_config else math.log(self.sigma_data)
        scale = sd_config['scale'] if 'scale' in sd_config else 0.5
        min_value = sd_config['min_value'] if 'min_value' in sd_config else self.sigma_min
        max_value = sd_config['max_value'] if 'max_value' in sd_config else self.sigma_max
        return partial(utils.rand_log_logistic, loc=loc, scale=scale, min_value=min_value, max_value=max_value)

    if self.sigma_sample_density_type == 'loguniform':
        min_value = sd_config['min_value'] if 'min_value' in sd_config else self.sigma_min
        max_value = sd_config['max_value'] if 'max_value' in sd_config else self.sigma_max
        return partial(utils.rand_log_uniform, min_value=min_value, max_value=max_value)

    if self.sigma_sample_density_type == 'uniform':
        return partial(utils.rand_uniform, min_value=self.sigma_min, max_value=self.sigma_max)

    if self.sigma_sample_density_type == 'v-diffusion':
        # NOTE(review): `self.min_value` is only read when 'min_value' is in
        # sd_config, which never happens here — confirm before relying on it.
        min_value = self.min_value if 'min_value' in sd_config else self.sigma_min
        max_value = sd_config['max_value'] if 'max_value' in sd_config else self.sigma_max
        return partial(utils.rand_v_diffusion, sigma_data=self.sigma_data, min_value=min_value, max_value=max_value)

    if self.sigma_sample_density_type == 'discrete':
        sigmas = self.get_noise_schedule(self.num_sampling_steps * 1e5, 'exponential')
        return partial(utils.rand_discrete, values=sigmas)

    if self.sigma_sample_density_type == 'split-lognormal':
        loc = sd_config['mean'] if 'mean' in sd_config else sd_config['loc']
        scale_1 = sd_config['std_1'] if 'std_1' in sd_config else sd_config['scale_1']
        scale_2 = sd_config['std_2'] if 'std_2' in sd_config else sd_config['scale_2']
        return partial(utils.rand_split_log_normal, loc=loc, scale_1=scale_1, scale_2=scale_2)

    raise ValueError('Unknown sample density type')
|
| 668 |
+
|
| 669 |
+
def sample_loop(
    self,
    sigmas,
    x_t: torch.Tensor,
    state: torch.Tensor,
    goal: torch.Tensor,
    latent_plan: torch.Tensor,
    sampler_type: str,
    extra_args=None,
):
    """
    Main method to generate samples depending on the chosen sampler type. DDIM is the
    default as it works well in all settings.

    Args:
        sigmas: Noise schedule (decreasing sigma sequence).
        x_t: Initial noise sample to denoise.
        state: Conditioning state (perceptual embedding).
        goal: Latent goal conditioning.
        latent_plan: Accepted for interface compatibility; not forwarded to
            any sampler below.
        sampler_type: One of the sampler names handled below.
        extra_args: Optional dict; recognized keys: 's_churn', 's_min',
            'use_scaler', 'keep_last_actions'.
            Fix: previously a mutable ``{}`` default argument.

    Returns:
        The denoised sample x_0.

    Raises:
        ValueError: If sampler_type is not recognized.
    """
    if extra_args is None:
        extra_args = {}
    s_churn = extra_args['s_churn'] if 's_churn' in extra_args else 0
    s_min = extra_args['s_min'] if 's_min' in extra_args else 0
    use_scaler = extra_args['use_scaler'] if 'use_scaler' in extra_args else False
    keys = ['s_churn', 'keep_last_actions']
    # Fix: only forward the keys that are actually present — the previous
    # `{x: extra_args[x] for x in keys}` raised KeyError whenever
    # extra_args was non-empty but missing one of them.
    reduced_args = {k: extra_args[k] for k in keys if k in extra_args}
    if use_scaler:
        scaler = self.scaler
    else:
        scaler = None
    # ODE deterministic
    if sampler_type == 'lms':
        x_0 = sample_lms(self.model, state, x_t, goal, sigmas, scaler=scaler, disable=True, extra_args=reduced_args)
    # ODE deterministic can be made stochastic by S_churn != 0
    elif sampler_type == 'heun':
        x_0 = sample_heun(self.model, state, x_t, goal, sigmas, scaler=scaler, s_churn=s_churn, s_tmin=s_min,
                          disable=True)
    # ODE deterministic
    elif sampler_type == 'euler':
        x_0 = sample_euler(self.model, state, x_t, goal, sigmas, scaler=scaler, disable=True)
    # SDE stochastic
    elif sampler_type == 'ancestral':
        x_0 = sample_dpm_2_ancestral(self.model, state, x_t, goal, sigmas, scaler=scaler, disable=True)
    # SDE stochastic: combines an ODE euler step with an stochastic noise correcting step
    elif sampler_type == 'euler_ancestral':
        x_0 = sample_euler_ancestral(self.model, state, x_t, goal, sigmas, scaler=scaler, disable=True)
    # ODE deterministic
    elif sampler_type == 'dpm':
        x_0 = sample_dpm_2(self.model, state, x_t, goal, sigmas, disable=True)
    # ODE deterministic
    elif sampler_type == 'dpm_adaptive':
        x_0 = sample_dpm_adaptive(self.model, state, x_t, goal, sigmas[-2].item(), sigmas[0].item(), disable=True)
    # ODE deterministic
    elif sampler_type == 'dpm_fast':
        x_0 = sample_dpm_fast(self.model, state, x_t, goal, sigmas[-2].item(), sigmas[0].item(), len(sigmas),
                              disable=True)
    # 2nd order solver
    elif sampler_type == 'dpmpp_2s_ancestral':
        x_0 = sample_dpmpp_2s_ancestral(self.model, state, x_t, goal, sigmas, scaler=scaler, disable=True)
    # 2nd order solver
    elif sampler_type == 'dpmpp_2m':
        x_0 = sample_dpmpp_2m(self.model, state, x_t, goal, sigmas, scaler=scaler, disable=True)
    elif sampler_type == 'dpmpp_2m_sde':
        x_0 = sample_dpmpp_sde(self.model, state, x_t, goal, sigmas, scaler=scaler, disable=True)
    elif sampler_type == 'ddim':
        x_0 = sample_ddim(self.model, state, x_t, goal, sigmas, scaler=scaler, disable=True)
    elif sampler_type == 'dpmpp_2s':
        x_0 = sample_dpmpp_2s(self.model, state, x_t, goal, sigmas, scaler=scaler, disable=True)
    elif sampler_type == 'dpmpp_2_with_lms':
        x_0 = sample_dpmpp_2_with_lms(self.model, state, x_t, goal, sigmas, scaler=scaler, disable=True)
    else:
        raise ValueError('desired sampler type not found!')
    return x_0
|
| 737 |
+
|
| 738 |
+
def get_noise_schedule(self, n_sampling_steps, noise_schedule_type):
    """Return the sigma schedule for sampling.

    Describes the distribution over noise levels from sigma_min to
    sigma_max for the requested schedule type.

    Raises:
        ValueError: If the schedule type is unknown.
    """
    builders = {
        # rho=7 is the default from EDM karras
        'karras': lambda: get_sigmas_karras(n_sampling_steps, self.sigma_min, self.sigma_max, 7,
                                            self.device),
        'exponential': lambda: get_sigmas_exponential(n_sampling_steps, self.sigma_min, self.sigma_max, self.device),
        'vp': lambda: get_sigmas_vp(n_sampling_steps, device=self.device),
        'linear': lambda: get_sigmas_linear(n_sampling_steps, self.sigma_min, self.sigma_max, device=self.device),
        'cosine_beta': lambda: cosine_beta_schedule(n_sampling_steps, device=self.device),
        've': lambda: get_sigmas_ve(n_sampling_steps, self.sigma_min, self.sigma_max, device=self.device),
        'iddpm': lambda: get_iddpm_sigmas(n_sampling_steps, self.sigma_min, self.sigma_max, device=self.device),
    }
    if noise_schedule_type not in builders:
        raise ValueError('Unknown noise schedule type')
    return builders[noise_schedule_type]()
|
| 758 |
+
|
| 759 |
+
def reset(self):
    """Clear cached rollout state. Invoke at the beginning of each new inference rollout."""
    # Drop the cached plan/goal and restart the action-chunk bookkeeping.
    self.plan, self.latent_goal = None, None
    self.rollout_step_counter = 0
|
| 766 |
+
|
| 767 |
+
def forward(self,batch, xhand=False, xbot=False):
    """Dispatch one training step to the embodiment-specific implementation.

    `xhand` takes precedence over `xbot`; with neither flag set, the
    default training_step is used.
    """
    if xhand:
        step_fn = self.training_step_xhand
    elif xbot:
        step_fn = self.training_step_xbot
    else:
        step_fn = self.training_step
    return step_fn(batch)
|
| 774 |
+
|
| 775 |
+
|
| 776 |
+
def eval_forward(self, obs, goal, xhand=False, xbot=False):
    """
    Method for doing inference with the model.

    Mirrors extract_predictive_feature + denoise_actions for a single
    inference call: encodes the observation views and the language goal,
    then denoises a full action sequence.

    Args:
        obs: Observation dict with "rgb_obs" (keys 'rgb_static',
            'rgb_gripper', optionally 'rgb_gripper2') and optionally
            "state_obs".
        goal: Goal dict; must contain "lang_text" — here a single
            instruction string (it is wrapped into per-view lists below),
            unlike the batched training path.
        xhand / xbot: Not referenced in this method; kept for signature
            symmetry with `forward`.

    Returns:
        The denoised action sequence tensor.
    """
    if 'lang_text' in goal:
        # NOTE(review): `assert` is stripped under `python -O`.
        assert self.use_text_not_embedding == True
        if self.use_text_not_embedding:
            inputs = self.tokenizer(text=goal["lang_text"], padding='max_length', return_tensors="pt",truncation=True).to(self.text_encoder.device)
            outputs = self.text_encoder(**inputs)
            latent_goal = outputs.text_embeds

    # if self.use_text_not_embedding:
    #     # print(goal.keys())
    #     latent_goal = self.language_goal(goal["lang_text"])
    #     latent_goal = latent_goal.to(torch.float32)
    # else:
    #     latent_goal = self.language_goal(goal["lang"]).unsqueeze(0).to(torch.float32).to(
    #         obs["rgb_obs"]['rgb_static'].device)

    rgb_static = obs["rgb_obs"]['rgb_static'].to(self.device)
    rgb_gripper = obs["rgb_obs"]['rgb_gripper'].to(self.device)
    if 'rgb_gripper2' in obs["rgb_obs"]:
        rgb_gripper2 = obs["rgb_obs"]['rgb_gripper2'].to(self.device)

    language = goal["lang_text"]
    batch = rgb_static.shape[0]

    if 'rgb_gripper2' not in obs["rgb_obs"]:
        with torch.no_grad():
            # Stack views along the batch axis for a single encoder pass; the
            # single instruction string is repeated once per view.
            input_rgb = torch.cat([rgb_static, rgb_gripper], dim=0)
            language = [language] + [language]
            print("input_rgb_shape:", input_rgb.shape)
            print("language_shape:", len(language))
            perceptual_features = self.TVP_encoder(input_rgb, language, self.timestep,
                                                   self.extract_layer_idx)
            # perceptual_features = self.TVP_encoder(input_rgb, language, self.timestep,
            #                                        self.extract_layer_idx, all_layer=self.use_all_layer,
            #                                        step_time=1)
            print("perceptual_features_shape:", perceptual_features.shape)
            # Flatten the spatial map into a token sequence: (b f c h w) -> (b f l c).
            perceptual_features = einops.rearrange(perceptual_features, 'b f c h w-> b f c (h w)')
            perceptual_features = einops.rearrange(perceptual_features, 'b f c l-> b f l c')

            # Undo the view stacking and concatenate views along the token axis.
            perceptual_features, gripper_feature = torch.split(perceptual_features, [batch, batch], dim=0)
            perceptual_features = torch.cat([perceptual_features, gripper_feature], dim=2)
    else:
        with torch.no_grad():
            # Same as above with three camera views.
            input_rgb = torch.cat([rgb_static, rgb_gripper, rgb_gripper2], dim=0)
            language = [language] + [language] + [language]
            perceptual_features = self.TVP_encoder(input_rgb, language, self.timestep,
                                                   self.extract_layer_idx)

            perceptual_features = einops.rearrange(perceptual_features, 'b f c h w-> b f c (h w)')
            perceptual_features = einops.rearrange(perceptual_features, 'b f c l-> b f l c')

            perceptual_features, gripper_feature1, gripper_feature2 = torch.split(perceptual_features, [batch, batch, batch], dim=0)
            perceptual_features = torch.cat([perceptual_features, gripper_feature1, gripper_feature2], dim=2)

    # Compress the fused token sequence with the Video Former.
    perceptual_features = perceptual_features.to(torch.float32)
    perceptual_features = self.Video_Former(perceptual_features)

    perceptual_emb = {'state_images': perceptual_features}

    perceptual_emb['modality'] = "lang"

    if 'state_obs' in obs.keys():
        perceptual_emb['state_obs'] = obs['state_obs'].to(self.device)

    # Denoise the action chunk; the zero latent plan mirrors the training path.
    act_seq = self.denoise_actions(
        torch.zeros_like(latent_goal).to(latent_goal.device),
        perceptual_emb,
        latent_goal,
        inference=True,
    )
    return act_seq
|
| 850 |
+
|
| 851 |
+
def step(self, obs, goal):
    """Do one inference step with action chunking.

    The model predicts a whole action sequence; a fresh sequence is computed
    only every ``self.multistep`` calls, and intermediate calls replay the
    cached chunk.

    Args:
        obs (dict): Observation from environment.
        goal (dict): Goal as visual observation or embedded language instruction.

    Returns:
        The predicted action for the current timestep within the chunk.
    """
    # Refresh the cached action chunk at the start of each window.
    if self.rollout_step_counter % self.multistep == 0:
        self.pred_action_seq = self.eval_forward(obs, goal)

    action = self.pred_action_seq[0, self.rollout_step_counter]
    if len(action.shape) == 2:
        # Insert a time axis so downstream consumers see (b, 1, d).
        action = einops.rearrange(action, 'b d -> b 1 d')

    # Advance the within-chunk pointer and wrap at the window boundary.
    self.rollout_step_counter += 1
    if self.rollout_step_counter == self.multistep:
        self.rollout_step_counter = 0

    return action
|
| 877 |
+
|
| 878 |
+
def step_real(self, obs, goal):
    """Predict and cache a full action sequence for a real-robot rollout.

    Unlike `step`, no chunking bookkeeping happens here — every call runs
    inference and returns the entire predicted sequence.

    Args:
        obs (dict): Observation from environment.
        goal (dict): Goal as visual observation or embedded language instruction.

    Returns:
        The full predicted action sequence.
    """
    self.pred_action_seq = self.eval_forward(obs, goal)
    return self.pred_action_seq
|
| 896 |
+
|
| 897 |
+
def on_train_start(self) -> None:
    """Cast all submodules to the LightningModule's dtype before training begins."""
    # NOTE: the vae conversion was disabled in the original implementation.
    # self.vae.to(dtype=self.dtype)
    for submodule in (self.model, self.Video_Former, self.text_encoder, self.TVP_encoder):
        submodule.to(dtype=self.dtype)
|
| 905 |
+
|
| 906 |
+
@rank_zero_only
def on_train_epoch_start(self) -> None:
    # Rank-0-only progress logging.
    # NOTE(review): this hook is defined twice in this class; the later
    # definition overrides this one at class-creation time.
    logger.info(f"Start training epoch {self.current_epoch}")
|
| 909 |
+
|
| 910 |
+
@rank_zero_only
def on_train_epoch_end(self, unused: Optional = None) -> None:  # type: ignore
    # Rank-0-only progress logging; `unused` kept for Lightning compatibility.
    # NOTE(review): this hook is defined twice in this class; the later
    # definition overrides this one at class-creation time.
    logger.info(f"Finished training epoch {self.current_epoch}")
|
| 913 |
+
|
| 914 |
+
@rank_zero_only
def on_validation_epoch_end(self) -> None:
    # Rank-0-only progress logging.
    # NOTE(review): this hook is defined twice in this class; the later
    # definition overrides this one at class-creation time.
    logger.info(f"Finished validation epoch {self.current_epoch}")
|
| 917 |
+
|
| 918 |
+
|
| 919 |
+
def on_validation_epoch_start(self) -> None:
    # Progress logging via the rank-0-only helper (no decorator needed).
    # NOTE(review): this hook is defined twice in this class; the later
    # definition overrides this one at class-creation time.
    log_rank_0(f"Start validation epoch {self.current_epoch}")
|
| 921 |
+
|
| 922 |
+
@rank_zero_only
def on_train_epoch_start(self) -> None:
    # Rank-0-only progress logging.
    # NOTE(review): duplicate of an earlier identical definition in this
    # class; this later one takes effect. Consider removing one copy.
    logger.info(f"Start training epoch {self.current_epoch}")
|
| 925 |
+
|
| 926 |
+
@rank_zero_only
def on_train_epoch_end(self, unused: Optional = None) -> None:  # type: ignore
    # Rank-0-only progress logging; `unused` kept for Lightning compatibility.
    # NOTE(review): duplicate of an earlier identical definition in this
    # class; this later one takes effect. Consider removing one copy.
    logger.info(f"Finished training epoch {self.current_epoch}")
|
| 929 |
+
|
| 930 |
+
@rank_zero_only
def on_validation_epoch_end(self) -> None:
    # Rank-0-only progress logging.
    # NOTE(review): duplicate of an earlier identical definition in this
    # class; this later one takes effect. Consider removing one copy.
    logger.info(f"Finished validation epoch {self.current_epoch}")
|
| 933 |
+
|
| 934 |
+
def on_validation_epoch_start(self) -> None:
    # Progress logging via the rank-0-only helper.
    # NOTE(review): duplicate of an earlier identical definition in this
    # class; this later one takes effect. Consider removing one copy.
    log_rank_0(f"Start validation epoch {self.current_epoch}")
|
| 936 |
+
|
| 937 |
+
|
| 938 |
+
@rank_zero_only
def log_rank_0(*args, **kwargs):
    """Forward to logger.info, but only on the rank-0 process under DDP."""
    # when using ddp, only log with rank 0 process
    logger.info(*args, **kwargs)
|
code/policy_models/__init__.py
ADDED
|
File without changes
|
code/policy_models/__pycache__/VPP_policy.cpython-310.pyc
ADDED
|
Binary file (19.6 kB). View file
|
|
|
code/policy_models/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (147 Bytes). View file
|
|
|
code/policy_models/__pycache__/__init__.cpython-39.pyc
ADDED
|
Binary file (145 Bytes). View file
|
|
|
code/policy_models/callbacks/__init__.py
ADDED
|
File without changes
|
code/policy_models/callbacks/ema.py
ADDED
|
@@ -0,0 +1,211 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
import os.path
|
| 15 |
+
import warnings
|
| 16 |
+
from typing import Any, Dict, List, Optional
|
| 17 |
+
import logging
|
| 18 |
+
|
| 19 |
+
import pytorch_lightning as pl
|
| 20 |
+
import torch
|
| 21 |
+
from pytorch_lightning import Callback
|
| 22 |
+
from pytorch_lightning.utilities import rank_zero_warn
|
| 23 |
+
from pytorch_lightning.utilities.exceptions import MisconfigurationException
|
| 24 |
+
from pytorch_lightning.utilities.types import STEP_OUTPUT
|
| 25 |
+
|
| 26 |
+
logger = logging.getLogger(__name__)
|
| 27 |
+
|
| 28 |
+
# Detect NVIDIA Apex's fused-kernel extension (amp_C). When it is missing,
# EMA still works — __init__ only emits a performance warning (see
# rank_zero_warn below in EMA.__init__).
try:
    import amp_C

    apex_available = True
except Exception:
    apex_available = False
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class EMA(Callback):
    """
    Implements Exponential Moving Averaging (EMA).

    When training a model, this callback will maintain moving averages of the trained parameters.
    When evaluating, we use the moving averages copy of the trained parameters.
    When saving, we save an additional set of parameters with the prefix `ema`.

    Args:
        decay: The exponential decay used when calculating the moving average. Has to be between 0-1.
        apply_ema_every_n_steps: Apply EMA every n global steps.
        start_step: Start applying EMA from ``start_step`` global step onwards.
        save_ema_weights_in_callback_state: Enable saving ema weights in callback state.
            This is not required when using NeMo as the experiment manager handles saving weights.
        evaluate_ema_weights_instead: Validate the EMA weights instead of the original weights.
            Note this means that when saving the model, the validation metrics are calculated with the EMA weights.
        inv_gamma: Inverse multiplicative factor of the decay warmup schedule used by ``get_decay``.
        power: Exponent of the decay warmup schedule used by ``get_decay``.
        min_value: Lower clamp applied to the scheduled decay rate.
        max_value: Upper clamp applied to the scheduled decay rate.
    """

    def __init__(
        self,
        decay: float,
        apply_ema_every_n_steps: int = 1,
        start_step: int = 0,
        save_ema_weights_in_callback_state: bool = False,
        evaluate_ema_weights_instead: bool = False,
        inv_gamma: float = 1.0,
        power: float = 2 / 3,
        min_value: float = 0.0,
        max_value: float = 0.9999,
    ):
        if not apex_available:
            rank_zero_warn(
                "EMA has better performance when Apex is installed: https://github.com/NVIDIA/apex#installation."
            )
        if not (0 <= decay <= 1):
            raise MisconfigurationException("EMA decay value must be between 0 and 1")
        # EMA copy of the model weights; created lazily in on_train_start (or restored from checkpoint).
        self._ema_model_weights: Optional[List[torch.Tensor]] = None
        # Overflow flag buffer required by the fused apex kernel.
        self._overflow_buf: Optional[torch.Tensor] = None
        # Last global step at which EMA was applied (also drives the decay schedule).
        self._cur_step: Optional[int] = None
        # Temporary CPU copy of the original weights while EMA weights are swapped in for eval.
        self._weights_buffer: Optional[List[torch.Tensor]] = None
        self.apply_ema_every_n_steps = apply_ema_every_n_steps
        self.start_step = start_step
        self.save_ema_weights_in_callback_state = save_ema_weights_in_callback_state
        self.evaluate_ema_weights_instead = evaluate_ema_weights_instead
        self.decay = decay
        self.inv_gamma = inv_gamma
        self.power = power
        self.min_value = min_value
        self.max_value = max_value

    def get_decay(self, optimization_step: int) -> float:
        """
        Compute the warmup-scheduled decay rate for the given optimization step.

        The rate grows from 0 towards 1 following ``1 - (1 + step / inv_gamma) ** -power``
        and is clamped into ``[min_value, max_value]``.
        """
        step = max(0, optimization_step - self.start_step - 1)
        value = 1 - (1 + step / self.inv_gamma) ** -self.power

        # clamp the value between min_value and max_value
        value = max(min(value, self.max_value), self.min_value)

        return value

    def on_train_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        """Create the EMA weight copy (unless restored from a checkpoint) and move it to the module device."""
        logging.info('Creating EMA weights copy.')
        if self._ema_model_weights is None:
            self._ema_model_weights = [p.detach().clone() for p in pl_module.state_dict().values()]
        # ensure that all the weights are on the correct device
        self._ema_model_weights = [p.to(pl_module.device) for p in self._ema_model_weights]
        self._overflow_buf = torch.IntTensor([0]).to(pl_module.device)

    def ema(self, pl_module: "pl.LightningModule") -> None:
        """Dispatch one EMA update; the fused apex kernel only supports CUDA tensors."""
        if apex_available and pl_module.device.type == "cuda":
            return self.apply_multi_tensor_ema(pl_module)
        return self.apply_ema(pl_module)

    def apply_multi_tensor_ema(self, pl_module: "pl.LightningModule") -> None:
        """Fused EMA update via apex: ema = decay * ema + (1 - decay) * model, in place.

        NOTE(review): this path uses the constant ``self.decay`` while ``apply_ema`` uses the
        warmup schedule from ``get_decay`` — confirm this asymmetry is intended.
        """
        model_weights = list(pl_module.state_dict().values())
        amp_C.multi_tensor_axpby(
            65536,  # todo (sean): chunk size, should we expose?
            self._overflow_buf,
            [self._ema_model_weights, model_weights, self._ema_model_weights],
            self.decay,
            1 - self.decay,
            -1,
        )

    def apply_ema(self, pl_module: "pl.LightningModule") -> None:
        """Pure-PyTorch EMA update with the scheduled decay rate."""
        decay = self.get_decay(self._cur_step)
        for orig_weight, ema_weight in zip(list(pl_module.state_dict().values()), self._ema_model_weights):
            if orig_weight.dtype == torch.uint8 or orig_weight.dtype == torch.int64:
                # Integer state (e.g. counters / indices) cannot be meaningfully averaged; mirror it.
                ema_weight.data = orig_weight.data.clone()
            else:
                # In-place form of: ema = decay * ema + (1 - decay) * orig
                diff = ema_weight.data - orig_weight.data
                diff.mul_(1.0 - decay)
                ema_weight.sub_(diff)
        # Bugfix: ``Callback`` has no ``log`` method (that is a LightningModule API), so the
        # original ``self.log(...)`` raised AttributeError; log through the module instead.
        pl_module.log("train/ema_rate", decay, on_step=True, on_epoch=False, prog_bar=True)

    def should_apply_ema(self, step: int) -> bool:
        """Apply at most once per global step, only after ``start_step``, every n steps."""
        return step != self._cur_step and step >= self.start_step and step % self.apply_ema_every_n_steps == 0

    def on_train_batch_end(
        self,
        trainer: "pl.Trainer",
        pl_module: "pl.LightningModule",
        outputs: STEP_OUTPUT,
        batch: Any,
        batch_idx: int,
        # dataloader_idx: int,
    ) -> None:
        if self.should_apply_ema(trainer.global_step):
            self._cur_step = trainer.global_step
            self.ema(pl_module)

    def state_dict(self) -> Dict[str, Any]:
        """Persist the current step (and optionally the EMA weights) in the callback state."""
        if self.save_ema_weights_in_callback_state:
            return dict(cur_step=self._cur_step, ema_weights=self._ema_model_weights)
        return dict(cur_step=self._cur_step)

    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        self._cur_step = state_dict['cur_step']
        # when loading using NeMo, ema weights will be loaded by the experiment manager separately.
        if self._ema_model_weights is None:
            self._ema_model_weights = state_dict.get('ema_weights')

    def on_load_checkpoint(
        self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", checkpoint: Dict[str, Any]
    ) -> None:
        """Try to restore the companion ``-EMA`` checkpoint written by NeMo-style checkpointing."""
        checkpoint_callback = trainer.checkpoint_callback

        if trainer.ckpt_path and checkpoint_callback is not None and 'NeMo' in type(checkpoint_callback).__name__:
            ext = checkpoint_callback.FILE_EXTENSION
            if trainer.ckpt_path.endswith(f'-EMA{ext}'):
                logging.info(
                    "loading EMA based weights. "
                    "The callback will treat the loaded EMA weights as the main weights"
                    " and create a new EMA copy when training."
                )
                return
            ema_path = trainer.ckpt_path.replace(ext, f'-EMA{ext}')
            if os.path.exists(ema_path):
                ema_state_dict = torch.load(ema_path, map_location=torch.device('cpu'))
                # Bugfix: materialize the dict_values view into a real list so the attribute
                # matches its ``List[torch.Tensor]`` contract and supports repeated use.
                self._ema_model_weights = list(ema_state_dict['state_dict'].values())
                del ema_state_dict
                logging.info("EMA weights have been loaded successfully. Continuing training with saved EMA weights.")
            else:
                warnings.warn(
                    "we were unable to find the associated EMA weights when re-loading, "
                    "training will start with new EMA weights.",
                    UserWarning,
                )

    def replace_model_weights(self, pl_module: "pl.LightningModule") -> None:
        """Swap EMA weights into the module, buffering the originals on CPU."""
        self._weights_buffer = [p.detach().clone().to('cpu') for p in pl_module.state_dict().values()]
        new_state_dict = {k: v for k, v in zip(pl_module.state_dict().keys(), self._ema_model_weights)}
        pl_module.load_state_dict(new_state_dict)

    def restore_original_weights(self, pl_module: "pl.LightningModule") -> None:
        """Restore the buffered original weights after an EMA evaluation pass."""
        state_dict = pl_module.state_dict()
        new_state_dict = {k: v for k, v in zip(state_dict.keys(), self._weights_buffer)}
        pl_module.load_state_dict(new_state_dict)
        del self._weights_buffer

    @property
    def ema_initialized(self) -> bool:
        return self._ema_model_weights is not None

    def on_validation_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        if self.ema_initialized and self.evaluate_ema_weights_instead:
            self.replace_model_weights(pl_module)

    def on_validation_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        if self.ema_initialized and self.evaluate_ema_weights_instead:
            self.restore_original_weights(pl_module)

    def on_test_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        if self.ema_initialized and self.evaluate_ema_weights_instead:
            self.replace_model_weights(pl_module)

    def on_test_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        if self.ema_initialized and self.evaluate_ema_weights_instead:
            self.restore_original_weights(pl_module)
|
code/policy_models/datasets/__pycache__/base_dataset.cpython-310.pyc
ADDED
|
Binary file (9.28 kB). View file
|
|
|
code/policy_models/datasets/__pycache__/disk_dataset.cpython-310.pyc
ADDED
|
Binary file (8.71 kB). View file
|
|
|
code/policy_models/datasets/__pycache__/hulc_data_module.cpython-310.pyc
ADDED
|
Binary file (5.1 kB). View file
|
|
|
code/policy_models/datasets/__pycache__/shm_dataset.cpython-310.pyc
ADDED
|
Binary file (5.1 kB). View file
|
|
|
code/policy_models/datasets/base_dataset.py
ADDED
|
@@ -0,0 +1,296 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
from pathlib import Path
|
| 3 |
+
from typing import Dict, Tuple, Union
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
from omegaconf import DictConfig
|
| 7 |
+
import pyhash
|
| 8 |
+
import torch
|
| 9 |
+
from torch.utils.data import Dataset
|
| 10 |
+
|
| 11 |
+
from policy_models.datasets.utils.episode_utils import (
|
| 12 |
+
get_state_info_dict,
|
| 13 |
+
process_actions,
|
| 14 |
+
process_depth,
|
| 15 |
+
process_language,
|
| 16 |
+
process_rgb,
|
| 17 |
+
process_state,
|
| 18 |
+
)
|
| 19 |
+
|
| 20 |
+
hasher = pyhash.fnv1_32()
|
| 21 |
+
logger = logging.getLogger(__name__)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def get_validation_window_size(idx: int, min_window_size: int, max_window_size: int) -> int:
    """
    Deterministically pick a validation window size for a sequence index.

    Instead of random sampling, a hash of the index selects the size, so the
    same index always yields the same window length across epochs.

    Args:
        idx: Sequence index.
        min_window_size: Minimum window size.
        max_window_size: Maximum window size.

    Returns:
        Window size computed with the hash function (within [min, max]).
    """
    span = max_window_size - min_window_size + 1
    offset = hasher(str(idx)) % span
    return min_window_size + offset
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
class BaseDataset(Dataset):
    """
    Abstract dataset base class.

    Subclasses must provide ``self.episode_lookup`` (mapping sample index -> start
    frame index) and implement ``_load_episode``.

    Args:
        datasets_dir: Path of folder containing episode files (string must contain 'validation' or 'training').
        obs_space: DictConfig of observation space.
        proprio_state: DictConfig with shape of prioprioceptive state.
        key: 'vis' or 'lang'.
        lang_folder: Name of the subdirectory of the dataset containing the language annotations.
        num_workers: Number of dataloading workers for this dataset.
        transforms: Dict with pytorch data transforms (defaults to no transforms).
        batch_size: Batch size.
        min_window_size: Minimum window length of loaded sequences.
        max_window_size: Maximum window length of loaded sequences.
        pad: If True, repeat last frame such that all sequences have length 'max_window_size'.
        aux_lang_loss_window: How many sliding windows to consider for auxiliary language losses, counted from the end
            of an annotated language episode.
        window_sampling_strategy: 'random' (uniform) or 'geometric' window-length sampling.
        geometric_p_value: Success probability p of the geometric distribution (geometric strategy only).
    """

    def __init__(
        self,
        datasets_dir: Path,
        # String annotations keep the class importable for typing-only use when omegaconf is absent.
        obs_space: "DictConfig",
        proprio_state: "DictConfig",
        key: str,
        lang_folder: str,
        num_workers: int,
        transforms: Union[Dict, None] = None,
        batch_size: int = 32,
        min_window_size: int = 16,
        max_window_size: int = 32,
        pad: bool = True,
        aux_lang_loss_window: int = 1,
        window_sampling_strategy: str = 'random',
        geometric_p_value: float = 0.1,
    ):
        self.observation_space = obs_space
        self.proprio_state = proprio_state
        # Bugfix: the original used a mutable default argument (``transforms: Dict = {}``),
        # which is shared across every instance; create a fresh dict per instance instead.
        self.transforms = {} if transforms is None else transforms
        self.with_lang = key == "lang"
        self.relative_actions = "rel_actions" in self.observation_space["actions"]
        assert window_sampling_strategy in ('random', 'geometric')
        self.window_sampling_strategy = window_sampling_strategy
        self.geometric_p_value = geometric_p_value  # only needed for geometric sampling
        self.pad = pad
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.min_window_size = min_window_size
        self.max_window_size = max_window_size
        self.abs_datasets_dir = datasets_dir
        self.lang_folder = lang_folder  # if self.with_lang else None
        self.aux_lang_loss_window = aux_lang_loss_window
        # The directory name encodes the split; validation uses deterministic window sizes.
        assert "validation" in self.abs_datasets_dir.as_posix() or "training" in self.abs_datasets_dir.as_posix()
        self.validation = "validation" in self.abs_datasets_dir.as_posix()
        assert self.abs_datasets_dir.is_dir()
        logger.info(f"loading dataset at {self.abs_datasets_dir}")
        logger.info("finished loading dataset")

    def __getitem__(self, idx: Union[int, Tuple[int, int]]) -> Dict:
        """
        Get sequence of dataset.

        Args:
            idx: Index of the sequence, or a ``(index, window_size)`` tuple.

        Returns:
            Loaded sequence.
        """
        if isinstance(idx, int):
            # When max_ws_size and min_ws_size are equal, avoid unnecessary padding
            # acts like Constant dataset. Currently, used for language data
            if self.min_window_size == self.max_window_size:
                window_size = self.max_window_size
            elif self.min_window_size < self.max_window_size:
                window_size = self._get_window_size(idx)
            else:
                logger.error(f"min_window_size {self.min_window_size} > max_window_size {self.max_window_size}")
                raise ValueError
        else:
            # Caller supplied an explicit window size alongside the index.
            idx, window_size = idx
        sequence = self._get_sequences(idx, window_size)
        if self.pad:
            pad_size = self._get_pad_size(sequence)
            sequence = self._pad_sequence(sequence, pad_size)
        return sequence

    def _get_sequences(self, idx: int, window_size: int) -> Dict:
        """
        Load sequence of length window_size.

        Args:
            idx: Index of starting frame.
            window_size: Length of sampled episode.

        Returns:
            dict: Dictionary of tensors of loaded sequence with different input modalities and actions.
        """

        episode = self._load_episode(idx, window_size)

        seq_state_obs = process_state(episode, self.observation_space, self.transforms, self.proprio_state)
        seq_rgb_obs = process_rgb(episode, self.observation_space, self.transforms)
        # seq_depth_obs = process_depth(episode, self.observation_space, self.transforms)
        seq_acts = process_actions(episode, self.observation_space, self.transforms)
        info = get_state_info_dict(episode)
        seq_lang = process_language(episode, self.transforms, self.with_lang)
        info = self._add_language_info(info, idx)
        seq_dict = {**seq_state_obs, **seq_rgb_obs, **seq_acts, **info, **seq_lang}  # type:ignore
        seq_dict["idx"] = idx  # type:ignore
        return seq_dict

    def _load_episode(self, idx: int, window_size: int) -> Dict[str, np.ndarray]:
        # Subclasses implement the actual episode loading (e.g. from disk or shared memory).
        raise NotImplementedError

    def _get_window_size(self, idx: int) -> int:
        """
        Sample a window size taking into account the episode limits.

        Args:
            idx: Index of the sequence to load.

        Returns:
            Window size.
        """
        window_diff = self.max_window_size - self.min_window_size
        if len(self.episode_lookup) <= idx + window_diff:
            # last episode
            max_window = self.min_window_size + len(self.episode_lookup) - idx - 1
        elif self.episode_lookup[idx + window_diff] != self.episode_lookup[idx] + window_diff:
            # less than max_episode steps until next episode
            steps_to_next_episode = int(
                np.nonzero(
                    self.episode_lookup[idx : idx + window_diff + 1]
                    - (self.episode_lookup[idx] + np.arange(window_diff + 1))
                )[0][0]
            )
            max_window = min(self.max_window_size, (self.min_window_size + steps_to_next_episode - 1))
        else:
            max_window = self.max_window_size

        if self.validation:
            # in validation step, repeat the window sizes for each epoch.
            return get_validation_window_size(idx, self.min_window_size, max_window)
        else:
            if self.window_sampling_strategy == 'geometric':
                p = self.geometric_p_value  # Choose a suitable value for p
                # Rejection-sample until the draw falls inside [min_window_size, max_window].
                # NOTE(review): this loops forever if that interval is unreachable
                # (e.g. max_window < 2, since the draw is always >= 2) — confirm callers
                # guarantee max_window >= max(2, min_window_size).
                while True:
                    sampled_window_size = 1 + np.random.geometric(p)
                    if self.min_window_size <= sampled_window_size <= max_window:
                        return sampled_window_size
            else:
                return np.random.randint(self.min_window_size, max_window + 1)

    def __len__(self) -> int:
        """
        Returns:
            Size of the dataset.
        """
        return len(self.episode_lookup)

    def _get_pad_size(self, sequence: Dict) -> int:
        """
        Determine how many frames to append to end of the sequence

        Args:
            sequence: Loaded sequence.

        Returns:
            Number of frames to pad.
        """
        return self.max_window_size - len(sequence["actions"])

    def _pad_sequence(self, seq: Dict, pad_size: int) -> Dict:
        """
        Pad a sequence by repeating the last frame.

        Args:
            seq: Sequence to pad.
            pad_size: Number of frames to pad.

        Returns:
            Padded sequence.
        """
        seq.update({"robot_obs": self._pad_with_repetition(seq["robot_obs"], pad_size)})
        seq.update({"rgb_obs": {k: self._pad_with_repetition(v, pad_size) for k, v in seq["rgb_obs"].items()}})
        # Bugfix: ``process_depth`` is disabled in ``_get_sequences``, so "depth_obs" may be
        # missing from the sequence dict; default to an empty dict instead of raising KeyError.
        seq.update({"depth_obs": {k: self._pad_with_repetition(v, pad_size) for k, v in seq.get("depth_obs", {}).items()}})
        # todo: find better way of distinguishing rk and play action spaces
        if not self.relative_actions:
            # repeat action for world coordinates action space
            seq.update({"actions": self._pad_with_repetition(seq["actions"], pad_size)})
        else:
            # for relative actions zero pad all but the last action dims and repeat last action dim (gripper action)
            seq_acts = torch.cat(
                [
                    self._pad_with_zeros(seq["actions"][..., :-1], pad_size),
                    self._pad_with_repetition(seq["actions"][..., -1:], pad_size),
                ],
                dim=-1,
            )
            seq.update({"actions": seq_acts})
        seq.update({"state_info": {k: self._pad_with_repetition(v, pad_size) for k, v in seq["state_info"].items()}})
        return seq

    @staticmethod
    def _pad_with_repetition(input_tensor: torch.Tensor, pad_size: int) -> torch.Tensor:
        """
        Pad a sequence Tensor by repeating last element pad_size times.

        Args:
            input_tensor: Sequence to pad.
            pad_size: Number of frames to pad.

        Returns:
            Padded Tensor.
        """
        last_repeated = torch.repeat_interleave(torch.unsqueeze(input_tensor[-1], dim=0), repeats=pad_size, dim=0)
        padded = torch.vstack((input_tensor, last_repeated))
        return padded

    @staticmethod
    def _pad_with_zeros(input_tensor: torch.Tensor, pad_size: int) -> torch.Tensor:
        """
        Pad a Tensor with zeros.

        Args:
            input_tensor: Sequence to pad.
            pad_size: Number of frames to pad.

        Returns:
            Padded Tensor.
        """
        zeros_repeated = torch.repeat_interleave(
            torch.unsqueeze(torch.zeros(input_tensor.shape[-1]), dim=0), repeats=pad_size, dim=0
        )
        padded = torch.vstack((input_tensor, zeros_repeated))
        return padded

    def _add_language_info(self, info: Dict, idx: int) -> Dict:
        """
        If dataset contains language, add info to determine if this sequence will be used for the auxiliary losses.

        Args:
            info: Info dictionary.
            idx: Sequence index.

        Returns:
            Info dictionary with updated information.
        """
        if not self.with_lang:
            return info
        use_for_aux_lang_loss = (
            idx + self.aux_lang_loss_window >= len(self.lang_lookup)
            or self.lang_lookup[idx] < self.lang_lookup[idx + self.aux_lang_loss_window]
        )
        info["use_for_aux_lang_loss"] = use_for_aux_lang_loss
        return info
|
code/policy_models/datasets/disk_dataset.py
ADDED
|
@@ -0,0 +1,280 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os.path
|
| 2 |
+
from itertools import chain
|
| 3 |
+
import logging
|
| 4 |
+
from pathlib import Path
|
| 5 |
+
import pickle
|
| 6 |
+
from typing import Any, Dict, List, Tuple
|
| 7 |
+
import random
|
| 8 |
+
|
| 9 |
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
| 10 |
+
import concurrent.futures
|
| 11 |
+
import numpy as np
|
| 12 |
+
|
| 13 |
+
from policy_models.datasets.base_dataset import BaseDataset
|
| 14 |
+
from policy_models.datasets.utils.episode_utils import lookup_naming_pattern
|
| 15 |
+
|
| 16 |
+
logger = logging.getLogger(__name__)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def load_pkl(filename: Path) -> Dict[str, np.ndarray]:
    """Read one pickled episode file and return the deserialized dictionary."""
    with open(filename, "rb") as file_handle:
        return pickle.load(file_handle)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def load_npz(filename: Path) -> Dict[str, np.ndarray]:
    """Open one ``.npz`` episode file and return its array mapping (a lazy NpzFile)."""
    posix_path = filename.as_posix()
    return np.load(posix_path)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class DiskDataset(BaseDataset):
    """
    Dataset that loads episodes as individual files from disk.

    Args:
        skip_frames: Skip this amount of windows for language dataset.
        save_format: File format in datasets_dir (pkl or npz).
        pretrain: Set to True when pretraining.
    """

    def __init__(
        self,
        *args: Any,
        skip_frames: int = 1,
        save_format: str = "npz",
        pretrain: bool = False,
        **kwargs: Any,
    ):
        super().__init__(*args, **kwargs)
        self.save_format = save_format
        # Select the per-frame file loader matching the on-disk format.
        if self.save_format == "pkl":
            self.load_file = load_pkl
        elif self.save_format == "npz":
            self.load_file = load_npz
        else:
            raise NotImplementedError
        self.pretrain = pretrain
        self.skip_frames = skip_frames

        # Language datasets additionally carry annotation embeddings and raw text.
        if self.with_lang:
            self.episode_lookup, self.lang_lookup, self.lang_ann, self.lang_text = self._build_file_indices_lang(self.abs_datasets_dir)
        else:
            self.episode_lookup = self._build_file_indices(self.abs_datasets_dir)

        # Frame files follow "<prefix><zero-padded index><suffix>"; discover that pattern once.
        self.naming_pattern, self.n_digits = lookup_naming_pattern(self.abs_datasets_dir, self.save_format)

    def _get_episode_name(self, file_idx: int) -> Path:
        """
        Convert file idx to file path.

        Args:
            file_idx: index of starting frame.

        Returns:
            Path to file.
        """
        return Path(f"{self.naming_pattern[0]}{file_idx:0{self.n_digits}d}{self.naming_pattern[1]}")

    def _load_episode(self, idx: int, window_size: int) -> Dict[str, np.ndarray]:
        """
        Load consecutive frames saved as individual files on disk and combine to episode dict.

        Args:
            idx: Index of first frame.
            window_size: Length of sampled episode.

        Returns:
            episode: Dict of numpy arrays containing the episode where keys are the names of modalities.
        """
        start_idx = self.episode_lookup[idx]
        end_idx = start_idx + window_size
        # Keys to stack: every modality of the observation space except "language",
        # plus the scene observation that every frame file carries.
        keys = list(chain(*self.observation_space.values()))
        keys.remove("language")
        keys.append("scene_obs")
        episodes = [self.load_file(self._get_episode_name(file_idx)) for file_idx in range(start_idx, end_idx)]
        # Stack each modality over time: (window_size, ...) per key.
        episode = {key: np.stack([ep[key] for ep in episodes]) for key in keys}
        if self.with_lang:
            episode["language"] = self.lang_ann[self.lang_lookup[idx]][0]  # TODO check [0]
        return episode

    def _build_file_indices_lang(self, abs_datasets_dir: Path) -> Tuple[np.ndarray, List, np.ndarray, List]:
        """
        This method builds the mapping from index to file_name used for loading the episodes of the language dataset.

        Args:
            abs_datasets_dir: Absolute path of the directory containing the dataset.

        Returns:
            episode_lookup: Mapping from training example index to episode (file) index.
            lang_lookup: Mapping from training example to index of language instruction.
            lang_ann: Language embeddings.
            lang_text: Raw language instruction strings, parallel to lang_ann.
        """
        assert abs_datasets_dir.is_dir()

        episode_lookup = []

        # Prefer annotations in the configured lang_folder; fall back to the dataset root.
        try:
            print("trying to load lang data from: ", abs_datasets_dir / self.lang_folder / "auto_lang_ann.npy")
            lang_data = np.load(abs_datasets_dir / self.lang_folder / "auto_lang_ann.npy", allow_pickle=True).item()
        except Exception:
            print("Exception, trying to load lang data from: ", abs_datasets_dir / "auto_lang_ann.npy")
            lang_data = np.load(abs_datasets_dir / "auto_lang_ann.npy", allow_pickle=True).item()

        ep_start_end_ids = lang_data["info"]["indx"]  # each of them are 64
        lang_ann = lang_data["language"]["emb"]  # length total number of annotations
        lang_text = lang_data["language"]["ann"]  # length total number of annotations
        lang_lookup = []
        for i, (start_idx, end_idx) in enumerate(ep_start_end_ids):
            if self.pretrain:
                # For pretraining only keep the last few windows of each annotated episode.
                start_idx = max(start_idx, end_idx + 1 - self.min_window_size - self.aux_lang_loss_window)
            assert end_idx >= self.max_window_size
            cnt = 0
            # Slide over the episode, keeping every skip_frames-th window start.
            for idx in range(start_idx, end_idx + 1 - self.min_window_size):
                if cnt % self.skip_frames == 0:
                    lang_lookup.append(i)
                    episode_lookup.append(idx)
                cnt += 1

        return np.array(episode_lookup), lang_lookup, lang_ann, lang_text

    def _build_file_indices(self, abs_datasets_dir: Path) -> np.ndarray:
        """
        This method builds the mapping from index to file_name used for loading the episodes of the non language
        dataset.

        Args:
            abs_datasets_dir: Absolute path of the directory containing the dataset.

        Returns:
            episode_lookup: Mapping from training example index to episode (file) index.
        """
        assert abs_datasets_dir.is_dir()

        episode_lookup = []
        ep_start_end_ids = np.load(abs_datasets_dir / "ep_start_end_ids.npy")
        logger.info(f'Found "ep_start_end_ids.npy" with {len(ep_start_end_ids)} episodes.')
        for start_idx, end_idx in ep_start_end_ids:
            assert end_idx > self.max_window_size
            # Every frame that still leaves room for a minimum-length window is a valid start.
            for idx in range(start_idx, end_idx + 1 - self.min_window_size):
                episode_lookup.append(idx)
        return np.array(episode_lookup)
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
class ExtendedDiskDataset(DiskDataset):
    """Disk dataset that yields a fixed observation window followed by a fixed
    action chunk per sample (chunked action prediction).

    Unlike the base ``DiskDataset``, each sample always covers
    ``obs_seq_len + action_seq_len - 1`` consecutive frames, where the last
    observation frame is also the first action frame.
    """

    def __init__(
        self,
        *args: Any,
        obs_seq_len: int,
        action_seq_len: int,
        future_range: int,
        img_gen_frame_diff: int = 3,
        **kwargs: Any,
    ):
        # obs_seq_len: number of leading frames returned as observations.
        # action_seq_len: number of action steps returned per sample.
        # future_range: horizon for sampling future goal states; only used by
        #   the currently disabled goal-sampling code in _load_episode.
        # img_gen_frame_diff: fixed frame offset for image-generation targets;
        #   any negative value switches to random offsets (random_frame_diff).
        super().__init__(*args, **kwargs)
        self.obs_seq_len = obs_seq_len
        self.action_seq_len = action_seq_len
        self.future_range = future_range  # Number of steps into the future to sample goals
        self.ep_start_end_ids = np.load(self.abs_datasets_dir / "ep_start_end_ids.npy")  # Load sequence boundaries
        self.img_gen_frame_diff = img_gen_frame_diff
        self.random_frame_diff = False if img_gen_frame_diff > -1 else True
        # self.min_window_size = self.action_seq_len
        # self.max_window_size = self.action_seq_len + self.future_range

    def find_sequence_boundaries(self, idx: int) -> Tuple[int, int]:
        """Return the (start, end) frame ids of the episode containing ``idx``.

        Raises:
            ValueError: if ``idx`` lies outside every recorded episode.
        """
        for start_idx, end_idx in self.ep_start_end_ids:
            if start_idx <= idx < end_idx:
                return start_idx, end_idx
        raise ValueError(f"Index {idx} does not belong to any sequence.")

    def _load_episode(self, idx: int, window_size: int) -> Dict[str, np.ndarray]:
        """
        Load consecutive frames saved as individual files on disk and combine to episode dict.

        The returned dict holds the first ``obs_seq_len`` frames of every
        observation modality, plus ``action_seq_len`` actions starting at the
        last observation frame. ``window_size`` is accepted for interface
        compatibility with the base class but is not used in the active code.

        Args:
            idx: Index of first frame.
            window_size: Length of sampled episode.

        Returns:
            episode: Dict of numpy arrays containing the episode where keys are the names of modalities.
        """
        start_idx = self.episode_lookup[idx]
        # Window spans obs_seq_len + action_seq_len - 1 frames: the last
        # observation frame doubles as the first action frame.
        end_idx = start_idx + self.action_seq_len + self.obs_seq_len-1
        keys = list(chain(*self.observation_space.values()))
        keys.remove("language")
        keys.append("scene_obs")
        episodes = [self.load_file(self._get_episode_name(file_idx)) for file_idx in range(start_idx, end_idx)]

        episode = {}
        for key in keys:
            # "gen" modalities are produced elsewhere; skip them here.
            if 'gen' in key:
                continue
            stacked_data = np.stack([ep[key] for ep in episodes])
            if key == "rel_actions" or key == 'actions':
                # Action chunk starts at the last observation frame.
                episode[key] = stacked_data[(self.obs_seq_len-1):((self.obs_seq_len-1) + self.action_seq_len), :]
            else:
                episode[key] = stacked_data[:self.obs_seq_len, :]

        if self.with_lang:
            episode["language"] = self.lang_ann[self.lang_lookup[idx]][0]  # TODO check [0]
            episode["language_text"] = self.lang_text[self.lang_lookup[idx]]  #[0] # TODO check [0]

        # The block below sampled a random future frame as a goal observation
        # and merged it into the episode; it is intentionally disabled.
        # get the random future state as goal
        # goal_idx = end_idx + window_size
        # # print(start_idx, end_idx, goal_idx)
        # eps_start_idx, eps_end_idx = self.find_sequence_boundaries(end_idx)
        #
        # # Check if future goal can be sampled
        #
        # if eps_end_idx < goal_idx:
        #     goal_idx = eps_end_idx

        # goal_episodes = self.load_file(self._get_episode_name(goal_idx))
        # goal_episode = {}
        # for key in keys:
        #     if 'gen' in key:
        #         continue
        #     goal_stacked_data = np.stack([goal_episodes[key]])
        #     if key == "rel_actions" or key == 'actions':
        #         pass
        #     else:
        #         goal_episode[key] = goal_stacked_data[:self.obs_seq_len, :]
        # # store for merging
        #
        # episode = self.merge_episodes(episode, goal_episode)
        return episode

    def merge_episodes(self, episode1: Dict[str, np.ndarray], episode2: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
        """Merge two episode dicts.

        Keys present in both inputs are concatenated along axis 0; keys present
        in only one input are passed through unchanged.
        """
        merged_episode = {}
        all_keys = set(episode1.keys()).union(set(episode2.keys()))
        for key in all_keys:
            if key in episode1 and key in episode2:
                # Merge logic here, for example:
                merged_episode[key] = np.concatenate([episode1[key], episode2[key]], axis=0)
            elif key in episode1:
                merged_episode[key] = episode1[key]
            else:
                merged_episode[key] = episode2[key]
        return merged_episode

    def _build_file_indices(self, abs_datasets_dir: Path) -> np.ndarray:
        """
        This method builds the mapping from index to file_name used for loading the episodes of the non language
        dataset.

        Args:
            abs_datasets_dir: Absolute path of the directory containing the dataset.

        Returns:
            episode_lookup: Mapping from training example index to episode (file) index.
        """
        assert abs_datasets_dir.is_dir()

        episode_lookup = []

        ep_start_end_ids = np.load(abs_datasets_dir / "ep_start_end_ids.npy")
        logger.info(f'Found "ep_start_end_ids.npy" with {len(ep_start_end_ids)} episodes.')
        for start_idx, end_idx in ep_start_end_ids:
            # Each episode must be long enough for at least one maximum-size window.
            assert end_idx > self.max_window_size
            for idx in range(start_idx, end_idx + 1 - self.min_window_size):
                episode_lookup.append(idx)
        return np.array(episode_lookup)
|
| 280 |
+
|
code/policy_models/datasets/hulc_data_module.py
ADDED
|
@@ -0,0 +1,160 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import os
|
| 3 |
+
from pathlib import Path
|
| 4 |
+
from typing import Dict, List
|
| 5 |
+
|
| 6 |
+
import hydra
|
| 7 |
+
import numpy as np
|
| 8 |
+
from omegaconf import DictConfig, OmegaConf
|
| 9 |
+
|
| 10 |
+
from torch.utils.data import DataLoader
|
| 11 |
+
import torchvision
|
| 12 |
+
import pytorch_lightning as pl
|
| 13 |
+
|
| 14 |
+
import policy_models
|
| 15 |
+
from policy_models.datasets.utils.episode_utils import load_dataset_statistics
|
| 16 |
+
from policy_models.datasets.utils.shared_memory_utils import load_shm_lookup, save_shm_lookup, SharedMemoryLoader
|
| 17 |
+
|
| 18 |
+
logger = logging.getLogger(__name__)
|
| 19 |
+
DEFAULT_TRANSFORM = OmegaConf.create({"train": None, "val": None})
|
| 20 |
+
ONE_EP_DATASET_URL = "http://www.informatik.uni-freiburg.de/~meeso/50steps.tar.xz"
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class HulcDataModule(pl.LightningDataModule):
    """Lightning data module for the HULC/CALVIN datasets.

    One dataset is instantiated per entry of the ``datasets`` config, and
    ``train_dataloader``/``val_dataloader`` return dicts of dataloaders keyed
    by the dataset's ``key`` (modality).
    """

    def __init__(
        self,
        datasets: DictConfig,
        root_data_dir: str = "data",
        num_workers: int = 8,
        transforms: DictConfig = DEFAULT_TRANSFORM,
        shuffle_val: bool = False,
        **kwargs: Dict,
    ):
        super().__init__()
        self.datasets_cfg = datasets
        self.train_datasets = None
        self.val_datasets = None
        self.train_sampler = None
        self.val_sampler = None
        self.num_workers = num_workers
        root_data_path = Path(root_data_dir)
        if not root_data_path.is_absolute():
            # Relative paths are resolved against the installed package directory.
            root_data_path = Path(policy_models.__file__).parent / root_data_path
        self.training_dir = root_data_path / "training"
        self.val_dir = root_data_path / "validation"
        self.shuffle_val = shuffle_val
        self.modalities: List[str] = []
        self.transforms = transforms
        # Shared-memory dataset support is currently disabled (see commented code below).
        self.use_shm = False

        #if 'lang_dataset' in self.datasets_cfg:
        #    if "shm_dataset" in self.datasets_cfg.lang_dataset._target_:
        #        self.use_shm = "shm_dataset" in self.datasets_cfg.lang_dataset._target_
        #    else:
        #        self.use_shm = False
        #elif 'shm_dataset' in self.datasets_cfg.vision_dataset._target_:
        #    self.use_shm = True
        #else:
        #    self.use_shm = False

    def prepare_data(self, *args, **kwargs):
        """Download a small debug dataset when no data exists.

        Outside CI this prompts the user interactively before downloading.
        """
        # check if files already exist
        dataset_exist = np.any([len(list(self.training_dir.glob(extension))) for extension in ["*.npz", "*.pkl"]])

        # download and unpack images
        if not dataset_exist:
            if "CI" not in os.environ:
                print(f"No dataset found in {self.training_dir}.")
                print("For information how to download to full CALVIN dataset, please visit")
                print("https://github.com/mees/calvin/tree/main/dataset")
                print("Do you wish to download small debug dataset to continue training?")
                s = input("YES / no")
                if s == "no":
                    exit()
            logger.info(f"downloading dataset to {self.training_dir} and {self.val_dir}")
            torchvision.datasets.utils.download_and_extract_archive(ONE_EP_DATASET_URL, self.training_dir)
            torchvision.datasets.utils.download_and_extract_archive(ONE_EP_DATASET_URL, self.val_dir)

        # if self.use_shm:
        #     # When using shared memory dataset, initialize lookups
        #     train_shmem_loader = SharedMemoryLoader(self.datasets_cfg, self.training_dir)
        #     train_shm_lookup = train_shmem_loader.load_data_in_shared_memory()
        #
        #     val_shmem_loader = SharedMemoryLoader(self.datasets_cfg, self.val_dir)
        #     val_shm_lookup = val_shmem_loader.load_data_in_shared_memory()
        #
        #     save_shm_lookup(train_shm_lookup, val_shm_lookup)

    def setup(self, stage=None):
        """Instantiate per-camera transforms and train/val datasets from the config."""
        transforms = load_dataset_statistics(self.training_dir, self.val_dir, self.transforms)

        # self.train_transforms = {
        #     cam: [hydra.utils.instantiate(transform) for transform in transforms.train[cam]] for cam in transforms.train
        #}
        self.train_transforms = {}
        for cam in transforms.train:
            # print("Processing camera:", cam)
            cam_transforms = []
            for transform in transforms.train[cam]:
                # print("Instantiating transform for camera", cam, ":", transform)
                # ColorJitter is built manually because its list-typed config
                # values must be converted to tuples before instantiation.
                if transform._target_ == "torchvision.transforms.ColorJitter":
                    instantiated_transform = torchvision.transforms.ColorJitter(
                        brightness=transform.brightness,
                        contrast=tuple(transform.contrast),
                        saturation=tuple(transform.saturation),
                    )
                else:
                    instantiated_transform = hydra.utils.instantiate(transform)
                cam_transforms.append(instantiated_transform)
            self.train_transforms[cam] = cam_transforms

        self.val_transforms = {
            cam: [hydra.utils.instantiate(transform) for transform in transforms.val[cam]] for cam in transforms.val
        }
        self.train_transforms = {key: torchvision.transforms.Compose(val) for key, val in self.train_transforms.items()}
        self.val_transforms = {key: torchvision.transforms.Compose(val) for key, val in self.val_transforms.items()}
        self.train_datasets, self.train_sampler, self.val_datasets, self.val_sampler = {}, {}, {}, {}

        # if self.use_shm:
        #     train_shm_lookup, val_shm_lookup = load_shm_lookup()

        for _, dataset in self.datasets_cfg.items():
            # Skip the language-embedding config entry; it is not a dataset.
            if dataset == 'lang_paraphrase-MiniLM-L3-v2':
                continue
            else:
                train_dataset = hydra.utils.instantiate(
                    dataset, datasets_dir=self.training_dir, transforms=self.train_transforms
                )
                val_dataset = hydra.utils.instantiate(dataset, datasets_dir=self.val_dir, transforms=self.val_transforms)
                # if self.use_shm:
                #     train_dataset.setup_shm_lookup(train_shm_lookup)
                #     val_dataset.setup_shm_lookup(val_shm_lookup)
                key = dataset.key
                self.train_datasets[key] = train_dataset
                self.val_datasets[key] = val_dataset
                self.modalities.append(key)

    def train_dataloader(self):
        """Return one shuffled DataLoader per training modality."""
        return {
            key: DataLoader(
                dataset,
                batch_size=dataset.batch_size,
                num_workers=dataset.num_workers,
                pin_memory=True,
                shuffle=True,
                prefetch_factor=2,
            )
            for key, dataset in self.train_datasets.items()
        }

    def val_dataloader(self):
        """Return one DataLoader per validation modality.

        NOTE(review): self.shuffle_val is stored in __init__ but not applied
        here — confirm whether validation shuffling is intended.
        """
        return {
            key: DataLoader(
                dataset,
                batch_size=dataset.batch_size,
                num_workers=dataset.num_workers,
                pin_memory=True,
            )
            for key, dataset in self.val_datasets.items()
        }
|
| 160 |
+
|
code/policy_models/datasets/mvlibero_dataset.py
ADDED
|
@@ -0,0 +1,230 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (2024) Bytedance Ltd. and/or its affiliates
|
| 2 |
+
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import json
|
| 16 |
+
import os
|
| 17 |
+
import random
|
| 18 |
+
import warnings
|
| 19 |
+
import traceback
|
| 20 |
+
import argparse
|
| 21 |
+
from omegaconf import OmegaConf
|
| 22 |
+
from tqdm import tqdm
|
| 23 |
+
from torchvision import transforms as T
|
| 24 |
+
import torch
|
| 25 |
+
from torch.utils.data import Dataset,DataLoader
|
| 26 |
+
import numpy as np
|
| 27 |
+
import imageio
|
| 28 |
+
from decord import VideoReader, cpu
|
| 29 |
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
| 30 |
+
from einops import rearrange
|
| 31 |
+
|
| 32 |
+
# from mdt.datasets.utils.dataset_util import euler2rotm, rotm2euler
|
| 33 |
+
# from mdt.datasets.utils.video_transforms import Resize_Preprocess, ToTensorVideo
|
| 34 |
+
# from mdt.datasets.utils.util import update_paths
|
| 35 |
+
from scipy.spatial.transform import Rotation as R
|
| 36 |
+
import decord
|
| 37 |
+
|
| 38 |
+
class Dataset_mvlibero(Dataset):
    """Multi-view LIBERO dataset.

    Each sample provides pre-encoded (SVD-VAE) latent observations from three
    cameras, a normalized action chunk, the current state, and the language
    instruction.
    """

    def __init__(
        self,
        args,
        mode = 'val',
    ):
        """Constructor."""
        super().__init__()
        self.args = args
        self.mode = mode
        data_json_path = args.data_json_path
        data_root_path = args.data_root_path

        # dataset structure
        # dataset_dir/dataset_name/annotation_name/mode/traj
        # dataset_dir/dataset_name/video/mode/traj
        # dataset_dir/dataset_name/latent_video/mode/traj

        # samples: {'ann_file':xxx, 'frame_idx':xxx, 'dataset_name':xxx}

        # prepare all datasets path
        self.video_path = []
        data_json_path = f'{data_json_path}/{mode}_all.json'
        with open(data_json_path, "r") as f:
            self.samples = json.load(f)
        self.video_path = [os.path.join(data_root_path, sample['dataset_name']) for sample in self.samples]

        print(f"ALL dataset, {len(self.samples)} samples in total")

        # with open(f'{self.args.action_json}', "r") as f:
        #     self.stat = json.load(f)
        # 1st/99th percentile bounds used for action/state normalization.
        self.a_min = np.array(args.action_01)[None,:]
        self.a_max = np.array(args.action_99)[None,:]
        self.s_min = np.array(args.state_01)[None,:]
        self.s_max = np.array(args.state_99)[None,:]
        print(f"action min: {self.a_min.shape}, action max: {self.a_max.shape}")

    def __len__(self):
        # One entry per record of the split json.
        return len(self.samples)

    def _load_latent_video(self, video_path, frame_ids):
        """Load a torch-saved latent tensor and select the requested frames."""
        # video_path = video_path.split('/')[:-1]
        # video_path = '/'.join(video_path)+'/0.pt'

        # print(video_path)
        with open(video_path,'rb') as file:
            video_tensor = torch.load(file)
            video_tensor.requires_grad = False
        # vr = VideoReader(video_path, ctx=cpu(0), num_threads=2)
        # print(video_tensor.size(),np.array(frame_ids))
        # NOTE(review): this try/except re-raises a message-less AssertionError,
        # discarding which file/indices were out of range.
        try:
            assert (np.array(frame_ids) < video_tensor.size()[0]).all()
            assert (np.array(frame_ids) >= 0).all()
        except:
            assert False
        frame_data = video_tensor[frame_ids]
        return frame_data

    def _get_frames(self, label, frame_ids, cam_id, pre_encode, video_dir, use_img_cond=False):
        """Load frames for one camera; only the pre-encoded latent path is active."""
        # directly load videos latent after svd-vae encoder
        assert cam_id is not None
        assert pre_encode == True
        if pre_encode:
            video_path = label['latent_videos'][cam_id]['latent_video_path']
            try:
                video_path = os.path.join(video_dir,video_path)
                frames = self._load_latent_video(video_path, frame_ids)
            except:
                # Fallback: some datasets store latents under "latent_videos_svd".
                video_path = video_path.replace("latent_videos", "latent_videos_svd")
                frames = self._load_latent_video(video_path, frame_ids)
        # load original videos
        else:
            if use_img_cond:
                frame_ids = frame_ids[0]
            video_path = label['videos'][cam_id]['video_path']
            video_path = os.path.join(video_dir,video_path)
            # frames = self._load_video(video_path, frame_ids)
            # frames = mediapy.read_video(video_path)
            vr = decord.VideoReader(video_path)
            frames = vr[frame_ids].asnumpy()
            frames = torch.from_numpy(frames).permute(2,0,1).unsqueeze(0) # (frame, h, w, c) -> (frame, c, h, w)
            # resize the video to self.args.video_size
            frames = self.preprocess(frames)
        return frames

    def _get_obs(self, label, frame_ids, cam_id, pre_encode, video_dir):
        """Return (frames, cam_id); a random camera is drawn when cam_id is None."""
        if cam_id is None:
            # NOTE(review): self.cam_ids is not set in the visible constructor —
            # confirm it exists before calling with cam_id=None.
            temp_cam_id = random.choice(self.cam_ids)
        else:
            temp_cam_id = cam_id
        frames = self._get_frames(label, frame_ids, cam_id = temp_cam_id, pre_encode = pre_encode, video_dir=video_dir)
        return frames, temp_cam_id

    def normalize_bound(
        self,
        data: np.ndarray,
        data_min: np.ndarray,
        data_max: np.ndarray,
        clip_min: float = -1,
        clip_max: float = 1,
        eps: float = 1e-8,
    ) -> np.ndarray:
        """Scale data from [data_min, data_max] into [-1, 1], clipped to [clip_min, clip_max]."""
        ndata = 2 * (data - data_min) / (data_max - data_min + eps) - 1
        return np.clip(ndata, clip_min, clip_max)

    def denormalize_bound(
        self,
        data: np.ndarray,
        data_min: np.ndarray,
        data_max: np.ndarray,
        clip_min: float = -1,
        clip_max: float = 1,
        eps=1e-8,
    ) -> np.ndarray:
        """Inverse of normalize_bound: map [clip_min, clip_max] back to [data_min, data_max]."""
        clip_range = clip_max - clip_min
        rdata = (data - clip_min) / clip_range * (data_max - data_min) + data_min
        return rdata

    def process_action_xhand(self, label,frame_ids, rel = False):
        """Build the normalized (action chunk, current state) pair for one sample.

        Actions are deltas of the commanded values relative to the current
        state, normalized with the percentile bounds from the constructor.
        NOTE(review): the ``rel`` parameter is unused in this variant.
        """
        num_frames = len(frame_ids)  # NOTE(review): unused
        frame_ids = frame_ids[:int(self.args.num_frames)] # (f)
        states = np.array(label['states'])[frame_ids] #(f, 38)
        command = np.array(label['actions'])[frame_ids]

        # print(f'states: {states.shape}, actions: {command.shape}')

        state = states[0:1] # current state

        a_dim = command.shape[-1]
        action_base = state[:,:a_dim] #(1,38)
        actions = command - action_base #(self.args.num_frames,38)

        # normalize
        action_scaled = self.normalize_bound(actions, self.a_min, self.a_max)
        state_scaled = self.normalize_bound(state, self.s_min, self.s_max)
        return torch.from_numpy(action_scaled).float(), torch.from_numpy(state_scaled).float()

    def __getitem__(self, index, cam_id = None, return_video = False):
        """Assemble one training sample: actions, state, language and 3-camera latents."""

        sample = self.samples[index]
        sampled_video_dir = self.video_path[index]

        ann_file = sample['ann_file']
        # dataset_name = sample['dataset_name']
        ann_file = f'{sampled_video_dir}/{ann_file}'
        frame_ids = sample['frame_ids']
        with open(ann_file, "r") as f:
            label = json.load(f)

        data = dict()
        # action
        data['actions'], data['state_obs'] = self.process_action_xhand(label,frame_ids,rel=self.args.relative)
        # instructions
        data['lang_text'] = label['texts'][0]
        # observation: only the first frame of the window is used as condition.
        static_latent, cam_id = self._get_obs(label, frame_ids[0], cam_id=0, pre_encode=self.args.pre_encode,video_dir=sampled_video_dir)
        gripper_latent, cam_id = self._get_obs(label, frame_ids[0], cam_id=1, pre_encode=self.args.pre_encode,video_dir=sampled_video_dir)
        gripper_latent2, cam_id = self._get_obs(label, frame_ids[0], cam_id=2, pre_encode=self.args.pre_encode,video_dir=sampled_video_dir)
        static_latent = static_latent.unsqueeze(0)
        gripper_latent = gripper_latent.unsqueeze(0) # (1,4,32,32)
        gripper_latent2 = gripper_latent2.unsqueeze(0)

        # one sample
        rgb_obs = {'rgb_static': static_latent, 'rgb_gripper': gripper_latent, 'rgb_gripper2': gripper_latent2}
        data['rgb_obs'] = rgb_obs
        data['ann_file'] = ann_file
        data['frame_ids'] = frame_ids

        return data
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
if __name__ == "__main__":
    # Smoke test: build the validation split from the hydra config and iterate
    # it once, printing each batch's annotation file and observation count.
    from hydra import compose, initialize
    from omegaconf import OmegaConf
    with initialize(config_path="../../conf", job_name="VPP_xbot_train.yaml"):
        cfg = compose(config_name="VPP_xbot_train")

    # Fix: this module defines Dataset_mvlibero; the previous reference to
    # `Dataset_xbot` (defined in xbot_dataset.py) raised a NameError here.
    train_dataset = Dataset_mvlibero(cfg.dataset_args, mode="val")
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=cfg.dataset_args.batch_size,
        shuffle=cfg.dataset_args.shuffle,
    )
    for data in tqdm(train_loader, total=len(train_loader)):
        print(data['ann_file'])
        print(len(data['rgb_obs']))
|
| 229 |
+
|
| 230 |
+
|
code/policy_models/datasets/real_dataset.py
ADDED
|
@@ -0,0 +1,287 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (2024) Bytedance Ltd. and/or its affiliates
|
| 2 |
+
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import json
|
| 16 |
+
import os
|
| 17 |
+
import random
|
| 18 |
+
import warnings
|
| 19 |
+
import traceback
|
| 20 |
+
import argparse
|
| 21 |
+
from omegaconf import OmegaConf
|
| 22 |
+
from tqdm import tqdm
|
| 23 |
+
from torchvision import transforms as T
|
| 24 |
+
import torch
|
| 25 |
+
from torch.utils.data import Dataset,DataLoader
|
| 26 |
+
import numpy as np
|
| 27 |
+
import imageio
|
| 28 |
+
from decord import VideoReader, cpu
|
| 29 |
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
| 30 |
+
from einops import rearrange
|
| 31 |
+
|
| 32 |
+
# from mdt.datasets.utils.dataset_util import euler2rotm, rotm2euler
|
| 33 |
+
# from mdt.datasets.utils.video_transforms import Resize_Preprocess, ToTensorVideo
|
| 34 |
+
# from mdt.datasets.utils.util import update_paths
|
| 35 |
+
from scipy.spatial.transform import Rotation as R
|
| 36 |
+
import decord
|
| 37 |
+
|
| 38 |
+
class Dataset_policy(Dataset):
|
| 39 |
+
    def __init__(
        self,
        args,
        mode = 'val',
        data_json_path = '/localssd/gyj/opensource_robotdata/annotation_all/0407',
        data_root_path = '/localssd/gyj/opensource_robotdata/',
    ):
        """Constructor.

        Args:
            args: Config namespace; must provide action_01/action_99 and
                state_01/state_99 percentile bounds (plus fields read by the
                other methods).
            mode: Split name; selects ``{mode}_all.json``.
            data_json_path: Directory containing the split json files.
                NOTE(review): machine-specific default path — consider requiring
                it from the config instead.
            data_root_path: Root directory the per-sample dataset dirs live under.
        """
        super().__init__()
        self.args = args
        self.mode = mode

        # dataset structure
        # dataset_dir/dataset_name/annotation_name/mode/traj
        # dataset_dir/dataset_name/video/mode/traj
        # dataset_dir/dataset_name/latent_video/mode/traj

        # samples: {'ann_file':xxx, 'frame_idx':xxx, 'dataset_name':xxx}

        # prepare all datasets path
        self.video_path = []
        data_json_path = f'{data_json_path}/{mode}_all.json'
        with open(data_json_path, "r") as f:
            self.samples = json.load(f)
        self.video_path = [os.path.join(data_root_path, sample['dataset_name']) for sample in self.samples]

        print(f"ALL dataset, {len(self.samples)} samples in total")

        # 1st/99th percentile bounds used for action/state normalization.
        self.a_min = np.array(args.action_01)[None,:]
        self.a_max = np.array(args.action_99)[None,:]
        self.s_min = np.array(args.state_01)[None,:]
        self.s_max = np.array(args.state_99)[None,:]
|
| 71 |
+
|
| 72 |
+
def __len__(self):
|
| 73 |
+
return len(self.samples)
|
| 74 |
+
|
| 75 |
+
def _load_latent_video(self, video_path, frame_ids):
|
| 76 |
+
# video_path = video_path.split('/')[:-1]
|
| 77 |
+
# video_path = '/'.join(video_path)+'/0.pt'
|
| 78 |
+
|
| 79 |
+
# print(video_path)
|
| 80 |
+
with open(video_path,'rb') as file:
|
| 81 |
+
video_tensor = torch.load(file)
|
| 82 |
+
video_tensor.requires_grad = False
|
| 83 |
+
# vr = VideoReader(video_path, ctx=cpu(0), num_threads=2)
|
| 84 |
+
# print(video_tensor.size(),np.array(frame_ids))
|
| 85 |
+
try:
|
| 86 |
+
assert (np.array(frame_ids) < video_tensor.size()[0]).all()
|
| 87 |
+
assert (np.array(frame_ids) >= 0).all()
|
| 88 |
+
except:
|
| 89 |
+
assert False
|
| 90 |
+
frame_data = video_tensor[frame_ids]
|
| 91 |
+
return frame_data
|
| 92 |
+
|
| 93 |
+
    def _get_frames(self, label, frame_ids, cam_id, pre_encode, video_dir, use_img_cond=False):
        """Load frames for one camera, as pre-encoded latents or raw video.

        Only the pre-encoded path is currently supported (asserted below); the
        raw-video branch is kept for reference.
        """
        # directly load videos latent after svd-vae encoder
        assert cam_id is not None
        assert pre_encode == True
        if pre_encode:
            video_path = label['latent_videos'][cam_id]['latent_video_path']
            try:
                video_path = os.path.join(video_dir,video_path)
                frames = self._load_latent_video(video_path, frame_ids)
            except:
                # Fallback: some datasets store latents under "latent_videos_svd".
                # NOTE(review): bare except also hides unrelated errors — consider
                # narrowing to FileNotFoundError.
                video_path = video_path.replace("latent_videos", "latent_videos_svd")
                frames = self._load_latent_video(video_path, frame_ids)
        # load original videos
        else:
            if use_img_cond:
                # Image conditioning uses only the first frame of the window.
                frame_ids = frame_ids[0]
            video_path = label['videos'][cam_id]['video_path']
            video_path = os.path.join(video_dir,video_path)
            # frames = self._load_video(video_path, frame_ids)
            # frames = mediapy.read_video(video_path)
            vr = decord.VideoReader(video_path)
            frames = vr[frame_ids].asnumpy()
            frames = torch.from_numpy(frames).permute(2,0,1).unsqueeze(0) # (frame, h, w, c) -> (frame, c, h, w)
            # resize the video to self.args.video_size
            # NOTE(review): self.preprocess is not defined in the visible class —
            # confirm it is set elsewhere before using the raw-video branch.
            frames = self.preprocess(frames)
        return frames
|
| 119 |
+
|
| 120 |
+
def _get_obs(self, label, frame_ids, cam_id, pre_encode, video_dir):
|
| 121 |
+
if cam_id is None:
|
| 122 |
+
temp_cam_id = random.choice(self.cam_ids)
|
| 123 |
+
else:
|
| 124 |
+
temp_cam_id = cam_id
|
| 125 |
+
frames = self._get_frames(label, frame_ids, cam_id = temp_cam_id, pre_encode = pre_encode, video_dir=video_dir)
|
| 126 |
+
return frames, temp_cam_id
|
| 127 |
+
|
| 128 |
+
def process_action_xhand(self, label, frame_ids, rel=False):
    """Build (action, state) tensors from per-frame robot states.

    Args:
        label: annotation dict with 'states' and 'actions' arrays of shape
            (T, 19): xyz (3), quaternion (4), hand joints (12).
        frame_ids: frame indices; truncated to num_frames + 1 entries.
        rel: if True, actions are deltas relative to the first frame;
            otherwise they are (mu, std)-normalized absolute values.

    Returns:
        (action, state_input) float tensors; action has num_frames rows.
    """
    frame_ids = frame_ids[:int(self.args.num_frames + 1)]
    states = np.array(label['states'])[frame_ids]
    command = np.array(label['actions'])[frame_ids]

    state_input = states[0:1]  # (1, 19)
    # Canonicalize the quaternion sign: keep its first component >= 0.
    if state_input[0, 3] < 0:
        state_input[0, 3:7] *= -1

    # Learn either the commanded actions or the measured states.
    states_raw = command if self.args.learn_command else states

    if not rel:
        state_next = states_raw[:-1]
        mu = np.array(self.args.mu)
        std = np.array(self.args.std)
        state_input = (state_input - mu) / std
        action_scaled = (state_next - mu) / std
    else:
        # Actions relative to the first frame.
        xyz, rot, hand = states_raw[:, :3], states_raw[:, 3:7], states_raw[:, 7:]
        current_xyz = state_input[:, :3]
        delta_xyz = (xyz[:-1] - current_xyz) * self.args.rel_xyz_scale
        current_quat = state_input[:, 3:7]
        current_rotm = R.from_quat(current_quat[0]).as_matrix()
        # Relative rotation of each (non-final) frame w.r.t. the first frame.
        # (Original code built matrices for range(len(rot - 1)), which only
        # equals len(rot) by numpy broadcasting; build rot[:-1] directly.)
        rel_rotm = [current_rotm.T @ R.from_quat(q).as_matrix() for q in rot[:-1]]
        rel_rpy = [R.from_matrix(m).as_euler('xyz', degrees=False) for m in rel_rotm]
        rel_rpy = np.array(rel_rpy) * self.args.rel_rot_scale
        hand = hand[:-1] * self.args.rel_hand_scale

        action_scaled = np.concatenate([delta_xyz, rel_rpy, hand], axis=1)  # (num_frames, 18)

        # NOTE(review): norm_input is applied only in the relative branch
        # (absolute branch already normalizes above) — confirm against config.
        if self.args.norm_input:
            mu = np.array(self.args.mu)
            std = np.array(self.args.std)
            state_input = (state_input - mu) / std

    return torch.from_numpy(action_scaled).float(), torch.from_numpy(state_input).float()
|
| 174 |
+
def normalize_bound(
    self,
    data: np.ndarray,
    data_min: np.ndarray,
    data_max: np.ndarray,
    clip_min: float = -1,
    clip_max: float = 1,
    eps: float = 1e-8,
) -> np.ndarray:
    """Map data from [data_min, data_max] to [-1, 1], clipped to [clip_min, clip_max].

    eps guards against division by zero when data_min == data_max.
    """
    span = data_max - data_min + eps
    scaled = (data - data_min) / span * 2 - 1
    return np.clip(scaled, clip_min, clip_max)
|
| 186 |
+
def denormalize_bound(
    self,
    data: np.ndarray,
    data_min: np.ndarray,
    data_max: np.ndarray,
    clip_min: float = -1,
    clip_max: float = 1,
    eps=1e-8,
) -> np.ndarray:
    """Inverse of normalize_bound: map data from [clip_min, clip_max] back to [data_min, data_max]."""
    fraction = (data - clip_min) / (clip_max - clip_min)
    return fraction * (data_max - data_min) + data_min
|
| 199 |
+
def process_action_xhand_v2(self, label, frame_ids, rel=False):
    """Variant of process_action_xhand using per-dimension bound normalization.

    Actions are always relative to the first frame (delta xyz, relative
    roll/pitch/yaw, raw hand joints) and normalized to [-1, 1] against
    self.a_min/a_max; the first-frame state is normalized against
    self.s_min/s_max.

    Args:
        label: annotation dict with 'states' and 'actions' (T, 19) arrays.
        frame_ids: frame indices; truncated to num_frames entries.
        rel: accepted for signature compatibility with process_action_xhand
            but unused — this variant is always relative.

    Returns:
        (action_scaled, state_scaled) float tensors of shapes
        (num_frames, 18) and (1, 19).
    """
    frame_ids = frame_ids[:int(self.args.num_frames)]
    states = np.array(label['states'])[frame_ids]
    command = np.array(label['actions'])[frame_ids]

    state_input = states[0:1]  # (1, 19)
    # Canonicalize the quaternion sign: keep its first component >= 0.
    if state_input[0, 3] < 0:
        state_input[0, 3:7] *= -1

    states_raw = command if self.args.learn_command else states

    xyz, rot, hand = states_raw[:, :3], states_raw[:, 3:7], states_raw[:, 7:]
    delta_xyz = xyz - state_input[:, :3]
    # Relative rotation of every frame w.r.t. the first frame, as xyz Euler angles.
    current_rotm = R.from_quat(state_input[0, 3:7]).as_matrix()
    rel_rotm = [current_rotm.T @ R.from_quat(q).as_matrix() for q in rot]
    rel_rpy = [R.from_matrix(m).as_euler('xyz', degrees=False) for m in rel_rotm]

    action = np.concatenate([delta_xyz, rel_rpy, hand], axis=1)  # (num_frames, 18)

    action_scaled = self.normalize_bound(action, self.a_min, self.a_max)
    state_scaled = self.normalize_bound(state_input, self.s_min, self.s_max)

    return torch.from_numpy(action_scaled).float(), torch.from_numpy(state_scaled).float()
|
| 232 |
+
def __getitem__(self, index, cam_id=None, return_video=False):
    """Load one training sample: actions, state, language text and camera latents."""
    sample = self.samples[index]
    sampled_video_dir = self.video_path[index]

    ann_file = f"{sampled_video_dir}/{sample['ann_file']}"
    frame_ids = sample['frame_ids']
    with open(ann_file, "r") as f:
        label = json.load(f)

    data = dict()
    # Actions / proprioceptive state.
    if self.args.action_v2:
        data['actions'], data['state_obs'] = self.process_action_xhand_v2(
            label, frame_ids, rel=self.args.relative)
    else:
        data['actions'], data['state_obs'] = self.process_action_xhand(
            label, frame_ids, rel=self.args.relative)
    # Language instruction.
    data['lang_text'] = label['texts'][0]
    # Visual observations: first frame of each camera.
    static_latent, cam_id = self._get_obs(
        label, frame_ids[0], cam_id=0, pre_encode=self.args.pre_encode,
        video_dir=sampled_video_dir)
    gripper_latent, cam_id = self._get_obs(
        label, frame_ids[0], cam_id=1, pre_encode=self.args.pre_encode,
        video_dir=sampled_video_dir)
    data['rgb_obs'] = {
        'rgb_static': static_latent.unsqueeze(0),
        'rgb_gripper': gripper_latent.unsqueeze(0),  # (1, 4, 32, 32)
    }
    data['ann_file'] = ann_file
    data['frame_ids'] = frame_ids

    return data
|
| 266 |
+
|
| 267 |
+
if __name__ == "__main__":
    # Smoke test: build the validation dataset from the hydra config and
    # iterate it once, printing each sample's annotation file.
    from hydra import compose, initialize

    with initialize(config_path="../../conf", job_name="VPP_xhand_train.yaml"):
        cfg = compose(config_name="VPP_xhand_train")

    train_dataset = Dataset_policy(cfg.dataset_args, mode="val")
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=cfg.dataset_args.batch_size,
        shuffle=cfg.dataset_args.shuffle,
    )
    for data in tqdm(train_loader, total=len(train_loader)):
        print(data['ann_file'])
|
| 286 |
+
|
| 287 |
+
|
code/policy_models/datasets/shm_dataset.py
ADDED
|
@@ -0,0 +1,176 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
from multiprocessing.shared_memory import SharedMemory
|
| 3 |
+
from typing import Dict, List, Optional
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
from policy_models.datasets.base_dataset import BaseDataset
|
| 8 |
+
|
| 9 |
+
logger = logging.getLogger(__name__)
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class ShmDataset(BaseDataset):
    """
    Dataset that loads episodes from shared memory.
    """

    def __init__(self, *args, **kwargs):  # type: ignore
        super().__init__(*args, **kwargs)
        # All lookup state is filled in later by setup_shm_lookup().
        self.episode_lookup_dict: Dict[str, List] = {}
        self.episode_lookup: Optional[np.ndarray] = None
        self.lang_lookup = None
        self.lang_ann = None
        self.shapes = None
        self.sizes = None
        self.dtypes = None
        self.dataset_type = None
        self.shared_memories = None

    def setup_shm_lookup(self, shm_lookup: Dict) -> None:
        """
        Initialize episode lookups.

        Args:
            shm_lookup: Dictionary containing precomputed lookups.
        """
        if self.with_lang:
            self.episode_lookup_dict = shm_lookup["episode_lookup_lang"]
            self.lang_lookup = shm_lookup["lang_lookup"]
            self.lang_ann = shm_lookup["lang_ann"]
        else:
            self.episode_lookup_dict = shm_lookup["episode_lookup_vision"]
        first_key = next(iter(self.episode_lookup_dict))
        self.episode_lookup = np.array(self.episode_lookup_dict[first_key])[:, 1]
        self.shapes = shm_lookup["shapes"]
        self.sizes = shm_lookup["sizes"]
        self.dtypes = shm_lookup["dtypes"]
        is_training = "training" in self.abs_datasets_dir.as_posix()
        self.dataset_type = "train" if is_training else "val"
        # Attach to the shared memory segments created by the data module.
        self.shared_memories = {
            name: SharedMemory(name=f"{self.dataset_type}_{name}")
            for name in self.episode_lookup_dict
        }

    def _load_episode(self, idx: int, window_size: int) -> Dict[str, np.ndarray]:
        """
        Load consecutive frames from shared memory and combine to episode dict.

        Args:
            idx: Index of first frame.
            window_size: Length of sampled episode.

        Returns:
            episode: Dict of numpy arrays containing the episode where keys are the names of modalities.
        """
        episode = {}
        for name, lookup in self.episode_lookup_dict.items():
            offset, pad = lookup[idx]
            full_shape = (window_size + pad,) + self.shapes[name]
            buffer = self.shared_memories[name].buf
            episode[name] = np.ndarray(full_shape, dtype=self.dtypes[name], buffer=buffer, offset=offset)[pad:]  # type: ignore
        if self.with_lang:
            episode["language"] = self.lang_ann[self.lang_lookup[idx]][0]  # TODO check [0]
        return episode
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
class BesoSHmDataset(BaseDataset):
    """Shared-memory dataset that additionally returns a future goal observation.

    Per-modality windows are truncated to ``action_seq_len`` steps for actions
    and ``obs_seq_len`` steps for everything else; a goal 'scene_obs' frame is
    sampled up to ``future_range`` steps after the action horizon and merged
    into the episode.
    """

    def __init__(
        self,
        obs_seq_len: int,
        action_seq_len: int,
        future_range: int,
        *args,
        **kwargs,
    ):  # type: ignore
        super().__init__(*args, **kwargs)
        # All lookup state is filled in later by setup_shm_lookup().
        self.episode_lookup_dict: Dict[str, List] = {}
        self.episode_lookup: Optional[np.ndarray] = None
        self.lang_lookup = None
        self.lang_ann = None
        self.shapes = None
        self.sizes = None
        self.dtypes = None
        self.dataset_type = None
        self.shared_memories = None

        # Sequence lengths / goal sampling range for this dataset variant.
        self.obs_seq_len = obs_seq_len
        self.action_seq_len = action_seq_len
        self.future_range = future_range

    def setup_shm_lookup(self, shm_lookup: Dict) -> None:
        """
        Initialize episode lookups.

        Args:
            shm_lookup: Dictionary containing precomputed lookups.
        """
        if self.with_lang:
            self.episode_lookup_dict = shm_lookup["episode_lookup_lang"]
            self.lang_lookup = shm_lookup["lang_lookup"]
            self.lang_ann = shm_lookup["lang_ann"]
        else:
            self.episode_lookup_dict = shm_lookup["episode_lookup_vision"]
        key = list(self.episode_lookup_dict.keys())[0]
        self.episode_lookup = np.array(self.episode_lookup_dict[key])[:, 1]
        self.shapes = shm_lookup["shapes"]
        self.sizes = shm_lookup["sizes"]
        self.dtypes = shm_lookup["dtypes"]
        self.dataset_type = "train" if "training" in self.abs_datasets_dir.as_posix() else "val"
        # Attach to the shared memory segments created by the data module.
        self.shared_memories = {
            key: SharedMemory(name=f"{self.dataset_type}_{key}") for key in self.episode_lookup_dict
        }

    def _load_episode(self, idx: int, window_size: int) -> Dict[str, np.ndarray]:
        """
        Load consecutive frames from shared memory, truncate them to the
        configured sequence lengths and append a sampled future goal.

        Args:
            idx: Index of first frame.
            window_size: Length of sampled episode.

        Returns:
            episode: Dict of numpy arrays keyed by modality name.

        Note:
            A dead block that built a ``keys`` list via the never-imported
            ``chain`` (guaranteed NameError at runtime) was removed.
        """
        episode = {}

        # Load main episode data from shared memory.
        for key, lookup in self.episode_lookup_dict.items():
            offset, j = lookup[idx]
            shape = (window_size + j,) + self.shapes[key]
            array = np.ndarray(shape, dtype=self.dtypes[key], buffer=self.shared_memories[key].buf, offset=offset)[j:]

            # Slice the data to match action_seq_len and obs_seq_len.
            if key == "rel_actions" or key == "actions":
                episode[key] = array[: self.action_seq_len, :]
            else:
                episode[key] = array[: self.obs_seq_len, :]

        if self.with_lang:
            episode["language"] = self.lang_ann[self.lang_lookup[idx]][0]  # TODO check [0]

        # Sample a future goal index, clamped to the episode boundary.
        delta = np.random.randint(self.future_range)
        goal_idx = self.episode_lookup[idx] + self.action_seq_len + delta
        eps_start_idx, eps_end_idx = self.find_sequence_boundaries(goal_idx)
        if eps_end_idx < goal_idx:
            goal_idx = eps_end_idx

        # Load the future goal frame ('scene_obs') from shared memory.
        offset, j = self.episode_lookup_dict["scene_obs"][goal_idx]
        shape = (1,) + self.shapes["scene_obs"]
        goal_array = np.ndarray(shape, dtype=self.dtypes["scene_obs"], buffer=self.shared_memories["scene_obs"].buf, offset=offset)[j:]

        goal_episode = {"scene_obs": goal_array[: self.obs_seq_len, :]}

        # Merge episodes.
        return self.merge_episodes(episode, goal_episode)

    def merge_episodes(self, episode1: Dict[str, np.ndarray], episode2: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
        """Merge two episode dicts, concatenating arrays present in both."""
        merged_episode = {}
        for key in set(episode1.keys()).union(set(episode2.keys())):
            if key in episode1 and key in episode2:
                merged_episode[key] = np.concatenate([episode1[key], episode2[key]], axis=0)
            elif key in episode1:
                merged_episode[key] = episode1[key]
            else:
                merged_episode[key] = episode2[key]
        return merged_episode
code/policy_models/datasets/utils/__init__.py
ADDED
|
File without changes
|
code/policy_models/datasets/utils/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (190 Bytes). View file
|
|
|
code/policy_models/datasets/utils/__pycache__/episode_utils.cpython-310.pyc
ADDED
|
Binary file (6.2 kB). View file
|
|
|
code/policy_models/datasets/utils/__pycache__/shared_memory_utils.cpython-310.pyc
ADDED
|
Binary file (12.1 kB). View file
|
|
|
code/policy_models/datasets/utils/episode_utils.py
ADDED
|
@@ -0,0 +1,237 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import os
|
| 3 |
+
from pathlib import Path
|
| 4 |
+
import re
|
| 5 |
+
from typing import Dict, Tuple
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
from omegaconf import DictConfig, ListConfig, OmegaConf
|
| 9 |
+
import torch
|
| 10 |
+
|
| 11 |
+
logger = logging.getLogger(__name__)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def process_state(
    episode: Dict[str, np.ndarray],
    observation_space: DictConfig,
    transforms: Dict,
    proprio_state: DictConfig,
    seq_idx: int = 0,
    window_size: int = 0,
) -> Dict[str, torch.Tensor]:
    """Assemble the proprioceptive state tensor from the requested modalities."""
    normalized, unnormalized = [], []
    for key in observation_space["state_obs"]:
        if window_size == 0 and seq_idx == 0:  # single file loader
            tensor = torch.from_numpy(episode[key]).float()
        else:  # episode loader
            tensor = torch.from_numpy(episode[key][seq_idx: seq_idx + window_size]).float()
        # Expand dims for single environment obs.
        if len(tensor.shape) != 2:
            tensor = tensor.unsqueeze(0)
        assert len(tensor.shape) == 2  # shape: (B, N_state_obs)
        normalized.append(transforms[key](tensor) if key in transforms else tensor)
        unnormalized.append(tensor)
    seq_state_obs = torch.cat(normalized, dim=1)
    seq_state_obs_unnormalized = torch.cat(unnormalized, dim=1)

    # Optionally restore the raw (unnormalized) orientation components.
    if not proprio_state.normalize_robot_orientation and "robot_orientation_idx" in proprio_state:
        orient = slice(*proprio_state.robot_orientation_idx)
        seq_state_obs[:, orient] = seq_state_obs_unnormalized[:, orient]

    if not proprio_state.normalize:
        seq_state_obs = seq_state_obs_unnormalized

    # Keep only the configured slices of the proprioception vector.
    kept = [seq_state_obs[:, slice(*ids)] for ids in proprio_state.keep_indices]
    return {"robot_obs": torch.cat(kept, dim=1)}
|
| 61 |
+
|
| 62 |
+
def process_rgb(
    episode: Dict[str, np.ndarray],
    observation_space: DictConfig,
    transforms: Dict,
    seq_idx: int = 0,
    window_size: int = 0,
) -> Dict[str, Dict[str, torch.Tensor]]:
    """Convert the requested RGB modalities to (B, C, H, W) byte tensors."""
    seq_rgb_obs_dict = {}
    for rgb_key in observation_space["rgb_obs"]:
        if rgb_key not in episode:
            # Key missing from this episode: skip it.
            continue
        rgb = episode[rgb_key]
        # Expand dims for single environment obs.
        if len(rgb.shape) != 4:
            rgb = np.expand_dims(rgb, axis=0)
        assert len(rgb.shape) == 4
        if window_size == 0 and seq_idx == 0:  # single file loader
            window = rgb
        else:  # episode loader
            window = rgb[seq_idx: seq_idx + window_size]
        tensor = torch.from_numpy(window).byte().permute(0, 3, 1, 2)
        # Each camera may have its own transform.
        if rgb_key in transforms:
            tensor = transforms[rgb_key](tensor)
        seq_rgb_obs_dict[rgb_key] = tensor
    # shape: N_rgb_obs x (BxCxHxW)
    return {"rgb_obs": seq_rgb_obs_dict}
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def process_depth(
    episode: Dict[str, np.ndarray],
    observation_space: DictConfig,
    transforms: Dict,
    seq_idx: int = 0,
    window_size: int = 0,
) -> Dict[str, Dict[str, torch.Tensor]]:
    """Convert the requested depth modalities to (B, H, W) float tensors."""

    def _ensure_batched(depth_img):
        # Expand dims for single environment obs.
        return depth_img if len(depth_img.shape) == 3 else np.expand_dims(depth_img, axis=0)

    seq_depth_obs_dict = {}
    for depth_key in observation_space["depth_obs"]:
        depth = _ensure_batched(episode[depth_key])
        assert len(depth.shape) == 3
        if window_size == 0 and seq_idx == 0:  # single file loader
            tensor = torch.from_numpy(depth).float()
        else:  # episode loader
            tensor = torch.from_numpy(depth[seq_idx: seq_idx + window_size]).float()
        # Each camera may have its own transform.
        if depth_key in transforms:
            tensor = transforms[depth_key](tensor)
        seq_depth_obs_dict[depth_key] = tensor
    # shape: N_depth_obs x(BxHxW)
    return {"depth_obs": seq_depth_obs_dict}
|
| 123 |
+
|
| 124 |
+
def process_actions(
    episode: Dict[str, np.ndarray],
    observation_space: DictConfig,
    transforms: Dict,
    seq_idx: int = 0,
    window_size: int = 0,
) -> Dict[str, torch.Tensor]:
    """Extract the single configured action modality as a float tensor."""
    action_keys = observation_space["actions"]
    if len(action_keys) != 1:
        raise NotImplementedError
    action_key = action_keys[0]
    if window_size == 0 and seq_idx == 0:  # single file loader
        action = episode[action_key]
        if "actions" in transforms:
            # The transform may also need the robot state (e.g. rel/abs conversion).
            action = transforms["actions"]((action, episode["robot_obs"]))
        seq_acts = torch.from_numpy(action).float()
    else:  # episode loader
        seq_acts = torch.from_numpy(episode[action_key][seq_idx: seq_idx + window_size]).float()
    return {"actions": seq_acts}
|
| 145 |
+
|
| 146 |
+
def process_language(episode: Dict[str, np.ndarray], transforms: Dict, with_lang: bool) -> Dict[str, torch.Tensor]:
    """Extract the language embedding and raw instruction text from an episode."""
    seq_lang = {"lang": torch.empty(0)}
    if with_lang:
        lang = torch.from_numpy(episode["language"]).float()
        if "language" in transforms:
            lang = transforms["language"](lang)
        seq_lang["lang"] = lang
    # NOTE(review): 'language_text' is read unconditionally — episodes are
    # presumably always annotated with it; verify for with_lang=False callers.
    seq_lang['lang_text'] = episode['language_text']
    return seq_lang
|
| 156 |
+
|
| 157 |
+
def get_state_info_dict(episode: Dict[str, np.ndarray]) -> Dict[str, Dict[str, torch.Tensor]]:
    """
    Create a dictionary with raw state observations for environment resets.

    Args:
        episode: Sequence dictionary.

    Returns:
        Info dict of full robot and scene state (for env resets).
    """
    state_info = {key: torch.from_numpy(episode[key]) for key in ("robot_obs", "scene_obs")}
    return {"state_info": state_info}
|
| 174 |
+
|
| 175 |
+
def load_dataset_statistics(train_dataset_dir, val_dataset_dir, transforms):
    """
    Tries to load statistics.yaml in every dataset folder in order to update the transforms hardcoded in the
    hydra config file. If no statistics.yaml exists, nothing is changed

    Args:
        train_dataset_dir: path of the training folder
        val_dataset_dir: path of the validation folder
        transforms: transforms loaded from hydra conf

    Returns:
        transforms: potentially updated transforms
    """
    paths = {"train": train_dataset_dir, "val": val_dataset_dir}
    for dataset_type in ["train", "val"]:
        try:
            statistics = OmegaConf.load(Path(paths[dataset_type]) / "statistics.yaml")
            # Hack for maintaining two repositories with transforms
            statistics = OmegaConf.create(OmegaConf.to_yaml(statistics).replace("calvin_agent", "policy_models"))
            # this ugly piece of code only exists because OmegaConf actually can't merge ListConfigs.
            # we do not want to override everything, but just the transforms that are specified in both
            # see https://stackoverflow.com/questions/61315623/omegaconf-can-i-influence-how-lists-are-merged
            for modality in transforms[dataset_type]:
                if modality in statistics:
                    conf_transforms = transforms[dataset_type][modality]
                    dataset_transforms = statistics[modality]
                    for dataset_trans in dataset_transforms:
                        exists = False
                        # match transforms by their hydra target class; replace in place when found
                        for i, conf_trans in enumerate(conf_transforms):
                            if dataset_trans["_target_"] == conf_trans["_target_"]:
                                exists = True
                                transforms[dataset_type][modality][i] = dataset_trans
                                break
                        if not exists:
                            # transform only present in statistics.yaml: append it to the config list
                            transforms[dataset_type][modality] = ListConfig([*conf_transforms, dataset_trans])
        except FileNotFoundError:
            # no statistics.yaml in this folder: keep the hardcoded transforms unchanged
            logger.warning("Could not load statistics.yaml")
    return transforms
|
| 214 |
+
|
| 215 |
+
def lookup_naming_pattern(dataset_dir: Path, save_format: str) -> Tuple[Tuple[Path, str], int]:
    """
    Check naming pattern of dataset files.

    Args:
        dataset_dir: Path to dataset.
        save_format: File format (CALVIN default is npz).

    Returns:
        naming_pattern: 'file_0000001.npz' -> ('file_', '.npz')
        n_digits: Zero padding of file enumeration.

    Raises:
        StopIteration: if no file with a matching suffix is found.
    """
    # Scan until the first file whose suffix matches; the context manager
    # closes the scandir iterator instead of leaking the directory handle.
    with os.scandir(dataset_dir) as entries:
        filename = next(Path(e) for e in entries if save_format in Path(e).suffix)
    aux_naming_pattern = re.split(r"\d+", filename.stem)
    naming_pattern = (filename.parent / aux_naming_pattern[0], filename.suffix)
    n_digits = len(re.findall(r"\d+", filename.stem)[0])
    assert len(naming_pattern) == 2
    assert n_digits > 0
    return naming_pattern, n_digits
code/policy_models/datasets/utils/shared_memory_utils.py
ADDED
|
@@ -0,0 +1,336 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections import defaultdict
|
| 2 |
+
from functools import partial
|
| 3 |
+
from itertools import chain
|
| 4 |
+
import logging
|
| 5 |
+
import multiprocessing
|
| 6 |
+
from multiprocessing.shared_memory import SharedMemory
|
| 7 |
+
import os
|
| 8 |
+
from pathlib import Path
|
| 9 |
+
import signal
|
| 10 |
+
from typing import Dict, Optional, Tuple
|
| 11 |
+
|
| 12 |
+
import numpy as np
|
| 13 |
+
from omegaconf import DictConfig
|
| 14 |
+
from pytorch_lightning import Callback, LightningModule, Trainer
|
| 15 |
+
from tqdm import tqdm
|
| 16 |
+
|
| 17 |
+
from policy_models.datasets.shm_dataset import ShmDataset
|
| 18 |
+
from policy_models.datasets.utils.episode_utils import lookup_naming_pattern
|
| 19 |
+
|
| 20 |
+
log = logging.getLogger(__name__)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def gather_results(return_dict: Dict) -> Tuple[Dict, Dict]:
    """
    Combine results of worker processes.

    Args:
        return_dict: Dictionary with results of worker processes.

    Returns:
        episode_lookup_vision: Combined results of vision lookup.
        lang_episode_dict: Combined results of language lookup.
    """
    episode_lookup_vision: Dict = defaultdict(list)
    lang_episode_dict: Dict = defaultdict(dict)
    # Iterate workers in deterministic order so lookups are concatenated consistently.
    for proc_id in sorted(return_dict):
        vision_part, lang_part = return_dict[proc_id][0], return_dict[proc_id][1]
        for key in vision_part:
            episode_lookup_vision[key].extend(vision_part[key])
            lang_episode_dict[key].update(lang_part[key])
    return episode_lookup_vision, lang_episode_dict
|
| 42 |
+
|
| 43 |
+
def check_shm_lookup_exists(dataset_type: str) -> Optional[Dict]:
    """
    Look for a previously saved shared-memory lookup file on disk.

    Args:
        dataset_type: 'train' or 'val'.

    Returns:
        The lookup dict if the file exists, None otherwise.
    """
    # honor $TMPDIR when set (e.g. on clusters), otherwise fall back to /tmp
    base_dir = Path(os.environ["TMPDIR"]) if "TMPDIR" in os.environ else Path("/tmp/")
    lookup_file = base_dir / f"{dataset_type}_shm_lookup.npy"
    try:
        return np.load(lookup_file, allow_pickle=True).item()
    except FileNotFoundError:
        return None
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def save_shm_lookup(train_shm_lookup: Dict, val_shm_lookup: Dict) -> None:
    """
    Persist the shared-memory lookups to disk so DDP subprocesses can reuse them.

    Args:
        train_shm_lookup: Shared memory lookup for training data.
        val_shm_lookup: Shared memory lookup for validation data.
    """
    # honor $TMPDIR when set, otherwise fall back to /tmp
    target_dir = Path(os.environ["TMPDIR"]) if "TMPDIR" in os.environ else Path("/tmp/")
    for split, lookup in (("train", train_shm_lookup), ("val", val_shm_lookup)):
        np.save(target_dir / f"{split}_shm_lookup.npy", lookup)  # type: ignore
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def load_shm_lookup() -> Tuple[Dict, Dict]:
    """
    Load the shared-memory lookups previously written by ``save_shm_lookup``.

    Returns:
        train_shm_lookup: Shared memory lookup for training data.
        val_shm_lookup: Shared memory lookup for validation data.
    """
    # honor $TMPDIR when set, otherwise fall back to /tmp
    src_dir = Path(os.environ["TMPDIR"]) if "TMPDIR" in os.environ else Path("/tmp/")
    train_lookup: Dict = np.load(src_dir / "train_shm_lookup.npy", allow_pickle=True).item()
    val_lookup: Dict = np.load(src_dir / "val_shm_lookup.npy", allow_pickle=True).item()
    return train_lookup, val_lookup
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
class SharedMemoryLoader:
    """
    Helper class for loading dataset into shared memory.

    Episodes stored as per-frame .npz files on disk are copied once into named
    SharedMemory blocks (one per observation key), and lookup tables mapping
    (offset, step) into those blocks are built for vision and language samples.

    Args:
        datasets_cfg: Hydra config of datasets.
        dataset_dir: Path to dataset.
    """

    def __init__(self, datasets_cfg: DictConfig, dataset_dir: Path):
        # Fall back to the vision dataset's settings when no language dataset is configured.
        self.obs_space = datasets_cfg.lang_dataset.obs_space if "lang" in datasets_cfg else datasets_cfg.vision_dataset.obs_space
        self.dataset_dir = dataset_dir
        # "training" in the directory path marks the training split; anything else is validation.
        self.dataset_type = "train" if "training" in dataset_dir.as_posix() else "val"
        self.lang_folder = datasets_cfg.lang_dataset.lang_folder if "lang" in datasets_cfg else datasets_cfg.vision_dataset.lang_folder  # lang_folder: "lang_paraphrase-MiniLM-L3-v2"
        # Derive the episode file naming scheme (prefix/suffix and zero-padding width).
        self.naming_pattern, self.n_digits = lookup_naming_pattern(self.dataset_dir, "npz")
        self.min_window_size_vision = datasets_cfg.vision_dataset.min_window_size
        self.min_window_size_lang = datasets_cfg.lang_dataset.min_window_size if "lang" in datasets_cfg else datasets_cfg.vision_dataset.min_window_size
        # Number of worker processes used to fill the shared memory.
        self.n_proc = 8

    def _worker_process(self, proc_num, ep_start_end_ids, offsets, shmem, lang_ep_start_end_ids, return_dict):
        """
        Multiprocessing worker to speed up the loading of the data into shared memory.

        Args:
            proc_num: Process number.
            ep_start_end_ids: Episode start and end indices for this worker.
            offsets: Offset for addressing right portion of shared array.
            shmem: Shared memory handles.
            lang_ep_start_end_ids: Episode start and end indices of language data for this worker.
            return_dict: Dictionary for saving the results.
        """
        episode_lookup_vision = defaultdict(list)
        lang_episode_dict = defaultdict(dict)
        # Only worker 0 shows a progress bar; the others run silently.
        if proc_num == 0:
            pbar = tqdm(total=np.sum(np.diff(ep_start_end_ids)), leave=False)
        else:
            pbar = None
        for i, (start_idx, end_idx) in enumerate(ep_start_end_ids):
            seq = self._zip_sequence(start_idx, end_idx, pbar)
            for key, array in seq.items():
                # View into the shared block at this worker's current offset; copy the episode in.
                shared_array = np.ndarray(array.shape, dtype=array.dtype, buffer=shmem[key].buf, offset=offsets[key])
                shared_array[:] = array[:]

                # Every window start inside the episode becomes a (block offset, step) lookup entry.
                for j, idx in enumerate(range(start_idx, end_idx + 1 - self.min_window_size_vision)):
                    episode_lookup_vision[key].append((offsets[key], j))
                    # NOTE(review): assumes lang_ep_start_end_ids is not None here —
                    # callers without a language dataset would crash; confirm upstream.
                    if idx in lang_ep_start_end_ids[:, 0]:
                        lang_episode_dict[key][idx] = (offsets[key], j)
                # Advance this worker's write position past the episode just written.
                offsets[key] += array.nbytes
        return_dict[proc_num] = episode_lookup_vision, lang_episode_dict
        if pbar is not None:
            pbar.close()

    def load_data_in_shared_memory(self):
        """
        Load the dataset from disk into shared memory once at the beginning of the training to speed up data loading.

        Returns:
            Shared memory lookup dict.
        """
        lang_data = np.load(self.dataset_dir / self.lang_folder / "auto_lang_ann.npy", allow_pickle=True).item() if self.lang_folder is not None else None
        ep_start_end_ids = np.load(self.dataset_dir / "ep_start_end_ids.npy")
        lang_ep_start_end_ids = np.array(lang_data["info"]["indx"]) if self.lang_folder is not None else None  # np.array(vision_data["info"]["indx"])
        lang_ann = lang_data["language"]["emb"] if self.lang_folder is not None else None
        shmem, shapes, sizes, dtypes, shmem_lookup = self._init_shmem(ep_start_end_ids)

        # NOTE(review): the early-return for an existing shared memory lookup was
        # deliberately disabled (dead branch below) — the data is always reloaded.
        if shmem_lookup is not None:
            pass
            # using existing shared memory
            # log.info("Using existing shared memory without reloading it.")
            # return shmem_lookup

        lang_lookup = []

        episode_lookup_lang = defaultdict(list)
        log.info(
            f"Loading {self.dataset_type} language episodes into shared memory. "
            f"(progress bar shows only worker process 0)."
        )

        # Never spawn more workers than there are episodes.
        if self.n_proc > len(ep_start_end_ids):
            self.n_proc = len(ep_start_end_ids)
        split_indices = np.array_split(ep_start_end_ids, self.n_proc, axis=0)
        # Total frame count handled by each worker, used to compute its byte offset.
        split_lens = [np.sum(np.diff(split_indices[i])) for i in range(len(split_indices))]
        obs_size = {key: dtypes[key].itemsize * np.prod(shapes[key]) for key in dtypes}
        # Cumulative byte offsets so each worker writes a disjoint region per key.
        offsets = [{key: n * obs_size[key] for key in dtypes} for n in np.cumsum([0] + split_lens[:-1])]

        manager = multiprocessing.Manager()
        return_dict = manager.dict()
        processes = []
        # load vision data with multiple processes
        for i in range(self.n_proc):
            p = multiprocessing.Process(
                target=self._worker_process,
                args=(i, split_indices[i], offsets[i], shmem, lang_ep_start_end_ids, return_dict),
            )
            processes.append(p)
            p.start()
        for proc in processes:
            proc.join()

        episode_lookup_vision, lang_episode_dict = gather_results(return_dict)

        # lang data: translate each language episode's start index into
        # (offset, step) entries relative to the shared memory layout.
        if lang_ep_start_end_ids is not None:
            for i, (start_idx, end_idx) in enumerate(tqdm(lang_ep_start_end_ids)):
                for key in lang_episode_dict:
                    offset, step = lang_episode_dict[key][start_idx]
                    for j, idx in enumerate(range(start_idx, end_idx + 1 - self.min_window_size_lang)):
                        episode_lookup_lang[key].append((offset, step + j))
                # One lang_lookup entry (annotation index i) per window start.
                for idx in range(start_idx, end_idx + 1 - self.min_window_size_lang):
                    lang_lookup.append(i)
        result = {
            "episode_lookup_vision": episode_lookup_vision,
            "episode_lookup_lang": episode_lookup_lang,
            "lang_lookup": lang_lookup,
            "lang_ann": lang_ann,
            "shapes": shapes,
            "sizes": sizes,
            "dtypes": dtypes,
        }
        return result

    def _init_shmem(self, ep_start_end_ids: np.ndarray) -> Tuple[Dict, Dict, Dict, Dict, Optional[Dict]]:
        """
        Initialize shared memory.

        Args:
            ep_start_end_ids: Episode start and end indices of dataset.

        Returns:
            shmem: Dictionary with shared memory handles for each dataset key (rgb_static, etc ...).
            shapes: Dictionary with the shape of one datapoint for each dataset key.
            sizes: Dictionary with the memory size of one datapoint for each dataset key.
            dtypes: Dictionary with the dtype of data for each dataset key.
            shm_lookup: If shared memory lookup dict already exists, return it here.
        """
        # load first episode to determine memory usage
        seq = self._zip_sequence(ep_start_end_ids[0][0], ep_start_end_ids[0][0] + 1)
        # Total number of frames across all episodes.
        total_size = np.sum(ep_start_end_ids[:, 1] - ep_start_end_ids[:, 0])
        shmem: Dict[str, SharedMemory] = {}
        shapes: Dict[str, Tuple] = {}
        sizes: Dict[str, int] = {}
        dtypes: Dict[str, str] = {}

        shm_lookup = check_shm_lookup_exists(self.dataset_type)
        # check if all necessary shared memories are already loaded
        if shm_lookup is not None:
            print("shm_lookup exists")
            try:
                # Every recorded block must exist and have the expected total byte size.
                if np.all(
                    [
                        SharedMemory(name=f"{self.dataset_type}_{key}").size == size * total_size
                        for key, size in shm_lookup["sizes"].items()
                    ]
                ):
                    # Existing blocks are valid: skip allocation and hand back the saved lookup.
                    return shmem, shapes, sizes, dtypes, shm_lookup
            except FileNotFoundError as e:
                # At least one block is missing — fall through and (re)allocate.
                pass
        for key, array in seq.items():
            try:
                # see if exists
                s = SharedMemory(name=f"{self.dataset_type}_{key}")
                s.close()
                s.unlink()
                log.warning(
                    f"Found existing shared memory {self.dataset_type}_{key}, freeing up memory."
                    "In case of multiple training runs on the same node, this will lead to problems."
                )
            except FileNotFoundError:
                pass
            # One block per observation key, sized for every frame in the dataset.
            shmem[key] = SharedMemory(create=True, size=array.nbytes * total_size, name=f"{self.dataset_type}_{key}")
            shapes[key] = array.shape[1:]
            sizes[key] = array.nbytes
            dtypes[key] = array.dtype

        # register signal handler for the case that shm data loading process gets interrupted.
        signal.signal(signal.SIGTERM, partial(delete_shm, shmem.keys()))

        return shmem, shapes, sizes, dtypes, None

    def _zip_sequence(self, start_idx, end_idx, pbar=None):
        """
        Load consecutive frames saved as individual files on disk and combine to episode dict.

        Args:
            start_idx: Start index of file.
            end_idx: End index of file.
            pbar: Tqdm progress bar.

        Returns:
            Episode dict.
        """
        # Flatten the obs_space config into the list of array keys to load.
        keys = list(chain(*self.obs_space.values()))
        keys.remove("language")
        keys.append("scene_obs")
        n_items = end_idx - start_idx
        episode = {}
        # Peek at the first frame to determine each key's shape and dtype.
        data = np.load(self._get_episode_name(start_idx))
        for key in keys:
            shape = (n_items,) + data[key].shape
            dtype = data[key].dtype
            episode[key] = np.empty(shape=shape, dtype=dtype)
        for i, file_idx in enumerate(range(start_idx, end_idx)):
            with np.load(self._get_episode_name(file_idx)) as data:
                for key in keys:
                    episode[key][i] = data[key]
            if pbar is not None:
                pbar.update(1)
        return episode

    def _get_episode_name(self, file_idx):
        """
        Convert file idx to file path.

        Args:
            file_idx: index of starting frame.

        Returns:
            Path to file.
        """
        # e.g. prefix + zero-padded index + ".npz" suffix.
        return Path(f"{self.naming_pattern[0]}{file_idx:0{self.n_digits}d}{self.naming_pattern[1]}")
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
def delete_shm(shm_keys, signum, frame):
    """
    Close and unlink the shared memories, then terminate the process.

    Intended for use as a SIGTERM handler (registered via
    ``functools.partial(delete_shm, shm_keys)``), so the last two parameters
    follow the signal-handler convention. The second parameter was previously
    named ``signal``, which shadowed the imported ``signal`` module — renamed
    to ``signum`` (handlers pass arguments positionally, so callers are
    unaffected).

    Args:
        shm_keys: Iterable of dataset keys (e.g. 'rgb_static') whose shared
            memory blocks should be freed for both 'train' and 'val'.
        signum: Signal number supplied by the signal machinery (unused).
        frame: Current stack frame supplied by the signal machinery (unused).
    """
    for dataset_type in ["train", "val"]:
        for shm_key in shm_keys:
            try:
                s = SharedMemory(name=f"{dataset_type}_{shm_key}")
                s.close()
                s.unlink()
                print(f"successfully unlinked {shm_key}")
            except Exception as e:
                # Best effort: the segment may already be gone or owned by
                # another process — report and keep freeing the rest.
                print(e)
    # raise SystemExit directly instead of calling exit(), which is a
    # site-module convenience and not guaranteed to exist.
    raise SystemExit()
|
| 325 |
+
|
| 326 |
+
|
| 327 |
+
class SignalCallback(Callback):
    """
    Register a signal handler for closing and unlinking the shared memory that gets activated with a SIGTERM signal.
    """

    def on_fit_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
        """Install the SIGTERM shared-memory cleanup handler when training on a ShmDataset.

        Args:
            trainer: Lightning trainer whose datamodule provides the dataloaders.
            pl_module: The LightningModule being trained (unused).
        """
        # Call train_dataloader() only once: the previous code called it twice,
        # which could construct the dataloaders a second time for no benefit.
        dataset = trainer.datamodule.train_dataloader()["vis"].dataset  # type: ignore
        if isinstance(dataset, ShmDataset):
            shm_keys = dataset.episode_lookup_dict.keys()
            signal.signal(signal.SIGTERM, partial(delete_shm, shm_keys))
            print("Registered shared memory signal handler.")
|
code/policy_models/datasets/xbot_dataset.py
ADDED
|
@@ -0,0 +1,230 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (2024) Bytedance Ltd. and/or its affiliates
|
| 2 |
+
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import json
|
| 16 |
+
import os
|
| 17 |
+
import random
|
| 18 |
+
import warnings
|
| 19 |
+
import traceback
|
| 20 |
+
import argparse
|
| 21 |
+
from omegaconf import OmegaConf
|
| 22 |
+
from tqdm import tqdm
|
| 23 |
+
from torchvision import transforms as T
|
| 24 |
+
import torch
|
| 25 |
+
from torch.utils.data import Dataset,DataLoader
|
| 26 |
+
import numpy as np
|
| 27 |
+
import imageio
|
| 28 |
+
from decord import VideoReader, cpu
|
| 29 |
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
| 30 |
+
from einops import rearrange
|
| 31 |
+
|
| 32 |
+
# from mdt.datasets.utils.dataset_util import euler2rotm, rotm2euler
|
| 33 |
+
# from mdt.datasets.utils.video_transforms import Resize_Preprocess, ToTensorVideo
|
| 34 |
+
# from mdt.datasets.utils.util import update_paths
|
| 35 |
+
from scipy.spatial.transform import Rotation as R
|
| 36 |
+
import decord
|
| 37 |
+
|
| 38 |
+
class Dataset_xbot(Dataset):
    """PyTorch dataset for xbot trajectories: returns normalized actions/state,
    a language instruction, and pre-encoded (VAE-latent) camera observations
    for three views per sample."""

    def __init__(
        self,
        args,
        mode = 'val',
    ):
        """Constructor.

        Args:
            args: Config namespace; must provide data_json_path, data_root_path,
                action_01/action_99 and state_01/state_99 percentile bounds,
                num_frames, relative, and pre_encode.
            mode: Dataset split name used to pick the annotation json ('train'/'val').
        """
        super().__init__()
        self.args = args
        self.mode = mode
        data_json_path = args.data_json_path
        data_root_path = args.data_root_path

        # dataset structure
        # dataset_dir/dataset_name/annotation_name/mode/traj
        # dataset_dir/dataset_name/video/mode/traj
        # dataset_dir/dataset_name/latent_video/mode/traj

        # samples: {'ann_file':xxx, 'frame_idx':xxx, 'dataset_name':xxx}

        # prepare all datasets path
        self.video_path = []
        data_json_path = f'{data_json_path}/{mode}_all.json'
        with open(data_json_path, "r") as f:
            self.samples = json.load(f)
        # One dataset root per sample, resolved against the common data root.
        self.video_path = [os.path.join(data_root_path, sample['dataset_name']) for sample in self.samples]

        print(f"ALL dataset, {len(self.samples)} samples in total")

        # with open(f'{self.args.action_json}', "r") as f:
        #     self.stat = json.load(f)
        # 1st/99th percentile bounds used for min-max normalization; [None,:]
        # adds a leading broadcast axis over the frame dimension.
        self.a_min = np.array(args.action_01)[None,:]
        self.a_max = np.array(args.action_99)[None,:]
        self.s_min = np.array(args.state_01)[None,:]
        self.s_max = np.array(args.state_99)[None,:]
        print(f"action min: {self.a_min.shape}, action max: {self.a_max.shape}")

    def __len__(self):
        # Number of (annotation file, frame window) samples listed in the split json.
        return len(self.samples)

    def _load_latent_video(self, video_path, frame_ids):
        """Load a pre-encoded latent video tensor from disk and select frames.

        Args:
            video_path: Path to a torch-saved tensor whose first dim is frames.
            frame_ids: Frame indices to gather; must lie in [0, num_frames).

        Returns:
            Tensor of the selected frames (no grad).
        """
        # video_path = video_path.split('/')[:-1]
        # video_path = '/'.join(video_path)+'/0.pt'

        # print(video_path)
        with open(video_path,'rb') as file:
            video_tensor = torch.load(file)
            video_tensor.requires_grad = False
        # vr = VideoReader(video_path, ctx=cpu(0), num_threads=2)
        # print(video_tensor.size(),np.array(frame_ids))
        # NOTE(review): this try/except collapses the informative bounds-check
        # failure into a bare `assert False`, losing the original message.
        try:
            assert (np.array(frame_ids) < video_tensor.size()[0]).all()
            assert (np.array(frame_ids) >= 0).all()
        except:
            assert False
        frame_data = video_tensor[frame_ids]
        return frame_data

    def _get_frames(self, label, frame_ids, cam_id, pre_encode, video_dir, use_img_cond=False):
        """Fetch observation frames for one camera, either as pre-encoded
        latents (required — see asserts) or decoded from the raw video.

        Args:
            label: Parsed annotation dict for the trajectory.
            frame_ids: Frame index (or indices) to load.
            cam_id: Camera index into the annotation's video lists; required.
            pre_encode: Must be True in the current pipeline (asserted below).
            video_dir: Dataset root the annotation's relative paths resolve against.
            use_img_cond: In the raw-video branch, use only the first frame id.
        """
        # directly load videos latent after svd-vae encoder
        assert cam_id is not None
        assert pre_encode == True
        if pre_encode:
            video_path = label['latent_videos'][cam_id]['latent_video_path']
            try:
                video_path = os.path.join(video_dir,video_path)
                frames = self._load_latent_video(video_path, frame_ids)
            except:
                # Fallback naming scheme: some datasets store latents under
                # "latent_videos_svd" instead of "latent_videos".
                video_path = video_path.replace("latent_videos", "latent_videos_svd")
                frames = self._load_latent_video(video_path, frame_ids)
        # load original videos
        else:
            if use_img_cond:
                frame_ids = frame_ids[0]
            video_path = label['videos'][cam_id]['video_path']
            video_path = os.path.join(video_dir,video_path)
            # frames = self._load_video(video_path, frame_ids)
            # frames = mediapy.read_video(video_path)
            vr = decord.VideoReader(video_path)
            frames = vr[frame_ids].asnumpy()
            frames = torch.from_numpy(frames).permute(2,0,1).unsqueeze(0)  # (h, w, c) -> (1, c, h, w)
            # resize the video to self.args.video_size
            # NOTE(review): self.preprocess is never defined in this class —
            # this branch would raise AttributeError; unreachable while
            # pre_encode is asserted True above.
            frames = self.preprocess(frames)
        return frames

    def _get_obs(self, label, frame_ids, cam_id, pre_encode, video_dir):
        """Resolve the camera id (random when unspecified) and load its frames.

        Returns:
            (frames, chosen_cam_id)
        """
        if cam_id is None:
            # NOTE(review): self.cam_ids is never set in __init__ — this path
            # would raise AttributeError; callers always pass an explicit cam_id.
            temp_cam_id = random.choice(self.cam_ids)
        else:
            temp_cam_id = cam_id
        frames = self._get_frames(label, frame_ids, cam_id = temp_cam_id, pre_encode = pre_encode, video_dir=video_dir)
        return frames, temp_cam_id

    def normalize_bound(
        self,
        data: np.ndarray,
        data_min: np.ndarray,
        data_max: np.ndarray,
        clip_min: float = -1,
        clip_max: float = 1,
        eps: float = 1e-8,
    ) -> np.ndarray:
        """Min-max scale `data` from [data_min, data_max] to [-1, 1], clipped
        to [clip_min, clip_max]; eps guards against a zero range."""
        ndata = 2 * (data - data_min) / (data_max - data_min + eps) - 1
        return np.clip(ndata, clip_min, clip_max)

    def denormalize_bound(
        self,
        data: np.ndarray,
        data_min: np.ndarray,
        data_max: np.ndarray,
        clip_min: float = -1,
        clip_max: float = 1,
        eps: float = 1e-8,
    ) -> np.ndarray:
        """Inverse of normalize_bound: map `data` from [clip_min, clip_max]
        back to [data_min, data_max]."""
        clip_range = clip_max - clip_min
        rdata = (data - clip_min) / clip_range * (data_max - data_min) + data_min
        return rdata

    def process_action_xhand(self, label, frame_ids, rel = False):
        """Build the normalized action chunk and current state for one sample.

        Actions are expressed relative to the current state's leading action
        dims, then min-max normalized to [-1, 1].

        Args:
            label: Annotation dict with 'states' and 'actions' arrays.
            frame_ids: Frame indices of the sample window.
            rel: Unused here — relative actions are always computed below.

        Returns:
            (actions, state) as float32 tensors.
        """
        num_frames = len(frame_ids)
        frame_ids = frame_ids[:int(self.args.num_frames)]  # (f)
        states = np.array(label['states'])[frame_ids]  # assumes (f, 38) — per inline note; TODO confirm
        command = np.array(label['actions'])[frame_ids]

        # print(f'states: {states.shape}, actions: {command.shape}')

        state = states[0:1]  # current state

        a_dim = command.shape[-1]
        # Relative actions: subtract the current state's first a_dim entries.
        action_base = state[:,:a_dim]  # (1, a_dim)
        actions = command - action_base  # (num_frames, a_dim)

        # normalize
        action_scaled = self.normalize_bound(actions, self.a_min, self.a_max)
        state_scaled = self.normalize_bound(state, self.s_min, self.s_max)
        return torch.from_numpy(action_scaled).float(), torch.from_numpy(state_scaled).float()

    def __getitem__(self, index, cam_id = None, return_video = False):
        """Assemble one training sample: normalized actions/state, language
        text, and the three camera latents (cam 0 static, cams 1-2 grippers).

        Note: cam_id / return_video are ignored when called through a
        DataLoader, which only passes the index.
        """
        sample = self.samples[index]
        sampled_video_dir = self.video_path[index]

        ann_file = sample['ann_file']
        # dataset_name = sample['dataset_name']
        ann_file = f'{sampled_video_dir}/{ann_file}'
        frame_ids = sample['frame_ids']
        with open(ann_file, "r") as f:
            label = json.load(f)

        data = dict()
        # action
        data['actions'], data['state_obs'] = self.process_action_xhand(label,frame_ids,rel=self.args.relative)
        # instructions
        data['lang_text'] = label['texts'][0]
        # observation: only the first frame of the window is used as the image condition
        static_latent, cam_id = self._get_obs(label, frame_ids[0], cam_id=0, pre_encode=self.args.pre_encode,video_dir=sampled_video_dir)
        gripper_latent, cam_id = self._get_obs(label, frame_ids[0], cam_id=1, pre_encode=self.args.pre_encode,video_dir=sampled_video_dir)
        gripper_latent2, cam_id = self._get_obs(label, frame_ids[0], cam_id=2, pre_encode=self.args.pre_encode,video_dir=sampled_video_dir)
        static_latent = static_latent.unsqueeze(0)
        gripper_latent = gripper_latent.unsqueeze(0)  # presumably (1, 4, 32, 32) per inline note; verify against encoder output
        gripper_latent2 = gripper_latent2.unsqueeze(0)

        # one sample
        rgb_obs = {'rgb_static': static_latent, 'rgb_gripper': gripper_latent, 'rgb_gripper2': gripper_latent2}
        data['rgb_obs'] = rgb_obs
        data['ann_file'] = ann_file
        data['frame_ids'] = frame_ids

        return data
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
if __name__ == "__main__":
    # Smoke test: load the hydra config, build the validation dataset, and
    # iterate the full dataloader printing each batch's annotation files.
    from hydra import compose, initialize
    from omegaconf import OmegaConf
    with initialize(config_path="../../conf", job_name="VPP_xbot_train.yaml"):
        cfg = compose(config_name="VPP_xbot_train")

    # import sys
    # sys.path.append('/cephfs/cjyyj/code/video_robot_svd-main/mdt')
    # from utils.util import get_args
    # train_args = get_args(cfg.datamodule.args)
    # print(train_args)
    train_dataset = Dataset_xbot(cfg.dataset_args,mode="val")
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=cfg.dataset_args.batch_size,
        shuffle=cfg.dataset_args.shuffle,
    )
    for data in tqdm(train_loader,total=len(train_loader)):
        print(data['ann_file'])
        print(len(data['rgb_obs']))
|
| 229 |
+
|
| 230 |
+
|
code/policy_models/edm_diffusion/__init__.py
ADDED
|
File without changes
|
code/policy_models/edm_diffusion/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (189 Bytes). View file
|
|
|
code/policy_models/edm_diffusion/__pycache__/__init__.cpython-39.pyc
ADDED
|
Binary file (159 Bytes). View file
|
|
|
code/policy_models/edm_diffusion/__pycache__/gc_sampling.cpython-310.pyc
ADDED
|
Binary file (29.7 kB). View file
|
|
|
code/policy_models/edm_diffusion/__pycache__/gc_sampling.cpython-39.pyc
ADDED
|
Binary file (30 kB). View file
|
|
|
code/policy_models/edm_diffusion/__pycache__/score_wrappers.cpython-310.pyc
ADDED
|
Binary file (4.16 kB). View file
|
|
|
code/policy_models/edm_diffusion/__pycache__/score_wrappers.cpython-39.pyc
ADDED
|
Binary file (4.04 kB). View file
|
|
|
code/policy_models/edm_diffusion/__pycache__/utils.cpython-310.pyc
ADDED
|
Binary file (8.21 kB). View file
|
|
|
code/policy_models/edm_diffusion/__pycache__/utils.cpython-39.pyc
ADDED
|
Binary file (8.44 kB). View file
|
|
|
code/policy_models/edm_diffusion/gc_sampling.py
ADDED
|
@@ -0,0 +1,1007 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
import os
|
| 3 |
+
|
| 4 |
+
from scipy import integrate
|
| 5 |
+
import torch
|
| 6 |
+
from torch import nn
|
| 7 |
+
import torchsde
|
| 8 |
+
from torchdiffeq import odeint
|
| 9 |
+
from tqdm.auto import trange, tqdm
|
| 10 |
+
from matplotlib import pyplot as plt
|
| 11 |
+
import numpy as np
|
| 12 |
+
|
| 13 |
+
from . import utils
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
'''
|
| 17 |
+
Code adapted for state-action based sampling with/without goal-conditioning:
|
| 18 |
+
|
| 19 |
+
https://github.com/crowsonkb/k-diffusion/blob/master/k_diffusion/sampling.py
|
| 20 |
+
'''
|
| 21 |
+
|
| 22 |
+
def append_zero(action):
|
| 23 |
+
return torch.cat([action, action.new_zeros([1])])
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def get_sigmas_karras(n, sigma_min, sigma_max, rho=7., device='cpu'):
|
| 27 |
+
"""Constructs the noise schedule of Karras et al. (2022)."""
|
| 28 |
+
ramp = torch.linspace(0, 1, n)
|
| 29 |
+
min_inv_rho = sigma_min ** (1 / rho)
|
| 30 |
+
max_inv_rho = sigma_max ** (1 / rho)
|
| 31 |
+
sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
|
| 32 |
+
return append_zero(sigmas).to(device)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def get_sigmas_exponential(n, sigma_min, sigma_max, device='cpu'):
|
| 36 |
+
"""Constructs an exponential noise schedule."""
|
| 37 |
+
sigmas = torch.linspace(math.log(sigma_max), math.log(sigma_min), n, device=device).exp()
|
| 38 |
+
return append_zero(sigmas)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def get_sigmas_linear(n, sigma_min, sigma_max, device='cpu'):
|
| 42 |
+
"""Constructs an linear noise schedule."""
|
| 43 |
+
sigmas = torch.linspace(sigma_max, sigma_min, n, device=device)
|
| 44 |
+
return append_zero(sigmas)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def cosine_beta_schedule(n, s=0.008, device='cpu'):
|
| 48 |
+
"""
|
| 49 |
+
cosine schedule
|
| 50 |
+
as proposed in https://openreview.net/forum?id=-NEXDKk8gZ
|
| 51 |
+
"""
|
| 52 |
+
steps = n + 1
|
| 53 |
+
x = np.linspace(0, steps, steps)
|
| 54 |
+
alphas_cumprod = np.cos(((x / steps) + s) / (1 + s) * np.pi * 0.5) ** 2
|
| 55 |
+
alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
|
| 56 |
+
betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
|
| 57 |
+
betas_clipped = np.clip(betas, a_min=0, a_max=0.999)
|
| 58 |
+
return append_zero(torch.tensor(np.flip(betas_clipped).copy(), device=device, dtype=torch.float32))
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def get_sigmas_ve(n, sigma_min=0.02, sigma_max=100, device='cpu'):
|
| 62 |
+
"""Constructs a continuous VP noise schedule."""
|
| 63 |
+
# (sigma_max ** 2) * ((sigma_min ** 2 / sigma_max ** 2) ** (step_indices / (num_steps - 1)))
|
| 64 |
+
steps = n + 1
|
| 65 |
+
t = torch.linspace(0, steps, n, device=device)
|
| 66 |
+
t = (sigma_max ** 2) * ((sigma_min ** 2 / sigma_max ** 2) ** (t / (n - 1)))
|
| 67 |
+
sigmas = torch.sqrt(t)
|
| 68 |
+
return append_zero(sigmas)
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def get_iddpm_sigmas(n, sigma_min=0.02, sigma_max=100, M=1000, j_0=0, C_1=0.001, C_2=0.008, device='cpu'):
|
| 72 |
+
"""Constructs a continuous VP noise schedule."""
|
| 73 |
+
# (sigma_max ** 2) * ((sigma_min ** 2 / sigma_max ** 2) ** (step_indices / (num_steps - 1)))
|
| 74 |
+
step_indices = torch.arange(n, dtype=torch.float64, device=device)
|
| 75 |
+
u = torch.zeros(M + 1, dtype=torch.float64, device=device)
|
| 76 |
+
alpha_bar = lambda j: (0.5 * np.pi * j / M / (C_2 + 1)).sin() ** 2
|
| 77 |
+
for j in torch.arange(M, j_0, -1, device=device): # M, ..., 1
|
| 78 |
+
u[j - 1] = ((u[j] ** 2 + 1) / (alpha_bar(j - 1) / alpha_bar(j)).clip(min=C_1) - 1).sqrt()
|
| 79 |
+
u_filtered = u[torch.logical_and(u >= sigma_min, u <= sigma_max)]
|
| 80 |
+
sigmas = u_filtered[((len(u_filtered) - 1) / (n - 1) * step_indices).round().to(torch.int64)]
|
| 81 |
+
return append_zero(sigmas).to(torch.float32)
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def get_sigmas_vp(n, beta_d=19.9, beta_min=0.1, eps_s=1e-3, device='cpu'):
|
| 85 |
+
"""Constructs a continuous VP noise schedule."""
|
| 86 |
+
t = torch.linspace(1, eps_s, n, device=device)
|
| 87 |
+
sigmas = torch.sqrt(torch.exp(beta_d * t ** 2 / 2 + beta_min * t) - 1)
|
| 88 |
+
return append_zero(sigmas)
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def to_d(action, sigma, denoised):
|
| 92 |
+
"""Converts a denoiser output to a Karras ODE derivative."""
|
| 93 |
+
return (action- denoised) / utils.append_dims(sigma, action.ndim)
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def default_noise_sampler(x):
|
| 98 |
+
return lambda sigma, sigma_next: torch.randn_like(x)
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
def get_ancestral_step(sigma_from, sigma_to, eta=1.):
|
| 103 |
+
"""Calculates the noise level (sigma_down) to step down to and the amount
|
| 104 |
+
of noise to add (sigma_up) when doing an ancestral sampling step."""
|
| 105 |
+
if not eta:
|
| 106 |
+
return sigma_to, 0.
|
| 107 |
+
sigma_up = min(sigma_to, eta * (sigma_to ** 2 * (sigma_from ** 2 - sigma_to ** 2) / sigma_from ** 2) ** 0.5)
|
| 108 |
+
sigma_down = (sigma_to ** 2 - sigma_up ** 2) ** 0.5
|
| 109 |
+
return sigma_down, sigma_up
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
class BatchedBrownianTree:
|
| 113 |
+
"""A wrapper around torchsde.BrownianTree that enables batches of entropy."""
|
| 114 |
+
|
| 115 |
+
def __init__(self, x, t0, t1, seed=None, **kwargs):
|
| 116 |
+
t0, t1, self.sign = self.sort(t0, t1)
|
| 117 |
+
w0 = kwargs.get('w0', torch.zeros_like(x))
|
| 118 |
+
if seed is None:
|
| 119 |
+
seed = torch.randint(0, 2 ** 63 - 1, []).item()
|
| 120 |
+
self.batched = True
|
| 121 |
+
try:
|
| 122 |
+
assert len(seed) == x.shape[0]
|
| 123 |
+
w0 = w0[0]
|
| 124 |
+
except TypeError:
|
| 125 |
+
seed = [seed]
|
| 126 |
+
self.batched = False
|
| 127 |
+
self.trees = [torchsde.BrownianTree(t0, w0, t1, entropy=s, **kwargs) for s in seed]
|
| 128 |
+
|
| 129 |
+
@staticmethod
|
| 130 |
+
def sort(a, b):
|
| 131 |
+
return (a, b, 1) if a < b else (b, a, -1)
|
| 132 |
+
|
| 133 |
+
def __call__(self, t0, t1):
|
| 134 |
+
t0, t1, sign = self.sort(t0, t1)
|
| 135 |
+
w = torch.stack([tree(t0, t1) for tree in self.trees]) * (self.sign * sign)
|
| 136 |
+
return w if self.batched else w[0]
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
class BrownianTreeNoiseSampler:
|
| 140 |
+
"""A noise sampler backed by a torchsde.BrownianTree.
|
| 141 |
+
Args:
|
| 142 |
+
x (Tensor): The tensor whose shape, device and dtype to use to generate
|
| 143 |
+
random samples.
|
| 144 |
+
sigma_min (float): The low end of the valid interval.
|
| 145 |
+
sigma_max (float): The high end of the valid interval.
|
| 146 |
+
seed (int or List[int]): The random seed. If a list of seeds is
|
| 147 |
+
supplied instead of a single integer, then the noise sampler will
|
| 148 |
+
use one BrownianTree per batch item, each with its own seed.
|
| 149 |
+
transform (callable): A function that maps sigma to the sampler's
|
| 150 |
+
internal timestep.
|
| 151 |
+
"""
|
| 152 |
+
|
| 153 |
+
def __init__(self, x, sigma_min, sigma_max, seed=None, transform=lambda x: x):
|
| 154 |
+
self.transform = transform
|
| 155 |
+
t0, t1 = self.transform(torch.as_tensor(sigma_min)), self.transform(torch.as_tensor(sigma_max))
|
| 156 |
+
self.tree = BatchedBrownianTree(x, t0, t1, seed)
|
| 157 |
+
|
| 158 |
+
def __call__(self, sigma, sigma_next):
|
| 159 |
+
t0, t1 = self.transform(torch.as_tensor(sigma)), self.transform(torch.as_tensor(sigma_next))
|
| 160 |
+
return self.tree(t0, t1) / (t1 - t0).abs().sqrt()
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
@torch.no_grad()
|
| 165 |
+
def sample_euler(
|
| 166 |
+
model,
|
| 167 |
+
state: torch.Tensor,
|
| 168 |
+
action: torch.Tensor,
|
| 169 |
+
goal: torch.Tensor,
|
| 170 |
+
sigmas,
|
| 171 |
+
scaler=None,
|
| 172 |
+
extra_args=None,
|
| 173 |
+
callback=None,
|
| 174 |
+
disable=None,
|
| 175 |
+
s_churn=0.,
|
| 176 |
+
s_tmin=0.,
|
| 177 |
+
s_tmax=float('inf'),
|
| 178 |
+
s_noise=1.
|
| 179 |
+
):
|
| 180 |
+
"""
|
| 181 |
+
Implements a variant of Algorithm 2 (Euler steps) from Karras et al. (2022).
|
| 182 |
+
Stochastic sampler, which combines a first order ODE solver with explicit Langevin-like "churn"
|
| 183 |
+
of adding and removing noise.
|
| 184 |
+
Every update consists of these substeps:
|
| 185 |
+
1. Addition of noise given the factor eps
|
| 186 |
+
2. Solving the ODE dx/dt at timestep t using the score model
|
| 187 |
+
3. Take Euler step from t -> t+1 to get x_{i+1}
|
| 188 |
+
|
| 189 |
+
In contrast to the Heun variant, this variant does not compute a 2nd order correction step
|
| 190 |
+
For S_churn=0 the solver is an ODE solver
|
| 191 |
+
"""
|
| 192 |
+
extra_args = {} if extra_args is None else extra_args
|
| 193 |
+
s_in = action.new_ones([action.shape[0]])
|
| 194 |
+
for i in trange(len(sigmas) - 1, disable=disable):
|
| 195 |
+
gamma = min(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.
|
| 196 |
+
eps = torch.randn_like(action) * s_noise # sample current noise depnding on S_noise
|
| 197 |
+
sigma_hat = sigmas[i] * (gamma + 1) # add noise to sigma
|
| 198 |
+
# print(action[:, -1, :])
|
| 199 |
+
if gamma > 0: # if gamma > 0, use additional noise level for computation
|
| 200 |
+
action = action + eps * (sigma_hat ** 2 - sigmas[i] ** 2) ** 0.5
|
| 201 |
+
denoised = model(state, action, goal, sigma_hat * s_in, **extra_args) # compute denoised action
|
| 202 |
+
d = to_d(action, sigma_hat, denoised) # compute derivative
|
| 203 |
+
if callback is not None:
|
| 204 |
+
callback({'x': action, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
|
| 205 |
+
dt = sigmas[i + 1] - sigma_hat # compute timestep
|
| 206 |
+
# Euler method
|
| 207 |
+
action = action + d * dt # take Euler step
|
| 208 |
+
if scaler is not None:
|
| 209 |
+
action = scaler.clip_output(action)
|
| 210 |
+
return action
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
@torch.no_grad()
|
| 214 |
+
def sample_euler_ancestral(
|
| 215 |
+
model,
|
| 216 |
+
state,
|
| 217 |
+
action,
|
| 218 |
+
goal,
|
| 219 |
+
sigmas,
|
| 220 |
+
scaler=None,
|
| 221 |
+
extra_args=None,
|
| 222 |
+
callback=None,
|
| 223 |
+
disable=None,
|
| 224 |
+
eta=1.
|
| 225 |
+
):
|
| 226 |
+
"""
|
| 227 |
+
Ancestral sampling with Euler method steps.
|
| 228 |
+
|
| 229 |
+
1. compute dx_{i}/dt at the current timestep
|
| 230 |
+
2. get \sigma_{up} and \sigma_{down} from ancestral method
|
| 231 |
+
3. compute x_{t-1} = x_{t} + dx_{t}/dt * \sigma_{down}
|
| 232 |
+
4. Add additional noise after the update step x_{t-1} =x_{t-1} + z * \sigma_{up}
|
| 233 |
+
"""
|
| 234 |
+
extra_args = {} if extra_args is None else extra_args
|
| 235 |
+
s_in = action.new_ones([action.shape[0]])
|
| 236 |
+
for i in trange(len(sigmas) - 1, disable=disable):
|
| 237 |
+
# compute x_{t-1}
|
| 238 |
+
denoised = model(state, action, goal, sigmas[i] * s_in, **extra_args)
|
| 239 |
+
# get ancestral steps
|
| 240 |
+
sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
|
| 241 |
+
if callback is not None:
|
| 242 |
+
callback({'x': action, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
|
| 243 |
+
# compute dx/dt
|
| 244 |
+
d = to_d(action, sigmas[i], denoised)
|
| 245 |
+
# compute dt based on sigma_down value
|
| 246 |
+
dt = sigma_down - sigmas[i]
|
| 247 |
+
# update current action
|
| 248 |
+
action = action + d * dt
|
| 249 |
+
if sigma_down > 0:
|
| 250 |
+
action = action + torch.randn_like(action) * sigma_up
|
| 251 |
+
if scaler is not None:
|
| 252 |
+
action = scaler.clip_output(action)
|
| 253 |
+
return action
|
| 254 |
+
|
| 255 |
+
|
| 256 |
+
@torch.no_grad()
|
| 257 |
+
def sample_heun(
|
| 258 |
+
model,
|
| 259 |
+
state,
|
| 260 |
+
action,
|
| 261 |
+
goal,
|
| 262 |
+
sigmas,
|
| 263 |
+
scaler=None,
|
| 264 |
+
extra_args=None,
|
| 265 |
+
callback=None,
|
| 266 |
+
disable=None,
|
| 267 |
+
s_churn=0.,
|
| 268 |
+
s_tmin=0.,
|
| 269 |
+
s_tmax=float('inf'),
|
| 270 |
+
s_noise=1.
|
| 271 |
+
):
|
| 272 |
+
"""
|
| 273 |
+
Implements Algorithm 2 (Heun steps) from Karras et al. (2022).
|
| 274 |
+
For S_churn =0 this is an ODE solver otherwise SDE
|
| 275 |
+
Every update consists of these substeps:
|
| 276 |
+
1. Addition of noise given the factor eps
|
| 277 |
+
2. Solving the ODE dx/dt at timestep t using the score model
|
| 278 |
+
3. Take Euler step from t -> t+1 to get x_{i+1}
|
| 279 |
+
4. 2nd order correction step to get x_{i+1}^{(2)}
|
| 280 |
+
|
| 281 |
+
In contrast to the Euler variant, this variant computes a 2nd order correction step.
|
| 282 |
+
"""
|
| 283 |
+
extra_args = {} if extra_args is None else extra_args
|
| 284 |
+
s_in = action.new_ones([action.shape[0]])
|
| 285 |
+
for i in trange(len(sigmas) - 1, disable=disable):
|
| 286 |
+
gamma = min(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.
|
| 287 |
+
eps = torch.randn_like(action) * s_noise
|
| 288 |
+
sigma_hat = sigmas[i] * (gamma + 1)
|
| 289 |
+
# if gamma > 0, use additional noise level for computation ODE-> SDE Solver
|
| 290 |
+
if gamma > 0:
|
| 291 |
+
action= action+ eps * (sigma_hat ** 2 - sigmas[i] ** 2) ** 0.5
|
| 292 |
+
denoised = model(state, action, goal, sigma_hat * s_in, **extra_args)
|
| 293 |
+
d = to_d(action, sigma_hat, denoised)
|
| 294 |
+
if callback is not None:
|
| 295 |
+
callback({'x': action, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
|
| 296 |
+
dt = sigmas[i + 1] - sigma_hat
|
| 297 |
+
# if we only are at the last step we use an Euler step for our update otherwise the heun one
|
| 298 |
+
if sigmas[i + 1] == 0:
|
| 299 |
+
# Euler method
|
| 300 |
+
action= action+ d * dt
|
| 301 |
+
else:
|
| 302 |
+
# Heun's method
|
| 303 |
+
action_2 = action+ d * dt
|
| 304 |
+
denoised_2 = model(state, action_2, goal, sigmas[i + 1] * s_in,**extra_args)
|
| 305 |
+
d_2 = to_d( action_2, sigmas[i + 1], denoised_2)
|
| 306 |
+
d_prime = (d + d_2) / 2
|
| 307 |
+
action= action+ d_prime * dt
|
| 308 |
+
# scale if wanted
|
| 309 |
+
if scaler is not None:
|
| 310 |
+
action = scaler.clip_output(action)
|
| 311 |
+
return action
|
| 312 |
+
|
| 313 |
+
|
| 314 |
+
@torch.no_grad()
|
| 315 |
+
def sample_dpm_2(
|
| 316 |
+
model,
|
| 317 |
+
state,
|
| 318 |
+
action,
|
| 319 |
+
goal,
|
| 320 |
+
sigmas,
|
| 321 |
+
scaler=None,
|
| 322 |
+
extra_args=None,
|
| 323 |
+
callback=None,
|
| 324 |
+
disable=None,
|
| 325 |
+
s_churn=0.,
|
| 326 |
+
s_tmin=0.,
|
| 327 |
+
s_tmax=float('inf'),
|
| 328 |
+
s_noise=1.
|
| 329 |
+
):
|
| 330 |
+
"""
|
| 331 |
+
A sampler inspired by DPM-Solver-2 and Algorithm 2 from Karras et al. (2022).
|
| 332 |
+
SDE for S_churn!=0 and ODE otherwise
|
| 333 |
+
|
| 334 |
+
1.
|
| 335 |
+
|
| 336 |
+
Last denoising step is an Euler step
|
| 337 |
+
"""
|
| 338 |
+
extra_args = {} if extra_args is None else extra_args
|
| 339 |
+
s_in = action.new_ones([action.shape[0]])
|
| 340 |
+
for i in trange(len(sigmas) - 1, disable=disable):
|
| 341 |
+
# compute stochastic gamma if s_churn > 0:
|
| 342 |
+
gamma = min(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.
|
| 343 |
+
|
| 344 |
+
eps = torch.randn_like(action) * s_noise
|
| 345 |
+
sigma_hat = sigmas[i] * (gamma + 1)
|
| 346 |
+
# add noise to our current action sample in SDE case
|
| 347 |
+
if gamma > 0:
|
| 348 |
+
action = action + eps * (sigma_hat ** 2 - sigmas[i] ** 2) ** 0.5
|
| 349 |
+
# compute the derivative dx/dt at timestep t
|
| 350 |
+
denoised = model(state, action, goal, sigma_hat * s_in, **extra_args)
|
| 351 |
+
d = to_d(action, sigma_hat, denoised)
|
| 352 |
+
|
| 353 |
+
if callback is not None:
|
| 354 |
+
callback({'action': action, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
|
| 355 |
+
|
| 356 |
+
# if we are at the last timestep: use Euler method
|
| 357 |
+
if sigmas[i + 1] == 0:
|
| 358 |
+
# Euler method
|
| 359 |
+
dt = sigmas[i + 1] - sigma_hat
|
| 360 |
+
action = action + d * dt
|
| 361 |
+
else:
|
| 362 |
+
# use Heun 2nd order update step
|
| 363 |
+
sigma_mid = sigma_hat.log().lerp(sigmas[i + 1].log(), 0.5).exp()
|
| 364 |
+
dt_1 = sigma_mid - sigma_hat
|
| 365 |
+
dt_2 = sigmas[i + 1] - sigma_hat
|
| 366 |
+
action_2 = action + d * dt_1
|
| 367 |
+
denoised_2 = model(state, action_2, goal, sigma_mid * s_in, **extra_args)
|
| 368 |
+
d_2 = to_d( action_2, sigma_mid, denoised_2)
|
| 369 |
+
action = action + d_2 * dt_2
|
| 370 |
+
if scaler is not None:
|
| 371 |
+
action = scaler.clip_output(action)
|
| 372 |
+
return action
|
| 373 |
+
|
| 374 |
+
|
| 375 |
+
@torch.no_grad()
|
| 376 |
+
def sample_dpm_2_ancestral(model, state, action, goal, sigmas, scaler=None, extra_args=None, callback=None, disable=None, eta=1.):
|
| 377 |
+
"""
|
| 378 |
+
Ancestral sampling with DPM-Solver inspired second-order steps.
|
| 379 |
+
|
| 380 |
+
Ancestral sampling is based on the DDPM paper (https://arxiv.org/abs/2006.11239) generation process.
|
| 381 |
+
Song et al. (2021) show that ancestral sampling can be used to improve the performance of DDPM for its SDE formulation.
|
| 382 |
+
|
| 383 |
+
1. Compute dx_{i}/dt at the current timestep
|
| 384 |
+
|
| 385 |
+
"""
|
| 386 |
+
extra_args = {} if extra_args is None else extra_args
|
| 387 |
+
s_in = action.new_ones([action.shape[0]])
|
| 388 |
+
for i in trange(len(sigmas) - 1, disable=disable):
|
| 389 |
+
denoised = model(state, action, goal, sigmas[i] * s_in, **extra_args)
|
| 390 |
+
sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
|
| 391 |
+
if callback is not None:
|
| 392 |
+
callback({'x': action, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
|
| 393 |
+
d = to_d(action, sigmas[i], denoised)
|
| 394 |
+
if sigma_down == 0:
|
| 395 |
+
# Euler method
|
| 396 |
+
dt = sigma_down - sigmas[i]
|
| 397 |
+
action= action+ d * dt
|
| 398 |
+
else:
|
| 399 |
+
# DPM-Solver-2
|
| 400 |
+
sigma_mid = sigmas[i].log().lerp(sigma_down.log(), 0.5).exp()
|
| 401 |
+
dt_1 = sigma_mid - sigmas[i]
|
| 402 |
+
dt_2 = sigma_down - sigmas[i]
|
| 403 |
+
action_2 = action+ d * dt_1
|
| 404 |
+
denoised_2 = model(state, action_2, goal, sigma_mid * s_in, **extra_args)
|
| 405 |
+
d_2 = to_d( action_2, sigma_mid, denoised_2)
|
| 406 |
+
action= action+ d_2 * dt_2
|
| 407 |
+
action= action+ torch.randn_like(action) * sigma_up
|
| 408 |
+
if scaler is not None:
|
| 409 |
+
action = scaler.clip_output(action)
|
| 410 |
+
return action
|
| 411 |
+
|
| 412 |
+
|
| 413 |
+
def linear_multistep_coeff(order, t, i, j):
|
| 414 |
+
'''
|
| 415 |
+
Returns the coefficient of the j-th derivative of the i-th step of a linear multistep method.
|
| 416 |
+
'''
|
| 417 |
+
if order - 1 > i:
|
| 418 |
+
raise ValueError(f'Order {order} too high for step {i}')
|
| 419 |
+
def fn(tau):
|
| 420 |
+
prod = 1.
|
| 421 |
+
for k in range(order):
|
| 422 |
+
if j == k:
|
| 423 |
+
continue
|
| 424 |
+
prod *= (tau - t[i - k]) / (t[i - j] - t[i - k])
|
| 425 |
+
return prod
|
| 426 |
+
return integrate.quad(fn, t[i], t[i + 1], epsrel=1e-4)[0]
|
| 427 |
+
|
| 428 |
+
|
| 429 |
+
@torch.no_grad()
|
| 430 |
+
def sample_lms(
|
| 431 |
+
model,
|
| 432 |
+
state,
|
| 433 |
+
action,
|
| 434 |
+
goal,
|
| 435 |
+
sigmas,
|
| 436 |
+
scaler=None,
|
| 437 |
+
extra_args=None,
|
| 438 |
+
callback=None,
|
| 439 |
+
disable=None,
|
| 440 |
+
order=4
|
| 441 |
+
):
|
| 442 |
+
'''
|
| 443 |
+
A linear multistep sampler.
|
| 444 |
+
|
| 445 |
+
1. compute x_{t-1} using the current noise level
|
| 446 |
+
2. compute dx/dt at x_{t-1} using the current noise level
|
| 447 |
+
'''
|
| 448 |
+
extra_args = {} if extra_args is None else extra_args
|
| 449 |
+
s_in = action.new_ones([action.shape[0]])
|
| 450 |
+
sigmas_cpu = sigmas.detach().cpu().numpy()
|
| 451 |
+
ds = []
|
| 452 |
+
for i in trange(len(sigmas) - 1, disable=disable):
|
| 453 |
+
denoised = model(state, action, goal, sigmas[i] * s_in, **extra_args)
|
| 454 |
+
d = to_d(action, sigmas[i], denoised)
|
| 455 |
+
ds.append(d)
|
| 456 |
+
if len(ds) > order:
|
| 457 |
+
ds.pop(0)
|
| 458 |
+
if callback is not None:
|
| 459 |
+
callback({'x': action, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
|
| 460 |
+
cur_order = min(i + 1, order)
|
| 461 |
+
coeffs = [linear_multistep_coeff(cur_order, sigmas_cpu, i, j) for j in range(cur_order)]
|
| 462 |
+
action = action + sum(coeff * d for coeff, d in zip(coeffs, reversed(ds)))
|
| 463 |
+
if scaler is not None:
|
| 464 |
+
action = scaler.clip_output(action)
|
| 465 |
+
return action
|
| 466 |
+
|
| 467 |
+
|
| 468 |
+
@torch.no_grad()
|
| 469 |
+
def log_likelihood(model, state, action, goal, sigma_min, sigma_max, extra_args=None, atol=1e-4, rtol=1e-4):
|
| 470 |
+
'''
|
| 471 |
+
Computes the log-likelihood of actions
|
| 472 |
+
'''
|
| 473 |
+
extra_args = {} if extra_args is None else extra_args
|
| 474 |
+
s_in = action.new_ones([action.shape[0]])
|
| 475 |
+
v = torch.randint_like(action, 2) * 2 - 1
|
| 476 |
+
fevals = 0
|
| 477 |
+
def ode_fn(sigma, action):
|
| 478 |
+
nonlocal fevals
|
| 479 |
+
with torch.enable_grad():
|
| 480 |
+
action= action[0].detach().requires_grad_()
|
| 481 |
+
denoised = model(state, action, goal, sigma * s_in, **extra_args)
|
| 482 |
+
d = to_d(action, sigma, denoised)
|
| 483 |
+
fevals += 1
|
| 484 |
+
grad = torch.autograd.grad((d * v).sum(), action)[0]
|
| 485 |
+
d_ll = (v * grad).flatten(1).sum(1)
|
| 486 |
+
return d.detach(), d_ll
|
| 487 |
+
action_min = action, action.new_zeros([action.shape[0]])
|
| 488 |
+
t = action.new_tensor([sigma_min, sigma_max])
|
| 489 |
+
sol = odeint(ode_fn, action_min, t, atol=atol, rtol=rtol, method='dopri5')
|
| 490 |
+
latent, delta_ll = sol[0][-1], sol[1][-1]
|
| 491 |
+
ll_prior = torch.distributions.Normal(0, sigma_max).log_prob(latent).flatten(1).sum(1)
|
| 492 |
+
return ll_prior + delta_ll, {'fevals': fevals}
|
| 493 |
+
|
| 494 |
+
|
| 495 |
+
class PIDStepSizeController:
|
| 496 |
+
"""A PID controller for ODE adaptive step size control."""
|
| 497 |
+
def __init__(self, h, pcoeff, icoeff, dcoeff, order=1, accept_safety=0.81, eps=1e-8):
|
| 498 |
+
self.h = h
|
| 499 |
+
self.b1 = (pcoeff + icoeff + dcoeff) / order
|
| 500 |
+
self.b2 = -(pcoeff + 2 * dcoeff) / order
|
| 501 |
+
self.b3 = dcoeff / order
|
| 502 |
+
self.accept_safety = accept_safety
|
| 503 |
+
self.eps = eps
|
| 504 |
+
self.errs = []
|
| 505 |
+
|
| 506 |
+
def limiter(self, action):
|
| 507 |
+
return 1 + math.atan(action- 1)
|
| 508 |
+
|
| 509 |
+
def propose_step(self, error):
|
| 510 |
+
inv_error = 1 / (float(error) + self.eps)
|
| 511 |
+
if not self.errs:
|
| 512 |
+
self.errs = [inv_error, inv_error, inv_error]
|
| 513 |
+
self.errs[0] = inv_error
|
| 514 |
+
factor = self.errs[0] ** self.b1 * self.errs[1] ** self.b2 * self.errs[2] ** self.b3
|
| 515 |
+
factor = self.limiter(factor)
|
| 516 |
+
accept = factor >= self.accept_safety
|
| 517 |
+
if accept:
|
| 518 |
+
self.errs[2] = self.errs[1]
|
| 519 |
+
self.errs[1] = self.errs[0]
|
| 520 |
+
self.h *= factor
|
| 521 |
+
return accept
|
| 522 |
+
|
| 523 |
+
|
| 524 |
+
class DPMSolver(nn.Module):
|
| 525 |
+
"""DPM-Solver. See https://arxiv.org/abs/2206.00927."""
|
| 526 |
+
|
| 527 |
+
def __init__(self, model, extra_args=None, eps_callback=None, info_callback=None):
|
| 528 |
+
super().__init__()
|
| 529 |
+
self.model = model
|
| 530 |
+
self.extra_args = {} if extra_args is None else extra_args
|
| 531 |
+
self.eps_callback = eps_callback
|
| 532 |
+
self.info_callback = info_callback
|
| 533 |
+
|
| 534 |
+
def t(self, sigma):
|
| 535 |
+
return -sigma.log()
|
| 536 |
+
|
| 537 |
+
def sigma(self, t):
|
| 538 |
+
return t.neg().exp()
|
| 539 |
+
|
| 540 |
+
def eps(self, eps_cache, key, state, action, goal, t, *args, **kwargs):
|
| 541 |
+
if key in eps_cache:
|
| 542 |
+
return eps_cache[key], eps_cache
|
| 543 |
+
sigma = self.sigma(t) * action.new_ones([action.shape[0]])
|
| 544 |
+
eps = (action - self.model(state, action, goal, sigma, *args, **self.extra_args, **kwargs)) / self.sigma(t)
|
| 545 |
+
if self.eps_callback is not None:
|
| 546 |
+
self.eps_callback()
|
| 547 |
+
return eps, {key: eps, **eps_cache}
|
| 548 |
+
|
| 549 |
+
def dpm_solver_1_step(self, state, action, goal, t, t_next, eps_cache=None):
|
| 550 |
+
eps_cache = {} if eps_cache is None else eps_cache
|
| 551 |
+
h = t_next - t
|
| 552 |
+
eps, eps_cache = self.eps(eps_cache, 'eps', state, action, goal, t)
|
| 553 |
+
action_1 = action- self.sigma(t_next) * h.expm1() * eps
|
| 554 |
+
return action_1, eps_cache
|
| 555 |
+
|
| 556 |
+
def dpm_solver_2_step(self, state, action, goal, t, t_next, r1=1 / 2, eps_cache=None):
|
| 557 |
+
eps_cache = {} if eps_cache is None else eps_cache
|
| 558 |
+
h = t_next - t
|
| 559 |
+
eps, eps_cache = self.eps(eps_cache, 'eps', state, action, goal, t)
|
| 560 |
+
s1 = t + r1 * h
|
| 561 |
+
u1 = action - self.sigma(s1) * (r1 * h).expm1() * eps
|
| 562 |
+
eps_r1, eps_cache = self.eps(eps_cache, 'eps_r1', state, u1, goal, s1)
|
| 563 |
+
action_2 = action - self.sigma(t_next) * h.expm1() * eps - self.sigma(t_next) / (2 * r1) * h.expm1() * (eps_r1 - eps)
|
| 564 |
+
return action_2, eps_cache
|
| 565 |
+
|
| 566 |
+
def dpm_solver_3_step(self, state, action, goal, t, t_next, r1=1 / 3, r2=2 / 3, eps_cache=None):
|
| 567 |
+
eps_cache = {} if eps_cache is None else eps_cache
|
| 568 |
+
h = t_next - t
|
| 569 |
+
eps, eps_cache = self.eps(eps_cache, 'eps', state, action, goal, t)
|
| 570 |
+
s1 = t + r1 * h
|
| 571 |
+
s2 = t + r2 * h
|
| 572 |
+
u1 = action - self.sigma(s1) * (r1 * h).expm1() * eps
|
| 573 |
+
eps_r1, eps_cache = self.eps(eps_cache, 'eps_r1', state, u1, goal, s1)
|
| 574 |
+
u2 = action - self.sigma(s2) * (r2 * h).expm1() * eps - self.sigma(s2) * (r2 / r1) * ((r2 * h).expm1() / (r2 * h) - 1) * (eps_r1 - eps)
|
| 575 |
+
eps_r2, eps_cache = self.eps(eps_cache, 'eps_r2', state, u2, goal, s2)
|
| 576 |
+
action_3 = action - self.sigma(t_next) * h.expm1() * eps - self.sigma(t_next) / r2 * (h.expm1() / h - 1) * (eps_r2 - eps)
|
| 577 |
+
return action_3, eps_cache
|
| 578 |
+
|
| 579 |
+
def dpm_solver_fast(self, state, action, goal, t_start, t_end, nfe, eta=0., s_noise=1., noise_sampler=None):
|
| 580 |
+
noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
|
| 581 |
+
if not t_end > t_start and eta:
|
| 582 |
+
raise ValueError('eta must be 0 for reverse sampling')
|
| 583 |
+
|
| 584 |
+
m = math.floor(nfe / 3) + 1
|
| 585 |
+
ts = torch.linspace(t_start, t_end, m + 1, device=action.device)
|
| 586 |
+
|
| 587 |
+
if nfe % 3 == 0:
|
| 588 |
+
orders = [3] * (m - 2) + [2, 1]
|
| 589 |
+
else:
|
| 590 |
+
orders = [3] * (m - 1) + [nfe % 3]
|
| 591 |
+
|
| 592 |
+
for i in range(len(orders)):
|
| 593 |
+
eps_cache = {}
|
| 594 |
+
t, t_next = ts[i], ts[i + 1]
|
| 595 |
+
if eta:
|
| 596 |
+
sd, su = get_ancestral_step(self.sigma(t), self.sigma(t_next), eta)
|
| 597 |
+
t_next_ = torch.minimum(t_end, self.t(sd))
|
| 598 |
+
su = (self.sigma(t_next) ** 2 - self.sigma(t_next_) ** 2) ** 0.5
|
| 599 |
+
else:
|
| 600 |
+
t_next_, su = t_next, 0.
|
| 601 |
+
|
| 602 |
+
eps, eps_cache = self.eps(eps_cache, 'eps', state, action, goal, t)
|
| 603 |
+
denoised = action- self.sigma(t) * eps
|
| 604 |
+
if self.info_callback is not None:
|
| 605 |
+
self.info_callback({'x': action, 'i': i, 't': ts[i], 't_up': t, 'denoised': denoised})
|
| 606 |
+
|
| 607 |
+
if orders[i] == 1:
|
| 608 |
+
action, eps_cache = self.dpm_solver_1_step(state, action, goal, t, t_next_, eps_cache=eps_cache)
|
| 609 |
+
elif orders[i] == 2:
|
| 610 |
+
action, eps_cache = self.dpm_solver_2_step(state, action, goal, t, t_next_, eps_cache=eps_cache)
|
| 611 |
+
else:
|
| 612 |
+
action, eps_cache = self.dpm_solver_3_step(state, action, goal, t, t_next_, eps_cache=eps_cache)
|
| 613 |
+
|
| 614 |
+
action= action+ su * s_noise * noise_sampler(self.sigma(t), self.sigma(t_next))
|
| 615 |
+
|
| 616 |
+
return action
|
| 617 |
+
|
| 618 |
+
def dpm_solver_adaptive(self, state, action, goal, t_start, t_end, order=3, rtol=0.05, atol=0.0078, h_init=0.05, pcoeff=0., icoeff=1., dcoeff=0., accept_safety=0.81, eta=0., s_noise=1.):
|
| 619 |
+
noise_sampler = default_noise_sampler(action) if noise_sampler is None else noise_sampler
|
| 620 |
+
if order not in {2, 3}:
|
| 621 |
+
raise ValueError('order should be 2 or 3')
|
| 622 |
+
forward = t_end > t_start
|
| 623 |
+
if not forward and eta:
|
| 624 |
+
raise ValueError('eta must be 0 for reverse sampling')
|
| 625 |
+
h_init = abs(h_init) * (1 if forward else -1)
|
| 626 |
+
atol = torch.tensor(atol)
|
| 627 |
+
rtol = torch.tensor(rtol)
|
| 628 |
+
s = t_start
|
| 629 |
+
action_prev = action
|
| 630 |
+
accept = True
|
| 631 |
+
pid = PIDStepSizeController(h_init, pcoeff, icoeff, dcoeff, 1.5 if eta else order, accept_safety)
|
| 632 |
+
info = {'steps': 0, 'nfe': 0, 'n_accept': 0, 'n_reject': 0}
|
| 633 |
+
|
| 634 |
+
while s < t_end - 1e-5 if forward else s > t_end + 1e-5:
|
| 635 |
+
eps_cache = {}
|
| 636 |
+
t = torch.minimum(t_end, s + pid.h) if forward else torch.maximum(t_end, s + pid.h)
|
| 637 |
+
if eta:
|
| 638 |
+
sd, su = get_ancestral_step(self.sigma(s), self.sigma(t), eta)
|
| 639 |
+
t_ = torch.minimum(t_end, self.t(sd))
|
| 640 |
+
su = (self.sigma(t) ** 2 - self.sigma(t_) ** 2) ** 0.5
|
| 641 |
+
else:
|
| 642 |
+
t_, su = t, 0.
|
| 643 |
+
|
| 644 |
+
eps, eps_cache = self.eps(eps_cache, 'eps', state, action, goal, s)
|
| 645 |
+
denoised = action - self.sigma(s) * eps
|
| 646 |
+
|
| 647 |
+
if order == 2:
|
| 648 |
+
action_low, eps_cache = self.dpm_solver_1_step(state, action, goal, s, t_, eps_cache=eps_cache)
|
| 649 |
+
action_high, eps_cache = self.dpm_solver_2_step(state, action, goal, s, t_, eps_cache=eps_cache)
|
| 650 |
+
else:
|
| 651 |
+
action_low, eps_cache = self.dpm_solver_2_step(state, action, goal, s, t_, r1=1 / 3, eps_cache=eps_cache)
|
| 652 |
+
action_high, eps_cache = self.dpm_solver_3_step(state, action, goal, s, t_, eps_cache=eps_cache)
|
| 653 |
+
delta = torch.maximum(atol, rtol * torch.maximum( action_low.abs(), action_prev.abs()))
|
| 654 |
+
error = torch.linalg.norm(( action_low - action_high) / delta) / action.numel() ** 0.5
|
| 655 |
+
accept = pid.propose_step(error)
|
| 656 |
+
if accept:
|
| 657 |
+
action_prev = action_low
|
| 658 |
+
action = action_high + su * s_noise * noise_sampler(self.sigma(s), self.sigma(t))
|
| 659 |
+
s = t
|
| 660 |
+
info['n_accept'] += 1
|
| 661 |
+
else:
|
| 662 |
+
info['n_reject'] += 1
|
| 663 |
+
info['nfe'] += order
|
| 664 |
+
info['steps'] += 1
|
| 665 |
+
|
| 666 |
+
if self.info_callback is not None:
|
| 667 |
+
self.info_callback({'x': action, 'i': info['steps'] - 1, 't': s, 't_up': s, 'denoised': denoised, 'error': error, 'h': pid.h, **info})
|
| 668 |
+
|
| 669 |
+
return action, info
|
| 670 |
+
|
| 671 |
+
|
| 672 |
+
@torch.no_grad()
|
| 673 |
+
def sample_dpm_fast(
|
| 674 |
+
model,
|
| 675 |
+
state,
|
| 676 |
+
action,
|
| 677 |
+
goal,
|
| 678 |
+
sigma_min,
|
| 679 |
+
sigma_max,
|
| 680 |
+
n,
|
| 681 |
+
scaler=None,
|
| 682 |
+
extra_args=None,
|
| 683 |
+
callback=None,
|
| 684 |
+
disable=None,
|
| 685 |
+
eta=0.,
|
| 686 |
+
s_noise=1.,
|
| 687 |
+
noise_sampler=None
|
| 688 |
+
):
|
| 689 |
+
"""DPM-Solver-Fast (fixed step size). See https://arxiv.org/abs/2206.00927."""
|
| 690 |
+
if sigma_min <= 0 or sigma_max <= 0:
|
| 691 |
+
raise ValueError('sigma_min and sigma_maactionmust not be 0')
|
| 692 |
+
with tqdm(total=n, disable=disable) as pbar:
|
| 693 |
+
dpm_solver = DPMSolver(model, extra_args, eps_callback=pbar.update)
|
| 694 |
+
if callback is not None:
|
| 695 |
+
dpm_solver.info_callback = lambda info: callback({'sigma': dpm_solver.sigma(info['t']), 'sigma_hat': dpm_solver.sigma(info['t_up']), **info})
|
| 696 |
+
return dpm_solver.dpm_solver_fast(state, action, goal, dpm_solver.t(torch.tensor(sigma_max)), dpm_solver.t(torch.tensor(sigma_min)), n, eta, s_noise, noise_sampler)
|
| 697 |
+
|
| 698 |
+
|
| 699 |
+
@torch.no_grad()
def sample_dpmpp_2m(
    model,
    state,
    action,
    goal,
    sigmas,
    scaler=None,
    extra_args=None,
    callback=None,
    disable=None
):
    """DPM-Solver++(2M): deterministic second-order multistep sampler.

    Walks the sigma schedule in log-sigma time; once a previous denoised
    estimate exists, it is blended with the current one for a second-order
    update. First and final steps fall back to the first-order rule.
    `scaler` is accepted for signature uniformity but unused.
    """
    kwargs = {} if extra_args is None else extra_args
    batch_ones = action.new_ones([action.shape[0]])
    to_sigma = lambda t: t.neg().exp()
    to_t = lambda sigma: sigma.log().neg()
    prev_denoised = None

    for step in trange(len(sigmas) - 1, disable=disable):
        # Predict the clean action at the current noise level.
        denoised = model(state, action, goal, sigmas[step] * batch_ones, **kwargs)
        if callback is not None:
            callback({'action': action, 'i': step, 'sigma': sigmas[step], 'sigma_hat': sigmas[step], 'denoised': denoised})
        t_cur, t_nxt = to_t(sigmas[step]), to_t(sigmas[step + 1])
        h = t_nxt - t_cur
        ratio = to_sigma(t_nxt) / to_sigma(t_cur)
        if prev_denoised is None or sigmas[step + 1] == 0:
            # No history yet (or final step): plain first-order update.
            action = ratio * action - (-h).expm1() * denoised
        else:
            # Second-order multistep correction from the previous estimate.
            r = (t_cur - to_t(sigmas[step - 1])) / h
            blended = (1 + 1 / (2 * r)) * denoised - (1 / (2 * r)) * prev_denoised
            action = ratio * action - (-h).expm1() * blended
        prev_denoised = denoised
    return action
|
| 734 |
+
|
| 735 |
+
|
| 736 |
+
@torch.no_grad()
def sample_dpmpp_sde(
    model,
    state,
    action,
    goal,
    sigmas,
    extra_args=None,
    callback=None,
    disable=None,
    eta=1.,
    s_noise=1.,
    scaler=None,
    noise_sampler=None,
    r=1 / 2
):
    """DPM-Solver++ (stochastic).

    Two model evaluations per step (at t and an intermediate point s),
    each followed by an ancestral noise injection. The draws come from a
    shared stateful noise sampler, so evaluation order matters for
    reproducibility — do not reorder the sub-steps.
    """
    x = action
    # Brownian-tree sampler makes the noise a deterministic function of the
    # (sigma_from, sigma_to) interval; excludes zero sigmas from the range.
    sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
    noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max) if noise_sampler is None else noise_sampler
    extra_args = {} if extra_args is None else extra_args
    s_in = x.new_ones([x.shape[0]])
    # Work in log-sigma "time": t = -log(sigma).
    sigma_fn = lambda t: t.neg().exp()
    t_fn = lambda sigma: sigma.log().neg()

    for i in trange(len(sigmas) - 1, disable=disable):
        denoised = model(state, x, goal, sigmas[i] * s_in, **extra_args)
        if callback is not None:
            callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
        if sigmas[i + 1] == 0:
            # Euler method — final step to sigma 0 has no stochastic part.
            d = to_d(x, sigmas[i], denoised)
            dt = sigmas[i + 1] - sigmas[i]
            x = x + d * dt
        else:
            # DPM-Solver++
            t, t_next = t_fn(sigmas[i]), t_fn(sigmas[i + 1])
            h = t_next - t
            s = t + h * r  # intermediate evaluation point (r defaults to midpoint)
            fac = 1 / (2 * r)

            # Step 1: half-step to s with ancestral noise of magnitude su.
            sd, su = get_ancestral_step(sigma_fn(t), sigma_fn(s), eta)
            s_ = t_fn(sd)
            x_2 = (sigma_fn(s_) / sigma_fn(t)) * x - (t - s_).expm1() * denoised
            x_2 = x_2 + noise_sampler(sigma_fn(t), sigma_fn(s)) * s_noise * su
            denoised_2 = model(state, x_2, goal, sigma_fn(s) * s_in, **extra_args)

            # Step 2: full step using the fac-weighted blend of both estimates.
            sd, su = get_ancestral_step(sigma_fn(t), sigma_fn(t_next), eta)
            t_next_ = t_fn(sd)
            denoised_d = (1 - fac) * denoised + fac * denoised_2
            x = (sigma_fn(t_next_) / sigma_fn(t)) * x - (t - t_next_).expm1() * denoised_d
            x = x + noise_sampler(sigma_fn(t), sigma_fn(t_next)) * s_noise * su
        # NOTE(review): indentation was lost in this dump — clip placement
        # (per step, inside the loop) inferred from the sibling samplers.
        if scaler is not None:
            x = scaler.clip_output(x)
    return x
|
| 793 |
+
|
| 794 |
+
|
| 795 |
+
|
| 796 |
+
@torch.no_grad()
def sample_dpmpp_2_with_lms(
    model,
    state,
    action,
    goal,
    sigmas,
    scaler=None,
    extra_args=None,
    callback=None,
    disable=None
):
    """DPM-Solver++(2M).

    NOTE: despite the "_with_lms" name, this implementation is currently
    identical to sample_dpmpp_2m (second-order multistep update).
    `scaler` is accepted for signature uniformity but unused.
    """
    kwargs = {} if extra_args is None else extra_args
    batch_ones = action.new_ones([action.shape[0]])
    to_sigma = lambda t: t.neg().exp()
    to_t = lambda sigma: sigma.log().neg()
    prev_denoised = None

    for step in trange(len(sigmas) - 1, disable=disable):
        # Predict the clean action at the current noise level.
        denoised = model(state, action, goal, sigmas[step] * batch_ones, **kwargs)
        if callback is not None:
            callback({'action': action, 'i': step, 'sigma': sigmas[step], 'sigma_hat': sigmas[step], 'denoised': denoised})
        t_cur, t_nxt = to_t(sigmas[step]), to_t(sigmas[step + 1])
        h = t_nxt - t_cur
        decay = to_sigma(t_nxt) / to_sigma(t_cur)
        if prev_denoised is None or sigmas[step + 1] == 0:
            # First or last step: first-order update.
            action = decay * action - (-h).expm1() * denoised
        else:
            # Blend with the previous denoised estimate (multistep correction).
            r = (t_cur - to_t(sigmas[step - 1])) / h
            mix = (1 + 1 / (2 * r)) * denoised - (1 / (2 * r)) * prev_denoised
            action = decay * action - (-h).expm1() * mix
        prev_denoised = denoised
    return action
|
| 831 |
+
|
| 832 |
+
|
| 833 |
+
@torch.no_grad()
def sample_dpm_adaptive(
    model,
    state,
    action,
    goal,
    sigma_min,
    sigma_max,
    extra_args=None,
    callback=None,
    disable=None,
    order=3,
    rtol=0.05,
    atol=0.0078,
    h_init=0.05,
    pcoeff=0.,
    icoeff=1.,
    dcoeff=0.,
    accept_safety=0.81,
    eta=0.,
    s_noise=1.,
    return_info=False
):
    """
    DPM-Solver-12 and 23 (adaptive step size).

    See https://arxiv.org/abs/2206.00927.

    Args:
        model: denoiser called as model(state, action, goal, sigma, **extra_args).
        state, action, goal: conditioning state, initial noisy action, goal.
        sigma_min, sigma_max: noise range endpoints; both must be > 0.
        order, rtol, atol, h_init, pcoeff, icoeff, dcoeff, accept_safety:
            PID step-size-controller settings forwarded to the solver.
        eta, s_noise: stochasticity controls.
        return_info: if True, also return the solver's info dict.

    Returns:
        The sampled action, or (action, info) when return_info is True.

    Raises:
        ValueError: if sigma_min or sigma_max is not strictly positive.
    """
    if sigma_min <= 0 or sigma_max <= 0:
        # Fixed: message was garbled ("sigma_max action nmust") by a bad rename.
        raise ValueError('sigma_min and sigma_max must not be 0')
    with tqdm(disable=disable) as pbar:
        dpm_solver = DPMSolver(model, extra_args, eps_callback=pbar.update)
        if callback is not None:
            dpm_solver.info_callback = lambda info: callback({'sigma': dpm_solver.sigma(info['t']), 'sigma_hat': dpm_solver.sigma(info['t_up']), **info})
        action, info = dpm_solver.dpm_solver_adaptive(state, action, goal, dpm_solver.t(torch.tensor(sigma_max)), dpm_solver.t(torch.tensor(sigma_min)), order, rtol, atol, h_init, pcoeff, icoeff, dcoeff, accept_safety, eta, s_noise)
    if return_info:
        return action, info
    return action
|
| 871 |
+
|
| 872 |
+
|
| 873 |
+
@torch.no_grad()
def sample_dpmpp_2s_ancestral(
    model,
    state,
    action,
    goal,
    sigmas,
    scaler=None,
    extra_args=None,
    callback=None,
    disable=None,
    eta=1.,
    s_noise=1.,
    noise_sampler=None
):
    """Ancestral sampling combined with DPM-Solver++(2S) second-order steps.

    Each step performs a deterministic 2S update down to sigma_down, then
    injects fresh noise of magnitude sigma_up (ancestral step). When the
    solver target is zero, a plain Euler step is used instead.
    """
    kwargs = {} if extra_args is None else extra_args
    draw_noise = default_noise_sampler(action) if noise_sampler is None else noise_sampler
    batch_ones = action.new_ones([action.shape[0]])
    to_sigma = lambda t: t.neg().exp()
    to_t = lambda sigma: sigma.log().neg()

    for step in trange(len(sigmas) - 1, disable=disable):
        denoised = model(state, action, goal, sigmas[step] * batch_ones, **kwargs)
        sigma_down, sigma_up = get_ancestral_step(sigmas[step], sigmas[step + 1], eta=eta)
        if callback is not None:
            callback({'action': action, 'i': step, 'sigma': sigmas[step], 'sigma_hat': sigmas[step], 'denoised': denoised})
        if sigma_down == 0:
            # Euler fallback when the deterministic target is zero noise.
            slope = to_d(action, sigmas[step], denoised)
            action = action + slope * (sigma_down - sigmas[step])
        else:
            # DPM-Solver++(2S): midpoint evaluation, then the full step.
            t_cur, t_nxt = to_t(sigmas[step]), to_t(sigma_down)
            h = t_nxt - t_cur
            t_mid = t_cur + h / 2
            midpoint = (to_sigma(t_mid) / to_sigma(t_cur)) * action - (-h / 2).expm1() * denoised
            denoised_mid = model(state, midpoint, goal, to_sigma(t_mid) * batch_ones, **kwargs)
            action = (to_sigma(t_nxt) / to_sigma(t_cur)) * action - (-h).expm1() * denoised_mid
        # Ancestral noise injection back up to sigmas[step + 1].
        action = action + draw_noise(sigmas[step], sigmas[step + 1]) * s_noise * sigma_up
        if scaler is not None:
            action = scaler.clip_output(action)
    return action
|
| 920 |
+
|
| 921 |
+
|
| 922 |
+
@torch.no_grad()
def sample_ddim(
    model,
    state,
    action,
    goal,
    sigmas,
    scaler=None,
    extra_args=None,
    callback=None,
    disable=None,
    eta=1.,
):
    """DPM-Solver-1 sampler (equivalent to DDIM).

    First-order deterministic update in log-sigma time: at each step the
    model predicts the clean action and the state is moved toward it by
    the exact exponential-integrator rule.

    Args:
        model: denoiser called as model(state, action, goal, sigma, **extra_args).
        state, action, goal: conditioning state, initial noisy action, goal.
        sigmas: monotone noise schedule; len(sigmas) - 1 steps are taken.
        scaler, eta: accepted for a uniform sampler signature; unused here.
        extra_args: extra kwargs forwarded to the model.
        callback: optional per-step info callback.
        disable: passed to trange to disable the progress bar.

    Returns:
        The denoised action after the final step.
    """
    extra_args = {} if extra_args is None else extra_args
    s_in = action.new_ones([action.shape[0]])
    sigma_fn = lambda t: t.neg().exp()
    t_fn = lambda sigma: sigma.log().neg()
    # Removed unused `old_denoised` left over from the 2M variant.

    for i in trange(len(sigmas) - 1, disable=disable):
        # Predict the clean action at the current noise level.
        denoised = model(state, action, goal, sigmas[i] * s_in, **extra_args)
        if callback is not None:
            callback({'action': action, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
        t, t_next = t_fn(sigmas[i]), t_fn(sigmas[i + 1])
        h = t_next - t
        action = (sigma_fn(t_next) / sigma_fn(t)) * action - (-h).expm1() * denoised
    return action
|
| 952 |
+
|
| 953 |
+
|
| 954 |
+
|
| 955 |
+
@torch.no_grad()
def sample_dpmpp_2s(
    model,
    state,
    action,
    goal,
    sigmas,
    scaler=None,
    extra_args=None,
    callback=None,
    disable=None,
    eta=1.,
):
    """DPM-Solver++(2S): deterministic single-step second-order sampler.

    Each step evaluates the model twice (midpoint rule in log-sigma time);
    the final step, whose target sigma is zero, degenerates to an Euler
    update. `eta` is accepted for signature uniformity but unused.
    """
    kwargs = {} if extra_args is None else extra_args
    to_sigma = lambda t: t.neg().exp()
    to_t = lambda sigma: sigma.log().neg()
    batch_ones = action.new_ones([action.shape[0]])
    for step in trange(len(sigmas) - 1, disable=disable):
        denoised = model(state, action, goal, sigmas[step] * batch_ones, **kwargs)
        if callback is not None:
            callback({'action': action, 'i': step, 'sigma': sigmas[step], 'sigma_hat': sigmas[step], 'denoised': denoised})
        if sigmas[step + 1] == 0:
            # Euler fallback for the last step.
            slope = to_d(action, sigmas[step], denoised)
            action = action + slope * (sigmas[step + 1] - sigmas[step])
        else:
            # Second-order midpoint update.
            t_cur, t_nxt = to_t(sigmas[step]), to_t(sigmas[step + 1])
            h = t_nxt - t_cur
            t_mid = t_cur + h / 2
            midpoint = (to_sigma(t_mid) / to_sigma(t_cur)) * action - (-h / 2).expm1() * denoised
            denoised_mid = model(state, midpoint, goal, to_sigma(t_mid) * batch_ones, **kwargs)
            action = (to_sigma(t_nxt) / to_sigma(t_cur)) * action - (-h).expm1() * denoised_mid
        if scaler is not None:
            action = scaler.clip_output(action)
    return action
|
| 995 |
+
|
| 996 |
+
|
| 997 |
+
def make_sample_contour_plot(actions, n_steps, file_store_path):
    """Save one figure of the sampled actions per diffusion step.

    The original body iterated `for idx, step in range(n_steps)`, which
    raises TypeError (range yields plain ints, not pairs), referenced
    `actions` as a no-op bare statement, and never saved or closed any
    figure. Each step's figure is now plotted, written, and closed.

    Args:
        actions: indexable collection of per-step action arrays
            (assumed plottable with Axes.plot — TODO confirm intended chart).
        n_steps: number of diffusion steps to visualize.
        file_store_path: directory in which the PNG files are written.
    """
    for idx in range(n_steps):
        fig, axs = plt.subplots()
        axs.plot(actions[idx])
        store_path = os.path.join(file_store_path, f'action_visualization_step_{idx}.png')
        fig.savefig(store_path)
        plt.close(fig)  # free the figure so repeated calls don't accumulate
|
| 1005 |
+
|
| 1006 |
+
|
| 1007 |
+
|
code/policy_models/edm_diffusion/score_wrappers.py
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from torch import nn
|
| 2 |
+
from .utils import append_dims
|
| 3 |
+
from policy_models.module.diffusion_decoder import DiffusionTransformer
|
| 4 |
+
|
| 5 |
+
'''
|
| 6 |
+
Wrappers for the score-based models based on Karras et al. 2022
|
| 7 |
+
They are used to get improved scaling of different noise levels, which
|
| 8 |
+
improves training stability and model performance
|
| 9 |
+
|
| 10 |
+
Code is adapted from:
|
| 11 |
+
|
| 12 |
+
https://github.com/crowsonkb/k-diffusion/blob/master/k_diffusion/layers.py
|
| 13 |
+
'''
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class GCDenoiser(nn.Module):
    """Karras et al. (EDM) preconditioner around a goal-conditioned
    diffusion transformer.

    Rescales the inner network's input and output per noise level so the
    model always operates on roughly unit-variance signals, which improves
    training stability across sigmas.

    Args:
        action_dim, obs_dim, goal_dim, proprio_dim: feature dimensions.
        num_tokens: number of observation tokens for the transformer.
        goal_window_size, obs_seq_len, act_seq_len: sequence lengths.
        device: torch device for the inner model.
        sigma_data: data standard deviation used by the scalings (default 1.0).
    """

    def __init__(self, action_dim, obs_dim, goal_dim, num_tokens, goal_window_size, obs_seq_len, act_seq_len, device, sigma_data=1., proprio_dim=8):
        super().__init__()
        self.sigma_data = sigma_data
        # Transformer hyper-parameters below are fixed by this wrapper.
        self.inner_model = DiffusionTransformer(
            action_dim=action_dim,
            obs_dim=obs_dim,
            goal_dim=goal_dim,
            proprio_dim=proprio_dim,
            goal_conditioned=True,
            embed_dim=384,
            n_dec_layers=4,
            n_enc_layers=4,
            n_obs_token=num_tokens,
            goal_seq_len=goal_window_size,
            obs_seq_len=obs_seq_len,
            action_seq_len=act_seq_len,
            embed_pdrob=0,
            goal_drop=0,
            attn_pdrop=0.3,
            resid_pdrop=0.1,
            mlp_pdrop=0.05,
            n_heads=8,
            device=device,
            use_mlp_goal=True,
        )

    def get_scalings(self, sigma):
        """Return the EDM preconditioning factors (c_skip, c_out, c_in) for *sigma*."""
        var_sum = sigma ** 2 + self.sigma_data ** 2
        c_skip = self.sigma_data ** 2 / var_sum
        c_out = sigma * self.sigma_data / var_sum ** 0.5
        c_in = 1 / var_sum ** 0.5
        return c_skip, c_out, c_in

    def loss(self, state, action, goal, noise, sigma, **kwargs):
        """Compute the preconditioned denoising MSE loss.

        Returns:
            (scalar loss, raw inner-model output) — note the loss is the MSE
            against the rescaled target, per the EDM formulation.
        """
        c_skip, c_out, c_in = [append_dims(factor, action.ndim) for factor in self.get_scalings(sigma)]
        noised = action + noise * append_dims(sigma, action.ndim)
        prediction = self.inner_model(state, noised * c_in, goal, sigma, **kwargs)
        target = (action - c_skip * noised) / c_out
        return (prediction - target).pow(2).flatten(1).mean(), prediction

    def forward(self, state, action, goal, sigma, **kwargs):
        """Denoise *action* at noise level *sigma* with EDM pre/post scaling."""
        c_skip, c_out, c_in = [append_dims(factor, action.ndim) for factor in self.get_scalings(sigma)]
        return self.inner_model(state, action * c_in, goal, sigma, **kwargs) * c_out + action * c_skip

    def forward_context_only(self, state, action, goal, sigma, **kwargs):
        """Run only the encoder part of the inner model (context features)."""
        c_skip, c_out, c_in = [append_dims(factor, action.ndim) for factor in self.get_scalings(sigma)]
        return self.inner_model.forward_enc_only(state, action * c_in, goal, sigma, **kwargs)

    def get_params(self):
        """Return the trainable parameters of the inner model."""
        return self.inner_model.parameters()
|
code/policy_models/edm_diffusion/utils.py
ADDED
|
@@ -0,0 +1,203 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import math
|
| 3 |
+
import torch.nn as nn
|
| 4 |
+
import numpy as np
|
| 5 |
+
import einops
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def return_time_sigma_embedding_model(embedding_type, time_embed_dim, device):
    """Return a time/sigma embedding module for the requested type.

    Args:
        embedding_type: one of 'GaussianFourier', 'Sinusoidal', 'FourierFeatures'.
        time_embed_dim: output dimension of the embedding.
        device: torch device for the module.

    Returns:
        The constructed embedding nn.Module.

    Raises:
        ValueError: if *embedding_type* is not one of the supported names.
    """
    if embedding_type == 'GaussianFourier':
        return GaussianFourierEmbedding(time_embed_dim, device)
    if embedding_type == 'Sinusoidal':
        return SinusoidalPosEmbedding(time_embed_dim, device)
    if embedding_type == 'FourierFeatures':
        return FourierFeatures(time_embed_dim, device)
    # Fixed typos in the original message ('avaiable', 'chose').
    raise ValueError('Embedding not available, please choose an existing one!')
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class GaussianFourierProjection(nn.Module):
    """Encode scalar time steps with fixed random Fourier features.

    The projection frequencies are drawn once at construction and frozen
    (requires_grad=False), so the optimizer never updates them.
    """

    def __init__(self, embed_dim, scale=30.):
        super().__init__()
        # Frozen random frequencies, half the output width each for sin/cos.
        self.W = nn.Parameter(torch.randn(embed_dim // 2) * scale, requires_grad=False)

    def forward(self, x):
        """Map a (batch,) tensor of scalars to (batch, embed_dim) features."""
        projected = x[:, None] * self.W[None, :] * 2 * np.pi
        return torch.cat([torch.sin(projected), torch.cos(projected)], dim=-1)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class FourierFeatures(nn.Module):
    """Fixed (non-trainable) Fourier feature embedding of scalar inputs.

    The frequency matrix is a registered buffer, so it is saved with the
    module's state dict but never trained.
    """

    def __init__(self, time_embed_dim, device, in_features=1, std=1.):
        super().__init__()
        self.device = device
        assert time_embed_dim % 2 == 0
        self.register_buffer('weight', torch.randn([time_embed_dim // 2, in_features]) * std
                             )

    def forward(self, input):
        """Return (batch, time_embed_dim) cos/sin features for *input*."""
        if len(input.shape) == 1:
            # Promote a flat batch of scalars to shape (batch, 1).
            input = input[:, None]
        f = 2 * math.pi * input @ self.weight.T
        return torch.cat([f.cos(), f.sin()], dim=-1).to(self.device)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
class GaussianFourierEmbedding(nn.Module):
    """Gaussian Fourier projection followed by a small MLP head.

    Projects scalar timesteps to random Fourier features, then refines
    them with a Linear -> Mish -> Linear bottleneck back to time_embed_dim.
    """

    def __init__(self, time_embed_dim, device):
        super().__init__()
        self.t_dim = time_embed_dim
        self.embed = nn.Sequential(
            GaussianFourierProjection(embed_dim=time_embed_dim),
            nn.Linear(time_embed_dim, 2 * time_embed_dim),
            nn.Mish(),
            nn.Linear(2 * time_embed_dim, time_embed_dim),
        ).to(device)

    def forward(self, t):
        """Embed a (batch,) tensor of timesteps into (batch, time_embed_dim)."""
        return self.embed(t)
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
class SinusoidalPosEmbedding(nn.Module):
    """Sinusoidal timestep embedding refined by a small MLP head.

    Wraps SinusoidalPosEmb with a Linear -> Mish -> Linear stack mapping
    back to time_embed_dim.
    """

    def __init__(self, time_embed_dim, device):
        super().__init__()
        self.device = device
        self.embed = nn.Sequential(
            SinusoidalPosEmb(time_embed_dim),
            nn.Linear(time_embed_dim, time_embed_dim * 2),
            nn.Mish(),
            nn.Linear(time_embed_dim * 2, time_embed_dim),
        ).to(self.device)

    def forward(self, t):
        """Embed a (batch,) tensor of timesteps into (batch, time_embed_dim)."""
        return self.embed(t)
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
class PositionalEncoding(nn.Module):
    """Classic fixed sine/cosine positional encoding (Vaswani et al. 2017).

    Precomputes a (max_len, 1, d_model) table added to the input along the
    first (sequence) dimension, followed by dropout.
    """

    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)

        table = torch.zeros(max_len, d_model)
        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # Geometric frequency ladder: even columns get sin, odd columns cos.
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-np.log(10000.0) / d_model))
        table[:, 0::2] = torch.sin(positions * div_term)
        table[:, 1::2] = torch.cos(positions * div_term)
        # Shape (max_len, 1, d_model) so it broadcasts over the batch axis.
        table = table.unsqueeze(0).transpose(0, 1)
        self.register_buffer('pe', table)

    def forward(self, x):
        # not used in the final model
        x = x + self.pe[:x.shape[0], :]
        return self.dropout(x)
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
class SinusoidalPosEmb(nn.Module):
    """Sinusoidal embedding of a 1-D batch of positions/timesteps."""

    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, x):
        """Map a (batch,) tensor to (batch, dim): first half sin, second half cos."""
        half = self.dim // 2
        # Frequencies fall geometrically from 1 down to ~1/10000.
        freq_step = math.log(10000) / (half - 1)
        freqs = torch.exp(torch.arange(half, device=x.device) * -freq_step)
        angles = x[:, None] * freqs[None, :]
        return torch.cat((angles.sin(), angles.cos()), dim=-1)
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
class InputEncoder(nn.Module):
    """Linear projection of raw inputs into the model's latent width."""

    def __init__(self, input_dim, latent_dim):
        super().__init__()
        self.input_dim = input_dim
        self.latent_dim = latent_dim
        self.emb = nn.Linear(self.input_dim, self.latent_dim)

    def forward(self, x):
        """Project (..., input_dim) inputs to (..., latent_dim)."""
        return self.emb(x)
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
class TEncoder(nn.Module):
    """Linear projection for time-like inputs into the latent width.

    NOTE: structurally identical to InputEncoder; kept as a separate class
    for configuration-by-name elsewhere in the codebase.
    """

    def __init__(self, input_dim, latent_dim):
        super().__init__()
        self.input_dim = input_dim
        self.latent_dim = latent_dim
        self.emb = nn.Linear(self.input_dim, self.latent_dim)

    def forward(self, x):
        """Project (..., input_dim) inputs to (..., latent_dim)."""
        return self.emb(x)
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
def append_dims(x, target_dims):
    """Right-pad *x* with singleton dimensions until it has *target_dims* dims.

    Raises:
        ValueError: if *x* already has more than *target_dims* dimensions.
    """
    extra = target_dims - x.ndim
    if extra < 0:
        raise ValueError(f'input has {x.ndim} dims but target_dims is {target_dims}, which is less')
    return x[(...,) + (None,) * extra]
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
def rand_log_normal(shape, loc=0., scale=1., device='cpu', dtype=torch.float32):
    """Draws samples from a lognormal distribution."""
    gaussian = torch.randn(shape, device=device, dtype=dtype)
    # exp of an affine-transformed normal is lognormal by definition.
    return (gaussian * scale + loc).exp()
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
def rand_log_logistic(shape, loc=0., scale=1., min_value=0., max_value=float('inf'), device='cpu', dtype=torch.float32):
    """Draws samples from an optionally truncated log-logistic distribution."""
    # Work in float64 so the CDF endpoints stay accurate near the tails.
    lo = torch.as_tensor(min_value, device=device, dtype=torch.float64)
    hi = torch.as_tensor(max_value, device=device, dtype=torch.float64)
    cdf_lo = lo.log().sub(loc).div(scale).sigmoid()
    cdf_hi = hi.log().sub(loc).div(scale).sigmoid()
    # Inverse-CDF sampling restricted to the truncated probability mass.
    u = torch.rand(shape, device=device, dtype=torch.float64) * (cdf_hi - cdf_lo) + cdf_lo
    return u.logit().mul(scale).add(loc).exp().to(dtype)
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
def rand_log_uniform(shape, min_value, max_value, device='cpu', dtype=torch.float32):
    """Draws samples from a log-uniform distribution."""
    log_lo = math.log(min_value)
    log_hi = math.log(max_value)
    # Uniform in log space, exponentiated back.
    u = torch.rand(shape, device=device, dtype=dtype)
    return (u * (log_hi - log_lo) + log_lo).exp()
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
def rand_v_diffusion(shape, sigma_data=1., min_value=0., max_value=float('inf'), device='cpu', dtype=torch.float32):
    """Draws samples from a truncated v-diffusion training timestep distribution."""
    # Map the truncation bounds through the arctan CDF of the v-diffusion prior.
    cdf_lo = math.atan(min_value / sigma_data) * 2 / math.pi
    cdf_hi = math.atan(max_value / sigma_data) * 2 / math.pi
    u = torch.rand(shape, device=device, dtype=dtype) * (cdf_hi - cdf_lo) + cdf_lo
    return torch.tan(u * math.pi / 2) * sigma_data
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
def rand_split_log_normal(shape, loc, scale_1, scale_2, device='cpu', dtype=torch.float32):
    """Draws samples from a split lognormal distribution."""
    magnitude = torch.randn(shape, device=device, dtype=dtype).abs()
    side = torch.rand(shape, device=device, dtype=dtype)
    left = magnitude * -scale_1 + loc
    right = magnitude * scale_2 + loc
    # Pick the left branch with probability scale_1 / (scale_1 + scale_2).
    ratio = scale_1 / (scale_1 + scale_2)
    return torch.where(side < ratio, left, right).exp()
|
| 192 |
+
|
| 193 |
+
# Function to sample from discrete values
|
| 194 |
+
def rand_discrete(shape, values, device='cpu', dtype=torch.float32):
    """Draws samples from the given discrete values."""
    # Sample indices uniformly, then gather the corresponding values.
    idx = torch.randint(0, len(values), shape, device=device)
    return torch.index_select(values, 0, idx).to(dtype)
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
def rand_uniform(shape, min_value, max_value, device='cpu', dtype=torch.float32):
    """Draws samples from a uniform distribution."""
    u = torch.rand(shape, device=device, dtype=dtype)
    return u * (max_value - min_value) + min_value
|
code/policy_models/losses/__pycache__/step_unet_mse.cpython-310.pyc
ADDED
|
Binary file (7.6 kB). View file
|
|
|
code/policy_models/losses/__pycache__/step_unet_mse.cpython-39.pyc
ADDED
|
Binary file (7.56 kB). View file
|
|
|
code/policy_models/losses/step_unet_mse.py
ADDED
|
@@ -0,0 +1,373 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn.functional as F
|
| 3 |
+
from einops import rearrange, repeat
|
| 4 |
+
import numpy as np
|
| 5 |
+
import random
|
| 6 |
+
|
| 7 |
+
def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
    """Build a 1-D sin/cos positional embedding table.

    Args:
        embed_dim: output dimension per position (must be even).
        pos: positions to encode, any shape; flattened to (M,).

    Returns:
        (M, embed_dim) array — first half sine features, second half cosine.
    """
    assert embed_dim % 2 == 0
    freqs = np.arange(embed_dim // 2, dtype=np.float64)
    freqs /= embed_dim / 2.
    freqs = 1. / 10000 ** freqs  # (D/2,)

    flat_pos = pos.reshape(-1)  # (M,)
    angles = np.einsum('m,d->md', flat_pos, freqs)  # (M, D/2) outer product

    return np.concatenate([np.sin(angles), np.cos(angles)], axis=1)  # (M, D)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def encode_text(texts, tokenizer, text_encoder, img_cond=None, img_cond_mask=None, img_encoder=None, position_encode=True, use_clip=False, max_length=20):
    """Encode text prompts, optionally fused with an image embedding.

    Args:
        texts: list of prompt strings.
        tokenizer / text_encoder: HF tokenizer and text model exposing
            `last_hidden_state`.
        img_cond: optional image batch for `img_encoder`; a singleton frame
            dim is squeezed if present.
        img_cond_mask: boolean mask selecting samples whose image embedding
            is zeroed out (conditioning dropout).
        img_encoder: optional CLIP-style vision model exposing `.image_embeds`.
        position_encode: when True (CLIP path only), add fixed sin/cos
            positional embeddings to the token features.
        use_clip: select the CLIP path (512-d tokens, concatenated up to
            1024) instead of the default 1024-d encoder path.
        max_length: token padding length for the CLIP path.

    Returns:
        Tensor of shape (batch, seq_len, 1024); no gradients are tracked.
    """
    with torch.no_grad():
        if use_clip:
            inputs = tokenizer(texts, padding='max_length', return_tensors="pt", truncation=True, max_length=max_length).to(text_encoder.device)
            outputs = text_encoder(**inputs)
            encoder_hidden_states = outputs.last_hidden_state  # (batch, max_length, 512)
            if position_encode:
                embed_dim, pos_num = encoder_hidden_states.shape[-1], encoder_hidden_states.shape[1]
                pos = np.arange(pos_num, dtype=np.float64)

                # Fix: keep the flag and the embedding in separate names (the
                # original reassigned the `position_encode` parameter itself).
                pos_emb = get_1d_sincos_pos_embed_from_grid(embed_dim, pos)
                pos_emb = torch.tensor(pos_emb, device=encoder_hidden_states.device, dtype=encoder_hidden_states.dtype, requires_grad=False)

                # Fix: out-of-place add so the encoder's own output tensor is
                # not mutated in place.
                encoder_hidden_states = encoder_hidden_states + pos_emb
            assert encoder_hidden_states.shape[-1] == 512

            if img_encoder is not None:
                assert img_cond is not None
                assert img_cond_mask is not None
                img_cond = img_cond.to(img_encoder.device)
                if len(img_cond.shape) == 5:
                    img_cond = img_cond.squeeze(1)  # drop singleton frame dim

                img_hidden_states = img_encoder(img_cond).image_embeds
                img_hidden_states[img_cond_mask] = 0.0  # drop conditioning for masked samples
                # Broadcast the single image token across the text sequence.
                img_hidden_states = img_hidden_states.unsqueeze(1).expand(-1, encoder_hidden_states.shape[1], -1)
                assert img_hidden_states.shape[-1] == 512
                encoder_hidden_states = torch.cat([encoder_hidden_states, img_hidden_states], dim=-1)
                assert encoder_hidden_states.shape[-1] == 1024
            else:
                # No image conditioning: duplicate text features to reach 1024 dims.
                encoder_hidden_states = torch.cat([encoder_hidden_states, encoder_hidden_states], dim=-1)

        else:
            inputs = tokenizer(texts, padding='max_length', return_tensors="pt", truncation=True, max_length=32).to(text_encoder.device)
            outputs = text_encoder(**inputs)
            encoder_hidden_states = outputs.last_hidden_state  # (batch, 32, 1024)
            assert encoder_hidden_states.shape[1:] == (32, 1024)

    return encoder_hidden_states
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def unwrap_unet(unet):
    """Return the underlying model when `unet` is wrapped (e.g. DDP's `.module`)."""
    return unet.module if hasattr(unet, "module") else unet
|
| 74 |
+
|
| 75 |
+
def _step_unet(unet, sample, timestep, encoder_hidden_states, added_time_ids, use_layer_idx=5, all_layer=False, complete=False):
    """Direct UNet forward pass with early stopping at specified layer.

    Mirrors a diffusers spatio-temporal UNet forward, but can exit inside the
    decoder to expose intermediate features instead of the final prediction.

    Args:
        unet: (possibly DDP-wrapped) spatio-temporal UNet.
        sample: latents of shape (batch, frames, C, H, W).
        timestep: scalar / 0-d tensor / per-batch diffusion timestep.
        encoder_hidden_states: conditioning, one row per batch item; repeated
            per frame below.
        added_time_ids: extra conditioning ids fed to `add_time_proj`.
        use_layer_idx: index of the up-block at which to stop when not `complete`.
        all_layer: when stopping early, channel-concatenate features from all
            earlier up-blocks (upsampled to a common resolution).
        complete: run the full UNet and return the final output instead of
            intermediate features.

    Returns:
        Tensor of shape (batch, frames, C', H', W'): intermediate decoder
        features (early exit) or the full UNet output.
    """
    unet = unwrap_unet(unet)
    # 1. time: normalize `timestep` to a 1-D tensor on the right device/dtype.
    timesteps = timestep
    if not torch.is_tensor(timesteps):
        # MPS does not support float64/int64 here.
        is_mps = sample.device.type == "mps"
        if isinstance(timestep, float):
            dtype = torch.float32 if is_mps else torch.float64
        else:
            dtype = torch.int32 if is_mps else torch.int64
        timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
    elif len(timesteps.shape) == 0:
        timesteps = timesteps[None].to(sample.device)

    # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
    batch_size, num_frames = sample.shape[:2]
    timesteps = timesteps.expand(batch_size)

    t_emb = unet.time_proj(timesteps)
    # `time_proj` may run in a different precision than the latents.
    t_emb = t_emb.to(dtype=sample.dtype)
    emb = unet.time_embedding(t_emb)

    # Auxiliary conditioning (fps / motion bucket ids) folded into the
    # time embedding.
    time_embeds = unet.add_time_proj(added_time_ids.flatten())
    time_embeds = time_embeds.reshape((batch_size, -1))
    time_embeds = time_embeds.to(emb.dtype)
    aug_emb = unet.add_embedding(time_embeds)
    emb = emb + aug_emb

    # Flatten the batch and frames dimensions
    sample = sample.flatten(0, 1)
    emb = emb.repeat_interleave(num_frames, dim=0)
    encoder_hidden_states = encoder_hidden_states.repeat_interleave(num_frames, dim=0)

    # 2. pre-process
    sample = unet.conv_in(sample)

    # All-zero indicator: every position is treated as a video frame.
    image_only_indicator = torch.zeros(batch_size, num_frames, dtype=sample.dtype, device=sample.device)

    # 3. encoder: run down blocks, collecting skip connections.
    down_block_res_samples = (sample,)
    for downsample_block in unet.down_blocks:
        if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
            sample, res_samples = downsample_block(
                hidden_states=sample,
                temb=emb,
                encoder_hidden_states=encoder_hidden_states,
                image_only_indicator=image_only_indicator,
            )
        else:
            sample, res_samples = downsample_block(
                hidden_states=sample,
                temb=emb,
                image_only_indicator=image_only_indicator,
            )

        down_block_res_samples += res_samples

    # 4. mid
    sample = unet.mid_block(
        hidden_states=sample,
        temb=emb,
        encoder_hidden_states=encoder_hidden_states,
        image_only_indicator=image_only_indicator,
    )

    feature_list = []

    # 5. up: decoder, consuming skip connections from the back of the tuple.
    for i, upsample_block in enumerate(unet.up_blocks):
        res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
        down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]

        if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention:
            sample = upsample_block(
                hidden_states=sample,
                temb=emb,
                res_hidden_states_tuple=res_samples,
                encoder_hidden_states=encoder_hidden_states,
                image_only_indicator=image_only_indicator,
            )
        else:
            sample = upsample_block(
                hidden_states=sample,
                temb=emb,
                res_hidden_states_tuple=res_samples,
                image_only_indicator=image_only_indicator,
            )

        # Earlier (coarser) up-block outputs are upsampled by powers of two so
        # they match the spatial size of block `use_layer_idx` and can be
        # channel-concatenated later.
        if i < use_layer_idx:
            factor = 2**(use_layer_idx - i)
            feature_list.append(torch.nn.functional.interpolate(sample, scale_factor=factor))

        # Early exit: stop the decoder at the requested block.
        if i == use_layer_idx and not complete:
            feature_list.append(sample)
            break

    if not complete:
        if all_layer:
            # Multi-scale decoder features, concatenated along channels.
            sample = torch.cat(feature_list, dim=1)
            sample = sample.reshape(batch_size, num_frames, *sample.shape[1:])
        else:
            sample = sample.reshape(batch_size, num_frames, *sample.shape[1:])
        return sample
    else:
        # 6. post-process: full UNet output head.
        sample = unet.conv_norm_out(sample)
        sample = unet.conv_act(sample)
        sample = unet.conv_out(sample)
        sample = sample.reshape(batch_size, num_frames, *sample.shape[1:])
        return sample
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
def _compute_step_unet_feature(videos, images, texts, unet, pipeline, tokenizer, text_encoder, image_encoder, args, timestep=20, extract_layer_idx=1, step_time=1, max_length=20, seed=None):
    """Compute intermediate UNet feature with gradients enabled via direct UNet forward.

    Encodes video frames and conditioning images into VAE latents (no grad),
    adds EDM-style noise shared between the two halves of the batch, then runs
    `_step_unet` and returns the decoder features extracted at
    `extract_layer_idx` on denoising iteration `step_time - 1`.

    Args:
        videos: (batch, frames, C, H, W) pixel video.
        images: (batch, 1, C, H, W) conditioning image per sample, or
            4-channel inputs assumed to already be VAE latents — TODO confirm.
        texts: list of prompt strings.
        unet: DDP-wrapped video UNet (accessed via `.module` for config).
        pipeline: SVD-style diffusers pipeline providing VAE/scheduler helpers.
        tokenizer / text_encoder / image_encoder: conditioning encoders.
        args: namespace with at least `position_encode` and `num_frames`.
        timestep: number of scheduler timesteps to set.
        extract_layer_idx: up-block index passed to `_step_unet`.
        step_time: 1-based denoising iteration at which features are extracted.
        max_length: token padding length for text encoding.
        seed: optional RNG seed; NOTE(review): it only seeds `gen`, which is
            unused because `prepare_latents` below is commented out — confirm.

    Returns:
        Intermediate feature tensor returned by `_step_unet` (all_layer=True).
    """
    # NOTE(review): height/width are computed but unused since prepare_latents
    # is commented out below.
    height = unet.module.config.sample_size * pipeline.vae_scale_factor // 3
    width = unet.module.config.sample_size * pipeline.vae_scale_factor // 3
    pipeline.vae.eval()
    pipeline.image_encoder.eval()
    device = images.device
    dtype = pipeline.vae.dtype
    #print('dtype:',dtype)
    vae = pipeline.vae

    num_videos_per_prompt = 1

    # Encode target video frames to latents (no grad through the VAE).
    frames = rearrange(videos, 'b f c h w-> (b f) c h w').to(dtype)
    with torch.no_grad():
        latents = vae.encode(frames).latent_dist.mode() * vae.config.scaling_factor
        latents = rearrange(latents, '(b f) c h w-> b f c h w', b=images.shape[0])

    pixel_values = images
    batch_size = pixel_values.shape[0]

    pixel_values = rearrange(pixel_values, 'b f c h w-> (b f) c h w').to(dtype)

    with torch.no_grad():
        # texts, tokenizer, text_encoder, img_cond=None, img_cond_mask=None, img_encoder=None, position_encode=True, use_clip=False, max_length=20
        encoder_hidden_states = encode_text(texts, tokenizer, text_encoder, position_encode=args.position_encode, use_clip=True, max_length=max_length)
        encoder_hidden_states = encoder_hidden_states.to(dtype)
        image_embeddings = encoder_hidden_states

        needs_upcasting = pipeline.vae.dtype == torch.float16 and pipeline.vae.config.force_upcast
        #if needs_upcasting:
        #    pipeline.vae.to(dtype=torch.float32)
        #    pixel_values.to(dtype=torch.float32)
        # 4-channel conditioning is assumed to already be scaled VAE latents —
        # TODO confirm against the caller.
        if pixel_values.shape[-3] == 4:
            image_latents = pixel_values/vae.config.scaling_factor
        else:
            image_latents = pipeline._encode_vae_image(pixel_values, device, num_videos_per_prompt, False)
        image_latents = image_latents.to(image_embeddings.dtype)

        #print('dtype:', image_latents.dtype)

        #if needs_upcasting:
        #    pipeline.vae.to(dtype=torch.float16)

    #num_frames = unet.config.num_frames
    num_frames = args.num_frames
    # Repeat the conditioning latents across every generated frame.
    image_latents = image_latents.unsqueeze(1).repeat(1, num_frames, 1, 1, 1)


    # num_channels_latents = unet.module.config.in_channels

    gen = None
    if seed is not None:
        gen = torch.Generator(device=device)
        gen.manual_seed(int(seed))

    # latents = pipeline.prepare_latents(
    #     batch_size * num_videos_per_prompt,
    #     num_frames,
    #     num_channels_latents,
    #     height,
    #     width,
    #     image_embeddings.dtype,
    #     device,
    #     gen,
    #     None,
    # )
    # EDM-style noise level: log-normal sigma drawn once per *pair* (half the
    # batch), then duplicated so both halves see identical noise levels.
    rnd_normal = torch.randn([batch_size//2, 1, 1, 1, 1], device=device)
    sigma = (rnd_normal * 1.6 + 0.7).exp()
    c_in = 1 / (sigma**2 + 1) ** 0.5
    c_noise = (sigma.log() / 4).reshape([batch_size//2])
    loss_weight = (sigma ** 2 + 1) / sigma ** 2

    # repeat items twice (first/second half of the doubled batch)
    sigma = torch.cat([sigma, sigma], dim=0)
    c_noise = torch.cat([c_noise, c_noise], dim=0)
    c_in = torch.cat([c_in, c_in], dim=0)
    loss_weight = torch.cat([loss_weight, loss_weight], dim=0)
    # Same noise realization for both halves, so paired samples differ only
    # in their conditioning image.
    tmp_noise = torch.randn_like(latents.chunk(2, dim=0)[0])

    latents = latents + torch.cat([tmp_noise, tmp_noise], dim=0) * sigma

    fps = 4
    motion_bucket_id = 127
    added_time_ids = pipeline._get_add_time_ids(
        fps,
        motion_bucket_id,
        0,
        image_embeddings.dtype,
        batch_size,
        num_videos_per_prompt,
        False,
    )
    added_time_ids = added_time_ids.to(device)

    pipeline.scheduler.set_timesteps(timestep, device=device)
    timesteps = pipeline.scheduler.timesteps


    for i, t in enumerate(timesteps):
        #print('step:',i)
        # Extract intermediate features only on the (step_time)-th iteration;
        # all other iterations run the full UNet to keep denoising.
        if i == step_time - 1:
            complete = False
        else:
            complete = True
        # complete = True
        #print('complete:',complete)

        latent_model_input = latents * c_in
        # latent_model_input = pipeline.scheduler.scale_model_input(latent_model_input, t)

        # Concatenate image_latents over the channels dimension.
        # latent_model_input = torch.cat([mask, latent_model_input, image_latents], dim=2)
        latent_model_input = torch.cat([latent_model_input, image_latents], dim=2)
        #print('latent_model_input_shape:',latent_model_input.shape)
        #print('image_embeddings_shape:',image_embeddings.shape)

        # Predict the noise residual / intermediate features. Note that
        # `c_noise` (not the scheduler timestep `t`) is passed as the UNet
        # timestep, matching the EDM preconditioning above.
        feature_pred = _step_unet(
            unet,
            latent_model_input,
            c_noise,
            encoder_hidden_states=image_embeddings,
            added_time_ids=added_time_ids,
            use_layer_idx=extract_layer_idx,
            all_layer=True,
            complete=complete,
        )
        # feature_pred = unet(latent_model_input,t,encoder_hidden_states=image_embeddings,added_time_ids=added_time_ids,return_dict=False,)[0]

        # print('feature_pred_shape:',feature_pred.shape)

        # Early exit once the intermediate features were produced.
        if not complete:
            break

        latents = pipeline.scheduler.step(feature_pred, t, latents).prev_sample

        # latents = outputs.prev_sample
        # pred_x0 = outputs.pred_original_sample
        # frames = pipeline.decode_latents(pred_x0, num_frames)
        # frames = pipeline.video_processor.postprocess_video(video=frames, output_type="np")
        # import imageio
        # breakpoint()
        # imageio.mimsave("output.mp4", frames[0], fps=8)
    return feature_pred
|
| 338 |
+
|
| 339 |
+
|
| 340 |
+
def step_unet_pairwise_mse_loss(batch, unet, pipeline, tokenizer, text_encoder, image_encoder, args, timestep=20, extract_layer_idx=1, step_time=1, max_length=20):
    """
    Compute MSE between two step_unet feature passes derived from the same batch but with different noise seeds.
    This keeps gradients w.r.t. UNet parameters.

    NOTE(review): despite the name, the value returned below is a cosine
    *distance* (1 - mean cosine similarity) between per-location feature
    vectors; the MSE variant is commented out.
    """
    batchsize = batch['video'].shape[0]
    # text = batch['text'][:batchsize//2]
    # video = batch['video'][:batchsize//2]
    text = batch['text']
    video = batch['video']
    # For every sample, draw two *distinct* frame indices to serve as the two
    # conditioning images (requires at least 2 frames per video).
    pairs = [random.sample(range(video.shape[1]), 2) for _ in range(video.shape[0])]
    pairs = torch.tensor(pairs, device=video.device, dtype=torch.long)

    image1 = video[torch.arange(video.shape[0], device=video.device), pairs[:, 0]].unsqueeze(1)
    image2 = video[torch.arange(video.shape[0], device=video.device), pairs[:, 1]].unsqueeze(1)

    # use the same seed for both images
    # seed = torch.seed()
    # Run both conditionings in one doubled batch; _compute_step_unet_feature
    # shares the noise realization between the two halves.
    feats = _compute_step_unet_feature(torch.cat([video, video], dim=0), torch.cat([image1, image2], dim=0), text+text, unet, pipeline, tokenizer, text_encoder, image_encoder, args, timestep, extract_layer_idx, step_time, max_length)
    # feat2 = _compute_step_unet_feature(image2, text, unet, pipeline, tokenizer, text_encoder, image_encoder, args, timestep, extract_layer_idx, step_time, max_length, seed=seed)
    feat1, feat2 = feats.chunk(2, dim=0)
    # use cosine similarity
    # return F.mse_loss(feat1, feat2)
    # Flatten (B, F, C, H, W) -> (B*F*H*W, C) per-location feature vectors.
    feat1 = feat1.permute(0,1,3,4,2)
    feat1 = feat1.reshape(-1, feat1.shape[-1])
    feat2 = feat2.permute(0,1,3,4,2)
    feat2 = feat2.reshape(-1, feat2.shape[-1])
    # 2560 is presumably the concatenated multi-scale decoder channel width
    # for the chosen extract_layer_idx / all_layer=True — TODO confirm.
    assert feat1.shape[-1] == 2560
    feat1 = F.normalize(feat1, dim=1)
    feat2 = F.normalize(feat2, dim=1)
    return 1 - (feat1 * feat2).sum(dim=1).mean()
|
| 372 |
+
|
| 373 |
+
|
code/policy_models/module/Video_Former copy 2.py
ADDED
|
@@ -0,0 +1,538 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This code is referenced from https://github.com/dhansmair/flamingo-mini
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
from einops import rearrange, repeat
|
| 5 |
+
from einops_exts import rearrange_many
|
| 6 |
+
from torch import einsum, nn
|
| 7 |
+
import torch.nn.functional as F
|
| 8 |
+
|
| 9 |
+
from policy_models.module.transformers.utils import feed_forward_layer
|
| 10 |
+
|
| 11 |
+
class Attention(nn.Module):
    """Multi-head self-attention with an optional gated cross-attention branch.

    The cross-attention branch (enabled via `use_cross_attn`) attends the same
    queries to conditioning tokens `y` and adds the result through a
    zero-initialized tanh gate, so it starts as a no-op.
    """

    def __init__(
        self,
        dim: int,
        num_heads: int = 8,
        use_cross_attn=False,
        y_dim=512,
        qkv_bias: bool = False,
        qk_norm: bool = False,
        attn_drop: float = 0.,
        proj_drop: float = 0.,
        norm_layer: nn.Module = nn.LayerNorm,
        attn_mask = None,
    ) -> None:
        """
        Args:
            dim: token dimension (must be divisible by num_heads).
            num_heads: number of attention heads.
            use_cross_attn: add the gated cross-attention branch over `y`.
            y_dim: dimension of the conditioning tokens `y`.
            qkv_bias: bias on the fused qkv projection.
            qk_norm: per-head LayerNorm on q and k.
            attn_drop / proj_drop: dropout probabilities.
            norm_layer: norm constructor used when qk_norm is set.
            attn_mask: optional default attention mask applied in forward.
        """
        super().__init__()
        assert dim % num_heads == 0, 'dim should be divisible by num_heads'
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.scale = self.head_dim ** -0.5
        self.fused_attn = True

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
        self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.attn_mask = attn_mask
        self.use_cross_attn = use_cross_attn
        if self.use_cross_attn:
            self.y_kv = nn.Linear(y_dim, dim * 2, bias=qkv_bias)
            self.y_k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
            # Zero-init gate: cross-attention contributes nothing at start.
            self.gate = nn.Parameter(torch.zeros([self.num_heads]))

    def forward(self, x: torch.Tensor, y=None, attn_mask=None) -> torch.Tensor:
        """Attend `x` to itself (and optionally to conditioning `y`).

        Args:
            x: (batch, seq, dim) input tokens.
            y: (batch, seq_y, y_dim) conditioning tokens; required when
                `use_cross_attn` is set.
            attn_mask: per-call attention mask overriding the constructor one.

        Returns:
            (batch, seq, dim) attended tokens.
        """
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)
        q, k = self.q_norm(q), self.k_norm(k)

        # Fix: the `attn_mask` argument was previously ignored; it now
        # overrides the constructor mask. The device move is kept local so the
        # module's stored mask is no longer mutated inside forward.
        mask = attn_mask if attn_mask is not None else self.attn_mask
        if mask is not None:
            mask = mask.to(x.device)

        if self.fused_attn:
            x = F.scaled_dot_product_attention(
                q, k, v,
                dropout_p=self.attn_drop.p if self.training else 0.,
                attn_mask=mask
            )
        else:
            # Fix: scale into a separate tensor so the unscaled `q` remains
            # available for the cross-attention branch (previously the
            # already-scaled q was fed to SDPA, double-scaling it).
            scaled_q = q * self.scale
            attn = scaled_q @ k.transpose(-2, -1)
            if mask is not None:
                # Fix: the mask was previously applied only on the fused path.
                if mask.dtype == torch.bool:
                    attn = attn.masked_fill(~mask, float('-inf'))
                else:
                    attn = attn + mask
            attn = attn.softmax(dim=-1)
            attn = self.attn_drop(attn)
            x = attn @ v

        if self.use_cross_attn:
            assert y is not None, "use_cross_attn=True requires conditioning tokens `y`"
            N_y = y.shape[1]
            y_kv = self.y_kv(y).reshape(B, N_y, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
            y_k, y_v = y_kv.unbind(0)
            y_k = self.y_k_norm(y_k)
            y_out = F.scaled_dot_product_attention(
                q, y_k, y_v,
                dropout_p=self.attn_drop.p if self.training else 0.,
            )
            # Gated residual: tanh(gate) starts at 0 (gate zero-initialized).
            y_out = y_out * self.gate.tanh().view(1, -1, 1, 1)
            x = x + y_out

        x = x.transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
|
| 86 |
+
|
| 87 |
+
class PerceiverAttentionLayer(nn.Module):
    """Single Perceiver-style cross-attention layer.

    Learnt latent queries attend over the visual features concatenated with
    the latents themselves (Flamingo-style resampler attention).
    """

    def __init__(self, dim: int, dim_head: int = 64, heads: int = 8):
        super().__init__()
        self.scale = dim_head ** -0.5
        self.heads = heads
        self.dim_head = dim_head
        inner_dim = dim_head * heads

        # trainable components of PerceiverAttentionLayer
        self.norm_media = nn.LayerNorm(dim)
        self.norm_latents = nn.LayerNorm(dim)

        self.to_q = nn.Linear(dim, inner_dim, bias=False)
        self.to_k = nn.Linear(dim, inner_dim, bias=False)
        self.to_v = nn.Linear(dim, inner_dim, bias=False)
        self.to_out = nn.Linear(inner_dim, dim, bias=False)

    def forward(self, features, latents):
        """Cross-attend `latents` (queries) to `features` ++ `latents` (keys/values).

        Args:
            features: (batch, n_features, dim) visual features.
            latents: (batch, n_latents, dim) learnt latent vectors.

        Returns:
            (batch, n_latents, dim) attended latents.
        """
        assert features.ndim == 3
        assert latents.ndim == 3
        assert features.shape[0] == latents.shape[0]
        assert features.shape[2] == latents.shape[2]

        bsz = features.shape[0]
        heads, head_dim = self.heads, self.dim_head

        # Layer-normalize both streams before projecting.
        media = self.norm_media(features)
        lat = self.norm_latents(latents)

        def split_heads(t):
            # (b, n, h*d) -> (b, h, n, d)
            return t.view(bsz, t.shape[1], heads, head_dim).transpose(1, 2)

        q = split_heads(self.to_q(lat)) * self.scale
        # Keys/values see the latents as well as the media tokens.
        kv_src = torch.cat((media, lat), dim=-2)
        k = split_heads(self.to_k(kv_src))
        v = split_heads(self.to_v(kv_src))

        logits = torch.einsum('b h q d, b h f d -> b h q f', q, k)
        # Subtract the (detached) row-max for softmax stability.
        logits = logits - logits.amax(dim=-1, keepdim=True).detach()
        weights = logits.softmax(dim=-1)

        out = torch.einsum('b h q f, b h f v -> b h q v', weights, v)
        out = out.transpose(1, 2).reshape(bsz, -1, heads * head_dim)
        return self.to_out(out)
|
| 154 |
+
|
| 155 |
+
class TempAttentionLayer(nn.Module):
    """Multi-head self-attention over a flattened (e.g. temporal) sequence."""

    def __init__(self, dim: int, dim_head: int = 64, heads: int = 8):
        super().__init__()
        self.scale = dim_head ** -0.5
        self.heads = heads
        self.dim_head = dim_head
        inner_dim = dim_head * heads

        # trainable components
        self.norm_media = nn.LayerNorm(dim)

        self.to_q = nn.Linear(dim, inner_dim, bias=False)
        self.to_k = nn.Linear(dim, inner_dim, bias=False)
        self.to_v = nn.Linear(dim, inner_dim, bias=False)
        self.to_out = nn.Linear(inner_dim, dim, bias=False)

    def forward(self, features):
        """Self-attend `features`.

        Args:
            features: (batch, n_features, dim) input tokens.

        Returns:
            (batch, n_features, dim) attended tokens.
        """
        assert features.ndim == 3

        bsz, seq_len, _ = features.shape
        heads, head_dim = self.heads, self.dim_head

        x = self.norm_media(features)

        def split_heads(t):
            # (b, n, h*d) -> (b, h, n, d)
            return t.view(bsz, seq_len, heads, head_dim).transpose(1, 2)

        q = split_heads(self.to_q(x)) * self.scale
        k = split_heads(self.to_k(x))
        v = split_heads(self.to_v(x))

        logits = torch.einsum('b h q d, b h f d -> b h q f', q, k)
        # Subtract the (detached) row-max for softmax stability.
        logits = logits - logits.amax(dim=-1, keepdim=True).detach()
        weights = logits.softmax(dim=-1)

        out = torch.einsum('b h q f, b h f v -> b h q v', weights, v)
        out = out.transpose(1, 2).reshape(bsz, seq_len, heads * head_dim)
        return self.to_out(out)
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
class Video_Former_3D(nn.Module):
|
| 219 |
+
"""Perceiver Resampler with multi-head attention layer"""
|
| 220 |
+
|
| 221 |
+
    def __init__(
        self,
        dim: int,
        depth: int,
        condition_dim: int = 1280,
        dim_head: int = 64,
        heads: int = 8,
        num_latents: int = 64,
        num_frame: int = 14,
        num_time_embeds: int = 4,
        ff_mult: int = 4,
        activation: str = 'gelu',
        trainable: bool = True,
        use_temporal: bool = False,
    ):
        """Perceiver-style resampler over per-frame video features.

        Args:
            dim: token dimension of the resampler.
            depth: number of stacked (perceiver [, temporal]) layer groups.
            condition_dim: input dimension of goal features fed to `goal_emb`.
            dim_head / heads: per-layer attention geometry.
            num_latents: total number of learnt query tokens, split evenly
                across `num_frame` frames; assumes num_latents % num_frame == 0
                (any remainder is silently dropped by the integer division) —
                TODO confirm intended.
            num_frame: number of frames the latents are partitioned into.
            num_time_embeds: number of learnt temporal position embeddings.
            ff_mult: hidden-dim multiplier for the feed-forward layers.
            activation: feed-forward activation name.
            trainable: whether all parameters require grad.
            use_temporal: insert an extra frame-level Attention layer per depth.
        """
        super().__init__()

        self.dim = dim
        self.num_queries = num_latents
        self.num_frame = num_frame
        self.condition_dim = condition_dim
        self.use_temporal = use_temporal
        self.input_mask_mode = 'zero'  # 'none' | 'zero' | 'gaussian' | 'learnable'

        # Project conditioning (goal) features into the resampler dimension.
        self.goal_emb = nn.Sequential(
            nn.Linear(condition_dim, dim * 2),
            nn.GELU(),
            nn.Linear(dim * 2, dim)
        )
        # Queries per frame (integer division; see docstring note).
        frame_seq_len = num_latents // num_frame
        self.latents = nn.Parameter(torch.randn(self.num_frame, frame_seq_len, dim))  # type: ignore[reportPrivateUsage]
        self.time_pos_emb = nn.Parameter(torch.randn(num_time_embeds, 1, dim))  # type: ignore[reportPrivateUsage]
        # NOTE(review): an all-ones *float* mask is added to every attention
        # logit, which is a no-op after softmax; the commented tril line
        # suggests a causal boolean mask may have been intended — confirm.
        attn_mask = torch.ones((num_frame, num_frame))
        #attn_mask = torch.tril(attn_mask).bool()

        self.layers = nn.ModuleList([])

        if self.use_temporal:
            for _ in range(depth):
                self.layers.append(
                    nn.ModuleList(
                        [
                            PerceiverAttentionLayer(dim=dim, dim_head=dim_head, heads=heads),
                            #TempAttentionLayer(dim=dim, dim_head=dim_head, heads=heads),
                            Attention(dim, num_heads=heads, qkv_bias=True, use_cross_attn=False,
                                      y_dim=512, attn_mask=attn_mask),
                            feed_forward_layer(dim=dim, mult=ff_mult, activation=activation),
                        ]
                    )
                )
        else:
            for _ in range(depth):
                self.layers.append(
                    nn.ModuleList(
                        [
                            PerceiverAttentionLayer(dim=dim, dim_head=dim_head, heads=heads),
                            feed_forward_layer(dim=dim, mult=ff_mult, activation=activation),
                        ]
                    )
                )

        # Final layer norm over the query tokens.
        self.norm = nn.LayerNorm(dim)

        self._update_trainable_state(trainable)

        # learnable frame token (used when input_mask_mode == 'learnable')
        if self.input_mask_mode == 'learnable':
            # shape: (1, 1, n_features, dim) after goal_emb
            self.learnable_mask_token = nn.Parameter(torch.zeros(1, 1, 1, dim))
|
| 291 |
+
|
| 292 |
+
def _update_trainable_state(self, trainable: bool = True):
|
| 293 |
+
for param in self.parameters():
|
| 294 |
+
param.requires_grad = trainable
|
| 295 |
+
|
| 296 |
+
def forward(self, x_f: torch.Tensor, mask: torch.BoolTensor = None, extra: torch.Tensor = None, frame_mask_prob: float = 0.0, language: torch.Tensor = None):
    """Run the perceiver resampler on the input visual embeddings.

    Args:
        x_f: Input visual embeddings of shape (batch_size, n_frames, n_features, d_visual)
        mask: Keep-mask for the input frames of shape (batch_size, n_frames);
            padded frames get their positional embedding zeroed.
        extra: Optional extra tokens (batch_size, n_extra, d) concatenated to
            every frame along the feature axis.
        frame_mask_prob: Probability of masking each frame during training
            (0.0 = no masking).
        language: Language embeddings of shape (batch_size, 1, lang_dim);
            currently unused (the fusion path is commented out).

    Returns:
        Resampler features of shape (batch_size, num_queries, dim).
    """
    assert x_f.ndim == 4

    batch_size, max_length, _, dim = x_f.shape

    # Generate per-batch frame mask (True=keep, False=mask) with non-uniform
    # probability peaked at index 6 and decaying away from it.
    frame_mask = None
    if frame_mask_prob > 0.0 and self.training:
        center_idx = 6 if max_length > 6 else (max_length // 2)
        frame_indices = torch.arange(max_length, device=x_f.device).float()
        distances = (frame_indices - float(center_idx)).abs()
        sigma = 2.0
        # Gaussian decay: p_i = frame_mask_prob * exp(-0.5 * (d/sigma)^2)
        per_frame_p = frame_mask_prob * torch.exp(-0.5 * (distances / sigma) ** 2)
        # Broadcast to the batch and sample one Bernoulli per (b, t); True=keep.
        rand_vals = torch.rand(batch_size, max_length, device=x_f.device)
        frame_mask = rand_vals > per_frame_p.unsqueeze(0)
        # Ensure at least one frame is kept per sample.
        needs_fix = frame_mask.sum(dim=1) == 0
        if needs_fix.any():
            idx = torch.nonzero(needs_fix, as_tuple=False).squeeze(-1)
            rand_cols = torch.randint(0, max_length, (idx.numel(),), device=x_f.device)
            frame_mask[idx, rand_cols] = True

    # Zero the position embeddings of padded frames.
    time_pos_emb = (
        self.time_pos_emb[:max_length].unsqueeze(0).expand(batch_size, -1, -1, -1)
    )  # [batch_size, max_length, 1, dim]
    if mask is not None:
        time_pos_emb = time_pos_emb * mask.unsqueeze(-1).unsqueeze(-1)

    # Project visual features to the latent width.
    x_f = self.goal_emb(x_f)

    # Frame-level input masking, applied before the positional encoding so a
    # masked frame still keeps its (unmasked) time embedding.
    if frame_mask is not None:
        bsz = batch_size
        T = max_length
        n_features = x_f.shape[2]
        d = x_f.shape[3]
        mask_expand = frame_mask.unsqueeze(-1).unsqueeze(-1).expand(bsz, T, n_features, d)
        if self.input_mask_mode == 'zero':
            x_f = torch.where(mask_expand, x_f, torch.zeros_like(x_f))
        elif self.input_mask_mode == 'gaussian':
            noise = torch.randn_like(x_f)
            x_f = torch.where(mask_expand, x_f, noise)
        elif self.input_mask_mode == 'learnable':
            token = self.learnable_mask_token
            token = token.expand(bsz, T, n_features, d)
            x_f = torch.where(mask_expand, x_f, token)
        # 'none' -> do nothing

    # Language fusion (disabled): project language (b,1,512) to dim, tile per
    # frame and concatenate as an extra feature token.
    # if language is not None:
    #     lang = self.lang_emb(language.squeeze(1))  # (b, dim)
    #     lang = lang.unsqueeze(1)  # (b,1,dim)
    #     lang = repeat(lang, 'b q d -> b T q d', T=max_length)  # (b,T,1,dim)
    #     x_f = torch.cat([x_f, lang], dim=2)
    if extra is not None:
        extra = repeat(extra, 'b q d -> b T q d', T=max_length)
        x_f = torch.cat([x_f, extra], dim=2)
    x_f = x_f + time_pos_emb

    # Select a temporal keep mask for spatial attention only (latent length is
    # never changed). NOTE(review): this gate compares a probability against
    # 1.0, so the branch never fires for frame_mask_prob in [0, 1] — it looks
    # like a deliberately disabled experiment and is kept as-is.
    keep_mask_time = None
    if frame_mask_prob > 1.0 and self.training:
        b, T, n, d = x_f.shape
        # Center-weighted dropping: frames closer to the center are dropped
        # with higher probability.
        center_idx = 6 if T > 6 else (T // 2)
        frame_indices = torch.arange(T, device=x_f.device).float()
        distances = (frame_indices - float(center_idx)).abs()
        sigma = 2.0
        # Unnormalized weights (max at center, decaying with distance).
        weights = torch.exp(-0.5 * (distances / sigma) ** 2)
        weights = weights / (weights.sum() + 1e-8)
        # Sample p ~ Uniform(0, frame_mask_prob), then drop a fixed count.
        p = (torch.rand((), device=x_f.device) * frame_mask_prob).item()
        num_to_remove = int(T * p)
        if num_to_remove >= T:
            num_to_remove = T - 1
        if num_to_remove > 0:
            drop_idx = torch.multinomial(weights, num_samples=num_to_remove, replacement=False)
            keep_mask_time = torch.ones(T, dtype=torch.bool, device=x_f.device)
            keep_mask_time[drop_idx] = False

    # Copy the latents for every element in the batch (full timeline, no dropping).
    x_full = repeat(self.latents, 'T q d -> b T q d', b=batch_size)  # (b, T, q, d)

    # Apply attention and feed-forward layers.
    if self.use_temporal:
        for attn, Temp_attn, ffw in self.layers:
            # Spatial cross-attention, restricted to kept frames when a
            # temporal keep mask is active (latent timeline length unchanged).
            if keep_mask_time is not None:
                # Select the kept time steps.
                x_kept = x_full[:, keep_mask_time, :, :]  # (b, T_kept, q, d)
                x_f_kept = x_f[:, keep_mask_time, :, :]  # (b, T_kept, n, d)
                # Flatten the kept slices for attention.
                x_kept_flat = rearrange(x_kept, 'b T q d -> (b T) q d')
                x_f_kept_flat = rearrange(x_f_kept, 'b T n d -> (b T) n d')
                # Update the kept positions only.
                x_kept_flat = x_kept_flat + attn(x_f_kept_flat, x_kept_flat)
                x_kept = rearrange(x_kept_flat, '(b T) q d -> b T q d', b=batch_size)
                # Recombine updated (kept) and untouched (dropped) positions.
                x_full_new = torch.zeros_like(x_full)
                x_full_new[:, keep_mask_time, :, :] = x_kept
                x_full_new[:, ~keep_mask_time, :, :] = x_full[:, ~keep_mask_time, :, :]
                x_full = x_full_new
            else:
                # No dropping: full spatial attention.
                x_full_flat = rearrange(x_full, 'b T q d -> (b T) q d')
                x_f_flat = rearrange(x_f, 'b T n d -> (b T) n d')
                x_full_flat = x_full_flat + attn(x_f_flat, x_full_flat)
                x_full = rearrange(x_full_flat, '(b T) q d -> b T q d', b=batch_size)

            # Temporal self-attention over the full timeline (no extra mask).
            x_full_flat = rearrange(x_full, 'b T q d -> (b q) T d')
            x_full_flat = x_full_flat + Temp_attn(x_full_flat, attn_mask=None)
            x_full_flat = rearrange(x_full_flat, '(b q) T d -> (b T) q d', b=batch_size)
            x_full_flat = x_full_flat + ffw(x_full_flat)
            x_full = rearrange(x_full_flat, '(b T) q d -> b T q d', b=batch_size)
    else:
        # Fix: this branch previously referenced an undefined local `x` and
        # passed the un-flattened 4-D x_f, crashing when use_temporal=False.
        # Run spatial attention per frame on flattened latents and write the
        # result back into x_full so the shared epilogue below still works.
        x_flat = rearrange(x_full, 'b T q d -> (b T) q d')
        x_f_flat = rearrange(x_f, 'b T n d -> (b T) n d')
        for attn, ffw in self.layers:
            x_flat = x_flat + attn(x_f_flat, x_flat)
            x_flat = x_flat + ffw(x_flat)
        x_full = rearrange(x_flat, '(b T) q d -> b T q d', b=batch_size)

    # Collapse (frame, query) into one query axis.
    x = rearrange(x_full, 'b T q d -> b (T q) d')
    assert x.shape == torch.Size([batch_size, self.num_queries, self.dim])
    norm = self.norm(x)

    return norm
|
| 439 |
+
|
| 440 |
+
class Video_Former_2D(nn.Module):
    """Perceiver-style resampler: learned latent queries cross-attend,
    frame by frame, to projected visual features."""

    def __init__(
        self,
        dim: int,
        depth: int,
        condition_dim: int = 1280,
        dim_head: int = 64,
        heads: int = 8,
        num_latents: int = 64,
        num_frame: int = 16,
        num_time_embeds: int = 4,
        ff_mult: int = 4,
        activation: str = 'gelu',
        trainable: bool = True,
    ):
        super().__init__()

        self.dim = dim
        self.num_queries = num_latents
        self.num_frame = num_frame
        self.condition_dim = condition_dim

        # Two-layer MLP projecting visual features into the latent width.
        self.goal_emb = nn.Sequential(
            nn.Linear(condition_dim, dim * 2),
            nn.GELU(),
            nn.Linear(dim * 2, dim)
        )
        seq_len = num_latents // num_frame
        # One bank of `seq_len` learned queries per frame.
        self.latents = nn.Parameter(torch.randn(num_frame, seq_len, dim))  # type: ignore[reportPrivateUsage]
        self.time_pos_emb = nn.Parameter(torch.randn(num_time_embeds, 1, dim))  # type: ignore[reportPrivateUsage]

        self.layers = nn.ModuleList([])
        for _ in range(depth):
            block = nn.ModuleList(
                [
                    PerceiverAttentionLayer(dim=dim, dim_head=dim_head, heads=heads),
                    feed_forward_layer(dim=dim, mult=ff_mult, activation=activation),
                ]
            )
            self.layers.append(block)

        # Final normalization over the latent width.
        self.norm = nn.LayerNorm(dim)

        self._update_trainable_state(trainable)

    def _update_trainable_state(self, trainable: bool = True):
        """Toggle gradient tracking for every parameter of this module."""
        for parameter in self.parameters():
            parameter.requires_grad = trainable

    def forward(self, x_f: torch.Tensor, mask: torch.BoolTensor = None):
        """Resample the input visual embeddings into a fixed set of queries.

        Args:
            x_f: Input visual embeddings of shape (batch_size, n_frames, n_features, d_visual)
            mask: Keep-mask for the frames of shape (batch_size, n_frames);
                padded frames get their positional embedding zeroed.

        Returns:
            Resampler features of shape (batch_size, num_queries, dim)
        """
        assert x_f.ndim == 4

        bsz, n_frames, _, feat_dim = x_f.shape

        assert feat_dim == self.condition_dim

        # Positional embeddings; padded frames receive a zeroed embedding.
        pos = self.time_pos_emb[:n_frames].unsqueeze(0).expand(bsz, -1, -1, -1)  # (b, T, 1, dim)
        if mask is not None:
            pos = pos * mask.unsqueeze(-1).unsqueeze(-1)

        # Project to latent width and add positional information.
        feats = self.goal_emb(x_f) + pos

        # Fold the frame axis into the batch so attention runs per frame.
        feats = feats.reshape(bsz * n_frames, feats.shape[2], feats.shape[3])

        # Tile the learned latents across the batch, folded the same way.
        lat = self.latents.unsqueeze(0).expand(bsz, -1, -1, -1)
        lat = lat.reshape(bsz * self.num_frame, self.latents.shape[1], self.dim)

        # Alternate cross-attention and feed-forward, both residual.
        for cross_attn, ffw in self.layers:
            lat = lat + cross_attn(feats, lat)
            lat = lat + ffw(lat)

        # Unfold frames, then flatten (frame, query) into one query axis.
        lat = lat.reshape(bsz, -1, lat.shape[1], lat.shape[2])
        lat = lat.reshape(bsz, -1, self.dim)
        assert lat.shape == torch.Size([bsz, self.num_queries, self.dim])
        return self.norm(lat)
|
code/policy_models/module/Video_Former copy.py
ADDED
|
@@ -0,0 +1,554 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This code is referenced from https://github.com/dhansmair/flamingo-mini
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
from einops import rearrange, repeat
|
| 5 |
+
from einops_exts import rearrange_many
|
| 6 |
+
from torch import einsum, nn
|
| 7 |
+
import torch.nn.functional as F
|
| 8 |
+
|
| 9 |
+
from policy_models.module.transformers.utils import feed_forward_layer
|
| 10 |
+
|
| 11 |
+
class Attention(nn.Module):
    """Multi-head self-attention with an optional gated cross-attention branch.

    Self-attention runs over ``x``; when ``use_cross_attn`` is True the same
    queries additionally attend to ``y`` and that output is added through a
    zero-initialized, tanh-gated per-head scale, so the cross branch starts
    as a no-op and is learned gradually.

    Args:
        dim: Token embedding width; must be divisible by ``num_heads``.
        num_heads: Number of attention heads.
        use_cross_attn: Enable the gated cross-attention branch over ``y``.
        y_dim: Embedding width of ``y`` (only used when ``use_cross_attn``).
        qkv_bias: Add bias terms to the fused q/k/v projection.
        qk_norm: Apply ``norm_layer`` to per-head queries and keys.
        attn_drop: Dropout probability on attention weights.
        proj_drop: Dropout probability after the output projection.
        norm_layer: Norm constructor used when ``qk_norm`` is set.
        attn_mask: Optional default attention mask; sliced to the runtime
            query/key lengths when no per-call mask is given.
    """
    def __init__(
        self,
        dim: int,
        num_heads: int = 8,
        use_cross_attn=False,
        y_dim=512,
        qkv_bias: bool = False,
        qk_norm: bool = False,
        attn_drop: float = 0.,
        proj_drop: float = 0.,
        norm_layer: nn.Module = nn.LayerNorm,
        attn_mask = None,
    ) -> None:
        super().__init__()
        assert dim % num_heads == 0, 'dim should be divisible by num_heads'
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.scale = self.head_dim ** -0.5
        self.fused_attn = True

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
        self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        # NOTE(review): stored as a plain tensor, not a registered buffer, so
        # it does not follow .to(device)/state_dict; moved at call time instead.
        self.attn_mask = attn_mask
        self.use_cross_attn = use_cross_attn
        if self.use_cross_attn:
            # Joint k/v projection for the cross-attended sequence.
            self.y_kv = nn.Linear(y_dim, dim * 2, bias=qkv_bias)
            self.y_k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
            # Per-head gate, zero-initialized so cross-attention starts disabled.
            self.gate = nn.Parameter(torch.zeros([self.num_heads]))

    def forward(self, x: torch.Tensor, y=None, attn_mask=None) -> torch.Tensor:
        """Attend over ``x`` (and optionally ``y``).

        Args:
            x: Input of shape (B, N, C).
            y: Cross-attention context of shape (B, N_y, y_dim); required
                when ``use_cross_attn`` is True.
            attn_mask: Optional per-call mask overriding ``self.attn_mask``.
                Boolean masks mark attendable positions; float masks are
                added to the attention logits.

        Returns:
            Tensor of shape (B, N, C).
        """
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)
        q, k = self.q_norm(q), self.k_norm(k)

        # Resolve the effective mask once so BOTH paths honor it.
        # (Fix: the manual path previously ignored attn_mask/self.attn_mask.)
        runtime_mask = None
        if attn_mask is not None:
            runtime_mask = attn_mask.to(x.device)
        elif self.attn_mask is not None:
            runtime_mask = self.attn_mask.to(x.device)[:q.shape[2], :k.shape[2]]

        if self.fused_attn:
            x = F.scaled_dot_product_attention(
                q, k, v,
                dropout_p=self.attn_drop.p if self.training else 0.,
                attn_mask=runtime_mask
            )
        else:
            q = q * self.scale
            attn = q @ k.transpose(-2, -1)
            if runtime_mask is not None:
                # Match F.scaled_dot_product_attention semantics: boolean
                # masks select attendable positions, float masks are additive.
                if runtime_mask.dtype == torch.bool:
                    attn = attn.masked_fill(~runtime_mask, float('-inf'))
                else:
                    attn = attn + runtime_mask
            attn = attn.softmax(dim=-1)
            attn = self.attn_drop(attn)
            x = attn @ v

        if self.use_cross_attn:
            # NOTE(review): in the non-fused path q has already been scaled
            # above and SDPA scales again here — pre-existing asymmetry with
            # the fused path, kept as-is.
            N_y = y.shape[1]
            y_kv = self.y_kv(y).reshape(B, N_y, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
            y_k, y_v = y_kv.unbind(0)
            y_k = self.y_k_norm(y_k)
            y_out = F.scaled_dot_product_attention(
                q, y_k, y_v,
                dropout_p=self.attn_drop.p if self.training else 0.,
            )
            # tanh(0) == 0 at init, so the branch contributes nothing at first.
            y_out = y_out * self.gate.tanh().view(1, -1, 1, 1)
            x = x + y_out

        x = x.transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
|
| 89 |
+
|
| 90 |
+
class PerceiverAttentionLayer(nn.Module):
    """Perceiver cross-attention: learned latent queries attend to the
    concatenation of visual features and the latents themselves."""

    def __init__(self, dim: int, dim_head: int = 64, heads: int = 8):
        super().__init__()
        self.scale = dim_head ** -0.5
        self.heads = heads
        self.dim_head = dim_head
        inner_dim = dim_head * heads

        # Pre-norms for both streams, then separate q/k/v/out projections.
        self.norm_media = nn.LayerNorm(dim)
        self.norm_latents = nn.LayerNorm(dim)

        self.to_q = nn.Linear(dim, inner_dim, bias=False)
        self.to_k = nn.Linear(dim, inner_dim, bias=False)
        self.to_v = nn.Linear(dim, inner_dim, bias=False)
        self.to_out = nn.Linear(inner_dim, dim, bias=False)

    def forward(self, features, latents):
        """Cross-attend the latents to the visual features.

        Args:
            features: Visual features of shape (batch_size, n_features, dim).
            latents: Learned latent vectors of shape (batch_size, n_latents, dim).

        Returns:
            Updated latents of shape (batch_size, n_latents, dim).
        """
        assert features.ndim == 3
        assert latents.ndim == 3
        assert features.shape[0] == latents.shape[0]
        assert features.shape[2] == latents.shape[2]

        bsz, n_ctx, _ = features.shape
        n_lat = latents.shape[1]
        h, dh = self.heads, self.dim_head

        # Pre-normalize both streams.
        media = self.norm_media(features)
        lat = self.norm_latents(latents)

        # Queries from the latents; split heads via view + transpose.
        q = self.to_q(lat).view(bsz, n_lat, h, dh).transpose(1, 2)  # (b, h, q, dh)

        # Keys/values over features AND latents (Flamingo-style kv input).
        kv_src = torch.cat((media, lat), dim=-2)
        n_kv = n_ctx + n_lat
        k = self.to_k(kv_src).view(bsz, n_kv, h, dh).transpose(1, 2)
        v = self.to_v(kv_src).view(bsz, n_kv, h, dh).transpose(1, 2)

        # Scaled dot-product attention with max-subtraction for stability.
        scores = torch.einsum('b h q d, b h f d -> b h q f', q * self.scale, k)
        scores = scores - scores.amax(dim=-1, keepdim=True).detach()
        weights = scores.softmax(dim=-1)

        attended = torch.einsum('b h q f, b h f v -> b h q v', weights, v)
        attended = attended.transpose(1, 2).reshape(bsz, n_lat, h * dh)

        return self.to_out(attended)
|
| 157 |
+
|
| 158 |
+
class TempAttentionLayer(nn.Module):
    """Multi-head self-attention block (pre-norm, no residual inside)."""

    def __init__(self, dim: int, dim_head: int = 64, heads: int = 8):
        super().__init__()
        self.scale = dim_head ** -0.5
        self.heads = heads
        self.dim_head = dim_head
        inner_dim = dim_head * heads

        # Pre-norm plus separate q/k/v/out projections.
        self.norm_media = nn.LayerNorm(dim)

        self.to_q = nn.Linear(dim, inner_dim, bias=False)
        self.to_k = nn.Linear(dim, inner_dim, bias=False)
        self.to_v = nn.Linear(dim, inner_dim, bias=False)
        self.to_out = nn.Linear(inner_dim, dim, bias=False)

    def forward(self, features):
        """Self-attend over the token sequence.

        Args:
            features: Tokens of shape (batch_size, n_tokens, dim).

        Returns:
            Attention output of shape (batch_size, n_tokens, dim).
        """
        assert features.ndim == 3

        bsz, n_tok, _ = features.shape
        h, dh = self.heads, self.dim_head

        normed = self.norm_media(features)

        # Project and split heads: (b, n, h*dh) -> (b, h, n, dh).
        q = self.to_q(normed).view(bsz, n_tok, h, dh).transpose(1, 2)
        k = self.to_k(normed).view(bsz, n_tok, h, dh).transpose(1, 2)
        v = self.to_v(normed).view(bsz, n_tok, h, dh).transpose(1, 2)

        # Scaled dot-product attention with max-subtraction for stability.
        sim = torch.einsum('b h q d, b h f d -> b h q f', q * self.scale, k)
        sim = sim - sim.amax(dim=-1, keepdim=True).detach()
        probs = sim.softmax(dim=-1)

        out = torch.einsum('b h q f, b h f v -> b h q v', probs, v)
        out = out.transpose(1, 2).reshape(bsz, n_tok, h * dh)

        return self.to_out(out)
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
class Video_Former_3D(nn.Module):
    """Perceiver resampler over a video: per-frame learned latent queries
    cross-attend to projected visual features, optionally followed by a
    temporal self-attention layer, plus a training-time frame-dropping
    augmentation (TMAug)."""

    def __init__(
        self,
        dim: int,
        depth: int,
        condition_dim: int = 1280,
        dim_head: int = 64,
        heads: int = 8,
        num_latents: int = 64,
        num_frame: int = 14,
        num_time_embeds: int = 4,
        ff_mult: int = 4,
        activation: str = 'gelu',
        trainable: bool = True,
        use_temporal: bool = False,
    ):
        super().__init__()

        self.dim = dim
        self.num_queries = num_latents
        self.num_frame = num_frame
        self.condition_dim = condition_dim
        self.use_temporal = use_temporal
        # How masked frames are replaced at the input (see forward):
        # 'none' | 'zero' | 'gaussian' | 'learnable'
        self.input_mask_mode = 'zero' # 'none' | 'zero' | 'gaussian' | 'learnable'

        # Two-layer MLP projecting visual features into the latent width.
        self.goal_emb = nn.Sequential(
            nn.Linear(condition_dim, dim * 2),
            nn.GELU(),
            nn.Linear(dim * 2, dim)
        )

        # Language embedding layer (would map a 512-dim language vector to dim).
        # self.lang_emb = nn.Linear(512, dim)

        # One bank of `frame_seq_len` learned queries per frame.
        frame_seq_len = num_latents // num_frame
        self.latents = nn.Parameter(torch.randn(self.num_frame, frame_seq_len, dim))  # type: ignore[reportPrivateUsage]
        self.time_pos_emb = nn.Parameter(torch.randn(num_time_embeds, 1, dim))  # type: ignore[reportPrivateUsage]
        # All-ones float mask: additive constant across all positions, so it
        # does not change the softmax; the causal variant is disabled below.
        attn_mask = torch.ones((num_frame, num_frame))
        #attn_mask = torch.tril(attn_mask).bool()

        self.layers = nn.ModuleList([])

        if self.use_temporal:
            # Each depth: spatial cross-attention, temporal self-attention, FFW.
            for _ in range(depth):
                self.layers.append(
                    nn.ModuleList(
                        [
                            PerceiverAttentionLayer(dim=dim, dim_head=dim_head, heads=heads),
                            #TempAttentionLayer(dim=dim, dim_head=dim_head, heads=heads),
                            Attention(dim, num_heads=heads, qkv_bias=True, use_cross_attn=False,
                                      y_dim=512, attn_mask=attn_mask),
                            feed_forward_layer(dim=dim, mult=ff_mult, activation=activation),
                        ]
                    )
                )
        else:
            # Each depth: spatial cross-attention followed by FFW only.
            for _ in range(depth):
                self.layers.append(
                    nn.ModuleList(
                        [
                            PerceiverAttentionLayer(dim=dim, dim_head=dim_head, heads=heads),
                            feed_forward_layer(dim=dim, mult=ff_mult, activation=activation),
                        ]
                    )
                )

        # Final normalization over the latent width.
        self.norm = nn.LayerNorm(dim)

        self._update_trainable_state(trainable)

        # Learnable frame token (used when input_mask_mode == 'learnable').
        if self.input_mask_mode == 'learnable':
            # shape: (1, 1, n_features, dim) after goal_emb
            self.learnable_mask_token = nn.Parameter(torch.zeros(1, 1, 1, dim))

    def _update_trainable_state(self, trainable: bool = True):
        # Toggle gradient tracking for every parameter of this module.
        for param in self.parameters():
            param.requires_grad = trainable

    def forward(self, x_f: torch.Tensor, mask: torch.BoolTensor = None, extra : torch.Tensor = None, frame_mask_prob: float = 0.0, language: torch.Tensor = None):
        """Run perceiver resampler on the input visual embeddings

        Args:
            x_f: Input visual embeddings of shape (batch_size, n_frames, n_features, d_visual)
            mask: Mask for the input visual embeddings of shape (batch_size, n_frames)
            extra: Extra tokens concatenated along the feature axis
            frame_mask_prob: Probability of masking each frame during training (0.0 = no masking);
                also reused below as the TMAug frame-drop ratio
            language: Language embeddings of shape (batch_size, 1, lang_dim); currently unused

        Returns:
            Resampler features of shape (batch_size, num_queries, d_visual)
            (fewer queries when TMAug drops frames during training)
        """
        assert x_f.ndim == 4
        batch_size, max_length, _, dim = x_f.shape

        # Generate per-batch frame mask (True=keep, False=mask) with non-uniform
        # probability centered at index 6.
        # NOTE(review): this gate compares a probability against 1.0, so the
        # branch never fires for frame_mask_prob in [0, 1] — it looks like a
        # deliberately disabled experiment (the sibling file uses > 0.0 here).
        frame_mask = None
        if frame_mask_prob > 1.0 and self.training:
            # Per-frame mask probabilities p_i: highest at center_idx=6,
            # decaying with distance from the center.
            center_idx = 6 if max_length > 6 else (max_length // 2)
            frame_indices = torch.arange(max_length, device=x_f.device).float()
            distances = (frame_indices - float(center_idx)).abs()
            sigma = 2.0
            # Gaussian decay: p_i = frame_mask_prob * exp(-0.5 * (d/sigma)^2)
            per_frame_p = frame_mask_prob * torch.exp(-0.5 * (distances / sigma) ** 2)
            # Broadcast to batch and sample Bernoulli per (b, t).
            rand_vals = torch.rand(batch_size, max_length, device=x_f.device)
            # True=keep, False=mask
            frame_mask = rand_vals > per_frame_p.unsqueeze(0)
            # Ensure at least one frame kept per sample.
            needs_fix = frame_mask.sum(dim=1) == 0
            if needs_fix.any():
                idx = torch.nonzero(needs_fix, as_tuple=False).squeeze(-1)
                rand_cols = torch.randint(0, max_length, (idx.numel(),), device=x_f.device)
                frame_mask[idx, rand_cols] = True

        # Zero the position embeddings of the padded frames.
        time_pos_emb = (
            self.time_pos_emb[:max_length].unsqueeze(0).expand(batch_size, -1, -1, -1)
        ) # [batch_size, max_length, 1, dim]
        if mask is not None:
            time_pos_emb = time_pos_emb * mask.unsqueeze(-1).unsqueeze(-1)

        # Project features to the latent width.
        x_f = self.goal_emb(x_f)
        # Frame-level input masking before adding positional encoding.
        if frame_mask is not None:
            bsz = batch_size
            T = max_length
            n_features = x_f.shape[2]
            d = x_f.shape[3]
            mask_expand = frame_mask.unsqueeze(-1).unsqueeze(-1).expand(bsz, T, n_features, d)
            if self.input_mask_mode == 'zero':
                x_f = torch.where(mask_expand, x_f, torch.zeros_like(x_f))
            elif self.input_mask_mode == 'gaussian':
                noise = torch.randn_like(x_f)
                x_f = torch.where(mask_expand, x_f, noise)
            elif self.input_mask_mode == 'learnable':
                token = self.learnable_mask_token
                token = token.expand(bsz, T, n_features, d)
                x_f = torch.where(mask_expand, x_f, token)
            # 'none' -> do nothing
        # Language fusion (disabled): project language (b,1,512) to dim, tile
        # per frame and concatenate as an extra feature token.
        # if language is not None:
        #     lang = self.lang_emb(language.squeeze(1))  # (b, dim)
        #     lang = lang.unsqueeze(1)  # (b,1,dim)
        #     lang = repeat(lang, 'b q d -> b T q d', T=max_length)  # (b,T,1,dim)
        #     x_f = torch.cat([x_f, lang], dim=2)
        if extra is not None:
            extra = repeat(extra, 'b q d -> b T q d', T=max_length)
            x_f = torch.cat([x_f, extra],dim = 2)
        x_f = x_f + time_pos_emb

        # Apply Token Masking Augmentation (TMAug): random frame dropping on
        # the temporal dimension during training.
        dropped_indices = None
        if frame_mask_prob > 0.0 and self.training:
            b, T, n, d = x_f.shape

            # Randomly select frames to drop (frame_mask_prob acts as a ratio).
            num_to_remove = int(T * frame_mask_prob)
            if num_to_remove > 0 and num_to_remove < T:
                # Randomly select frame indices to remove.
                indices_to_remove = torch.randperm(T, device=x_f.device)[:num_to_remove]
                keep_mask = torch.ones(T, dtype=torch.bool, device=x_f.device)
                keep_mask[indices_to_remove] = False
                dropped_indices = indices_to_remove

                # Apply frame dropping.
                x_f = x_f[:, keep_mask, :, :] # (b, T_new, n, d)

        # Flatten the frames into the batch dimension.
        x_f = rearrange(x_f, 'b T n d -> (b T) n d')

        # Actual time dimension after dropping.
        actual_T = x_f.shape[0] // batch_size # (b*T_new) // b = T_new

        # Copy the latents for every element in the batch
        # x = repeat(self.latents, 'T q d -> b T q d', b=batch_size)
        # Copy the latents for every element in the batch, matching the actual
        # time dimension.
        if dropped_indices is not None:
            # Drop the same frame indices from the learned latents.
            # NOTE(review): dropped_indices are drawn from range(T) but index a
            # mask of size self.num_frame — this assumes T == self.num_frame;
            # confirm against callers.
            latents_keep_mask = torch.ones(self.num_frame, dtype=torch.bool, device=x_f.device)
            latents_keep_mask[dropped_indices] = False
            latents_after_drop = self.latents[latents_keep_mask] # (T_new, q, d)
            x = repeat(latents_after_drop, 'T q d -> b T q d', b=batch_size)
        else:
            # No dropping, use original latents.
            x = repeat(self.latents, 'T q d -> b T q d', b=batch_size)

        x = rearrange(x, 'b T q d -> (b T) q d')

        # Apply attention and feed forward layers.
        if self.use_temporal:
            for attn, Temp_attn, ffw in self.layers:
                # Spatial cross-attention per frame (residual).
                x = x + attn(x_f, x)
                # Regroup so temporal self-attention runs across frames.
                x = rearrange(x, '(b T) q d -> (b q) T d', b = batch_size)
                # Build a per-batch temporal attention mask if frame_mask is
                # provided (dead in practice — see the > 1.0 gate above).
                runtime_temporal_mask = None
                if frame_mask is not None:
                    # frame_mask: (b, T) True=keep, False=mask
                    keep = frame_mask # (b, T)
                    # Expand along batch for each latent query: current batch
                    # for attention is (b * q).
                    q_per_frame = x.shape[0] // batch_size
                    # Construct (b, 1, 1, T) -> (b*q, 1, 1, T).
                    mask_bt = keep.unsqueeze(1).unsqueeze(2) # (b,1,1,T)
                    runtime_temporal_mask = mask_bt.repeat_interleave(q_per_frame, dim=0) # (b*q,1,1,T)
                    # Convert to an additive mask: 0 for keep, -1e9 for masked.
                    runtime_temporal_mask = runtime_temporal_mask.to(x.dtype)
                    runtime_temporal_mask = torch.where(
                        runtime_temporal_mask > 0,
                        torch.zeros_like(runtime_temporal_mask),
                        torch.full_like(runtime_temporal_mask, -1e9)
                    )
                x = x + Temp_attn(x, attn_mask=runtime_temporal_mask)
                x = rearrange(x, '(b q) T d -> (b T) q d', b = batch_size)
                x = x + ffw(x)
        else:
            for attn, ffw in self.layers:
                x = x + attn(x_f, x)
                x = x + ffw(x)

        #x = rearrange(x, 'l q d -> b T q d', b=batch_size)
        # Unfold frames, then collapse (frame, query) into one query axis.
        x = x.reshape(batch_size, actual_T, x.shape[1],x.shape[2])
        x = rearrange(x, 'b T q d -> b (T q) d')
        # assert x.shape == torch.Size([batch_size, self.num_queries, self.dim])
        # Output length shrinks proportionally when TMAug dropped frames.
        expected_queries = actual_T * (self.num_queries // self.num_frame)
        assert x.shape == torch.Size([batch_size, expected_queries, self.dim])
        norm = self.norm(x)

        return norm
|
| 455 |
+
|
| 456 |
+
class Video_Former_2D(nn.Module):
    """Perceiver Resampler over per-frame visual features (no temporal mixing).

    A fixed set of learnt latent queries — ``num_frame`` groups of
    ``num_latents // num_frame`` queries each — cross-attends to each frame's
    features independently.  The per-frame outputs are then flattened back
    into a single (batch_size, num_latents, dim) sequence.
    """

    def __init__(
        self,
        dim: int,
        depth: int,
        condition_dim: int = 1280,
        dim_head: int = 64,
        heads: int = 8,
        num_latents: int = 64,
        num_frame: int = 16,
        num_time_embeds: int = 4,
        ff_mult: int = 4,
        activation: str = 'gelu',
        trainable: bool = True,
    ):
        """Build the resampler.

        Args:
            dim: Internal model width (latents, attention, output).
            depth: Number of (cross-attention, feed-forward) layer pairs.
            condition_dim: Width of the incoming visual features.
            dim_head: Per-head width inside PerceiverAttentionLayer.
            heads: Number of attention heads.
            num_latents: Total number of latent queries (split across frames).
            num_frame: Number of frames the latents are partitioned over.
                NOTE(review): assumes num_latents % num_frame == 0 — confirm.
            num_time_embeds: Number of temporal position embeddings.
                NOTE(review): forward slices time_pos_emb[:n_frames]; if the
                input has more frames than num_time_embeds the positional add
                will fail to broadcast — confirm callers keep these aligned.
            ff_mult: Hidden-width multiplier of the feed-forward layers.
            activation: Activation name passed to feed_forward_layer.
            trainable: Whether parameters start with requires_grad=True.
        """
        super().__init__()

        self.dim = dim
        self.num_queries = num_latents
        self.num_frame = num_frame
        self.condition_dim = condition_dim

        # Project incoming features from condition_dim into the model width.
        self.goal_emb = nn.Sequential(
            nn.Linear(condition_dim, dim * 2),
            nn.GELU(),
            nn.Linear(dim * 2, dim)
        )
        # Latents are split evenly across frames: (num_frame, seq_len, dim).
        seq_len = num_latents // num_frame
        self.latents = nn.Parameter(torch.randn(num_frame, seq_len, dim))  # type: ignore[reportPrivateUsage]
        # One learnt position embedding per time step, broadcast over features.
        self.time_pos_emb = nn.Parameter(torch.randn(num_time_embeds, 1, dim))  # type: ignore[reportPrivateUsage]

        self.layers = nn.ModuleList([])
        for _ in range(depth):
            self.layers.append(
                nn.ModuleList(
                    [
                        PerceiverAttentionLayer(dim=dim, dim_head=dim_head, heads=heads),
                        feed_forward_layer(dim=dim, mult=ff_mult, activation=activation),
                    ]
                )
            )

        # Final layer norm over the model width.
        self.norm = nn.LayerNorm(dim)

        self._update_trainable_state(trainable)

    def _update_trainable_state(self, trainable: bool = True):
        """Freeze (False) or unfreeze (True) every parameter of this module."""
        for param in self.parameters():
            param.requires_grad = trainable

    def forward(self, x_f: torch.Tensor, mask: torch.BoolTensor = None):
        """Run perceiver resampler on the input visual embeddings.

        Args:
            x_f: Input visual embeddings of shape (batch_size, n_frames, n_features, d_visual)
            mask: Optional keep-mask of shape (batch_size, n_frames); masked
                frames get their position embedding zeroed (features are kept).

        Returns:
            Resampler features of shape (batch_size, num_queries, dim)
        """
        assert x_f.ndim == 4

        batch_size, max_length, _, dim = x_f.shape

        assert dim == self.condition_dim

        # Zero the position embeddings of padded frames.
        # NOTE(review): assumes num_time_embeds >= n_frames; otherwise the
        # positional add below fails to broadcast — confirm against callers.
        time_pos_emb = (
            self.time_pos_emb[:max_length].unsqueeze(0).expand(batch_size, -1, -1, -1)
        )  # [batch_size, max_length, 1, dim]
        if mask is not None:
            time_pos_emb = time_pos_emb * mask.unsqueeze(-1).unsqueeze(-1)

        # Project features into model width, then add position embeddings.
        x_f = self.goal_emb(x_f)
        x_f = x_f + time_pos_emb

        # Fold frames into the batch dim so each frame is attended independently.
        x_f = rearrange(x_f, 'b T n d -> (b T) n d')

        # Copy the learnt latents for every element in the batch.
        x = repeat(self.latents, 'T q d -> b T q d', b=batch_size)
        x = rearrange(x, 'b T q d -> (b T) q d')

        # Residual cross-attention + feed-forward stack.
        for attn, ffw in self.layers:
            x = x + attn(x_f, x)
            x = x + ffw(x)

        # Unfold frames and merge per-frame latent groups into one sequence.
        x = x.reshape(batch_size, -1 ,x.shape[1],x.shape[2])
        x = rearrange(x, 'b T q d -> b (T q) d')
        assert x.shape == torch.Size([batch_size, self.num_queries, self.dim])
        norm = self.norm(x)

        return norm
|
code/policy_models/module/Video_Former.py
ADDED
|
@@ -0,0 +1,670 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This code is referenced from https://github.com/dhansmair/flamingo-mini
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
from einops import rearrange, repeat
|
| 5 |
+
from einops_exts import rearrange_many
|
| 6 |
+
from torch import einsum, nn
|
| 7 |
+
import torch.nn.functional as F
|
| 8 |
+
|
| 9 |
+
from policy_models.module.transformers.utils import feed_forward_layer
|
| 10 |
+
|
| 11 |
+
class Attention(nn.Module):
    """Multi-head self-attention with an optional gated cross-attention branch.

    The main path is standard QKV self-attention over ``x``.  When
    ``use_cross_attn`` is True, the same queries additionally attend to
    keys/values derived from ``y``; that branch is scaled by a learnable
    per-head tanh gate initialised to zero, so it starts as a no-op, and is
    added to the self-attention output before the final projection.
    """

    def __init__(
        self,
        dim: int,
        num_heads: int = 8,
        use_cross_attn=False,
        y_dim=512,
        qkv_bias: bool = False,
        qk_norm: bool = False,
        attn_drop: float = 0.,
        proj_drop: float = 0.,
        norm_layer: nn.Module = nn.LayerNorm,
        attn_mask = None,
    ) -> None:
        """Args:
            dim: Token width; must be divisible by num_heads.
            num_heads: Number of attention heads.
            use_cross_attn: Enable the gated cross-attention branch over ``y``.
            y_dim: Width of the cross-attention conditioning tokens.
            qkv_bias: Add bias to the QKV (and y-KV) projections.
            qk_norm: Normalise per-head queries/keys with ``norm_layer``.
            attn_drop: Attention-probability dropout rate.
            proj_drop: Output-projection dropout rate.
            norm_layer: Norm constructor used for qk_norm.
            attn_mask: Optional default mask forwarded to SDPA, cropped to the
                current sequence lengths at call time.
        """
        super().__init__()
        assert dim % num_heads == 0, 'dim should be divisible by num_heads'
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.scale = self.head_dim ** -0.5
        # Hard-coded to the fused scaled_dot_product_attention path; the
        # manual (non-fused) branch below is currently unreachable.
        self.fused_attn = True

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
        self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        # NOTE(review): stored as a plain attribute rather than a registered
        # buffer, so it does not follow .to(device); forward moves it manually.
        self.attn_mask = attn_mask
        self.use_cross_attn=use_cross_attn
        if self.use_cross_attn:
            self.y_kv = nn.Linear(y_dim, dim * 2, bias=qkv_bias)
            self.y_k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
            # Per-head gate; zero init => tanh(0) = 0, branch starts disabled.
            self.gate = nn.Parameter(torch.zeros([self.num_heads]))

    def forward(self, x: torch.Tensor, y=None, attn_mask=None) -> torch.Tensor:
        """Apply (optionally masked) self-attention, plus gated cross-attention.

        Args:
            x: Input tokens of shape (B, N, C) with C == dim.
            y: Conditioning tokens (B, N_y, y_dim); required when
                ``use_cross_attn`` is True, otherwise ignored.
            attn_mask: Optional mask forwarded to SDPA for this call only;
                takes precedence over the constructor-supplied mask.

        Returns:
            Tensor of shape (B, N, C).
        """
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)
        q, k = self.q_norm(q), self.k_norm(k)

        if self.fused_attn:
            # Call-time mask wins over the stored one; the stored mask is
            # cropped to the current query/key lengths.
            runtime_mask = None
            if attn_mask is not None:
                runtime_mask = attn_mask.to(x.device)
            elif self.attn_mask is not None:
                runtime_mask = self.attn_mask.to(x.device)[:q.shape[2],:k.shape[2]]
            x = F.scaled_dot_product_attention(
                q, k, v,
                dropout_p=self.attn_drop.p if self.training else 0.,
                attn_mask=runtime_mask
            )
        else:
            # NOTE(review): this manual path ignores both masks.  Harmless
            # while fused_attn is always True, but a trap if that changes.
            q = q * self.scale
            attn = q @ k.transpose(-2, -1)
            attn = attn.softmax(dim=-1)
            attn = self.attn_drop(attn)
            x = attn @ v

        if self.use_cross_attn:
            # Same queries attend to y-derived keys/values (no mask here).
            N_y = y.shape[1]
            y_kv = self.y_kv(y).reshape(B, N_y, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
            y_k, y_v = y_kv.unbind(0)
            y_k = self.y_k_norm(y_k)
            y_out = F.scaled_dot_product_attention(
                q, y_k, y_v,
                dropout_p=self.attn_drop.p if self.training else 0.,
            )
            # Gate each head independently; tanh keeps the gate in (-1, 1).
            y_out = y_out*self.gate.tanh().view(1, -1, 1, 1)
            x = x + y_out

        x = x.transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
|
| 89 |
+
|
| 90 |
+
class PerceiverAttentionLayer(nn.Module):
    """Flamingo-style perceiver attention: learnt latents cross-attend to
    visual features, with keys/values computed over the concatenation of the
    features and the latents themselves."""

    def __init__(self, dim: int, dim_head: int = 64, heads: int = 8):
        super().__init__()
        self.scale = dim_head**-0.5
        self.heads = heads
        self.dim_head = dim_head
        inner_dim = dim_head * heads

        # Trainable components of the layer.
        self.norm_media = nn.LayerNorm(dim)
        self.norm_latents = nn.LayerNorm(dim)

        self.to_q = nn.Linear(dim, inner_dim, bias=False)
        self.to_k = nn.Linear(dim, inner_dim, bias=False)
        self.to_v = nn.Linear(dim, inner_dim, bias=False)
        self.to_out = nn.Linear(inner_dim, dim, bias=False)

    def forward(self, features, latents):
        """Cross-attend the latent queries to the visual features.

        Args:
            features: Visual features of shape (batch_size, n_features, dim).
            latents: Learnt latent queries of shape (batch_size, n_latents, dim).

        Returns:
            Attention output of shape (batch_size, n_latents, dim).
        """
        assert features.ndim == 3
        assert latents.ndim == 3
        assert features.shape[0] == latents.shape[0]
        assert features.shape[2] == latents.shape[2]

        batch, n_feat, _ = features.shape
        n_lat = latents.shape[1]
        heads, head_dim = self.heads, self.dim_head

        # Pre-normalise both streams.
        feats = self.norm_media(features)
        lats = self.norm_latents(latents)

        def split_heads(t):
            # (b, n, heads*head_dim) -> (b, heads, n, head_dim)
            return t.view(batch, -1, heads, head_dim).transpose(1, 2)

        # Queries come from the latents only.
        q = split_heads(self.to_q(lats))
        assert q.shape == torch.Size([batch, heads, n_lat, head_dim])

        # Keys/values attend over features AND latents.
        kv_source = torch.cat((feats, lats), dim=-2)
        k = split_heads(self.to_k(kv_source))
        v = split_heads(self.to_v(kv_source))
        assert v.shape == torch.Size([batch, heads, n_feat + n_lat, head_dim])

        # Scaled dot-product attention with a numerically stable softmax.
        logits = (q * self.scale) @ k.transpose(-2, -1)
        logits = logits - logits.amax(dim=-1, keepdim=True).detach()
        weights = logits.softmax(dim=-1)

        attended = weights @ v
        attended = attended.transpose(1, 2).reshape(batch, n_lat, heads * head_dim)
        return self.to_out(attended)
|
| 157 |
+
|
| 158 |
+
class TempAttentionLayer(nn.Module):
    """Plain multi-head self-attention over a pre-normalised feature sequence."""

    def __init__(self, dim: int, dim_head: int = 64, heads: int = 8):
        super().__init__()
        self.scale = dim_head**-0.5
        self.heads = heads
        self.dim_head = dim_head
        inner_dim = dim_head * heads

        # Trainable components of the layer.
        self.norm_media = nn.LayerNorm(dim)

        self.to_q = nn.Linear(dim, inner_dim, bias=False)
        self.to_k = nn.Linear(dim, inner_dim, bias=False)
        self.to_v = nn.Linear(dim, inner_dim, bias=False)
        self.to_out = nn.Linear(inner_dim, dim, bias=False)

    def forward(self, features):
        """Self-attend the feature sequence.

        Args:
            features: Input of shape (batch_size, n_features, dim).

        Returns:
            Output of shape (batch_size, n_features, dim).
        """
        assert features.ndim == 3

        batch, seq_len, _ = features.shape
        heads, head_dim = self.heads, self.dim_head

        # Single pre-norm; queries, keys and values all come from it.
        normed = self.norm_media(features)

        def split_heads(t):
            # (b, n, heads*head_dim) -> (b, heads, n, head_dim)
            return t.view(batch, seq_len, heads, head_dim).transpose(1, 2)

        q = split_heads(self.to_q(normed))
        k = split_heads(self.to_k(normed))
        v = split_heads(self.to_v(normed))
        assert q.shape == torch.Size([batch, heads, seq_len, head_dim])
        assert v.shape == torch.Size([batch, heads, seq_len, head_dim])

        # Scaled dot-product attention with a numerically stable softmax.
        logits = (q * self.scale) @ k.transpose(-2, -1)
        logits = logits - logits.amax(dim=-1, keepdim=True).detach()
        weights = logits.softmax(dim=-1)

        out = (weights @ v).transpose(1, 2).reshape(batch, seq_len, heads * head_dim)
        return self.to_out(out)
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
class Video_Former_3D(nn.Module):
    """Perceiver Resampler with optional temporal attention across frames.

    Per-frame latent queries cross-attend to each frame's features; when
    ``use_temporal`` is True, an extra self-attention pass mixes latents
    across the time axis between the cross-attention and feed-forward steps.
    During training, whole input frames can be randomly masked (probability
    peaking near the sequence centre) as a form of input dropout.
    """

    def __init__(
        self,
        dim: int,
        depth: int,
        condition_dim: int = 1280,
        dim_head: int = 64,
        heads: int = 8,
        num_latents: int = 64,
        num_frame: int = 14,
        num_time_embeds: int = 4,
        ff_mult: int = 4,
        activation: str = 'gelu',
        trainable: bool = True,
        use_temporal: bool = False,
    ):
        """Args:
            dim: Internal model width.
            depth: Number of layer groups in the stack.
            condition_dim: Width of the incoming visual features.
            dim_head: Per-head width of the perceiver attention.
            heads: Number of attention heads.
            num_latents: Total latent queries, split evenly across frames.
                NOTE(review): assumes num_latents % num_frame == 0 — confirm.
            num_frame: Number of frames the latents are partitioned over.
            num_time_embeds: Number of temporal position embeddings.
                NOTE(review): forward slices time_pos_emb[:n_frames]; if the
                input has more frames than num_time_embeds the positional add
                will fail to broadcast — confirm callers keep these aligned.
            ff_mult: Hidden-width multiplier of the feed-forward layers.
            activation: Activation name passed to feed_forward_layer.
            trainable: Whether parameters start with requires_grad=True.
            use_temporal: Insert a temporal self-attention step per layer.
        """
        super().__init__()

        self.dim = dim
        self.num_queries = num_latents
        self.num_frame = num_frame
        self.condition_dim = condition_dim
        self.use_temporal = use_temporal
        # How masked frames are replaced at the input:
        # 'none' | 'zero' | 'gaussian' | 'learnable'
        self.input_mask_mode = 'zero'

        # Project incoming features from condition_dim into the model width.
        self.goal_emb = nn.Sequential(
            nn.Linear(condition_dim, dim * 2),
            nn.GELU(),
            nn.Linear(dim * 2, dim)
        )
        # Latents are split evenly across frames: (num_frame, seq_len, dim).
        frame_seq_len = num_latents // num_frame
        self.latents = nn.Parameter(torch.randn(self.num_frame, frame_seq_len, dim))  # type: ignore[reportPrivateUsage]
        self.time_pos_emb = nn.Parameter(torch.randn(num_time_embeds, 1, dim))  # type: ignore[reportPrivateUsage]
        # NOTE(review): an all-ones float mask is an additive no-op in
        # scaled_dot_product_attention; the commented-out tril below suggests
        # a causal mask was once intended — confirm before relying on it.
        attn_mask = torch.ones((num_frame, num_frame))
        #attn_mask = torch.tril(attn_mask).bool()

        self.layers = nn.ModuleList([])

        if self.use_temporal:
            # Each layer: frame-wise cross-attn, temporal self-attn, FFW.
            for _ in range(depth):
                self.layers.append(
                    nn.ModuleList(
                        [
                            PerceiverAttentionLayer(dim=dim, dim_head=dim_head, heads=heads),
                            Attention(dim, num_heads=heads, qkv_bias=True, use_cross_attn=False,
                                      y_dim=512, attn_mask=attn_mask),
                            feed_forward_layer(dim=dim, mult=ff_mult, activation=activation),
                        ]
                    )
                )
        else:
            # Each layer: frame-wise cross-attn, FFW.
            for _ in range(depth):
                self.layers.append(
                    nn.ModuleList(
                        [
                            PerceiverAttentionLayer(dim=dim, dim_head=dim_head, heads=heads),
                            feed_forward_layer(dim=dim, mult=ff_mult, activation=activation),
                        ]
                    )
                )

        # Final layer norm over the model width.
        self.norm = nn.LayerNorm(dim)

        self._update_trainable_state(trainable)

        # Learnable frame token (used when input_mask_mode == 'learnable').
        # NOTE(review): created after _update_trainable_state, so its
        # requires_grad is unaffected by ``trainable`` — confirm intent.
        if self.input_mask_mode == 'learnable':
            # shape: (1, 1, n_features, dim) after goal_emb
            self.learnable_mask_token = nn.Parameter(torch.zeros(1, 1, 1, dim))

    def _update_trainable_state(self, trainable: bool = True):
        """Freeze (False) or unfreeze (True) every parameter of this module."""
        for param in self.parameters():
            param.requires_grad = trainable

    def forward(self, x_f: torch.Tensor, mask: torch.BoolTensor = None, extra : torch.Tensor = None, frame_mask_prob: float = 0.0, language: torch.Tensor = None):
        """Run perceiver resampler on the input visual embeddings.

        Args:
            x_f: Input visual embeddings of shape (batch_size, n_frames, n_features, d_visual)
            mask: Keep-mask of shape (batch_size, n_frames); masked frames get
                their position embedding zeroed.
            extra: Extra tokens (batch_size, q, d) concatenated to every
                frame's features after projection.
            frame_mask_prob: Peak probability of masking each frame during
                training (0.0 = no masking).
            language: Language embeddings of shape (batch_size, 1, lang_dim).
                NOTE(review): accepted but never used in this method.

        Returns:
            Resampler features of shape (batch_size, num_queries, dim)
        """
        assert x_f.ndim == 4

        batch_size, max_length, _, dim = x_f.shape

        # Generate per-batch frame mask (True=keep, False=mask) with
        # non-uniform probability centered at index 6. Training-only.
        frame_mask = None
        if frame_mask_prob > 0.0 and self.training:
            # per-frame mask probabilities p_i: highest at center_idx=6, decays with distance
            center_idx = 6 if max_length == 14 else (max_length // 2)
            frame_indices = torch.arange(max_length, device=x_f.device).float()
            distances = (frame_indices - float(center_idx)).abs()
            sigma = 2.0
            # Gaussian decay: p_i = frame_mask_prob * exp(-0.5 * (d/sigma)^2)
            per_frame_p = frame_mask_prob * torch.exp(-0.5 * (distances / sigma) ** 2)
            # broadcast to batch and sample Bernoulli per (b, t)
            rand_vals = torch.rand(batch_size, max_length, device=x_f.device)
            # True=keep, False=mask
            frame_mask = rand_vals > per_frame_p.unsqueeze(0)
            # ensure at least one frame kept per sample
            needs_fix = frame_mask.sum(dim=1) == 0
            if needs_fix.any():
                idx = torch.nonzero(needs_fix, as_tuple=False).squeeze(-1)
                rand_cols = torch.randint(0, max_length, (idx.numel(),), device=x_f.device)
                frame_mask[idx, rand_cols] = True

        # Zero the position embeddings of padded frames.
        # NOTE(review): assumes num_time_embeds >= n_frames; otherwise the
        # positional add below fails to broadcast — confirm against callers.
        time_pos_emb = (
            self.time_pos_emb[:max_length].unsqueeze(0).expand(batch_size, -1, -1, -1)
        )  # [batch_size, max_length, 1, dim]
        if mask is not None:
            time_pos_emb = time_pos_emb * mask.unsqueeze(-1).unsqueeze(-1)

        # Project features into the model width.
        x_f = self.goal_emb(x_f)
        # Frame-level input masking before adding positional encoding.
        if frame_mask is not None:
            bsz = batch_size
            T = max_length
            n_features = x_f.shape[2]
            d = x_f.shape[3]
            mask_expand = frame_mask.unsqueeze(-1).unsqueeze(-1).expand(bsz, T, n_features, d)
            if self.input_mask_mode == 'zero':
                # Replace masked frames with zeros.
                x_f = torch.where(mask_expand, x_f, torch.zeros_like(x_f))
            elif self.input_mask_mode == 'gaussian':
                # Replace masked frames with Gaussian noise.
                noise = torch.randn_like(x_f)
                x_f = torch.where(mask_expand, x_f, noise)
            elif self.input_mask_mode == 'learnable':
                # Replace masked frames with a shared learnt token.
                token = self.learnable_mask_token
                token = token.expand(bsz, T, n_features, d)
                x_f = torch.where(mask_expand, x_f, token)
            # 'none' -> do nothing
        if extra is not None:
            # Broadcast the extra tokens to every frame and append them.
            extra = repeat(extra, 'b q d -> b T q d', T=max_length)
            x_f = torch.cat([x_f, extra],dim = 2)
        x_f = x_f + time_pos_emb

        # Fold frames into the batch dim so each frame is attended independently.
        x_f = rearrange(x_f, 'b T n d -> (b T) n d')

        # Copy the learnt latents for every element in the batch.
        x = repeat(self.latents, 'T q d -> b T q d', b=batch_size)
        x = rearrange(x, 'b T q d -> (b T) q d')

        # Residual attention + feed-forward stack.
        if self.use_temporal:
            for attn, Temp_attn, ffw in self.layers:
                # Frame-wise cross-attention of latents to features.
                x = x + attn(x_f, x)
                # Regroup so the time axis is the sequence dim for Temp_attn.
                x = rearrange(x, '(b T) q d -> (b q) T d', b = batch_size)
                # Build a per-batch temporal attention mask if frame_mask is set.
                runtime_temporal_mask = None
                if frame_mask is not None:
                    # frame_mask: (b, T) True=keep, False=mask
                    keep = frame_mask  # (b, T)
                    # Effective batch for temporal attention is (b * q).
                    q_per_frame = x.shape[0] // batch_size
                    # construct (b, 1, 1, T) -> (b*q, 1, 1, T)
                    mask_bt = keep.unsqueeze(1).unsqueeze(2)  # (b,1,1,T)
                    runtime_temporal_mask = mask_bt.repeat_interleave(q_per_frame, dim=0)  # (b*q,1,1,T)
                    # Convert to an additive mask: 0 for keep, -1e9 for masked.
                    runtime_temporal_mask = runtime_temporal_mask.to(x.dtype)
                    runtime_temporal_mask = torch.where(
                        runtime_temporal_mask > 0,
                        torch.zeros_like(runtime_temporal_mask),
                        torch.full_like(runtime_temporal_mask, -1e9)
                    )
                # Temporal self-attention across frames, then restore layout.
                x = x + Temp_attn(x, attn_mask=runtime_temporal_mask)
                x = rearrange(x, '(b q) T d -> (b T) q d', b = batch_size)
                x = x + ffw(x)
        else:
            for attn, ffw in self.layers:
                x = x + attn(x_f, x)
                x = x + ffw(x)

        # Unfold frames and merge per-frame latent groups into one sequence.
        x = x.reshape(batch_size, -1 ,x.shape[1],x.shape[2])
        x = rearrange(x, 'b T q d -> b (T q) d')
        assert x.shape == torch.Size([batch_size, self.num_queries, self.dim])
        norm = self.norm(x)

        return norm
|
| 416 |
+
|
| 417 |
+
class Video_Former_2D(nn.Module):
    """Perceiver Resampler over per-frame visual features (no temporal mixing).

    A fixed set of learnt latent queries — ``num_frame`` groups of
    ``num_latents // num_frame`` queries each — cross-attends to each frame's
    features independently.  The per-frame outputs are then flattened back
    into a single (batch_size, num_latents, dim) sequence.
    """

    def __init__(
        self,
        dim: int,
        depth: int,
        condition_dim: int = 1280,
        dim_head: int = 64,
        heads: int = 8,
        num_latents: int = 64,
        num_frame: int = 16,
        num_time_embeds: int = 4,
        ff_mult: int = 4,
        activation: str = 'gelu',
        trainable: bool = True,
    ):
        """Build the resampler.

        Args:
            dim: Internal model width (latents, attention, output).
            depth: Number of (cross-attention, feed-forward) layer pairs.
            condition_dim: Width of the incoming visual features.
            dim_head: Per-head width inside PerceiverAttentionLayer.
            heads: Number of attention heads.
            num_latents: Total number of latent queries (split across frames).
                NOTE(review): assumes num_latents % num_frame == 0 — confirm.
            num_frame: Number of frames the latents are partitioned over.
            num_time_embeds: Number of temporal position embeddings.
                NOTE(review): forward slices time_pos_emb[:n_frames]; if the
                input has more frames than num_time_embeds the positional add
                will fail to broadcast — confirm callers keep these aligned.
            ff_mult: Hidden-width multiplier of the feed-forward layers.
            activation: Activation name passed to feed_forward_layer.
            trainable: Whether parameters start with requires_grad=True.
        """
        super().__init__()

        self.dim = dim
        self.num_queries = num_latents
        self.num_frame = num_frame
        self.condition_dim = condition_dim

        # Project incoming features from condition_dim into the model width.
        self.goal_emb = nn.Sequential(
            nn.Linear(condition_dim, dim * 2),
            nn.GELU(),
            nn.Linear(dim * 2, dim)
        )
        # Latents are split evenly across frames: (num_frame, seq_len, dim).
        seq_len = num_latents // num_frame
        self.latents = nn.Parameter(torch.randn(num_frame, seq_len, dim))  # type: ignore[reportPrivateUsage]
        self.time_pos_emb = nn.Parameter(torch.randn(num_time_embeds, 1, dim))  # type: ignore[reportPrivateUsage]

        self.layers = nn.ModuleList([])
        for _ in range(depth):
            self.layers.append(
                nn.ModuleList(
                    [
                        PerceiverAttentionLayer(dim=dim, dim_head=dim_head, heads=heads),
                        feed_forward_layer(dim=dim, mult=ff_mult, activation=activation),
                    ]
                )
            )

        # Layer normalization over the model width.
        self.norm = nn.LayerNorm(dim)

        self._update_trainable_state(trainable)

    def _update_trainable_state(self, trainable: bool = True):
        """Freeze (False) or unfreeze (True) every parameter of this module."""
        for param in self.parameters():
            param.requires_grad = trainable

    def forward(self, x_f: torch.Tensor, mask: torch.BoolTensor = None):
        """Run perceiver resampler on the input visual embeddings.

        Args:
            x_f: Input visual embeddings of shape (batch_size, n_frames, n_features, d_visual)
            mask: Optional keep-mask of shape (batch_size, n_frames); masked
                frames get their position embedding zeroed (features are kept).

        Returns:
            Resampler features of shape (batch_size, num_queries, dim)
        """
        assert x_f.ndim == 4

        batch_size, max_length, _, dim = x_f.shape

        assert dim == self.condition_dim

        # Zero the position embeddings of padded frames.
        # NOTE(review): assumes num_time_embeds >= n_frames; otherwise the
        # positional add below fails to broadcast — confirm against callers.
        time_pos_emb = (
            self.time_pos_emb[:max_length].unsqueeze(0).expand(batch_size, -1, -1, -1)
        )  # [batch_size, max_length, 1, dim]
        if mask is not None:
            time_pos_emb = time_pos_emb * mask.unsqueeze(-1).unsqueeze(-1)

        # Project features into model width, then add position embeddings.
        x_f = self.goal_emb(x_f)
        x_f = x_f + time_pos_emb

        # Fold frames into the batch dim so each frame is attended independently.
        x_f = rearrange(x_f, 'b T n d -> (b T) n d')

        # Copy the learnt latents for every element in the batch.
        x = repeat(self.latents, 'T q d -> b T q d', b=batch_size)
        x = rearrange(x, 'b T q d -> (b T) q d')

        # Residual cross-attention + feed-forward stack.
        for attn, ffw in self.layers:
            x = x + attn(x_f, x)
            x = x + ffw(x)

        # Unfold frames and merge per-frame latent groups into one sequence.
        x = x.reshape(batch_size, -1 ,x.shape[1],x.shape[2])
        x = rearrange(x, 'b T q d -> b (T q) d')
        assert x.shape == torch.Size([batch_size, self.num_queries, self.dim])
        norm = self.norm(x)

        return norm
|
| 516 |
+
|
| 517 |
+
|
| 518 |
+
|
| 519 |
+
class Video_Former_3D_vggt(nn.Module):
    """Perceiver-style video resampler with factorized spatial/temporal attention.

    Compresses per-frame visual features into a fixed number of latent query
    tokens, and additionally returns a VGGT-projected copy of the
    spatio-temporally mixed input features.

    Args:
        dim: Latent/query embedding dimension.
        depth: Number of (cross-attn [, temporal-attn], feed-forward) layers.
        condition_dim: Channel dim of the incoming visual features (input to ``goal_emb``).
        dim_head: Per-head dimension for the Perceiver cross-attention.
        heads: Number of attention heads.
        num_latents: Total number of latent query tokens across all frames.
        num_frame: Number of video frames the latents are partitioned over.
        num_time_embeds: Number of learned temporal position embeddings.
            NOTE(review): ``forward`` slices ``time_pos_emb[:max_length]``; if the
            clip length exceeds ``num_time_embeds`` the broadcast add will fail —
            confirm callers keep ``max_length <= num_time_embeds``.
        ff_mult: Hidden-size multiplier of the feed-forward layers.
        activation: Activation name passed to ``feed_forward_layer``.
        trainable: If False, freezes all parameters of this module.
        use_temporal: If True, each depth layer also contains a temporal
            self-attention module.
    """

    def __init__(
        self,
        dim: int,
        depth: int,
        condition_dim: int = 1280,
        dim_head: int = 64,
        heads: int = 8,
        num_latents: int = 64,
        num_frame: int = 14,
        num_time_embeds: int = 4,
        ff_mult: int = 4,
        activation: str = 'gelu',
        trainable: bool = True,
        use_temporal: bool = False,
    ):
        super().__init__()

        self.dim = dim
        self.num_queries = num_latents
        self.num_frame = num_frame
        self.condition_dim = condition_dim
        self.use_temporal = use_temporal
        # How padded/masked input frames are represented: 'none' | 'zero' | 'gaussian' | 'learnable'
        self.input_mask_mode = 'zero'

        # Project incoming visual features (condition_dim) into the latent dim.
        self.goal_emb = nn.Sequential(
            nn.Linear(condition_dim, dim * 2),
            nn.GELU(),
            nn.Linear(dim * 2, dim)
        )
        # Latents are partitioned per frame: num_latents must be divisible by num_frame
        # for the final shape assertion in forward() to hold.
        frame_seq_len = num_latents // num_frame
        self.latents = nn.Parameter(torch.randn(self.num_frame, frame_seq_len, dim))  # type: ignore[reportPrivateUsage]
        self.time_pos_emb = nn.Parameter(torch.randn(num_time_embeds, 1, dim))  # type: ignore[reportPrivateUsage]
        attn_mask = torch.ones((num_frame, num_frame))
        # NOTE(review): 256 is the hard-coded spatial token count expected by
        # spatial_attn — confirm it matches n_features (+ extra tokens) at runtime.
        attn_mask2 = torch.ones((256, 256))

        self.layers = nn.ModuleList([])

        # Projection head for the auxiliary VGGT feature output.
        self.vggt_emb = nn.Sequential(
            nn.Linear(dim, 2048),
            nn.GELU(),
            nn.Linear(2048, 2048)
        )

        # One pre-mixing pass: spatial self-attn within frames, temporal self-attn
        # across frames, then a feed-forward layer (all residual).
        self.spatial_attn = Attention(dim, num_heads=heads, qkv_bias=True, use_cross_attn=False,
                                      y_dim=512, attn_mask=attn_mask2)
        self.temporal_attn = Attention(dim, num_heads=heads, qkv_bias=True, use_cross_attn=False,
                                       y_dim=512, attn_mask=attn_mask)
        self.feature_ffw = feed_forward_layer(dim=dim, mult=ff_mult, activation=activation)

        if self.use_temporal:
            # Each layer: latent cross-attn, temporal self-attn, feed-forward.
            for _ in range(depth):
                self.layers.append(
                    nn.ModuleList(
                        [
                            PerceiverAttentionLayer(dim=dim, dim_head=dim_head, heads=heads),
                            Attention(dim, num_heads=heads, qkv_bias=True, use_cross_attn=False,
                                      y_dim=512, attn_mask=attn_mask),
                            feed_forward_layer(dim=dim, mult=ff_mult, activation=activation),
                        ]
                    )
                )
        else:
            # Each layer: latent cross-attn, feed-forward (no temporal attention).
            for _ in range(depth):
                self.layers.append(
                    nn.ModuleList(
                        [
                            PerceiverAttentionLayer(dim=dim, dim_head=dim_head, heads=heads),
                            feed_forward_layer(dim=dim, mult=ff_mult, activation=activation),
                        ]
                    )
                )

        # Output normalizations for the latent queries and the VGGT branch.
        self.norm = nn.LayerNorm(dim)
        self.norm_g = nn.LayerNorm(dim)

        self._update_trainable_state(trainable)

        # Learnable frame token (used when input_mask_mode == 'learnable').
        if self.input_mask_mode == 'learnable':
            # shape: (1, 1, n_features, dim) after goal_emb
            self.learnable_mask_token = nn.Parameter(torch.zeros(1, 1, 1, dim))

    def _update_trainable_state(self, trainable: bool = True):
        """Freeze or unfreeze every parameter of this module."""
        for param in self.parameters():
            param.requires_grad = trainable

    def forward(self, x_f: torch.Tensor, mask: torch.BoolTensor = None, extra: torch.Tensor = None, frame_mask_prob: float = 0.0, language: torch.Tensor = None):
        """Run the perceiver resampler on the input visual embeddings.

        Args:
            x_f: Input visual embeddings of shape (batch_size, n_frames, n_features, d_visual).
            mask: Optional per-frame validity mask of shape (batch_size, n_frames);
                zeroes the time position embedding of padded frames.
            extra: Optional extra tokens of shape (batch, q, d); concatenated to
                every frame's feature tokens.
            frame_mask_prob: Unused here; kept for interface compatibility.
            language: Unused here; kept for interface compatibility.

        Returns:
            Tuple of:
            - Resampled latent features of shape (batch_size, num_queries, dim).
            - VGGT-projected mixed features of shape (batch_size, n_frames, n_tokens, 2048).
        """
        assert x_f.ndim == 4

        batch_size, max_length, _, dim = x_f.shape

        # Time position embeddings, zeroed for padded frames.
        time_pos_emb = (
            self.time_pos_emb[:max_length].unsqueeze(0).expand(batch_size, -1, -1, -1)
        )  # [batch_size, max_length, 1, dim]
        if mask is not None:
            time_pos_emb = time_pos_emb * mask.unsqueeze(-1).unsqueeze(-1)

        # Project input features into the latent dimension.
        x_f = self.goal_emb(x_f)

        if extra is not None:
            extra = repeat(extra, 'b q d -> b T q d', T=max_length)
            x_f = torch.cat([x_f, extra], dim=2)
        x_f = x_f + time_pos_emb

        # Fold frames into the batch dimension for per-frame processing.
        x_f = rearrange(x_f, 'b T n d -> (b T) n d')

        # Copy the latent queries for every element in the batch.
        x = repeat(self.latents, 'T q d -> b T q d', b=batch_size)
        x = rearrange(x, 'b T q d -> (b T) q d')

        # Pre-mix input features: spatial attention within each frame,
        # then temporal attention across frames, then feed-forward (residual).
        x_g = x_f + self.spatial_attn(x_f)
        x_g = rearrange(x_g, '(b T) q d -> (b q) T d', b=batch_size)
        x_g = x_g + self.temporal_attn(x_g)
        x_g = rearrange(x_g, '(b q) T d -> (b T) q d', b=batch_size)
        x_g = x_g + self.feature_ffw(x_g)

        x_f = x_g

        # Apply the resampler layers. BUGFIX: the original code always unpacked
        # three modules per layer, which raised ValueError for the default
        # use_temporal=False configuration where each layer holds only
        # (cross-attn, feed-forward). Unpack by layer length instead.
        for layer in self.layers:
            if len(layer) == 3:
                attn, temp_attn, ffw = layer
                x = x + attn(x_f, x)
                x = rearrange(x, '(b T) q d -> (b q) T d', b=batch_size)
                x = x + temp_attn(x)
                x = rearrange(x, '(b q) T d -> (b T) q d', b=batch_size)
            else:
                attn, ffw = layer
                x = x + attn(x_f, x)
            x = x + ffw(x)

        # Unfold frames back out of the batch dim and flatten to (b, T*q, d).
        x = x.reshape(batch_size, -1, x.shape[1], x.shape[2])
        x = rearrange(x, 'b T q d -> b (T q) d')
        assert x.shape == torch.Size([batch_size, self.num_queries, self.dim])
        norm = self.norm(x)
        x_g = rearrange(x_g, '(b T) q d -> b T q d', b=batch_size)

        return norm, self.vggt_emb(self.norm_g(x_g))
|
code/policy_models/module/__init__.py
ADDED
|
File without changes
|
code/policy_models/module/__pycache__/Video_Former.cpython-310.pyc
ADDED
|
Binary file (10.6 kB). View file
|
|
|
code/policy_models/module/__pycache__/Video_Former.cpython-39.pyc
ADDED
|
Binary file (15.4 kB). View file
|
|
|
code/policy_models/module/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (182 Bytes). View file
|
|
|
code/policy_models/module/__pycache__/__init__.cpython-39.pyc
ADDED
|
Binary file (152 Bytes). View file
|
|
|
code/policy_models/module/__pycache__/clip.cpython-310.pyc
ADDED
|
Binary file (23.6 kB). View file
|
|
|
code/policy_models/module/__pycache__/clip.cpython-39.pyc
ADDED
|
Binary file (23.2 kB). View file
|
|
|