multi_task_dit
Robotics · LeRobot · Safetensors
satyadevineni committed
Commit 37894de · verified · 1 Parent(s): 9250753

Upload policy weights, train config and readme

Files changed (4)
  1. README.md +62 -0
  2. config.json +86 -0
  3. model.safetensors +3 -0
  4. train_config.json +222 -0
README.md ADDED
@@ -0,0 +1,62 @@
+ ---
+ datasets: satyadevineni/gesture-commands
+ library_name: lerobot
+ license: apache-2.0
+ model_name: multi_task_dit
+ pipeline_tag: robotics
+ tags:
+ - lerobot
+ - multi_task_dit
+ - robotics
+ ---
+
+ # Model Card for multi_task_dit
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+ _Model type not recognized; please update this template._
+
+
+ This policy has been trained and pushed to the Hub using [LeRobot](https://github.com/huggingface/lerobot).
+ See the full documentation at [LeRobot Docs](https://huggingface.co/docs/lerobot/index).
+
+ ---
+
+ ## How to Get Started with the Model
+
+ For a complete walkthrough, see the [training guide](https://huggingface.co/docs/lerobot/il_robots#train-a-policy).
+ Below is a short version of how to train and run inference/evaluation:
+
+ ### Train from scratch
+
+ ```bash
+ lerobot-train \
+   --dataset.repo_id=${HF_USER}/<dataset> \
+   --policy.type=act \
+   --output_dir=outputs/train/<desired_policy_repo_id> \
+   --job_name=lerobot_training \
+   --policy.device=cuda \
+   --policy.repo_id=${HF_USER}/<desired_policy_repo_id> \
+   --wandb.enable=true
+ ```
+
+ _Writes checkpoints to `outputs/train/<desired_policy_repo_id>/checkpoints/`._
+
+ ### Evaluate the policy / run inference
+
+ ```bash
+ lerobot-record \
+   --robot.type=so100_follower \
+   --dataset.repo_id=<hf_user>/eval_<dataset> \
+   --policy.path=<hf_user>/<desired_policy_repo_id> \
+   --episodes=10
+ ```
+
+ Prefix the dataset repo with **eval\_** and supply `--policy.path` pointing to a local or Hub checkpoint.
+
+ ---
+
+ ## Model Details
+
+ - **License:** apache-2.0
config.json ADDED
@@ -0,0 +1,86 @@
+ {
+   "type": "multi_task_dit",
+   "n_obs_steps": 2,
+   "input_features": {
+     "observation.state": {
+       "type": "STATE",
+       "shape": [
+         6
+       ]
+     }
+   },
+   "output_features": {
+     "action": {
+       "type": "ACTION",
+       "shape": [
+         6
+       ]
+     }
+   },
+   "device": "cuda",
+   "use_amp": false,
+   "use_peft": false,
+   "push_to_hub": true,
+   "repo_id": "satyadevineni/multidit_gesture_commands",
+   "private": null,
+   "tags": null,
+   "license": null,
+   "pretrained_path": null,
+   "horizon": 32,
+   "n_action_steps": 24,
+   "objective": "diffusion",
+   "noise_scheduler_type": "DDPM",
+   "num_train_timesteps": 100,
+   "beta_schedule": "squaredcos_cap_v2",
+   "beta_start": 0.0001,
+   "beta_end": 0.02,
+   "prediction_type": "epsilon",
+   "clip_sample": true,
+   "clip_sample_range": 1.0,
+   "num_inference_steps": null,
+   "sigma_min": 0.0,
+   "num_integration_steps": 100,
+   "integration_method": "euler",
+   "timestep_sampling_strategy": "beta",
+   "timestep_sampling_s": 0.999,
+   "timestep_sampling_alpha": 1.5,
+   "timestep_sampling_beta": 1.0,
+   "hidden_dim": 512,
+   "num_layers": 6,
+   "num_heads": 8,
+   "dropout": 0.1,
+   "use_positional_encoding": false,
+   "timestep_embed_dim": 256,
+   "use_rope": true,
+   "rope_base": 10000.0,
+   "vision_encoder_name": "openai/clip-vit-base-patch16",
+   "use_separate_rgb_encoder_per_camera": false,
+   "vision_encoder_lr_multiplier": 0.1,
+   "image_resize_shape": null,
+   "image_crop_shape": [
+     224,
+     224
+   ],
+   "image_crop_is_random": true,
+   "text_encoder_name": "openai/clip-vit-base-patch16",
+   "tokenizer_max_length": 77,
+   "tokenizer_padding": "max_length",
+   "tokenizer_padding_side": "right",
+   "tokenizer_truncation": true,
+   "normalization_mapping": {
+     "VISUAL": "MEAN_STD",
+     "STATE": "MIN_MAX",
+     "ACTION": "MIN_MAX"
+   },
+   "optimizer_lr": 2e-05,
+   "optimizer_betas": [
+     0.95,
+     0.999
+   ],
+   "optimizer_eps": 1e-08,
+   "optimizer_weight_decay": 0.0,
+   "scheduler_name": "cosine",
+   "scheduler_warmup_steps": 0,
+   "do_mask_loss_for_padding": false,
+   "drop_n_last_frames": 7
+ }
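
The diffusion fields in this config (`noise_scheduler_type`, `num_train_timesteps`, the `beta_*` values, `prediction_type`, `clip_sample`/`clip_sample_range`) read like constructor arguments for a DDPM noise scheduler. A minimal sketch, assuming the policy builds its scheduler from `diffusers` (the config only names "DDPM", so this illustrates the settings rather than the repo's confirmed code path):

```python
# Hedged sketch: a DDPM noise scheduler with the exact values from config.json.
# Assumes diffusers' DDPMScheduler; the policy's actual scheduler class may differ.
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(
    num_train_timesteps=100,            # "num_train_timesteps"
    beta_schedule="squaredcos_cap_v2",  # "beta_schedule"
    beta_start=0.0001,                  # "beta_start"
    beta_end=0.02,                      # "beta_end"
    prediction_type="epsilon",          # the network predicts the added noise
    clip_sample=True,                   # "clip_sample"
    clip_sample_range=1.0,              # "clip_sample_range"
)
```

With `num_inference_steps` left `null`, sampling presumably falls back to the full 100 training timesteps.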
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:facc8de861ba46cc65028971ee8d2d9a92a6675f5347fd9b31eb04a6673938ab
+ size 425771880
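
`model.safetensors` is stored through Git LFS, so the file above is only a pointer recording the content hash and size (~426 MB). Below is a minimal sketch of fetching and inspecting the raw weights with `huggingface_hub` and `safetensors`; the repo id is taken from config.json's `repo_id` field and is an assumption about where these files live, and instantiating the policy itself would additionally require a LeRobot install that registers the `multi_task_dit` type:

```python
# Hedged sketch: resolve the LFS pointer to real weights and inspect the state dict.
# repo_id comes from config.json's "repo_id"; adjust if the files live elsewhere.
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

weights_path = hf_hub_download(
    repo_id="satyadevineni/multidit_gesture_commands",
    filename="model.safetensors",
)
state_dict = load_file(weights_path)  # dict of tensor name -> torch.Tensor
num_params = sum(t.numel() for t in state_dict.values())
print(f"{len(state_dict)} tensors, {num_params:,} parameters")
```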
train_config.json ADDED
@@ -0,0 +1,222 @@
+ {
+   "dataset": {
+     "repo_id": "satyadevineni/gesture-commands",
+     "root": null,
+     "episodes": null,
+     "image_transforms": {
+       "enable": false,
+       "max_num_transforms": 3,
+       "random_order": false,
+       "tfs": {
+         "brightness": {
+           "weight": 1.0,
+           "type": "ColorJitter",
+           "kwargs": {
+             "brightness": [
+               0.8,
+               1.2
+             ]
+           }
+         },
+         "contrast": {
+           "weight": 1.0,
+           "type": "ColorJitter",
+           "kwargs": {
+             "contrast": [
+               0.8,
+               1.2
+             ]
+           }
+         },
+         "saturation": {
+           "weight": 1.0,
+           "type": "ColorJitter",
+           "kwargs": {
+             "saturation": [
+               0.5,
+               1.5
+             ]
+           }
+         },
+         "hue": {
+           "weight": 1.0,
+           "type": "ColorJitter",
+           "kwargs": {
+             "hue": [
+               -0.05,
+               0.05
+             ]
+           }
+         },
+         "sharpness": {
+           "weight": 1.0,
+           "type": "SharpnessJitter",
+           "kwargs": {
+             "sharpness": [
+               0.5,
+               1.5
+             ]
+           }
+         },
+         "affine": {
+           "weight": 1.0,
+           "type": "RandomAffine",
+           "kwargs": {
+             "degrees": [
+               -5.0,
+               5.0
+             ],
+             "translate": [
+               0.05,
+               0.05
+             ]
+           }
+         }
+       }
+     },
+     "revision": null,
+     "use_imagenet_stats": true,
+     "video_backend": "torchcodec",
+     "return_uint8": false,
+     "streaming": false
+   },
+   "env": null,
+   "policy": {
+     "type": "multi_task_dit",
+     "n_obs_steps": 2,
+     "input_features": {
+       "observation.state": {
+         "type": "STATE",
+         "shape": [
+           6
+         ]
+       }
+     },
+     "output_features": {
+       "action": {
+         "type": "ACTION",
+         "shape": [
+           6
+         ]
+       }
+     },
+     "device": "cuda",
+     "use_amp": false,
+     "use_peft": false,
+     "push_to_hub": true,
+     "repo_id": "satyadevineni/multidit_gesture_commands",
+     "private": null,
+     "tags": null,
+     "license": null,
+     "pretrained_path": null,
+     "horizon": 32,
+     "n_action_steps": 24,
+     "objective": "diffusion",
+     "noise_scheduler_type": "DDPM",
+     "num_train_timesteps": 100,
+     "beta_schedule": "squaredcos_cap_v2",
+     "beta_start": 0.0001,
+     "beta_end": 0.02,
+     "prediction_type": "epsilon",
+     "clip_sample": true,
+     "clip_sample_range": 1.0,
+     "num_inference_steps": null,
+     "sigma_min": 0.0,
+     "num_integration_steps": 100,
+     "integration_method": "euler",
+     "timestep_sampling_strategy": "beta",
+     "timestep_sampling_s": 0.999,
+     "timestep_sampling_alpha": 1.5,
+     "timestep_sampling_beta": 1.0,
+     "hidden_dim": 512,
+     "num_layers": 6,
+     "num_heads": 8,
+     "dropout": 0.1,
+     "use_positional_encoding": false,
+     "timestep_embed_dim": 256,
+     "use_rope": true,
+     "rope_base": 10000.0,
+     "vision_encoder_name": "openai/clip-vit-base-patch16",
+     "use_separate_rgb_encoder_per_camera": false,
+     "vision_encoder_lr_multiplier": 0.1,
+     "image_resize_shape": null,
+     "image_crop_shape": [
+       224,
+       224
+     ],
+     "image_crop_is_random": true,
+     "text_encoder_name": "openai/clip-vit-base-patch16",
+     "tokenizer_max_length": 77,
+     "tokenizer_padding": "max_length",
+     "tokenizer_padding_side": "right",
+     "tokenizer_truncation": true,
+     "normalization_mapping": {
+       "VISUAL": "MEAN_STD",
+       "STATE": "MIN_MAX",
+       "ACTION": "MIN_MAX"
+     },
+     "optimizer_lr": 2e-05,
+     "optimizer_betas": [
+       0.95,
+       0.999
+     ],
+     "optimizer_eps": 1e-08,
+     "optimizer_weight_decay": 0.0,
+     "scheduler_name": "cosine",
+     "scheduler_warmup_steps": 0,
+     "do_mask_loss_for_padding": false,
+     "drop_n_last_frames": 7
+   },
+   "reward_model": null,
+   "output_dir": "outputs/train/multidit_gesture_5ksteps",
+   "job_name": "multidit_gesture_5ksteps",
+   "resume": false,
+   "seed": 1000,
+   "cudnn_deterministic": false,
+   "num_workers": 4,
+   "batch_size": 64,
+   "prefetch_factor": 4,
+   "persistent_workers": true,
+   "steps": 5000,
+   "eval_freq": 20000,
+   "log_freq": 200,
+   "tolerance_s": 0.0001,
+   "save_checkpoint": true,
+   "save_freq": 200,
+   "use_policy_training_preset": true,
+   "optimizer": {
+     "type": "adam",
+     "lr": 2e-05,
+     "weight_decay": 0.0,
+     "grad_clip_norm": 10.0,
+     "betas": [
+       0.95,
+       0.999
+     ],
+     "eps": 1e-08
+   },
+   "scheduler": {
+     "type": "diffuser",
+     "num_warmup_steps": 0,
+     "name": "cosine"
+   },
+   "eval": {
+     "n_episodes": 50,
+     "batch_size": 14,
+     "use_async_envs": true
+   },
+   "wandb": {
+     "enable": true,
+     "disable_artifact": false,
+     "project": "lerobot",
+     "entity": null,
+     "notes": null,
+     "run_id": "msfhslcq",
+     "mode": null,
+     "add_tags": true
+   },
+   "peft": null,
+   "sample_weighting": null,
+   "rename_map": {},
+   "checkpoint_path": null
+ }
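
The `optimizer` block in this train config maps one-to-one onto standard PyTorch calls. A minimal sketch, assuming LeRobot's `adam` preset wraps `torch.optim.Adam` and applies `grad_clip_norm` with `clip_grad_norm_` (the `policy` module below is a stand-in, not the real network):

```python
# Hedged sketch: the "optimizer" block from train_config.json as plain PyTorch.
import torch

policy = torch.nn.Linear(6, 6)  # stand-in for the real multi_task_dit policy

optimizer = torch.optim.Adam(
    policy.parameters(),
    lr=2e-05,             # "lr"
    betas=(0.95, 0.999),  # "betas"
    eps=1e-08,            # "eps"
    weight_decay=0.0,     # "weight_decay"
)

# Each training step, after loss.backward(), clip then update:
torch.nn.utils.clip_grad_norm_(policy.parameters(), max_norm=10.0)  # "grad_clip_norm"
optimizer.step()
optimizer.zero_grad()
```

Note the run-length arithmetic implied elsewhere in the config: with `steps` = 5000 and `save_freq` = 200 this run writes 25 checkpoints, and since `eval_freq` = 20000 exceeds `steps` (and `env` is null), the in-training eval loop never fires.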