Add files using upload-large-folder tool
- .gitattributes +1 -0
- InternScenes/InternScenes_Real2Sim/textures/WoodFloor040_1K-JPG.blend +3 -0
- InternScenes/InternScenes_Real2Sim/textures/WoodFloor051_1K-JPG_NormalGL.jpg +3 -0
- eval/Holodeck/data/evaluation/Arrange_a_dining_space_in_the_-2025-12-25-20-06-25-952983/render_topdown.png +3 -0
- eval/Holodeck/data/evaluation/Design_a_decorative_shelving_v-2025-12-24-12-57-13-064231/render_topdown.png +3 -0
- eval/respace/dataset-ssr3dfront_stage2_with_doors_windows/04684207-3d45-4d33-bd9d-0f66c9a45402-a05d04de-e667-4d0d-855b-18de6d267648.json +457 -0
- eval/respace/dataset-ssr3dfront_stage2_with_doors_windows/1e9ab322-0cd4-4101-985c-16d0297af49b-0d7be190-efc9-42fb-91e6-4d5e4ec8c5fc.json +177 -0
- eval/respace/dataset-ssr3dfront_stage2_with_doors_windows/4625da8a-48ed-4930-abd5-58dae51a9b06-79b2c882-23b1-4e39-bcb8-d11bc9033997.json +332 -0
- eval/respace/dataset-ssr3dfront_stage2_with_doors_windows/53cf7da8-7c02-419d-bc4a-acf5d4337cac-957fb842-6774-4540-bb6c-81f5c7132bc9.json +317 -0
- eval/respace/dataset-ssr3dfront_stage2_with_doors_windows/83873897-a199-40c8-94f4-34a5f25f53ca-6a131d07-68c2-4049-8c15-4ac0454bf32e.json +661 -0
- src/verl/recipe/__pycache__/__init__.cpython-310.pyc +0 -0
- src/verl/recipe/entropy/32b_kl_cov.sh +143 -0
- src/verl/recipe/entropy/32b_kl_cov_mininbsz.sh +142 -0
- src/verl/recipe/entropy/7b_clip_cov.sh +146 -0
- src/verl/recipe/entropy/README.md +110 -0
- src/verl/recipe/entropy/entropy_ray_trainer.py +347 -0
- src/verl/recipe/spin/dp_actor.py +288 -0
- src/verl/recipe/spin/fsdp_workers.py +600 -0
- src/verl/recipe/spin/main_spin.py +167 -0
- src/verl/recipe/spin/run_spin.sh +29 -0
- src/verl/recipe/spin/spin_trainer.py +1308 -0
.gitattributes
CHANGED

@@ -118,3 +118,4 @@ InternScenes/InternScenes_Real2Sim/textures/Concrete034_1K-JPG.blend filter=lfs
 InternScenes/InternScenes_Real2Sim/textures/Tiles093_1K-JPG.blend filter=lfs diff=lfs merge=lfs -text
 src/train_sft/data/mllm_demo_data/2.avi filter=lfs diff=lfs merge=lfs -text
 tools/data_gen/zones_data_coarse.json filter=lfs diff=lfs merge=lfs -text
+InternScenes/InternScenes_Real2Sim/textures/WoodFloor040_1K-JPG.blend filter=lfs diff=lfs merge=lfs -text
InternScenes/InternScenes_Real2Sim/textures/WoodFloor040_1K-JPG.blend
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53d5e9b7021c4b3fd98f881cf3fba7c7ef4e435a11962804a59658838808553f
+size 1109428
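The three lines above are a standard Git LFS pointer: only this small text stub is committed, while the actual binary lives in LFS storage, keyed by the sha256 oid. As a minimal sketch (the helper is ours, not part of this repo), such a pointer parses as plain "key value" lines:

import json  # only for pretty-printing the result

def read_lfs_pointer(path):
    # Git LFS pointer files are short text files of "key value" lines.
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

# For the file above this yields:
# {"version": "https://git-lfs.github.com/spec/v1",
#  "oid": "sha256:53d5e9b7021c4b3fd98f881cf3fba7c7ef4e435a11962804a59658838808553f",
#  "size": "1109428"}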
InternScenes/InternScenes_Real2Sim/textures/WoodFloor051_1K-JPG_NormalGL.jpg
ADDED
Git LFS binary file

eval/Holodeck/data/evaluation/Arrange_a_dining_space_in_the_-2025-12-25-20-06-25-952983/render_topdown.png
ADDED
Git LFS binary file

eval/Holodeck/data/evaluation/Design_a_decorative_shelving_v-2025-12-24-12-57-13-064231/render_topdown.png
ADDED
Git LFS binary file
eval/respace/dataset-ssr3dfront_stage2_with_doors_windows/04684207-3d45-4d33-bd9d-0f66c9a45402-a05d04de-e667-4d0d-855b-18de6d267648.json
ADDED

@@ -0,0 +1,457 @@
{
  "bounds_top": [
    [-3.15, 2.8, -0.2], [-2.95, 2.8, -0.2], [-2.95, 2.8, 2.9], [-2.25, 2.8, 2.9],
    [-2.25, 2.8, 3.1], [-0.25, 2.8, 3.1], [-0.25, 2.8, 2.9], [0.45, 2.8, 2.9],
    [0.45, 2.8, 0.1], [0.85, 2.8, 0.1], [0.85, 2.8, 0.2], [1.55, 2.8, 0.2],
    [1.55, 2.8, 0.1], [3.15, 2.8, 0.1], [3.15, 2.8, -1.0], [-0.25, 2.8, -1.0],
    [-0.25, 2.8, -3.1], [-2.95, 2.8, -3.1], [-2.95, 2.8, -0.9], [-3.15, 2.8, -0.9]
  ],
  "bounds_bottom": [
    [-3.15, 0.0, -0.2], [-2.95, 0.0, -0.2], [-2.95, 0.0, 2.9], [-2.25, 0.0, 2.9],
    [-2.25, 0.0, 3.1], [-0.25, 0.0, 3.1], [-0.25, 0.0, 2.9], [0.45, 0.0, 2.9],
    [0.45, 0.0, 0.1], [0.85, 0.0, 0.1], [0.85, 0.0, 0.2], [1.55, 0.0, 0.2],
    [1.55, 0.0, 0.1], [3.15, 0.0, 0.1], [3.15, 0.0, -1.0], [-0.25, 0.0, -1.0],
    [-0.25, 0.0, -3.1], [-2.95, 0.0, -3.1], [-2.95, 0.0, -0.9], [-3.15, 0.0, -0.9]
  ],
  "room_type": "livingroom",
  "room_id": "LivingDiningRoom-7220",
  "objects": [
    {"desc": "Elegant traditional baroque pendant lamp with an antique brass finish, featuring multiple ornate arms and candle-shaped lights for classic ambiance.",
     "size": [1.05, 1.12, 1.07], "pos": [-1.44, 1.68, -1.93], "rot": [0, 0, 0, 1], "jid": "2a89f148-8de5-4100-a6f4-e0019bd6e57b"},
    {"desc": "Classic-contemporary three-seat sofa with white tufted fabric, contrasting black and blue cushions, ornate wooden legs, and distinctive decorative prints.",
     "size": [2.49, 0.94, 0.79], "pos": [-2.54, 0.0, 1.44], "rot": [0, 0.70711, 0, 0.70711], "jid": "e68fe496-a9c9-4a0b-b00a-1bbc5aab885d"},
    {"desc": "Luxury Baroque armchair featuring ornate carved wooden frame with silver finish and plush beige upholstery for elegant comfort.",
     "size": [0.74, 1.03, 0.78], "pos": [-1.46, 0.0, 0.13], "rot": [0, 0.09971, 0, 0.99502], "jid": "772e338e-3907-48ff-b695-5d0aa0088c51"},
    {"desc": "Traditional round dining table with dark brown wooden top, silver ornate pedestal legs, and classic detailing.",
     "size": [1.78, 0.76, 1.73], "pos": [-1.45, 0.0, -1.81], "rot": [0, 0.70711, 0, 0.70711], "jid": "ffd71c3f-5625-4090-a8d7-a3caba1af504"},
    {"desc": "Modern minimalist rectangular coffee table with woven synthetic rattan and textured brown patterned surface.",
     "size": [1.63, 0.52, 1.52], "pos": [-1.24, 0.0, 1.34], "rot": [0, -0.70711, 0, 0.70711], "jid": "d7b7be7a-5ed6-419e-8d47-cfa909735db7"},
    {"desc": "Contemporary minimalist TV stand with geometric patterned grey doors, brown wooden top, and gold metal accents.",
     "size": [2.0, 0.56, 0.51], "pos": [0.17, 0.0, 1.37], "rot": [0, -0.70711, 0, 0.70711], "jid": "0b618eb2-4a1c-43c4-bee7-205c5a6a08d0"},
    {"desc": "Modern minimalist sideboard with beige wood frame, three gray textured doors, and slim black metal legs.",
     "size": [1.79, 0.64, 0.47], "pos": [-2.7, 0.0, -1.98], "rot": [0, 0.70711, 0, 0.70711], "jid": "e736b2f5-97bd-45cf-8406-20ef69f13f82"}
  ],
  "doors": [
    {"type": "door", "pos": [-1.28, 1.1, 2.99], "size": [1.98, 2.2, 0.24], "uid": "84751593635200263/0"},
    {"type": "door", "pos": [-0.16, 1.1, -1.97], "size": [0.12, 2.2, 1.36], "uid": "85651593635200301/0"},
    {"type": "door", "pos": [3.22, 1.05, -0.57], "size": [0.12, 2.1, 0.7], "uid": "86591593635200319/0"},
    {"type": "door", "pos": [1.16, 1.05, 0.11], "size": [0.7, 2.1, 0.12], "uid": "88391593635200356/0"},
    {"type": "door", "pos": [3.22, 1.05, -1.47], "size": [0.12, 2.1, 0.64], "uid": "90191593635200380/0"},
    {"type": "door", "pos": [-3.06, 1.05, -0.57], "size": [0.24, 2.1, 0.75], "uid": "91191593635200390/0"}
  ],
  "windows": [
    {"type": "window", "pos": [-1.24, 1.65, -3.2], "size": [0.99, 1.5, 0.24], "uid": "82951593635200237/0"}
  ],
  "floor_material": {
    "jid": "9992a6f0-5364-4c45-96ca-9f0064430902",
    "material_type": "flooring - hardwood",
    "texture_path": "3D-FRONT-texture/9992a6f0-5364-4c45-96ca-9f0064430902/texture.png"
  }
}
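Each scene file in this directory follows the same schema: "bounds_top" and "bounds_bottom" give the ceiling and floor polygons as [x, y, z] vertices (y appears to be the up axis, with the floor at y = 0), and each entry in "objects" carries a text description, an axis-aligned "size", a "pos", a quaternion "rot", and a "jid" referencing a 3D-FRONT asset. A minimal inspection sketch (the script and its output format are ours; only the schema comes from the files in this commit, and units are assumed to be meters):

import json

def floor_area(bounds_bottom):
    # Shoelace formula over the (x, z) footprint polygon.
    pts = [(x, z) for x, _, z in bounds_bottom]
    s = sum(x1 * z2 - x2 * z1 for (x1, z1), (x2, z2) in zip(pts, pts[1:] + pts[:1]))
    return abs(s) / 2.0

# Run from the repo root; this is the first scene file added above.
path = ("eval/respace/dataset-ssr3dfront_stage2_with_doors_windows/"
        "04684207-3d45-4d33-bd9d-0f66c9a45402-a05d04de-e667-4d0d-855b-18de6d267648.json")
with open(path) as f:
    scene = json.load(f)

print(scene["room_id"], scene["room_type"])
print(f"floor area: {floor_area(scene['bounds_bottom']):.2f}")
for obj in scene["objects"]:
    w, h, d = obj["size"]
    print(f"  {obj['jid'][:8]}  {w:.2f} x {h:.2f} x {d:.2f}  at {obj['pos']}")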
eval/respace/dataset-ssr3dfront_stage2_with_doors_windows/1e9ab322-0cd4-4101-985c-16d0297af49b-0d7be190-efc9-42fb-91e6-4d5e4ec8c5fc.json
ADDED

@@ -0,0 +1,177 @@
{
  "bounds_top": [
    [-1.3, 2.6, 1.8], [1.3, 2.6, 1.8], [1.3, 2.6, -1.8], [-1.3, 2.6, -1.8]
  ],
  "bounds_bottom": [
    [-1.3, 0.0, 1.8], [1.3, 0.0, 1.8], [1.3, 0.0, -1.8], [-1.3, 0.0, -1.8]
  ],
  "room_type": "other",
  "room_id": "ElderlyRoom-34339",
  "objects": [
    {"desc": "Modern classic king-size canopy bed with sheer white curtains, brown wooden frame, and decorative pink pillows for a cozy, elegant aesthetic.",
     "size": [2.23, 2.26, 2.19], "pos": [0.15, 0.0, -0.22], "rot": [0, -0.70711, 0, 0.70711], "jid": "f43310eb-270b-49ec-aef9-11103921b224"},
    {"desc": "A contemporary minimalist wooden wardrobe with natural light brown color, two doors, and two drawers, featuring a sleek and functional design.",
     "size": [0.9, 2.0, 0.56], "pos": [0.39, 0.0, 1.53], "rot": [0, 1, 0, 0], "jid": "835f1b5d-cb60-4164-a1cd-927bad7f3dbd"},
    {"desc": "A modern, minimalist wooden nightstand with a natural finish, featuring a single drawer, open shelf, and integrated drawer handle.",
     "size": [0.4, 0.45, 0.35], "pos": [1.04, 0.0, 1.08], "rot": [0, -0.70711, 0, 0.70711], "jid": "056d9c19-ddb8-45d0-bc22-f9c4f0b41962"},
    {"desc": "A modern, minimalist wooden nightstand with a natural finish, featuring a single drawer, open shelf, and integrated drawer handle.",
     "size": [0.4, 0.45, 0.35], "pos": [1.13, 0.0, -1.38], "rot": [0, -0.70711, 0, 0.70711], "jid": "056d9c19-ddb8-45d0-bc22-f9c4f0b41962"},
    {"desc": "Modern black fabric armchair with high backrest, slender metal legs, and minimalist design.",
     "size": [0.78, 1.07, 0.97], "pos": [-0.18, 0.0, -1.52], "rot": [0, -0.70711, 0, 0.70711], "jid": "9a7b95c2-d2e1-45a1-89a7-b24bbbe7b2c5"},
    {"desc": "Traditional pendant lamp with a circular frame and six beige fabric conical lampshades on a gold metal base.",
     "size": [1.01, 0.78, 0.9], "pos": [-0.08, 1.86, -0.25], "rot": [0, 0, 0, 1], "jid": "9110001b-e49a-4703-aebe-ee2e61d6f469"}
  ],
  "doors": [],
  "windows": [],
  "floor_material": {
    "jid": "a1543df0-d183-45bc-9814-9bf463f7caed",
    "material_type": "flooring - reinforced",
    "texture_path": "3D-FRONT-texture/a1543df0-d183-45bc-9814-9bf463f7caed/texture.png"
  }
}
eval/respace/dataset-ssr3dfront_stage2_with_doors_windows/4625da8a-48ed-4930-abd5-58dae51a9b06-79b2c882-23b1-4e39-bcb8-d11bc9033997.json
ADDED

@@ -0,0 +1,332 @@
{
  "bounds_top": [
    [-1.6, 2.8, 2.6], [1.6, 2.8, 2.6], [1.6, 2.8, -2.5], [-0.7, 2.8, -2.5],
    [-0.7, 2.8, -2.6], [-1.5, 2.8, -2.6], [-1.5, 2.8, -2.5], [-1.6, 2.8, -2.5]
  ],
  "bounds_bottom": [
    [-1.6, 0.0, 2.6], [1.6, 0.0, 2.6], [1.6, 0.0, -2.5], [-0.7, 0.0, -2.5],
    [-0.7, 0.0, -2.6], [-1.5, 0.0, -2.6], [-1.5, 0.0, -2.5], [-1.6, 0.0, -2.5]
  ],
  "room_type": "bedroom",
  "room_id": "Bedroom-4142",
  "objects": [
    {"desc": "Modern white wardrobe with sleek rectangular design, gold accents, and minimalist appeal.",
     "size": [2.0, 2.1, 0.64], "pos": [0.55, 0.0, -2.15], "rot": [0, 0, 0, 1], "jid": "770a3313-2ecb-478f-b38f-e24bf7cd8fbc"},
    {"desc": "A modern rustic wooden stool with a cushioned beige seat, curved leg braces, and tapered legs.",
     "size": [0.44, 0.68, 0.47], "pos": [0.82, 0.0, 1.92], "rot": [0, 0.70711, 0, 0.70711], "jid": "4cd0fb8c-80b6-4a8d-a38f-dee8119461a1"},
    {"desc": "Stylish Mid-century Modern walnut brown wooden dressing table with a lift-up mirror, two drawers, and distinctive slanted legs.",
     "size": [0.93, 1.13, 0.42], "pos": [1.3, 0.0, 1.94], "rot": [0, -0.70711, 0, 0.70711], "jid": "15b94a23-059b-4c12-98f8-bd651083d20f"},
    {"desc": "A modern, minimalist king-size bed featuring black fabric upholstery and sturdy wooden legs, embodying clean lines and a low-profile design.",
     "size": [2.13, 0.95, 2.34], "pos": [0.42, 0.0, -0.02], "rot": [0, -0.70711, 0, 0.70711], "jid": "5159ffde-a7c7-4d2c-8b07-8c4d088487ee"},
    {"desc": "Antique vintage nightstand with curved legs, hand-painted floral drawer, and delicate metal handle.",
     "size": [0.51, 0.65, 0.44], "pos": [1.45, 0.0, 1.3], "rot": [0, -0.70711, 0, 0.70711], "jid": "af8f84de-bb38-49e0-9e39-ffd0511dd65b"},
    {"desc": "Antique vintage nightstand with curved legs, hand-painted floral drawer, and delicate metal handle.",
     "size": [0.51, 0.65, 0.44], "pos": [1.45, 0.0, -1.33], "rot": [0, -0.70711, 0, 0.70711], "jid": "af8f84de-bb38-49e0-9e39-ffd0511dd65b"},
    {"desc": "A modern, minimalistic black metal plant stand with tall tripod legs and a square pot, offering a sleek and elevated design.",
     "size": [0.41, 1.04, 0.39], "pos": [-1.33, 0.0, 2.14], "rot": [0, 0.70711, 0, 0.70711], "jid": "36f904b5-77ee-430d-8726-6db2d2104307"},
    {"desc": "Mid-century modern sideboard with dark wood finish, sleek legs, and gold brass handles.",
     "size": [0.8, 0.98, 0.41], "pos": [-1.44, 0.0, 0.04], "rot": [0, 0.70711, 0, 0.70711], "jid": "63fb829b-fe84-44c5-8449-ddabff483630"},
    {"desc": "Modern pendant lamp with a spherical petal-like white fabric design and a central light bulb suspended by metal.",
     "size": [0.82, 0.82, 0.82], "pos": [-0.02, 1.97, 0.26], "rot": [0, 0, 0, 1], "jid": "7be78e99-5ace-466f-84bf-e4509dabebb3"},
    {"desc": "A striking contemporary industrial pendant lamp with a red metal frame and five black bell-shaped shades, featuring curved arms and a distinctive metallic finish.",
     "size": [0.25, 0.27, 0.22], "pos": [1.43, 2.12, -1.16], "rot": [0, 0, 0, 1], "jid": "921c632b-d082-4991-869e-e1770eeb5dba-(1.11)-(1.0)-(0.95)"},
    {"desc": "A striking contemporary industrial pendant lamp with a red metal frame and five black bell-shaped shades, featuring curved arms and a distinctive metallic finish.",
     "size": [0.25, 0.27, 0.22], "pos": [1.43, 2.12, 1.15], "rot": [0, 0, 0, 1], "jid": "921c632b-d082-4991-869e-e1770eeb5dba-(1.11)-(1.0)-(0.95)"}
  ],
  "doors": [
    {"type": "door", "pos": [-1.09, 1.06, -2.53], "size": [0.8, 2.11, 0.12], "uid": "660101593784432476/0"}
  ],
  "windows": [],
  "floor_material": {
    "jid": "3199e4ba-6cd0-4118-bdc6-0e2e12466466",
    "material_type": "flooring - hardwood",
    "texture_path": "3D-FRONT-texture/3199e4ba-6cd0-4118-bdc6-0e2e12466466/texture.png"
  }
}
eval/respace/dataset-ssr3dfront_stage2_with_doors_windows/53cf7da8-7c02-419d-bc4a-acf5d4337cac-957fb842-6774-4540-bb6c-81f5c7132bc9.json
ADDED

@@ -0,0 +1,317 @@
{
  "bounds_top": [
    [-2.7, 2.6, 1.95], [2.7, 2.6, 1.95], [2.7, 2.6, -1.95], [-2.7, 2.6, -1.95]
  ],
  "bounds_bottom": [
    [-2.7, 0.0, 1.95], [2.7, 0.0, 1.95], [2.7, 0.0, -1.95], [-2.7, 0.0, -1.95]
  ],
  "room_type": "livingroom",
  "room_id": "LivingDiningRoom-6226",
  "objects": [
    {"desc": "A contemporary dark blue velvet three-seat sofa with a curved backrest, rounded shape, minimalist design, and multiple gray cushions for added comfort.",
     "size": [2.33, 0.82, 0.91], "pos": [1.04, 0.0, -1.51], "rot": [0, 0, 0, 1], "jid": "221f31fd-1759-4c14-b183-77a0ac51b55f"},
    {"desc": "A contemporary gray dining chair with a curved backrest and dark brown wooden legs, featuring minimalist design and comfortable padded upholstery.",
     "size": [0.68, 0.75, 0.68], "pos": [-0.45, 0.0, -0.43], "rot": [0, 0.49227, 0, 0.87044], "jid": "1fd55b49-a16a-46b1-a196-e6d00b4ec179"},
    {"desc": "A contemporary, minimalist round coffee table with a beige upholstered body and grey metal trim.",
     "size": [0.49, 0.25, 0.49], "pos": [1.17, 0.0, -0.3], "rot": [0, 0, 0, 1], "jid": "01ccb00d-2cc9-464e-a250-42adbf4eb646"},
    {"desc": "Elegant modern black wooden corner table with tapered legs and raised edge top featuring ornate detailing.",
     "size": [0.47, 0.54, 0.47], "pos": [-0.47, 0.0, -1.6], "rot": [0, 0, 0, 1], "jid": "b5c2be8e-ab60-4cb8-bb59-abf7d26cef40"},
    {"desc": "Elegant modern black wooden corner table with tapered legs and raised edge top featuring ornate detailing.",
     "size": [0.47, 0.54, 0.47], "pos": [2.46, 0.0, -1.56], "rot": [0, 0, 0, 1], "jid": "b5c2be8e-ab60-4cb8-bb59-abf7d26cef40"},
    {"desc": "Modern minimalist dining table with rectangular wooden top and striking angled metal legs, featuring clean lines and a smooth surface.",
     "size": [2.08, 0.91, 1.12], "pos": [-2.16, 0.0, -0.88], "rot": [0, 0.70711, 0, 0.70711], "jid": "5ce0286c-a161-4473-82c0-febd82bc2233"},
    {"desc": "Modern minimalist dining chair with a curved back, two-tone fabric upholstery, thin metal legs, and accented stitching.",
     "size": [0.59, 0.85, 0.61], "pos": [-1.48, 0.0, -0.2], "rot": [0, 0.70711, 0, -0.70711], "jid": "baa3522b-8136-47e2-b401-b413906edeb0"},
    {"desc": "Modern minimalist dining chair with a curved back, two-tone fabric upholstery, thin metal legs, and accented stitching.",
     "size": [0.59, 0.85, 0.61], "pos": [-1.47, 0.0, -1.4], "rot": [0, 0.70711, 0, -0.70711], "jid": "baa3522b-8136-47e2-b401-b413906edeb0"},
    {"desc": "Modern minimalist black TV stand with a geometric patterned front, wood construction, and lacquered finish.",
     "size": [2.2, 0.78, 0.52], "pos": [1.07, 0.0, 1.72], "rot": [0, -1, 0, 0], "jid": "232df588-db56-4982-a81e-ea9de8aa3606"},
    {"desc": "A mid-century modern dark brown wooden sideboard with slanted black handles, featuring two central doors, six drawers, and tapered legs.",
     "size": [1.59, 0.67, 0.39], "pos": [-2.5, 0.0, 0.98], "rot": [0, 0.70711, 0, 0.70711], "jid": "ab94bb57-2f95-4090-8eeb-f9dea82e3289"},
    {"desc": "Contemporary pendant lamp featuring gold and white circular disks, blending modern and minimalist design with a floating geometric aesthetic.",
     "size": [0.42, 1.12, 0.39], "pos": [1.01, 1.48, -0.02], "rot": [0, 0, 0, 1], "jid": "a7621218-bd3a-48ab-9694-9f307bb4ba1e"},
    {"desc": "Modern gold geometric pendant lamp with crystal accents and layered design for a luxurious touch.",
     "size": [1.11, 1.22, 1.1], "pos": [-2.12, 0.98, -1.39], "rot": [0, 0, 0, 1], "jid": "052065aa-8642-4969-abb0-98e429eb4360"},
    {"desc": "Modern gold geometric pendant lamp with crystal accents and layered design for a luxurious touch.",
     "size": [1.11, 1.22, 1.1], "pos": [-2.12, 0.98, -0.24], "rot": [0, 0, 0, 1], "jid": "052065aa-8642-4969-abb0-98e429eb4360"}
  ],
  "doors": [],
  "windows": [],
  "floor_material": {
    "jid": "ef6add12-4f83-4cf5-b0f3-6cb2a79f96da",
    "material_type": "wood grain tiles",
    "texture_path": "3D-FRONT-texture/ef6add12-4f83-4cf5-b0f3-6cb2a79f96da/texture.png"
  }
}
eval/respace/dataset-ssr3dfront_stage2_with_doors_windows/83873897-a199-40c8-94f4-34a5f25f53ca-6a131d07-68c2-4049-8c15-4ac0454bf32e.json
ADDED

@@ -0,0 +1,661 @@
{
  "bounds_top": [
    [-5.45, 2.6, 0.1], [0.35, 2.6, 0.1], [0.35, 2.6, 4.1], [2.15, 2.6, 4.1],
    [2.15, 2.6, 2.8], [1.75, 2.6, 2.8], [1.75, 2.6, -0.5], [5.45, 2.6, -0.5],
    [5.45, 2.6, -2.9], [1.65, 2.6, -2.9], [1.65, 2.6, -4.1], [-5.45, 2.6, -4.1]
  ],
  "bounds_bottom": [
    [-5.45, 0.0, 0.1], [0.35, 0.0, 0.1], [0.35, 0.0, 4.1], [2.15, 0.0, 4.1],
    [2.15, 0.0, 2.8], [1.75, 0.0, 2.8], [1.75, 0.0, -0.5], [5.45, 0.0, -0.5],
    [5.45, 0.0, -2.9], [1.65, 0.0, -2.9], [1.65, 0.0, -4.1], [-5.45, 0.0, -4.1]
  ],
  "room_type": "livingroom",
  "room_id": "LivingDiningRoom-2919",
  "objects": [
    {"desc": "A modern minimalist armchair with black fabric upholstery, high backrest, and slender metal legs, offering sleek design and comfort.",
     "size": [0.78, 1.07, 0.97], "pos": [-0.68, 0.0, -1.36], "rot": [0, 0.92388, 0, -0.38268], "jid": "f7c601dd-90ac-485a-a9b8-8d676c87423a"},
    {"desc": "Modern minimalist dark purple fabric three-seat sofa with clean lines, slightly curved arms, multicolored decorative throw pillows, and tapered wooden legs.",
     "size": [2.39, 0.81, 0.85], "pos": [-2.45, 0.0, -0.32], "rot": [0, 1, 0, 0], "jid": "f95c4da5-83ef-4123-af9a-3b17af255a66"},
    {"desc": "A modern armchair with eclectic patterns, gray fabric upholstery, and elegant tapered legs.",
     "size": [0.68, 0.8, 0.68], "pos": [-4.21, 0.0, -1.34], "rot": [0, 0.77114, 0, 0.63666], "jid": "f7dce3bd-31e7-44ac-ba84-c5098c46f6fb"},
    {"desc": "A modern industrial corner side table with a minimalist metal three-legged base and round metallic tabletop.",
     "size": [0.44, 0.63, 0.44], "pos": [-0.69, 0.0, -0.37], "rot": [0, 0, 0, 1], "jid": "da8570b9-119e-4a21-a23b-ca8b94000c30"},
    {"desc": "A modern industrial corner side table with a minimalist metal three-legged base and round metallic tabletop.",
     "size": [0.44, 0.63, 0.44], "pos": [-4.19, 0.0, -0.39], "rot": [0, 0, 0, 1], "jid": "da8570b9-119e-4a21-a23b-ca8b94000c30"},
    {"desc": "A modern, black and gray rectangular coffee table with a reflective glass surface, metal frame, and decorative spherical elements, offering a minimalist and contemporary appeal.",
     "size": [1.78, 0.79, 1.21], "pos": [-2.49, 0.0, -1.66], "rot": [0, 0, 0, 1], "jid": "fa892aee-16e1-4410-b8e1-1196e89e4455"},
    {"desc": "Contemporary geometric TV stand with white, black, and wood finish, featuring a striking contrast design and metal legs.",
     "size": [2.5, 0.75, 0.4], "pos": [-2.69, 0.0, -3.75], "rot": [0, 0, 0, 1], "jid": "14a8635b-e6ef-4561-88d9-6c21f09eda27"},
    {"desc": "A modern minimalist floor lamp with a tall slim metal frame and a transparent glass spherical shade.",
     "size": [0.27, 0.8, 0.55], "pos": [-4.67, 0.0, -3.62], "rot": [0, 0.30356, 0, 0.95281], "jid": "49508240-6ff1-4600-8f1d-333b339b58a7"},
    {"desc": "Modern minimalist dining table with a white marble top and a distinctive U-shaped black metal base.",
     "size": [1.95, 0.87, 0.85], "pos": [4.09, 0.0, -1.91], "rot": [0, 0.70711, 0, 0.70711], "jid": "b130163c-7c15-4f9b-9294-5c81fd23e968"},
    {"desc": "Modern Mid-Century black leather and wood dining chair with a curved backrest and minimalist design.",
     "size": [0.55, 0.86, 0.53], "pos": [3.19, 0.0, -1.64], "rot": [0, 0.70711, 0, 0.70711], "jid": "5fa11d54-53f7-4b9e-9d8f-07012eba4e90"},
    {"desc": "Modern Mid-Century black leather and wood dining chair with a curved backrest and minimalist design.",
     "size": [0.55, 0.86, 0.53], "pos": [3.35, 0.0, -2.38], "rot": [0, 0.70711, 0, 0.70711], "jid": "5fa11d54-53f7-4b9e-9d8f-07012eba4e90"},
    {"desc": "Modern Mid-Century black leather and wood dining chair with a curved backrest and minimalist design.",
     "size": [0.55, 0.86, 0.53], "pos": [4.76, 0.0, -1.62], "rot": [0, 0.71096, 0, -0.70323], "jid": "5fa11d54-53f7-4b9e-9d8f-07012eba4e90"},
    {"desc": "Modern Mid-Century black leather and wood dining chair with a curved backrest and minimalist design.",
     "size": [0.55, 0.86, 0.53], "pos": [4.76, 0.0, -2.42], "rot": [0, 0.71096, 0, -0.70323], "jid": "5fa11d54-53f7-4b9e-9d8f-07012eba4e90"},
    {"desc": "Modern Oriental wine cabinet with marble facade, gray wooden shelves and cabinets, and a central circular display featuring an artistic backdrop.",
     "size": [3.5, 2.63, 0.32], "pos": [2.02, 0.0, -2.71], "rot": [0, 0, 0, 1], "jid": "53f1bd2c-1738-42b2-a19f-fa112f09703f"},
    {"desc": "Modern minimalist potted plant with large green leaves in a white round ceramic pot.",
     "size": [0.68, 0.97, 0.73], "pos": [0.19, 0.0, -3.65], "rot": [0, 0, 0, 1], "jid": "6d4046a6-e65c-4e8c-aaaa-8a59bb494aa3"},
    {"desc": "A modern geometric pendant lamp with a stacked rectangular metal design in bronze brown, featuring clean lines and suspended elements.",
     "size": [1.24, 1.33, 0.23], "pos": [-2.47, 1.26, -1.65], "rot": [0, 0, 0, 1], "jid": "8693a47d-21a4-4b98-a1b4-5e901f379b8f"},
    {"desc": "A modern industrial pendant lamp featuring a circular brown metal frame with clear glass tube accents and central light bulbs, exemplifying geometric and contemporary design.",
     "size": [0.84, 1.03, 0.84], "pos": [3.98, 1.77, -2.08], "rot": [0, 0, 0, 1], "jid": "60d66fea-b63a-43f0-b85c-748a3adc0391"}
  ],
  "doors": [
    {"type": "door", "pos": [1.0, 1.05, 4.21], "size": [0.9, 2.1, 0.24], "uid": "41d2ccb8-ed37-47ef-aaff-870dae4e391f/24585991"},
    {"type": "door", "pos": [1.0, 0.0, 4.21], "size": [0.9, 0.0, 0.24], "uid": "41d2ccb8-ed37-47ef-aaff-870dae4e391f/24585994"},
    {"type": "door", "pos": [3.52, 1.05, -0.48], "size": [1.5, 2.1, 0.12], "uid": "68bd856b-14e4-4098-ade3-e37eb23428b8/24585999"},
    {"type": "door", "pos": [3.52, 0.0, -0.48], "size": [1.5, 0.0, 0.12], "uid": "68bd856b-14e4-4098-ade3-e37eb23428b8/24586002"},
    {"type": "door", "pos": [1.78, 1.05, 2.06], "size": [0.12, 2.1, 0.8], "uid": "29929fc2-3a6e-4eec-bfc1-6c6c7e241093/24586141"},
    {"type": "door", "pos": [1.78, 0.0, 2.06], "size": [0.12, 0.0, 0.8], "uid": "29929fc2-3a6e-4eec-bfc1-6c6c7e241093/24586144"},
    {"type": "door", "pos": [1.78, 1.05, -3.52], "size": [0.24, 2.1, 0.8], "uid": "e94464ee-7720-4384-bf97-5bc1623c9397/24586283"},
    {"type": "door", "pos": [1.78, 0.0, -3.52], "size": [0.24, 0.0, 0.8], "uid": "e94464ee-7720-4384-bf97-5bc1623c9397/24586286"},
    {"type": "door", "pos": [1.18, 1.05, -4.12], "size": [0.8, 2.1, 0.12], "uid": "8e27ff27-661c-4f99-a319-cf299557ae4c/24586425"},
    {"type": "door", "pos": [1.18, 0.0, -4.12], "size": [0.8, 0.0, 0.12], "uid": "8e27ff27-661c-4f99-a319-cf299557ae4c/24586428"},
    {"type": "door", "pos": [-0.8, 1.05, -4.12], "size": [0.8, 2.1, 0.12], "uid": "3a648053-81ac-4a5e-8ab1-f5b9092c2ff2/24586567"},
    {"type": "door", "pos": [-0.8, 0.0, -4.12], "size": [0.8, 0.0, 0.12], "uid": "3a648053-81ac-4a5e-8ab1-f5b9092c2ff2/24586570"}
  ],
  "windows": [
    {"type": "window", "pos": [5.53, 1.53, -1.73], "size": [0.24, 1.36, 1.26], "uid": "8f40127a-30e3-4fcf-be33-3cef56fbe309/24585469"}
  ],
  "floor_material": {
    "jid": "a1543df0-d183-45bc-9814-9bf463f7caed",
    "material_type": "flooring - reinforced",
    "texture_path": "3D-FRONT-texture/a1543df0-d183-45bc-9814-9bf463f7caed/texture.png"
  }
}
src/verl/recipe/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (155 Bytes).
src/verl/recipe/entropy/32b_kl_cov.sh
ADDED
@@ -0,0 +1,143 @@
#!/usr/bin/env bash
set -xeuo pipefail

export WANDB_API_KEY=YOUR_WANDB_API_KEY
# export VLLM_USE_V1=1

project_name='Qwen2.5-32B'
exp_name='klcov'

adv_estimator=grpo

use_kl_in_reward=False
kl_coef=0.0
use_kl_loss=False
kl_loss_coef=0.0

clip_ratio_low=0.2
clip_ratio_high=0.2

max_prompt_length=$((1024 * 2))
max_response_length=$((1024 * 8))
enable_overlong_buffer=False
overlong_buffer_len=$((1024 * 2))
overlong_penalty_factor=1.0

loss_agg_mode="token-mean"
loss_mode="kl_cov"
enable_filter_groups=True
filter_groups_metric=acc
max_num_gen_batches=10
train_prompt_bsz=256
gen_prompt_bsz=$((train_prompt_bsz * 3))
train_prompt_mini_bsz=32
n_resp_per_prompt=8
max_token=20480
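
# Derived values, for reference: max_prompt_length above is 2048 tokens,
# max_response_length is 8192 tokens, and gen_prompt_bsz is 768 prompts
# (3x the train batch of 256); with n_resp_per_prompt=8, each generation
# round samples up to 768 * 8 = 6144 responses before group filtering.
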
# Ray
RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"}
WORKING_DIR=${WORKING_DIR:-"${PWD}"}
RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"}
NNODES=${NNODES:-4}
# Paths
RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"}
MODEL_PATH=${MODEL_PATH:-"/YOUR_MODELPATH"}
CKPTS_DIR=${CKPTS_DIR:-"/YOUR_CKPTS_PATH"}
TRAIN_FILE=${TRAIN_FILE:-"/YOUR_TRAIN_FILE_PATH"}
TEST_FILE=${TEST_FILE:-["/YOUR_TRAIN_FILE_PATH"]}

# Algorithm
temperature=1.0
top_p=1.0
top_k=-1 # 0 for HF rollout, -1 for vLLM rollout
ppo_kl_coef=1
kl_cov_ratio=0.0002

# Mathematically equivalent
use_dynamic_bsz=True
infer_micro_batch_size=null
train_micro_batch_size=null
offload=False

HYDRA_FULL_ERROR=1 python -m recipe.entropy.main_entropy \
    data.train_files="${TRAIN_FILE}" \
    data.val_files="${TEST_FILE}" \
    data.prompt_key=prompt \
    data.truncation='left' \
    data.filter_overlong_prompts=False \
    data.max_prompt_length=${max_prompt_length} \
    data.max_response_length=${max_response_length} \
    data.gen_batch_size=${gen_prompt_bsz} \
    data.train_batch_size=${train_prompt_bsz} \
    data.return_raw_chat=True \
    actor_rollout_ref.rollout.n=${n_resp_per_prompt} \
    actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \
    actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \
    actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \
    actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \
    actor_rollout_ref.actor.clip_ratio_c=10.0 \
    actor_rollout_ref.actor.loss_mode=${loss_mode} \
    actor_rollout_ref.actor.policy_loss.loss_mode=${loss_mode} \
    actor_rollout_ref.actor.policy_loss.kl_cov_ratio=${kl_cov_ratio} \
    actor_rollout_ref.actor.policy_loss.ppo_kl_coef=${ppo_kl_coef} \
    actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=8 \
    actor_rollout_ref.rollout.mode=sync \
    actor_rollout_ref.rollout.name=vllm \
    algorithm.adv_estimator=${adv_estimator} \
    algorithm.use_kl_in_reward=${use_kl_in_reward} \
    algorithm.kl_ctrl.kl_coef=${kl_coef} \
    algorithm.filter_groups.enable=${enable_filter_groups} \
    algorithm.filter_groups.metric=${filter_groups_metric} \
    algorithm.filter_groups.max_num_gen_batches=${max_num_gen_batches} \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${max_token} \
    actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${max_token} \
    actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${max_token} \
    actor_rollout_ref.model.path="${MODEL_PATH}" \
    actor_rollout_ref.model.enable_gradient_checkpointing=True \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.actor.optim.weight_decay=0 \
    actor_rollout_ref.actor.optim.warmup_style=constant \
    actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \
    actor_rollout_ref.actor.ppo_micro_batch_size=${train_micro_batch_size} \
    actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \
    actor_rollout_ref.actor.entropy_coeff=0 \
    actor_rollout_ref.actor.grad_clip=1.0 \
    actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \
    actor_rollout_ref.actor.ulysses_sequence_parallel_size=1 \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.85 \
    actor_rollout_ref.rollout.log_prob_micro_batch_size=${infer_micro_batch_size} \
    actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
    actor_rollout_ref.rollout.enable_chunked_prefill=True \
    actor_rollout_ref.rollout.max_num_batched_tokens=${max_token} \
    actor_rollout_ref.rollout.temperature=${temperature} \
    actor_rollout_ref.rollout.top_p=${top_p} \
    actor_rollout_ref.rollout.top_k="${top_k}" \
    actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \
    actor_rollout_ref.rollout.val_kwargs.top_p=${top_p} \
    actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \
    actor_rollout_ref.rollout.val_kwargs.do_sample=False \
    actor_rollout_ref.rollout.val_kwargs.n=1 \
    actor_rollout_ref.ref.log_prob_micro_batch_size=${infer_micro_batch_size} \
    actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \
    actor_rollout_ref.ref.ulysses_sequence_parallel_size=1 \
    actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \
    reward_model.reward_manager=dapo \
    reward_model.overlong_buffer.enable=${enable_overlong_buffer} \
    reward_model.overlong_buffer.len=${overlong_buffer_len} \
    reward_model.overlong_buffer.penalty_factor=${overlong_penalty_factor} \
    trainer.logger='["console","wandb"]' \
    trainer.project_name="${project_name}" \
    trainer.experiment_name="${exp_name}" \
    trainer.n_gpus_per_node=8 \
    trainer.nnodes="${NNODES}" \
    trainer.val_before_train=False \
    trainer.test_freq=4 \
    trainer.save_freq=32 \
    trainer.total_epochs=1000 \
    trainer.default_local_dir="${CKPTS_DIR}" \
    trainer.resume_mode=disable
src/verl/recipe/entropy/32b_kl_cov_mininbsz.sh
ADDED
@@ -0,0 +1,142 @@
#!/usr/bin/env bash
set -xeuo pipefail

export WANDB_API_KEY=YOUR_WANDB_API_KEY
# export VLLM_USE_V1=1

project_name='Qwen2.5-32B'
exp_name='klcov'
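
# Note: this variant matches 32b_kl_cov.sh except that train_prompt_mini_bsz
# is 16 instead of 32 (hence the "mininbsz" suffix); it also omits the extra
# actor.loss_mode override passed in that script.
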
adv_estimator=grpo

use_kl_in_reward=False
kl_coef=0.0
use_kl_loss=False
kl_loss_coef=0.0

clip_ratio_low=0.2
clip_ratio_high=0.2

max_prompt_length=$((1024 * 2))
max_response_length=$((1024 * 8))
enable_overlong_buffer=False
overlong_buffer_len=$((1024 * 2))
overlong_penalty_factor=1.0

loss_agg_mode="token-mean"
loss_mode="kl_cov"
enable_filter_groups=True
filter_groups_metric=acc
max_num_gen_batches=10
train_prompt_bsz=256
gen_prompt_bsz=$((train_prompt_bsz * 3))
train_prompt_mini_bsz=16
n_resp_per_prompt=8
max_token=20480

# Ray
RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"}
WORKING_DIR=${WORKING_DIR:-"${PWD}"}
RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"}
NNODES=${NNODES:-4}
# Paths
RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"}
MODEL_PATH=${MODEL_PATH:-"/YOUR_MODELPATH"}
CKPTS_DIR=${CKPTS_DIR:-"/YOUR_CKPTS_PATH"}
TRAIN_FILE=${TRAIN_FILE:-"/YOUR_TRAIN_FILE_PATH"}
TEST_FILE=${TEST_FILE:-["/YOUR_TRAIN_FILE_PATH"]}

# Algorithm
temperature=1.0
top_p=1.0
top_k=-1 # 0 for HF rollout, -1 for vLLM rollout
ppo_kl_coef=1
kl_cov_ratio=0.0002

# Mathematically equivalent
use_dynamic_bsz=True
infer_micro_batch_size=null
train_micro_batch_size=null
offload=False

HYDRA_FULL_ERROR=1 python -m recipe.entropy.main_entropy \
    data.train_files="${TRAIN_FILE}" \
    data.val_files="${TEST_FILE}" \
    data.prompt_key=prompt \
    data.truncation='left' \
    data.filter_overlong_prompts=False \
    data.max_prompt_length=${max_prompt_length} \
    data.max_response_length=${max_response_length} \
    data.gen_batch_size=${gen_prompt_bsz} \
    data.train_batch_size=${train_prompt_bsz} \
    data.return_raw_chat=True \
    actor_rollout_ref.rollout.n=${n_resp_per_prompt} \
    actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \
    actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \
    actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \
    actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \
    actor_rollout_ref.actor.clip_ratio_c=10.0 \
    actor_rollout_ref.actor.policy_loss.loss_mode=${loss_mode} \
    actor_rollout_ref.actor.policy_loss.kl_cov_ratio=${kl_cov_ratio} \
    actor_rollout_ref.actor.policy_loss.ppo_kl_coef=${ppo_kl_coef} \
    actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=8 \
    actor_rollout_ref.rollout.mode=sync \
    actor_rollout_ref.rollout.name=vllm \
    algorithm.adv_estimator=${adv_estimator} \
    algorithm.use_kl_in_reward=${use_kl_in_reward} \
    algorithm.kl_ctrl.kl_coef=${kl_coef} \
    algorithm.filter_groups.enable=${enable_filter_groups} \
    algorithm.filter_groups.metric=${filter_groups_metric} \
    algorithm.filter_groups.max_num_gen_batches=${max_num_gen_batches} \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${max_token} \
    actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${max_token} \
    actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${max_token} \
    actor_rollout_ref.model.path="${MODEL_PATH}" \
    actor_rollout_ref.model.enable_gradient_checkpointing=True \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.actor.optim.weight_decay=0 \
    actor_rollout_ref.actor.optim.warmup_style=constant \
    actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \
    actor_rollout_ref.actor.ppo_micro_batch_size=${train_micro_batch_size} \
    actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \
    actor_rollout_ref.actor.entropy_coeff=0 \
    actor_rollout_ref.actor.grad_clip=1.0 \
    actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \
    actor_rollout_ref.actor.ulysses_sequence_parallel_size=1 \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.85 \
    actor_rollout_ref.rollout.log_prob_micro_batch_size=${infer_micro_batch_size} \
    actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
    actor_rollout_ref.rollout.enable_chunked_prefill=True \
    actor_rollout_ref.rollout.max_num_batched_tokens=${max_token} \
    actor_rollout_ref.rollout.temperature=${temperature} \
    actor_rollout_ref.rollout.top_p=${top_p} \
    actor_rollout_ref.rollout.top_k="${top_k}" \
    actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \
    actor_rollout_ref.rollout.val_kwargs.top_p=${top_p} \
    actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \
    actor_rollout_ref.rollout.val_kwargs.do_sample=False \
    actor_rollout_ref.rollout.val_kwargs.n=1 \
    actor_rollout_ref.ref.log_prob_micro_batch_size=${infer_micro_batch_size} \
    actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \
    actor_rollout_ref.ref.ulysses_sequence_parallel_size=1 \
    actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \
    reward_model.reward_manager=dapo \
    reward_model.overlong_buffer.enable=${enable_overlong_buffer} \
    reward_model.overlong_buffer.len=${overlong_buffer_len} \
    reward_model.overlong_buffer.penalty_factor=${overlong_penalty_factor} \
    trainer.logger='["console","wandb"]' \
    trainer.project_name="${project_name}" \
    trainer.experiment_name="${exp_name}" \
    trainer.n_gpus_per_node=8 \
    trainer.nnodes="${NNODES}" \
    trainer.val_before_train=False \
    trainer.test_freq=4 \
    trainer.save_freq=32 \
    trainer.total_epochs=1000 \
    trainer.default_local_dir="${CKPTS_DIR}" \
    trainer.resume_mode=disable
src/verl/recipe/entropy/7b_clip_cov.sh
ADDED
@@ -0,0 +1,146 @@
#!/usr/bin/env bash
set -xeuo pipefail

export WANDB_API_KEY=YOUR_WANDB_API_KEY
# export VLLM_USE_V1=1

project_name='Qwen2.5-7B'
exp_name='clipcov'

adv_estimator=grpo

use_kl_in_reward=False
kl_coef=0.0
use_kl_loss=False
kl_loss_coef=0.0

clip_ratio_low=1
clip_ratio_high=1
clip_cov_ratio=0.0002
clip_cov_lb=1.0
clip_cov_ub=5.0
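
# Note: with clip_ratio_low/high both set to 1, the standard PPO clip window is
# (1 - 1, 1 + 1) = (0, 2), wide enough that it rarely binds; presumably this is
# so that clipping is governed by the covariance-based bounds clip_cov_lb/ub.
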
max_prompt_length=$((1024 * 2))
max_response_length=$((1024 * 8))
enable_overlong_buffer=False
overlong_buffer_len=$((1024 * 2))
overlong_penalty_factor=1.0

loss_agg_mode="token-mean"
loss_mode="clip_cov"
enable_filter_groups=True
filter_groups_metric=acc
max_num_gen_batches=10
train_prompt_bsz=256
gen_prompt_bsz=$((train_prompt_bsz * 3))
train_prompt_mini_bsz=32
n_resp_per_prompt=8
max_token=30720

# Ray
RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"}
WORKING_DIR=${WORKING_DIR:-"${PWD}"}
RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"}
NNODES=${NNODES:-4}
# Paths
RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"}
MODEL_PATH=${MODEL_PATH:-"/YOUR_MODELPATH"}
CKPTS_DIR=${CKPTS_DIR:-"/YOUR_CKPTS_PATH"}
TRAIN_FILE=${TRAIN_FILE:-"/YOUR_TRAIN_FILE_PATH"}
TEST_FILE=${TEST_FILE:-["/YOUR_TRAIN_FILE_PATH"]}

# Algorithm
temperature=1.0
top_p=1.0
top_k=-1 # 0 for HF rollout, -1 for vLLM rollout
ppo_kl_coef=1
kl_cov_ratio=0.2

# Mathematically equivalent
use_dynamic_bsz=True
infer_micro_batch_size=null
train_micro_batch_size=null
offload=False

HYDRA_FULL_ERROR=1 python -m recipe.entropy.main_entropy \
    data.train_files="${TRAIN_FILE}" \
    data.val_files="${TEST_FILE}" \
    data.prompt_key=prompt \
    data.truncation='left' \
    data.filter_overlong_prompts=False \
    data.max_prompt_length=${max_prompt_length} \
    data.max_response_length=${max_response_length} \
    data.gen_batch_size=${gen_prompt_bsz} \
    data.train_batch_size=${train_prompt_bsz} \
    data.return_raw_chat=True \
    actor_rollout_ref.rollout.n=${n_resp_per_prompt} \
    actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \
    actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \
    actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \
    actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \
    actor_rollout_ref.actor.clip_ratio_c=10.0 \
    actor_rollout_ref.actor.policy_loss.loss_mode=${loss_mode} \
    actor_rollout_ref.actor.policy_loss.clip_cov_ratio=${clip_cov_ratio} \
    actor_rollout_ref.actor.policy_loss.clip_cov_lb=${clip_cov_lb} \
    actor_rollout_ref.actor.policy_loss.clip_cov_ub=${clip_cov_ub} \
    actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=8 \
    actor_rollout_ref.rollout.mode=sync \
    actor_rollout_ref.rollout.name=vllm \
    algorithm.adv_estimator=${adv_estimator} \
    algorithm.use_kl_in_reward=${use_kl_in_reward} \
    algorithm.kl_ctrl.kl_coef=${kl_coef} \
    algorithm.filter_groups.enable=${enable_filter_groups} \
    algorithm.filter_groups.metric=${filter_groups_metric} \
    algorithm.filter_groups.max_num_gen_batches=${max_num_gen_batches} \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${max_token} \
    actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${max_token} \
    actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${max_token} \
    actor_rollout_ref.model.path="${MODEL_PATH}" \
    actor_rollout_ref.model.enable_gradient_checkpointing=True \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.actor.optim.weight_decay=0 \
    actor_rollout_ref.actor.optim.warmup_style=constant \
    actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \
    actor_rollout_ref.actor.ppo_micro_batch_size=${train_micro_batch_size} \
    actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \
    actor_rollout_ref.actor.entropy_coeff=0 \
    actor_rollout_ref.actor.grad_clip=1.0 \
    actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \
    actor_rollout_ref.actor.ulysses_sequence_parallel_size=1 \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.85 \
    actor_rollout_ref.rollout.log_prob_micro_batch_size=${infer_micro_batch_size} \
    actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
    actor_rollout_ref.rollout.enable_chunked_prefill=True \
    actor_rollout_ref.rollout.max_num_batched_tokens=${max_token} \
    actor_rollout_ref.rollout.temperature=${temperature} \
    actor_rollout_ref.rollout.top_p=${top_p} \
    actor_rollout_ref.rollout.top_k="${top_k}" \
    actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \
    actor_rollout_ref.rollout.val_kwargs.top_p=${top_p} \
    actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \
    actor_rollout_ref.rollout.val_kwargs.do_sample=False \
    actor_rollout_ref.rollout.val_kwargs.n=1 \
    actor_rollout_ref.ref.log_prob_micro_batch_size=${infer_micro_batch_size} \
    actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \
    actor_rollout_ref.ref.ulysses_sequence_parallel_size=1 \
    actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \
    reward_model.reward_manager=dapo \
    reward_model.overlong_buffer.enable=${enable_overlong_buffer} \
    reward_model.overlong_buffer.len=${overlong_buffer_len} \
    reward_model.overlong_buffer.penalty_factor=${overlong_penalty_factor} \
    trainer.logger='["console","wandb"]' \
    trainer.project_name="${project_name}" \
    trainer.experiment_name="${exp_name}" \
    trainer.n_gpus_per_node=8 \
    trainer.nnodes="${NNODES}" \
    trainer.val_before_train=False \
    trainer.test_freq=4 \
    trainer.save_freq=32 \
    trainer.total_epochs=1000 \
    trainer.default_local_dir="${CKPTS_DIR}" \
    trainer.resume_mode=disable
src/verl/recipe/entropy/README.md
ADDED
@@ -0,0 +1,110 @@
<div align="center">

# The Entropy Mechanism of Reinforcement Learning for Large Language Model Reasoning

[](https://arxiv.org/pdf/2505.22617) [](https://github.com/PRIME-RL/Entropy-Mechanism-of-RL) [](https://www.alphaxiv.org/abs/2505.22617) [](https://x.com/stingning/status/1928088554166505667) [](https://x.com/charlesfornlp/status/1928089451080585283) [](https://x.com/_akhaliq/status/1928077929105268861)

<div align="center" style="font-family: Arial, sans-serif;">
  <p>
    <a href="#🎉news" style="text-decoration: none; font-weight: bold;">🎉 News</a> •
    <a href="#✨getting-started" style="text-decoration: none; font-weight: bold;">✨ Getting Started</a> •
    <a href="#📖introduction" style="text-decoration: none; font-weight: bold;">📖 Introduction</a>
  </p>
  <p>
    <a href="#🎈citation" style="text-decoration: none; font-weight: bold;">🎈 Citation</a> •
    <a href="#🌻acknowledgement" style="text-decoration: none; font-weight: bold;">🌻 Acknowledgement</a> •
    <a href="#📬Contact" style="text-decoration: none; font-weight: bold;">📬 Contact</a> •
    <a href="#📈star-history" style="text-decoration: none; font-weight: bold;">📈 Star History</a>
  </p>
</div>

</div>


# 🎉News

- **[2025/05/29]** 🎉 Ranked **#1** of the day on [Huggingface Daily Papers](https://huggingface.co/papers?date=2025-05-29).
- **[2025/05/29]** Released our paper on arXiv. See [here](https://arxiv.org/pdf/2505.22617). We provide insights into the entropy mechanism of RL for LLMs and propose two simple yet effective strategies to alleviate the entropy collapse.


# ✨Getting started

After preparing the training data, to train Qwen2.5-7B on a single node, taking the KL-Cov approach as an example, you can simply run:

```
cd verl
conda activate your_env
bash recipe/entropy/7b_kl_cov.sh
```

To train Qwen2.5-32B on multiple nodes, you can run the following commands:

```
cd verl
conda activate your_env
bash recipe/entropy/32b_kl_cov.sh
```
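
The launcher scripts read their model, data, and checkpoint paths (and the node count) from environment variables with placeholder defaults, so a run can also be configured without editing the script. For example (all paths below are placeholders; note that `WANDB_API_KEY` is still exported inside the script itself):

```
MODEL_PATH=/path/to/Qwen2.5-32B \
CKPTS_DIR=/path/to/checkpoints \
TRAIN_FILE=/path/to/train.parquet \
TEST_FILE='["/path/to/test.parquet"]' \
NNODES=4 \
bash recipe/entropy/32b_kl_cov.sh
```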

# 📖Introduction

<div align="left">
<img src="https://github.com/PRIME-RL/Entropy-Mechanism-of-RL/blob/main/figures/e2a.jpg?raw=true" alt="issue" style="width: 96%; height: auto;">
</div>

This paper addresses the entropy collapse issue in scaling reinforcement learning (RL) for large language models (LLMs), where policy entropy drops sharply during training, leading to overconfidence and performance saturation. We empirically establish a relationship between entropy ($H$) and performance ($R$): $R = -a\exp(H) + b$, showing that performance is bottlenecked by entropy exhaustion: as entropy is driven toward zero, performance is capped at $R = -a + b$.

<div align="left">
<img src="https://github.com/PRIME-RL/Entropy-Mechanism-of-RL/blob/main/figures/cov.jpg?raw=true" alt="issue" style="width: 96%; height: auto;">
</div>

Theoretically, we find entropy changes are driven by the covariance between action probability and logit updates, which corresponds to the advantage in policy-gradient methods. High-probability, high-advantage actions reduce entropy, while rare, high-advantage actions increase it. Empirically, the covariance term remains positive, explaining entropy's monotonic decline. To mitigate this, we propose Clip-Cov and KL-Cov, which restrict updates for high-covariance tokens. These methods effectively prevent entropy collapse and improve performance.
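
As a concrete illustration of the KL-Cov rule, the sketch below selects the small fraction of tokens with the highest (log-probability, advantage) covariance and applies a KL-magnitude penalty to them on top of the policy-gradient loss. It is a minimal paraphrase for intuition only, not the verl implementation: the covariance proxy, the penalty form, and all tensor names are simplifying assumptions of this sketch; `kl_cov_ratio` and `ppo_kl_coef` mirror the knobs exposed by the recipe scripts.

```python
import torch

def kl_cov_loss_sketch(log_prob, old_log_prob, advantage,
                       kl_cov_ratio=0.0002, ppo_kl_coef=1.0):
    # Per-token covariance proxy: centered log-prob times centered advantage.
    cov = (log_prob - log_prob.mean()) * (advantage - advantage.mean())

    # Mark the top kl_cov_ratio fraction of tokens by covariance.
    k = max(1, int(kl_cov_ratio * cov.numel()))
    top_idx = torch.topk(cov.flatten(), k).indices
    kl_mask = torch.zeros(cov.numel(), dtype=torch.bool, device=cov.device)
    kl_mask[top_idx] = True
    kl_mask = kl_mask.view_as(cov)

    pg_loss = -advantage * log_prob                         # vanilla policy gradient
    kl_pen = ppo_kl_coef * (log_prob - old_log_prob).abs()  # penalize drift from the rollout policy
    return (pg_loss + kl_mask.float() * kl_pen).mean()

# Toy usage: 4 responses x 16 tokens of fake data.
lp = torch.randn(4, 16, requires_grad=True)
loss = kl_cov_loss_sketch(lp, (lp - 0.1).detach(), torch.randn(4, 16))
loss.backward()
```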

# 📃Evaluation

<div align="left">
<img src="https://github.com/PRIME-RL/Entropy-Mechanism-of-RL/blob/main/figures/performance_fig.jpg?raw=true" alt="issue" style="width: 96%; height: auto;">
</div>


Our method is able to maintain a considerably higher level of entropy throughout training. For example, when the baseline's entropy reaches a plateau and can no longer be consumed, the KL-Cov method still sustains an entropy level over 10 times higher. Meanwhile, the response length of the policy model steadily increases, and its performance on the test set consistently surpasses that of the baseline. This indicates that our model is able to explore more freely during training, learning a better policy through RL.

| **Method**        | **AIME24** | **AIME25** | **AMC**  | **MATH-500** | **OMNI-MATH** | **OlympiadBench** | **Minerva** | **Avg.** |
| ----------------- | ---------: | ---------: | -------: | -----------: | ------------: | ----------------: | ----------: | -------: |
| *Qwen2.5-7B*      |            |            |          |              |               |                   |             |          |
| GRPO              |       21.2 |        9.6 |     58.7 |         78.8 |          27.9 |              40.7 |        36.7 |     38.6 |
| w. Clip-higher    |       18.1 |       11.5 |     56.6 |         79.2 |          29.8 |              43.3 |        40.4 |     38.8 |
| w. **`CLIP-Cov`** |       22.1 |   **15.8** |     58.2 |         80.4 |      **30.5** |          **44.1** |    **41.1** |     40.4 |
| w. **`KL-Cov`**   |   **22.6** |       12.9 | **61.4** |     **80.8** |          29.1 |              42.6 |        38.2 | **40.6** |
| *Qwen2.5-32B*     |            |            |          |              |               |                   |             |          |
| GRPO              |       21.8 |       16.2 |     69.7 |         84.2 |          35.2 |              43.6 |        45.5 |     45.8 |
| w. Clip-higher    |       35.6 |       22.3 |     69.5 |         77.2 |          35.1 |              42.5 |        43.0 |     47.2 |
| w. **`CLIP-Cov`** |       32.3 |       22.7 |     67.2 |     **87.0** |      **42.0** |          **57.2** |        46.0 |     50.3 |
| w. **`KL-Cov`**   |   **36.8** |   **30.8** | **74.5** |         84.6 |          39.1 |              49.0 |    **46.3** | **52.2** |

Both of our approaches achieve non-trivial improvements across all benchmarks. Compared to GRPO, our method outperforms it by 2.0% on average for the 7B model and by 6.4% for the 32B model. Moreover, we observe that our method yields more substantial gains on the larger Qwen2.5-32B. Specifically, our method achieves improvements of 15.0% and 14.6% over GRPO on the most challenging benchmarks, AIME24 and AIME25, respectively.


# 🎈Citation
If you find this paper or repo helpful, please cite us.

```bibtex
@article{cui2025entropy,
  title={The Entropy Mechanism of Reinforcement Learning for Reasoning Language Models},
  author={Cui, Ganqu and Zhang, Yuchen and Chen, Jiacheng and Yuan, Lifan and Wang, Zhi and Zuo, Yuxin and Li, Haozhan and Fan, Yuchen and Chen, Huayu and Chen, Weize and others},
  journal={arXiv preprint arXiv:2505.22617},
  year={2025}
}
```
# 🌻Acknowledgement
We implement our reinforcement learning algorithm extending from [verl](https://github.com/volcengine/verl). We utilize [vLLM](https://github.com/vllm-project/vllm) for inference. Our models are trained primarily on the [Qwen2.5 family](https://github.com/QwenLM/Qwen2.5). Our training data is built from [DAPO-MATH](https://huggingface.co/datasets/BytedTsinghua-SIA/DAPO-Math-17k). Thanks for their great contributions!

# 📬 Contact

For questions, discussion, or collaboration opportunities, feel free to contact:
- Ganqu Cui: cuiganqu@pjlab.org.cn
- Yuchen Zhang: yuchen.zhang2003@gmail.com
- Jiacheng Chen: jackchan9345@gmail.com
- Ning Ding: ningding.cs@gmail.com
src/verl/recipe/entropy/entropy_ray_trainer.py
ADDED
@@ -0,0 +1,347 @@
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
FSDP PPO Trainer with Ray-based single controller.
This trainer supports model-agnostic model initialization with huggingface.
"""

import uuid
from collections import defaultdict
from copy import deepcopy
from pprint import pprint

import numpy as np
import torch
from tqdm import tqdm

from verl import DataProto
from verl.trainer.ppo.metric_utils import (
    compute_data_metrics,
    compute_throughout_metrics,
    compute_timing_metrics,
    reduce_metrics,
)
from verl.trainer.ppo.ray_trainer import (
    AdvantageEstimator,
    RayPPOTrainer,
    apply_kl_penalty,
    compute_advantage,
    compute_response_mask,
)
from verl.utils.profiler import simple_timer


class RayEntropyTrainer(RayPPOTrainer):
    """
    Note that this trainer runs on the driver process on a single CPU/GPU node.
    """

    def fit(self):
        """
        The training loop of PPO.
        The driver process only needs to call the compute functions of the worker group through RPC
        to construct the PPO dataflow.
        The lightweight advantage computation is done on the driver process.
        """
        from omegaconf import OmegaConf

        from verl.utils.tracking import Tracking

        logger = Tracking(
            project_name=self.config.trainer.project_name,
            experiment_name=self.config.trainer.experiment_name,
            default_backend=self.config.trainer.logger,
            config=OmegaConf.to_container(self.config, resolve=True),
        )

        self.global_steps = 0

        # load checkpoint before doing anything
        self._load_checkpoint()

        # perform validation before training
        # currently, we only support validation using the reward_function.
        if self.val_reward_fn is not None and self.config.trainer.get("val_before_train", True):
            val_metrics = self._validate()
            assert val_metrics, f"{val_metrics=}"
            pprint(f"Initial validation metrics: {val_metrics}")
            logger.log(data=val_metrics, step=self.global_steps)
            if self.config.trainer.get("val_only", False):
                return

        # add tqdm
        progress_bar = tqdm(total=self.total_training_steps, initial=self.global_steps, desc="Training Progress")

        # we start from step 1
        self.global_steps += 1
        last_val_metrics = None

        timing_raw = defaultdict(float)
        batch = None
        num_prompt_in_batch = 0
        num_gen_batches = 0
        for epoch in range(self.config.trainer.total_epochs):
            for batch_dict in self.train_dataloader:
                metrics = {}

                new_batch: DataProto = DataProto.from_single_dict(batch_dict)
                num_gen_batches += 1
                # pop those keys for generation
                if "multi_modal_inputs" in new_batch.non_tensor_batch.keys():
                    gen_batch = new_batch.pop(
                        batch_keys=["input_ids", "attention_mask", "position_ids"],
                        non_tensor_batch_keys=["raw_prompt_ids", "multi_modal_data", "multi_modal_inputs"],
                    )
                else:
                    gen_batch = new_batch.pop(
                        batch_keys=["input_ids", "attention_mask", "position_ids"],
                        non_tensor_batch_keys=["raw_prompt_ids"],
                    )
                gen_batch = gen_batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True)

                is_last_step = self.global_steps >= self.total_training_steps

                with simple_timer("step", timing_raw):
                    # generate a batch
                    with simple_timer("gen", timing_raw):
                        if not self.async_rollout_mode:
                            gen_batch_output = self.actor_rollout_wg.generate_sequences(gen_batch)
                        else:
                            gen_batch_output = self.async_rollout_manager.generate_sequences(gen_batch)

                    if self.config.algorithm.adv_estimator == AdvantageEstimator.REMAX:
                        with simple_timer("gen_max", timing_raw):
                            gen_baseline_batch = deepcopy(gen_batch)
                            gen_baseline_batch.meta_info["do_sample"] = False
                            gen_baseline_output = self.actor_rollout_wg.generate_sequences(gen_baseline_batch)

                            new_batch = new_batch.union(gen_baseline_output)
                            reward_baseline_tensor = self.reward_fn(new_batch)
                            reward_baseline_tensor = reward_baseline_tensor.sum(dim=-1)

                            new_batch.pop(batch_keys=list(gen_baseline_output.batch.keys()))

                            new_batch.batch["reward_baselines"] = reward_baseline_tensor

                            del gen_baseline_batch, gen_baseline_output

                    new_batch.non_tensor_batch["uid"] = np.array(
                        [str(uuid.uuid4()) for _ in range(len(new_batch.batch))], dtype=object
                    )
                    # repeat to align with repeated responses in rollout
                    new_batch = new_batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True)
                    new_batch = new_batch.union(gen_batch_output)

                    with simple_timer("reward", timing_raw):
                        # compute scores. Support both model and function-based rewards.
                        # We first compute the scores using the reward model. Then, we call reward_fn to combine
                        # the reward-model results with the rule-based results.
                        if self.use_rm:
                            # we first compute the reward model score
                            reward_tensor = self.rm_wg.compute_rm_score(new_batch)
                            new_batch = new_batch.union(reward_tensor)

                        # we combine with the rule-based rm
                        reward_extra_infos_dict: dict[str, list]
                        try:
                            reward_result = self.reward_fn(new_batch, return_dict=True)
                            reward_tensor = reward_result["reward_tensor"]
                            reward_extra_infos_dict = reward_result["reward_extra_info"]
                        except Exception as e:
                            print(f"Error in reward_fn: {e}")
                            reward_tensor = self.reward_fn(new_batch)
                            reward_extra_infos_dict = {}

                        new_batch.batch["token_level_scores"] = reward_tensor

                        print(f"{list(reward_extra_infos_dict.keys())=}")
                        if reward_extra_infos_dict:
                            new_batch.non_tensor_batch.update(
                                {k: np.array(v) for k, v in reward_extra_infos_dict.items()}
                            )

                        # compute rewards. apply_kl_penalty if available
                        if self.config.algorithm.use_kl_in_reward:
                            new_batch, kl_metrics = apply_kl_penalty(
                                new_batch, kl_ctrl=self.kl_ctrl_in_reward, kl_penalty=self.config.algorithm.kl_penalty
                            )
                            metrics.update(
                                kl_metrics
                            )  # TODO: This will be cleared if we use multiple generation batches
                        else:
                            new_batch.batch["token_level_rewards"] = new_batch.batch["token_level_scores"]
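
                    # Dynamic sampling (DAPO-style): prompts whose sampled responses
                    # all receive identical rewards (zero within-group std) provide no
                    # GRPO learning signal, so they are filtered out below and generation
                    # continues until train_batch_size informative prompts accumulate.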
                    if not self.config.algorithm.filter_groups.enable:
                        batch = new_batch
                    else:  # NOTE: When the number of prompts left after filtering is less than the train batch size,
                        # we skip to the next generation batch
                        metric_name = self.config.algorithm.filter_groups.metric
                        if metric_name == "seq_final_reward":
                            # Turn to numpy for easier filtering
                            new_batch.non_tensor_batch["seq_final_reward"] = (
                                new_batch.batch["token_level_rewards"].sum(dim=-1).numpy()
                            )
                        elif metric_name == "seq_reward":
                            new_batch.non_tensor_batch["seq_reward"] = (
                                new_batch.batch["token_level_scores"].sum(dim=-1).numpy()
                            )

                        # Collect the sequence reward for each trajectory
                        prompt_uid2metric_vals = defaultdict(list)
                        for uid, metric_val in zip(
                            new_batch.non_tensor_batch["uid"], new_batch.non_tensor_batch[metric_name], strict=True
                        ):
                            prompt_uid2metric_vals[uid].append(metric_val)

                        prompt_uid2metric_std = {}
                        for prompt_uid, metric_vals in prompt_uid2metric_vals.items():
                            prompt_uid2metric_std[prompt_uid] = np.std(metric_vals)

                        kept_prompt_uids = [
                            uid
                            for uid, std in prompt_uid2metric_std.items()
                            if std > 0 or len(prompt_uid2metric_vals[uid]) == 1
                        ]
                        num_prompt_in_batch += len(kept_prompt_uids)

                        kept_traj_idxs = []
                        for idx, traj_from_prompt_uid in enumerate(new_batch.non_tensor_batch["uid"]):
                            if traj_from_prompt_uid in kept_prompt_uids:
                                kept_traj_idxs.append(idx)

                        new_batch = new_batch[kept_traj_idxs]
                        batch = new_batch if batch is None else DataProto.concat([batch, new_batch])

                        prompt_bsz = self.config.data.train_batch_size
                        if num_prompt_in_batch < prompt_bsz:
                            print(f"{num_prompt_in_batch=} < {prompt_bsz=}")
                            max_num_gen_batches = self.config.algorithm.filter_groups.max_num_gen_batches
                            if max_num_gen_batches <= 0 or num_gen_batches < max_num_gen_batches:
                                print(f"{num_gen_batches=}. Keep generating...")
                                continue
                            else:
                                raise ValueError(
                                    f"{num_gen_batches=} >= {max_num_gen_batches=}."
                                    + " Generated too many batches. Please check if your data are too difficult."
                                    + " You could also try setting max_num_gen_batches=0 to enable endless trials."
                                )
                        else:
                            # Align the batch
                            traj_bsz = self.config.data.train_batch_size * self.config.actor_rollout_ref.rollout.n
                            print(
                                f"Collected {num_prompt_in_batch} / {self.config.data.train_batch_size} prompts. "
                                f"Collecting finished."
                            )
                            batch = batch[:traj_bsz]

                    # === Updating ===

                    batch.batch["response_mask"] = compute_response_mask(batch)

                    # balance the number of valid tokens on each dp rank.
                    # Note that this breaks the order of data inside the batch.
                    # Please take care when you implement group-based adv computation such as GRPO and RLOO
                    if self.config.trainer.balance_batch:
                        self._balance_batch(batch, metrics=metrics)

                    # compute global_valid tokens
                    batch.meta_info["global_token_num"] = torch.sum(batch.batch["attention_mask"], dim=-1).tolist()

                    # recompute old_log_probs
                    with simple_timer("old_log_prob", timing_raw):
                        old_log_prob = self.actor_rollout_wg.compute_log_prob(batch)
                        batch = batch.union(old_log_prob)

                    if self.use_reference_policy:
                        # compute reference log_prob
                        with simple_timer("ref", timing_raw):
                            ref_log_prob = self.ref_policy_wg.compute_ref_log_prob(batch)
                            batch = batch.union(ref_log_prob)

                    # compute values
                    if self.use_critic:
                        with simple_timer("values", timing_raw):
                            values = self.critic_wg.compute_values(batch)
                            batch = batch.union(values)

                    with simple_timer("adv", timing_raw):
                        # compute advantages, executed on the driver process
                        norm_adv_by_std_in_grpo = self.config.algorithm.get("norm_adv_by_std_in_grpo", True)
                        batch = compute_advantage(
                            batch,
                            adv_estimator=self.config.algorithm.adv_estimator,
                            gamma=self.config.algorithm.gamma,
                            lam=self.config.algorithm.lam,
                            num_repeat=self.config.actor_rollout_ref.rollout.n,
                            norm_adv_by_std_in_grpo=norm_adv_by_std_in_grpo,
                        )

                    # update critic
                    if self.use_critic:
                        with simple_timer("update_critic", timing_raw):
                            critic_output = self.critic_wg.update_critic(batch)
                        critic_output_metrics = reduce_metrics(critic_output.meta_info["metrics"])
                        metrics.update(critic_output_metrics)

                    # implement critic warmup
                    if self.config.trainer.critic_warmup <= self.global_steps:
                        # update actor
                        with simple_timer("update_actor", timing_raw):
                            actor_output = self.actor_rollout_wg.update_actor(batch)
                        actor_output_metrics = reduce_metrics(actor_output.meta_info["metrics"])
                        metrics.update(actor_output_metrics)

                    # validate
                    if (
                        self.val_reward_fn is not None
                        and self.config.trainer.test_freq > 0
                        and (is_last_step or self.global_steps % self.config.trainer.test_freq == 0)
                    ):
                        with simple_timer("testing", timing_raw):
                            val_metrics: dict = self._validate()
                            if is_last_step:
                                last_val_metrics = val_metrics
                        metrics.update(val_metrics)

                    if self.config.trainer.save_freq > 0 and (
                        is_last_step or self.global_steps % self.config.trainer.save_freq == 0
                    ):
                        with simple_timer("save_checkpoint", timing_raw):
                            self._save_checkpoint()

                # collect metrics
                metrics.update(compute_data_metrics(batch=batch, use_critic=self.use_critic))
                metrics.update(compute_timing_metrics(batch=batch, timing_raw=timing_raw))
                # TODO: implement actual tflpo and theoretical tflpo
                n_gpus = self.resource_pool_manager.get_n_gpus()
                metrics.update(compute_throughout_metrics(batch=batch, timing_raw=timing_raw, n_gpus=n_gpus))
                timing_raw = defaultdict(float)  # clear timing

                metrics["train/num_gen_batches"] = num_gen_batches
                batch = None
                num_prompt_in_batch = 0
                num_gen_batches = 0

                # TODO: make a canonical logger that supports various backends
                logger.log(data=metrics, step=self.global_steps)

                if is_last_step:
                    pprint(f"Final validation metrics: {last_val_metrics}")
                    progress_bar.close()
                    return

                progress_bar.update(1)
                self.global_steps += 1
src/verl/recipe/spin/dp_actor.py
ADDED
@@ -0,0 +1,288 @@
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import itertools
import math
from collections import defaultdict

import numpy as np
import torch

from recipe.spin.core_algos import compute_online_dpo_loss, get_batch_logps
from verl import DataProto
from verl.utils.device import get_device_name
from verl.utils.seqlen_balancing import get_reverse_idx, rearrange_micro_batches
from verl.workers.actor import DataParallelPPOActor

__all__ = ["DataParallelPPOActor"]


class SPINDataParallelPPOActor(DataParallelPPOActor):
    def compute_log_prob(self, data: DataProto) -> torch.Tensor:
        """Compute the log probability of the responses given input_ids, attention_mask and position_ids.

        Args:
            data (DataProto): a DataProto containing the keys

                ``input_ids``: tensor of shape [batch_size, sequence_length]. torch.int64. Note that input_ids is
                the concatenation of prompt and response, so ``sequence_length = prompt_length + response_length``.

                ``attention_mask``: tensor of shape [batch_size, sequence_length]. torch.int64.

                ``position_ids``: tensor of shape [batch_size, sequence_length]. torch.int64.

                ``responses``: tensor of shape [batch_size, response_length]. torch.int64.

        Returns:
            torch.Tensor: the log_prob tensor
        """
        # set to eval
        self.actor_module.eval()

        micro_batch_size = data.meta_info["micro_batch_size"]
        temperature = data.meta_info["temperature"]  # temperature must be in data.meta_info to avoid a silent error
        use_dynamic_bsz = data.meta_info["use_dynamic_bsz"]

        select_keys = ["responses", "input_ids", "attention_mask", "position_ids"]
        batch = data.select(batch_keys=select_keys).batch
        has_multi_modal_inputs = "multi_modal_inputs" in data.non_tensor_batch.keys()

        if has_multi_modal_inputs:
            num_micro_batches = data.batch.batch_size[0] // micro_batch_size
            non_tensor_select_keys = ["multi_modal_inputs"]
            micro_batches = data.select(select_keys, non_tensor_select_keys).chunk(num_micro_batches)
        elif use_dynamic_bsz:
            # split using dynamic bsz
            max_token_len = data.meta_info["max_token_len"] * self.ulysses_sequence_parallel_size
            micro_batches, indices = rearrange_micro_batches(batch=batch, max_token_len=max_token_len)
        else:
            micro_batches = batch.split(micro_batch_size)

        log_probs_lst = []
        for micro_batch in micro_batches:
            if isinstance(micro_batch, DataProto):
                micro_batch = {**micro_batch.batch, **micro_batch.non_tensor_batch}

            with torch.no_grad():
                _, log_probs = self._forward_micro_batch(micro_batch, temperature=temperature)
            log_probs_lst.append(log_probs)
        log_probs = torch.concat(log_probs_lst, dim=0)
if use_dynamic_bsz:
|
| 85 |
+
indices = list(itertools.chain.from_iterable(indices))
|
| 86 |
+
assert len(indices) == log_probs.size(0), f"{len(indices)} vs. {log_probs.size()}"
|
| 87 |
+
revert_indices = torch.tensor(get_reverse_idx(indices), dtype=torch.long)
|
| 88 |
+
log_probs = log_probs[revert_indices]
|
| 89 |
+
|
| 90 |
+
return log_probs
|
| 91 |
+
|
| 92 |
+
def update_policy_dpo_with_ref(self, data: DataProto):
|
| 93 |
+
"""
|
| 94 |
+
Performs the DPO update step using pre-calculated reference log probs
|
| 95 |
+
from an external, periodically updated reference model.
|
| 96 |
+
"""
|
| 97 |
+
self.actor_module.train() # Ensure training mode
|
| 98 |
+
|
| 99 |
+
# --- Retrieve necessary data ---
|
| 100 |
+
try:
|
| 101 |
+
# Expects batch prepared by fit_dpo loop, including reference log probs
|
| 102 |
+
batch_td = data.batch
|
| 103 |
+
chosen_labels = batch_td["chosen_labels"]
|
| 104 |
+
rejected_labels = batch_td["rejected_labels"]
|
| 105 |
+
# ... other needed tensors like chosen/rejected input_ids, attention_mask, position_ids ...
|
| 106 |
+
|
| 107 |
+
# === Get PRE-CALCULATED reference log probs from input data ===
|
| 108 |
+
reference_chosen_logps = batch_td["reference_chosen_logps"] # Should be sequence-level logps
|
| 109 |
+
reference_rejected_logps = batch_td["reference_rejected_logps"] # Should be sequence-level logps
|
| 110 |
+
# ============================================================
|
| 111 |
+
|
| 112 |
+
# Get DPO params from meta_info
|
| 113 |
+
# beta = data.meta_info.get('dpo_beta', 0.1) # Default beta
|
| 114 |
+
beta = self.config.get("dpo_beta", 0.1) # Default beta
|
| 115 |
+
loss_type = data.meta_info.get("dpo_loss_type", "sigmoid")
|
| 116 |
+
label_smoothing = data.meta_info.get("dpo_label_smoothing", 0.0)
|
| 117 |
+
# reference_free should now be False as we provide ref logps
|
| 118 |
+
reference_free = data.meta_info.get("reference_free", False) # Default False
|
| 119 |
+
|
| 120 |
+
except KeyError as e:
|
| 121 |
+
print(f"ERROR: Missing required key for DPO update (in update_policy_dpo): {e}")
|
| 122 |
+
print(f"Available keys in data.batch: {list(batch_td.keys())}") # Debug print
|
| 123 |
+
return {} # Return empty metrics on error
|
| 124 |
+
except Exception as e_data:
|
| 125 |
+
print(f"ERROR accessing data for DPO update (in update_policy_dpo): {e_data}")
|
| 126 |
+
return {}
|
| 127 |
+
|
| 128 |
+
# --- Micro-batching Setup ---
|
| 129 |
+
micro_batch_size = self.config.get("ppo_micro_batch_size_per_gpu")
|
| 130 |
+
if micro_batch_size is None:
|
| 131 |
+
# Fallback or default if not set, or raise error
|
| 132 |
+
micro_batch_size = 1 # Example fallback, adjust as needed
|
| 133 |
+
print(f"Warning: 'ppo_micro_batch_size_per_gpu' not set, defaulting to {micro_batch_size}")
|
| 134 |
+
# raise ValueError("Config 'ppo_micro_batch_size_per_gpu' must be set.")
|
| 135 |
+
|
| 136 |
+
# Ensure chosen_input_ids exists before getting shape
|
| 137 |
+
if "chosen_input_ids" not in batch_td:
|
| 138 |
+
print("ERROR: 'chosen_input_ids' not found in batch_td for DPO update.")
|
| 139 |
+
return {}
|
| 140 |
+
bsz = batch_td["chosen_input_ids"].shape[0]
|
| 141 |
+
|
| 142 |
+
if bsz == 0:
|
| 143 |
+
print("Warning: DPO batch size is 0 in update_policy_dpo. Skipping update.")
|
| 144 |
+
return {"actor/dpo_loss": 0.0, "actor/grad_norm": 0.0} # Return zero metrics if batch is empty
|
| 145 |
+
|
| 146 |
+
num_micro_batches = math.ceil(bsz / micro_batch_size)
|
| 147 |
+
gradient_accumulation_steps = num_micro_batches
|
| 148 |
+
|
| 149 |
+
# --- Metrics Accumulation ---
|
| 150 |
+
total_loss = 0.0
|
| 151 |
+
accumulated_metrics = defaultdict(list)
|
| 152 |
+
metrics = {} # Final metrics dict
|
| 153 |
+
|
| 154 |
+
# --- Zero Gradients ---
|
| 155 |
+
self.actor_optimizer.zero_grad(set_to_none=True)
|
| 156 |
+
|
| 157 |
+
# --- Micro-batch Loop ---
|
| 158 |
+
for i in range(num_micro_batches):
|
| 159 |
+
start_idx = i * micro_batch_size
|
| 160 |
+
end_idx = min(start_idx + micro_batch_size, bsz)
|
| 161 |
+
if start_idx >= end_idx:
|
| 162 |
+
continue
|
| 163 |
+
|
| 164 |
+
# Slice the full DPO batch into micro-batches
|
| 165 |
+
# Important: Slice ALL required tensors, including labels and inputs
|
| 166 |
+
micro_batch_chosen_labels = chosen_labels[start_idx:end_idx]
|
| 167 |
+
micro_batch_rejected_labels = rejected_labels[start_idx:end_idx]
|
| 168 |
+
micro_batch_chosen_inputs = {
|
| 169 |
+
"input_ids": batch_td["chosen_input_ids"][start_idx:end_idx],
|
| 170 |
+
"attention_mask": batch_td["chosen_attention_mask"][start_idx:end_idx],
|
| 171 |
+
}
|
| 172 |
+
if "chosen_position_ids" in batch_td:
|
| 173 |
+
micro_batch_chosen_inputs["position_ids"] = batch_td["chosen_position_ids"][start_idx:end_idx]
|
| 174 |
+
|
| 175 |
+
micro_batch_rejected_inputs = {
|
| 176 |
+
"input_ids": batch_td["rejected_input_ids"][start_idx:end_idx],
|
| 177 |
+
"attention_mask": batch_td["rejected_attention_mask"][start_idx:end_idx],
|
| 178 |
+
}
|
| 179 |
+
if "rejected_position_ids" in batch_td:
|
| 180 |
+
micro_batch_rejected_inputs["position_ids"] = batch_td["rejected_position_ids"][start_idx:end_idx]
|
| 181 |
+
|
| 182 |
+
# Determine autocast dtype
|
| 183 |
+
autocast_dtype = torch.bfloat16 # Or get dynamically from config/FSDP settings
|
| 184 |
+
# --- Autocast Forward Pass ---
|
| 185 |
+
with torch.autocast(device_type=get_device_name(), dtype=autocast_dtype):
|
| 186 |
+
# --- Step 1: Forward pass for CURRENT policy log probs (with grad) ---
|
| 187 |
+
policy_chosen_outputs = self.actor_module(**micro_batch_chosen_inputs, use_cache=False)
|
| 188 |
+
policy_rejected_outputs = self.actor_module(**micro_batch_rejected_inputs, use_cache=False)
|
| 189 |
+
|
| 190 |
+
# --- Step 2: Calculate CURRENT policy log probs using get_batch_logps ---
|
| 191 |
+
policy_chosen_logps = get_batch_logps(
|
| 192 |
+
policy_chosen_outputs.logits, micro_batch_chosen_labels, average_log_prob=False
|
| 193 |
+
)
|
| 194 |
+
policy_rejected_logps = get_batch_logps(
|
| 195 |
+
policy_rejected_outputs.logits, micro_batch_rejected_labels, average_log_prob=False
|
| 196 |
+
)
|
| 197 |
+
|
| 198 |
+
# --- Step 3: Retrieve PRE-CALCULATED reference log probs (NO grad needed) ---
|
| 199 |
+
# Slice the full batch reference logps for the current micro-batch
|
| 200 |
+
micro_ref_chosen_logps = reference_chosen_logps[start_idx:end_idx]
|
| 201 |
+
micro_ref_rejected_logps = reference_rejected_logps[start_idx:end_idx]
|
| 202 |
+
# --- The ActorAsRef calculation block is REMOVED ---
|
| 203 |
+
|
| 204 |
+
# --- Step 4: Calculate DPO Logits and Loss ---
|
| 205 |
+
pi_logratios = policy_chosen_logps - policy_rejected_logps
|
| 206 |
+
ref_logratios = micro_ref_chosen_logps - micro_ref_rejected_logps # Uses pre-calculated values
|
| 207 |
+
logits = pi_logratios - ref_logratios # DPO logits
|
| 208 |
+
|
| 209 |
+
loss = compute_online_dpo_loss(
|
| 210 |
+
policy_chosen_logps=policy_chosen_logps, # Has grad
|
| 211 |
+
policy_rejected_logps=policy_rejected_logps, # Has grad
|
| 212 |
+
reference_chosen_logps=micro_ref_chosen_logps, # No grad (from input)
|
| 213 |
+
reference_rejected_logps=micro_ref_rejected_logps, # No grad (from input)
|
| 214 |
+
beta=beta,
|
| 215 |
+
label_smoothing=label_smoothing,
|
| 216 |
+
loss_type=loss_type,
|
| 217 |
+
reference_free=reference_free, # Should be False now
|
| 218 |
+
)
|
| 219 |
+
|
| 220 |
+
# --- Scale loss for gradient accumulation ---
|
| 221 |
+
scaled_loss = loss / gradient_accumulation_steps
|
| 222 |
+
|
| 223 |
+
# --- Accumulate Metrics ---
|
| 224 |
+
total_loss += loss.item() # Unscaled loss
|
| 225 |
+
accumulated_metrics["actor/dpo_loss_batch"].append(loss.item())
|
| 226 |
+
accumulated_metrics["actor/dpo_logits_batch"].append(logits.mean().item())
|
| 227 |
+
# Accumulate policy and reference log probs/ratios if needed for debugging
|
| 228 |
+
accumulated_metrics["actor/policy_chosen_logps_batch"].append(policy_chosen_logps.mean().item())
|
| 229 |
+
accumulated_metrics["actor/policy_rejected_logps_batch"].append(policy_rejected_logps.mean().item())
|
| 230 |
+
accumulated_metrics["actor/reference_chosen_logps_batch"].append(micro_ref_chosen_logps.mean().item())
|
| 231 |
+
accumulated_metrics["actor/reference_rejected_logps_batch"].append(
|
| 232 |
+
micro_ref_rejected_logps.mean().item()
|
| 233 |
+
)
|
| 234 |
+
|
| 235 |
+
# --- Backward Pass (outside autocast) ---
|
| 236 |
+
# Check if loss requires grad before backward
|
| 237 |
+
if scaled_loss.requires_grad:
|
| 238 |
+
scaled_loss.backward()
|
| 239 |
+
else:
|
| 240 |
+
print(f"Warning: Scaled loss at micro-batch {i} does not require grad. Skipping backward.")
|
| 241 |
+
|
| 242 |
+
# --- End Micro-batch Loop ---
|
| 243 |
+
|
| 244 |
+
# --- Optimizer Step (after accumulating gradients for all micro-batches) ---
|
| 245 |
+
grad_norm = self._optimizer_step()
|
| 246 |
+
|
| 247 |
+
# --- Populate Final Metrics ---
|
| 248 |
+
if num_micro_batches > 0 and bsz > 0: # Check if any processing happened
|
| 249 |
+
metrics["actor/dpo_loss"] = total_loss / num_micro_batches
|
| 250 |
+
metrics["actor/grad_norm"] = (
|
| 251 |
+
grad_norm.item() if torch.is_tensor(grad_norm) and torch.isfinite(grad_norm) else float("inf")
|
| 252 |
+
)
|
| 253 |
+
# Average other accumulated metrics
|
| 254 |
+
for key, val_list in accumulated_metrics.items():
|
| 255 |
+
if val_list:
|
| 256 |
+
metrics[key.replace("_batch", "")] = np.mean(val_list)
|
| 257 |
+
|
| 258 |
+
# Calculate accuracy / rewards / margins based on averaged logprobs if desired
|
| 259 |
+
if (
|
| 260 |
+
"actor/policy_chosen_logps" in metrics
|
| 261 |
+
and "actor/policy_rejected_logps" in metrics
|
| 262 |
+
and "actor/reference_chosen_logps" in metrics
|
| 263 |
+
and "actor/reference_rejected_logps" in metrics
|
| 264 |
+
):
|
| 265 |
+
policy_ratio_mean = metrics["actor/policy_chosen_logps"] - metrics["actor/policy_rejected_logps"]
|
| 266 |
+
ref_ratio_mean = metrics["actor/reference_chosen_logps"] - metrics["actor/reference_rejected_logps"]
|
| 267 |
+
logits_mean = policy_ratio_mean - ref_ratio_mean
|
| 268 |
+
metrics["actor/rewards_chosen"] = beta * (
|
| 269 |
+
metrics["actor/policy_chosen_logps"] - metrics["actor/reference_chosen_logps"]
|
| 270 |
+
)
|
| 271 |
+
metrics["actor/rewards_rejected"] = beta * (
|
| 272 |
+
metrics["actor/policy_rejected_logps"] - metrics["actor/reference_rejected_logps"]
|
| 273 |
+
)
|
| 274 |
+
metrics["actor/rewards_accuracies"] = float(logits_mean > 0) # Mean accuracy proxy
|
| 275 |
+
metrics["actor/rewards_margins"] = metrics["actor/rewards_chosen"] - metrics["actor/rewards_rejected"]
|
| 276 |
+
|
| 277 |
+
else: # Handle case where no micro-batches were run (e.g., bsz=0)
|
| 278 |
+
metrics["actor/dpo_loss"] = 0.0
|
| 279 |
+
metrics["actor/grad_norm"] = 0.0
|
| 280 |
+
# Initialize other metrics to 0 or NaN as appropriate
|
| 281 |
+
for key in accumulated_metrics.keys():
|
| 282 |
+
metrics[key.replace("_batch", "")] = 0.0
|
| 283 |
+
metrics["actor/rewards_chosen"] = 0.0
|
| 284 |
+
metrics["actor/rewards_rejected"] = 0.0
|
| 285 |
+
metrics["actor/rewards_accuracies"] = 0.0
|
| 286 |
+
metrics["actor/rewards_margins"] = 0.0
|
| 287 |
+
|
| 288 |
+
return metrics # Return aggregated metrics
|
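For orientation: `compute_online_dpo_loss` and `get_batch_logps` are imported from `recipe/spin/core_algos.py`, which is not part of this upload. With the default `loss_type="sigmoid"` used above, the standard DPO objective over sequence-level log probs is the negative log-sigmoid of the scaled policy-vs-reference log-ratio gap. The following is a minimal sketch of that standard formulation, not the recipe's actual implementation; the function name and signature are illustrative only.

import torch.nn.functional as F

def sigmoid_dpo_loss_sketch(policy_chosen_logps, policy_rejected_logps,
                            reference_chosen_logps, reference_rejected_logps,
                            beta=0.1, label_smoothing=0.0):
    # DPO logits: how much more the policy prefers chosen over rejected,
    # relative to the reference model (all inputs are sequence-level logps).
    logits = (policy_chosen_logps - policy_rejected_logps) - (
        reference_chosen_logps - reference_rejected_logps
    )
    # label smoothing interpolates toward the flipped preference
    losses = (
        -F.logsigmoid(beta * logits) * (1 - label_smoothing)
        - F.logsigmoid(-beta * logits) * label_smoothing
    )
    return losses.mean()

With label_smoothing=0.0 this reduces to -log(sigmoid(beta * logits)), i.e. the update rewards the policy for widening the chosen/rejected gap beyond the reference model's.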
src/verl/recipe/spin/fsdp_workers.py
ADDED
@@ -0,0 +1,600 @@
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import logging
import os
import warnings

import numpy as np
import psutil
import torch
import torch.distributed
from codetiming import Timer
from omegaconf import OmegaConf, open_dict
from torch.distributed.device_mesh import init_device_mesh

import verl.utils.torch_functional as verl_F
from verl import DataProto
from verl.single_controller.base import Worker
from verl.single_controller.base.decorator import Dispatch, make_nd_compute_dataproto_dispatch_fn, register
from verl.utils import hf_tokenizer
from verl.utils.checkpoint.fsdp_checkpoint_manager import FSDPCheckpointManager
from verl.utils.device import get_device_id, get_device_name, get_nccl_backend, get_torch_device
from verl.utils.flops_counter import FlopsCounter
from verl.utils.fs import copy_to_local
from verl.utils.fsdp_utils import (
    get_fsdp_wrap_policy,
    get_init_weight_context_manager,
    init_fn,
    load_fsdp_model_to_gpu,
    load_fsdp_optimizer,
    offload_fsdp_model_to_cpu,
    offload_fsdp_optimizer,
)
from verl.utils.import_utils import import_external_libs
from verl.utils.model import compute_position_id_with_mask
from verl.utils.profiler import log_gpu_memory_usage
from verl.workers.fsdp_workers import ActorRolloutRefWorker
from verl.workers.sharding_manager.fsdp_ulysses import FSDPUlyssesShardingManager

logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_PPO_LOGGING_LEVEL", "WARN"))


def create_device_mesh(world_size, fsdp_size):
    if fsdp_size < 0 or fsdp_size >= world_size:
        device_mesh = init_device_mesh(get_device_name(), mesh_shape=(world_size,), mesh_dim_names=["fsdp"])
    else:
        device_mesh = init_device_mesh(
            get_device_name(), mesh_shape=(world_size // fsdp_size, fsdp_size), mesh_dim_names=["ddp", "fsdp"]
        )
    return device_mesh


def get_sharding_strategy(device_mesh):
    from torch.distributed.fsdp import ShardingStrategy

    if device_mesh.ndim == 1:
        sharding_strategy = ShardingStrategy.FULL_SHARD
    elif device_mesh.ndim == 2:
        sharding_strategy = ShardingStrategy.HYBRID_SHARD
    else:
        raise NotImplementedError(f"Got device mesh ndim={device_mesh.ndim}, but only 1 and 2 are supported")
    return sharding_strategy


class SPINRolloutRefWorker(ActorRolloutRefWorker):
    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def init_model(self):
        from recipe.spin.dp_actor import SPINDataParallelPPOActor as DataParallelPPOActor

        # This is used to import external_lib into the huggingface systems
        import_external_libs(self.config.model.get("external_lib", None))

        override_model_config = OmegaConf.to_container(OmegaConf.create(self.config.model.get("override_config", {})))
        use_remove_padding = self.config.model.get("use_remove_padding", False)
        use_fused_kernels = self.config.model.get("use_fused_kernels", False)

        if self._is_actor or self._is_rollout or self._is_ref:
            # we need the model for actor and rollout
            if self._is_actor or self._is_ref:
                optim_config = self.config.actor.optim
                fsdp_config = self.config.actor.fsdp_config
            else:
                optim_config = None
                fsdp_config = OmegaConf.create()
            self.actor_module_fsdp, self.actor_optimizer, self.actor_lr_scheduler, self.actor_model_config = (
                self._build_model_optimizer(
                    model_path=self.config.model.path,
                    fsdp_config=fsdp_config,
                    optim_config=optim_config,
                    override_model_config=override_model_config,
                    use_remove_padding=use_remove_padding,
                    use_fused_kernels=use_fused_kernels,
                    enable_gradient_checkpointing=self.config.model.get("enable_gradient_checkpointing", False),
                    trust_remote_code=self.config.model.get("trust_remote_code", False),
                    use_liger=self.config.model.get("use_liger", False),
                    role="actor",
                )
            )

            # get the original unwrapped module
            self.actor_module = self.actor_module_fsdp._fsdp_wrapped_module

            if self._is_offload_optimizer:
                offload_fsdp_optimizer(optimizer=self.actor_optimizer)
                log_gpu_memory_usage("After offload actor optimizer during init", logger=logger)
        # load from checkpoint
        if self._is_actor or self._is_ref:
            OmegaConf.set_struct(self.config.actor, True)
            with open_dict(self.config.actor):
                self.config.actor.use_remove_padding = use_remove_padding
                self.config.actor.use_fused_kernels = use_fused_kernels
            self.actor = DataParallelPPOActor(
                config=self.config.actor, actor_module=self.actor_module_fsdp, actor_optimizer=self.actor_optimizer
            )

        if self._is_rollout:
            self.rollout, self.rollout_sharding_manager = self._build_rollout(
                trust_remote_code=self.config.model.get("trust_remote_code", False)
            )

        if self._is_ref:
            self.ref_module_fsdp = self._build_model_optimizer(
                model_path=self.config.model.path,
                fsdp_config=self.config.ref.fsdp_config,
                optim_config=None,
                override_model_config=override_model_config,
                use_remove_padding=use_remove_padding,
                use_fused_kernels=use_fused_kernels,
                trust_remote_code=self.config.model.get("trust_remote_code", False),
                use_liger=self.config.model.get("use_liger", False),
                role="ref",
            )[0]
            OmegaConf.set_struct(self.config.ref, True)
            with open_dict(self.config.ref):
                self.config.ref.use_remove_padding = use_remove_padding
                self.config.ref.use_fused_kernels = use_fused_kernels
            self.ref_policy = DataParallelPPOActor(config=self.config.ref, actor_module=self.ref_module_fsdp)
            self.checkpoint_manager = FSDPCheckpointManager(
                model=self.actor_module_fsdp,
                optimizer=self.actor.actor_optimizer,
                lr_scheduler=self.actor_lr_scheduler,
                processing_class=self.processor if self.processor is not None else self.tokenizer,
                checkpoint_config=self.config.actor.checkpoint,
            )

        if self._is_actor:
            self.flops_counter = FlopsCounter(self.actor_model_config)
            self.checkpoint_manager = FSDPCheckpointManager(
                model=self.actor_module_fsdp,
                optimizer=self.actor.actor_optimizer,
                lr_scheduler=self.actor_lr_scheduler,
                processing_class=self.processor if self.processor is not None else self.tokenizer,
                checkpoint_config=self.config.actor.checkpoint,
            )

    @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="actor"))
    def compute_ref_log_prob(self, data: DataProto):
        assert self._is_ref

        # Support all hardware backends
        data = data.to(get_device_id())

        micro_batch_size = self.config.ref.log_prob_micro_batch_size_per_gpu
        data.meta_info["micro_batch_size"] = micro_batch_size
        data.meta_info["temperature"] = self.config.rollout.temperature
        data.meta_info["max_token_len"] = self.config.ref.log_prob_max_token_len_per_gpu
        data.meta_info["use_dynamic_bsz"] = self.config.ref.log_prob_use_dynamic_bsz
        with self.ulysses_sharding_manager:
            output = self.ref_policy.compute_log_prob(data=data)
            output = DataProto.from_dict(tensors={"ref_log_prob": output})

        output = output.to("cpu")

        # https://pytorch.org/docs/stable/notes/fsdp.html#fsdp-notes
        # unshard the root FSDP module
        if self.world_size > 1:
            self.ref_policy.actor_module._handle.reshard(True)

        return output

    @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="actor"))
    def compute_log_prob(self, data: DataProto):
        assert self._is_actor
        if self._is_offload_param:
            load_fsdp_model_to_gpu(self.actor_module_fsdp)

        # Support all hardware backends
        data = data.to(get_device_id())
        # we should always recompute old_log_probs when it is HybridEngine
        data.meta_info["micro_batch_size"] = self.config.rollout.log_prob_micro_batch_size_per_gpu
        data.meta_info["max_token_len"] = self.config.rollout.log_prob_max_token_len_per_gpu
        data.meta_info["use_dynamic_bsz"] = self.config.rollout.log_prob_use_dynamic_bsz
        data.meta_info["temperature"] = self.config.rollout.temperature
        # perform recompute log_prob
        with self.ulysses_sharding_manager:
            output = self.actor.compute_log_prob(data=data)
            output = DataProto.from_dict(
                tensors={"old_log_probs": output}, meta_info={"temperature": self.config.rollout.temperature}
            )

        output = output.to("cpu")

        # https://pytorch.org/docs/stable/notes/fsdp.html#fsdp-notes
        # unshard the root FSDP module
        if self.world_size > 1:
            self.actor.actor_module._handle.reshard(True)

        if self._is_offload_param:
            offload_fsdp_model_to_cpu(self.actor_module_fsdp)

        log_gpu_memory_usage("After compute_log_prob", logger=logger)
        return output

    @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="actor"))
    def update_actor_dpo(self, data: DataProto):
        """
        Wrapper for the actor update step. Handles FSDP state management and
        calls self.actor.update_policy_dpo_with_ref, which contains the DPO
        logic based on pre-calculated reference log probabilities.
        """
        # Support all hardware backends
        data = data.to(get_device_id())

        assert self._is_actor  # Make sure this worker has the actor role
        if self.actor is None:
            raise RuntimeError("Actor instance (self.actor) not initialized in worker.")

        # --- FSDP State Management ---
        if self._is_offload_param:
            load_fsdp_model_to_gpu(self.actor_module_fsdp)
        if self._is_offload_optimizer:
            load_fsdp_optimizer(optimizer=self.actor_optimizer, device_id=get_device_id())

        log_gpu_memory_usage("Before update policy (DPO via PPO path)", logger=logger)

        # --- Ulysses Sharding (if used) ---
        with self.ulysses_sharding_manager:
            # --- Call the core update method (now containing DPO logic) ---
            with Timer(name="update_policy_dpo_via_ppo", logger=None) as timer:  # Use a distinct timer name
                # Calls the modified update_policy method
                metrics = self.actor.update_policy_dpo_with_ref(data=data)  # <-- THIS CALLS THE MODIFIED FUNCTION
            delta_time = timer.last

            # --- Add Performance Metrics ---
            # MFU calculation might be less accurate/meaningful here for DPO
            metrics["perf/approx_tokens_processed"] = torch.sum(
                data.batch.get("attention_mask", torch.tensor(0))
            ).item()  # Approx tokens
            metrics["perf/max_memory_allocated_gb"] = get_torch_device().max_memory_allocated() / (1024**3)
            metrics["perf/max_memory_reserved_gb"] = get_torch_device().max_memory_reserved() / (1024**3)
            metrics["perf/cpu_memory_used_gb"] = psutil.virtual_memory().used / (1024**3)
            global_num_tokens = data.meta_info["global_token_num"]
            estimated_flops, promised_flops = self.flops_counter.estimate_flops(global_num_tokens, delta_time)
            metrics["perf/mfu/actor"] = estimated_flops * self.config.ppo_epochs / promised_flops / self.world_size

            # --- LR Scheduler Step ---
            lr = self.actor_lr_scheduler.get_last_lr()[0]
            metrics["actor/lr"] = lr
            self.actor_lr_scheduler.step()

        log_gpu_memory_usage("After update policy (DPO via PPO path)", logger=logger)

        # --- Prepare Output ---
        output = DataProto(meta_info={"metrics": metrics})
        output = output.to("cpu")

        # --- FSDP State Management (Offload) ---
        if self._is_offload_param:
            offload_fsdp_model_to_cpu(self.actor_module_fsdp)
        if self._is_offload_optimizer:
            offload_fsdp_optimizer(optimizer=self.actor_optimizer)

        return output


# TODO(sgm): we may need to extract it to dp_reward_model.py
class RewardModelWorker(Worker):
    """
    Note that we only implement reward models that are subclasses of AutoModelForTokenClassification.
    """

    def __init__(self, config):
        super().__init__()
        import torch.distributed

        if not torch.distributed.is_initialized():
            torch.distributed.init_process_group(backend=get_nccl_backend())
        self.config = config

        # build device mesh for Ulysses Sequence Parallel
        world_size = torch.distributed.get_world_size()
        from torch.distributed.device_mesh import init_device_mesh

        fsdp_size = self.config.model.fsdp_config.fsdp_size
        self.device_mesh = create_device_mesh(world_size=world_size, fsdp_size=fsdp_size)

        self.ulysses_device_mesh = None
        self.ulysses_sequence_parallel_size = self.config.get("ulysses_sequence_parallel_size", 1)
        dp = world_size // self.ulysses_sequence_parallel_size
        if self.ulysses_sequence_parallel_size > 1:
            self.ulysses_device_mesh = init_device_mesh(
                get_device_name(), mesh_shape=(dp, self.ulysses_sequence_parallel_size), mesh_dim_names=["dp", "sp"]
            )

        if self.ulysses_device_mesh is not None:
            is_collect = self.ulysses_device_mesh["sp"].get_local_rank() == 0
            self._register_dispatch_collect_info(
                "reward", dp_rank=self.ulysses_device_mesh["dp"].get_local_rank(), is_collect=is_collect
            )
        else:
            self._register_dispatch_collect_info("reward", dp_rank=self.rank, is_collect=True)

        self.ulysses_sharding_manager = FSDPUlyssesShardingManager(self.ulysses_device_mesh)

        self.use_remove_padding = self.config.model.get("use_remove_padding", False)

        # normalize config
        if self.config.micro_batch_size is not None:
            self.config.micro_batch_size //= torch.distributed.get_world_size()
            self.config.micro_batch_size_per_gpu = self.config.micro_batch_size

    def _build_model(self, config):
        # the following line is necessary
        from torch.distributed.fsdp import CPUOffload
        from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
        from transformers import AutoConfig, AutoModelForTokenClassification

        # download the checkpoint from hdfs
        local_path = copy_to_local(config.model.path)

        if self.config.model.input_tokenizer is None:
            self._do_switch_chat_template = False
        else:
            self._do_switch_chat_template = True
            input_tokenizer_local_path = copy_to_local(config.model.input_tokenizer)
            self.input_tokenizer = hf_tokenizer(
                input_tokenizer_local_path, trust_remote_code=config.model.get("trust_remote_code", False)
            )
        self.tokenizer = hf_tokenizer(local_path, trust_remote_code=config.model.get("trust_remote_code", False))

        trust_remote_code = config.model.get("trust_remote_code", False)
        model_config = AutoConfig.from_pretrained(local_path, trust_remote_code=trust_remote_code)
        model_config.num_labels = 1

        # note that we have to create the model in fp32. Otherwise, the optimizer is in bf16, which is incorrect
        init_context = get_init_weight_context_manager(
            use_meta_tensor=not model_config.tie_word_embeddings, mesh=self.device_mesh
        )

        with init_context(), warnings.catch_warnings():
            warnings.simplefilter("ignore")
            model_config.classifier_dropout = 0.0
            reward_module = AutoModelForTokenClassification.from_pretrained(
                pretrained_model_name_or_path=local_path,
                config=model_config,
                torch_dtype=torch.bfloat16,
                attn_implementation="flash_attention_2",
                trust_remote_code=trust_remote_code,
            )

            if config.model.get("use_remove_padding", False) or self.ulysses_sequence_parallel_size > 1:
                from verl.models.transformers.monkey_patch import apply_monkey_patch

                apply_monkey_patch(model=reward_module, ulysses_sp_size=self.ulysses_sequence_parallel_size)

            reward_module.to(torch.bfloat16)

        auto_wrap_policy = get_fsdp_wrap_policy(module=reward_module, config=self.config.model.fsdp_config)

        fsdp_mesh = self.device_mesh
        sharding_strategy = get_sharding_strategy(fsdp_mesh)

        reward_module = FSDP(
            reward_module,
            param_init_fn=init_fn,
            use_orig_params=False,
            auto_wrap_policy=auto_wrap_policy,
            device_id=get_device_id(),
            sharding_strategy=sharding_strategy,  # zero3
            sync_module_states=True,
            cpu_offload=CPUOffload(offload_params=True),
            forward_prefetch=False,
            device_mesh=self.device_mesh,
        )

        return reward_module

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def init_model(self):
        # This is used to import external_lib into the huggingface systems
        import_external_libs(self.config.model.get("external_lib", None))
        self.reward_module = self._build_model(config=self.config)

    def _forward_micro_batch(self, micro_batch):
        from flash_attn.bert_padding import index_first_axis, pad_input, rearrange, unpad_input

        from verl.utils.ulysses import gather_outputs_and_unpad, ulysses_pad_and_slice_inputs

        with torch.no_grad(), torch.autocast(device_type=get_device_name(), dtype=torch.bfloat16):
            input_ids = micro_batch["input_ids"]
            batch_size, seqlen = input_ids.shape
            attention_mask = micro_batch["attention_mask"]
            position_ids = micro_batch["position_ids"]

            if self.use_remove_padding:
                input_ids_rmpad, indices, *_ = unpad_input(
                    input_ids.unsqueeze(-1), attention_mask
                )  # input_ids_rmpad (total_nnz, ...)
                input_ids_rmpad = input_ids_rmpad.transpose(0, 1)  # (1, total_nnz)

                # unpad the position_ids to align the rotary
                position_ids_rmpad = index_first_axis(
                    rearrange(position_ids.unsqueeze(-1), "b s ... -> (b s) ..."), indices
                ).transpose(0, 1)

                # pad and slice the inputs if sp > 1
                if self.ulysses_sequence_parallel_size > 1:
                    input_ids_rmpad, position_ids_rmpad, pad_size = ulysses_pad_and_slice_inputs(
                        input_ids_rmpad, position_ids_rmpad, sp_size=self.ulysses_sequence_parallel_size
                    )

                # only pass input_ids and position_ids to enable flash_attn_varlen
                output = self.reward_module(
                    input_ids=input_ids_rmpad, attention_mask=None, position_ids=position_ids_rmpad, use_cache=False
                )  # prevent the model from thinking we are generating
                reward_rmpad = output.logits
                reward_rmpad = reward_rmpad.squeeze(0)  # (total_nnz)

                # gather output if sp > 1
                if self.ulysses_sequence_parallel_size > 1:
                    reward_rmpad = gather_outputs_and_unpad(
                        reward_rmpad, gather_dim=0, unpad_dim=0, padding_size=pad_size
                    )

                # pad it back
                rm_score = pad_input(reward_rmpad, indices=indices, batch=batch_size, seqlen=seqlen).squeeze(-1)
            else:
                output = self.reward_module(
                    input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, use_cache=False
                )
                rm_score = output.logits  # (batch_size, seq_len, 1)
                rm_score = rm_score.squeeze(-1)

            # extract the result of the last valid token
            eos_mask_idx = torch.argmax(position_ids * attention_mask, dim=-1)  # (bsz,)
            rm_score = rm_score[torch.arange(batch_size), eos_mask_idx]
            return rm_score

    def _expand_to_token_level(self, data: DataProto, scores: torch.Tensor):
        batch_size = data.batch.batch_size[0]
        # expand as token_level_reward
        attention_mask = data.batch["attention_mask"]
        position_ids = data.batch["position_ids"]
        response_length = data.batch["responses"].shape[-1]
        eos_mask_idx = torch.argmax(position_ids * attention_mask, dim=-1)  # (bsz,)
        token_level_scores = torch.zeros_like(attention_mask, dtype=scores.dtype)  # (bsz, seqlen)
        token_level_scores[torch.arange(batch_size), eos_mask_idx] = scores

        # select the response part
        token_level_scores = token_level_scores[:, -response_length:]

        return token_level_scores

    def _switch_chat_template(self, data: DataProto):
        src_max_length = data.batch["attention_mask"].shape[-1]

        src_tokenizer = self.input_tokenizer
        target_tokenizer = self.tokenizer

        rm_input_ids = []
        rm_attention_mask = []

        for i in range(data.batch.batch_size[0]):
            if not isinstance(data.non_tensor_batch["raw_prompt"][i], list | np.ndarray):
                raise TypeError(
                    f"raw_prompt must be a list or numpy array, got {type(data.non_tensor_batch['raw_prompt'][i])}"
                )

            # extract raw prompt
            chat: list = list(data.non_tensor_batch["raw_prompt"][i])

            # extract response
            response_ids = data.batch["responses"][i]
            response_length = response_ids.shape[-1]
            valid_response_length = data.batch["attention_mask"][i][-response_length:].sum()
            valid_response_ids = response_ids[:valid_response_length]

            # decode
            response = src_tokenizer.decode(valid_response_ids)
            # remove bos and eos
            response = response.replace(src_tokenizer.eos_token, "")

            chat.append({"role": "assistant", "content": response})

            prompt_with_chat_template = target_tokenizer.apply_chat_template(
                chat, add_generation_prompt=False, tokenize=False
            )
            if self.rank == 0 and i == 0:
                # for debugging purposes
                print(f"Switch template. chat: {prompt_with_chat_template}")

            # the maximum length is actually determined by the reward model itself
            max_length = self.config.get("max_length", src_max_length)
            if max_length is None:
                max_length = src_max_length

            model_inputs = target_tokenizer(prompt_with_chat_template, return_tensors="pt", add_special_tokens=False)
            input_ids, attention_mask = verl_F.postprocess_data(
                input_ids=model_inputs["input_ids"],
                attention_mask=model_inputs["attention_mask"],
                max_length=max_length,
                pad_token_id=target_tokenizer.pad_token_id,
                left_pad=False,  # right padding
                truncation=self.config.get("truncation", "right"),
            )  # truncate from the right

            rm_input_ids.append(input_ids)
            rm_attention_mask.append(attention_mask)

        rm_input_ids = torch.cat(rm_input_ids, dim=0)
        rm_attention_mask = torch.cat(rm_attention_mask, dim=0)

        rm_position_ids = compute_position_id_with_mask(rm_attention_mask)

        rm_inputs = {"input_ids": rm_input_ids, "attention_mask": rm_attention_mask, "position_ids": rm_position_ids}

        return DataProto.from_dict(rm_inputs)

    @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="reward"))
    def compute_rm_score(self, data: DataProto):
        import itertools

        from verl.utils.seqlen_balancing import get_reverse_idx, rearrange_micro_batches

        # Support all hardware backends
        data = data.to(get_device_id())
        if self._do_switch_chat_template:
            rm_data = self._switch_chat_template(data)
        else:
            rm_input_ids = data.batch["input_ids"]
            rm_attention_mask = data.batch["attention_mask"]
            rm_position_ids = data.batch["position_ids"]
            rm_inputs = {
                "input_ids": rm_input_ids,
                "attention_mask": rm_attention_mask,
                "position_ids": rm_position_ids,
            }
            rm_data = DataProto.from_dict(rm_inputs)

        # Support all hardware backends
        rm_data.batch = rm_data.batch.to(get_device_id())

        # perform forward computation
        with self.ulysses_sharding_manager:
            rm_data = self.ulysses_sharding_manager.preprocess_data(data=rm_data)
            data = self.ulysses_sharding_manager.preprocess_data(data=data)

            use_dynamic_bsz = self.config.use_dynamic_bsz
            if use_dynamic_bsz:
                max_token_len = self.config.forward_max_token_len_per_gpu * self.ulysses_sequence_parallel_size
                micro_batches, indices = rearrange_micro_batches(batch=rm_data.batch, max_token_len=max_token_len)
            else:
                micro_batches = rm_data.batch.split(self.config.micro_batch_size_per_gpu)
            output = []
            for micro_batch in micro_batches:
                rm_score = self._forward_micro_batch(micro_batch)
                output.append(rm_score)
            scores = torch.cat(output, dim=0)  # (batch_size)

            if use_dynamic_bsz:
                indices = list(itertools.chain.from_iterable(indices))
                assert len(indices) == scores.size(0), f"{len(indices)} vs. {scores.size()}"
                revert_indices = torch.tensor(get_reverse_idx(indices), dtype=torch.long)
                scores = scores[revert_indices]

            token_level_scores = self._expand_to_token_level(data, scores)
            # Note that these are only the scores, which may not be the final rewards used to train RL
            output = DataProto.from_dict(tensors={"rm_scores": token_level_scores})
            output = self.ulysses_sharding_manager.postprocess_data(data=output)

        # https://pytorch.org/docs/stable/notes/fsdp.html#fsdp-notes
        # unshard the root FSDP module
        self.reward_module._handle.reshard(True)

        output = output.to("cpu")
        return output
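Both `_forward_micro_batch` and `_expand_to_token_level` above locate the last valid token with `torch.argmax(position_ids * attention_mask, dim=-1)`: position ids grow monotonically over valid tokens and the mask zeroes out padding, so the argmax lands on the final non-pad position. A tiny self-contained check with toy tensors (illustrative only, not part of the diff):

import torch

attention_mask = torch.tensor([[1, 1, 1, 1, 1, 0, 0, 0]])  # 5 valid tokens, right-padded
position_ids = torch.arange(8).unsqueeze(0)                # [[0, 1, ..., 7]]

masked = position_ids * attention_mask                     # [[0, 1, 2, 3, 4, 0, 0, 0]]
eos_mask_idx = torch.argmax(masked, dim=-1)
print(eos_mask_idx)                                        # tensor([4]): index of the last non-pad token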
src/verl/recipe/spin/main_spin.py
ADDED
@@ -0,0 +1,167 @@
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import hydra
import ray

from recipe.spin.spin_trainer import RaySPINTrainer
from recipe.spin.utils import validate_config
from verl.trainer.ppo.reward import get_custom_reward_fn
from verl.trainer.ppo.utils import need_reference_policy


@hydra.main(config_path="config", config_name="spin_trainer", version_base=None)
def main(config):
    run_ppo(config)


def run_ppo(config) -> None:
    # TODO(linjunrong.ocss884): this ENV is left for resolving SGLang conflict with ray devices
    # isolation, will solve in the future
    os.environ["ENSURE_CUDA_VISIBLE_DEVICES"] = os.environ.get("CUDA_VISIBLE_DEVICES", "")
    if not ray.is_initialized():
        # this is for local ray cluster
        ray.init(
            runtime_env={
                "env_vars": {"TOKENIZERS_PARALLELISM": "true", "NCCL_DEBUG": "WARN", "VLLM_LOGGING_LEVEL": "WARN"}
            }
        )

    runner = TaskRunner.remote()
    ray.get(runner.run.remote(config))


@ray.remote(num_cpus=1)  # please make sure main_task is not scheduled on head
class TaskRunner:
    def run(self, config):
        # print initial config
        from pprint import pprint

        from omegaconf import OmegaConf

        from verl.utils.fs import copy_to_local

        pprint(OmegaConf.to_container(config, resolve=True))  # resolve=True will eval symbol values
        OmegaConf.resolve(config)

        # define worker classes
        if config.actor_rollout_ref.actor.strategy in {"fsdp", "fsdp2"}:
            assert config.critic.strategy in {"fsdp", "fsdp2"}
            # from recipe.spin.fsdp_workers import ActorRolloutRefWorker
            from recipe.spin.fsdp_workers import SPINRolloutRefWorker
            from verl.single_controller.ray import RayWorkerGroup

            ray_worker_group_cls = RayWorkerGroup

        elif config.actor_rollout_ref.actor.strategy == "megatron":
            assert config.actor_rollout_ref.actor.strategy == config.critic.strategy
            from verl.single_controller.ray import RayWorkerGroup

            ray_worker_group_cls = RayWorkerGroup

        else:
            raise NotImplementedError

        from recipe.spin.spin_trainer import ResourcePoolManager, Role

        role_worker_mapping = {
            # Role.ActorRollout: ray.remote(ActorRolloutRefWorker),
            Role.ActorRollout: ray.remote(SPINRolloutRefWorker),
            # Role.Critic: ray.remote(CriticWorker),
        }

        global_pool_id = "global_pool"
        resource_pool_spec = {
            global_pool_id: [config.trainer.n_gpus_per_node] * config.trainer.nnodes,
        }
        mapping = {
            Role.ActorRollout: global_pool_id,
            # Role.Critic: global_pool_id,
        }

        if config.reward_model.enable:
            if config.reward_model.strategy in {"fsdp", "fsdp2"}:
                from recipe.spin.fsdp_workers import RewardModelWorker
            elif config.reward_model.strategy == "megatron":
                from verl.workers.megatron_workers import RewardModelWorker
            else:
                raise NotImplementedError
            role_worker_mapping[Role.RewardModel] = ray.remote(RewardModelWorker)
            mapping[Role.RewardModel] = global_pool_id

        # use reference model
        # if config.algorithm.use_kl_in_reward or config.actor_rollout_ref.actor.use_kl_loss:
        #     role_worker_mapping[Role.RefPolicy] = ray.remote(ActorRolloutRefWorker)
        role_worker_mapping[Role.RefPolicy] = ray.remote(SPINRolloutRefWorker)
        mapping[Role.RefPolicy] = global_pool_id

        # validate config
        validate_config(
            config=config,
            use_reference_policy=need_reference_policy(role_worker_mapping),  # the local mapping built above
            use_critic=False,
        )

        # download the checkpoint from hdfs
        local_path = copy_to_local(config.actor_rollout_ref.model.path)

        # instantiate tokenizer
        from verl.utils import hf_processor, hf_tokenizer

        trust_remote_code = config.data.get("trust_remote_code", False)
        tokenizer = hf_tokenizer(local_path, trust_remote_code=trust_remote_code)
        processor = hf_processor(local_path, use_fast=True)  # used for multimodal LLM, could be None

        from verl.workers.reward_manager import get_reward_manager_cls

        # Note(haibin.lin): please make sure custom reward managers are imported and
        # registered via `verl.workers.reward_manager.register`
        reward_manager_name = config.reward_model.get("reward_manager", "naive")
        reward_manager_cls = get_reward_manager_cls(reward_manager_name)

        compute_score = get_custom_reward_fn(config)
        reward_kwargs = dict(config.reward_model.get("reward_kwargs", {}))
        reward_fn = reward_manager_cls(
            tokenizer=tokenizer,
            num_examine=0,
            compute_score=compute_score,
            reward_fn_key=config.data.reward_fn_key,
            **reward_kwargs,
        )

        # Note that we always use function-based RM for validation
        val_reward_fn = reward_manager_cls(
            tokenizer=tokenizer, num_examine=1, compute_score=compute_score, reward_fn_key=config.data.reward_fn_key
        )
        resource_pool_manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=mapping)

        trainer = RaySPINTrainer(
            config=config,
            tokenizer=tokenizer,
            processor=processor,
            role_worker_mapping=role_worker_mapping,
            resource_pool_manager=resource_pool_manager,
            ray_worker_group_cls=ray_worker_group_cls,
            reward_fn=reward_fn,
            val_reward_fn=val_reward_fn,
        )
        trainer.init_workers()
        trainer.fit_dpo()


if __name__ == "__main__":
    main()
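`need_reference_policy` is imported from `verl.trainer.ppo.utils`, which is not part of this upload; the call above only makes sense if it inspects the worker mapping that was just built. A hedged sketch of the assumed behavior (the helper's real implementation may differ):

from recipe.spin.spin_trainer import Role

def need_reference_policy_sketch(role_worker_mapping) -> bool:
    # assumption: a reference policy is needed iff a RefPolicy worker is mapped
    return Role.RefPolicy in role_worker_mapping

Since the SPIN recipe unconditionally registers `Role.RefPolicy`, this should always report True here.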
src/verl/recipe/spin/run_spin.sh
ADDED
@@ -0,0 +1,29 @@
set -e
set -x
# GPUs to use; adjust to your machine
VISIBLE_DEVICES="4,5,6,7"
export HYDRA_FULL_ERROR=1

# Assumes GSM8K has already been converted to train/test parquet files by verl's
# standard data preprocessing. trainer.ref_update_freq=1 presumably refreshes
# SPIN's periodically updated reference policy every iteration.
CUDA_VISIBLE_DEVICES=${VISIBLE_DEVICES} python3 -m recipe.spin.main_spin \
    data.train_files=$HOME/data/gsm8k/train.parquet \
    data.val_files=$HOME/data/gsm8k/test.parquet \
    data.train_batch_size=1024 \
    data.max_prompt_length=1024 \
    data.max_response_length=1024 \
    actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B-Instruct \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.actor.ppo_mini_batch_size=64 \
    actor_rollout_ref.actor.ppo_micro_batch_size=8 \
    actor_rollout_ref.rollout.log_prob_micro_batch_size=64 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=1 \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
    actor_rollout_ref.ref.log_prob_micro_batch_size=64 \
    algorithm.kl_ctrl.kl_coef=0.001 \
    trainer.logger=console \
    trainer.val_before_train=True \
    trainer.n_gpus_per_node=4 \
    trainer.nnodes=1 \
    trainer.save_freq=-1 \
    trainer.test_freq=1 \
    +trainer.log_freq=1 \
    trainer.ref_update_freq=1 \
    trainer.total_epochs=1000 2>&1 | tee verl_demo.log
src/verl/recipe/spin/spin_trainer.py
ADDED
@@ -0,0 +1,1308 @@
|
| 1 |
+
# Copyright 2024 Bytedance Ltd. and/or its affiliates
|
| 2 |
+
# Copyright 2023-2024 SGLang Team
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
|
| 16 |
+
import os
|
| 17 |
+
import traceback
|
| 18 |
+
import uuid
|
| 19 |
+
from collections import defaultdict
|
| 20 |
+
from contextlib import contextmanager
|
| 21 |
+
from dataclasses import dataclass, field
|
| 22 |
+
from pprint import pprint
|
| 23 |
+
from typing import Any, Optional
|
| 24 |
+
|
| 25 |
+
import numpy as np
|
| 26 |
+
import ray
|
| 27 |
+
import torch
|
| 28 |
+
from codetiming import Timer
|
| 29 |
+
from omegaconf import OmegaConf, open_dict
|
| 30 |
+
from torch.utils.data import Dataset, Sampler
|
| 31 |
+
from torchdata.stateful_dataloader import StatefulDataLoader
|
| 32 |
+
from tqdm import tqdm
|
| 33 |
+
|
| 34 |
+
from recipe.spin import core_algos
|
| 35 |
+
from verl import DataProto
|
| 36 |
+
from verl.protocol import pad_dataproto_to_divisor, unpad_dataproto
|
| 37 |
+
from verl.single_controller.ray import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup
|
| 38 |
+
from verl.single_controller.ray.base import create_colocated_worker_cls
|
| 39 |
+
from verl.trainer.ppo.metric_utils import (
|
| 40 |
+
compute_throughout_metrics,
|
| 41 |
+
compute_timing_metrics,
|
| 42 |
+
process_validation_metrics,
|
| 43 |
+
reduce_metrics,
|
| 44 |
+
)
|
| 45 |
+
from verl.trainer.ppo.utils import Role, WorkerType, need_reference_policy, need_reward_model
|
| 46 |
+
from verl.utils.checkpoint.checkpoint_manager import find_latest_ckpt_path
|
| 47 |
+
from verl.utils.seqlen_balancing import get_seqlen_balanced_partitions, log_seqlen_unbalance
|
| 48 |
+
from verl.utils.torch_functional import masked_mean
|
| 49 |
+
from verl.utils.tracking import ValidationGenerationsLogger
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
@dataclass
|
| 53 |
+
class ResourcePoolManager:
|
| 54 |
+
"""
|
| 55 |
+
Define a resource pool specification. Resource pool will be initialized first.
|
| 56 |
+
Mapping assigns each role to a named resource pool.
|
| 57 |
+
"""
|
| 58 |
+
|
| 59 |
+
resource_pool_spec: dict[str, list[int]]
|
| 60 |
+
mapping: dict[Role, str]
|
| 61 |
+
resource_pool_dict: dict[str, RayResourcePool] = field(default_factory=dict)
|
| 62 |
+
|
| 63 |
+
def create_resource_pool(self):
|
| 64 |
+
for resource_pool_name, process_on_nodes in self.resource_pool_spec.items():
|
| 65 |
+
# max_colocate_count means the number of WorkerGroups (i.e. processes) in each RayResourcePool
|
| 66 |
+
# For FSDP backend, we recommend using max_colocate_count=1, which merges all WorkerGroups into one.
|
| 67 |
+
# For Megatron backend, we recommend using max_colocate_count>1 so that different
|
| 68 |
+
# WorkerGroups can be used for different models
|
| 69 |
+
resource_pool = RayResourcePool(
|
| 70 |
+
process_on_nodes=process_on_nodes, use_gpu=True, max_colocate_count=1, name_prefix=resource_pool_name
|
| 71 |
+
)
|
| 72 |
+
self.resource_pool_dict[resource_pool_name] = resource_pool
|
| 73 |
+
|
| 74 |
+
self._check_resource_available()
|
| 75 |
+
|
| 76 |
+
def get_resource_pool(self, role: Role) -> RayResourcePool:
|
| 77 |
+
"""Get the resource pool of the worker_cls"""
|
| 78 |
+
return self.resource_pool_dict[self.mapping[role]]
|
| 79 |
+
|
| 80 |
+
def get_n_gpus(self) -> int:
|
| 81 |
+
"""Get the number of gpus in this cluster."""
|
| 82 |
+
return sum([n_gpus for process_on_nodes in self.resource_pool_spec.values() for n_gpus in process_on_nodes])
|
| 83 |
+
|
| 84 |
+
def _check_resource_available(self):
|
| 85 |
+
"""Check if the resource pool can be satisfied in this ray cluster."""
|
| 86 |
+
node_available_resources = ray._private.state.available_resources_per_node()
|
| 87 |
+
node_available_gpus = {node: node_info.get("GPU", 0) for node, node_info in node_available_resources.items()}
|
| 88 |
+
|
| 89 |
+
# check total required gpus can be satisfied
|
| 90 |
+
total_available_gpus = sum(node_available_gpus.values())
|
| 91 |
+
total_required_gpus = sum(
|
| 92 |
+
[n_gpus for process_on_nodes in self.resource_pool_spec.values() for n_gpus in process_on_nodes]
|
| 93 |
+
)
|
| 94 |
+
if total_available_gpus < total_required_gpus:
|
| 95 |
+
raise ValueError(
|
| 96 |
+
f"Total available GPUs {total_available_gpus} is less than total desired GPUs {total_required_gpus}"
|
| 97 |
+
)
|
| 98 |
+
|
| 99 |
+
# check each resource pool can be satisfied, O(#resource_pools * #nodes)
|
| 100 |
+
for resource_pool_name, process_on_nodes in self.resource_pool_spec.items():
|
| 101 |
+
num_gpus, num_nodes = process_on_nodes[0], len(process_on_nodes)
|
| 102 |
+
for node, available_gpus in node_available_gpus.items():
|
| 103 |
+
if available_gpus >= num_gpus:
|
| 104 |
+
node_available_gpus[node] -= num_gpus
|
| 105 |
+
num_nodes -= 1
|
| 106 |
+
if num_nodes == 0:
|
| 107 |
+
break
|
| 108 |
+
if num_nodes > 0:
|
| 109 |
+
raise ValueError(
|
| 110 |
+
f"Resource pool {resource_pool_name}: {num_gpus}*{num_nodes} cannot be satisfied in this "
|
| 111 |
+
f"ray cluster"
|
| 112 |
+
)
|
| 113 |
+
|
| 114 |
+
|
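A minimal usage sketch of the class above, using Role and ResourcePoolManager as defined in this file; the pool name, GPU count, and role set are illustrative assumptions:

# One pool spanning a single node with 4 GPUs, shared by all roles.
resource_pool_spec = {"global_pool": [4]}   # {pool_name: [GPUs per node, ...]}
mapping = {Role.ActorRollout: "global_pool", Role.RefPolicy: "global_pool"}
manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=mapping)
manager.create_resource_pool()              # builds RayResourcePools and verifies GPU capacity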
| 115 |
+
def _compute_response_info(batch: DataProto) -> dict[str, Any]:
|
| 116 |
+
"""Placeholder: Computes prompt and response lengths."""
|
| 117 |
+
try:
|
| 118 |
+
# Assuming 'prompts' and 'responses' keys exist after generation/union
|
| 119 |
+
prompt_len = batch.batch["prompts"].shape[1]
|
| 120 |
+
resp_len = batch.batch["responses"].shape[1]
|
| 121 |
+
# This is simplified - real implementation might use attention masks
|
| 122 |
+
# to get actual lengths per sample.
|
| 123 |
+
batch_size = batch.batch.batch_size[0]
|
| 124 |
+
prompt_lengths_tensor = torch.full((batch_size,), prompt_len, dtype=torch.float32, device=batch.batch.device)
|
| 125 |
+
response_lengths_tensor = torch.full((batch_size,), resp_len, dtype=torch.float32, device=batch.batch.device)
|
| 126 |
+
|
| 127 |
+
# Try getting actual lengths from attention mask if possible (more accurate)
|
| 128 |
+
if "response_mask" in batch.batch:
|
| 129 |
+
response_lengths_tensor = batch.batch["response_mask"].sum(dim=1).float()
|
| 130 |
+
# if "attention_mask" in batch.batch and "response_mask" in batch.batch:
|
| 131 |
+
# full_mask = batch.batch["attention_mask"]
|
| 132 |
+
# resp_mask = batch.batch["response_mask"]
|
| 133 |
+
# Infer prompt mask length based on where response mask starts or total length
|
| 134 |
+
# This logic depends heavily on how your masks are constructed.
|
| 135 |
+
# Example: prompt_lengths_tensor = full_mask.sum(dim=1).float() - response_lengths_tensor
|
| 136 |
+
# Fallback to using prompt shape if mask logic is complex:
|
| 137 |
+
prompt_lengths_tensor = torch.tensor(
|
| 138 |
+
[batch.batch["prompts"].shape[1]] * batch_size, dtype=torch.float32, device=batch.batch.device
|
| 139 |
+
)
|
| 140 |
+
|
| 141 |
+
return {
|
| 142 |
+
"prompt_length": prompt_lengths_tensor,
|
| 143 |
+
"response_length": response_lengths_tensor,
|
| 144 |
+
"max_response_length": resp_len,
|
| 145 |
+
"max_prompt_length": prompt_len, # Or from config if fixed padding
|
| 146 |
+
}
|
| 147 |
+
except KeyError as e:
|
| 148 |
+
print(f"Warning: Missing key in _compute_response_info: {e}. Returning defaults.")
|
| 149 |
+
# Return default/dummy values if keys are missing
|
| 150 |
+
b_size = batch.batch.batch_size[0] if batch.batch.batch_size else 1
|
| 151 |
+
max_resp = batch.batch.get("responses").shape[1] if batch.batch.get("responses") is not None else 0
|
| 152 |
+
max_prompt = batch.batch.get("prompts").shape[1] if batch.batch.get("prompts") is not None else 0
|
| 153 |
+
return {
|
| 154 |
+
"prompt_length": torch.zeros(b_size),
|
| 155 |
+
"response_length": torch.zeros(b_size),
|
| 156 |
+
"max_response_length": max_resp,
|
| 157 |
+
"max_prompt_length": max_prompt,
|
| 158 |
+
}
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
# --- Modified Metric Function ---
|
| 162 |
+
def compute_dpo_data_metrics(batch: DataProto) -> dict[str, Any]:
|
| 163 |
+
"""
|
| 164 |
+
Computes and returns metrics relevant for the DPO-like process.
|
| 165 |
+
Assumes 'batch' contains results after generation and preference marking,
|
| 166 |
+
potentially including 'dpo_logits', 'preferences', 'chosen_logps', etc.
|
| 167 |
+
Removes PPO-specific advantage/return/critic metrics.
|
| 168 |
+
"""
|
| 169 |
+
print("---- [DEBUG] Computing DPO Data Metrics ----")
|
| 170 |
+
metrics = {}
|
| 171 |
+
try:
|
| 172 |
+
# --- Scores and Rewards (from reward_fn) ---
|
| 173 |
+
if "token_level_scores" in batch.batch and batch.batch["token_level_scores"] is not None:
|
| 174 |
+
sequence_score = batch.batch["token_level_scores"].sum(-1)
|
| 175 |
+
metrics.update(
|
| 176 |
+
{
|
| 177 |
+
"reward/score/mean": torch.mean(sequence_score).item(),
|
| 178 |
+
"reward/score/max": torch.max(sequence_score).item(),
|
| 179 |
+
"reward/score/min": torch.min(sequence_score).item(),
|
| 180 |
+
}
|
| 181 |
+
)
|
| 182 |
+
else:
|
| 183 |
+
print("DEBUG compute_dpo_data_metrics: 'token_level_scores' not found.")
|
| 184 |
+
|
| 185 |
+
if "token_level_rewards" in batch.batch and batch.batch["token_level_rewards"] is not None:
|
| 186 |
+
sequence_reward = batch.batch["token_level_rewards"].sum(-1)
|
| 187 |
+
metrics.update(
|
| 188 |
+
{
|
| 189 |
+
"reward/rewards/mean": torch.mean(sequence_reward).item(),
|
| 190 |
+
"reward/rewards/max": torch.max(sequence_reward).item(),
|
| 191 |
+
"reward/rewards/min": torch.min(sequence_reward).item(),
|
| 192 |
+
}
|
| 193 |
+
)
|
| 194 |
+
else:
|
| 195 |
+
print("DEBUG compute_dpo_data_metrics: 'token_level_rewards' not found.")
|
| 196 |
+
|
| 197 |
+
# --- DPO Specific Metrics (if stored previously) ---
|
| 198 |
+
if "dpo_logits" in batch.batch and batch.batch["dpo_logits"] is not None:
|
| 199 |
+
metrics["actor/dpo_logits"] = batch.batch["dpo_logits"].mean().item()
|
| 200 |
+
else:
|
| 201 |
+
print("DEBUG compute_dpo_data_metrics: 'dpo_logits' not found.")
|
| 202 |
+
|
| 203 |
+
if "chosen_logps" in batch.batch and batch.batch["chosen_logps"] is not None:
|
| 204 |
+
metrics["actor/chosen_logps"] = batch.batch["chosen_logps"].mean().item()
|
| 205 |
+
else:
|
| 206 |
+
print("DEBUG compute_dpo_data_metrics: 'chosen_logps' not found.")
|
| 207 |
+
|
| 208 |
+
if "rejected_logps" in batch.batch and batch.batch["rejected_logps"] is not None:
|
| 209 |
+
metrics["actor/rejected_logps"] = batch.batch["rejected_logps"].mean().item()
|
| 210 |
+
else:
|
| 211 |
+
print("DEBUG compute_dpo_data_metrics: 'rejected_logps' not found.")
|
| 212 |
+
|
| 213 |
+
# Add metrics based on the 'preferences' mask if available
|
| 214 |
+
# if "preferences" in batch.batch and batch.batch["preferences"] is not None:
|
| 215 |
+
# prefs_mask = batch.batch["preferences"] # Shape [batch_size * n]
|
| 216 |
+
# Calculate accuracy based on RM scores (assuming higher score -> True in mask)
|
| 217 |
+
# Requires chosen/rejected scores to be available or recalculated
|
| 218 |
+
# This is complex here, better calculated in the main loop or update function
|
| 219 |
+
|
| 220 |
+
# --- Length Metrics ---
|
| 221 |
+
response_info = _compute_response_info(batch)
|
| 222 |
+
prompt_length = response_info["prompt_length"]
|
| 223 |
+
response_length = response_info["response_length"]
|
| 224 |
+
max_response_length = response_info["max_response_length"]
|
| 225 |
+
max_prompt_length = response_info["max_prompt_length"] # Use calculated or from config
|
| 226 |
+
|
| 227 |
+
metrics.update(
|
| 228 |
+
{
|
| 229 |
+
"response_length/mean": torch.mean(response_length).item(),
|
| 230 |
+
"response_length/max": torch.max(response_length).item(),
|
| 231 |
+
"response_length/min": torch.min(response_length).item(),
|
| 232 |
+
"response_length/clip_ratio": torch.mean(torch.eq(response_length, max_response_length).float()).item(),
|
| 233 |
+
"prompt_length/mean": torch.mean(prompt_length).item(),
|
| 234 |
+
"prompt_length/max": torch.max(prompt_length).item(),
|
| 235 |
+
"prompt_length/min": torch.min(prompt_length).item(),
|
| 236 |
+
# Prompt clip ratio might need adjustment based on how max_prompt_length is defined
|
| 237 |
+
"prompt_length/clip_ratio": torch.mean(torch.eq(prompt_length, max_prompt_length).float()).item(),
|
| 238 |
+
}
|
| 239 |
+
)
|
| 240 |
+
|
| 241 |
+
except KeyError as e:
|
| 242 |
+
print(f"ERROR in compute_dpo_data_metrics: Missing key {e}")
|
| 243 |
+
except Exception as e:
|
| 244 |
+
print(f"ERROR in compute_dpo_data_metrics: {e}")
|
| 245 |
+
traceback.print_exc()
|
| 246 |
+
|
| 247 |
+
print(f"---- [DEBUG] Calculated DPO Data Metrics: {list(metrics.keys())} ----")
|
| 248 |
+
return metrics
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
def apply_kl_penalty(data: DataProto, kl_ctrl: core_algos.AdaptiveKLController, kl_penalty="kl"):
|
| 252 |
+
responses = data.batch["responses"]
|
| 253 |
+
response_length = responses.size(1)
|
| 254 |
+
token_level_scores = data.batch["token_level_scores"]
|
| 255 |
+
batch_size = data.batch.batch_size[0]
|
| 256 |
+
attention_mask = data.batch["attention_mask"]
|
| 257 |
+
response_mask = attention_mask[:, -response_length:]
|
| 258 |
+
|
| 259 |
+
# compute kl between ref_policy and current policy
|
| 260 |
+
# When apply_kl_penalty, algorithm.use_kl_in_reward=True, so the reference model has been enabled.
|
| 261 |
+
kld = core_algos.kl_penalty(
|
| 262 |
+
data.batch["old_log_probs"], data.batch["ref_log_prob"], kl_penalty=kl_penalty
|
| 263 |
+
) # (batch_size, response_length)
|
| 264 |
+
kld = kld * response_mask
|
| 265 |
+
beta = kl_ctrl.value
|
| 266 |
+
|
| 267 |
+
token_level_rewards = token_level_scores - beta * kld
|
| 268 |
+
|
| 269 |
+
current_kl = masked_mean(kld, mask=response_mask, axis=-1) # average over sequence
|
| 270 |
+
current_kl = torch.mean(current_kl, dim=0).item()
|
| 271 |
+
|
| 272 |
+
# according to https://github.com/huggingface/trl/blob/951ca1841f29114b969b57b26c7d3e80a39f75a0/trl/trainer/ppo_trainer.py#L837
|
| 273 |
+
kl_ctrl.update(current_kl=current_kl, n_steps=batch_size)
|
| 274 |
+
data.batch["token_level_rewards"] = token_level_rewards
|
| 275 |
+
|
| 276 |
+
metrics = {"actor/reward_kl_penalty": current_kl, "actor/reward_kl_penalty_coeff": beta}
|
| 277 |
+
|
| 278 |
+
return data, metrics
|
| 279 |
+
|
| 280 |
+
|
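A tiny numeric sketch of the shaping performed above, token_level_rewards = token_level_scores - beta * kld, with made-up tensor values:

import torch

scores = torch.tensor([[0.0, 0.0, 1.0]])  # sparse score on the final response token
kld = torch.tensor([[0.2, 0.1, 0.4]])     # per-token KL between actor and reference
mask = torch.tensor([[1.0, 1.0, 1.0]])    # all three tokens belong to the response
beta = 0.001
rewards = scores - beta * (kld * mask)    # tensor([[-0.0002, -0.0001,  0.9996]])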
| 281 |
+
def compute_response_mask(data: DataProto):
|
| 282 |
+
responses = data.batch["responses"]
|
| 283 |
+
response_length = responses.size(1)
|
| 284 |
+
attention_mask = data.batch["attention_mask"]
|
| 285 |
+
return attention_mask[:, -response_length:]
|
| 286 |
+
|
| 287 |
+
|
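The slicing above relies on responses being right-aligned in the concatenated prompt+response sequence. A self-contained illustration with assumed lengths (4 prompt tokens, 3 response slots, the last one padding):

import torch

attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 0]])  # 4 prompt + 2 response tokens + 1 pad
response_length = 3
response_mask = attention_mask[:, -response_length:]    # tensor([[1, 1, 0]])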
| 288 |
+
def compute_onlineDPO_pref(data: DataProto):
|
| 289 |
+
"""
|
| 290 |
+
Wrapper to compute DPO preference and add it to the DataProto batch.
|
| 291 |
+
Includes debugging prints.
|
| 292 |
+
"""
|
| 293 |
+
# print(f"\n---- [DEBUG] Entering compute_onlineDPO_pref ----")
|
| 294 |
+
# print(f" Input batch keys: {list(data.batch.keys())}")
|
| 295 |
+
|
| 296 |
+
# Check inputs
|
| 297 |
+
rewards_tensor = data.batch.get("token_level_rewards")
|
| 298 |
+
mask_tensor = data.batch.get("response_mask")
|
| 299 |
+
|
| 300 |
+
if rewards_tensor is None or mask_tensor is None:
|
| 301 |
+
print(" ERROR: Missing 'token_level_rewards' or 'response_mask' in input data!")
|
| 302 |
+
# Handle error case - maybe return original data or raise?
|
| 303 |
+
# Returning original data for now to potentially allow skipping
|
| 304 |
+
return data
|
| 305 |
+
|
| 306 |
+
try:
|
| 307 |
+
preferences = core_algos.compute_onlinedpo_pref(token_level_rewards=rewards_tensor, response_mask=mask_tensor)
|
| 308 |
+
# Store the result
|
| 309 |
+
data.batch["preferences"] = preferences
|
| 310 |
+
|
| 311 |
+
except AttributeError:
|
| 312 |
+
print("ERROR: Function 'compute_online_dpo_preference' not found in core_algos.py!")
|
| 313 |
+
# Assign dummy value or raise error
|
| 314 |
+
data.batch["preferences"] = None # Indicate failure
|
| 315 |
+
except Exception as e_pref:
|
| 316 |
+
print(f"ERROR during core_algos.compute_online_dpo_preference: {e_pref}")
|
| 317 |
+
import traceback
|
| 318 |
+
|
| 319 |
+
traceback.print_exc()
|
| 320 |
+
data.batch["preferences"] = None # Indicate failure
|
| 321 |
+
|
| 322 |
+
# print(f"---- [DEBUG] Exiting compute_onlineDPO_pref ----")
|
| 323 |
+
return data
|
| 324 |
+
|
| 325 |
+
|
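The pairing rule itself lives in core_algos.compute_onlinedpo_pref, which this diff does not include. A hedged sketch of one plausible implementation for rollout.n = 2 interleaved samples per prompt, preferring the response with the higher summed reward:

import torch

token_level_rewards = torch.tensor([[0.0, 1.0], [0.0, 0.0], [1.0, 0.0], [1.0, 1.0]])
response_mask = torch.ones_like(token_level_rewards)
seq_rewards = (token_level_rewards * response_mask).sum(-1)  # tensor([1., 0., 1., 2.])
pairs = seq_rewards.view(-1, 2)                              # two interleaved samples per prompt
preferences = pairs[:, 0] >= pairs[:, 1]                     # tensor([ True, False])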
| 326 |
+
@contextmanager
|
| 327 |
+
def _timer(name: str, timing_raw: dict[str, float]):
|
| 328 |
+
with Timer(name=name, logger=None) as timer:
|
| 329 |
+
yield
|
| 330 |
+
timing_raw[name] = timer.last
|
| 331 |
+
|
| 332 |
+
|
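Usage of the helper above: each with _timer(name, timing_raw) block records its wall-clock duration under name, so timing_raw accumulates per-phase timings for the metrics logger.

import time

timing_raw: dict[str, float] = {}
with _timer("gen", timing_raw):
    time.sleep(0.1)         # stand-in for the timed work
print(timing_raw["gen"])    # roughly 0.1 seconds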
| 333 |
+
class RaySPINTrainer:
|
| 334 |
+
"""
|
| 335 |
+
Note that this trainer runs on the driver process on a single CPU/GPU node.
|
| 336 |
+
"""
|
| 337 |
+
|
| 338 |
+
# TODO: support each role having an individual ray_worker_group_cls,
|
| 339 |
+
# i.e., support a different backend for each role
|
| 340 |
+
def __init__(
|
| 341 |
+
self,
|
| 342 |
+
config,
|
| 343 |
+
tokenizer,
|
| 344 |
+
role_worker_mapping: dict[Role, WorkerType],
|
| 345 |
+
resource_pool_manager: ResourcePoolManager,
|
| 346 |
+
ray_worker_group_cls: RayWorkerGroup = RayWorkerGroup,
|
| 347 |
+
processor=None,
|
| 348 |
+
reward_fn=None,
|
| 349 |
+
val_reward_fn=None,
|
| 350 |
+
train_dataset: Optional[Dataset] = None,
|
| 351 |
+
val_dataset: Optional[Dataset] = None,
|
| 352 |
+
collate_fn=None,
|
| 353 |
+
train_sampler: Optional[Sampler] = None,
|
| 354 |
+
device_name=None,
|
| 355 |
+
):
|
| 356 |
+
# assert get_torch_device().is_available(), 'cuda must be available on driver'
|
| 357 |
+
|
| 358 |
+
self.tokenizer = tokenizer
|
| 359 |
+
self.processor = processor
|
| 360 |
+
self.config = config
|
| 361 |
+
self.reward_fn = reward_fn
|
| 362 |
+
self.val_reward_fn = val_reward_fn
|
| 363 |
+
|
| 364 |
+
self.hybrid_engine = config.actor_rollout_ref.hybrid_engine
|
| 365 |
+
assert self.hybrid_engine, "Currently, only support hybrid engine"
|
| 366 |
+
|
| 367 |
+
if self.hybrid_engine:
|
| 368 |
+
assert Role.ActorRollout in role_worker_mapping, f"{role_worker_mapping.keys()=}"
|
| 369 |
+
|
| 370 |
+
self.role_worker_mapping = role_worker_mapping
|
| 371 |
+
self.resource_pool_manager = resource_pool_manager
|
| 372 |
+
self.use_reference_policy = need_reference_policy(role_worker_mapping)
|
| 373 |
+
self.use_rm = need_reward_model(role_worker_mapping)
|
| 374 |
+
self.use_critic = False
|
| 375 |
+
self.ray_worker_group_cls = ray_worker_group_cls
|
| 376 |
+
self.validation_generations_logger = ValidationGenerationsLogger()
|
| 377 |
+
self.async_rollout_mode = False
|
| 378 |
+
self.device_name = device_name if device_name else self.config.trainer.device
|
| 379 |
+
|
| 380 |
+
# define in-reward KL control
|
| 381 |
+
# KL loss control is currently not supported
|
| 382 |
+
if config.algorithm.use_kl_in_reward:
|
| 383 |
+
self.kl_ctrl_in_reward = core_algos.get_kl_controller(config.algorithm.kl_ctrl)
|
| 384 |
+
|
| 385 |
+
self._create_dataloader(train_dataset, val_dataset, collate_fn, train_sampler)
|
| 386 |
+
|
| 387 |
+
def _create_dataloader(self, train_dataset, val_dataset, collate_fn, train_sampler):
|
| 388 |
+
"""
|
| 389 |
+
Creates the train and validation dataloaders.
|
| 390 |
+
"""
|
| 391 |
+
# TODO: we have to make sure the batch size is divisible by the dp size
|
| 392 |
+
from verl.trainer.main_ppo import create_rl_dataset, create_rl_sampler
|
| 393 |
+
|
| 394 |
+
if train_dataset is None:
|
| 395 |
+
train_dataset = create_rl_dataset(
|
| 396 |
+
self.config.data.train_files, self.config.data, self.tokenizer, self.processor
|
| 397 |
+
)
|
| 398 |
+
if val_dataset is None:
|
| 399 |
+
val_dataset = create_rl_dataset(
|
| 400 |
+
self.config.data.val_files, self.config.data, self.tokenizer, self.processor
|
| 401 |
+
)
|
| 402 |
+
self.train_dataset, self.val_dataset = train_dataset, val_dataset
|
| 403 |
+
|
| 404 |
+
if train_sampler is None:
|
| 405 |
+
train_sampler = create_rl_sampler(self.config.data, self.train_dataset)
|
| 406 |
+
if collate_fn is None:
|
| 407 |
+
from verl.utils.dataset.rl_dataset import collate_fn as default_collate_fn
|
| 408 |
+
|
| 409 |
+
collate_fn = default_collate_fn
|
| 410 |
+
|
| 411 |
+
self.train_dataloader = StatefulDataLoader(
|
| 412 |
+
dataset=self.train_dataset,
|
| 413 |
+
batch_size=self.config.data.get("gen_batch_size", self.config.data.train_batch_size),
|
| 414 |
+
num_workers=self.config.data.get("dataloader_num_workers", 8),
|
| 415 |
+
drop_last=True,
|
| 416 |
+
collate_fn=collate_fn,
|
| 417 |
+
sampler=train_sampler,
|
| 418 |
+
)
|
| 419 |
+
|
| 420 |
+
val_batch_size = self.config.data.val_batch_size # Prefer config value if set
|
| 421 |
+
if val_batch_size is None:
|
| 422 |
+
val_batch_size = len(self.val_dataset)
|
| 423 |
+
|
| 424 |
+
self.val_dataloader = StatefulDataLoader(
|
| 425 |
+
dataset=self.val_dataset,
|
| 426 |
+
batch_size=val_batch_size,
|
| 427 |
+
num_workers=self.config.data.get("dataloader_num_workers", 8),
|
| 428 |
+
shuffle=False,
|
| 429 |
+
drop_last=False,
|
| 430 |
+
collate_fn=collate_fn,
|
| 431 |
+
)
|
| 432 |
+
|
| 433 |
+
assert len(self.train_dataloader) >= 1, "Train dataloader is empty!"
|
| 434 |
+
assert len(self.val_dataloader) >= 1, "Validation dataloader is empty!"
|
| 435 |
+
|
| 436 |
+
print(
|
| 437 |
+
f"Size of train dataloader: {len(self.train_dataloader)}, "
|
| 438 |
+
f"Size of val dataloader: {len(self.val_dataloader)}"
|
| 439 |
+
)
|
| 440 |
+
|
| 441 |
+
total_training_steps = len(self.train_dataloader) * self.config.trainer.total_epochs
|
| 442 |
+
|
| 443 |
+
if self.config.trainer.total_training_steps is not None:
|
| 444 |
+
total_training_steps = self.config.trainer.total_training_steps
|
| 445 |
+
|
| 446 |
+
self.total_training_steps = total_training_steps
|
| 447 |
+
print(f"Total training steps: {self.total_training_steps}")
|
| 448 |
+
|
| 449 |
+
try:
|
| 450 |
+
OmegaConf.set_struct(self.config, True)
|
| 451 |
+
with open_dict(self.config):
|
| 452 |
+
if OmegaConf.select(self.config, "actor_rollout_ref.actor.optim"):
|
| 453 |
+
self.config.actor_rollout_ref.actor.optim.total_training_steps = total_training_steps
|
| 454 |
+
if OmegaConf.select(self.config, "critic.optim"):
|
| 455 |
+
self.config.critic.optim.total_training_steps = total_training_steps
|
| 456 |
+
except Exception as e:
|
| 457 |
+
print(f"Warning: Could not set total_training_steps in config. Structure missing? Error: {e}")
|
| 458 |
+
|
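A worked example of the step arithmetic above, with assumed dataset and batch sizes:

steps_per_epoch = 8192 // 1024                 # 8192 samples, gen_batch_size=1024, drop_last=True
total_training_steps = steps_per_epoch * 1000  # 8000 steps for total_epochs=1000, unless
                                               # trainer.total_training_steps overrides it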
| 459 |
+
def _maybe_log_val_generations(self, inputs, outputs, scores):
|
| 460 |
+
"""Log a table of validation samples to the configured logger (wandb or swanlab)"""
|
| 461 |
+
|
| 462 |
+
generations_to_log = self.config.trainer.log_val_generations
|
| 463 |
+
|
| 464 |
+
if generations_to_log == 0:
|
| 465 |
+
return
|
| 466 |
+
|
| 467 |
+
import numpy as np
|
| 468 |
+
|
| 469 |
+
# Create tuples of (input, output, score) and sort by input text
|
| 470 |
+
samples = list(zip(inputs, outputs, scores, strict=True))
|
| 471 |
+
samples.sort(key=lambda x: x[0]) # Sort by input text
|
| 472 |
+
|
| 473 |
+
# Use fixed random seed for deterministic shuffling
|
| 474 |
+
rng = np.random.RandomState(42)
|
| 475 |
+
rng.shuffle(samples)
|
| 476 |
+
|
| 477 |
+
# Take first N samples after shuffling
|
| 478 |
+
samples = samples[:generations_to_log]
|
| 479 |
+
|
| 480 |
+
# Log to each configured logger
|
| 481 |
+
self.validation_generations_logger.log(self.config.trainer.logger, samples, self.global_steps)
|
| 482 |
+
|
| 483 |
+
def _validate(self):
|
| 484 |
+
data_source_lst = []
|
| 485 |
+
reward_extra_infos_dict: dict[str, list] = defaultdict(list)
|
| 486 |
+
|
| 487 |
+
# Lists to collect samples for the table
|
| 488 |
+
sample_inputs = []
|
| 489 |
+
sample_outputs = []
|
| 490 |
+
sample_scores = []
|
| 491 |
+
|
| 492 |
+
for test_data in self.val_dataloader:
|
| 493 |
+
test_batch = DataProto.from_single_dict(test_data)
|
| 494 |
+
|
| 495 |
+
# repeat test batch
|
| 496 |
+
test_batch = test_batch.repeat(
|
| 497 |
+
repeat_times=self.config.actor_rollout_ref.rollout.val_kwargs.n, interleave=True
|
| 498 |
+
)
|
| 499 |
+
|
| 500 |
+
# we only do validation on rule-based rm
|
| 501 |
+
if self.config.reward_model.enable and test_batch[0].non_tensor_batch["reward_model"]["style"] == "model":
|
| 502 |
+
return {}
|
| 503 |
+
|
| 504 |
+
# Store original inputs
|
| 505 |
+
input_ids = test_batch.batch["input_ids"]
|
| 506 |
+
# TODO: Can we keep special tokens except for padding tokens?
|
| 507 |
+
input_texts = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in input_ids]
|
| 508 |
+
sample_inputs.extend(input_texts)
|
| 509 |
+
|
| 510 |
+
batch_keys_to_pop = ["input_ids", "attention_mask", "position_ids"]
|
| 511 |
+
non_tensor_batch_keys_to_pop = ["raw_prompt_ids"]
|
| 512 |
+
if "multi_modal_inputs" in test_batch.non_tensor_batch:
|
| 513 |
+
non_tensor_batch_keys_to_pop.extend(["multi_modal_data", "multi_modal_inputs"])
|
| 514 |
+
if "raw_prompt" in test_batch.non_tensor_batch:
|
| 515 |
+
non_tensor_batch_keys_to_pop.append("raw_prompt")
|
| 516 |
+
if "tools_kwargs" in test_batch.non_tensor_batch:
|
| 517 |
+
non_tensor_batch_keys_to_pop.append("tools_kwargs")
|
| 518 |
+
test_gen_batch = test_batch.pop(
|
| 519 |
+
batch_keys=batch_keys_to_pop,
|
| 520 |
+
non_tensor_batch_keys=non_tensor_batch_keys_to_pop,
|
| 521 |
+
)
|
| 522 |
+
|
| 523 |
+
test_gen_batch.meta_info = {
|
| 524 |
+
"eos_token_id": self.tokenizer.eos_token_id,
|
| 525 |
+
"pad_token_id": self.tokenizer.pad_token_id,
|
| 526 |
+
"recompute_log_prob": False,
|
| 527 |
+
"do_sample": self.config.actor_rollout_ref.rollout.val_kwargs.do_sample,
|
| 528 |
+
"validate": True,
|
| 529 |
+
}
|
| 530 |
+
print(f"test_gen_batch meta info: {test_gen_batch.meta_info}")
|
| 531 |
+
|
| 532 |
+
# pad to be divisible by dp_size
|
| 533 |
+
test_gen_batch_padded, pad_size = pad_dataproto_to_divisor(test_gen_batch, self.actor_rollout_wg.world_size)
|
| 534 |
+
if not self.async_rollout_mode:
|
| 535 |
+
test_output_gen_batch_padded = self.actor_rollout_wg.generate_sequences(test_gen_batch_padded)
|
| 536 |
+
else:
|
| 537 |
+
test_output_gen_batch_padded = self.async_rollout_manager.generate_sequences(test_gen_batch_padded)
|
| 538 |
+
|
| 539 |
+
# unpad
|
| 540 |
+
test_output_gen_batch = unpad_dataproto(test_output_gen_batch_padded, pad_size=pad_size)
|
| 541 |
+
print("validation generation end")
|
| 542 |
+
|
| 543 |
+
# Store generated outputs
|
| 544 |
+
output_ids = test_output_gen_batch.batch["responses"]
|
| 545 |
+
output_texts = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in output_ids]
|
| 546 |
+
sample_outputs.extend(output_texts)
|
| 547 |
+
|
| 548 |
+
test_batch = test_batch.union(test_output_gen_batch)
|
| 549 |
+
|
| 550 |
+
# evaluate using reward_function
|
| 551 |
+
result = self.val_reward_fn(test_batch, return_dict=True)
|
| 552 |
+
reward_tensor = result["reward_tensor"]
|
| 553 |
+
scores = reward_tensor.sum(-1).cpu().tolist()
|
| 554 |
+
sample_scores.extend(scores)
|
| 555 |
+
|
| 556 |
+
reward_extra_infos_dict["reward"].extend(scores)
|
| 557 |
+
if "reward_extra_info" in result:
|
| 558 |
+
for key, lst in result["reward_extra_info"].items():
|
| 559 |
+
reward_extra_infos_dict[key].extend(lst)
|
| 560 |
+
|
| 561 |
+
data_source_lst.append(test_batch.non_tensor_batch.get("data_source", ["unknown"] * reward_tensor.shape[0]))
|
| 562 |
+
|
| 563 |
+
self._maybe_log_val_generations(inputs=sample_inputs, outputs=sample_outputs, scores=sample_scores)
|
| 564 |
+
|
| 565 |
+
# dump generations
|
| 566 |
+
val_data_dir = self.config.trainer.get("validation_data_dir", None)
|
| 567 |
+
if val_data_dir:
|
| 568 |
+
sample_gts = [
|
| 569 |
+
item.non_tensor_batch.get("reward_model", {}).get("ground_truth", None) for item in test_batch
|
| 570 |
+
]
|
| 571 |
+
self._dump_generations(
|
| 572 |
+
inputs=sample_inputs,
|
| 573 |
+
outputs=sample_outputs,
|
| 574 |
+
gts=sample_gts,
|
| 575 |
+
scores=sample_scores,
|
| 576 |
+
reward_extra_infos_dict=reward_extra_infos_dict,
|
| 577 |
+
dump_path=val_data_dir,
|
| 578 |
+
)
|
| 579 |
+
|
| 580 |
+
for key_info, lst in reward_extra_infos_dict.items():
|
| 581 |
+
assert len(lst) == 0 or len(lst) == len(sample_scores), f"{key_info}: {len(lst)=}, {len(sample_scores)=}"
|
| 582 |
+
|
| 583 |
+
data_sources = np.concatenate(data_source_lst, axis=0)
|
| 584 |
+
print(f"DEBUG: Data sources shape: {data_sources.shape}") # Added Print
|
| 585 |
+
print(f"DEBUG: reward_extra_infos_dict keys before processing: {reward_extra_infos_dict.keys()}") # Added Print
|
| 586 |
+
|
| 587 |
+
data_src2var2metric2val = process_validation_metrics(data_sources, sample_inputs, reward_extra_infos_dict)
|
| 588 |
+
print(
|
| 589 |
+
f"DEBUG: Output of process_validation_metrics (data_src2var2metric2val): {data_src2var2metric2val}"
|
| 590 |
+
) # Added Print
|
| 591 |
+
metric_dict = {}
|
| 592 |
+
for data_source, var2metric2val in data_src2var2metric2val.items():
|
| 593 |
+
core_var = "acc" if "acc" in var2metric2val else "reward"
|
| 594 |
+
for var_name, metric2val in var2metric2val.items():
|
| 595 |
+
n_max = max([int(name.split("@")[-1].split("/")[0]) for name in metric2val.keys()])
|
| 596 |
+
for metric_name, metric_val in metric2val.items():
|
| 597 |
+
if (
|
| 598 |
+
(var_name == core_var)
|
| 599 |
+
and any(metric_name.startswith(pfx) for pfx in ["mean", "maj", "best"])
|
| 600 |
+
and (f"@{n_max}" in metric_name)
|
| 601 |
+
):
|
| 602 |
+
metric_sec = "val-core"
|
| 603 |
+
else:
|
| 604 |
+
metric_sec = "val-aux"
|
| 605 |
+
pfx = f"{metric_sec}/{data_source}/{var_name}/{metric_name}"
|
| 606 |
+
metric_dict[pfx] = metric_val
|
| 607 |
+
|
| 608 |
+
return metric_dict
|
| 609 |
+
|
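A compact restatement of the routing above with assumed values: mean@/maj@/best@ metrics at the largest n for the core variable go to val-core, everything else to val-aux (the full condition also checks var_name against the core variable):

metric_name, n_max = "mean@4", 4
is_core = metric_name.startswith(("mean", "maj", "best")) and f"@{n_max}" in metric_name
metric_sec = "val-core" if is_core else "val-aux"
print(f"{metric_sec}/gsm8k/acc/{metric_name}")  # val-core/gsm8k/acc/mean@4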
| 610 |
+
def init_workers(self):
|
| 611 |
+
"""Init resource pool and worker group"""
|
| 612 |
+
self.resource_pool_manager.create_resource_pool()
|
| 613 |
+
|
| 614 |
+
self.resource_pool_to_cls = {pool: {} for pool in self.resource_pool_manager.resource_pool_dict.values()}
|
| 615 |
+
|
| 616 |
+
# create actor and rollout
|
| 617 |
+
if self.hybrid_engine:
|
| 618 |
+
resource_pool = self.resource_pool_manager.get_resource_pool(Role.ActorRollout)
|
| 619 |
+
actor_rollout_cls = RayClassWithInitArgs(
|
| 620 |
+
cls=self.role_worker_mapping[Role.ActorRollout],
|
| 621 |
+
config=self.config.actor_rollout_ref,
|
| 622 |
+
role="actor_rollout",
|
| 623 |
+
)
|
| 624 |
+
self.resource_pool_to_cls[resource_pool]["actor_rollout"] = actor_rollout_cls
|
| 625 |
+
else:
|
| 626 |
+
raise NotImplementedError
|
| 627 |
+
|
| 628 |
+
# create critic
|
| 629 |
+
if self.use_critic:
|
| 630 |
+
resource_pool = self.resource_pool_manager.get_resource_pool(Role.Critic)
|
| 631 |
+
critic_cls = RayClassWithInitArgs(cls=self.role_worker_mapping[Role.Critic], config=self.config.critic)
|
| 632 |
+
self.resource_pool_to_cls[resource_pool]["critic"] = critic_cls
|
| 633 |
+
|
| 634 |
+
# create reference policy if needed
|
| 635 |
+
if self.use_reference_policy:
|
| 636 |
+
resource_pool = self.resource_pool_manager.get_resource_pool(Role.RefPolicy)
|
| 637 |
+
ref_policy_cls = RayClassWithInitArgs(
|
| 638 |
+
self.role_worker_mapping[Role.RefPolicy], config=self.config.actor_rollout_ref, role="ref"
|
| 639 |
+
)
|
| 640 |
+
self.resource_pool_to_cls[resource_pool]["ref"] = ref_policy_cls
|
| 641 |
+
|
| 642 |
+
# create a reward model if reward_fn is None
|
| 643 |
+
if self.use_rm:
|
| 644 |
+
# we create a RM here
|
| 645 |
+
resource_pool = self.resource_pool_manager.get_resource_pool(Role.RewardModel)
|
| 646 |
+
rm_cls = RayClassWithInitArgs(self.role_worker_mapping[Role.RewardModel], config=self.config.reward_model)
|
| 647 |
+
self.resource_pool_to_cls[resource_pool]["rm"] = rm_cls
|
| 648 |
+
|
| 649 |
+
# initialize WorkerGroup
|
| 650 |
+
# NOTE: if you want to use a different resource pool for each role, which can support different
|
| 651 |
+
# parallel size,
|
| 652 |
+
# you should not use `create_colocated_worker_cls`. Instead, directly pass different resource pool to
|
| 653 |
+
# different worker groups.
|
| 654 |
+
# See https://github.com/volcengine/verl/blob/master/examples/ray/tutorial.ipynb for more information.
|
| 655 |
+
all_wg = {}
|
| 656 |
+
self.wg_dicts = []
|
| 657 |
+
wg_kwargs = {} # Setting up kwargs for RayWorkerGroup
|
| 658 |
+
if OmegaConf.select(self.config.trainer, "ray_wait_register_center_timeout") is not None:
|
| 659 |
+
wg_kwargs["ray_wait_register_center_timeout"] = self.config.trainer.ray_wait_register_center_timeout
|
| 660 |
+
wg_kwargs["device_name"] = self.device_name
|
| 661 |
+
|
| 662 |
+
for resource_pool, class_dict in self.resource_pool_to_cls.items():
|
| 663 |
+
worker_dict_cls = create_colocated_worker_cls(class_dict=class_dict)
|
| 664 |
+
wg_dict = self.ray_worker_group_cls(
|
| 665 |
+
resource_pool=resource_pool,
|
| 666 |
+
ray_cls_with_init=worker_dict_cls,
|
| 667 |
+
**wg_kwargs,
|
| 668 |
+
)
|
| 669 |
+
spawn_wg = wg_dict.spawn(prefix_set=class_dict.keys())
|
| 670 |
+
all_wg.update(spawn_wg)
|
| 671 |
+
# keep the reference of WorkerDict to support ray >= 2.31. Ref: https://github.com/ray-project/ray/pull/45699
|
| 672 |
+
self.wg_dicts.append(wg_dict)
|
| 673 |
+
|
| 674 |
+
if self.use_critic:
|
| 675 |
+
self.critic_wg = all_wg["critic"]
|
| 676 |
+
self.critic_wg.init_model()
|
| 677 |
+
|
| 678 |
+
if self.use_reference_policy:
|
| 679 |
+
self.ref_policy_wg = all_wg["ref"]
|
| 680 |
+
self.ref_policy_wg.init_model()
|
| 681 |
+
|
| 682 |
+
if self.use_rm:
|
| 683 |
+
self.rm_wg = all_wg["rm"]
|
| 684 |
+
self.rm_wg.init_model()
|
| 685 |
+
|
| 686 |
+
# we should create rollout at the end so that vllm can have a better estimation of kv cache memory
|
| 687 |
+
self.actor_rollout_wg = all_wg["actor_rollout"]
|
| 688 |
+
self.actor_rollout_wg.init_model()
|
| 689 |
+
|
| 690 |
+
def _save_checkpoint(self):
|
| 691 |
+
# path: given_path + `/global_step_{global_steps}` + `/actor`
|
| 692 |
+
local_global_step_folder = os.path.join(
|
| 693 |
+
self.config.trainer.default_local_dir, f"global_step_{self.global_steps}"
|
| 694 |
+
)
|
| 695 |
+
|
| 696 |
+
print(f"local_global_step_folder: {local_global_step_folder}")
|
| 697 |
+
actor_local_path = os.path.join(local_global_step_folder, "actor")
|
| 698 |
+
|
| 699 |
+
actor_remote_path = (
|
| 700 |
+
None
|
| 701 |
+
if self.config.trainer.default_hdfs_dir is None
|
| 702 |
+
else os.path.join(self.config.trainer.default_hdfs_dir, f"global_step_{self.global_steps}", "actor")
|
| 703 |
+
)
|
| 704 |
+
|
| 705 |
+
remove_previous_ckpt_in_save = self.config.trainer.get("remove_previous_ckpt_in_save", False)
|
| 706 |
+
if remove_previous_ckpt_in_save:
|
| 707 |
+
print(
|
| 708 |
+
"Warning: remove_previous_ckpt_in_save is deprecated, set max_actor_ckpt_to_keep=1 and "
|
| 709 |
+
"max_critic_ckpt_to_keep=1 instead"
|
| 710 |
+
)
|
| 711 |
+
max_actor_ckpt_to_keep = (
|
| 712 |
+
self.config.trainer.get("max_actor_ckpt_to_keep", None) if not remove_previous_ckpt_in_save else 1
|
| 713 |
+
)
|
| 714 |
+
max_critic_ckpt_to_keep = (
|
| 715 |
+
self.config.trainer.get("max_critic_ckpt_to_keep", None) if not remove_previous_ckpt_in_save else 1
|
| 716 |
+
)
|
| 717 |
+
|
| 718 |
+
self.actor_rollout_wg.save_checkpoint(
|
| 719 |
+
actor_local_path, actor_remote_path, self.global_steps, max_ckpt_to_keep=max_actor_ckpt_to_keep
|
| 720 |
+
)
|
| 721 |
+
|
| 722 |
+
if self.use_critic:
|
| 723 |
+
critic_local_path = os.path.join(local_global_step_folder, "critic")
|
| 724 |
+
critic_remote_path = (
|
| 725 |
+
None
|
| 726 |
+
if self.config.trainer.default_hdfs_dir is None
|
| 727 |
+
else os.path.join(self.config.trainer.default_hdfs_dir, f"global_step_{self.global_steps}", "critic")
|
| 728 |
+
)
|
| 729 |
+
self.critic_wg.save_checkpoint(
|
| 730 |
+
critic_local_path, critic_remote_path, self.global_steps, max_ckpt_to_keep=max_critic_ckpt_to_keep
|
| 731 |
+
)
|
| 732 |
+
|
| 733 |
+
# save dataloader
|
| 734 |
+
dataloader_local_path = os.path.join(local_global_step_folder, "data.pt")
|
| 735 |
+
dataloader_state_dict = self.train_dataloader.state_dict()
|
| 736 |
+
torch.save(dataloader_state_dict, dataloader_local_path)
|
| 737 |
+
|
| 738 |
+
# latest checkpointed iteration tracker (for atomic usage)
|
| 739 |
+
local_latest_checkpointed_iteration = os.path.join(
|
| 740 |
+
self.config.trainer.default_local_dir, "latest_checkpointed_iteration.txt"
|
| 741 |
+
)
|
| 742 |
+
with open(local_latest_checkpointed_iteration, "w") as f:
|
| 743 |
+
f.write(str(self.global_steps))
|
| 744 |
+
|
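The on-disk layout the method above produces, shown with an assumed root directory and step number:

import os

root = "checkpoints"                              # trainer.default_local_dir (assumed)
step_dir = os.path.join(root, "global_step_120")  # one folder per saved step
actor_dir = os.path.join(step_dir, "actor")       # actor weights and optimizer state
data_path = os.path.join(step_dir, "data.pt")     # dataloader state
tracker = os.path.join(root, "latest_checkpointed_iteration.txt")  # contains "120"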
| 745 |
+
def _load_checkpoint(self):
|
| 746 |
+
if self.config.trainer.resume_mode == "disable":
|
| 747 |
+
return 0
|
| 748 |
+
|
| 749 |
+
# load from hdfs
|
| 750 |
+
if self.config.trainer.default_hdfs_dir is not None:
|
| 751 |
+
raise NotImplementedError("load from hdfs is not implemented yet")
|
| 752 |
+
else:
|
| 753 |
+
checkpoint_folder = self.config.trainer.default_local_dir # TODO: check path
|
| 754 |
+
if not os.path.isabs(checkpoint_folder):
|
| 755 |
+
working_dir = os.getcwd()
|
| 756 |
+
checkpoint_folder = os.path.join(working_dir, checkpoint_folder)
|
| 757 |
+
global_step_folder = find_latest_ckpt_path(checkpoint_folder) # None if no latest
|
| 758 |
+
|
| 759 |
+
# find global_step_folder
|
| 760 |
+
if self.config.trainer.resume_mode == "auto":
|
| 761 |
+
if global_step_folder is None:
|
| 762 |
+
print("Training from scratch")
|
| 763 |
+
return 0
|
| 764 |
+
else:
|
| 765 |
+
if self.config.trainer.resume_mode == "resume_path":
|
| 766 |
+
assert isinstance(self.config.trainer.resume_from_path, str), "resume ckpt must be str type"
|
| 767 |
+
assert "global_step_" in self.config.trainer.resume_from_path, (
|
| 768 |
+
"resume ckpt must specify the global_steps"
|
| 769 |
+
)
|
| 770 |
+
global_step_folder = self.config.trainer.resume_from_path
|
| 771 |
+
if not os.path.isabs(global_step_folder):
|
| 772 |
+
working_dir = os.getcwd()
|
| 773 |
+
global_step_folder = os.path.join(working_dir, global_step_folder)
|
| 774 |
+
print(f"Load from checkpoint folder: {global_step_folder}")
|
| 775 |
+
# set global step
|
| 776 |
+
self.global_steps = int(global_step_folder.split("global_step_")[-1])
|
| 777 |
+
|
| 778 |
+
print(f"Setting global step to {self.global_steps}")
|
| 779 |
+
print(f"Resuming from {global_step_folder}")
|
| 780 |
+
|
| 781 |
+
actor_path = os.path.join(global_step_folder, "actor")
|
| 782 |
+
critic_path = os.path.join(global_step_folder, "critic")
|
| 783 |
+
# load actor
|
| 784 |
+
self.actor_rollout_wg.load_checkpoint(
|
| 785 |
+
actor_path, del_local_after_load=self.config.trainer.del_local_ckpt_after_load
|
| 786 |
+
)
|
| 787 |
+
# load critic
|
| 788 |
+
if self.use_critic:
|
| 789 |
+
self.critic_wg.load_checkpoint(
|
| 790 |
+
critic_path, del_local_after_load=self.config.trainer.del_local_ckpt_after_load
|
| 791 |
+
)
|
| 792 |
+
|
| 793 |
+
# load dataloader,
|
| 794 |
+
# TODO: from remote not implemented yet
|
| 795 |
+
dataloader_local_path = os.path.join(global_step_folder, "data.pt")
|
| 796 |
+
if os.path.exists(dataloader_local_path):
|
| 797 |
+
dataloader_state_dict = torch.load(dataloader_local_path, weights_only=False)
|
| 798 |
+
self.train_dataloader.load_state_dict(dataloader_state_dict)
|
| 799 |
+
else:
|
| 800 |
+
print(f"Warning: No dataloader state found at {dataloader_local_path}, will start from scratch")
|
| 801 |
+
|
| 802 |
+
def _balance_batch(self, batch: DataProto, metrics, logging_prefix="global_seqlen"):
|
| 803 |
+
"""Reorder the data on single controller such that each dp rank gets similar total tokens"""
|
| 804 |
+
attention_mask = batch.batch["attention_mask"]
|
| 805 |
+
batch_size = attention_mask.shape[0]
|
| 806 |
+
global_seqlen_lst = batch.batch["attention_mask"].view(batch_size, -1).sum(-1).tolist() # (train_batch_size,)
|
| 807 |
+
world_size = self.actor_rollout_wg.world_size
|
| 808 |
+
global_partition_lst = get_seqlen_balanced_partitions(
|
| 809 |
+
global_seqlen_lst, k_partitions=world_size, equal_size=True
|
| 810 |
+
)
|
| 811 |
+
# reorder based on index. The data will be automatically equally partitioned by dispatch function
|
| 812 |
+
global_idx = torch.tensor([j for partition in global_partition_lst for j in partition])
|
| 813 |
+
batch.reorder(global_idx)
|
| 814 |
+
global_balance_stats = log_seqlen_unbalance(
|
| 815 |
+
seqlen_list=global_seqlen_lst, partitions=global_partition_lst, prefix=logging_prefix
|
| 816 |
+
)
|
| 817 |
+
metrics.update(global_balance_stats)
|
| 818 |
+
|
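A toy illustration of the balancing objective above (the lengths and the two-rank partition are assumed): sequences are regrouped so that each dp rank receives a similar token total.

seqlens = [100, 900, 500, 500]
partitions = [[1, 0], [2, 3]]                              # indices assigned to each of 2 ranks
totals = [sum(seqlens[i] for i in p) for p in partitions]  # [1000, 1000]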
| 819 |
+
def fit_dpo(self):  # Renamed from the standard PPO fit loop for clarity
|
| 820 |
+
"""
|
| 821 |
+
The training loop of Online DPO using a periodically updated reference model.
|
| 822 |
+
The driver process calls worker groups for computation.
|
| 823 |
+
Advantage computation is replaced by DPO logic.
|
| 824 |
+
"""
|
| 825 |
+
import traceback # Ensure traceback is imported
|
| 826 |
+
|
| 827 |
+
from omegaconf import OmegaConf
|
| 828 |
+
|
| 829 |
+
from verl.utils.tracking import Tracking
|
| 830 |
+
|
| 831 |
+
# Initialize logger
|
| 832 |
+
logger = None
|
| 833 |
+
try:
|
| 834 |
+
logger = Tracking(
|
| 835 |
+
project_name=self.config.trainer.project_name,
|
| 836 |
+
experiment_name=self.config.trainer.experiment_name,
|
| 837 |
+
default_backend=self.config.trainer.logger,
|
| 838 |
+
config=OmegaConf.to_container(self.config, resolve=True, throw_on_missing=False),
|
| 839 |
+
)
|
| 840 |
+
except Exception as e:
|
| 841 |
+
print(f"Warning: Failed to initialize logger: {e}")
|
| 842 |
+
|
| 843 |
+
self.global_steps = 0
|
| 844 |
+
# Load checkpoint before doing anything
|
| 845 |
+
loaded_step = self._load_checkpoint()
|
| 846 |
+
self.global_steps = loaded_step + 1 if loaded_step is not None and loaded_step > 0 else max(self.global_steps + 1, 1)  # resume path returns None but has already set self.global_steps
|
| 847 |
+
print(
|
| 848 |
+
f"Starting Online DPO training from global step {self.global_steps}. "
|
| 849 |
+
f"Total steps: {self.total_training_steps}"
|
| 850 |
+
)
|
| 851 |
+
print(f"Reference model update frequency: {self.config.trainer.get('ref_update_freq', 'Not Set')}")
|
| 852 |
+
|
| 853 |
+
# Check if reference policy is configured correctly for this mode
|
| 854 |
+
if not self.use_reference_policy:
|
| 855 |
+
print(
|
| 856 |
+
"WARNING: 'use_reference_policy' is False. Periodic reference model update requires a "
|
| 857 |
+
"reference policy worker. DPO updates might fail or use incorrect logic."
|
| 858 |
+
)
|
| 859 |
+
# Consider raising an error if strict adherence is required:
|
| 860 |
+
# raise ValueError("Periodic reference model update requires 'use_reference_policy' to be True "
|
| 861 |
+
# "and a configured reference worker.")
|
| 862 |
+
|
| 863 |
+
# Perform validation before training
|
| 864 |
+
if self.val_reward_fn is not None and self.config.trainer.get("val_before_train", True):
|
| 865 |
+
print("Running validation before Online DPO training...")
|
| 866 |
+
val_metrics = self._validate()
|
| 867 |
+
pprint(f"Initial validation metrics: {val_metrics}")
|
| 868 |
+
if logger and val_metrics:
|
| 869 |
+
logger.log(data=val_metrics, step=max(0, self.global_steps - 1))
|
| 870 |
+
if self.config.trainer.get("val_only", False):
|
| 871 |
+
print("Validation only mode enabled. Exiting training.")
|
| 872 |
+
if logger and hasattr(logger, "finish"):
|
| 873 |
+
logger.finish()
|
| 874 |
+
return
|
| 875 |
+
|
| 876 |
+
# Add tqdm progress bar
|
| 877 |
+
progress_bar = tqdm(
|
| 878 |
+
total=self.total_training_steps,
|
| 879 |
+
initial=self.global_steps,
|
| 880 |
+
desc="Online DPO Training Progress",
|
| 881 |
+
position=0,
|
| 882 |
+
leave=True,
|
| 883 |
+
)
|
| 884 |
+
|
| 885 |
+
last_val_metrics = None
|
| 886 |
+
should_stop = False
|
| 887 |
+
|
| 888 |
+
for epoch in range(self.config.trainer.total_epochs):
|
| 889 |
+
if should_stop:
|
| 890 |
+
break
|
| 891 |
+
print(f"--- Starting Online DPO Epoch {epoch} ---")
|
| 892 |
+
try:
|
| 893 |
+
train_iterator = iter(self.train_dataloader)
|
| 894 |
+
except TypeError:
|
| 895 |
+
print("Warning: Dataloader is not iterable.")
|
| 896 |
+
train_iterator = self.train_dataloader # Fallback attempt
|
| 897 |
+
|
| 898 |
+
for batch_idx, batch_dict in enumerate(train_iterator):
|
| 899 |
+
if self.global_steps > self.total_training_steps:
|
| 900 |
+
should_stop = True
|
| 901 |
+
break
|
| 902 |
+
|
| 903 |
+
metrics = {}
|
| 904 |
+
timing_raw = {}
|
| 905 |
+
step_timer = Timer(logger=None)
|
| 906 |
+
ref_log_prob_computed = False # Flag to track if ref log probs were computed
|
| 907 |
+
|
| 908 |
+
try: # Outer try-except for the whole step
|
| 909 |
+
step_timer.start()
|
| 910 |
+
with _timer("step", timing_raw):
|
| 911 |
+
batch: DataProto = DataProto.from_single_dict(batch_dict)
|
| 912 |
+
current_batch_size = batch.batch.batch_size[0]
|
| 913 |
+
print(
|
| 914 |
+
f"\n[Step {self.global_steps}, Batch {batch_idx}] Processing batch size: "
|
| 915 |
+
f"{current_batch_size}"
|
| 916 |
+
)
|
| 917 |
+
|
| 918 |
+
# --- Reference Model Update ---
|
| 919 |
+
ref_update_freq = self.config.trainer.get("ref_update_freq", -1)
|
| 920 |
+
if (
|
| 921 |
+
self.use_reference_policy
|
| 922 |
+
and ref_update_freq > 0
|
| 923 |
+
and self.global_steps % ref_update_freq == 0
|
| 924 |
+
):
|
| 925 |
+
print(f"\n[Step {self.global_steps}] Updating Reference Model Weights from Actor...")
|
| 926 |
+
try:
|
| 927 |
+
# --- This requires careful implementation with FSDP ---
|
| 928 |
+
# 1. Save actor state dict (potentially to CPU memory or disk)
|
| 929 |
+
# This needs to be done collectively across actor worker ranks.
|
| 930 |
+
# The checkpoint_manager might be adaptable, or use FSDP APIs directly.
|
| 931 |
+
# Example placeholder using a conceptual save/load mechanism:
|
| 932 |
+
actor_state_path = "/tmp/actor_state_mid" # Temporary path
|
| 933 |
+
self.actor_rollout_wg.save_checkpoint(actor_state_path) # Adapt save logic
|
| 934 |
+
|
| 935 |
+
# 2. Load the state dict onto the reference model worker group
|
| 936 |
+
# This also needs collective loading on the ref worker ranks.
|
| 937 |
+
self.ref_policy_wg.load_checkpoint(actor_state_path, None, True) # Adapt load logic
|
| 938 |
+
|
| 939 |
+
print(f"[Step {self.global_steps}] Reference Model Weights Updated.")
|
| 940 |
+
# Optionally remove the temporary state file
|
| 941 |
+
# os.remove(actor_state_path) # Needs rank-aware removal or shared storage
|
| 942 |
+
|
| 943 |
+
except Exception as sync_e:
|
| 944 |
+
print(f"ERROR during reference model sync at step {self.global_steps}: {sync_e}")
|
| 945 |
+
traceback.print_exc()
|
| 946 |
+
|
| 947 |
+
# Pop keys for generation
|
| 948 |
+
pop_batch_keys = ["input_ids", "attention_mask"]
|
| 949 |
+
if "position_ids" in batch.batch:
|
| 950 |
+
pop_batch_keys.append("position_ids")
|
| 951 |
+
pop_non_tensor_keys = ["raw_prompt_ids"] if "raw_prompt_ids" in batch.non_tensor_batch else []
|
| 952 |
+
if "multi_modal_inputs" in batch.non_tensor_batch.keys():
|
| 953 |
+
pop_non_tensor_keys.extend(["multi_modal_data", "multi_modal_inputs"])
|
| 954 |
+
original_non_tensor_data = batch.non_tensor_batch
|
| 955 |
+
gen_batch = batch.pop(
|
| 956 |
+
batch_keys=pop_batch_keys,
|
| 957 |
+
non_tensor_batch_keys=pop_non_tensor_keys,
|
| 958 |
+
)
|
| 959 |
+
gen_batch = gen_batch.repeat(
|
| 960 |
+
repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True
|
| 961 |
+
)
|
| 962 |
+
# (Add Debug prints for gen_batch if needed)
|
| 963 |
+
|
| 964 |
+
# Generate sequences (chosen/rejected pairs)
|
| 965 |
+
with _timer("gen", timing_raw):
|
| 966 |
+
try:
|
| 967 |
+
gen_batch_output = self.actor_rollout_wg.generate_sequences(gen_batch)
|
| 968 |
+
# (Add Debug prints for gen_batch_output if needed)
|
| 969 |
+
except Exception as gen_e:
|
| 970 |
+
print(f"\n!!!!!!!! ERROR DURING GENERATION (Step {self.global_steps}) !!!!!!!!")
|
| 971 |
+
print(gen_e)
|
| 972 |
+
traceback.print_exc()
|
| 973 |
+
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
|
| 974 |
+
step_timer.stop()
|
| 975 |
+
continue
|
| 976 |
+
|
                # Combine original prompts with generated sequences
                batch.non_tensor_batch = original_non_tensor_data  # Restore non-tensor data
                batch.non_tensor_batch["uid"] = np.array(
                    [str(uuid.uuid4()) for _ in range(current_batch_size)], dtype=object
                )
                batch = batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True)
                batch = batch.union(gen_batch_output)
                # (Add debug prints after union if needed)

                # Compute response mask (needed for ref logprob calc and DPO prep)
                batch.batch["response_mask"] = compute_response_mask(batch)

                if self.config.trainer.balance_batch:
                    self._balance_batch(batch, metrics=metrics)

                batch.meta_info["global_token_num"] = torch.sum(batch.batch["attention_mask"], dim=-1).tolist()
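compute_response_mask is provided by verl's trainer utilities; conceptually it marks which positions of each padded sequence belong to the generated response. A minimal sketch of that idea, under the assumption that prompts occupy a fixed-length head of each row (the function name and layout here are illustrative, not the library's implementation):

import torch

def response_mask_sketch(attention_mask: torch.Tensor, prompt_len: int) -> torch.Tensor:
    # 1 for real response tokens, 0 for padding; prompt positions are zeroed out.
    mask = attention_mask.clone()
    mask[:, :prompt_len] = 0
    return mask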
                # --- Compute Log Probs for the CURRENT policy (used for KL if enabled, or ActorAsRef
                # fallback) ---
                # Note: For pure DPO with external ref, this 'old_log_probs' might not be strictly needed
                # unless used for other metrics or a fallback. Keep it for now.
                with _timer("policy_log_prob", timing_raw):
                    policy_log_prob_output = self.actor_rollout_wg.compute_log_prob(batch)
                    batch = batch.union(policy_log_prob_output)  # Adds 'old_log_probs'
                    # (Debug prints for old_log_probs)
                # --- Compute Log Probs using the EXTERNAL Reference Model ---
                if self.use_reference_policy:
                    with _timer("ref_log_prob_dpo", timing_raw):
                        # print(f"---- [Step {self.global_steps}] DEBUG DPO: Calling compute_ref_log_prob ----")
                        try:
                            # 'batch' contains interleaved chosen/rejected sequences
                            ref_log_prob_output = self.ref_policy_wg.compute_ref_log_prob(
                                batch
                            )  # Returns DataProto with 'ref_log_prob'
                            batch = batch.union(
                                ref_log_prob_output
                            )  # Adds 'ref_log_prob' key [batch_size * n, seq_len]
                            ref_log_prob_computed = True  # Mark success
                            # print(f"---- [Step {self.global_steps}] DEBUG DPO: ref_log_prob tensor shape: "
                            #       f"{batch.batch['ref_log_prob'].shape} ----")
                        except Exception as ref_e:
                            print(f"ERROR computing reference log probs at step {self.global_steps}: {ref_e}")
                            traceback.print_exc()
                            # Leave 'ref_log_prob' unset (a TensorDict cannot hold None);
                            # the ref_log_prob_computed flag gates the DPO batch prep below.
                            ref_log_prob_computed = False
                else:
                    print(
                        "Warning: Skipping external reference log prob calculation as use_reference_policy "
                        "is False."
                    )
                    # The DPO update will likely fail unless ActorAsRef logic is re-enabled in dp_actor
                # --- Compute Rewards/Scores (used to determine preference) ---
                with _timer("reward_calc", timing_raw):
                    # (Reward calculation logic using the RM or reward_fn, as before)
                    # ... Ensure this calculates 'token_level_rewards' or similar ...
                    if self.use_rm:
                        reward_tensor_rm = self.rm_wg.compute_rm_score(batch)
                        batch = batch.union(reward_tensor_rm)  # Adds 'rm_scores'

                    reward_extra_infos_dict = {}
                    try:
                        if self.reward_fn is None:
                            # print(f"---- [DEBUG Step {self.global_steps}] ERROR: self.reward_fn is None! "
                            #       f"Using dummy rewards. ----")
                            # Use rm_scores if available, otherwise zeros
                            reward_tensor = batch.batch.get(
                                "rm_scores", torch.zeros_like(batch.batch["response_mask"], dtype=torch.float32)
                            )
                        else:
                            reward_result = self.reward_fn(batch, return_dict=True)
                            reward_tensor = reward_result["reward_tensor"]  # Final combined reward
                            reward_extra_infos_dict = reward_result.get("reward_extra_info", {})

                    except Exception:
                        # print(f'---- [DEBUG Step {self.global_steps}] Error in reward_fn call: {e}. '
                        #       f'Using dummy rewards. ----')
                        traceback.print_exc()
                        reward_tensor = torch.zeros_like(batch.batch["response_mask"], dtype=torch.float32)
                        reward_extra_infos_dict = {}

                    # Use 'token_level_rewards' as the key for preference calculation
                    batch.batch["token_level_rewards"] = reward_tensor
                    if reward_extra_infos_dict:
                        batch.non_tensor_batch.update(
                            {k: np.array(v) for k, v in reward_extra_infos_dict.items()}
                        )
                # --- Determine Preferences ---
                # Uses 'token_level_rewards' to determine chosen/rejected based on score
                batch = compute_onlineDPO_pref(batch)  # Adds 'preferences' key
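compute_onlineDPO_pref is imported from verl's algorithm helpers; only its call site appears here. As a rough illustration of the pairwise selection it implies, assuming n=2 interleaved samples per prompt and a higher masked reward sum marking the chosen response (names below are illustrative, not the actual implementation):

import torch

def preferences_sketch(token_level_rewards: torch.Tensor, response_mask: torch.Tensor) -> torch.Tensor:
    # Sequence-level scores over interleaved pairs [a0, b0, a1, b1, ...]
    scores = (token_level_rewards * response_mask).sum(dim=-1)
    pairs = scores.view(-1, 2)  # [num_prompts, 2]
    first_wins = pairs[:, 0] >= pairs[:, 1]
    prefs = torch.stack([first_wins, ~first_wins], dim=1)
    return prefs.reshape(-1)  # boolean mask; True marks the chosen sample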
                # --- Prepare DPO Batch ---
                dpo_update_batch_proto = None  # Initialize
                with _timer("prepare_dpo_batch", timing_raw):
                    try:
                        if "preferences" not in batch.batch or batch.batch["preferences"] is None:
                            raise ValueError("'preferences' key missing or None after compute_onlineDPO_pref.")

                        # Check if reference log probs were computed successfully (if needed)
                        if self.use_reference_policy and not ref_log_prob_computed:
                            raise ValueError("Reference log probs required but failed to compute.")

                        # Check required base keys
                        required_keys = ["input_ids", "attention_mask", "response_mask"]
                        for rk in required_keys:
                            if rk not in batch.batch or batch.batch[rk] is None:
                                raise KeyError(f"Required key '{rk}' missing from batch for DPO prep.")

                        preferences_mask = batch.batch["preferences"]  # Shape [batch_size * n]
                        not_preferences_mask = ~preferences_mask

                        # Gather Chosen/Rejected Base Tensors
                        chosen_input_ids = batch.batch["input_ids"][preferences_mask]
                        chosen_attention_mask = batch.batch["attention_mask"][preferences_mask]
                        rejected_input_ids = batch.batch["input_ids"][not_preferences_mask]
                        rejected_attention_mask = batch.batch["attention_mask"][not_preferences_mask]
                        chosen_position_ids = (
                            batch.batch.get("position_ids")[preferences_mask]
                            if "position_ids" in batch.batch
                            else None
                        )
                        rejected_position_ids = (
                            batch.batch.get("position_ids")[not_preferences_mask]
                            if "position_ids" in batch.batch
                            else None
                        )

                        # Create Labels
                        print("WARNING: Creating DPO labels using configured max_prompt_length...")
                        prompt_len = self.config.data.max_prompt_length
                        chosen_labels = chosen_input_ids.clone()
                        chosen_labels[:, :prompt_len] = -100
                        rejected_labels = rejected_input_ids.clone()
                        rejected_labels[:, :prompt_len] = -100
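Setting prompt positions to -100 is the standard way to exclude them from a sequence's log-likelihood. For context on where these labels typically flow, a self-contained sketch of the conventional per-sequence log-prob computation (the trainer's actual computation lives in dp_actor.py; this sketch only assumes the usual shape conventions, not that file's code):

import torch
import torch.nn.functional as F

def sequence_logps(logits: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
    # Shift for next-token prediction; count only positions where labels != -100,
    # i.e. response tokens (prompt positions were masked with -100 above).
    logits, labels = logits[:, :-1, :], labels[:, 1:].clone()
    mask = labels != -100
    labels[~mask] = 0  # placeholder index so gather stays in range
    token_logps = torch.gather(F.log_softmax(logits, dim=-1), 2, labels.unsqueeze(-1)).squeeze(-1)
    return (token_logps * mask).sum(-1)  # [batch] sequence-level log-probs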
                        # Calculate and Gather Reference Log Probs (Sequence Level)
                        if self.use_reference_policy:
                            ref_log_prob_tensor = batch.batch["ref_log_prob"]  # Token level [bsz * n, seq_len]
                            response_mask_full = batch.batch[
                                "response_mask"
                            ]  # Response mask [bsz * n, seq_len]
                            ref_sequence_logps = (ref_log_prob_tensor * response_mask_full).sum(
                                dim=-1
                            )  # Sequence level [bsz * n]
                            reference_chosen_logps = ref_sequence_logps[preferences_mask]
                            reference_rejected_logps = ref_sequence_logps[not_preferences_mask]
                        else:
                            # If not using external ref, DPO needs ActorAsRef logic in dp_actor.
                            # We won't add the keys here; dp_actor will handle it (or fail if not modified).
                            print(
                                "Info: Not adding explicit reference logps to DPO batch "
                                "(use_reference_policy=False)."
                            )
                            reference_chosen_logps = None  # Explicitly None
                            reference_rejected_logps = None
                        # Package Tensors
                        dpo_tensors = {
                            "chosen_input_ids": chosen_input_ids,
                            "chosen_attention_mask": chosen_attention_mask,
                            "chosen_labels": chosen_labels,
                            "rejected_input_ids": rejected_input_ids,
                            "rejected_attention_mask": rejected_attention_mask,
                            "rejected_labels": rejected_labels,
                        }
                        # Conditionally add reference logps if computed
                        if reference_chosen_logps is not None:
                            dpo_tensors["reference_chosen_logps"] = reference_chosen_logps
                        if reference_rejected_logps is not None:
                            dpo_tensors["reference_rejected_logps"] = reference_rejected_logps
                        # Add position ids if they exist
                        if chosen_position_ids is not None:
                            dpo_tensors["chosen_position_ids"] = chosen_position_ids
                        if rejected_position_ids is not None:
                            dpo_tensors["rejected_position_ids"] = rejected_position_ids

                        # Prepare Meta Info
                        dpo_meta = {
                            "dpo_beta": OmegaConf.select(self.config.algorithm, "dpo_beta", default=0.1),
                            "dpo_loss_type": OmegaConf.select(
                                self.config.algorithm, "dpo_loss_type", default="sigmoid"
                            ),
                            "dpo_label_smoothing": OmegaConf.select(
                                self.config.algorithm, "dpo_label_smoothing", default=0.0
                            ),
                            "use_reference_policy": self.use_reference_policy,
                            "reference_free": not self.use_reference_policy,  # False if using external ref
                            "global_step": self.global_steps,
                        }

                        dpo_update_batch_proto = DataProto.from_dict(tensors=dpo_tensors, meta_info=dpo_meta)
                        # print(f"---- [Step {self.global_steps}] DEBUG DPO: Prepared DPO Update Batch ----")
                        # print(f"  Keys: {list(dpo_update_batch_proto.batch.keys())}")
                        # print(f"  Meta Info: {dpo_meta}")

                    except Exception as e_prep:
                        print(f"ERROR preparing DPO batch at step {self.global_steps}: {e_prep}")
                        traceback.print_exc()
                        dpo_update_batch_proto = None  # Skip update on error
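For context on what dpo_beta, dpo_loss_type="sigmoid", and dpo_label_smoothing parameterize: they correspond to the conservative (label-smoothed) sigmoid DPO objective, which reduces to standard DPO at label_smoothing=0.0. The trainer's real loss is computed inside dp_actor's update_actor_dpo; this standalone sketch only states the formula:

import torch
import torch.nn.functional as F

def dpo_sigmoid_loss(
    policy_chosen_logps: torch.Tensor,
    policy_rejected_logps: torch.Tensor,
    reference_chosen_logps: torch.Tensor,
    reference_rejected_logps: torch.Tensor,
    beta: float = 0.1,
    label_smoothing: float = 0.0,
) -> torch.Tensor:
    pi_logratios = policy_chosen_logps - policy_rejected_logps
    ref_logratios = reference_chosen_logps - reference_rejected_logps
    logits = pi_logratios - ref_logratios  # implicit reward margin
    # label_smoothing = 0.0 recovers the standard DPO loss
    losses = (
        -F.logsigmoid(beta * logits) * (1 - label_smoothing)
        - F.logsigmoid(-beta * logits) * label_smoothing
    )
    return losses.mean()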
                # --- Actor Update Step ---
                actor_output = None
                if self.config.trainer.critic_warmup <= self.global_steps and dpo_update_batch_proto is not None:
                    with _timer("update_actor", timing_raw):
                        # Pass the batch containing reference log probs (if computed).
                        # The modified update_actor_dpo expects them when reference_free=False.
                        actor_output = self.actor_rollout_wg.update_actor_dpo(dpo_update_batch_proto)
                        if actor_output and "metrics" in actor_output.meta_info:
                            metrics.update(reduce_metrics(actor_output.meta_info["metrics"]))
                elif dpo_update_batch_proto is None:
                    print(
                        f"Skipping actor update at step {self.global_steps} due to DPO batch preparation error."
                    )
                # --- Validation and Saving ---
                test_freq = OmegaConf.select(self.config.trainer, "test_freq", default=-1)
                is_last_step = self.global_steps >= self.total_training_steps
                if (
                    self.val_reward_fn is not None
                    and test_freq > 0
                    and (is_last_step or self.global_steps % test_freq == 0)
                ):
                    print(f"\nRunning DPO validation at step {self.global_steps}...")
                    val_timing_raw = {}
                    with _timer("testing", val_timing_raw):
                        val_metrics: dict = self._validate()
                    if is_last_step:
                        last_val_metrics = val_metrics
                    if val_metrics:
                        metrics["time/validation_run"] = val_timing_raw.get("testing", 0)
                        metrics.update(val_metrics)
                    else:
                        print("Validation skipped or returned no metrics.")

                save_freq = OmegaConf.select(self.config.trainer, "save_freq", default=-1)
                if save_freq > 0 and (is_last_step or self.global_steps % save_freq == 0):
                    print(f"\nSaving DPO checkpoint at step {self.global_steps}...")
                    with _timer("save_checkpoint", timing_raw):
                        self._save_checkpoint()  # Saves actor (and potentially critic if used elsewhere)
                    metrics["time/save_checkpoint"] = timing_raw.get("save_checkpoint", 0)
            # --- End main step timer context ---

            # --- Metrics calculation AFTER the 'step' timer block ---
            metrics.update(compute_dpo_data_metrics(batch=batch))  # Use DPO-specific metrics
            metrics.update(compute_timing_metrics(batch=batch, timing_raw=timing_raw))
            n_gpus = self.resource_pool_manager.get_n_gpus()
            if "step" in timing_raw:
                metrics.update(compute_throughout_metrics(batch=batch, timing_raw=timing_raw, n_gpus=n_gpus))
            else:
                print(
                    f"Warning: 'step' key missing from timing_raw at step {self.global_steps}. "
                    f"Skipping throughput."
                )

            step_timer.stop()
            metrics["time/step"] = step_timer.last

            # Log metrics
            log_freq = OmegaConf.select(self.config.trainer, "log_freq", default=1)
            if logger and self.global_steps % log_freq == 0:
                log_payload = metrics.copy()
                # Ensure the learning rate is in the payload (already present via
                # metrics.copy(); kept explicit for clarity)
                if actor_output and "actor/lr" in metrics:
                    log_payload["actor/lr"] = metrics["actor/lr"]

                print(f"[Step {self.global_steps} DPO] Logging Step Payload Keys: {list(log_payload.keys())}")
                try:
                    logger.log(data=log_payload, step=self.global_steps)
                except Exception as e:
                    print(f"Logging failed at step {self.global_steps}: {e}")

            # Update progress bar
            postfix_metrics = {
                k: f"{v:.3f}" if isinstance(v, float) else v
                for k, v in metrics.items()
                if isinstance(v, int | float)
            }
            progress_bar.set_postfix(postfix_metrics)
        except Exception as step_e:
            print(f"\n!!!!!!!! ERROR DURING DPO Step {self.global_steps} !!!!!!!!")
            print(f"Caught Exception: {step_e}")
            traceback.print_exc()
            print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
            step_timer.stop()
            should_stop = True
            break

        if is_last_step or should_stop:
            print(f"Stopping DPO training at step {self.global_steps}.")
            break

        self.global_steps += 1
        progress_bar.update(1)

    # End of epoch handling
    if hasattr(self.train_dataloader, "reset"):
        try:
            self.train_dataloader.reset()
        except Exception as e:
            print(f"Warning: Failed to reset train dataloader state: {e}")
    if should_stop:
        break
# --- Final cleanup and logging ---
progress_bar.close()
final_step = max(0, self.global_steps - 1)
print(f"Online DPO Training finished at step {final_step}.")
# Save a final checkpoint if one was not already written at this step
save_freq = OmegaConf.select(self.config.trainer, "save_freq", default=-1)
if not self.config.trainer.get("val_only", False) and (save_freq <= 0 or final_step % save_freq != 0):
    print(f"Saving final DPO checkpoint at step {final_step}...")
    self._save_checkpoint()

# Final validation run
if self.val_reward_fn and last_val_metrics is None and not self.config.trainer.get("val_only", False):
    print("Running final validation...")
    last_val_metrics = self._validate()
    if last_val_metrics and logger:
        last_val_metrics["final_validation"] = True
        try:
            logger.log(data=last_val_metrics, step=final_step)
        except Exception as e:
            print(f"[Final Val Metrics Log Error]: {e}")

pprint(f"Final validation metrics: {last_val_metrics}")
if logger and hasattr(logger, "finish"):
    logger.finish()
print("Online DPO Training Run Complete.")