{ "pdf_info": [ { "preproc_blocks": [ { "type": "title", "bbox": [ 186, 79, 424, 121 ], "lines": [ { "bbox": [ 184, 80, 426, 99 ], "spans": [ { "bbox": [ 184, 80, 426, 99 ], "score": 1.0, "content": "DayDreamer: World Models for", "type": "text" } ], "index": 0 }, { "bbox": [ 211, 100, 400, 123 ], "spans": [ { "bbox": [ 211, 100, 400, 123 ], "score": 1.0, "content": "Physical Robot Learning", "type": "text" } ], "index": 1 } ], "index": 0.5 }, { "type": "title", "bbox": [ 173, 140, 228, 152 ], "lines": [ { "bbox": [ 172, 139, 230, 154 ], "spans": [ { "bbox": [ 172, 139, 230, 154 ], "score": 1.0, "content": "Philipp Wu*", "type": "text" } ], "index": 2 } ], "index": 2 }, { "type": "text", "bbox": [ 249, 140, 440, 151 ], "lines": [ { "bbox": [ 248, 139, 441, 154 ], "spans": [ { "bbox": [ 248, 139, 348, 154 ], "score": 1.0, "content": "Alejandro Escontrela*", "type": "text" }, { "bbox": [ 365, 139, 441, 153 ], "score": 1.0, "content": "Danijar Hafner*", "type": "text" } ], "index": 3 } ], "index": 3 }, { "type": "text", "bbox": [ 235, 161, 376, 173 ], "lines": [ { "bbox": [ 233, 159, 378, 176 ], "spans": [ { "bbox": [ 233, 159, 299, 176 ], "score": 1.0, "content": "Ken Goldberg", "type": "text" }, { "bbox": [ 316, 160, 378, 174 ], "score": 1.0, "content": "Pieter Abbeel", "type": "text" } ], "index": 4 } ], "index": 4 }, { "type": "text", "bbox": [ 238, 182, 373, 193 ], "lines": [ { "bbox": [ 236, 180, 375, 196 ], "spans": [ { "bbox": [ 236, 180, 375, 196 ], "score": 1.0, "content": "University of California, Berkeley", "type": "text" } ], "index": 5 } ], "index": 5 }, { "type": "text", "bbox": [ 266, 198, 345, 209 ], "lines": [ { "bbox": [ 264, 196, 347, 211 ], "spans": [ { "bbox": [ 264, 196, 347, 211 ], "score": 1.0, "content": "*Equal contribution", "type": "text" } ], "index": 6 } ], "index": 6 }, { "type": "text", "bbox": [ 142, 232, 469, 522 ], "lines": [ { "bbox": [ 142, 231, 469, 243 ], "spans": [ { "bbox": [ 142, 231, 469, 243 ], "score": 1.0, "content": "Abstract: To solve tasks in complex environments, robots need to learn from", "type": "text" } ], "index": 7 }, { "bbox": [ 141, 243, 469, 256 ], "spans": [ { "bbox": [ 141, 243, 469, 256 ], "score": 1.0, "content": "experience. Deep reinforcement learning is a common approach to robot learning", "type": "text" } ], "index": 8 }, { "bbox": [ 142, 254, 469, 266 ], "spans": [ { "bbox": [ 142, 254, 469, 266 ], "score": 1.0, "content": "but requires a large amount of trial and error to learn, limiting its deployment in", "type": "text" } ], "index": 9 }, { "bbox": [ 141, 265, 469, 279 ], "spans": [ { "bbox": [ 141, 265, 469, 279 ], "score": 1.0, "content": "the physical world. As a consequence, many advances in robot learning rely on", "type": "text" } ], "index": 10 }, { "bbox": [ 141, 277, 469, 290 ], "spans": [ { "bbox": [ 141, 277, 469, 290 ], "score": 1.0, "content": "simulators. On the other hand, learning inside of simulators fails to capture the", "type": "text" } ], "index": 11 }, { "bbox": [ 141, 289, 469, 302 ], "spans": [ { "bbox": [ 141, 289, 469, 302 ], "score": 1.0, "content": "complexity of the real world, is prone to simulator inaccuracies, and the resulting", "type": "text" } ], "index": 12 }, { "bbox": [ 141, 301, 469, 313 ], "spans": [ { "bbox": [ 141, 301, 469, 313 ], "score": 1.0, "content": "behaviors do not adapt to changes in the world. 
The Dreamer algorithm has recently", "type": "text" } ], "index": 13 }, { "bbox": [ 141, 312, 469, 326 ], "spans": [ { "bbox": [ 141, 312, 469, 326 ], "score": 1.0, "content": "shown great promise for learning from small amounts of interaction by planning", "type": "text" } ], "index": 14 }, { "bbox": [ 141, 324, 469, 336 ], "spans": [ { "bbox": [ 141, 324, 469, 336 ], "score": 1.0, "content": "within a learned world model, outperforming pure reinforcement learning in video", "type": "text" } ], "index": 15 }, { "bbox": [ 142, 336, 469, 348 ], "spans": [ { "bbox": [ 142, 336, 469, 348 ], "score": 1.0, "content": "games. Learning a world model to predict the outcomes of potential actions enables", "type": "text" } ], "index": 16 }, { "bbox": [ 141, 348, 469, 359 ], "spans": [ { "bbox": [ 141, 348, 469, 359 ], "score": 1.0, "content": "planning in imagination, reducing the amount of trial and error needed in the real", "type": "text" } ], "index": 17 }, { "bbox": [ 141, 359, 469, 372 ], "spans": [ { "bbox": [ 141, 359, 469, 372 ], "score": 1.0, "content": "environment. However, it is unknown whether Dreamer can facilitate faster learning", "type": "text" } ], "index": 18 }, { "bbox": [ 141, 372, 469, 382 ], "spans": [ { "bbox": [ 141, 372, 469, 382 ], "score": 1.0, "content": "on physical robots. In this paper, we apply Dreamer to 4 robots to learn online", "type": "text" } ], "index": 19 }, { "bbox": [ 141, 383, 470, 395 ], "spans": [ { "bbox": [ 141, 383, 470, 395 ], "score": 1.0, "content": "and directly in the real world, without any simulators. Dreamer trains a quadruped", "type": "text" } ], "index": 20 }, { "bbox": [ 141, 394, 469, 407 ], "spans": [ { "bbox": [ 141, 394, 469, 407 ], "score": 1.0, "content": "robot to roll off its back, stand up, and walk from scratch and without resets in only", "type": "text" } ], "index": 21 }, { "bbox": [ 141, 406, 469, 418 ], "spans": [ { "bbox": [ 141, 406, 469, 418 ], "score": 1.0, "content": "1 hour. We then push the robot and find that Dreamer adapts within 10 minutes to", "type": "text" } ], "index": 22 }, { "bbox": [ 142, 419, 469, 429 ], "spans": [ { "bbox": [ 142, 419, 469, 429 ], "score": 1.0, "content": "withstand perturbations or quickly roll over and stand back up. On two different", "type": "text" } ], "index": 23 }, { "bbox": [ 141, 429, 469, 441 ], "spans": [ { "bbox": [ 141, 429, 469, 441 ], "score": 1.0, "content": "robotic arms, Dreamer learns to pick and place objects from camera images and", "type": "text" } ], "index": 24 }, { "bbox": [ 141, 442, 470, 453 ], "spans": [ { "bbox": [ 141, 442, 470, 453 ], "score": 1.0, "content": "sparse rewards, approaching human-level teleoperation performance. On a wheeled", "type": "text" } ], "index": 25 }, { "bbox": [ 141, 451, 470, 466 ], "spans": [ { "bbox": [ 141, 451, 470, 466 ], "score": 1.0, "content": "robot, Dreamer learns to navigate to a goal position purely from camera images,", "type": "text" } ], "index": 26 }, { "bbox": [ 141, 464, 470, 477 ], "spans": [ { "bbox": [ 141, 464, 470, 477 ], "score": 1.0, "content": "automatically resolving ambiguity about the robot orientation. 
Using the same", "type": "text" } ], "index": 27 }, { "bbox": [ 141, 477, 469, 488 ], "spans": [ { "bbox": [ 141, 477, 469, 488 ], "score": 1.0, "content": "hyperparameters across all experiments, we find that Dreamer is capable of online", "type": "text" } ], "index": 28 }, { "bbox": [ 141, 487, 469, 500 ], "spans": [ { "bbox": [ 141, 487, 469, 500 ], "score": 1.0, "content": "learning in the real world, which establishes a strong baseline. We release our", "type": "text" } ], "index": 29 }, { "bbox": [ 141, 498, 469, 513 ], "spans": [ { "bbox": [ 141, 498, 469, 513 ], "score": 1.0, "content": "infrastructure for future applications of world models to robot learning. Videos are", "type": "text" } ], "index": 30 }, { "bbox": [ 141, 510, 433, 524 ], "spans": [ { "bbox": [ 141, 510, 433, 524 ], "score": 1.0, "content": "available on the project website: https://danijar.com/daydreamer", "type": "text" } ], "index": 31 } ], "index": 19 }, { "type": "image", "bbox": [ 107, 542, 504, 647 ], "blocks": [ { "type": "image_body", "bbox": [ 107, 542, 504, 647 ], "group_id": 0, "lines": [ { "bbox": [ 107, 542, 504, 647 ], "spans": [ { "bbox": [ 107, 542, 504, 647 ], "score": 0.973, "type": "image", "image_path": "17f2d11eee9937e70f62a1993623ebccd221887d067e71919c350fa57662f4d3.jpg" } ] } ], "index": 33, "virtual_lines": [ { "bbox": [ 107, 542, 504, 577.0 ], "spans": [], "index": 32 }, { "bbox": [ 107, 577.0, 504, 612.0 ], "spans": [], "index": 33 }, { "bbox": [ 107, 612.0, 504, 647.0 ], "spans": [], "index": 34 } ] }, { "type": "image_caption", "bbox": [ 106, 653, 505, 721 ], "group_id": 0, "lines": [ { "bbox": [ 105, 653, 505, 666 ], "spans": [ { "bbox": [ 105, 653, 505, 666 ], "score": 1.0, "content": "Figure 1: To study the applicability of Dreamer for sample-efficient robot learning, we apply the", "type": "text" } ], "index": 35 }, { "bbox": [ 106, 664, 505, 676 ], "spans": [ { "bbox": [ 106, 664, 505, 676 ], "score": 1.0, "content": "algorithm to learn robot locomotion, manipulation, and navigation tasks from scratch in the real", "type": "text" } ], "index": 36 }, { "bbox": [ 104, 674, 505, 689 ], "spans": [ { "bbox": [ 104, 674, 505, 689 ], "score": 1.0, "content": "world on 4 robots, without simulators. The tasks evaluate a diverse range of challenges, including", "type": "text" } ], "index": 37 }, { "bbox": [ 106, 687, 506, 700 ], "spans": [ { "bbox": [ 106, 687, 506, 700 ], "score": 1.0, "content": "continuous and discrete actions, dense and sparse rewards, proprioceptive and camera inputs, as well", "type": "text" } ], "index": 38 }, { "bbox": [ 105, 698, 506, 711 ], "spans": [ { "bbox": [ 105, 698, 506, 711 ], "score": 1.0, "content": "as sensor fusion of multiple input modalities. 
Learning successfully using the same hyperparameters", "type": "text" } ], "index": 39 }, { "bbox": [ 105, 709, 470, 722 ], "spans": [ { "bbox": [ 105, 709, 470, 722 ], "score": 1.0, "content": "across all experiments, Dreamer establishes a strong baseline for real world robot learning.", "type": "text" } ], "index": 40 } ], "index": 37.5 } ], "index": 35.25 } ], "page_idx": 0, "page_size": [ 612, 792 ], "discarded_blocks": [ { "type": "discarded", "bbox": [ 106, 732, 377, 743 ], "lines": [ { "bbox": [ 106, 732, 378, 744 ], "spans": [ { "bbox": [ 106, 732, 378, 744 ], "score": 1.0, "content": "6th Conference on Robot Learning (CoRL 2022), Auckland, New Zealand.", "type": "text" } ] } ] } ], "para_blocks": [ { "type": "title", "bbox": [ 186, 79, 424, 121 ], "lines": [ { "bbox": [ 184, 80, 426, 99 ], "spans": [ { "bbox": [ 184, 80, 426, 99 ], "score": 1.0, "content": "DayDreamer: World Models for", "type": "text" } ], "index": 0 }, { "bbox": [ 211, 100, 400, 123 ], "spans": [ { "bbox": [ 211, 100, 400, 123 ], "score": 1.0, "content": "Physical Robot Learning", "type": "text" } ], "index": 1 } ], "index": 0.5 }, { "type": "title", "bbox": [ 173, 140, 228, 152 ], "lines": [ { "bbox": [ 172, 139, 230, 154 ], "spans": [ { "bbox": [ 172, 139, 230, 154 ], "score": 1.0, "content": "Philipp Wu*", "type": "text" } ], "index": 2 } ], "index": 2 }, { "type": "text", "bbox": [ 249, 140, 440, 151 ], "lines": [ { "bbox": [ 248, 139, 441, 154 ], "spans": [ { "bbox": [ 248, 139, 348, 154 ], "score": 1.0, "content": "Alejandro Escontrela*", "type": "text" }, { "bbox": [ 365, 139, 441, 153 ], "score": 1.0, "content": "Danijar Hafner*", "type": "text" } ], "index": 3 } ], "index": 3, "bbox_fs": [ 248, 139, 441, 154 ] }, { "type": "text", "bbox": [ 235, 161, 376, 173 ], "lines": [ { "bbox": [ 233, 159, 378, 176 ], "spans": [ { "bbox": [ 233, 159, 299, 176 ], "score": 1.0, "content": "Ken Goldberg", "type": "text" }, { "bbox": [ 316, 160, 378, 174 ], "score": 1.0, "content": "Pieter Abbeel", "type": "text" } ], "index": 4 } ], "index": 4, "bbox_fs": [ 233, 159, 378, 176 ] }, { "type": "text", "bbox": [ 238, 182, 373, 193 ], "lines": [ { "bbox": [ 236, 180, 375, 196 ], "spans": [ { "bbox": [ 236, 180, 375, 196 ], "score": 1.0, "content": "University of California, Berkeley", "type": "text" } ], "index": 5 }, { "bbox": [ 264, 196, 347, 211 ], "spans": [ { "bbox": [ 264, 196, 347, 211 ], "score": 1.0, "content": "*Equal contribution", "type": "text" } ], "index": 6 } ], "index": 5, "bbox_fs": [ 236, 180, 375, 196 ] }, { "type": "text", "bbox": [ 266, 198, 345, 209 ], "lines": [], "index": 6, "bbox_fs": [ 264, 196, 347, 211 ], "lines_deleted": true }, { "type": "text", "bbox": [ 142, 232, 469, 522 ], "lines": [ { "bbox": [ 142, 231, 469, 243 ], "spans": [ { "bbox": [ 142, 231, 469, 243 ], "score": 1.0, "content": "Abstract: To solve tasks in complex environments, robots need to learn from", "type": "text" } ], "index": 7 }, { "bbox": [ 141, 243, 469, 256 ], "spans": [ { "bbox": [ 141, 243, 469, 256 ], "score": 1.0, "content": "experience. Deep reinforcement learning is a common approach to robot learning", "type": "text" } ], "index": 8 }, { "bbox": [ 142, 254, 469, 266 ], "spans": [ { "bbox": [ 142, 254, 469, 266 ], "score": 1.0, "content": "but requires a large amount of trial and error to learn, limiting its deployment in", "type": "text" } ], "index": 9 }, { "bbox": [ 141, 265, 469, 279 ], "spans": [ { "bbox": [ 141, 265, 469, 279 ], "score": 1.0, "content": "the physical world. 
As a consequence, many advances in robot learning rely on", "type": "text" } ], "index": 10 }, { "bbox": [ 141, 277, 469, 290 ], "spans": [ { "bbox": [ 141, 277, 469, 290 ], "score": 1.0, "content": "simulators. On the other hand, learning inside of simulators fails to capture the", "type": "text" } ], "index": 11 }, { "bbox": [ 141, 289, 469, 302 ], "spans": [ { "bbox": [ 141, 289, 469, 302 ], "score": 1.0, "content": "complexity of the real world, is prone to simulator inaccuracies, and the resulting", "type": "text" } ], "index": 12 }, { "bbox": [ 141, 301, 469, 313 ], "spans": [ { "bbox": [ 141, 301, 469, 313 ], "score": 1.0, "content": "behaviors do not adapt to changes in the world. The Dreamer algorithm has recently", "type": "text" } ], "index": 13 }, { "bbox": [ 141, 312, 469, 326 ], "spans": [ { "bbox": [ 141, 312, 469, 326 ], "score": 1.0, "content": "shown great promise for learning from small amounts of interaction by planning", "type": "text" } ], "index": 14 }, { "bbox": [ 141, 324, 469, 336 ], "spans": [ { "bbox": [ 141, 324, 469, 336 ], "score": 1.0, "content": "within a learned world model, outperforming pure reinforcement learning in video", "type": "text" } ], "index": 15 }, { "bbox": [ 142, 336, 469, 348 ], "spans": [ { "bbox": [ 142, 336, 469, 348 ], "score": 1.0, "content": "games. Learning a world model to predict the outcomes of potential actions enables", "type": "text" } ], "index": 16 }, { "bbox": [ 141, 348, 469, 359 ], "spans": [ { "bbox": [ 141, 348, 469, 359 ], "score": 1.0, "content": "planning in imagination, reducing the amount of trial and error needed in the real", "type": "text" } ], "index": 17 }, { "bbox": [ 141, 359, 469, 372 ], "spans": [ { "bbox": [ 141, 359, 469, 372 ], "score": 1.0, "content": "environment. However, it is unknown whether Dreamer can facilitate faster learning", "type": "text" } ], "index": 18 }, { "bbox": [ 141, 372, 469, 382 ], "spans": [ { "bbox": [ 141, 372, 469, 382 ], "score": 1.0, "content": "on physical robots. In this paper, we apply Dreamer to 4 robots to learn online", "type": "text" } ], "index": 19 }, { "bbox": [ 141, 383, 470, 395 ], "spans": [ { "bbox": [ 141, 383, 470, 395 ], "score": 1.0, "content": "and directly in the real world, without any simulators. Dreamer trains a quadruped", "type": "text" } ], "index": 20 }, { "bbox": [ 141, 394, 469, 407 ], "spans": [ { "bbox": [ 141, 394, 469, 407 ], "score": 1.0, "content": "robot to roll off its back, stand up, and walk from scratch and without resets in only", "type": "text" } ], "index": 21 }, { "bbox": [ 141, 406, 469, 418 ], "spans": [ { "bbox": [ 141, 406, 469, 418 ], "score": 1.0, "content": "1 hour. We then push the robot and find that Dreamer adapts within 10 minutes to", "type": "text" } ], "index": 22 }, { "bbox": [ 142, 419, 469, 429 ], "spans": [ { "bbox": [ 142, 419, 469, 429 ], "score": 1.0, "content": "withstand perturbations or quickly roll over and stand back up. On two different", "type": "text" } ], "index": 23 }, { "bbox": [ 141, 429, 469, 441 ], "spans": [ { "bbox": [ 141, 429, 469, 441 ], "score": 1.0, "content": "robotic arms, Dreamer learns to pick and place objects from camera images and", "type": "text" } ], "index": 24 }, { "bbox": [ 141, 442, 470, 453 ], "spans": [ { "bbox": [ 141, 442, 470, 453 ], "score": 1.0, "content": "sparse rewards, approaching human-level teleoperation performance. 
On a wheeled", "type": "text" } ], "index": 25 }, { "bbox": [ 141, 451, 470, 466 ], "spans": [ { "bbox": [ 141, 451, 470, 466 ], "score": 1.0, "content": "robot, Dreamer learns to navigate to a goal position purely from camera images,", "type": "text" } ], "index": 26 }, { "bbox": [ 141, 464, 470, 477 ], "spans": [ { "bbox": [ 141, 464, 470, 477 ], "score": 1.0, "content": "automatically resolving ambiguity about the robot orientation. Using the same", "type": "text" } ], "index": 27 }, { "bbox": [ 141, 477, 469, 488 ], "spans": [ { "bbox": [ 141, 477, 469, 488 ], "score": 1.0, "content": "hyperparameters across all experiments, we find that Dreamer is capable of online", "type": "text" } ], "index": 28 }, { "bbox": [ 141, 487, 469, 500 ], "spans": [ { "bbox": [ 141, 487, 469, 500 ], "score": 1.0, "content": "learning in the real world, which establishes a strong baseline. We release our", "type": "text" } ], "index": 29 }, { "bbox": [ 141, 498, 469, 513 ], "spans": [ { "bbox": [ 141, 498, 469, 513 ], "score": 1.0, "content": "infrastructure for future applications of world models to robot learning. Videos are", "type": "text" } ], "index": 30 }, { "bbox": [ 141, 510, 433, 524 ], "spans": [ { "bbox": [ 141, 510, 433, 524 ], "score": 1.0, "content": "available on the project website: https://danijar.com/daydreamer", "type": "text" } ], "index": 31 } ], "index": 19, "bbox_fs": [ 141, 231, 470, 524 ] }, { "type": "image", "bbox": [ 107, 542, 504, 647 ], "blocks": [ { "type": "image_body", "bbox": [ 107, 542, 504, 647 ], "group_id": 0, "lines": [ { "bbox": [ 107, 542, 504, 647 ], "spans": [ { "bbox": [ 107, 542, 504, 647 ], "score": 0.973, "type": "image", "image_path": "17f2d11eee9937e70f62a1993623ebccd221887d067e71919c350fa57662f4d3.jpg" } ] } ], "index": 33, "virtual_lines": [ { "bbox": [ 107, 542, 504, 577.0 ], "spans": [], "index": 32 }, { "bbox": [ 107, 577.0, 504, 612.0 ], "spans": [], "index": 33 }, { "bbox": [ 107, 612.0, 504, 647.0 ], "spans": [], "index": 34 } ] }, { "type": "image_caption", "bbox": [ 106, 653, 505, 721 ], "group_id": 0, "lines": [ { "bbox": [ 105, 653, 505, 666 ], "spans": [ { "bbox": [ 105, 653, 505, 666 ], "score": 1.0, "content": "Figure 1: To study the applicability of Dreamer for sample-efficient robot learning, we apply the", "type": "text" } ], "index": 35 }, { "bbox": [ 106, 664, 505, 676 ], "spans": [ { "bbox": [ 106, 664, 505, 676 ], "score": 1.0, "content": "algorithm to learn robot locomotion, manipulation, and navigation tasks from scratch in the real", "type": "text" } ], "index": 36 }, { "bbox": [ 104, 674, 505, 689 ], "spans": [ { "bbox": [ 104, 674, 505, 689 ], "score": 1.0, "content": "world on 4 robots, without simulators. The tasks evaluate a diverse range of challenges, including", "type": "text" } ], "index": 37 }, { "bbox": [ 106, 687, 506, 700 ], "spans": [ { "bbox": [ 106, 687, 506, 700 ], "score": 1.0, "content": "continuous and discrete actions, dense and sparse rewards, proprioceptive and camera inputs, as well", "type": "text" } ], "index": 38 }, { "bbox": [ 105, 698, 506, 711 ], "spans": [ { "bbox": [ 105, 698, 506, 711 ], "score": 1.0, "content": "as sensor fusion of multiple input modalities. 
1 Introduction

Teaching robots to solve complex tasks in the real world is a foundational problem of robotics research. Deep reinforcement learning (RL) offers a popular approach to robot learning that enables robots to improve their behavior over time through trial and error. However, current algorithms require too much interaction with the environment to learn successful behaviors. Recently, modern world models have shown great promise for data-efficient learning in simulated domains and video games (Hafner et al., 2019; 2020). Learning world models from past experience enables robots to imagine the future outcomes of potential actions, reducing the amount of trial and error needed in the real environment.

While learning accurate world models can be challenging, they offer compelling properties for robot learning. By predicting future outcomes, world models allow for planning and behavior learning given only small amounts of real-world interaction (Gal et al., 2016; Ebert et al., 2018). Moreover, world models summarize general dynamics knowledge about the environment that, once learned, could be reused for a wide range of downstream tasks (Sekar et al., 2020). World models also learn representations that fuse multiple sensor modalities and integrate them into latent states, reducing the need for sophisticated state estimators. Finally, world models generalize well from available offline data (Yu et al., 2021), which further accelerates learning in the real world.

Figure 2: Dreamer follows a simple pipeline for online learning on robot hardware without simulators. The current learned policy collects experience on the robot. This experience is added to the replay buffer. The world model is trained on replayed off-policy sequences through supervised learning. An actor-critic algorithm optimizes a neural network policy from imagined rollouts in the latent space of the world model. We parallelize data collection and neural network learning.
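As a concrete illustration of the replay buffer in this pipeline, the following is a minimal sketch assuming a list-backed buffer with uniform sampling of fixed-length subsequences; the class and parameter names (`ReplayBuffer`, `capacity`, `seq_len`) are ours for illustration, not the released DayDreamer code.

```python
import random
from collections import deque

class ReplayBuffer:
    """Stores transitions from the robot and samples training subsequences.

    Hypothetical minimal version: the actual system stores full episodes
    and samples batches of fixed-length sequences for world model training.
    """

    def __init__(self, capacity=1_000_000):
        self.steps = deque(maxlen=capacity)  # (obs, action, reward) tuples

    def add(self, obs, action, reward):
        self.steps.append((obs, action, reward))

    def sample(self, batch_size=16, seq_len=64):
        # Uniformly sample start indices of contiguous subsequences.
        # Assumes at least seq_len steps have been collected.
        steps = list(self.steps)
        batch = []
        for _ in range(batch_size):
            start = random.randint(0, len(steps) - seq_len)
            batch.append(steps[start:start + seq_len])
        return batch
```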
Despite the promises of world models, learning accurate world models for the real world is an open challenge. In this paper, we leverage recent advances in the Dreamer world model for training a variety of robots in the most straightforward and fundamental problem setting: online reinforcement learning in the real world, without simulators or demonstrations. As shown in Figure 2, Dreamer learns a world model from a replay buffer of past experience, learns behaviors from rollouts imagined in the latent space of the world model, and continuously interacts with the environment to explore and improve its behaviors. Our aim is to push the limits of robot learning directly in the real world and offer a robust platform to enable future work that develops the benefits of world models for robot learning. The key contributions of this paper are summarized as follows:

• Dreamer on Robots: We apply Dreamer to 4 robots, demonstrating successful learning directly in the real world, without introducing new algorithms. The tasks cover a range of challenges, including different action spaces, sensory modalities, and reward structures.

• Walking in 1 Hour: We teach a quadruped from scratch in the real world to roll off its back, stand up, and walk in only 1 hour. Afterwards, we find that the robot adapts to being pushed within 10 minutes, learning to withstand pushes or quickly roll over and get back on its feet.

• Visual Pick and Place: We train robotic arms to pick and place objects from sparse rewards, which requires localizing objects from pixels and fusing images with proprioceptive inputs. The learned behavior outperforms model-free agents and approaches the performance of a human teleoperator using the same control interface as the robot.

• Open Source: We publicly release the software infrastructure for all our experiments, which supports different action spaces and sensory modalities, offering a flexible platform for future research on world models for robot learning in the real world.
The", "type": "text" } ], "index": 67 }, { "bbox": [ 115, 654, 505, 668 ], "spans": [ { "bbox": [ 115, 654, 505, 668 ], "score": 1.0, "content": "learned behavior outperforms model-free agents and approaches the performance of a human", "type": "text" } ], "index": 68 }, { "bbox": [ 115, 667, 347, 679 ], "spans": [ { "bbox": [ 115, 667, 347, 679 ], "score": 1.0, "content": "teleoperator using the same control interface as the robot.", "type": "text" } ], "index": 69, "is_list_end_line": true }, { "bbox": [ 107, 683, 505, 695 ], "spans": [ { "bbox": [ 107, 683, 505, 695 ], "score": 1.0, "content": "• Open Source We publicly release the software infrastructure for all our experiments, which", "type": "text" } ], "index": 70, "is_list_start_line": true }, { "bbox": [ 115, 694, 506, 707 ], "spans": [ { "bbox": [ 115, 694, 506, 707 ], "score": 1.0, "content": "supports different action spaces and sensory modalities, offering a flexible platform for future", "type": "text" } ], "index": 71 }, { "bbox": [ 116, 707, 362, 718 ], "spans": [ { "bbox": [ 116, 707, 362, 718 ], "score": 1.0, "content": "research of world models for robot learning in the real world.", "type": "text" } ], "index": 72, "is_list_end_line": true } ], "index": 66, "bbox_fs": [ 106, 554, 506, 718 ] } ] }, { "preproc_blocks": [ { "type": "image", "bbox": [ 104, 52, 506, 216 ], "blocks": [ { "type": "image_body", "bbox": [ 104, 52, 506, 216 ], "group_id": 0, "lines": [ { "bbox": [ 104, 52, 506, 216 ], "spans": [ { "bbox": [ 104, 52, 506, 216 ], "score": 0.972, "type": "image", "image_path": "bbd9aa6b3f541685e1ecf9dd1c4451b92904b361a6547ee2e39414769cb64de4.jpg" } ] } ], "index": 1, "virtual_lines": [ { "bbox": [ 104, 52, 506, 106.66666666666666 ], "spans": [], "index": 0 }, { "bbox": [ 104, 106.66666666666666, 506, 161.33333333333331 ], "spans": [], "index": 1 }, { "bbox": [ 104, 161.33333333333331, 506, 215.99999999999997 ], "spans": [], "index": 2 } ] }, { "type": "image_caption", "bbox": [ 106, 222, 506, 302 ], "group_id": 0, "lines": [ { "bbox": [ 105, 222, 506, 236 ], "spans": [ { "bbox": [ 105, 222, 506, 236 ], "score": 1.0, "content": "Figure 3: Neural Network Training We leverage the Dreamer algorithm (Hafner et al., 2019;", "type": "text" } ], "index": 3 }, { "bbox": [ 105, 233, 507, 247 ], "spans": [ { "bbox": [ 105, 233, 507, 247 ], "score": 1.0, "content": "2020) for fast robot learning in real world. Dreamer consists of two main neural network components,", "type": "text" } ], "index": 4 }, { "bbox": [ 106, 245, 505, 258 ], "spans": [ { "bbox": [ 106, 245, 505, 258 ], "score": 1.0, "content": "the world model and the policy. Left: The world model follows the structure of a deep Kalman", "type": "text" } ], "index": 5 }, { "bbox": [ 105, 255, 505, 269 ], "spans": [ { "bbox": [ 105, 255, 505, 269 ], "score": 1.0, "content": "filter that is trained on subsequences drawn from the replay buffer. The encoder fuses all sensory", "type": "text" } ], "index": 6 }, { "bbox": [ 105, 267, 506, 280 ], "spans": [ { "bbox": [ 105, 267, 506, 280 ], "score": 1.0, "content": "modalities into discrete codes. The decoder reconstructs the inputs from the codes, providing a", "type": "text" } ], "index": 7 }, { "bbox": [ 106, 280, 505, 291 ], "spans": [ { "bbox": [ 106, 280, 505, 291 ], "score": 1.0, "content": "rich learning signal and enabling human inspection of model predictions. 
A recurrent state-space", "type": "text" } ], "index": 8 }, { "bbox": [ 105, 290, 507, 303 ], "spans": [ { "bbox": [ 105, 290, 507, 303 ], "score": 1.0, "content": "model (RSSM) is trained to predict future codes given actions, without observing intermediate inputs.", "type": "text" } ], "index": 9 } ], "index": 6 } ], "index": 3.5 }, { "type": "text", "bbox": [ 107, 302, 506, 347 ], "lines": [ { "bbox": [ 105, 300, 506, 313 ], "spans": [ { "bbox": [ 105, 300, 506, 313 ], "score": 1.0, "content": "Right: The world model enables massively parallel policy optimization from imagined rollouts", "type": "text" } ], "index": 10 }, { "bbox": [ 105, 313, 507, 325 ], "spans": [ { "bbox": [ 105, 313, 507, 325 ], "score": 1.0, "content": "in the compact latent space using a large batch size, without having to reconstruct sensory inputs.", "type": "text" } ], "index": 11 }, { "bbox": [ 106, 324, 505, 336 ], "spans": [ { "bbox": [ 106, 324, 505, 336 ], "score": 1.0, "content": "Dreamer trains a policy network and value network from the imagined rollouts and a learned", "type": "text" } ], "index": 12 }, { "bbox": [ 105, 336, 180, 346 ], "spans": [ { "bbox": [ 105, 336, 180, 346 ], "score": 1.0, "content": "reward function.", "type": "text" } ], "index": 13 } ], "index": 11.5 }, { "type": "title", "bbox": [ 107, 355, 177, 369 ], "lines": [ { "bbox": [ 104, 353, 178, 372 ], "spans": [ { "bbox": [ 104, 353, 178, 372 ], "score": 1.0, "content": "2 Approach", "type": "text" } ], "index": 14 } ], "index": 14 }, { "type": "text", "bbox": [ 106, 377, 505, 471 ], "lines": [ { "bbox": [ 105, 376, 506, 391 ], "spans": [ { "bbox": [ 105, 376, 506, 391 ], "score": 1.0, "content": "We leverage the Dreamer algorithm (Hafner et al., 2019; 2020) for online learning on physical robots,", "type": "text" } ], "index": 15 }, { "bbox": [ 106, 389, 506, 402 ], "spans": [ { "bbox": [ 106, 389, 506, 402 ], "score": 1.0, "content": "without the need for simulators. Figure 2 shows an overview of the approach. Dreamer learns a", "type": "text" } ], "index": 16 }, { "bbox": [ 106, 401, 505, 414 ], "spans": [ { "bbox": [ 106, 401, 505, 414 ], "score": 1.0, "content": "world model from a replay buffer of past experiences, uses an actor critic algorithm to learn behaviors", "type": "text" } ], "index": 17 }, { "bbox": [ 106, 412, 505, 425 ], "spans": [ { "bbox": [ 106, 412, 505, 425 ], "score": 1.0, "content": "from trajectories predicted by the learned model, and deploys its behavior in the environment", "type": "text" } ], "index": 18 }, { "bbox": [ 106, 424, 505, 437 ], "spans": [ { "bbox": [ 106, 424, 505, 437 ], "score": 1.0, "content": "to continuously grow the replay buffer. We decouple learning updates from data collection to", "type": "text" } ], "index": 19 }, { "bbox": [ 105, 435, 505, 448 ], "spans": [ { "bbox": [ 105, 435, 505, 448 ], "score": 1.0, "content": "meet latency requirements and to enable fast training without waiting for the environment. 
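To make the right panel concrete, here is a minimal sketch of policy optimization in imagination. It assumes the world model exposes a one-step latent transition `dynamics(state, action)` plus learned `reward` and value heads; these names, and the plain discounted return in place of the λ-return, are simplifying assumptions rather than the exact released implementation.

```python
import torch

def imagine_rollout(world_model, policy, start_state, horizon=15):
    """Roll the policy forward inside the learned latent dynamics.

    No real-world interaction happens here: every state and reward is
    predicted by the world model (sketch; method names are assumed).
    """
    state, states, actions = start_state, [], []
    for _ in range(horizon):
        action = policy(state).rsample()           # reparameterized sample
        state = world_model.dynamics(state, action)
        states.append(state)
        actions.append(action)
    return torch.stack(states), torch.stack(actions)

def policy_loss(world_model, critic, states, gamma=0.99):
    # Maximize predicted rewards plus the bootstrapped value of the last
    # imagined state (the full method uses lambda-returns instead).
    rewards = world_model.reward(states)           # shape (horizon, batch)
    steps = torch.arange(len(rewards), dtype=torch.float32).unsqueeze(-1)
    returns = (gamma ** steps * rewards).sum(0) + gamma ** len(rewards) * critic(states[-1])
    return -returns.mean()
```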
2 Approach

We leverage the Dreamer algorithm (Hafner et al., 2019; 2020) for online learning on physical robots, without the need for simulators. Figure 2 shows an overview of the approach. Dreamer learns a world model from a replay buffer of past experiences, uses an actor-critic algorithm to learn behaviors from trajectories predicted by the learned model, and deploys its behavior in the environment to continuously grow the replay buffer. We decouple learning updates from data collection to meet latency requirements and to enable fast training without waiting for the environment. In our implementation, a learner thread continuously trains the world model and actor-critic behavior, while an actor thread computes actions for environment interaction in parallel.
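A minimal sketch of this decoupling, assuming shared `env`, `agent`, and `buffer` objects with the illustrative method names below (the released infrastructure separates these concerns across processes and handles synchronization more carefully):

```python
import threading

def actor_loop(env, agent, buffer):
    # Runs at the robot's control rate, never blocked by training.
    obs = env.reset()
    while True:
        action = agent.act(obs)                  # uses the latest policy weights
        next_obs, reward = env.step(action)
        buffer.add(obs, action, reward)
        obs = next_obs

def learner_loop(agent, buffer):
    # Trains as fast as the hardware allows, independent of control latency.
    while True:
        batch = buffer.sample(batch_size=16, seq_len=64)
        agent.train_world_model(batch)           # supervised on replayed sequences
        agent.train_actor_critic()               # on imagined latent rollouts

def run(env, agent, buffer):
    threading.Thread(target=learner_loop, args=(agent, buffer), daemon=True).start()
    actor_loop(env, agent, buffer)
```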
World Model Learning: The world model is a deep neural network that learns to predict the environment dynamics, as shown in Figure 3 (left). Because sensory inputs can be large images, we predict future representations rather than future inputs. This reduces accumulating errors and enables massively parallel training with a large batch size. Thus, the world model can be thought of as a fast simulator of the environment that the robot learns autonomously, starting from a blank slate and continuously improving its model as it explores the real world. The world model is based on the Recurrent State-Space Model (RSSM; Hafner et al., 2018), which consists of four components:

$$
\begin{array}{ll}
\text{Encoder Network:} \quad \operatorname{enc}_\theta(s_t \mid s_{t-1}, a_{t-1}, x_t) & \qquad \text{Decoder Network:} \quad \operatorname{dec}_\theta(s_t) \approx x_t \\
\text{Dynamics Network:} \quad \operatorname{dyn}_\theta(s_t \mid s_{t-1}, a_{t-1}) & \qquad \text{Reward Network:} \quad \operatorname{rew}_\theta(s_{t+1}) \approx r_t
\end{array}
$$
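As a rough sketch of these four components, the module below pairs a GRU-based recurrent state $h_t$ with a stochastic code $z_t$. The layer sizes, the Gaussian latent (the figure describes discrete codes), and all names are our simplifications for illustration, not the paper's architecture.

```python
import torch
import torch.nn as nn

class RSSM(nn.Module):
    """Sketch of the four RSSM components (sizes and names are assumptions)."""

    def __init__(self, obs_dim, act_dim, hidden=200, latent=32):
        super().__init__()
        self.cell = nn.GRUCell(latent + act_dim, hidden)    # recurrent state h_t
        self.enc = nn.Linear(hidden + obs_dim, 2 * latent)  # posterior over z_t
        self.dyn = nn.Linear(hidden, 2 * latent)            # prior over z_t
        self.dec = nn.Linear(hidden + latent, obs_dim)      # reconstructs x_t
        self.rew = nn.Linear(hidden + latent, 1)            # predicts r_t

    def step(self, h, z, action, obs=None):
        # Advance the deterministic state, then sample the stochastic code
        # from the posterior if an observation is given, else from the prior.
        h = self.cell(torch.cat([z, action], -1), h)
        stats = self.enc(torch.cat([h, obs], -1)) if obs is not None else self.dyn(h)
        mean, std = stats.chunk(2, -1)
        z = mean + nn.functional.softplus(std) * torch.randn_like(mean)
        return h, z
```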
In our experiments, the robot has to discover", "type": "text" } ], "index": 38 }, { "bbox": [ 105, 664, 506, 679 ], "spans": [ { "bbox": [ 105, 664, 506, 679 ], "score": 1.0, "content": "task rewards by interacting with the real world, which the reward network learns to predict. Using", "type": "text" } ], "index": 39 }, { "bbox": [ 106, 678, 505, 689 ], "spans": [ { "bbox": [ 106, 678, 505, 689 ], "score": 1.0, "content": "manually specified rewards as a function of the decoded sensory inputs is also possible. We optimize", "type": "text" } ], "index": 40 }, { "bbox": [ 106, 689, 506, 701 ], "spans": [ { "bbox": [ 106, 689, 506, 701 ], "score": 1.0, "content": "all components of the world model jointly by stochastic backpropagation (Kingma and Welling, 2013;", "type": "text" } ], "index": 41 }, { "bbox": [ 105, 700, 196, 712 ], "spans": [ { "bbox": [ 105, 700, 196, 712 ], "score": 1.0, "content": "Rezende et al., 2014).", "type": "text" } ], "index": 42 } ], "index": 37.5 } ], "page_idx": 2, "page_size": [ 612, 792 ], "discarded_blocks": [ { "type": "discarded", "bbox": [ 302, 741, 309, 750 ], "lines": [ { "bbox": [ 301, 740, 310, 752 ], "spans": [ { "bbox": [ 301, 740, 310, 752 ], "score": 1.0, "content": "3", "type": "text" } ] } ] } ], "para_blocks": [ { "type": "image", "bbox": [ 104, 52, 506, 216 ], "blocks": [ { "type": "image_body", "bbox": [ 104, 52, 506, 216 ], "group_id": 0, "lines": [ { "bbox": [ 104, 52, 506, 216 ], "spans": [ { "bbox": [ 104, 52, 506, 216 ], "score": 0.972, "type": "image", "image_path": "bbd9aa6b3f541685e1ecf9dd1c4451b92904b361a6547ee2e39414769cb64de4.jpg" } ] } ], "index": 1, "virtual_lines": [ { "bbox": [ 104, 52, 506, 106.66666666666666 ], "spans": [], "index": 0 }, { "bbox": [ 104, 106.66666666666666, 506, 161.33333333333331 ], "spans": [], "index": 1 }, { "bbox": [ 104, 161.33333333333331, 506, 215.99999999999997 ], "spans": [], "index": 2 } ] }, { "type": "image_caption", "bbox": [ 106, 222, 506, 302 ], "group_id": 0, "lines": [ { "bbox": [ 105, 222, 506, 236 ], "spans": [ { "bbox": [ 105, 222, 506, 236 ], "score": 1.0, "content": "Figure 3: Neural Network Training We leverage the Dreamer algorithm (Hafner et al., 2019;", "type": "text" } ], "index": 3 }, { "bbox": [ 105, 233, 507, 247 ], "spans": [ { "bbox": [ 105, 233, 507, 247 ], "score": 1.0, "content": "2020) for fast robot learning in the real world. Dreamer consists of two main neural network components,", "type": "text" } ], "index": 4 }, { "bbox": [ 106, 245, 505, 258 ], "spans": [ { "bbox": [ 106, 245, 505, 258 ], "score": 1.0, "content": "the world model and the policy. Left: The world model follows the structure of a deep Kalman", "type": "text" } ], "index": 5 }, { "bbox": [ 105, 255, 505, 269 ], "spans": [ { "bbox": [ 105, 255, 505, 269 ], "score": 1.0, "content": "filter that is trained on subsequences drawn from the replay buffer. The encoder fuses all sensory", "type": "text" } ], "index": 6 }, { "bbox": [ 105, 267, 506, 280 ], "spans": [ { "bbox": [ 105, 267, 506, 280 ], "score": 1.0, "content": "modalities into discrete codes. The decoder reconstructs the inputs from the codes, providing a", "type": "text" } ], "index": 7 }, { "bbox": [ 106, 280, 505, 291 ], "spans": [ { "bbox": [ 106, 280, 505, 291 ], "score": 1.0, "content": "rich learning signal and enabling human inspection of model predictions. 
A recurrent state-space", "type": "text" } ], "index": 8 }, { "bbox": [ 105, 290, 507, 303 ], "spans": [ { "bbox": [ 105, 290, 507, 303 ], "score": 1.0, "content": "model (RSSM) is trained to predict future codes given actions, without observing intermediate inputs.", "type": "text" } ], "index": 9 } ], "index": 6 } ], "index": 3.5 }, { "type": "text", "bbox": [ 107, 302, 506, 347 ], "lines": [ { "bbox": [ 105, 300, 506, 313 ], "spans": [ { "bbox": [ 105, 300, 506, 313 ], "score": 1.0, "content": "Right: The world model enables massively parallel policy optimization from imagined rollouts", "type": "text" } ], "index": 10 }, { "bbox": [ 105, 313, 507, 325 ], "spans": [ { "bbox": [ 105, 313, 507, 325 ], "score": 1.0, "content": "in the compact latent space using a large batch size, without having to reconstruct sensory inputs.", "type": "text" } ], "index": 11 }, { "bbox": [ 106, 324, 505, 336 ], "spans": [ { "bbox": [ 106, 324, 505, 336 ], "score": 1.0, "content": "Dreamer trains a policy network and value network from the imagined rollouts and a learned", "type": "text" } ], "index": 12 }, { "bbox": [ 105, 336, 180, 346 ], "spans": [ { "bbox": [ 105, 336, 180, 346 ], "score": 1.0, "content": "reward function.", "type": "text" } ], "index": 13 } ], "index": 11.5, "bbox_fs": [ 105, 300, 507, 346 ] }, { "type": "title", "bbox": [ 107, 355, 177, 369 ], "lines": [ { "bbox": [ 104, 353, 178, 372 ], "spans": [ { "bbox": [ 104, 353, 178, 372 ], "score": 1.0, "content": "2 Approach", "type": "text" } ], "index": 14 } ], "index": 14 }, { "type": "text", "bbox": [ 106, 377, 505, 471 ], "lines": [ { "bbox": [ 105, 376, 506, 391 ], "spans": [ { "bbox": [ 105, 376, 506, 391 ], "score": 1.0, "content": "We leverage the Dreamer algorithm (Hafner et al., 2019; 2020) for online learning on physical robots,", "type": "text" } ], "index": 15 }, { "bbox": [ 106, 389, 506, 402 ], "spans": [ { "bbox": [ 106, 389, 506, 402 ], "score": 1.0, "content": "without the need for simulators. Figure 2 shows an overview of the approach. Dreamer learns a", "type": "text" } ], "index": 16 }, { "bbox": [ 106, 401, 505, 414 ], "spans": [ { "bbox": [ 106, 401, 505, 414 ], "score": 1.0, "content": "world model from a replay buffer of past experiences, uses an actor critic algorithm to learn behaviors", "type": "text" } ], "index": 17 }, { "bbox": [ 106, 412, 505, 425 ], "spans": [ { "bbox": [ 106, 412, 505, 425 ], "score": 1.0, "content": "from trajectories predicted by the learned model, and deploys its behavior in the environment", "type": "text" } ], "index": 18 }, { "bbox": [ 106, 424, 505, 437 ], "spans": [ { "bbox": [ 106, 424, 505, 437 ], "score": 1.0, "content": "to continuously grow the replay buffer. We decouple learning updates from data collection to", "type": "text" } ], "index": 19 }, { "bbox": [ 105, 435, 505, 448 ], "spans": [ { "bbox": [ 105, 435, 505, 448 ], "score": 1.0, "content": "meet latency requirements and to enable fast training without waiting for the environment. 
In our", "type": "text" } ], "index": 20 }, { "bbox": [ 106, 448, 505, 460 ], "spans": [ { "bbox": [ 106, 448, 505, 460 ], "score": 1.0, "content": "implementation, a learner thread continuously trains the world model and actor critic behavior, while", "type": "text" } ], "index": 21 }, { "bbox": [ 105, 460, 397, 471 ], "spans": [ { "bbox": [ 105, 460, 397, 471 ], "score": 1.0, "content": "an actor thread in parallel computes actions for environment interaction.", "type": "text" } ], "index": 22 } ], "index": 18.5, "bbox_fs": [ 105, 376, 506, 471 ] }, { "type": "text", "bbox": [ 106, 475, 505, 557 ], "lines": [ { "bbox": [ 106, 474, 505, 488 ], "spans": [ { "bbox": [ 106, 474, 505, 488 ], "score": 1.0, "content": "World Model Learning The world model is a deep neural network that learns to predict the", "type": "text" } ], "index": 23 }, { "bbox": [ 105, 487, 505, 500 ], "spans": [ { "bbox": [ 105, 487, 505, 500 ], "score": 1.0, "content": "environment dynamics, as shown in Figure 3 (left). Because sensory inputs can be large images, we", "type": "text" } ], "index": 24 }, { "bbox": [ 105, 499, 505, 511 ], "spans": [ { "bbox": [ 105, 499, 505, 511 ], "score": 1.0, "content": "predict future representations rather than future inputs. This reduces accumulating errors and enables", "type": "text" } ], "index": 25 }, { "bbox": [ 105, 510, 506, 523 ], "spans": [ { "bbox": [ 105, 510, 506, 523 ], "score": 1.0, "content": "massively parallel training with a large batch size. Thus, the world model can be thought of as a", "type": "text" } ], "index": 26 }, { "bbox": [ 106, 522, 505, 534 ], "spans": [ { "bbox": [ 106, 522, 505, 534 ], "score": 1.0, "content": "fast simulator of the environment that the robot learns autonomously, starting from a blank slate and", "type": "text" } ], "index": 27 }, { "bbox": [ 106, 534, 505, 545 ], "spans": [ { "bbox": [ 106, 534, 505, 545 ], "score": 1.0, "content": "continuously improving its model as it explores the real world. 
The world model is based on the", "type": "text" } ], "index": 28 }, { "bbox": [ 106, 546, 488, 558 ], "spans": [ { "bbox": [ 106, 546, 488, 558 ], "score": 1.0, "content": "Recurrent State-Space Model (RSSM; Hafner et al., 2018), which consists of four components:", "type": "text" } ], "index": 29 } ], "index": 26, "bbox_fs": [ 105, 474, 506, 558 ] }, { "type": "interline_equation", "bbox": [ 201, 561, 482, 592 ], "lines": [ { "bbox": [ 201, 561, 482, 592 ], "spans": [ { "bbox": [ 201, 561, 482, 592 ], "score": 0.74, "content": "\begin{array} { r l r l } { \text{Encoder Network:} } & { \operatorname{enc}_{\theta}(s_{t} \mid s_{t-1}, a_{t-1}, x_{t}) } & { \text{Decoder Network:} } & { \operatorname{dec}_{\theta}(s_{t}) \approx x_{t} } \\ { \text{Dynamics Network:} } & { \operatorname{dyn}_{\theta}(s_{t} \mid s_{t-1}, a_{t-1}) } & { \text{Reward Network:} } & { \operatorname{rew}_{\theta}(s_{t+1}) \approx r_{t} } \end{array}", "type": "interline_equation", "image_path": "bd74dd0968c35c4dc8ffeba6cc51d9f109521ca66008a561e30a7fcccfe148a6.jpg" } ] } ], "index": 31, "virtual_lines": [ { "bbox": [ 201, 561, 482, 571.3333333333334 ], "spans": [], "index": 30 }, { "bbox": [ 201, 571.3333333333334, 482, 581.6666666666667 ], "spans": [], "index": 31 }, { "bbox": [ 201, 581.6666666666667, 482, 592.0000000000001 ], "spans": [], "index": 32 } ] }, { "type": "text", "bbox": [ 106, 595, 506, 712 ], "lines": [ { "bbox": [ 105, 595, 506, 608 ], "spans": [ { "bbox": [ 105, 595, 506, 608 ], "score": 1.0, "content": "Physical robots are often equipped with multiple sensors of different modalities, such as proprioceptive", "type": "text" } ], "index": 33 }, { "bbox": [ 104, 606, 507, 620 ], "spans": [ { "bbox": [ 104, 606, 507, 620 ], "score": 1.0, "content": "joint readings, force sensors, and high-dimensional inputs such as RGB and depth camera images.", "type": "text" } ], "index": 34 }, { "bbox": [ 105, 618, 505, 631 ], "spans": [ { "bbox": [ 105, 618, 289, 631 ], "score": 1.0, "content": "The encoder network fuses all sensory inputs", "type": "text" }, { "bbox": [ 289, 621, 299, 630 ], "score": 0.84, "content": "x _ { t }", "type": "inline_equation" }, { "bbox": [ 299, 618, 473, 631 ], "score": 1.0, "content": "together into the stochastic representations", "type": "text" }, { "bbox": [ 473, 621, 482, 630 ], "score": 0.84, "content": "z _ { t }", "type": "inline_equation" }, { "bbox": [ 483, 618, 505, 631 ], "score": 1.0, "content": ". The", "type": "text" } ], "index": 35 }, { "bbox": [ 106, 630, 505, 643 ], "spans": [ { "bbox": [ 106, 630, 505, 643 ], "score": 1.0, "content": "dynamics model learns to predict the sequence of stochastic representations by using its recurrent state", "type": "text" } ], "index": 36 }, { "bbox": [ 106, 642, 505, 655 ], "spans": [ { "bbox": [ 106, 642, 117, 653 ], "score": 0.86, "content": "h _ { t }", "type": "inline_equation" }, { "bbox": [ 117, 642, 505, 655 ], "score": 1.0, "content": ". The decoder reconstructs the sensory inputs to provide a rich signal for learning representations", "type": "text" } ], "index": 37 }, { "bbox": [ 106, 654, 506, 666 ], "spans": [ { "bbox": [ 106, 654, 506, 666 ], "score": 1.0, "content": "and enables human inspection of model predictions. 
In our experiments, the robot has to discover", "type": "text" } ], "index": 38 }, { "bbox": [ 105, 664, 506, 679 ], "spans": [ { "bbox": [ 105, 664, 506, 679 ], "score": 1.0, "content": "task rewards by interacting with the real world, which the reward network learns to predict. Using", "type": "text" } ], "index": 39 }, { "bbox": [ 106, 678, 505, 689 ], "spans": [ { "bbox": [ 106, 678, 505, 689 ], "score": 1.0, "content": "manually specified rewards as a function of the decoded sensory inputs is also possible. We optimize", "type": "text" } ], "index": 40 }, { "bbox": [ 106, 689, 506, 701 ], "spans": [ { "bbox": [ 106, 689, 506, 701 ], "score": 1.0, "content": "all components of the world model jointly by stochastic backpropagation (Kingma and Welling, 2013;", "type": "text" } ], "index": 41 }, { "bbox": [ 105, 700, 196, 712 ], "spans": [ { "bbox": [ 105, 700, 196, 712 ], "score": 1.0, "content": "Rezende et al., 2014).", "type": "text" } ], "index": 42 } ], "index": 37.5, "bbox_fs": [ 104, 595, 507, 712 ] } ] }, { "preproc_blocks": [ { "type": "text", "bbox": [ 106, 72, 505, 143 ], "lines": [ { "bbox": [ 106, 73, 505, 85 ], "spans": [ { "bbox": [ 106, 73, 505, 85 ], "score": 1.0, "content": "Actor Critic Learning While the world model represents task-agnostic knowledge about the", "type": "text" } ], "index": 0 }, { "bbox": [ 106, 84, 505, 96 ], "spans": [ { "bbox": [ 106, 84, 505, 96 ], "score": 1.0, "content": "dynamics, the actor critic algorithm learns a behavior that is specific to the task at hand. As shown in", "type": "text" } ], "index": 1 }, { "bbox": [ 106, 96, 505, 108 ], "spans": [ { "bbox": [ 106, 96, 505, 108 ], "score": 1.0, "content": "Figure 3 (right), we learn behaviors from rollouts that are predicted in the latent space of the world", "type": "text" } ], "index": 2 }, { "bbox": [ 105, 107, 506, 120 ], "spans": [ { "bbox": [ 105, 107, 506, 120 ], "score": 1.0, "content": "model, without decoding observations. This enables massively parallel behavior learning with typical", "type": "text" } ], "index": 3 }, { "bbox": [ 105, 118, 505, 132 ], "spans": [ { "bbox": [ 105, 118, 470, 132 ], "score": 1.0, "content": "batch sizes of 16K on a single GPU. 
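As a rough illustration of such massively parallel rollouts in latent space, the sketch below replaces the learned dynamics and policy with tiny linear stand-ins; the batch size and horizon match the numbers quoted in the text, while everything else is an assumption.

```python
import torch

# Minimal sketch of batched latent imagination: starting from model states
# sampled out of replayed sequences, roll the dynamics forward H steps under
# the current policy without ever decoding observations. The tiny linear
# "dynamics" and "policy" modules are placeholders for the learned networks.
B, H, STATE, ACT = 16384, 16, 64, 12   # 16K parallel rollouts, horizon 16
dynamics = torch.nn.Linear(STATE + ACT, STATE)
policy = torch.nn.Linear(STATE, ACT)

state = torch.randn(B, STATE)          # imagined rollouts branch off replay states
states = []
with torch.no_grad():
    for _ in range(H):
        action = torch.tanh(policy(state))
        state = dynamics(torch.cat([state, action], -1))
        states.append(state)
print(torch.stack(states).shape)       # (16, 16384, 64)
```

Because every rollout is just a sequence of small tensor operations, thousands of trajectories fit on a single GPU, which is what makes behavior learning in imagination so much cheaper than trial and error on the robot.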
The actor critic algorithm consists of an actor network", "type": "text" }, { "bbox": [ 470, 119, 505, 131 ], "score": 0.93, "content": "\\pi ( a _ { t } | s _ { t } )", "type": "inline_equation" } ], "index": 4 }, { "bbox": [ 105, 129, 215, 145 ], "spans": [ { "bbox": [ 105, 129, 188, 145 ], "score": 1.0, "content": "and a critic network", "type": "text" }, { "bbox": [ 189, 131, 210, 143 ], "score": 0.93, "content": "v ( s _ { t } )", "type": "inline_equation" }, { "bbox": [ 211, 129, 215, 145 ], "score": 1.0, "content": ".", "type": "text" } ], "index": 5 } ], "index": 2.5 }, { "type": "text", "bbox": [ 106, 149, 505, 220 ], "lines": [ { "bbox": [ 106, 150, 505, 162 ], "spans": [ { "bbox": [ 106, 150, 408, 162 ], "score": 1.0, "content": "The role of the actor network is to learn a distribution over successful actions", "type": "text" }, { "bbox": [ 409, 152, 419, 161 ], "score": 0.85, "content": "a _ { t }", "type": "inline_equation" }, { "bbox": [ 419, 150, 505, 162 ], "score": 1.0, "content": "for each latent model", "type": "text" } ], "index": 6 }, { "bbox": [ 106, 162, 505, 174 ], "spans": [ { "bbox": [ 106, 162, 127, 174 ], "score": 1.0, "content": "state", "type": "text" }, { "bbox": [ 127, 163, 136, 173 ], "score": 0.86, "content": "s _ { t }", "type": "inline_equation" }, { "bbox": [ 137, 162, 505, 174 ], "score": 1.0, "content": "that maximizes the sum of future predicted task rewards. The critic network learns to predict", "type": "text" } ], "index": 7 }, { "bbox": [ 106, 174, 505, 186 ], "spans": [ { "bbox": [ 106, 174, 505, 186 ], "score": 1.0, "content": "the sum of future task rewards through temporal difference learning (Sutton and Barto, 2018). This", "type": "text" } ], "index": 8 }, { "bbox": [ 106, 185, 505, 198 ], "spans": [ { "bbox": [ 106, 185, 437, 198 ], "score": 1.0, "content": "allows the algorithm to take into account rewards beyond the planning horizon of", "type": "text" }, { "bbox": [ 437, 185, 471, 195 ], "score": 0.9, "content": "H = 1 6", "type": "inline_equation" }, { "bbox": [ 471, 185, 505, 198 ], "score": 1.0, "content": "steps to", "type": "text" } ], "index": 9 }, { "bbox": [ 105, 196, 506, 210 ], "spans": [ { "bbox": [ 105, 196, 506, 210 ], "score": 1.0, "content": "learn long-term strategies. Given a predicted trajectory of model states, the critic is trained to regress", "type": "text" } ], "index": 10 }, { "bbox": [ 105, 208, 457, 221 ], "spans": [ { "bbox": [ 105, 208, 268, 221 ], "score": 1.0, "content": "the return of the trajectory. We compute", "type": "text" }, { "bbox": [ 268, 209, 275, 218 ], "score": 0.82, "content": "\\lambda", "type": "inline_equation" }, { "bbox": [ 276, 208, 457, 221 ], "score": 1.0, "content": "-returns following Hafner et al. 
(2020; 2019):", "type": "text" } ], "index": 11 } ], "index": 8.5 }, { "type": "interline_equation", "bbox": [ 187, 223, 423, 245 ], "lines": [ { "bbox": [ 187, 223, 423, 245 ], "spans": [ { "bbox": [ 187, 223, 423, 245 ], "score": 0.93, "content": "V _ { t } ^ { \\lambda } \\doteq r _ { t } + \\gamma \\Big ( ( 1 - \\lambda ) v ( s _ { t + 1 } ) + \\lambda V _ { t + 1 } ^ { \\lambda } \\Big ) , \\quad V _ { H } ^ { \\lambda } \\doteq v ( s _ { H } ) .", "type": "interline_equation", "image_path": "bced559cf73caae8b60315e212417c82fda960dadfbf4c1dc28fc8ea7be19aee.jpg" } ] } ], "index": 12, "virtual_lines": [ { "bbox": [ 187, 223, 423, 245 ], "spans": [], "index": 12 } ] }, { "type": "text", "bbox": [ 106, 249, 505, 343 ], "lines": [ { "bbox": [ 105, 249, 505, 262 ], "spans": [ { "bbox": [ 105, 249, 300, 262 ], "score": 1.0, "content": "While the critic network is trained to regress the", "type": "text" }, { "bbox": [ 300, 250, 307, 259 ], "score": 0.83, "content": "\\lambda", "type": "inline_equation" }, { "bbox": [ 307, 249, 505, 262 ], "score": 1.0, "content": "-returns, the actor network is trained to maximize", "type": "text" } ], "index": 13 }, { "bbox": [ 105, 261, 505, 274 ], "spans": [ { "bbox": [ 105, 261, 505, 274 ], "score": 1.0, "content": "them. Different gradient estimators are available for computing the policy gradient for optimizing", "type": "text" } ], "index": 14 }, { "bbox": [ 105, 272, 506, 286 ], "spans": [ { "bbox": [ 105, 272, 506, 286 ], "score": 1.0, "content": "the actor, such as Reinforce (Williams, 1992) and the reparameterization trick (Kingma and Welling,", "type": "text" } ], "index": 15 }, { "bbox": [ 105, 284, 505, 297 ], "spans": [ { "bbox": [ 105, 284, 505, 297 ], "score": 1.0, "content": "2013; Rezende et al., 2014) that directly backpropagates return gradients through the differentiable", "type": "text" } ], "index": 16 }, { "bbox": [ 106, 296, 505, 309 ], "spans": [ { "bbox": [ 106, 296, 505, 309 ], "score": 1.0, "content": "dynamics network (Henaff et al., 2019). Following Hafner et al. (2020), we choose reparameterization", "type": "text" } ], "index": 17 }, { "bbox": [ 105, 307, 506, 321 ], "spans": [ { "bbox": [ 105, 307, 506, 321 ], "score": 1.0, "content": "gradients for continuous control tasks and Reinforce gradients for tasks with discrete actions. 
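For concreteness, here is a short numpy sketch of the λ-return recursion above, computed backward over a predicted trajectory; the helper name and the placeholder rewards and values are ours, not the paper's.

```python
import numpy as np

# Numpy sketch of the lambda-return recursion, evaluated backward from the
# bootstrap value at the planning horizon.
def lambda_returns(rewards, values, gamma=0.99, lam=0.95):
    # rewards[t] is r_t for t < H; values[t] is v(s_t), with v(s_H) last.
    H = len(values)
    returns = np.zeros(H)
    returns[-1] = values[-1]  # V_H = v(s_H) bootstraps the horizon
    for t in range(H - 2, -1, -1):
        returns[t] = rewards[t] + gamma * (
            (1 - lam) * values[t + 1] + lam * returns[t + 1])
    return returns

rewards = np.ones(15)            # placeholder predicted rewards
values = np.full(16, 10.0)       # placeholder critic values, horizon H = 16
print(lambda_returns(rewards, values)[:3])
```

Setting lam to 0 recovers one-step temporal difference targets, while lam close to 1 approaches the Monte Carlo return over the imagined horizon.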
In", "type": "text" } ], "index": 18 }, { "bbox": [ 105, 319, 506, 333 ], "spans": [ { "bbox": [ 105, 319, 506, 333 ], "score": 1.0, "content": "addition to maximizing returns, the actor is also incentivized to maintain high entropy to prevent", "type": "text" } ], "index": 19 }, { "bbox": [ 106, 330, 492, 345 ], "spans": [ { "bbox": [ 106, 330, 492, 345 ], "score": 1.0, "content": "collapse to a deterministic policy and maintain some amount of exploration throughout training:", "type": "text" } ], "index": 20 } ], "index": 16.5 }, { "type": "interline_equation", "bbox": [ 166, 349, 444, 366 ], "lines": [ { "bbox": [ 166, 349, 444, 366 ], "spans": [ { "bbox": [ 166, 349, 444, 366 ], "score": 0.9, "content": "\\begin{array} { r } { \\mathcal { L } ( \\pi ) \\doteq - \\operatorname { E } \\bigl [ \\sum _ { t = 1 } ^ { H } \\ln \\pi ( a _ { t } \\mid s _ { t } ) \\mathrm { s g } ( V _ { t } ^ { \\lambda } - v ( s _ { t } ) ) + \\eta \\mathrm { H } \\bigl [ \\pi ( a _ { t } \\mid s _ { t } ) \\bigr ] \\bigr ] } \\end{array}", "type": "interline_equation", "image_path": "29586778ea39548465206a088f601c3e740302c103692a574d7a70d8188b3983.jpg" } ] } ], "index": 21, "virtual_lines": [ { "bbox": [ 166, 349, 444, 366 ], "spans": [], "index": 21 } ] }, { "type": "text", "bbox": [ 107, 369, 505, 428 ], "lines": [ { "bbox": [ 105, 369, 505, 381 ], "spans": [ { "bbox": [ 105, 369, 505, 381 ], "score": 1.0, "content": "We optimize the actor and critic using the Adam optimizer (Kingma and Ba, 2014). To compute the", "type": "text" } ], "index": 22 }, { "bbox": [ 107, 380, 505, 394 ], "spans": [ { "bbox": [ 107, 382, 113, 391 ], "score": 0.74, "content": "\\lambda", "type": "inline_equation" }, { "bbox": [ 114, 380, 505, 394 ], "score": 1.0, "content": "-returns, we use a slowly updated copy of the critic network as common in the literature (Mnih", "type": "text" } ], "index": 23 }, { "bbox": [ 105, 393, 505, 405 ], "spans": [ { "bbox": [ 105, 393, 505, 405 ], "score": 1.0, "content": "et al., 2015; Lillicrap et al., 2015). The actor and critic gradients do not affect the world model, as", "type": "text" } ], "index": 24 }, { "bbox": [ 105, 403, 505, 416 ], "spans": [ { "bbox": [ 105, 403, 505, 416 ], "score": 1.0, "content": "this would lead to incorrect and overly optimistic model predictions. The hyperparameters are listed", "type": "text" } ], "index": 25 }, { "bbox": [ 105, 416, 171, 428 ], "spans": [ { "bbox": [ 105, 416, 171, 428 ], "score": 1.0, "content": "in Appendix D.", "type": "text" } ], "index": 26 } ], "index": 24 }, { "type": "title", "bbox": [ 107, 447, 191, 461 ], "lines": [ { "bbox": [ 103, 445, 193, 465 ], "spans": [ { "bbox": [ 103, 445, 193, 465 ], "score": 1.0, "content": "3 Experiments", "type": "text" } ], "index": 27 } ], "index": 27 }, { "type": "text", "bbox": [ 106, 473, 506, 555 ], "lines": [ { "bbox": [ 105, 473, 506, 486 ], "spans": [ { "bbox": [ 105, 473, 506, 486 ], "score": 1.0, "content": "We evaluate Dreamer on 4 robots, each with a different task, and compare its performance to", "type": "text" } ], "index": 28 }, { "bbox": [ 105, 485, 505, 498 ], "spans": [ { "bbox": [ 105, 485, 505, 498 ], "score": 1.0, "content": "appropriate algorithmic and human baselines. The experiments are representative of common robotic", "type": "text" } ], "index": 29 }, { "bbox": [ 105, 495, 506, 511 ], "spans": [ { "bbox": [ 105, 495, 506, 511 ], "score": 1.0, "content": "tasks, such as locomotion, manipulation, and navigation. 
The tasks pose a diverse range of challenges,", "type": "text" } ], "index": 30 }, { "bbox": [ 105, 508, 505, 522 ], "spans": [ { "bbox": [ 105, 508, 505, 522 ], "score": 1.0, "content": "including continuous and discrete actions, dense and sparse rewards, proprioceptive and image", "type": "text" } ], "index": 31 }, { "bbox": [ 105, 519, 506, 533 ], "spans": [ { "bbox": [ 105, 519, 506, 533 ], "score": 1.0, "content": "observations, and sensor fusion. The goal of the experiments is to evaluate whether the recent", "type": "text" } ], "index": 32 }, { "bbox": [ 106, 533, 506, 544 ], "spans": [ { "bbox": [ 106, 533, 506, 544 ], "score": 1.0, "content": "successes of learned world models enable sample-efficient robot learning directly in the real world.", "type": "text" } ], "index": 33 }, { "bbox": [ 106, 544, 365, 557 ], "spans": [ { "bbox": [ 106, 544, 365, 557 ], "score": 1.0, "content": "Specifically, we aim to answer the following research questions:", "type": "text" } ], "index": 34 } ], "index": 31 }, { "type": "text", "bbox": [ 106, 564, 506, 608 ], "lines": [ { "bbox": [ 104, 564, 449, 577 ], "spans": [ { "bbox": [ 104, 564, 449, 577 ], "score": 1.0, "content": "• Does Dreamer enable robot learning directly in the real world, without simulators?", "type": "text" } ], "index": 35 }, { "bbox": [ 105, 579, 493, 593 ], "spans": [ { "bbox": [ 105, 579, 493, 593 ], "score": 1.0, "content": "• Does Dreamer succeed across various robot platforms, sensory modalities, and action spaces?", "type": "text" } ], "index": 36 }, { "bbox": [ 105, 595, 506, 609 ], "spans": [ { "bbox": [ 105, 595, 506, 609 ], "score": 1.0, "content": "• How does the data-efficiency of Dreamer compare to previous reinforcement learning algorithms?", "type": "text" } ], "index": 37 } ], "index": 36 }, { "type": "text", "bbox": [ 106, 617, 506, 722 ], "lines": [ { "bbox": [ 106, 617, 505, 630 ], "spans": [ { "bbox": [ 106, 617, 505, 630 ], "score": 1.0, "content": "Implementation We build on the official implementation of DreamerV2 (Hafner et al., 2020). We", "type": "text" } ], "index": 38 }, { "bbox": [ 106, 629, 506, 641 ], "spans": [ { "bbox": [ 106, 629, 506, 641 ], "score": 1.0, "content": "develop an asynchronous actor and learner setup, which is essential in environments with high control", "type": "text" } ], "index": 39 }, { "bbox": [ 106, 641, 505, 653 ], "spans": [ { "bbox": [ 106, 641, 505, 653 ], "score": 1.0, "content": "rates, such as the quadruped, and also accelerates learning for slower environments, such as the robot", "type": "text" } ], "index": 40 }, { "bbox": [ 105, 651, 506, 666 ], "spans": [ { "bbox": [ 105, 651, 506, 666 ], "score": 1.0, "content": "arms. The actor thread computes online actions for the robot and sends trajectories of 128 time steps", "type": "text" } ], "index": 41 }, { "bbox": [ 106, 664, 506, 676 ], "spans": [ { "bbox": [ 106, 664, 506, 676 ], "score": 1.0, "content": "to the replay buffer. The learner thread samples data from the replay buffer, updates the world model,", "type": "text" } ], "index": 42 }, { "bbox": [ 106, 675, 505, 688 ], "spans": [ { "bbox": [ 106, 675, 505, 688 ], "score": 1.0, "content": "and optimizes the policy using imagination rollouts. Policy weights are synced from the learner to", "type": "text" } ], "index": 43 }, { "bbox": [ 106, 687, 506, 700 ], "spans": [ { "bbox": [ 106, 687, 506, 700 ], "score": 1.0, "content": "the actor every 20 seconds. 
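A minimal sketch of this asynchronous setup follows, with placeholder environment, policy, and update steps; only the 128-step chunks and the 20-second sync interval come from the text, all other names and numbers are assumptions.

```python
import random
import threading
import time

CHUNK_STEPS, SYNC_SECONDS = 128, 20.0
replay = []                          # shared replay buffer of trajectory chunks
weights = {"version": 0}             # stand-in for the policy parameters
lock = threading.Lock()
stop = threading.Event()

def learner():
    # Trains as fast as the hardware allows, never waiting for the robot.
    while not stop.is_set():
        with lock:
            batch = random.sample(replay, 4) if len(replay) >= 4 else None
        if batch is not None:
            with lock:
                weights["version"] += 1   # placeholder for a gradient update
        time.sleep(0.001)

def actor():
    # Steps the environment at the control rate and ships 128-step chunks.
    chunk, obs, last_sync = [], 0.0, time.monotonic()
    local_weights = dict(weights)         # a real actor acts with these
    while not stop.is_set():
        action = random.uniform(-1.0, 1.0)     # placeholder for pi(a | s)
        obs, reward = random.random(), 0.0     # placeholder environment step
        chunk.append((obs, action, reward))
        if len(chunk) == CHUNK_STEPS:
            with lock:
                replay.append(chunk)
            chunk = []
        if time.monotonic() - last_sync >= SYNC_SECONDS:
            with lock:
                local_weights = dict(weights)  # pull the latest learner weights
            last_sync = time.monotonic()
        time.sleep(0.001)                      # stand-in for the control period

threads = [threading.Thread(target=learner), threading.Thread(target=actor)]
for t in threads:
    t.start()
time.sleep(2.0)
stop.set()
for t in threads:
    t.join()
print(len(replay), "chunks collected,", weights["version"], "learner updates")
```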
We use an RSSM with 256 units to speed up the training computation.", "type": "text" } ], "index": 44 }, { "bbox": [ 105, 698, 506, 713 ], "spans": [ { "bbox": [ 105, 698, 506, 713 ], "score": 1.0, "content": "We use identical hyperparameters across all experiments, enabling off-the-shelf training on different", "type": "text" } ], "index": 45 }, { "bbox": [ 105, 710, 189, 722 ], "spans": [ { "bbox": [ 105, 710, 189, 722 ], "score": 1.0, "content": "robot embodiments.", "type": "text" } ], "index": 46 } ], "index": 42 } ], "page_idx": 3, "page_size": [ 612, 792 ], "discarded_blocks": [ { "type": "discarded", "bbox": [ 302, 741, 308, 750 ], "lines": [ { "bbox": [ 302, 741, 310, 752 ], "spans": [ { "bbox": [ 302, 741, 310, 752 ], "score": 1.0, "content": "4", "type": "text" } ] } ] } ], "para_blocks": [ { "type": "text", "bbox": [ 106, 72, 505, 143 ], "lines": [ { "bbox": [ 106, 73, 505, 85 ], "spans": [ { "bbox": [ 106, 73, 505, 85 ], "score": 1.0, "content": "Actor Critic Learning While the world model represents task-agnostic knowledge about the", "type": "text" } ], "index": 0 }, { "bbox": [ 106, 84, 505, 96 ], "spans": [ { "bbox": [ 106, 84, 505, 96 ], "score": 1.0, "content": "dynamics, the actor critic algorithm learns a behavior that is specific to the task at hand. As shown in", "type": "text" } ], "index": 1 }, { "bbox": [ 106, 96, 505, 108 ], "spans": [ { "bbox": [ 106, 96, 505, 108 ], "score": 1.0, "content": "Figure 3 (right), we learn behaviors from rollouts that are predicted in the latent space of the world", "type": "text" } ], "index": 2 }, { "bbox": [ 105, 107, 506, 120 ], "spans": [ { "bbox": [ 105, 107, 506, 120 ], "score": 1.0, "content": "model, without decoding observations. This enables massively parallel behavior learning with typical", "type": "text" } ], "index": 3 }, { "bbox": [ 105, 118, 505, 132 ], "spans": [ { "bbox": [ 105, 118, 470, 132 ], "score": 1.0, "content": "batch sizes of 16K on a single GPU. The actor critic algorithm consists of an actor network", "type": "text" }, { "bbox": [ 470, 119, 505, 131 ], "score": 0.93, "content": "\\pi ( a _ { t } | s _ { t } )", "type": "inline_equation" } ], "index": 4 }, { "bbox": [ 105, 129, 215, 145 ], "spans": [ { "bbox": [ 105, 129, 188, 145 ], "score": 1.0, "content": "and a critic network", "type": "text" }, { "bbox": [ 189, 131, 210, 143 ], "score": 0.93, "content": "v ( s _ { t } )", "type": "inline_equation" }, { "bbox": [ 211, 129, 215, 145 ], "score": 1.0, "content": ".", "type": "text" } ], "index": 5 } ], "index": 2.5, "bbox_fs": [ 105, 73, 506, 145 ] }, { "type": "text", "bbox": [ 106, 149, 505, 220 ], "lines": [ { "bbox": [ 106, 150, 505, 162 ], "spans": [ { "bbox": [ 106, 150, 408, 162 ], "score": 1.0, "content": "The role of the actor network is to learn a distribution over successful actions", "type": "text" }, { "bbox": [ 409, 152, 419, 161 ], "score": 0.85, "content": "a _ { t }", "type": "inline_equation" }, { "bbox": [ 419, 150, 505, 162 ], "score": 1.0, "content": "for each latent model", "type": "text" } ], "index": 6 }, { "bbox": [ 106, 162, 505, 174 ], "spans": [ { "bbox": [ 106, 162, 127, 174 ], "score": 1.0, "content": "state", "type": "text" }, { "bbox": [ 127, 163, 136, 173 ], "score": 0.86, "content": "s _ { t }", "type": "inline_equation" }, { "bbox": [ 137, 162, 505, 174 ], "score": 1.0, "content": "that maximizes the sum of future predicted task rewards. 
The critic network learns to predict", "type": "text" } ], "index": 7 }, { "bbox": [ 106, 174, 505, 186 ], "spans": [ { "bbox": [ 106, 174, 505, 186 ], "score": 1.0, "content": "the sum of future task rewards through temporal difference learning (Sutton and Barto, 2018). This", "type": "text" } ], "index": 8 }, { "bbox": [ 106, 185, 505, 198 ], "spans": [ { "bbox": [ 106, 185, 437, 198 ], "score": 1.0, "content": "allows the algorithm to take into account rewards beyond the planning horizon of", "type": "text" }, { "bbox": [ 437, 185, 471, 195 ], "score": 0.9, "content": "H = 1 6", "type": "inline_equation" }, { "bbox": [ 471, 185, 505, 198 ], "score": 1.0, "content": "steps to", "type": "text" } ], "index": 9 }, { "bbox": [ 105, 196, 506, 210 ], "spans": [ { "bbox": [ 105, 196, 506, 210 ], "score": 1.0, "content": "learn long-term strategies. Given a predicted trajectory of model states, the critic is trained to regress", "type": "text" } ], "index": 10 }, { "bbox": [ 105, 208, 457, 221 ], "spans": [ { "bbox": [ 105, 208, 268, 221 ], "score": 1.0, "content": "the return of the trajectory. We compute", "type": "text" }, { "bbox": [ 268, 209, 275, 218 ], "score": 0.82, "content": "\\lambda", "type": "inline_equation" }, { "bbox": [ 276, 208, 457, 221 ], "score": 1.0, "content": "-returns following Hafner et al. (2020; 2019):", "type": "text" } ], "index": 11 } ], "index": 8.5, "bbox_fs": [ 105, 150, 506, 221 ] }, { "type": "interline_equation", "bbox": [ 187, 223, 423, 245 ], "lines": [ { "bbox": [ 187, 223, 423, 245 ], "spans": [ { "bbox": [ 187, 223, 423, 245 ], "score": 0.93, "content": "V _ { t } ^ { \\lambda } \\doteq r _ { t } + \\gamma \\Big ( ( 1 - \\lambda ) v ( s _ { t + 1 } ) + \\lambda V _ { t + 1 } ^ { \\lambda } \\Big ) , \\quad V _ { H } ^ { \\lambda } \\doteq v ( s _ { H } ) .", "type": "interline_equation", "image_path": "bced559cf73caae8b60315e212417c82fda960dadfbf4c1dc28fc8ea7be19aee.jpg" } ] } ], "index": 12, "virtual_lines": [ { "bbox": [ 187, 223, 423, 245 ], "spans": [], "index": 12 } ] }, { "type": "text", "bbox": [ 106, 249, 505, 343 ], "lines": [ { "bbox": [ 105, 249, 505, 262 ], "spans": [ { "bbox": [ 105, 249, 300, 262 ], "score": 1.0, "content": "While the critic network is trained to regress the", "type": "text" }, { "bbox": [ 300, 250, 307, 259 ], "score": 0.83, "content": "\\lambda", "type": "inline_equation" }, { "bbox": [ 307, 249, 505, 262 ], "score": 1.0, "content": "-returns, the actor network is trained to maximize", "type": "text" } ], "index": 13 }, { "bbox": [ 105, 261, 505, 274 ], "spans": [ { "bbox": [ 105, 261, 505, 274 ], "score": 1.0, "content": "them. Different gradient estimators are available for computing the policy gradient for optimizing", "type": "text" } ], "index": 14 }, { "bbox": [ 105, 272, 506, 286 ], "spans": [ { "bbox": [ 105, 272, 506, 286 ], "score": 1.0, "content": "the actor, such as Reinforce (Williams, 1992) and the reparameterization trick (Kingma and Welling,", "type": "text" } ], "index": 15 }, { "bbox": [ 105, 284, 505, 297 ], "spans": [ { "bbox": [ 105, 284, 505, 297 ], "score": 1.0, "content": "2013; Rezende et al., 2014) that directly backpropagates return gradients through the differentiable", "type": "text" } ], "index": 16 }, { "bbox": [ 106, 296, 505, 309 ], "spans": [ { "bbox": [ 106, 296, 505, 309 ], "score": 1.0, "content": "dynamics network (Henaff et al., 2019). Following Hafner et al. 
(2020), we choose reparameterization", "type": "text" } ], "index": 17 }, { "bbox": [ 105, 307, 506, 321 ], "spans": [ { "bbox": [ 105, 307, 506, 321 ], "score": 1.0, "content": "gradients for continuous control tasks and Reinforce gradients for tasks with discrete actions. In", "type": "text" } ], "index": 18 }, { "bbox": [ 105, 319, 506, 333 ], "spans": [ { "bbox": [ 105, 319, 506, 333 ], "score": 1.0, "content": "addition to maximizing returns, the actor is also incentivized to maintain high entropy to prevent", "type": "text" } ], "index": 19 }, { "bbox": [ 106, 330, 492, 345 ], "spans": [ { "bbox": [ 106, 330, 492, 345 ], "score": 1.0, "content": "collapse to a deterministic policy and maintain some amount of exploration throughout training:", "type": "text" } ], "index": 20 } ], "index": 16.5, "bbox_fs": [ 105, 249, 506, 345 ] }, { "type": "interline_equation", "bbox": [ 166, 349, 444, 366 ], "lines": [ { "bbox": [ 166, 349, 444, 366 ], "spans": [ { "bbox": [ 166, 349, 444, 366 ], "score": 0.9, "content": "\\begin{array} { r } { \\mathcal { L } ( \\pi ) \\doteq - \\operatorname { E } \\bigl [ \\sum _ { t = 1 } ^ { H } \\ln \\pi ( a _ { t } \\mid s _ { t } ) \\mathrm { s g } ( V _ { t } ^ { \\lambda } - v ( s _ { t } ) ) + \\eta \\mathrm { H } \\bigl [ \\pi ( a _ { t } \\mid s _ { t } ) \\bigr ] \\bigr ] } \\end{array}", "type": "interline_equation", "image_path": "29586778ea39548465206a088f601c3e740302c103692a574d7a70d8188b3983.jpg" } ] } ], "index": 21, "virtual_lines": [ { "bbox": [ 166, 349, 444, 366 ], "spans": [], "index": 21 } ] }, { "type": "text", "bbox": [ 107, 369, 505, 428 ], "lines": [ { "bbox": [ 105, 369, 505, 381 ], "spans": [ { "bbox": [ 105, 369, 505, 381 ], "score": 1.0, "content": "We optimize the actor and critic using the Adam optimizer (Kingma and Ba, 2014). To compute the", "type": "text" } ], "index": 22 }, { "bbox": [ 107, 380, 505, 394 ], "spans": [ { "bbox": [ 107, 382, 113, 391 ], "score": 0.74, "content": "\\lambda", "type": "inline_equation" }, { "bbox": [ 114, 380, 505, 394 ], "score": 1.0, "content": "-returns, we use a slowly updated copy of the critic network as common in the literature (Mnih", "type": "text" } ], "index": 23 }, { "bbox": [ 105, 393, 505, 405 ], "spans": [ { "bbox": [ 105, 393, 505, 405 ], "score": 1.0, "content": "et al., 2015; Lillicrap et al., 2015). The actor and critic gradients do not affect the world model, as", "type": "text" } ], "index": 24 }, { "bbox": [ 105, 403, 505, 416 ], "spans": [ { "bbox": [ 105, 403, 505, 416 ], "score": 1.0, "content": "this would lead to incorrect and overly optimistic model predictions. 
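A compact sketch of these objectives for the discrete-action (Reinforce) case, assuming the λ-returns have already been computed as above: the stop-gradient sg(·) appears as .detach(), which is also how actor and critic gradients are kept out of the world model, and the target-critic update is only noted as an assumed EMA schedule.

```python
import torch

# Sketch of the actor and critic losses with placeholder shapes and inputs.
H, A, eta = 16, 4, 1e-3
logits = torch.randn(H, A, requires_grad=True)   # actor outputs over the horizon
values = torch.randn(H, requires_grad=True)      # critic v(s_t)
lam_returns = torch.randn(H)                     # precomputed V_t^lambda targets
actions = torch.randint(A, (H,))

dist = torch.distributions.Categorical(logits=logits)
advantage = (lam_returns - values).detach()      # sg(V^lambda - v(s_t))
actor_loss = -(dist.log_prob(actions) * advantage + eta * dist.entropy()).mean()
critic_loss = ((values - lam_returns) ** 2).mean()  # regress the lambda-returns
(actor_loss + critic_loss).backward()
# The slowly updated target critic used for bootstrapping would be refreshed
# separately, e.g. target = tau * online + (1 - tau) * target (assumed form).
print(float(actor_loss), float(critic_loss))
```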
The hyperparameters are listed", "type": "text" } ], "index": 25 }, { "bbox": [ 105, 416, 171, 428 ], "spans": [ { "bbox": [ 105, 416, 171, 428 ], "score": 1.0, "content": "in Appendix D.", "type": "text" } ], "index": 26 } ], "index": 24, "bbox_fs": [ 105, 369, 505, 428 ] }, { "type": "title", "bbox": [ 107, 447, 191, 461 ], "lines": [ { "bbox": [ 103, 445, 193, 465 ], "spans": [ { "bbox": [ 103, 445, 193, 465 ], "score": 1.0, "content": "3 Experiments", "type": "text" } ], "index": 27 } ], "index": 27 }, { "type": "text", "bbox": [ 106, 473, 506, 555 ], "lines": [ { "bbox": [ 105, 473, 506, 486 ], "spans": [ { "bbox": [ 105, 473, 506, 486 ], "score": 1.0, "content": "We evaluate Dreamer on 4 robots, each with a different task, and compare its performance to", "type": "text" } ], "index": 28 }, { "bbox": [ 105, 485, 505, 498 ], "spans": [ { "bbox": [ 105, 485, 505, 498 ], "score": 1.0, "content": "appropriate algorithmic and human baselines. The experiments are representative of common robotic", "type": "text" } ], "index": 29 }, { "bbox": [ 105, 495, 506, 511 ], "spans": [ { "bbox": [ 105, 495, 506, 511 ], "score": 1.0, "content": "tasks, such as locomotion, manipulation, and navigation. The tasks pose a diverse range of challenges,", "type": "text" } ], "index": 30 }, { "bbox": [ 105, 508, 505, 522 ], "spans": [ { "bbox": [ 105, 508, 505, 522 ], "score": 1.0, "content": "including continuous and discrete actions, dense and sparse rewards, proprioceptive and image", "type": "text" } ], "index": 31 }, { "bbox": [ 105, 519, 506, 533 ], "spans": [ { "bbox": [ 105, 519, 506, 533 ], "score": 1.0, "content": "observations, and sensor fusion. The goal of the experiments is to evaluate whether the recent", "type": "text" } ], "index": 32 }, { "bbox": [ 106, 533, 506, 544 ], "spans": [ { "bbox": [ 106, 533, 506, 544 ], "score": 1.0, "content": "successes of learned world models enable sample-efficient robot learning directly in the real world.", "type": "text" } ], "index": 33 }, { "bbox": [ 106, 544, 365, 557 ], "spans": [ { "bbox": [ 106, 544, 365, 557 ], "score": 1.0, "content": "Specifically, we aim to answer the following research questions:", "type": "text" } ], "index": 34 } ], "index": 31, "bbox_fs": [ 105, 473, 506, 557 ] }, { "type": "text", "bbox": [ 106, 564, 506, 608 ], "lines": [ { "bbox": [ 104, 564, 449, 577 ], "spans": [ { "bbox": [ 104, 564, 449, 577 ], "score": 1.0, "content": "• Does Dreamer enable robot learning directly in the real world, without simulators?", "type": "text" } ], "index": 35 }, { "bbox": [ 105, 579, 493, 593 ], "spans": [ { "bbox": [ 105, 579, 493, 593 ], "score": 1.0, "content": "• Does Dreamer succeed across various robot platforms, sensory modalities, and action spaces?", "type": "text" } ], "index": 36 }, { "bbox": [ 105, 595, 506, 609 ], "spans": [ { "bbox": [ 105, 595, 506, 609 ], "score": 1.0, "content": "• How does the data-efficiency of Dreamer compare to previous reinforcement learning algorithms?", "type": "text" } ], "index": 37 } ], "index": 36, "bbox_fs": [ 104, 564, 506, 609 ] }, { "type": "text", "bbox": [ 106, 617, 506, 722 ], "lines": [ { "bbox": [ 106, 617, 505, 630 ], "spans": [ { "bbox": [ 106, 617, 505, 630 ], "score": 1.0, "content": "Implementation We build on the official implementation of DreamerV2 (Hafner et al., 2020). 
We", "type": "text" } ], "index": 38 }, { "bbox": [ 106, 629, 506, 641 ], "spans": [ { "bbox": [ 106, 629, 506, 641 ], "score": 1.0, "content": "develop an asynchronous actor and learner setup, which is essential in environments with high control", "type": "text" } ], "index": 39 }, { "bbox": [ 106, 641, 505, 653 ], "spans": [ { "bbox": [ 106, 641, 505, 653 ], "score": 1.0, "content": "rates, such as the quadruped, and also accelerates learning for slower environments, such as the robot", "type": "text" } ], "index": 40 }, { "bbox": [ 105, 651, 506, 666 ], "spans": [ { "bbox": [ 105, 651, 506, 666 ], "score": 1.0, "content": "arms. The actor thread computes online actions for the robot and sends trajectories of 128 time steps", "type": "text" } ], "index": 41 }, { "bbox": [ 106, 664, 506, 676 ], "spans": [ { "bbox": [ 106, 664, 506, 676 ], "score": 1.0, "content": "to the replay buffer. The learner thread samples data from the replay buffer, updates the world model,", "type": "text" } ], "index": 42 }, { "bbox": [ 106, 675, 505, 688 ], "spans": [ { "bbox": [ 106, 675, 505, 688 ], "score": 1.0, "content": "and optimizes the policy using imagination rollouts. Policy weights are synced from the learner to", "type": "text" } ], "index": 43 }, { "bbox": [ 106, 687, 506, 700 ], "spans": [ { "bbox": [ 106, 687, 506, 700 ], "score": 1.0, "content": "the actor every 20 seconds. We use an RSSM with 256 units to speed up the training computation.", "type": "text" } ], "index": 44 }, { "bbox": [ 105, 698, 506, 713 ], "spans": [ { "bbox": [ 105, 698, 506, 713 ], "score": 1.0, "content": "We use identical hyperparameters across all experiments, enabling off-the-shelf training on different", "type": "text" } ], "index": 45 }, { "bbox": [ 105, 710, 189, 722 ], "spans": [ { "bbox": [ 105, 710, 189, 722 ], "score": 1.0, "content": "robot embodiments.", "type": "text" } ], "index": 46 } ], "index": 42, "bbox_fs": [ 105, 617, 506, 722 ] } ] }, { "preproc_blocks": [ { "type": "image", "bbox": [ 106, 57, 501, 143 ], "blocks": [ { "type": "image_body", "bbox": [ 106, 57, 501, 143 ], "group_id": 0, "lines": [ { "bbox": [ 106, 57, 501, 143 ], "spans": [ { "bbox": [ 106, 57, 501, 143 ], "score": 0.964, "type": "image", "image_path": "159d86a4fe017221206965fa98efc6ce35e16bebec7536f231b04a5fa470830b.jpg" } ] } ], "index": 1, "virtual_lines": [ { "bbox": [ 106, 57, 501, 85.66666666666667 ], "spans": [], "index": 0 }, { "bbox": [ 106, 85.66666666666667, 501, 114.33333333333334 ], "spans": [], "index": 1 }, { "bbox": [ 106, 114.33333333333334, 501, 143.0 ], "spans": [], "index": 2 } ] }, { "type": "image_caption", "bbox": [ 106, 156, 505, 258 ], "group_id": 0, "lines": [ { "bbox": [ 106, 157, 506, 169 ], "spans": [ { "bbox": [ 106, 157, 506, 169 ], "score": 1.0, "content": "Figure 4: A1 Quadruped Walking Starting from lying on its back with the feet in the air, Dreamer", "type": "text" } ], "index": 3 }, { "bbox": [ 106, 168, 506, 180 ], "spans": [ { "bbox": [ 106, 168, 506, 180 ], "score": 1.0, "content": "learns to roll over, stand up, and walk in 1 hour of real world training time, without simulators or", "type": "text" } ], "index": 4 }, { "bbox": [ 105, 180, 505, 191 ], "spans": [ { "bbox": [ 105, 180, 505, 191 ], "score": 1.0, "content": "resets. In contrast, SAC only learns to roll over but neither to stand up nor to walk. 
For SAC, we also", "type": "text" } ], "index": 5 }, { "bbox": [ 104, 189, 506, 204 ], "spans": [ { "bbox": [ 104, 189, 506, 204 ], "score": 1.0, "content": "had to help the robot out of a dead-locked leg configuration during training. On the right we show", "type": "text" } ], "index": 6 }, { "bbox": [ 106, 202, 505, 214 ], "spans": [ { "bbox": [ 106, 202, 505, 214 ], "score": 1.0, "content": "training curves for both SAC and Dreamer. The maximum reward is 14. The filled circles indicate", "type": "text" } ], "index": 7 }, { "bbox": [ 105, 213, 506, 226 ], "spans": [ { "bbox": [ 105, 213, 506, 226 ], "score": 1.0, "content": "times where the robot fell on its back, requiring the learning of a robust strategy for getting back", "type": "text" } ], "index": 8 }, { "bbox": [ 105, 224, 506, 237 ], "spans": [ { "bbox": [ 105, 224, 506, 237 ], "score": 1.0, "content": "up. After 1 hour of training, we start pushing the robot and find that it adapts its behavior within 10", "type": "text" } ], "index": 9 }, { "bbox": [ 105, 236, 505, 248 ], "spans": [ { "bbox": [ 105, 236, 505, 248 ], "score": 1.0, "content": "minutes to withstand light pushes and quickly roll back on its feet for hard pushes. The graph shows", "type": "text" } ], "index": 10 }, { "bbox": [ 105, 247, 494, 259 ], "spans": [ { "bbox": [ 105, 247, 494, 259 ], "score": 1.0, "content": "a single training run with the shaded area indicating one standard deviation within each time bin.", "type": "text" } ], "index": 11 } ], "index": 7 } ], "index": 4.0 }, { "type": "text", "bbox": [ 106, 268, 506, 432 ], "lines": [ { "bbox": [ 105, 268, 505, 281 ], "spans": [ { "bbox": [ 105, 268, 505, 281 ], "score": 1.0, "content": "Baselines We compare to a strong learning algorithm for each of our experimental setups. The A1", "type": "text" } ], "index": 12 }, { "bbox": [ 105, 281, 505, 292 ], "spans": [ { "bbox": [ 105, 281, 505, 292 ], "score": 1.0, "content": "quadruped robot uses continuous actions and low-dimensional inputs, allowing us to compare to SAC", "type": "text" } ], "index": 13 }, { "bbox": [ 106, 293, 505, 304 ], "spans": [ { "bbox": [ 106, 293, 505, 304 ], "score": 1.0, "content": "(Haarnoja et al., 2018a;b), a popular algorithm for data-efficient continuous control. For the visual", "type": "text" } ], "index": 14 }, { "bbox": [ 105, 303, 506, 317 ], "spans": [ { "bbox": [ 105, 303, 506, 317 ], "score": 1.0, "content": "pick and place experiments on the XArm and UR5 robots, inputs are images and proprioceptive", "type": "text" } ], "index": 15 }, { "bbox": [ 105, 315, 506, 327 ], "spans": [ { "bbox": [ 105, 315, 506, 327 ], "score": 1.0, "content": "readings and actions are discrete, suggesting algorithms from the DQN (Mnih et al., 2015) line of", "type": "text" } ], "index": 16 }, { "bbox": [ 105, 327, 505, 339 ], "spans": [ { "bbox": [ 105, 327, 505, 339 ], "score": 1.0, "content": "work as baselines. We choose Rainbow (Hessel et al., 2018) as a powerful representative of this", "type": "text" } ], "index": 17 }, { "bbox": [ 105, 339, 505, 351 ], "spans": [ { "bbox": [ 105, 339, 505, 351 ], "score": 1.0, "content": "category, an algorithm that combines many improvements of DQN. 
To input the proprioceptive", "type": "text" } ], "index": 18 }, { "bbox": [ 105, 351, 505, 363 ], "spans": [ { "bbox": [ 105, 351, 505, 363 ], "score": 1.0, "content": "readings, we concatenate them as broadcasted planes to the RGB channels of the image, a common", "type": "text" } ], "index": 19 }, { "bbox": [ 106, 362, 506, 375 ], "spans": [ { "bbox": [ 106, 362, 506, 375 ], "score": 1.0, "content": "practice in the literature (Schrittwieser et al., 2019). For the UR5, we additionally compare against", "type": "text" } ], "index": 20 }, { "bbox": [ 105, 372, 506, 387 ], "spans": [ { "bbox": [ 105, 372, 506, 387 ], "score": 1.0, "content": "PPO (Schulman et al., 2017), with similar modifications for fusing image and proprioceptive readings.", "type": "text" } ], "index": 21 }, { "bbox": [ 105, 385, 506, 398 ], "spans": [ { "bbox": [ 105, 385, 506, 398 ], "score": 1.0, "content": "In addition, we compare against a human operator controlling the robot arm through the robot control", "type": "text" } ], "index": 22 }, { "bbox": [ 106, 397, 506, 410 ], "spans": [ { "bbox": [ 106, 397, 506, 410 ], "score": 1.0, "content": "interface. For the Sphero navigation task, inputs are images and actions are continuous. The state-of-", "type": "text" } ], "index": 23 }, { "bbox": [ 106, 408, 506, 421 ], "spans": [ { "bbox": [ 106, 408, 506, 421 ], "score": 1.0, "content": "the-art baseline in this category is DrQv2 (Yarats et al., 2021), which uses image augmentation to", "type": "text" } ], "index": 24 }, { "bbox": [ 105, 420, 216, 434 ], "spans": [ { "bbox": [ 105, 420, 216, 434 ], "score": 1.0, "content": "increase sample-efficiency.", "type": "text" } ], "index": 25 } ], "index": 18.5 }, { "type": "title", "bbox": [ 107, 443, 234, 455 ], "lines": [ { "bbox": [ 105, 441, 235, 458 ], "spans": [ { "bbox": [ 105, 441, 235, 458 ], "score": 1.0, "content": "3.1 A1 Quadruped Walking", "type": "text" } ], "index": 26 } ], "index": 26 }, { "type": "text", "bbox": [ 107, 459, 336, 609 ], "lines": [ { "bbox": [ 106, 458, 337, 470 ], "spans": [ { "bbox": [ 106, 458, 337, 470 ], "score": 1.0, "content": "This high-dimensional continuous control task requires", "type": "text" } ], "index": 27 }, { "bbox": [ 106, 471, 336, 481 ], "spans": [ { "bbox": [ 106, 471, 336, 481 ], "score": 1.0, "content": "training a quadruped robot to roll over from its back, stand", "type": "text" } ], "index": 28 }, { "bbox": [ 106, 482, 337, 493 ], "spans": [ { "bbox": [ 106, 482, 337, 493 ], "score": 1.0, "content": "up, and walk forward at a fixed target velocity. 
Prior work", "type": "text" } ], "index": 29 }, { "bbox": [ 105, 493, 337, 506 ], "spans": [ { "bbox": [ 105, 493, 337, 506 ], "score": 1.0, "content": "in quadruped locomotion requires either extensive training", "type": "text" } ], "index": 30 }, { "bbox": [ 105, 504, 337, 518 ], "spans": [ { "bbox": [ 105, 504, 337, 518 ], "score": 1.0, "content": "in simulation under domain randomization, using recovery", "type": "text" } ], "index": 31 }, { "bbox": [ 106, 516, 336, 528 ], "spans": [ { "bbox": [ 106, 516, 336, 528 ], "score": 1.0, "content": "controllers to avoid unsafe states, or defining the action", "type": "text" } ], "index": 32 }, { "bbox": [ 106, 529, 337, 540 ], "spans": [ { "bbox": [ 106, 529, 337, 540 ], "score": 1.0, "content": "space as parameterized trajectory generators that restrict", "type": "text" } ], "index": 33 }, { "bbox": [ 106, 540, 337, 552 ], "spans": [ { "bbox": [ 106, 540, 337, 552 ], "score": 1.0, "content": "the space of motions (Rusu et al., 2016; Peng et al., 2018;", "type": "text" } ], "index": 34 }, { "bbox": [ 106, 551, 336, 563 ], "spans": [ { "bbox": [ 106, 551, 336, 563 ], "score": 1.0, "content": "Rudin et al., 2021; Lee et al., 2020; Yang et al., 2019). In", "type": "text" } ], "index": 35 }, { "bbox": [ 105, 563, 337, 576 ], "spans": [ { "bbox": [ 105, 563, 337, 576 ], "score": 1.0, "content": "contrast, we train in the end-to-end reinforcement learning", "type": "text" } ], "index": 36 }, { "bbox": [ 106, 576, 338, 587 ], "spans": [ { "bbox": [ 106, 576, 338, 587 ], "score": 1.0, "content": "setting directly on the robot, without simulators or resets.", "type": "text" } ], "index": 37 }, { "bbox": [ 106, 587, 336, 598 ], "spans": [ { "bbox": [ 106, 587, 336, 598 ], "score": 1.0, "content": "We use the Unitree A1 robot that consists of 12 direct drive", "type": "text" } ], "index": 38 }, { "bbox": [ 106, 599, 336, 610 ], "spans": [ { "bbox": [ 106, 599, 251, 610 ], "score": 1.0, "content": "motors. 
The motors are controlled at", "type": "text" }, { "bbox": [ 251, 599, 276, 609 ], "score": 0.7, "content": "20\,\mathrm{Hz}", "type": "inline_equation" }, { "bbox": [ 277, 599, 336, 610 ], "score": 1.0, "content": "via continuous", "type": "text" } ], "index": 39 } ], "index": 33 }, { "type": "image", "bbox": [ 344, 460, 504, 551 ], "blocks": [ { "type": "image_body", "bbox": [ 344, 460, 504, 551 ], "group_id": 1, "lines": [ { "bbox": [ 344, 460, 504, 551 ], "spans": [ { "bbox": [ 344, 460, 504, 551 ], "score": 0.971, "type": "image", "image_path": "69e7a0dc11e3a7ecd812dec526777f2a39e7aed63605587ef789903e7f57fb8c.jpg" } ] } ], "index": 43, "virtual_lines": [ { "bbox": [ 344, 460, 504, 473.0 ], "spans": [], "index": 40 }, { "bbox": [ 344, 473.0, 504, 486.0 ], "spans": [], "index": 41 }, { "bbox": [ 344, 486.0, 504, 499.0 ], "spans": [], "index": 42 }, { "bbox": [ 344, 499.0, 504, 512.0 ], "spans": [], "index": 43 }, { "bbox": [ 344, 512.0, 504, 525.0 ], "spans": [], "index": 44 }, { "bbox": [ 344, 525.0, 504, 538.0 ], "spans": [], "index": 45 }, { "bbox": [ 344, 538.0, 504, 551.0 ], "spans": [], "index": 46 } ] }, { "type": "image_caption", "bbox": [ 344, 556, 505, 602 ], "group_id": 1, "lines": [ { "bbox": [ 343, 556, 506, 568 ], "spans": [ { "bbox": [ 343, 556, 506, 568 ], "score": 1.0, "content": "Figure 8: Within 10 minutes of perturb-", "type": "text" } ], "index": 47 }, { "bbox": [ 343, 568, 505, 580 ], "spans": [ { "bbox": [ 343, 568, 505, 580 ], "score": 1.0, "content": "ing the learned walking behavior, the", "type": "text" } ], "index": 48 }, { "bbox": [ 343, 579, 505, 591 ], "spans": [ { "bbox": [ 343, 579, 505, 591 ], "score": 1.0, "content": "robot adapts to withstanding pushes or", "type": "text" } ], "index": 49 }, { "bbox": [ 343, 590, 505, 602 ], "spans": [ { "bbox": [ 343, 590, 505, 602 ], "score": 1.0, "content": "quickly rolling over and back on its feet.", "type": "text" } ], "index": 50 } ], "index": 48.5 } ], "index": 45.75 }, { "type": "text", "bbox": [ 107, 610, 505, 668 ], "lines": [ { "bbox": [ 106, 610, 505, 622 ], "spans": [ { "bbox": [ 106, 610, 505, 622 ], "score": 1.0, "content": "actions that represent motor angles that are realized by a PD controller on the hardware. Actions", "type": "text" } ], "index": 51 }, { "bbox": [ 105, 621, 506, 635 ], "spans": [ { "bbox": [ 105, 621, 506, 635 ], "score": 1.0, "content": "are filtered with a Butterworth filter to protect the motor from high-frequency actions. The input", "type": "text" } ], "index": 52 }, { "bbox": [ 106, 634, 504, 646 ], "spans": [ { "bbox": [ 106, 634, 504, 646 ], "score": 1.0, "content": "consists of motor angles, orientations, and angular velocities. Due to space constraints, we manually", "type": "text" } ], "index": 53 }, { "bbox": [ 105, 644, 505, 658 ], "spans": [ { "bbox": [ 105, 644, 505, 658 ], "score": 1.0, "content": "intervene when the robot has reached the end of the available training area, without modifying the", "type": "text" } ], "index": 54 }, { "bbox": [ 105, 657, 317, 669 ], "spans": [ { "bbox": [ 105, 657, 317, 669 ], "score": 1.0, "content": "joint configuration or orientation that the robot is in.", "type": "text" } ], "index": 55 } ], "index": 53 }, { "type": "text", "bbox": [ 107, 676, 505, 722 ], "lines": [ { "bbox": [ 105, 675, 505, 689 ], "spans": [ { "bbox": [ 105, 675, 505, 689 ], "score": 1.0, "content": "The reward function is the sum of five terms. 
An upright reward is computed from the base frame", "type": "text" } ], "index": 56 }, { "bbox": [ 105, 686, 507, 700 ], "spans": [ { "bbox": [ 105, 686, 146, 700 ], "score": 1.0, "content": "up vector", "type": "text" }, { "bbox": [ 147, 687, 159, 698 ], "score": 0.87, "content": "\hat{z}^{T}", "type": "inline_equation" }, { "bbox": [ 159, 686, 507, 700 ], "score": 1.0, "content": ", terms for matching the standing pose are computed from the joint angles of the hips,", "type": "text" } ], "index": 57 }, { "bbox": [ 105, 699, 505, 712 ], "spans": [ { "bbox": [ 105, 699, 505, 712 ], "score": 1.0, "content": "shoulders, and knees, and a forward velocity term is computed from the projected forward velocity", "type": "text" } ], "index": 58 }, { "bbox": [ 107, 708, 505, 725 ], "spans": [ { "bbox": [ 107, 710, 122, 721 ], "score": 0.89, "content": "s_{v_{x}}", "type": "inline_equation" }, { "bbox": [ 122, 708, 213, 725 ], "score": 1.0, "content": "and the total velocity", "type": "text" }, { "bbox": [ 213, 710, 224, 721 ], "score": 0.87, "content": "s_{v}", "type": "inline_equation" }, { "bbox": [ 224, 708, 505, 725 ], "score": 1.0, "content": ". Without the reward curriculum, the agent receives spurious reward", "type": "text" } ], "index": 59 } ], "index": 57.5 } ], "page_idx": 4, "page_size": [ 612, 792 ], "discarded_blocks": [ { "type": "discarded", "bbox": [ 302, 741, 308, 750 ], "lines": [ { "bbox": [ 302, 740, 309, 753 ], "spans": [ { "bbox": [ 302, 740, 309, 753 ], "score": 1.0, "content": "5", "type": "text" } ] } ] } ], "para_blocks": [ { "type": "image", "bbox": [ 106, 57, 501, 143 ], "blocks": [ { "type": "image_body", "bbox": [ 106, 57, 501, 143 ], "group_id": 0, "lines": [ { "bbox": [ 106, 57, 501, 143 ], "spans": [ { "bbox": [ 106, 57, 501, 143 ], "score": 0.964, "type": "image", "image_path": "159d86a4fe017221206965fa98efc6ce35e16bebec7536f231b04a5fa470830b.jpg" } ] } ], "index": 1, "virtual_lines": [ { "bbox": [ 106, 57, 501, 85.66666666666667 ], "spans": [], "index": 0 }, { "bbox": [ 106, 85.66666666666667, 501, 114.33333333333334 ], "spans": [], "index": 1 }, { "bbox": [ 106, 114.33333333333334, 501, 143.0 ], "spans": [], "index": 2 } ] }, { "type": "image_caption", "bbox": [ 106, 156, 505, 258 ], "group_id": 0, "lines": [ { "bbox": [ 106, 157, 506, 169 ], "spans": [ { "bbox": [ 106, 157, 506, 169 ], "score": 1.0, "content": "Figure 4: A1 Quadruped Walking Starting from lying on its back with the feet in the air, Dreamer", "type": "text" } ], "index": 3 }, { "bbox": [ 106, 168, 506, 180 ], "spans": [ { "bbox": [ 106, 168, 506, 180 ], "score": 1.0, "content": "learns to roll over, stand up, and walk in 1 hour of real world training time, without simulators or", "type": "text" } ], "index": 4 }, { "bbox": [ 105, 180, 505, 191 ], "spans": [ { "bbox": [ 105, 180, 505, 191 ], "score": 1.0, "content": "resets. In contrast, SAC only learns to roll over but neither to stand up nor to walk. For SAC, we also", "type": "text" } ], "index": 5 }, { "bbox": [ 104, 189, 506, 204 ], "spans": [ { "bbox": [ 104, 189, 506, 204 ], "score": 1.0, "content": "had to help the robot out of a dead-locked leg configuration during training. On the right we show", "type": "text" } ], "index": 6 }, { "bbox": [ 106, 202, 505, 214 ], "spans": [ { "bbox": [ 106, 202, 505, 214 ], "score": 1.0, "content": "training curves for both SAC and Dreamer. The maximum reward is 14. 
The filled circles indicate", "type": "text" } ], "index": 7 }, { "bbox": [ 105, 213, 506, 226 ], "spans": [ { "bbox": [ 105, 213, 506, 226 ], "score": 1.0, "content": "times where the robot fell on its back, requiring the learning of a robust strategy for getting back", "type": "text" } ], "index": 8 }, { "bbox": [ 105, 224, 506, 237 ], "spans": [ { "bbox": [ 105, 224, 506, 237 ], "score": 1.0, "content": "up. After 1 hour of training, we start pushing the robot and find that it adapts its behavior within 10", "type": "text" } ], "index": 9 }, { "bbox": [ 105, 236, 505, 248 ], "spans": [ { "bbox": [ 105, 236, 505, 248 ], "score": 1.0, "content": "minutes to withstand light pushes and quickly roll back on its feet for hard pushes. The graph shows", "type": "text" } ], "index": 10 }, { "bbox": [ 105, 247, 494, 259 ], "spans": [ { "bbox": [ 105, 247, 494, 259 ], "score": 1.0, "content": "a single training run with the shaded area indicating one standard deviation within each time bin.", "type": "text" } ], "index": 11 } ], "index": 7 } ], "index": 4.0 }, { "type": "text", "bbox": [ 106, 268, 506, 432 ], "lines": [ { "bbox": [ 105, 268, 505, 281 ], "spans": [ { "bbox": [ 105, 268, 505, 281 ], "score": 1.0, "content": "Baselines We compare to a strong learning algorithm for each of our experimental setups. The A1", "type": "text" } ], "index": 12 }, { "bbox": [ 105, 281, 505, 292 ], "spans": [ { "bbox": [ 105, 281, 505, 292 ], "score": 1.0, "content": "quadruped robot uses continuous actions and low-dimensional inputs, allowing us to compare to SAC", "type": "text" } ], "index": 13 }, { "bbox": [ 106, 293, 505, 304 ], "spans": [ { "bbox": [ 106, 293, 505, 304 ], "score": 1.0, "content": "(Haarnoja et al., 2018a;b), a popular algorithm for data-efficient continuous control. For the visual", "type": "text" } ], "index": 14 }, { "bbox": [ 105, 303, 506, 317 ], "spans": [ { "bbox": [ 105, 303, 506, 317 ], "score": 1.0, "content": "pick and place experiments on the XArm and UR5 robots, inputs are images and proprioceptive", "type": "text" } ], "index": 15 }, { "bbox": [ 105, 315, 506, 327 ], "spans": [ { "bbox": [ 105, 315, 506, 327 ], "score": 1.0, "content": "readings and actions are discrete, suggesting algorithms from the DQN (Mnih et al., 2015) line of", "type": "text" } ], "index": 16 }, { "bbox": [ 105, 327, 505, 339 ], "spans": [ { "bbox": [ 105, 327, 505, 339 ], "score": 1.0, "content": "work as baselines. We choose Rainbow (Hessel et al., 2018) as a powerful representative of this", "type": "text" } ], "index": 17 }, { "bbox": [ 105, 339, 505, 351 ], "spans": [ { "bbox": [ 105, 339, 505, 351 ], "score": 1.0, "content": "category, an algorithm that combines many improvements of DQN. To input the proprioceptive", "type": "text" } ], "index": 18 }, { "bbox": [ 105, 351, 505, 363 ], "spans": [ { "bbox": [ 105, 351, 505, 363 ], "score": 1.0, "content": "readings, we concatenate them as broadcasted planes to the RGB channels of the image, a common", "type": "text" } ], "index": 19 }, { "bbox": [ 106, 362, 506, 375 ], "spans": [ { "bbox": [ 106, 362, 506, 375 ], "score": 1.0, "content": "practice in the literature (Schrittwieser et al., 2019). 
For the UR5, we additionally compare against", "type": "text" } ], "index": 20 }, { "bbox": [ 105, 372, 506, 387 ], "spans": [ { "bbox": [ 105, 372, 506, 387 ], "score": 1.0, "content": "PPO (Schulman et al., 2017), with similar modifications for fusing image and proprioceptive readings.", "type": "text" } ], "index": 21 }, { "bbox": [ 105, 385, 506, 398 ], "spans": [ { "bbox": [ 105, 385, 506, 398 ], "score": 1.0, "content": "In addition, we compare against a human operator controlling the robot arm through the robot control", "type": "text" } ], "index": 22 }, { "bbox": [ 106, 397, 506, 410 ], "spans": [ { "bbox": [ 106, 397, 506, 410 ], "score": 1.0, "content": "interface. For the Sphero navigation task, inputs are images and actions are continuous. The state-of-", "type": "text" } ], "index": 23 }, { "bbox": [ 106, 408, 506, 421 ], "spans": [ { "bbox": [ 106, 408, 506, 421 ], "score": 1.0, "content": "the-art baseline in this category is DrQv2 (Yarats et al., 2021), which uses image augmentation to", "type": "text" } ], "index": 24 }, { "bbox": [ 105, 420, 216, 434 ], "spans": [ { "bbox": [ 105, 420, 216, 434 ], "score": 1.0, "content": "increase sample-efficiency.", "type": "text" } ], "index": 25 } ], "index": 18.5, "bbox_fs": [ 105, 268, 506, 434 ] }, { "type": "title", "bbox": [ 107, 443, 234, 455 ], "lines": [ { "bbox": [ 105, 441, 235, 458 ], "spans": [ { "bbox": [ 105, 441, 235, 458 ], "score": 1.0, "content": "3.1 A1 Quadruped Walking", "type": "text" } ], "index": 26 } ], "index": 26 }, { "type": "text", "bbox": [ 107, 459, 336, 609 ], "lines": [ { "bbox": [ 106, 458, 337, 470 ], "spans": [ { "bbox": [ 106, 458, 337, 470 ], "score": 1.0, "content": "This high-dimensional continuous control task requires", "type": "text" } ], "index": 27 }, { "bbox": [ 106, 471, 336, 481 ], "spans": [ { "bbox": [ 106, 471, 336, 481 ], "score": 1.0, "content": "training a quadruped robot to roll over from its back, stand", "type": "text" } ], "index": 28 }, { "bbox": [ 106, 482, 337, 493 ], "spans": [ { "bbox": [ 106, 482, 337, 493 ], "score": 1.0, "content": "up, and walk forward at a fixed target velocity. Prior work", "type": "text" } ], "index": 29 }, { "bbox": [ 105, 493, 337, 506 ], "spans": [ { "bbox": [ 105, 493, 337, 506 ], "score": 1.0, "content": "in quadruped locomotion requires either extensive training", "type": "text" } ], "index": 30 }, { "bbox": [ 105, 504, 337, 518 ], "spans": [ { "bbox": [ 105, 504, 337, 518 ], "score": 1.0, "content": "in simulation under domain randomization, using recovery", "type": "text" } ], "index": 31 }, { "bbox": [ 106, 516, 336, 528 ], "spans": [ { "bbox": [ 106, 516, 336, 528 ], "score": 1.0, "content": "controllers to avoid unsafe states, or defining the action", "type": "text" } ], "index": 32 }, { "bbox": [ 106, 529, 337, 540 ], "spans": [ { "bbox": [ 106, 529, 337, 540 ], "score": 1.0, "content": "space as parameterized trajectory generators that restrict", "type": "text" } ], "index": 33 }, { "bbox": [ 106, 540, 337, 552 ], "spans": [ { "bbox": [ 106, 540, 337, 552 ], "score": 1.0, "content": "the space of motions (Rusu et al., 2016; Peng et al., 2018;", "type": "text" } ], "index": 34 }, { "bbox": [ 106, 551, 336, 563 ], "spans": [ { "bbox": [ 106, 551, 336, 563 ], "score": 1.0, "content": "Rudin et al., 2021; Lee et al., 2020; Yang et al., 2019). 
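To make the fusion of proprioception and images concrete, the following is a minimal sketch of concatenating proprioceptive readings as broadcasted planes to the RGB channels; it is our own illustration, and the array shapes and normalization are assumptions rather than the paper's exact preprocessing.

```python
import numpy as np

def fuse_proprio_with_image(rgb: np.ndarray, proprio: np.ndarray) -> np.ndarray:
    """Broadcast each proprioceptive scalar into an HxW plane and
    concatenate the planes to the RGB channels (shapes are assumptions)."""
    height, width, _ = rgb.shape
    planes = np.broadcast_to(proprio, (height, width, proprio.size))
    return np.concatenate([rgb.astype(np.float32) / 255.0, planes], axis=-1)

# Example: a 64x64 RGB frame fused with two proprioceptive scalars -> 5 channels.
fused = fuse_proprio_with_image(np.zeros((64, 64, 3), np.uint8),
                                np.array([0.1, -0.4], np.float32))
assert fused.shape == (64, 64, 5)
```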
In", "type": "text" } ], "index": 35 }, { "bbox": [ 105, 563, 337, 576 ], "spans": [ { "bbox": [ 105, 563, 337, 576 ], "score": 1.0, "content": "contrast, we train in the end-to-end reinforcement learning", "type": "text" } ], "index": 36 }, { "bbox": [ 106, 576, 338, 587 ], "spans": [ { "bbox": [ 106, 576, 338, 587 ], "score": 1.0, "content": "setting directly on the robot, without simulators or resets.", "type": "text" } ], "index": 37 }, { "bbox": [ 106, 587, 336, 598 ], "spans": [ { "bbox": [ 106, 587, 336, 598 ], "score": 1.0, "content": "We use the Unitree A1 robot that consists of 12 direct drive", "type": "text" } ], "index": 38 }, { "bbox": [ 106, 599, 336, 610 ], "spans": [ { "bbox": [ 106, 599, 251, 610 ], "score": 1.0, "content": "motors. The motors are controlled at", "type": "text" }, { "bbox": [ 251, 599, 276, 609 ], "score": 0.7, "content": "2 0 \\mathrm { H z }", "type": "inline_equation" }, { "bbox": [ 277, 599, 336, 610 ], "score": 1.0, "content": "via continuous", "type": "text" } ], "index": 39 }, { "bbox": [ 106, 610, 505, 622 ], "spans": [ { "bbox": [ 106, 610, 505, 622 ], "score": 1.0, "content": "actions that represent motor angles that are realized by a PD controller on the hardware. Actions", "type": "text" } ], "index": 51 }, { "bbox": [ 105, 621, 506, 635 ], "spans": [ { "bbox": [ 105, 621, 506, 635 ], "score": 1.0, "content": "are filtered with a Butterworth filter to protect the motor from high-frequency actions. The input", "type": "text" } ], "index": 52 }, { "bbox": [ 106, 634, 504, 646 ], "spans": [ { "bbox": [ 106, 634, 504, 646 ], "score": 1.0, "content": "consists of motor angles, orientations, and angular velocities. Due to space constraints, we manually", "type": "text" } ], "index": 53 }, { "bbox": [ 105, 644, 505, 658 ], "spans": [ { "bbox": [ 105, 644, 505, 658 ], "score": 1.0, "content": "intervene when the robot has reached the end of the available training area, without modifying the", "type": "text" } ], "index": 54 }, { "bbox": [ 105, 657, 317, 669 ], "spans": [ { "bbox": [ 105, 657, 317, 669 ], "score": 1.0, "content": "joint configuration or orientation that the robot is in.", "type": "text" } ], "index": 55 } ], "index": 33, "bbox_fs": [ 105, 458, 338, 610 ] }, { "type": "image", "bbox": [ 344, 460, 504, 551 ], "blocks": [ { "type": "image_body", "bbox": [ 344, 460, 504, 551 ], "group_id": 1, "lines": [ { "bbox": [ 344, 460, 504, 551 ], "spans": [ { "bbox": [ 344, 460, 504, 551 ], "score": 0.971, "type": "image", "image_path": "69e7a0dc11e3a7ecd812dec526777f2a39e7aed63605587ef789903e7f57fb8c.jpg" } ] } ], "index": 43, "virtual_lines": [ { "bbox": [ 344, 460, 504, 473.0 ], "spans": [], "index": 40 }, { "bbox": [ 344, 473.0, 504, 486.0 ], "spans": [], "index": 41 }, { "bbox": [ 344, 486.0, 504, 499.0 ], "spans": [], "index": 42 }, { "bbox": [ 344, 499.0, 504, 512.0 ], "spans": [], "index": 43 }, { "bbox": [ 344, 512.0, 504, 525.0 ], "spans": [], "index": 44 }, { "bbox": [ 344, 525.0, 504, 538.0 ], "spans": [], "index": 45 }, { "bbox": [ 344, 538.0, 504, 551.0 ], "spans": [], "index": 46 } ] }, { "type": "image_caption", "bbox": [ 344, 556, 505, 602 ], "group_id": 1, "lines": [ { "bbox": [ 343, 556, 506, 568 ], "spans": [ { "bbox": [ 343, 556, 506, 568 ], "score": 1.0, "content": "Figure 8: Within 10 minutes of perturb-", "type": "text" } ], "index": 47 }, { "bbox": [ 343, 568, 505, 580 ], "spans": [ { "bbox": [ 343, 568, 505, 580 ], "score": 1.0, "content": "ing the learned walking behavior, the", "type": "text" } ], "index": 48 }, { "bbox": 
[ 343, 579, 505, 591 ], "spans": [ { "bbox": [ 343, 579, 505, 591 ], "score": 1.0, "content": "robot adapts to withstanding pushes or", "type": "text" } ], "index": 49 }, { "bbox": [ 343, 590, 505, 602 ], "spans": [ { "bbox": [ 343, 590, 505, 602 ], "score": 1.0, "content": "quickly rolling over and back on its feet.", "type": "text" } ], "index": 50 } ], "index": 48.5 } ], "index": 45.75 }, { "type": "text", "bbox": [ 107, 610, 505, 668 ], "lines": [], "index": 53, "bbox_fs": [ 105, 610, 506, 669 ], "lines_deleted": true }, { "type": "text", "bbox": [ 107, 676, 505, 722 ], "lines": [ { "bbox": [ 105, 675, 505, 689 ], "spans": [ { "bbox": [ 105, 675, 505, 689 ], "score": 1.0, "content": "The reward function is the sum of five terms. An upright reward is computed from the base frame", "type": "text" } ], "index": 56 }, { "bbox": [ 105, 686, 507, 700 ], "spans": [ { "bbox": [ 105, 686, 146, 700 ], "score": 1.0, "content": "up vector", "type": "text" }, { "bbox": [ 147, 687, 159, 698 ], "score": 0.87, "content": "\\hat { z } ^ { T }", "type": "inline_equation" }, { "bbox": [ 159, 686, 507, 700 ], "score": 1.0, "content": ", terms for matching the standing pose are computed from the joint angles of the hips,", "type": "text" } ], "index": 57 }, { "bbox": [ 105, 699, 505, 712 ], "spans": [ { "bbox": [ 105, 699, 505, 712 ], "score": 1.0, "content": "shoulders, and knees, and a forward velocity term is computed from the projected forward velocity", "type": "text" } ], "index": 58 }, { "bbox": [ 107, 708, 505, 725 ], "spans": [ { "bbox": [ 107, 710, 122, 721 ], "score": 0.89, "content": "\\boldsymbol { s } _ { v } \\boldsymbol { x }", "type": "inline_equation" }, { "bbox": [ 122, 708, 213, 725 ], "score": 1.0, "content": "and the total velocity", "type": "text" }, { "bbox": [ 213, 710, 224, 721 ], "score": 0.87, "content": "s _ { v }", "type": "inline_equation" }, { "bbox": [ 224, 708, 505, 725 ], "score": 1.0, "content": ". Without the reward curriculum, the agent receives spurious reward", "type": "text" } ], "index": 59 }, { "bbox": [ 106, 245, 505, 258 ], "spans": [ { "bbox": [ 106, 245, 505, 258 ], "score": 1.0, "content": "values due to the velocity estimator’s dependence on foot-ground contact events. 
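As an illustration of the action smoothing, here is a minimal sketch of low-pass filtering a stream of motor-angle targets with a Butterworth filter before they are tracked by the PD controller. This is our own sketch; the filter order and cutoff frequency are assumptions, not the values used on the robot.

```python
import numpy as np
from scipy.signal import butter, lfilter, lfilter_zi

class ButterworthActionFilter:
    """Online low-pass filter for the 20 Hz motor-angle targets.
    Order and cutoff are illustrative assumptions."""

    def __init__(self, fs: float = 20.0, cutoff: float = 4.0, order: int = 2):
        self.b, self.a = butter(order, cutoff, btype="low", fs=fs)
        self.zi = None  # per-joint filter state, created on the first action

    def __call__(self, action: np.ndarray) -> np.ndarray:
        if self.zi is None:
            # Start each joint at steady state to avoid a startup transient.
            self.zi = [lfilter_zi(self.b, self.a) * a for a in action]
        smoothed = np.empty_like(action)
        for j, target in enumerate(action):
            out, self.zi[j] = lfilter(self.b, self.a, [target], zi=self.zi[j])
            smoothed[j] = out[0]
        return smoothed  # filtered motor angles, then realized by the PD controller
```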
The reward function is the sum of five terms. An upright reward is computed from the base frame up vector $\hat{z}$, terms for matching the standing pose are computed from the joint angles of the hips, shoulders, and knees, and a forward velocity term is computed from the projected forward velocity ${}^{\mathcal{B}}v_{x}$ and the total velocity $\lVert {}^{\mathcal{B}}v \rVert$. Without the reward curriculum, the agent receives spurious reward values due to the velocity estimator's dependence on foot-ground contact events. Each of the five terms is active while its preceding terms are satisfied to at least 0.7 and is otherwise set to 0:
$$r^{\mathrm{upr}} \doteq \big(\hat{z}^{T}[0,0,1] - 1\big)/2 \qquad r^{\mathrm{hip}} \doteq 1 - \tfrac{1}{4}\lVert q^{\mathrm{hip}} + 0.2\rVert_{1} \qquad r^{\mathrm{shoulder}} \doteq 1 - \tfrac{1}{4}\lVert q^{\mathrm{shoulder}} + 0.2\rVert_{1}$$

$$r^{\mathrm{knee}} \doteq 1 - \tfrac{1}{4}\lVert q^{\mathrm{knee}} - 1.0\rVert_{1} \qquad r^{\mathrm{velocity}} \doteq 5\big(\max(0,\,{}^{\mathcal{B}}v_{x}) \,/\, \lVert {}^{\mathcal{B}}v \rVert_{2} \cdot \mathrm{clip}\big({}^{\mathcal{B}}v_{x}/0.3,\,-1,\,1\big) + 1\big)$$
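A minimal sketch of the curriculum gating follows; the term definitions are taken from the equations above, but applying the 0.7 test to the raw term values is a simplification of ours, since the paper does not spell out the per-term normalization.

```python
import numpy as np

def a1_reward(z_up, q_hip, q_shoulder, q_knee, v_body):
    """Sum of the five terms, gated so that a term is active only while
    all preceding terms are satisfied to at least 0.7."""
    terms = [
        (np.dot(z_up, [0.0, 0.0, 1.0]) - 1.0) / 2.0,             # r^upr
        1.0 - 0.25 * np.abs(np.asarray(q_hip) + 0.2).sum(),      # r^hip
        1.0 - 0.25 * np.abs(np.asarray(q_shoulder) + 0.2).sum(), # r^shoulder
        1.0 - 0.25 * np.abs(np.asarray(q_knee) - 1.0).sum(),     # r^knee
        5.0 * (max(0.0, v_body[0]) / (np.linalg.norm(v_body) + 1e-8)
               * np.clip(v_body[0] / 0.3, -1.0, 1.0) + 1.0),     # r^velocity
    ]
    total, preceding_satisfied = 0.0, True
    for term in terms:
        if not preceding_satisfied:
            break  # later terms are set to 0 until earlier ones are satisfied
        total += term
        preceding_satisfied = term >= 0.7
    return total
```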
As shown in Figure 4, after one hour of training, Dreamer learns to consistently flip the robot over from its back, stand up, and walk forward. In the first 5 minutes of training, the robot manages to roll off its back and land on its feet. 20 minutes later, it learns how to stand up on its feet. About 1 hour into training, the robot learns a pronking gait to walk forward at the desired velocity. After succeeding at this task, we tested the robustness of the algorithms by repeatedly knocking the robot off of its feet with a large pole, shown in Figure 8. Within 10 minutes of additional online learning, the robot adapts to withstand pushes or quickly roll back on its feet. In comparison, SAC quickly learns to roll off its back but fails to stand up or walk given the small data budget.

3.2 UR5 Multi-Object Visual Pick and Place

Figure 5: UR5 Multi-Object Visual Pick and Place. This task requires learning to locate three ball objects from third-person camera images, grasp them, and move them into the other bin. The arm is free to move within and above the bins, and sparse rewards are given for grasping a ball and for dropping it in the opposite bin. The environment requires the world model to learn multi-object dynamics in the real world, and the sparse reward structure poses a challenge for policy optimization. Dreamer overcomes the challenges of visual localization and sparse rewards on this task, learning a successful strategy within a few hours of autonomous operation.

Common in warehouse and logistics environments, pick and place tasks require a robot manipulator to transport items from one bin into another. Figure 5 shows a successful pick and place cycle of this task. The task is challenging because of sparse rewards, the need to infer object positions from pixels, and the difficult dynamics of multiple moving objects. The sensory inputs consist of proprioceptive readings (joint angles, gripper position, end effector Cartesian position) and a third-person RGB image of the scene. Successfully grasping one of the 3 objects, detected by partial gripper closure, results in a +1 reward, releasing the object in the same bin gives a -1 reward, and placing it in the opposite bin gives a +10 reward.
We control the UR5 robot from Universal Robots at 2 Hz. Actions are discrete for moving the end effector in increments along the X, Y, and Z axes and for toggling the gripper state. Movement in the Z axis is only enabled while holding an object, and the gripper automatically opens once above the correct bin. We estimate human teleoperation performance by recording 3 demonstrators for 20 minutes each, controlling the UR5 with a joystick.
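For concreteness, here is a minimal sketch of the discrete action set and sparse reward logic described above. It is our own illustration: the increment size and the grasp/release predicates are assumptions, not the paper's implementation.

```python
import numpy as np

STEP = 0.02  # end-effector increment in meters (an illustrative assumption)

# Discrete action set: +/- increments along X, Y, Z plus a gripper toggle.
ACTIONS = {
    0: np.array([+STEP, 0.0, 0.0]), 1: np.array([-STEP, 0.0, 0.0]),
    2: np.array([0.0, +STEP, 0.0]), 3: np.array([0.0, -STEP, 0.0]),
    4: np.array([0.0, 0.0, +STEP]), 5: np.array([0.0, 0.0, -STEP]),
    6: "toggle_gripper",
}

def apply_action(ee_pos, holding_object, action_id):
    """Z-axis motion is only enabled while holding an object."""
    move = ACTIONS[action_id]
    if isinstance(move, np.ndarray):
        if move[2] != 0.0 and not holding_object:
            move = np.zeros(3)  # ignore Z motion when the gripper is empty
        ee_pos = ee_pos + move
    return ee_pos

def sparse_reward(grasped_now, released_now, same_bin):
    """+1 for a detected grasp, -1 for releasing in the same bin,
    +10 for placing in the opposite bin, 0 otherwise."""
    if grasped_now:
        return 1.0
    if released_now:
        return -1.0 if same_bin else 10.0
    return 0.0
```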
Dreamer reaches an average pick rate of 2.5 objects per minute within 8 hours. The robot initially struggles to learn as the reward signal is very sparse, but begins to gradually improve after 2 hours of training. The robot first learns to localize the objects and toggles the gripper when near an object. Over time, grasping becomes precise and the robot learns to push objects out of corners. Figure 5 shows the learning curves of Dreamer compared to Rainbow DQN, PPO, and the human baseline. Both Rainbow DQN and PPO only learn the short-sighted behavior of grasping and immediately dropping objects in the same bin. In contrast, Dreamer approaches human-level teleoperation performance after 8 hours. We hypothesize that Rainbow DQN and PPO fail because they require larger amounts of experience, which is not feasible for us to collect in the real world.

3.3 XArm Visual Pick and Place

Figure 6: XArm Visual Pick and Place. The XArm is an affordable robot arm that operates slower than the UR5. To demonstrate successful learning on this robot, we use a third-person RealSense camera with RGB and depth modalities, as well as proprioceptive inputs for the robot arm, requiring the world model to learn sensor fusion. The pick and place task uses a soft object. While soft objects would be challenging to model accurately in a simulator, Dreamer avoids this issue by directly learning on the real robot without a simulator. While Rainbow and PPO using R3M visual embeddings converge to the local optimum of grasping and ungrasping the object in the same bin, Dreamer learns a successful pick and place policy from sparse rewards in under 10 hours.

While the UR5 robot is a high-performance industrial robot, the XArm is an accessible low-cost 7 DOF manipulator, which we control at approximately 0.5 Hz. Similar to Section 3.2, the task requires localizing and grasping a soft object and moving it from one bin to another and back, shown in Figure 6. We connect the object to the gripper with a string, which makes it less likely for the object to get stuck in corners at the cost of more complex dynamics. The sparse reward, discrete action space, and observation space match the UR5 setup except for the addition of depth image observations.
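Since the observation space adds a depth image, a minimal sketch of stacking the RGB and depth modalities into a single encoder input is shown below; the normalization range is an assumption of ours, not the paper's preprocessing.

```python
import numpy as np

def make_observation(rgb: np.ndarray, depth: np.ndarray,
                     max_depth: float = 2.0) -> np.ndarray:
    """Stack the RGB image and a normalized depth image into one
    multi-channel input for the world-model encoder."""
    depth = np.clip(depth, 0.0, max_depth) / max_depth  # meters -> [0, 1]
    return np.concatenate([rgb.astype(np.float32) / 255.0,
                           depth[..., None]], axis=-1)
```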
Dreamer learns a policy that enables the XArm to achieve an average pick rate of 3.1 objects per minute within 10 hours, which is comparable to human performance on this task. Figure 6 shows that Dreamer learns to solve the task within 10 hours, whereas the Rainbow algorithm, a top model-free algorithm for discrete control from pixels, fails to learn. We additionally compare Dreamer against a PPO baseline that utilizes R3M (Nair et al., 2022) pretrained visual embeddings for the state, but notice no improvement in performance. Interestingly, we observed that Dreamer sometimes learns to use the string to pull the object out of a corner before grasping it, demonstrating multi-modal behaviors. Moreover, when lighting conditions change drastically (such as sharp shadows during sunrise), performance initially collapses, but Dreamer then adapts to the changing conditions and exceeds its previous performance after a few hours of additional training, as reported in Appendix A.

3.4 Sphero Navigation

We evaluate Dreamer on a visual navigation task that requires maneuvering a wheeled robot to a fixed goal location given only RGB images as input. We use the Sphero Ollie robot, a cylindrical robot with two controllable motors, which we control through continuous torque commands at 2 Hz. Because the robot is symmetric and only has access to image observations, it has to infer the heading direction from the history of observations. The robot is provided with a dense reward equal to the negative L2 distance to the goal, which is computed using an oracle vision pipeline that detects the Sphero's position (this information is not provided to the agent).
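A minimal sketch of this dense reward follows. The tracker stub is a placeholder of ours standing in for the oracle vision pipeline, which the paper does not describe in code.

```python
import numpy as np

def detect_sphero(frame: np.ndarray) -> np.ndarray:
    """Placeholder for the oracle vision pipeline: here simply the brightest
    pixel, normalized to [0, 1]^2 (not the paper's actual tracker)."""
    row, col = np.unravel_index(np.argmax(frame.sum(axis=-1)), frame.shape[:2])
    return np.array([col, row], np.float32) / np.array(frame.shape[1::-1],
                                                       np.float32)

def sphero_reward(frame: np.ndarray, goal_xy: np.ndarray) -> float:
    """Dense reward: negative L2 distance between the tracked robot
    position and the fixed goal, in units of the arena size."""
    return -float(np.linalg.norm(detect_sphero(frame) - goal_xy))
```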
As the goal is fixed, we end the episode after 100 environment steps and randomize the robot's position through a sequence of high-power random motor actions.

In 2 hours, Dreamer learns to quickly and consistently navigate to the goal and stay near it for the remainder of the episode. As shown in Figure 7, Dreamer achieves an average distance to the goal of 0.15, measured in units of the area size and averaged across time steps. We find that DrQv2, a model-free algorithm specifically designed for continuous control from pixels, achieves similar performance. This result matches the simulated experiments of Yarats et al. (2021), which showed the two algorithms to perform similarly on continuous control tasks from images.

4 Related Work

Existing work on robot learning commonly leverages large amounts of simulated experience before deploying to the real world (Rusu et al., 2016; Peng et al., 2018; OpenAI et al., 2018; Lee et al., 2020; Irpan et al., 2020; Kumar et al., 2021; Siekmann et al., 2021; Escontrela et al., 2022), leverage fleets of robots to collect experience datasets (Kalashnikov et al., 2018; Dasari et al., 2019; Kalashnikov
] } ] } ], "para_blocks": [ { "type": "image", "bbox": [ 107, 53, 500, 137 ], "blocks": [ { "type": "image_body", "bbox": [ 107, 53, 500, 137 ], "group_id": 0, "lines": [ { "bbox": [ 107, 53, 500, 137 ], "spans": [ { "bbox": [ 107, 53, 500, 137 ], "score": 0.957, "type": "image", "image_path": "dfce202941b6d7b7a3b4e91b152da625264b3b1c43837193ab53e137e11b01f3.jpg" } ] } ], "index": 1, "virtual_lines": [ { "bbox": [ 107, 53, 500, 81.0 ], "spans": [], "index": 0 }, { "bbox": [ 107, 81.0, 500, 109.0 ], "spans": [], "index": 1 }, { "bbox": [ 107, 109.0, 500, 137.0 ], "spans": [], "index": 2 } ] }, { "type": "image_caption", "bbox": [ 106, 151, 505, 243 ], "group_id": 0, "lines": [ { "bbox": [ 106, 151, 506, 165 ], "spans": [ { "bbox": [ 106, 151, 506, 165 ], "score": 1.0, "content": "Figure 6: XArm Visual Pick and Place The XArm is an affordable robot arm that operates slower", "type": "text" } ], "index": 3 }, { "bbox": [ 106, 164, 505, 176 ], "spans": [ { "bbox": [ 106, 164, 505, 176 ], "score": 1.0, "content": "than the UR5. To demonstrate successful learning on this robot, we use a third-person RealSense", "type": "text" } ], "index": 4 }, { "bbox": [ 105, 174, 505, 189 ], "spans": [ { "bbox": [ 105, 174, 505, 189 ], "score": 1.0, "content": "camera with RGB and depth modalities, as well as proprioceptive inputs for the robot arm, requiring", "type": "text" } ], "index": 5 }, { "bbox": [ 106, 186, 505, 199 ], "spans": [ { "bbox": [ 106, 186, 505, 199 ], "score": 1.0, "content": "the world model to learn sensor fusion. The pick and place task uses a soft object. While soft", "type": "text" } ], "index": 6 }, { "bbox": [ 105, 196, 506, 211 ], "spans": [ { "bbox": [ 105, 196, 506, 211 ], "score": 1.0, "content": "objects would be challenging to model accurately in a simulator, Dreamer avoids this issue by", "type": "text" } ], "index": 7 }, { "bbox": [ 106, 208, 505, 221 ], "spans": [ { "bbox": [ 106, 208, 505, 221 ], "score": 1.0, "content": "directly learning on the real robot without a simulator. While Rainbow and PPO using R3M visual", "type": "text" } ], "index": 8 }, { "bbox": [ 105, 220, 506, 233 ], "spans": [ { "bbox": [ 105, 220, 506, 233 ], "score": 1.0, "content": "embeddings converge to the local optimum of grasping and ungrasping the object in the same bin,", "type": "text" } ], "index": 9 }, { "bbox": [ 106, 231, 466, 244 ], "spans": [ { "bbox": [ 106, 231, 466, 244 ], "score": 1.0, "content": "Dreamer learns a successful pick and place policy from sparse rewards in under 10 hours.", "type": "text" } ], "index": 10 } ], "index": 6.5 } ], "index": 3.75 }, { "type": "text", "bbox": [ 107, 252, 505, 310 ], "lines": [], "index": 13, "bbox_fs": [ 105, 252, 506, 311 ], "lines_deleted": true }, { "type": "text", "bbox": [ 106, 315, 505, 444 ], "lines": [ { "bbox": [ 106, 315, 506, 329 ], "spans": [ { "bbox": [ 106, 315, 506, 329 ], "score": 1.0, "content": "Dreamer learns a policy that enables the XArm to achieve an average pick rate of 3.1 objects per", "type": "text" } ], "index": 16 }, { "bbox": [ 106, 328, 506, 340 ], "spans": [ { "bbox": [ 106, 328, 506, 340 ], "score": 1.0, "content": "minute in 10 hours of time, which is comparable to human performance on this task. 
Dreamer learns a policy that enables the XArm to achieve an average pick rate of 3.1 objects per minute within 10 hours of training, which is comparable to human performance on this task. Figure 6 shows that Dreamer learns to solve the task within 10 hours, whereas the Rainbow algorithm, a top model-free algorithm for discrete control from pixels, fails to learn. We additionally compare Dreamer against a PPO baseline that utilizes R3M (Nair et al., 2022) pretrained visual embeddings for the state, but observe no improvement in performance. Interestingly, we observed that Dreamer sometimes learns to use the string to pull the object out of a corner before grasping it, demonstrating multi-modal behaviors. Moreover, we observed that when lighting conditions change drastically (such as sharp shadows during sunrise), performance initially collapses, but Dreamer then adapts to the changing conditions and exceeds its previous performance after a few hours of additional training, as reported in Appendix A.
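Since the pick rate is the evaluation metric here, a minimal sketch of how it can be computed, assuming successful picks are logged as timestamps in minutes (the logging format is our assumption, not the paper's):

```python
def pick_rate(pick_times_min, horizon_min):
    # Average successful picks per minute over the first `horizon_min`
    # minutes, given a list of pick timestamps (in minutes).
    picks = [t for t in pick_times_min if t <= horizon_min]
    return len(picks) / horizon_min
```

For example, 31 logged picks within a 10-minute evaluation window give a rate of 3.1 objects per minute.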
3.4 Sphero Navigation

We evaluate Dreamer on a visual navigation task that requires maneuvering a wheeled robot to a fixed goal location given only RGB images as input. We use the Sphero Ollie robot, a cylindrical robot with two controllable motors, which we control through continuous torque commands at 2 Hz. Because the robot is symmetric and only has access to image observations, it has to infer its heading direction from the history of observations. The robot is provided with a dense reward equal to the negative L2 distance to the goal, computed using an oracle vision pipeline that detects the Sphero's position (this information is not provided to the agent). As the goal is fixed, we end the episode after 100 environment steps and randomize the robot's position through a sequence of high-power random motor actions.
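A minimal sketch of this data-collection protocol, assuming hypothetical `robot`, `tracker`, and `policy` interfaces and made-up goal coordinates (none of these names or values come from the paper's code):

```python
import numpy as np

GOAL = np.array([0.5, 0.5])   # fixed goal in tracker coordinates (hypothetical values)
EPISODE_STEPS = 100           # episode length from the setup above
RESET_STEPS = 20              # length of the randomizing action sequence (assumed)

def reward(tracked_position):
    # Dense reward: negative L2 distance between the externally tracked
    # robot position and the fixed goal. The tracked position is used
    # only to compute the reward and is never given to the agent.
    return -float(np.linalg.norm(tracked_position - GOAL))

def collect_episode(robot, tracker, policy):
    # One episode of the protocol described above, followed by the
    # randomizing reset. All three arguments are hypothetical interfaces.
    transitions = []
    image = robot.camera_image()      # RGB image: the agent's only observation
    for _ in range(EPISODE_STEPS):
        action = policy(image)        # continuous torques for the two motors, at 2 Hz
        image = robot.step(action)
        transitions.append((image, action, reward(tracker.position())))
    for _ in range(RESET_STEPS):      # scatter the robot with high-power random actions
        robot.step(np.random.uniform(-1.0, 1.0, size=2))
    return transitions
```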
In 2 hours, Dreamer learns to quickly and consistently navigate to the goal and stay near the goal for the remainder of the episode. As shown in Figure 7, Dreamer achieves an average distance to the goal of 0.15, measured in units of the area size and averaged across time steps. We find that DrQv2, a model-free algorithm specifically designed for continuous control from pixels, achieves similar performance. This result matches the simulated experiments of Yarats et al. (2021), which showed the two algorithms performing similarly on continuous control tasks from images.
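The reported 0.15 is exactly this normalized quantity; a sketch of the metric, assuming positions are logged by the same tracking pipeline (the array shape is our assumption):

```python
import numpy as np

def average_goal_distance(positions, goal, area_size):
    # Mean distance to the goal over an episode, normalized by the size
    # of the play area. `positions` is a (T, 2) array of tracked robot
    # positions; lower is better, and 0.15 corresponds to the result above.
    distances = np.linalg.norm(np.asarray(positions) - np.asarray(goal), axis=-1)
    return float(distances.mean() / area_size)
```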
4 Related Work

Existing work on robot learning commonly leverages large amounts of simulated experience before deploying to the real world (Rusu et al., 2016; Peng et al., 2018; OpenAI et al., 2018; Lee et al., 2020; Irpan et al., 2020; Kumar et al., 2021; Siekmann et al., 2021; Escontrela et al., 2022), leverages fleets of robots to collect experience datasets (Kalashnikov et al., 2018; Dasari et al., 2019; Kalashnikov et al., 2021; Ebert et al., 2021), or relies on external information such as human expert demonstrations or task priors to achieve sample-efficient learning (Xie et al., 2019; Schoettler et al., 2019; James et al., 2021; Shah and Levine, 2022; Bohez et al., 2022; Sivakumar et al., 2022). However, designing simulated tasks and collecting expert demonstrations is time-consuming. Moreover, many of these approaches require specialized algorithms for leveraging offline experience, demonstrations, or simulator inaccuracies. In contrast, our experiments show that learning end-to-end from rewards in the physical world is feasible for a diverse range of tasks through world models.

Figure 7: Sphero Navigation. This task requires the Sphero robot to navigate to a goal location given a top-down RGB image as the only input. The task requires the robot to localize itself from raw pixels, to infer its orientation from the sequence of past images because it is ambiguous from a single image, and to control under-actuated motors that require building up momentum over time. Dreamer learns a successful policy on this task in under 2 hours.
Relatively few works have demonstrated end-to-end learning from scratch in the physical world. Visual Foresight (Finn et al., 2016; Finn and Levine, 2017; Ebert et al., 2018) learns a video prediction model to solve real-world tasks by online planning, but is limited to short-horizon tasks and requires generating images during planning, making it computationally expensive. Yang et al. (2019; 2022) learn quadruped locomotion through a model-based approach by predicting foot placements and leveraging a domain-specific controller to achieve them. Ha et al. (2020) learn a quadruped walking policy by relying on a scripted reset policy, so the robot does not have to learn to stand up. SOLAR (Zhang et al., 2019) learns a latent dynamics model from images and demonstrates reaching and pushing with a robot arm. Nagabandi et al. (2019) learn manipulation policies by planning through a learned dynamics model from state observations.
In comparison, our experiments show successful learning across 4 challenging robot tasks that cover a wide range of difficulties and sensory modalities, with a single learning algorithm and hyperparameter setting.

5 Discussion

We applied Dreamer to physical robot learning, finding that modern world models enable sample-efficient robot learning for a range of tasks, from scratch in the real world and without simulators. We also find that the approach is generally applicable in that it can solve robot locomotion, manipulation, and navigation tasks without changing hyperparameters. Dreamer taught a quadruped robot to roll off its back, stand up, and walk in 1 hour from scratch, which previously required either extensive training in simulation followed by transfer to the real world, or parameterized trajectory generators and given reset policies. We also demonstrate learning to pick and place objects from pixels and sparse rewards on two robot arms in 8–10 hours.

Limitations While Dreamer shows promising results, learning on hardware over many hours creates wear on robots that may require human intervention or repair. Additionally, more work is required to explore the limits of Dreamer and our baselines by training for a longer time.
Finally, we see tackling more challenging tasks, potentially by combining the benefits of fast real-world learning with those of simulators, as an impactful future research direction.

Acknowledgements We thank Stephen James and Justin Kerr for helpful suggestions and for help with printing the protective shell of the quadruped robot. We thank Ademi Adeniji for help with setting up the XArm robot and Raven Huang for help with setting up the UR5 robot. This work was supported in part by an NSF Fellowship, NSF NRI #2024675, and the Vanier Canada Graduate Scholarship.
References

D. Hafner, T. Lillicrap, J. Ba, and M. Norouzi. Dream to control: Learning behaviors by latent imagination. arXiv preprint arXiv:1912.01603, 2019.

D. Hafner, T. Lillicrap, M. Norouzi, and J. Ba. Mastering Atari with discrete world models. arXiv preprint arXiv:2010.02193, 2020.

Y. Gal, R. McAllister, and C. E. Rasmussen. Improving PILCO with Bayesian neural network dynamics models. In Data-Efficient Machine Learning Workshop, ICML, 2016.

F. Ebert, C. Finn, S. Dasari, A. Xie, A. Lee, and S. Levine. Visual foresight: Model-based deep reinforcement learning for vision-based robotic control. arXiv preprint arXiv:1812.00568, 2018.
R. Sekar, O. Rybkin, K. Daniilidis, P. Abbeel, D. Hafner, and D. Pathak. Planning to explore via self-supervised world models. In International Conference on Machine Learning, pages 8583–8592. PMLR, 2020.

T. Yu, A. Kumar, R. Rafailov, A. Rajeswaran, S. Levine, and C. Finn. COMBO: Conservative offline model-based policy optimization. Advances in Neural Information Processing Systems, 34:28954–28967, 2021.

D. Hafner, T. Lillicrap, I. Fischer, R. Villegas, D. Ha, H. Lee, and J. Davidson. Learning latent dynamics for planning from pixels. arXiv preprint arXiv:1811.04551, 2018.

D. P. Kingma and M. Welling. Auto-encoding variational Bayes. arXiv preprint arXiv:1312.6114, 2013.

D. J. Rezende, S. Mohamed, and D. Wierstra. Stochastic backpropagation and approximate inference in deep generative models. arXiv preprint arXiv:1401.4082, 2014.

R. S. Sutton and A. G. Barto. Reinforcement Learning: An Introduction. MIT Press, 2018.

R. J. Williams. Simple statistical gradient-following algorithms for connectionist reinforcement learning. Machine Learning, 8(3-4):229–256, 1992.

M. Henaff, A. Canziani, and Y. LeCun. Model-predictive policy learning with uncertainty regularization for driving in dense traffic. arXiv preprint arXiv:1901.02705, 2019.
D. P. Kingma and J. Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.

V. Mnih, K. Kavukcuoglu, D. Silver, A. A. Rusu, J. Veness, M. G. Bellemare, A. Graves, M. Riedmiller, A. K. Fidjeland, G. Ostrovski, et al. Human-level control through deep reinforcement learning. Nature, 518(7540):529, 2015.

T. P. Lillicrap, J. J. Hunt, A. Pritzel, N. Heess, T. Erez, Y. Tassa, D. Silver, and D. Wierstra. Continuous control with deep reinforcement learning. arXiv preprint arXiv:1509.02971, 2015.

T. Haarnoja, A. Zhou, P. Abbeel, and S. Levine. Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor. arXiv preprint arXiv:1801.01290, 2018a.

T. Haarnoja, A. Zhou, K. Hartikainen, G. Tucker, S. Ha, J. Tan, V. Kumar, H. Zhu, A. Gupta, P. Abbeel, et al. Soft actor-critic algorithms and applications. arXiv preprint arXiv:1812.05905, 2018b.

M. Hessel, J. Modayil, H. Van Hasselt, T. Schaul, G. Ostrovski, W. Dabney, D. Horgan, B. Piot, M. Azar, and D. Silver. Rainbow: Combining improvements in deep reinforcement learning. In Thirty-Second AAAI Conference on Artificial Intelligence, 2018.
J. Schrittwieser, I. Antonoglou, T. Hubert, K. Simonyan, L. Sifre, S. Schmitt, A. Guez, E. Lockhart, D. Hassabis, T. Graepel, et al. Mastering Atari, Go, chess and shogi by planning with a learned model. arXiv preprint arXiv:1911.08265, 2019.

J. Schulman, F. Wolski, P. Dhariwal, A. Radford, and O. Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017.

D. Yarats, R. Fergus, A. Lazaric, and L. Pinto. Mastering visual continuous control: Improved data-augmented reinforcement learning. arXiv preprint arXiv:2107.09645, 2021.

A. A. Rusu, M. Vecerik, T. Rothörl, N. Heess, R. Pascanu, and R. Hadsell. Sim-to-real robot learning from pixels with progressive nets, 2016.

X. B. Peng, M. Andrychowicz, W. Zaremba, and P. Abbeel. Sim-to-real transfer of robotic control with dynamics randomization. In 2018 IEEE International Conference on Robotics and Automation (ICRA), pages 1–8, May 2018. doi:10.1109/ICRA.2018.8460528.
N. Rudin, D. Hoeller, P. Reist, and M. Hutter. Learning to walk in minutes using massively parallel deep reinforcement learning, 2021.

J. Lee, J. Hwangbo, L. Wellhausen, V. Koltun, and M. Hutter. Learning quadrupedal locomotion over challenging terrain. Science Robotics, 5(47), Oct 2020. doi:10.1126/scirobotics.abc5986. URL https://doi.org/10.1126%2Fscirobotics.abc5986.

Y. Yang, K. Caluwaerts, A. Iscen, T. Zhang, J. Tan, and V. Sindhwani. Data efficient reinforcement learning for legged robots, 2019.

S. Nair, A. Rajeswaran, V. Kumar, C. Finn, and A. Gupta. R3M: A universal visual representation for robot manipulation, 2022.

OpenAI, M. Andrychowicz, B. Baker, M. Chociej, R. Jozefowicz, B. McGrew, J. Pachocki, A. Petron, M. Plappert, G. Powell, A. Ray, J. Schneider, S. Sidor, J. Tobin, P. Welinder, L. Weng, and W. Zaremba. Learning dexterous in-hand manipulation, 2018.

A. Irpan, C. Harris, J. Ibarz, K. Rao, M. Khansari, and S. Levine. RL-CycleGAN: Improving deep-RL robotics with simulation-to-real. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2020), 2020.
A. Kumar, Z. Fu, D. Pathak, and J. Malik. RMA: Rapid motor adaptation for legged robots, 2021.

J. Siekmann, K. Green, J. Warila, A. Fern, and J. Hurst. Blind bipedal stair traversal via sim-to-real reinforcement learning, 2021.

A. Escontrela, X. B. Peng, W. Yu, T. Zhang, A. Iscen, K. Goldberg, and P. Abbeel. Adversarial motion priors make good substitutes for complex reward functions, 2022.

D. Kalashnikov, A. Irpan, P. Pastor, J. Ibarz, A. Herzog, E. Jang, D. Quillen, E. Holly, M. Kalakrishnan, V. Vanhoucke, and S. Levine. QT-Opt: Scalable deep reinforcement learning for vision-based robotic manipulation, 2018.

S. Dasari, F. Ebert, S. Tian, S. Nair, B. Bucher, K. Schmeckpeper, S. Singh, S. Levine, and C. Finn. RoboNet: Large-scale multi-robot learning, 2019.

D. Kalashnikov, J. Varley, Y. Chebotar, B. Swanson, R. Jonschkowski, C. Finn, S. Levine, and K. Hausman. MT-Opt: Continuous multi-task robotic reinforcement learning at scale, 2021.
Mt-opt: Continuous multi-task robotic reinforcement learning at scale, 2021.", "type": "text", "cross_page": true } ], "index": 38, "is_list_end_line": true }, { "bbox": [ 105, 667, 507, 681 ], "spans": [ { "bbox": [ 105, 667, 507, 681 ], "score": 1.0, "content": "F. Ebert, Y. Yang, K. Schmeckpeper, B. Bucher, G. Georgakis, K. Daniilidis, C. Finn, and S. Levine.", "type": "text", "cross_page": true } ], "index": 39, "is_list_start_line": true }, { "bbox": [ 115, 679, 470, 693 ], "spans": [ { "bbox": [ 115, 679, 470, 693 ], "score": 1.0, "content": "Bridge data: Boosting generalization of robotic skills with cross-domain datasets, 2021.", "type": "text", "cross_page": true } ], "index": 40, "is_list_end_line": true }, { "bbox": [ 103, 698, 507, 713 ], "spans": [ { "bbox": [ 103, 698, 507, 713 ], "score": 1.0, "content": "A. Xie, F. Ebert, S. Levine, and C. Finn. Improvisation through physical understanding: Using novel", "type": "text", "cross_page": true } ], "index": 41, "is_list_start_line": true }, { "bbox": [ 115, 711, 430, 725 ], "spans": [ { "bbox": [ 115, 711, 430, 725 ], "score": 1.0, "content": "objects as tools with visual foresight. arXiv preprint arXiv:1904.05538, 2019.", "type": "text", "cross_page": true } ], "index": 42, "is_list_end_line": true }, { "bbox": [ 105, 72, 506, 86 ], "spans": [ { "bbox": [ 105, 72, 506, 86 ], "score": 1.0, "content": "G. Schoettler, A. Nair, J. Luo, S. Bahl, J. A. Ojea, E. Solowjow, and S. Levine. Deep reinforcement", "type": "text", "cross_page": true } ], "index": 0, "is_list_start_line": true }, { "bbox": [ 115, 84, 446, 98 ], "spans": [ { "bbox": [ 115, 84, 446, 98 ], "score": 1.0, "content": "learning for industrial insertion tasks with visual inputs and natural rewards, 2019.", "type": "text", "cross_page": true } ], "index": 1, "is_list_end_line": true }, { "bbox": [ 105, 105, 506, 118 ], "spans": [ { "bbox": [ 105, 105, 506, 118 ], "score": 1.0, "content": "S. James, K. Wada, T. Laidlow, and A. J. Davison. Coarse-to-fine q-attention: Efficient learning for", "type": "text", "cross_page": true } ], "index": 2, "is_list_start_line": true }, { "bbox": [ 115, 118, 326, 129 ], "spans": [ { "bbox": [ 115, 118, 326, 129 ], "score": 1.0, "content": "visual robotic manipulation via discretisation, 2021.", "type": "text", "cross_page": true } ], "index": 3, "is_list_end_line": true }, { "bbox": [ 103, 136, 507, 152 ], "spans": [ { "bbox": [ 103, 136, 507, 152 ], "score": 1.0, "content": "D. Shah and S. Levine. Viking: Vision-based kilometer-scale navigation with geographic hints, 2022.", "type": "text", "cross_page": true } ], "index": 4, "is_list_start_line": true }, { "bbox": [ 105, 158, 506, 171 ], "spans": [ { "bbox": [ 105, 158, 506, 171 ], "score": 1.0, "content": "S. Bohez, S. Tunyasuvunakool, P. Brakel, F. Sadeghi, L. Hasenclever, Y. Tassa, E. Parisotto,", "type": "text", "cross_page": true } ], "index": 5, "is_list_start_line": true }, { "bbox": [ 114, 169, 507, 183 ], "spans": [ { "bbox": [ 114, 169, 507, 183 ], "score": 1.0, "content": "J. Humplik, T. Haarnoja, R. Hafner, M. Wulfmeier, M. Neunert, B. Moran, N. Siegel, A. Huber,", "type": "text", "cross_page": true } ], "index": 6 }, { "bbox": [ 113, 179, 508, 196 ], "spans": [ { "bbox": [ 113, 179, 508, 196 ], "score": 1.0, "content": "F. Romano, N. Batchelor, F. Casarini, J. Merel, R. Hadsell, and N. Heess. 
Imitate and repurpose:", "type": "text", "cross_page": true } ], "index": 7 }, { "bbox": [ 115, 193, 447, 207 ], "spans": [ { "bbox": [ 115, 193, 447, 207 ], "score": 1.0, "content": "Learning reusable robot movement skills from human and animal behaviors, 2022.", "type": "text", "cross_page": true } ], "index": 8, "is_list_end_line": true }, { "bbox": [ 105, 212, 505, 228 ], "spans": [ { "bbox": [ 105, 212, 505, 228 ], "score": 1.0, "content": "A. Sivakumar, K. Shaw, and D. Pathak. Robotic telekinesis: Learning a robotic hand imitator by", "type": "text", "cross_page": true } ], "index": 9, "is_list_start_line": true }, { "bbox": [ 115, 225, 264, 238 ], "spans": [ { "bbox": [ 115, 225, 264, 238 ], "score": 1.0, "content": "watching humans on youtube, 2022.", "type": "text", "cross_page": true } ], "index": 10, "is_list_end_line": true }, { "bbox": [ 105, 245, 506, 259 ], "spans": [ { "bbox": [ 105, 245, 506, 259 ], "score": 1.0, "content": "C. Finn, I. Goodfellow, and S. Levine. Unsupervised learning for physical interaction through video", "type": "text", "cross_page": true } ], "index": 11, "is_list_start_line": true }, { "bbox": [ 114, 257, 464, 271 ], "spans": [ { "bbox": [ 114, 257, 464, 271 ], "score": 1.0, "content": "prediction. In Advances in neural information processing systems, pages 64–72, 2016.", "type": "text", "cross_page": true } ], "index": 12, "is_list_end_line": true }, { "bbox": [ 104, 276, 506, 292 ], "spans": [ { "bbox": [ 104, 276, 506, 292 ], "score": 1.0, "content": "C. Finn and S. Levine. Deep visual foresight for planning robot motion. In Robotics and Automation", "type": "text", "cross_page": true } ], "index": 13, "is_list_start_line": true }, { "bbox": [ 114, 289, 441, 304 ], "spans": [ { "bbox": [ 114, 289, 441, 304 ], "score": 1.0, "content": "(ICRA), 2017 IEEE International Conference on, pages 2786–2793. IEEE, 2017.", "type": "text", "cross_page": true } ], "index": 14, "is_list_end_line": true }, { "bbox": [ 105, 308, 506, 325 ], "spans": [ { "bbox": [ 105, 308, 506, 325 ], "score": 1.0, "content": "Y. Yang, T. Zhang, E. Coumans, J. Tan, and B. Boots. Fast and efficient locomotion via learned gait", "type": "text", "cross_page": true } ], "index": 15, "is_list_start_line": true }, { "bbox": [ 115, 322, 425, 335 ], "spans": [ { "bbox": [ 115, 322, 425, 335 ], "score": 1.0, "content": "transitions. In Conference on Robot Learning, pages 773–783. PMLR, 2022.", "type": "text", "cross_page": true } ], "index": 16, "is_list_end_line": true }, { "bbox": [ 105, 342, 506, 356 ], "spans": [ { "bbox": [ 105, 342, 506, 356 ], "score": 1.0, "content": "S. Ha, P. Xu, Z. Tan, S. Levine, and J. Tan. Learning to walk in the real world with minimal human", "type": "text", "cross_page": true } ], "index": 17, "is_list_start_line": true }, { "bbox": [ 115, 353, 308, 369 ], "spans": [ { "bbox": [ 115, 353, 308, 369 ], "score": 1.0, "content": "effort. arXiv preprint arXiv:2002.08550, 2020.", "type": "text", "cross_page": true } ], "index": 18, "is_list_end_line": true }, { "bbox": [ 106, 375, 505, 387 ], "spans": [ { "bbox": [ 106, 375, 505, 387 ], "score": 1.0, "content": "M. Zhang, S. Vikram, L. Smith, P. Abbeel, M. Johnson, and S. Levine. Solar: deep structured", "type": "text", "cross_page": true } ], "index": 19, "is_list_start_line": true }, { "bbox": [ 115, 387, 506, 400 ], "spans": [ { "bbox": [ 115, 387, 506, 400 ], "score": 1.0, "content": "representations for model-based reinforcement learning. 
In International Conference on Machine", "type": "text", "cross_page": true } ], "index": 20 }, { "bbox": [ 114, 398, 182, 411 ], "spans": [ { "bbox": [ 114, 398, 182, 411 ], "score": 1.0, "content": "Learning, 2019.", "type": "text", "cross_page": true } ], "index": 21, "is_list_end_line": true }, { "bbox": [ 105, 418, 506, 432 ], "spans": [ { "bbox": [ 105, 418, 506, 432 ], "score": 1.0, "content": "A. Nagabandi, K. Konoglie, S. Levine, and V. Kumar. Deep dynamics models for learning dexterous", "type": "text", "cross_page": true } ], "index": 22, "is_list_start_line": true }, { "bbox": [ 115, 430, 199, 444 ], "spans": [ { "bbox": [ 115, 430, 199, 444 ], "score": 1.0, "content": "manipulation, 2019.", "type": "text", "cross_page": true } ], "index": 23, "is_list_end_line": true }, { "bbox": [ 105, 450, 505, 464 ], "spans": [ { "bbox": [ 105, 450, 505, 464 ], "score": 1.0, "content": "G. I. Parisi, R. Kemker, J. L. Part, C. Kanan, and S. Wermter. Continual lifelong learning with neural", "type": "text", "cross_page": true } ], "index": 24, "is_list_start_line": true }, { "bbox": [ 115, 461, 419, 475 ], "spans": [ { "bbox": [ 115, 461, 419, 475 ], "score": 1.0, "content": "networks: A review. Neural Networks, 113:54–71, 2019. ISSN 0893-6080.", "type": "text", "cross_page": true } ], "index": 25, "is_list_end_line": true }, { "bbox": [ 105, 482, 506, 496 ], "spans": [ { "bbox": [ 105, 482, 506, 496 ], "score": 1.0, "content": "T. Miki, J. Lee, J. Hwangbo, L. Wellhausen, V. Koltun, and M. Hutter. Learning robust perceptive", "type": "text", "cross_page": true } ], "index": 26, "is_list_start_line": true }, { "bbox": [ 114, 493, 506, 508 ], "spans": [ { "bbox": [ 114, 493, 506, 508 ], "score": 1.0, "content": "locomotion for quadrupedal robots in the wild. Science Robotics, 7(62), jan 2022. doi:10.1126/", "type": "text", "cross_page": true } ], "index": 27 }, { "bbox": [ 115, 506, 201, 519 ], "spans": [ { "bbox": [ 115, 506, 201, 519 ], "score": 1.0, "content": "scirobotics.abk2822.", "type": "text", "cross_page": true } ], "index": 28, "is_list_end_line": true }, { "bbox": [ 103, 524, 506, 542 ], "spans": [ { "bbox": [ 103, 524, 506, 542 ], "score": 1.0, "content": "L. Smith, J. C. Kew, X. B. Peng, S. Ha, J. Tan, and S. Levine. Legged robots that keep on learning:", "type": "text", "cross_page": true } ], "index": 29, "is_list_start_line": true }, { "bbox": [ 115, 538, 342, 552 ], "spans": [ { "bbox": [ 115, 538, 342, 552 ], "score": 1.0, "content": "Fine-tuning locomotion policies in the real world, 2021.", "type": "text", "cross_page": true } ], "index": 30, "is_list_end_line": true }, { "bbox": [ 105, 557, 506, 573 ], "spans": [ { "bbox": [ 105, 557, 506, 573 ], "score": 1.0, "content": "T.-Y. Yang, T. Zhang, L. Luu, S. Ha, J. Tan, and W. Yu. Safe reinforcement learning for legged", "type": "text", "cross_page": true } ], "index": 31, "is_list_start_line": true }, { "bbox": [ 115, 570, 387, 584 ], "spans": [ { "bbox": [ 115, 570, 387, 584 ], "score": 1.0, "content": "locomotion, 2022. URL https://arxiv.org/abs/2203.02638.", "type": "text", "cross_page": true } ], "index": 32, "is_list_end_line": true }, { "bbox": [ 103, 589, 506, 606 ], "spans": [ { "bbox": [ 103, 589, 506, 606 ], "score": 1.0, "content": "S. Ha, P. Xu, Z. Tan, S. Levine, and J. Tan. 
Learning to walk in the real world with minimal human", "type": "text", "cross_page": true } ], "index": 33, "is_list_start_line": true }, { "bbox": [ 115, 603, 363, 616 ], "spans": [ { "bbox": [ 115, 603, 363, 616 ], "score": 1.0, "content": "effort, 2020. URL https://arxiv.org/abs/2002.08550.", "type": "text", "cross_page": true } ], "index": 34, "is_list_end_line": true }, { "bbox": [ 105, 623, 506, 636 ], "spans": [ { "bbox": [ 105, 623, 506, 636 ], "score": 1.0, "content": "L. Smith, I. Kostrikov, and S. Levine. A walk in the park: Learning to walk in 20 minutes with", "type": "text", "cross_page": true } ], "index": 35, "is_list_start_line": true }, { "bbox": [ 115, 635, 478, 649 ], "spans": [ { "bbox": [ 115, 635, 478, 649 ], "score": 1.0, "content": "model-free reinforcement learning, 2022. URL https://arxiv.org/abs/2208.07860.", "type": "text", "cross_page": true } ], "index": 36, "is_list_end_line": true }, { "bbox": [ 105, 655, 506, 669 ], "spans": [ { "bbox": [ 105, 655, 506, 669 ], "score": 1.0, "content": "S. Levine, P. Pastor, A. Krizhevsky, J. Ibarz, and D. Quillen. Learning hand-eye coordination for", "type": "text", "cross_page": true } ], "index": 37, "is_list_start_line": true }, { "bbox": [ 114, 666, 507, 682 ], "spans": [ { "bbox": [ 114, 666, 507, 682 ], "score": 1.0, "content": "robotic grasping with deep learning and large-scale data collection. The International Journal of", "type": "text", "cross_page": true } ], "index": 38 }, { "bbox": [ 114, 678, 294, 692 ], "spans": [ { "bbox": [ 114, 678, 294, 692 ], "score": 1.0, "content": "Robotics Research, 37(4-5):421–436, 2018.", "type": "text", "cross_page": true } ], "index": 39, "is_list_end_line": true }, { "bbox": [ 105, 699, 506, 713 ], "spans": [ { "bbox": [ 105, 699, 506, 713 ], "score": 1.0, "content": "L. Pinto and A. Gupta. Supersizing self-supervision: Learning to grasp from 50k tries and 700 robot", "type": "text", "cross_page": true } ], "index": 40, "is_list_start_line": true }, { "bbox": [ 114, 711, 169, 724 ], "spans": [ { "bbox": [ 114, 711, 169, 724 ], "score": 1.0, "content": "hours, 2015.", "type": "text", "cross_page": true } ], "index": 41, "is_list_end_line": true }, { "bbox": [ 105, 73, 505, 87 ], "spans": [ { "bbox": [ 105, 73, 505, 87 ], "score": 1.0, "content": "H. Ha and S. Song. Flingbot: The unreasonable effectiveness of dynamic manipulation for cloth", "type": "text", "cross_page": true } ], "index": 0, "is_list_start_line": true }, { "bbox": [ 115, 84, 315, 98 ], "spans": [ { "bbox": [ 115, 84, 315, 98 ], "score": 1.0, "content": "unfolding. Conference on Robot Learning, 2021.", "type": "text", "cross_page": true } ], "index": 1, "is_list_end_line": true }, { "bbox": [ 104, 103, 505, 119 ], "spans": [ { "bbox": [ 104, 103, 505, 119 ], "score": 1.0, "content": "S. James and A. J. Davison. Q-attention: Enabling efficient learning for vision-based robotic", "type": "text", "cross_page": true } ], "index": 2, "is_list_start_line": true }, { "bbox": [ 115, 116, 200, 129 ], "spans": [ { "bbox": [ 115, 116, 200, 129 ], "score": 1.0, "content": "manipulation, 2021.", "type": "text", "cross_page": true } ], "index": 3, "is_list_end_line": true }, { "bbox": [ 104, 134, 505, 150 ], "spans": [ { "bbox": [ 104, 134, 505, 150 ], "score": 1.0, "content": "E. Tzeng, C. Devin, J. Hoffman, C. Finn, P. Abbeel, S. Levine, K. Saenko, and T. Darrell. 
Adapting", "type": "text", "cross_page": true } ], "index": 4, "is_list_start_line": true }, { "bbox": [ 115, 147, 402, 162 ], "spans": [ { "bbox": [ 115, 147, 402, 162 ], "score": 1.0, "content": "deep visuomotor representations with weak pairwise constraints, 2015.", "type": "text", "cross_page": true } ], "index": 5, "is_list_end_line": true }, { "bbox": [ 104, 166, 506, 181 ], "spans": [ { "bbox": [ 104, 166, 506, 181 ], "score": 1.0, "content": "I. Akkaya, M. Andrychowicz, M. Chociej, M. Litwin, B. McGrew, A. Petron, A. Paino, M. Plappert,", "type": "text", "cross_page": true } ], "index": 6, "is_list_start_line": true }, { "bbox": [ 115, 178, 507, 192 ], "spans": [ { "bbox": [ 115, 178, 507, 192 ], "score": 1.0, "content": "G. Powell, R. Ribas, et al. Solving rubik’s cube with a robot hand. arXiv preprint arXiv:1910.07113,", "type": "text", "cross_page": true } ], "index": 7 }, { "bbox": [ 115, 190, 142, 203 ], "spans": [ { "bbox": [ 115, 190, 142, 203 ], "score": 1.0, "content": "2019.", "type": "text", "cross_page": true } ], "index": 8, "is_list_end_line": true }, { "bbox": [ 105, 208, 505, 223 ], "spans": [ { "bbox": [ 105, 208, 505, 223 ], "score": 1.0, "content": "M. P. Deisenroth, G. Neumann, J. Peters, et al. A survey on policy search for robotics. Foundations", "type": "text", "cross_page": true } ], "index": 9, "is_list_start_line": true }, { "bbox": [ 115, 221, 298, 235 ], "spans": [ { "bbox": [ 115, 221, 298, 235 ], "score": 1.0, "content": "and Trends in Robotics, 2(1–2):1–142, 2013.", "type": "text", "cross_page": true } ], "index": 10, "is_list_end_line": true }, { "bbox": [ 105, 241, 506, 255 ], "spans": [ { "bbox": [ 105, 241, 506, 255 ], "score": 1.0, "content": "K. Chua, R. Calandra, R. McAllister, and S. Levine. Deep reinforcement learning in a handful of", "type": "text", "cross_page": true } ], "index": 11, "is_list_start_line": true }, { "bbox": [ 114, 252, 507, 268 ], "spans": [ { "bbox": [ 114, 252, 507, 268 ], "score": 1.0, "content": "trials using probabilistic dynamics models. In Advances in Neural Information Processing Systems,", "type": "text", "cross_page": true } ], "index": 12 }, { "bbox": [ 114, 264, 218, 278 ], "spans": [ { "bbox": [ 114, 264, 218, 278 ], "score": 1.0, "content": "pages 4754–4765, 2018.", "type": "text", "cross_page": true } ], "index": 13, "is_list_end_line": true }, { "bbox": [ 104, 282, 506, 299 ], "spans": [ { "bbox": [ 104, 282, 506, 299 ], "score": 1.0, "content": "A. Nagabandi, G. Yang, T. Asmar, R. Pandya, G. Kahn, S. Levine, and R. S. Fearing. Learning", "type": "text", "cross_page": true } ], "index": 14, "is_list_start_line": true }, { "bbox": [ 114, 295, 487, 310 ], "spans": [ { "bbox": [ 114, 295, 487, 310 ], "score": 1.0, "content": "image-conditioned dynamics models for control of under-actuated legged millirobots, 2017.", "type": "text", "cross_page": true } ], "index": 15, "is_list_end_line": true }, { "bbox": [ 104, 314, 505, 329 ], "spans": [ { "bbox": [ 104, 314, 505, 329 ], "score": 1.0, "content": "P. Becker-Ehmck, M. Karl, J. Peters, and P. van der Smagt. Learning to fly via deep model-based", "type": "text", "cross_page": true } ], "index": 16, "is_list_start_line": true }, { "bbox": [ 114, 327, 376, 340 ], "spans": [ { "bbox": [ 114, 327, 376, 340 ], "score": 1.0, "content": "reinforcement learning. 
arXiv preprint arXiv:2003.08876, 2020.", "type": "text", "cross_page": true } ], "index": 17, "is_list_end_line": true }, { "bbox": [ 104, 345, 505, 360 ], "spans": [ { "bbox": [ 104, 345, 505, 360 ], "score": 1.0, "content": "F. Deng, I. Jang, and S. Ahn. Dreamerpro: Reconstruction-free model-based reinforcement learning", "type": "text", "cross_page": true } ], "index": 18, "is_list_start_line": true }, { "bbox": [ 115, 358, 416, 372 ], "spans": [ { "bbox": [ 115, 358, 416, 372 ], "score": 1.0, "content": "with prototypical representations. arXiv preprint arXiv:2110.14565, 2021.", "type": "text", "cross_page": true } ], "index": 19, "is_list_end_line": true }, { "bbox": [ 105, 377, 505, 392 ], "spans": [ { "bbox": [ 105, 377, 505, 392 ], "score": 1.0, "content": "M. Okada and T. Taniguchi. Dreaming: Model-based reinforcement learning by latent imagination", "type": "text", "cross_page": true } ], "index": 20, "is_list_start_line": true }, { "bbox": [ 114, 389, 506, 403 ], "spans": [ { "bbox": [ 114, 389, 506, 403 ], "score": 1.0, "content": "without reconstruction. In 2021 IEEE International Conference on Robotics and Automation", "type": "text", "cross_page": true } ], "index": 21 }, { "bbox": [ 115, 402, 275, 414 ], "spans": [ { "bbox": [ 115, 402, 275, 414 ], "score": 1.0, "content": "(ICRA), pages 4209–4215. IEEE, 2021.", "type": "text", "cross_page": true } ], "index": 22, "is_list_end_line": true }, { "bbox": [ 105, 420, 505, 435 ], "spans": [ { "bbox": [ 105, 420, 505, 435 ], "score": 1.0, "content": "H. Bharadhwaj, M. Babaeizadeh, D. Erhan, and S. Levine. Information prioritization through", "type": "text", "cross_page": true } ], "index": 23, "is_list_start_line": true }, { "bbox": [ 115, 433, 439, 446 ], "spans": [ { "bbox": [ 115, 433, 439, 446 ], "score": 1.0, "content": "empowerment in visual model-based rl. arXiv preprint arXiv:2204.08585, 2022.", "type": "text", "cross_page": true } ], "index": 24, "is_list_end_line": true }, { "bbox": [ 105, 452, 505, 467 ], "spans": [ { "bbox": [ 105, 452, 505, 467 ], "score": 1.0, "content": "K. Paster, L. E. McKinney, S. A. McIlraith, and J. Ba. Blast: Latent dynamics models from", "type": "text", "cross_page": true } ], "index": 25, "is_list_start_line": true }, { "bbox": [ 115, 464, 355, 478 ], "spans": [ { "bbox": [ 115, 464, 355, 478 ], "score": 1.0, "content": "bootstrapping. In Deep RL Workshop NeurIPS 2021, 2021.", "type": "text", "cross_page": true } ], "index": 26, "is_list_end_line": true }, { "bbox": [ 105, 482, 506, 499 ], "spans": [ { "bbox": [ 105, 482, 506, 499 ], "score": 1.0, "content": "K. Hsu, M. J. Kim, R. Rafailov, J. Wu, and C. Finn. Vision-based manipulators need to also see from", "type": "text", "cross_page": true } ], "index": 27, "is_list_start_line": true }, { "bbox": [ 115, 495, 387, 509 ], "spans": [ { "bbox": [ 115, 495, 387, 509 ], "score": 1.0, "content": "their hands, 2022. URL https://arxiv.org/abs/2203.12677.", "type": "text", "cross_page": true } ], "index": 28, "is_list_end_line": true } ], "index": 20, "bbox_fs": [ 104, 70, 507, 724 ] } ] }, { "preproc_blocks": [ { "type": "text", "bbox": [ 103, 44, 507, 729 ], "lines": [ { "bbox": [ 105, 73, 506, 86 ], "spans": [ { "bbox": [ 105, 73, 506, 86 ], "score": 1.0, "content": "J. Schrittwieser, I. Antonoglou, T. Hubert, K. Simonyan, L. Sifre, S. Schmitt, A. Guez, E. Lockhart,", "type": "text" } ], "index": 0 }, { "bbox": [ 115, 83, 506, 97 ], "spans": [ { "bbox": [ 115, 83, 506, 97 ], "score": 1.0, "content": "D. Hassabis, T. Graepel, et al. 
Mastering atari, go, chess and shogi by planning with a learned", "type": "text" } ], "index": 1 }, { "bbox": [ 114, 96, 310, 109 ], "spans": [ { "bbox": [ 114, 96, 310, 109 ], "score": 1.0, "content": "model. arXiv preprint arXiv:1911.08265, 2019.", "type": "text" } ], "index": 2 }, { "bbox": [ 105, 115, 506, 130 ], "spans": [ { "bbox": [ 105, 115, 506, 130 ], "score": 1.0, "content": "J. Schulman, F. Wolski, P. Dhariwal, A. Radford, and O. Klimov. Proximal policy optimization", "type": "text" } ], "index": 3 }, { "bbox": [ 115, 127, 328, 142 ], "spans": [ { "bbox": [ 115, 127, 328, 142 ], "score": 1.0, "content": "algorithms. arXiv preprint arXiv:1707.06347, 2017.", "type": "text" } ], "index": 4 }, { "bbox": [ 105, 147, 506, 161 ], "spans": [ { "bbox": [ 105, 147, 506, 161 ], "score": 1.0, "content": "D. Yarats, R. Fergus, A. Lazaric, and L. Pinto. Mastering visual continuous control: Improved", "type": "text" } ], "index": 5 }, { "bbox": [ 115, 159, 441, 173 ], "spans": [ { "bbox": [ 115, 159, 441, 173 ], "score": 1.0, "content": "data-augmented reinforcement learning. arXiv preprint arXiv:2107.09645, 2021.", "type": "text" } ], "index": 6 }, { "bbox": [ 103, 176, 506, 194 ], "spans": [ { "bbox": [ 103, 176, 506, 194 ], "score": 1.0, "content": "A. A. Rusu, M. Vecerik, T. Rothörl, N. Heess, R. Pascanu, and R. Hadsell. Sim-to-real robot learning", "type": "text" } ], "index": 7 }, { "bbox": [ 115, 190, 279, 205 ], "spans": [ { "bbox": [ 115, 190, 279, 205 ], "score": 1.0, "content": "from pixels with progressive nets, 2016.", "type": "text" } ], "index": 8 }, { "bbox": [ 105, 210, 506, 224 ], "spans": [ { "bbox": [ 105, 210, 506, 224 ], "score": 1.0, "content": "X. B. Peng, M. Andrychowicz, W. Zaremba, and P. Abbeel. Sim-to-real transfer of robotic control", "type": "text" } ], "index": 9 }, { "bbox": [ 115, 222, 505, 234 ], "spans": [ { "bbox": [ 115, 222, 505, 234 ], "score": 1.0, "content": "with dynamics randomization. In 2018 IEEE International Conference on Robotics and Automation", "type": "text" } ], "index": 10 }, { "bbox": [ 114, 234, 381, 248 ], "spans": [ { "bbox": [ 114, 234, 381, 248 ], "score": 1.0, "content": "(ICRA), pages 1–8, May 2018. doi:10.1109/ICRA.2018.8460528.", "type": "text" } ], "index": 11 }, { "bbox": [ 103, 251, 506, 268 ], "spans": [ { "bbox": [ 103, 251, 506, 268 ], "score": 1.0, "content": "N. Rudin, D. Hoeller, P. Reist, and M. Hutter. Learning to walk in minutes using massively parallel", "type": "text" } ], "index": 12 }, { "bbox": [ 115, 265, 260, 279 ], "spans": [ { "bbox": [ 115, 265, 260, 279 ], "score": 1.0, "content": "deep reinforcement learning, 2021.", "type": "text" } ], "index": 13 }, { "bbox": [ 105, 285, 506, 299 ], "spans": [ { "bbox": [ 105, 285, 506, 299 ], "score": 1.0, "content": "J. Lee, J. Hwangbo, L. Wellhausen, V. Koltun, and M. Hutter. Learning quadrupedal locomotion over", "type": "text" } ], "index": 14 }, { "bbox": [ 114, 295, 507, 311 ], "spans": [ { "bbox": [ 114, 295, 507, 311 ], "score": 1.0, "content": "challenging terrain. Science Robotics, 5(47), oct 2020. doi:10.1126/scirobotics.abc5986. URL", "type": "text" } ], "index": 15 }, { "bbox": [ 115, 308, 357, 322 ], "spans": [ { "bbox": [ 115, 308, 357, 322 ], "score": 1.0, "content": "https://doi.org/10.1126%2Fscirobotics.abc5986.", "type": "text" } ], "index": 16 }, { "bbox": [ 105, 327, 506, 343 ], "spans": [ { "bbox": [ 105, 327, 506, 343 ], "score": 1.0, "content": "Y. Yang, K. Caluwaerts, A. Iscen, T. Zhang, J. Tan, and V. Sindhwani. 
Data efficient reinforcement", "type": "text" } ], "index": 17 }, { "bbox": [ 115, 340, 251, 353 ], "spans": [ { "bbox": [ 115, 340, 251, 353 ], "score": 1.0, "content": "learning for legged robots, 2019.", "type": "text" } ], "index": 18 }, { "bbox": [ 104, 358, 506, 374 ], "spans": [ { "bbox": [ 104, 358, 506, 374 ], "score": 1.0, "content": "S. Nair, A. Rajeswaran, V. Kumar, C. Finn, and A. Gupta. R3m: A universal visual representation for", "type": "text" } ], "index": 19 }, { "bbox": [ 115, 373, 222, 385 ], "spans": [ { "bbox": [ 115, 373, 222, 385 ], "score": 1.0, "content": "robot manipulation, 2022.", "type": "text" } ], "index": 20 }, { "bbox": [ 106, 392, 506, 405 ], "spans": [ { "bbox": [ 106, 392, 506, 405 ], "score": 1.0, "content": "OpenAI, M. Andrychowicz, B. Baker, M. Chociej, R. Jozefowicz, B. McGrew, J. Pachocki, A. Petron,", "type": "text" } ], "index": 21 }, { "bbox": [ 115, 403, 505, 417 ], "spans": [ { "bbox": [ 115, 403, 505, 417 ], "score": 1.0, "content": "M. Plappert, G. Powell, A. Ray, J. Schneider, S. Sidor, J. Tobin, P. Welinder, L. Weng, and", "type": "text" } ], "index": 22 }, { "bbox": [ 115, 415, 366, 429 ], "spans": [ { "bbox": [ 115, 415, 366, 429 ], "score": 1.0, "content": "W. Zaremba. Learning dexterous in-hand manipulation, 2018.", "type": "text" } ], "index": 23 }, { "bbox": [ 105, 434, 506, 448 ], "spans": [ { "bbox": [ 105, 434, 506, 448 ], "score": 1.0, "content": "A. Irpan, C. Harris, J. Ibarz, K. Rao, M. Khansari, and S. Levine. Rl-cyclegan: Improving deep-rl", "type": "text" } ], "index": 24 }, { "bbox": [ 114, 446, 506, 461 ], "spans": [ { "bbox": [ 114, 446, 506, 461 ], "score": 1.0, "content": "robotics with simulation-to-real. In Proceedings of the IEEE Conference on Computer Vision and", "type": "text" } ], "index": 25 }, { "bbox": [ 115, 459, 281, 471 ], "spans": [ { "bbox": [ 115, 459, 281, 471 ], "score": 1.0, "content": "Pattern Recognition (CVPR 2020), 2020.", "type": "text" } ], "index": 26 }, { "bbox": [ 105, 478, 496, 490 ], "spans": [ { "bbox": [ 105, 478, 496, 490 ], "score": 1.0, "content": "A. Kumar, Z. Fu, D. Pathak, and J. Malik. Rma: Rapid motor adaptation for legged robots, 2021.", "type": "text" } ], "index": 27 }, { "bbox": [ 105, 497, 506, 512 ], "spans": [ { "bbox": [ 105, 497, 506, 512 ], "score": 1.0, "content": "J. Siekmann, K. Green, J. Warila, A. Fern, and J. Hurst. Blind bipedal stair traversal via sim-to-real", "type": "text" } ], "index": 28 }, { "bbox": [ 115, 511, 239, 523 ], "spans": [ { "bbox": [ 115, 511, 239, 523 ], "score": 1.0, "content": "reinforcement learning, 2021.", "type": "text" } ], "index": 29 }, { "bbox": [ 105, 531, 505, 542 ], "spans": [ { "bbox": [ 105, 531, 505, 542 ], "score": 1.0, "content": "A. Escontrela, X. B. Peng, W. Yu, T. Zhang, A. Iscen, K. Goldberg, and P. Abbeel. Adversarial", "type": "text" } ], "index": 30 }, { "bbox": [ 113, 541, 411, 554 ], "spans": [ { "bbox": [ 113, 541, 411, 554 ], "score": 1.0, "content": "motion priors make good substitutes for complex reward functions, 2022.", "type": "text" } ], "index": 31 }, { "bbox": [ 105, 560, 506, 575 ], "spans": [ { "bbox": [ 105, 560, 506, 575 ], "score": 1.0, "content": "D. Kalashnikov, A. Irpan, P. Pastor, J. Ibarz, A. Herzog, E. Jang, D. Quillen, E. Holly, M. Kalakrishnan,", "type": "text" } ], "index": 32 }, { "bbox": [ 115, 572, 506, 587 ], "spans": [ { "bbox": [ 115, 572, 506, 587 ], "score": 1.0, "content": "V. Vanhoucke, and S. Levine. 
Qt-opt: Scalable deep reinforcement learning for vision-based robotic", "type": "text" } ], "index": 33 }, { "bbox": [ 115, 585, 199, 597 ], "spans": [ { "bbox": [ 115, 585, 199, 597 ], "score": 1.0, "content": "manipulation, 2018.", "type": "text" } ], "index": 34 }, { "bbox": [ 105, 604, 507, 618 ], "spans": [ { "bbox": [ 105, 604, 507, 618 ], "score": 1.0, "content": "S. Dasari, F. Ebert, S. Tian, S. Nair, B. Bucher, K. Schmeckpeper, S. Singh, S. Levine, and C. Finn.", "type": "text" } ], "index": 35 }, { "bbox": [ 115, 617, 317, 629 ], "spans": [ { "bbox": [ 115, 617, 317, 629 ], "score": 1.0, "content": "Robonet: Large-scale multi-robot learning, 2019.", "type": "text" } ], "index": 36 }, { "bbox": [ 105, 636, 506, 648 ], "spans": [ { "bbox": [ 105, 636, 506, 648 ], "score": 1.0, "content": "D. Kalashnikov, J. Varley, Y. Chebotar, B. Swanson, R. Jonschkowski, C. Finn, S. Levine, and", "type": "text" } ], "index": 37 }, { "bbox": [ 115, 648, 481, 662 ], "spans": [ { "bbox": [ 115, 648, 481, 662 ], "score": 1.0, "content": "K. Hausman. Mt-opt: Continuous multi-task robotic reinforcement learning at scale, 2021.", "type": "text" } ], "index": 38 }, { "bbox": [ 105, 667, 507, 681 ], "spans": [ { "bbox": [ 105, 667, 507, 681 ], "score": 1.0, "content": "F. Ebert, Y. Yang, K. Schmeckpeper, B. Bucher, G. Georgakis, K. Daniilidis, C. Finn, and S. Levine.", "type": "text" } ], "index": 39 }, { "bbox": [ 115, 679, 470, 693 ], "spans": [ { "bbox": [ 115, 679, 470, 693 ], "score": 1.0, "content": "Bridge data: Boosting generalization of robotic skills with cross-domain datasets, 2021.", "type": "text" } ], "index": 40 }, { "bbox": [ 103, 698, 507, 713 ], "spans": [ { "bbox": [ 103, 698, 507, 713 ], "score": 1.0, "content": "A. Xie, F. Ebert, S. Levine, and C. Finn. Improvisation through physical understanding: Using novel", "type": "text" } ], "index": 41 }, { "bbox": [ 115, 711, 430, 725 ], "spans": [ { "bbox": [ 115, 711, 430, 725 ], "score": 1.0, "content": "objects as tools with visual foresight. arXiv preprint arXiv:1904.05538, 2019.", "type": "text" } ], "index": 42 } ], "index": 21 } ], "page_idx": 9, "page_size": [ 612, 792 ], "discarded_blocks": [ { "type": "discarded", "bbox": [ 300, 741, 311, 751 ], "lines": [ { "bbox": [ 299, 740, 313, 754 ], "spans": [ { "bbox": [ 299, 740, 313, 754 ], "score": 1.0, "content": "10", "type": "text" } ] } ] } ], "para_blocks": [ { "type": "list", "bbox": [ 103, 44, 507, 729 ], "lines": [], "index": 21, "bbox_fs": [ 103, 73, 507, 725 ], "lines_deleted": true } ] }, { "preproc_blocks": [ { "type": "text", "bbox": [ 104, 72, 507, 726 ], "lines": [ { "bbox": [ 105, 72, 506, 86 ], "spans": [ { "bbox": [ 105, 72, 506, 86 ], "score": 1.0, "content": "G. Schoettler, A. Nair, J. Luo, S. Bahl, J. A. Ojea, E. Solowjow, and S. Levine. Deep reinforcement", "type": "text" } ], "index": 0 }, { "bbox": [ 115, 84, 446, 98 ], "spans": [ { "bbox": [ 115, 84, 446, 98 ], "score": 1.0, "content": "learning for industrial insertion tasks with visual inputs and natural rewards, 2019.", "type": "text" } ], "index": 1 }, { "bbox": [ 105, 105, 506, 118 ], "spans": [ { "bbox": [ 105, 105, 506, 118 ], "score": 1.0, "content": "S. James, K. Wada, T. Laidlow, and A. J. Davison. 
Coarse-to-fine q-attention: Efficient learning for", "type": "text" } ], "index": 2 }, { "bbox": [ 115, 118, 326, 129 ], "spans": [ { "bbox": [ 115, 118, 326, 129 ], "score": 1.0, "content": "visual robotic manipulation via discretisation, 2021.", "type": "text" } ], "index": 3 }, { "bbox": [ 103, 136, 507, 152 ], "spans": [ { "bbox": [ 103, 136, 507, 152 ], "score": 1.0, "content": "D. Shah and S. Levine. Viking: Vision-based kilometer-scale navigation with geographic hints, 2022.", "type": "text" } ], "index": 4 }, { "bbox": [ 105, 158, 506, 171 ], "spans": [ { "bbox": [ 105, 158, 506, 171 ], "score": 1.0, "content": "S. Bohez, S. Tunyasuvunakool, P. Brakel, F. Sadeghi, L. Hasenclever, Y. Tassa, E. Parisotto,", "type": "text" } ], "index": 5 }, { "bbox": [ 114, 169, 507, 183 ], "spans": [ { "bbox": [ 114, 169, 507, 183 ], "score": 1.0, "content": "J. Humplik, T. Haarnoja, R. Hafner, M. Wulfmeier, M. Neunert, B. Moran, N. Siegel, A. Huber,", "type": "text" } ], "index": 6 }, { "bbox": [ 113, 179, 508, 196 ], "spans": [ { "bbox": [ 113, 179, 508, 196 ], "score": 1.0, "content": "F. Romano, N. Batchelor, F. Casarini, J. Merel, R. Hadsell, and N. Heess. Imitate and repurpose:", "type": "text" } ], "index": 7 }, { "bbox": [ 115, 193, 447, 207 ], "spans": [ { "bbox": [ 115, 193, 447, 207 ], "score": 1.0, "content": "Learning reusable robot movement skills from human and animal behaviors, 2022.", "type": "text" } ], "index": 8 }, { "bbox": [ 105, 212, 505, 228 ], "spans": [ { "bbox": [ 105, 212, 505, 228 ], "score": 1.0, "content": "A. Sivakumar, K. Shaw, and D. Pathak. Robotic telekinesis: Learning a robotic hand imitator by", "type": "text" } ], "index": 9 }, { "bbox": [ 115, 225, 264, 238 ], "spans": [ { "bbox": [ 115, 225, 264, 238 ], "score": 1.0, "content": "watching humans on youtube, 2022.", "type": "text" } ], "index": 10 }, { "bbox": [ 105, 245, 506, 259 ], "spans": [ { "bbox": [ 105, 245, 506, 259 ], "score": 1.0, "content": "C. Finn, I. Goodfellow, and S. Levine. Unsupervised learning for physical interaction through video", "type": "text" } ], "index": 11 }, { "bbox": [ 114, 257, 464, 271 ], "spans": [ { "bbox": [ 114, 257, 464, 271 ], "score": 1.0, "content": "prediction. In Advances in neural information processing systems, pages 64–72, 2016.", "type": "text" } ], "index": 12 }, { "bbox": [ 104, 276, 506, 292 ], "spans": [ { "bbox": [ 104, 276, 506, 292 ], "score": 1.0, "content": "C. Finn and S. Levine. Deep visual foresight for planning robot motion. In Robotics and Automation", "type": "text" } ], "index": 13 }, { "bbox": [ 114, 289, 441, 304 ], "spans": [ { "bbox": [ 114, 289, 441, 304 ], "score": 1.0, "content": "(ICRA), 2017 IEEE International Conference on, pages 2786–2793. IEEE, 2017.", "type": "text" } ], "index": 14 }, { "bbox": [ 105, 308, 506, 325 ], "spans": [ { "bbox": [ 105, 308, 506, 325 ], "score": 1.0, "content": "Y. Yang, T. Zhang, E. Coumans, J. Tan, and B. Boots. Fast and efficient locomotion via learned gait", "type": "text" } ], "index": 15 }, { "bbox": [ 115, 322, 425, 335 ], "spans": [ { "bbox": [ 115, 322, 425, 335 ], "score": 1.0, "content": "transitions. In Conference on Robot Learning, pages 773–783. PMLR, 2022.", "type": "text" } ], "index": 16 }, { "bbox": [ 105, 342, 506, 356 ], "spans": [ { "bbox": [ 105, 342, 506, 356 ], "score": 1.0, "content": "S. Ha, P. Xu, Z. Tan, S. Levine, and J. Tan. 
Learning to walk in the real world with minimal human", "type": "text" } ], "index": 17 }, { "bbox": [ 115, 353, 308, 369 ], "spans": [ { "bbox": [ 115, 353, 308, 369 ], "score": 1.0, "content": "effort. arXiv preprint arXiv:2002.08550, 2020.", "type": "text" } ], "index": 18 }, { "bbox": [ 106, 375, 505, 387 ], "spans": [ { "bbox": [ 106, 375, 505, 387 ], "score": 1.0, "content": "M. Zhang, S. Vikram, L. Smith, P. Abbeel, M. Johnson, and S. Levine. Solar: deep structured", "type": "text" } ], "index": 19 }, { "bbox": [ 115, 387, 506, 400 ], "spans": [ { "bbox": [ 115, 387, 506, 400 ], "score": 1.0, "content": "representations for model-based reinforcement learning. In International Conference on Machine", "type": "text" } ], "index": 20 }, { "bbox": [ 114, 398, 182, 411 ], "spans": [ { "bbox": [ 114, 398, 182, 411 ], "score": 1.0, "content": "Learning, 2019.", "type": "text" } ], "index": 21 }, { "bbox": [ 105, 418, 506, 432 ], "spans": [ { "bbox": [ 105, 418, 506, 432 ], "score": 1.0, "content": "A. Nagabandi, K. Konoglie, S. Levine, and V. Kumar. Deep dynamics models for learning dexterous", "type": "text" } ], "index": 22 }, { "bbox": [ 115, 430, 199, 444 ], "spans": [ { "bbox": [ 115, 430, 199, 444 ], "score": 1.0, "content": "manipulation, 2019.", "type": "text" } ], "index": 23 }, { "bbox": [ 105, 450, 505, 464 ], "spans": [ { "bbox": [ 105, 450, 505, 464 ], "score": 1.0, "content": "G. I. Parisi, R. Kemker, J. L. Part, C. Kanan, and S. Wermter. Continual lifelong learning with neural", "type": "text" } ], "index": 24 }, { "bbox": [ 115, 461, 419, 475 ], "spans": [ { "bbox": [ 115, 461, 419, 475 ], "score": 1.0, "content": "networks: A review. Neural Networks, 113:54–71, 2019. ISSN 0893-6080.", "type": "text" } ], "index": 25 }, { "bbox": [ 105, 482, 506, 496 ], "spans": [ { "bbox": [ 105, 482, 506, 496 ], "score": 1.0, "content": "T. Miki, J. Lee, J. Hwangbo, L. Wellhausen, V. Koltun, and M. Hutter. Learning robust perceptive", "type": "text" } ], "index": 26 }, { "bbox": [ 114, 493, 506, 508 ], "spans": [ { "bbox": [ 114, 493, 506, 508 ], "score": 1.0, "content": "locomotion for quadrupedal robots in the wild. Science Robotics, 7(62), jan 2022. doi:10.1126/", "type": "text" } ], "index": 27 }, { "bbox": [ 115, 506, 201, 519 ], "spans": [ { "bbox": [ 115, 506, 201, 519 ], "score": 1.0, "content": "scirobotics.abk2822.", "type": "text" } ], "index": 28 }, { "bbox": [ 103, 524, 506, 542 ], "spans": [ { "bbox": [ 103, 524, 506, 542 ], "score": 1.0, "content": "L. Smith, J. C. Kew, X. B. Peng, S. Ha, J. Tan, and S. Levine. Legged robots that keep on learning:", "type": "text" } ], "index": 29 }, { "bbox": [ 115, 538, 342, 552 ], "spans": [ { "bbox": [ 115, 538, 342, 552 ], "score": 1.0, "content": "Fine-tuning locomotion policies in the real world, 2021.", "type": "text" } ], "index": 30 }, { "bbox": [ 105, 557, 506, 573 ], "spans": [ { "bbox": [ 105, 557, 506, 573 ], "score": 1.0, "content": "T.-Y. Yang, T. Zhang, L. Luu, S. Ha, J. Tan, and W. Yu. Safe reinforcement learning for legged", "type": "text" } ], "index": 31 }, { "bbox": [ 115, 570, 387, 584 ], "spans": [ { "bbox": [ 115, 570, 387, 584 ], "score": 1.0, "content": "locomotion, 2022. URL https://arxiv.org/abs/2203.02638.", "type": "text" } ], "index": 32 }, { "bbox": [ 103, 589, 506, 606 ], "spans": [ { "bbox": [ 103, 589, 506, 606 ], "score": 1.0, "content": "S. Ha, P. Xu, Z. Tan, S. Levine, and J. Tan. 
Learning to walk in the real world with minimal human", "type": "text" } ], "index": 33 }, { "bbox": [ 115, 603, 363, 616 ], "spans": [ { "bbox": [ 115, 603, 363, 616 ], "score": 1.0, "content": "effort, 2020. URL https://arxiv.org/abs/2002.08550.", "type": "text" } ], "index": 34 }, { "bbox": [ 105, 623, 506, 636 ], "spans": [ { "bbox": [ 105, 623, 506, 636 ], "score": 1.0, "content": "L. Smith, I. Kostrikov, and S. Levine. A walk in the park: Learning to walk in 20 minutes with", "type": "text" } ], "index": 35 }, { "bbox": [ 115, 635, 478, 649 ], "spans": [ { "bbox": [ 115, 635, 478, 649 ], "score": 1.0, "content": "model-free reinforcement learning, 2022. URL https://arxiv.org/abs/2208.07860.", "type": "text" } ], "index": 36 }, { "bbox": [ 105, 655, 506, 669 ], "spans": [ { "bbox": [ 105, 655, 506, 669 ], "score": 1.0, "content": "S. Levine, P. Pastor, A. Krizhevsky, J. Ibarz, and D. Quillen. Learning hand-eye coordination for", "type": "text" } ], "index": 37 }, { "bbox": [ 114, 666, 507, 682 ], "spans": [ { "bbox": [ 114, 666, 507, 682 ], "score": 1.0, "content": "robotic grasping with deep learning and large-scale data collection. The International Journal of", "type": "text" } ], "index": 38 }, { "bbox": [ 114, 678, 294, 692 ], "spans": [ { "bbox": [ 114, 678, 294, 692 ], "score": 1.0, "content": "Robotics Research, 37(4-5):421–436, 2018.", "type": "text" } ], "index": 39 }, { "bbox": [ 105, 699, 506, 713 ], "spans": [ { "bbox": [ 105, 699, 506, 713 ], "score": 1.0, "content": "L. Pinto and A. Gupta. Supersizing self-supervision: Learning to grasp from 50k tries and 700 robot", "type": "text" } ], "index": 40 }, { "bbox": [ 114, 711, 169, 724 ], "spans": [ { "bbox": [ 114, 711, 169, 724 ], "score": 1.0, "content": "hours, 2015.", "type": "text" } ], "index": 41 } ], "index": 20.5 } ], "page_idx": 10, "page_size": [ 612, 792 ], "discarded_blocks": [ { "type": "discarded", "bbox": [ 300, 741, 311, 751 ], "lines": [ { "bbox": [ 299, 740, 312, 754 ], "spans": [ { "bbox": [ 299, 740, 312, 754 ], "score": 1.0, "content": "11", "type": "text" } ] } ] } ], "para_blocks": [ { "type": "list", "bbox": [ 104, 72, 507, 726 ], "lines": [], "index": 20.5, "bbox_fs": [ 103, 72, 508, 724 ], "lines_deleted": true } ] }, { "preproc_blocks": [ { "type": "text", "bbox": [ 103, 68, 507, 513 ], "lines": [ { "bbox": [ 105, 73, 505, 87 ], "spans": [ { "bbox": [ 105, 73, 505, 87 ], "score": 1.0, "content": "H. Ha and S. Song. Flingbot: The unreasonable effectiveness of dynamic manipulation for cloth", "type": "text" } ], "index": 0 }, { "bbox": [ 115, 84, 315, 98 ], "spans": [ { "bbox": [ 115, 84, 315, 98 ], "score": 1.0, "content": "unfolding. Conference on Robot Learning, 2021.", "type": "text" } ], "index": 1 }, { "bbox": [ 104, 103, 505, 119 ], "spans": [ { "bbox": [ 104, 103, 505, 119 ], "score": 1.0, "content": "S. James and A. J. Davison. Q-attention: Enabling efficient learning for vision-based robotic", "type": "text" } ], "index": 2 }, { "bbox": [ 115, 116, 200, 129 ], "spans": [ { "bbox": [ 115, 116, 200, 129 ], "score": 1.0, "content": "manipulation, 2021.", "type": "text" } ], "index": 3 }, { "bbox": [ 104, 134, 505, 150 ], "spans": [ { "bbox": [ 104, 134, 505, 150 ], "score": 1.0, "content": "E. Tzeng, C. Devin, J. Hoffman, C. Finn, P. Abbeel, S. Levine, K. Saenko, and T. Darrell. 
Adapting", "type": "text" } ], "index": 4 }, { "bbox": [ 115, 147, 402, 162 ], "spans": [ { "bbox": [ 115, 147, 402, 162 ], "score": 1.0, "content": "deep visuomotor representations with weak pairwise constraints, 2015.", "type": "text" } ], "index": 5 }, { "bbox": [ 104, 166, 506, 181 ], "spans": [ { "bbox": [ 104, 166, 506, 181 ], "score": 1.0, "content": "I. Akkaya, M. Andrychowicz, M. Chociej, M. Litwin, B. McGrew, A. Petron, A. Paino, M. Plappert,", "type": "text" } ], "index": 6 }, { "bbox": [ 115, 178, 507, 192 ], "spans": [ { "bbox": [ 115, 178, 507, 192 ], "score": 1.0, "content": "G. Powell, R. Ribas, et al. Solving rubik’s cube with a robot hand. arXiv preprint arXiv:1910.07113,", "type": "text" } ], "index": 7 }, { "bbox": [ 115, 190, 142, 203 ], "spans": [ { "bbox": [ 115, 190, 142, 203 ], "score": 1.0, "content": "2019.", "type": "text" } ], "index": 8 }, { "bbox": [ 105, 208, 505, 223 ], "spans": [ { "bbox": [ 105, 208, 505, 223 ], "score": 1.0, "content": "M. P. Deisenroth, G. Neumann, J. Peters, et al. A survey on policy search for robotics. Foundations", "type": "text" } ], "index": 9 }, { "bbox": [ 115, 221, 298, 235 ], "spans": [ { "bbox": [ 115, 221, 298, 235 ], "score": 1.0, "content": "and Trends in Robotics, 2(1–2):1–142, 2013.", "type": "text" } ], "index": 10 }, { "bbox": [ 105, 241, 506, 255 ], "spans": [ { "bbox": [ 105, 241, 506, 255 ], "score": 1.0, "content": "K. Chua, R. Calandra, R. McAllister, and S. Levine. Deep reinforcement learning in a handful of", "type": "text" } ], "index": 11 }, { "bbox": [ 114, 252, 507, 268 ], "spans": [ { "bbox": [ 114, 252, 507, 268 ], "score": 1.0, "content": "trials using probabilistic dynamics models. In Advances in Neural Information Processing Systems,", "type": "text" } ], "index": 12 }, { "bbox": [ 114, 264, 218, 278 ], "spans": [ { "bbox": [ 114, 264, 218, 278 ], "score": 1.0, "content": "pages 4754–4765, 2018.", "type": "text" } ], "index": 13 }, { "bbox": [ 104, 282, 506, 299 ], "spans": [ { "bbox": [ 104, 282, 506, 299 ], "score": 1.0, "content": "A. Nagabandi, G. Yang, T. Asmar, R. Pandya, G. Kahn, S. Levine, and R. S. Fearing. Learning", "type": "text" } ], "index": 14 }, { "bbox": [ 114, 295, 487, 310 ], "spans": [ { "bbox": [ 114, 295, 487, 310 ], "score": 1.0, "content": "image-conditioned dynamics models for control of under-actuated legged millirobots, 2017.", "type": "text" } ], "index": 15 }, { "bbox": [ 104, 314, 505, 329 ], "spans": [ { "bbox": [ 104, 314, 505, 329 ], "score": 1.0, "content": "P. Becker-Ehmck, M. Karl, J. Peters, and P. van der Smagt. Learning to fly via deep model-based", "type": "text" } ], "index": 16 }, { "bbox": [ 114, 327, 376, 340 ], "spans": [ { "bbox": [ 114, 327, 376, 340 ], "score": 1.0, "content": "reinforcement learning. arXiv preprint arXiv:2003.08876, 2020.", "type": "text" } ], "index": 17 }, { "bbox": [ 104, 345, 505, 360 ], "spans": [ { "bbox": [ 104, 345, 505, 360 ], "score": 1.0, "content": "F. Deng, I. Jang, and S. Ahn. Dreamerpro: Reconstruction-free model-based reinforcement learning", "type": "text" } ], "index": 18 }, { "bbox": [ 115, 358, 416, 372 ], "spans": [ { "bbox": [ 115, 358, 416, 372 ], "score": 1.0, "content": "with prototypical representations. arXiv preprint arXiv:2110.14565, 2021.", "type": "text" } ], "index": 19 }, { "bbox": [ 105, 377, 505, 392 ], "spans": [ { "bbox": [ 105, 377, 505, 392 ], "score": 1.0, "content": "M. Okada and T. Taniguchi. 
Dreaming: Model-based reinforcement learning by latent imagination", "type": "text" } ], "index": 20 }, { "bbox": [ 114, 389, 506, 403 ], "spans": [ { "bbox": [ 114, 389, 506, 403 ], "score": 1.0, "content": "without reconstruction. In 2021 IEEE International Conference on Robotics and Automation", "type": "text" } ], "index": 21 }, { "bbox": [ 115, 402, 275, 414 ], "spans": [ { "bbox": [ 115, 402, 275, 414 ], "score": 1.0, "content": "(ICRA), pages 4209–4215. IEEE, 2021.", "type": "text" } ], "index": 22 }, { "bbox": [ 105, 420, 505, 435 ], "spans": [ { "bbox": [ 105, 420, 505, 435 ], "score": 1.0, "content": "H. Bharadhwaj, M. Babaeizadeh, D. Erhan, and S. Levine. Information prioritization through", "type": "text" } ], "index": 23 }, { "bbox": [ 115, 433, 439, 446 ], "spans": [ { "bbox": [ 115, 433, 439, 446 ], "score": 1.0, "content": "empowerment in visual model-based rl. arXiv preprint arXiv:2204.08585, 2022.", "type": "text" } ], "index": 24 }, { "bbox": [ 105, 452, 505, 467 ], "spans": [ { "bbox": [ 105, 452, 505, 467 ], "score": 1.0, "content": "K. Paster, L. E. McKinney, S. A. McIlraith, and J. Ba. Blast: Latent dynamics models from", "type": "text" } ], "index": 25 }, { "bbox": [ 115, 464, 355, 478 ], "spans": [ { "bbox": [ 115, 464, 355, 478 ], "score": 1.0, "content": "bootstrapping. In Deep RL Workshop NeurIPS 2021, 2021.", "type": "text" } ], "index": 26 }, { "bbox": [ 105, 482, 506, 499 ], "spans": [ { "bbox": [ 105, 482, 506, 499 ], "score": 1.0, "content": "K. Hsu, M. J. Kim, R. Rafailov, J. Wu, and C. Finn. Vision-based manipulators need to also see from", "type": "text" } ], "index": 27 }, { "bbox": [ 115, 495, 387, 509 ], "spans": [ { "bbox": [ 115, 495, 387, 509 ], "score": 1.0, "content": "their hands, 2022. URL https://arxiv.org/abs/2203.12677.", "type": "text" } ], "index": 28 } ], "index": 14 } ], "page_idx": 11, "page_size": [ 612, 792 ], "discarded_blocks": [ { "type": "discarded", "bbox": [ 300, 741, 311, 750 ], "lines": [ { "bbox": [ 299, 740, 312, 754 ], "spans": [ { "bbox": [ 299, 740, 312, 754 ], "score": 1.0, "content": "12", "type": "text" } ] } ] } ], "para_blocks": [ { "type": "list", "bbox": [ 103, 68, 507, 513 ], "lines": [], "index": 14, "bbox_fs": [ 104, 73, 507, 509 ], "lines_deleted": true } ] }, { "preproc_blocks": [ { "type": "title", "bbox": [ 107, 72, 186, 85 ], "lines": [ { "bbox": [ 105, 70, 188, 88 ], "spans": [ { "bbox": [ 105, 70, 188, 88 ], "score": 1.0, "content": "A Adaptation", "type": "text" } ], "index": 0 } ], "index": 0 }, { "type": "text", "bbox": [ 107, 93, 505, 152 ], "lines": [ { "bbox": [ 105, 93, 505, 106 ], "spans": [ { "bbox": [ 105, 93, 505, 106 ], "score": 1.0, "content": "Real world robot learning faces practical challenges such as changing environmental conditions", "type": "text" } ], "index": 1 }, { "bbox": [ 105, 105, 505, 118 ], "spans": [ { "bbox": [ 105, 105, 505, 118 ], "score": 1.0, "content": "and time varying dynamics. We found that Dreamer is able to adapt to the current environmental", "type": "text" } ], "index": 2 }, { "bbox": [ 105, 117, 505, 129 ], "spans": [ { "bbox": [ 105, 117, 505, 129 ], "score": 1.0, "content": "conditions with no change to the learning algorithm. This shows promise for using Dreamer in", "type": "text" } ], "index": 3 }, { "bbox": [ 105, 128, 505, 142 ], "spans": [ { "bbox": [ 105, 128, 505, 142 ], "score": 1.0, "content": "continual learning settings (Parisi et al., 2019). 
Adaptation of the quadruped to external perturbations is reported in Section 3.1 and Figure 8.

The XArm, situated near large windows, is able to adapt and maintain performance in the presence of changing lighting conditions. The XArm experiments were conducted after sundown to keep the lighting conditions constant throughout training. Figure A.1 shows the learning curve of the XArm. As expected, the performance of the XArm drops during sunrise. However, the XArm adapts to the change in lighting conditions in about 5 hours and recovers its original performance, which is faster than training from scratch. A careful inspection of the image observations at these times, shown in Figure A.1, reveals that the robot received observations with strong light rays covering the scene, which differ greatly from the original training observations.

Figure A.1: The left two images are raw observations consumed by Dreamer. The leftmost image is an image observation as seen by the XArm at night, when it was trained.
The next image shows an observation during sunrise. Despite the vast difference in pixel space, the XArm is able to recover, and then surpass, the original performance in approximately 5 hours. Even after 24 hours, when the lighting shifts to night-time conditions, the XArm is able to maintain performance.

B Imagination

Figure B.1: To introspect the policy, we can roll out trajectories in the latent space of Dreamer, then decode the images to visualize the intent of the actor network. Each row is an imagined trajectory, showing every second frame. Top: Latent rollouts on the UR5 environment. Multiple objects introduce more visual complexity that the network has to model. Note the second trajectory, which shows a static orange ball becoming a green ball.
Bottom: Latent rollouts on the XArm environment.
A careful inspection of the image observations", "type": "text" } ], "index": 11 }, { "bbox": [ 105, 226, 505, 239 ], "spans": [ { "bbox": [ 105, 226, 505, 239 ], "score": 1.0, "content": "at these times, as shown in Figure A.1, reveals that the robot received observations with strong light", "type": "text" } ], "index": 12 }, { "bbox": [ 105, 238, 446, 252 ], "spans": [ { "bbox": [ 105, 238, 446, 252 ], "score": 1.0, "content": "rays covering the scene which greatly differs from the original training observations.", "type": "text" } ], "index": 13 } ], "index": 9.5, "bbox_fs": [ 105, 156, 506, 252 ] }, { "type": "image", "bbox": [ 106, 259, 498, 349 ], "blocks": [ { "type": "image_body", "bbox": [ 106, 259, 498, 349 ], "group_id": 0, "lines": [ { "bbox": [ 106, 259, 498, 349 ], "spans": [ { "bbox": [ 106, 259, 498, 349 ], "score": 0.962, "type": "image", "image_path": "db6cf431ae9355646aa06c810c30e311d8db38009707b4dea4bad788085ac2bb.jpg" } ] } ], "index": 15, "virtual_lines": [ { "bbox": [ 106, 259, 498, 289.0 ], "spans": [], "index": 14 }, { "bbox": [ 106, 289.0, 498, 319.0 ], "spans": [], "index": 15 }, { "bbox": [ 106, 319.0, 498, 349.0 ], "spans": [], "index": 16 } ] }, { "type": "image_caption", "bbox": [ 106, 355, 506, 412 ], "group_id": 0, "lines": [ { "bbox": [ 105, 355, 505, 369 ], "spans": [ { "bbox": [ 105, 355, 505, 369 ], "score": 1.0, "content": "Figure A.1: The left two images are raw observations consumed by Dreamer. The leftmost image is", "type": "text" } ], "index": 17 }, { "bbox": [ 105, 367, 506, 380 ], "spans": [ { "bbox": [ 105, 367, 506, 380 ], "score": 1.0, "content": "an image observation as seen by the XArm at night, when it was trained. The next image shows an", "type": "text" } ], "index": 18 }, { "bbox": [ 105, 378, 507, 392 ], "spans": [ { "bbox": [ 105, 378, 507, 392 ], "score": 1.0, "content": "observation during sunrise. Despite the vast difference in pixel space, the XArm is able to recover,", "type": "text" } ], "index": 19 }, { "bbox": [ 105, 389, 505, 403 ], "spans": [ { "bbox": [ 105, 389, 505, 403 ], "score": 1.0, "content": "and then surpass, the original performance in approximately 5 hours. 
Even after 24 hours when the", "type": "text" } ], "index": 20 }, { "bbox": [ 106, 400, 438, 414 ], "spans": [ { "bbox": [ 106, 400, 438, 414 ], "score": 1.0, "content": "lighting shifts to night time conditions, the XArm is able to maintain performance.", "type": "text" } ], "index": 21 } ], "index": 19 } ], "index": 17.0 }, { "type": "title", "bbox": [ 107, 432, 190, 446 ], "lines": [ { "bbox": [ 104, 430, 192, 449 ], "spans": [ { "bbox": [ 104, 430, 192, 449 ], "score": 1.0, "content": "B Imagination", "type": "text" } ], "index": 22 } ], "index": 22 }, { "type": "image", "bbox": [ 110, 458, 501, 650 ], "blocks": [ { "type": "image_body", "bbox": [ 110, 458, 501, 650 ], "group_id": 1, "lines": [ { "bbox": [ 110, 458, 501, 650 ], "spans": [ { "bbox": [ 110, 458, 501, 650 ], "score": 0.98, "type": "image", "image_path": "24fe88bf92baa43778d9defa3450750bc0d2c910fa9c12c5902630d7c2316e1e.jpg" } ] } ], "index": 24, "virtual_lines": [ { "bbox": [ 110, 458, 501, 522.0 ], "spans": [], "index": 23 }, { "bbox": [ 110, 522.0, 501, 586.0 ], "spans": [], "index": 24 }, { "bbox": [ 110, 586.0, 501, 650.0 ], "spans": [], "index": 25 } ] }, { "type": "image_caption", "bbox": [ 106, 656, 506, 713 ], "group_id": 1, "lines": [ { "bbox": [ 105, 656, 505, 669 ], "spans": [ { "bbox": [ 105, 656, 505, 669 ], "score": 1.0, "content": "Figure B.1: To introspect the policy, we can roll out trajectories in the latent space of Dreamer, then", "type": "text" } ], "index": 26 }, { "bbox": [ 104, 664, 506, 683 ], "spans": [ { "bbox": [ 104, 664, 506, 683 ], "score": 1.0, "content": "decode the images to visualize the intent of the actor network. Each row is an imagined trajectory,", "type": "text" } ], "index": 27 }, { "bbox": [ 106, 678, 505, 692 ], "spans": [ { "bbox": [ 106, 678, 505, 692 ], "score": 1.0, "content": "showing every 2nd frame. Top: Latent rollouts on the UR5 environment. Multiple objects introduce", "type": "text" } ], "index": 28 }, { "bbox": [ 105, 690, 506, 703 ], "spans": [ { "bbox": [ 105, 690, 506, 703 ], "score": 1.0, "content": "more visual complexity that the network has to model. Note the second trajectory, which shows a", "type": "text" } ], "index": 29 }, { "bbox": [ 106, 701, 479, 713 ], "spans": [ { "bbox": [ 106, 701, 479, 713 ], "score": 1.0, "content": "static orange ball becoming a green ball. 
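These visualizations require no robot interaction: starting from a latent state, the actor proposes an action, the world model's prior predicts the next latent, and the decoder renders each latent back into an image. The following is a minimal sketch of that loop; `actor`, `prior_step`, and `decode` are random stand-ins for the trained RSSM components, not the actual networks.

```python
import numpy as np

rng = np.random.default_rng(seed=0)
LATENT_DIM, ACTION_DIM = 64, 4

def actor(latent):
    # Policy-network stand-in: maps a latent state to an action.
    return np.tanh(rng.normal(size=ACTION_DIM))

def prior_step(latent, action):
    # World-model prior stand-in: predicts the next latent without env interaction.
    return np.tanh(latent + 0.1 * rng.normal(size=LATENT_DIM))

def decode(latent):
    # Decoder stand-in: maps a latent state to a 64x64 RGB frame.
    return rng.uniform(0.0, 1.0, size=(64, 64, 3))

def imagine(start_latent, horizon=15):
    """Roll out a trajectory purely in latent space and decode it for viewing."""
    latent, frames = start_latent, []
    for _ in range(horizon):
        action = actor(latent)               # actor proposes an action
        latent = prior_step(latent, action)  # prior predicts the next latent
        frames.append(decode(latent))        # decoding is for visualization only
    return frames

frames = imagine(np.zeros(LATENT_DIM))
print(len(frames), frames[0].shape)          # 15 (64, 64, 3)
```

Tiling every second decoded frame into a row produces visualizations like those in Figure B.1.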
C Detailed Related Work

RL for locomotion A common approach is to train RL agents from large amounts of simulated data under domain and dynamics randomization (Peng et al., 2018; Lee et al., 2020; Rudin et al., 2021; Siekmann et al., 2021; Escontrela et al., 2022; Miki et al., 2022; Kumar et al., 2021; Rusu et al., 2016; Bohez et al., 2022), then freezing the learned policy and deploying it to the real world. Smith et al. (2021) explored pre-training policies in simulation and fine-tuning them with real world data. Yang et al. (2019) investigate learning a dynamics model using a multi-step loss and using model predictive control to accomplish a specified task. Yang et al. (2022) train locomotion policies in the real world but require a recovery controller trained in simulation to avoid unsafe states. In contrast, we use no simulators or reset policies and directly train on the physical robot. While prior work in locomotion has successfully learned walking behaviors in the real world, these works generally required several domain-specific assumptions or pretraining with simulators. Ha et al. (2020) achieved successful walking on the Minitaur robot in 90 minutes. However, the authors manually programmed a reset policy that was used when the robot fell on its back, while in our work the robot must learn to flip over and stand up. Additionally, the Minitaur robot is simpler than the A1, as it has 8 actuators compared to 12 on the A1. In recent work, Smith et al. (2022) utilize a high update-to-data ratio (UTD) RL algorithm to learn walking from 20 minutes of robot training data. However, their work assumes the availability of a reset policy and therefore constitutes a different learning problem from the one we tackle of learning to flip over and walk from scratch. Additionally, we show that our approach generalizes to environments with image observations and sparse rewards.

RL for manipulation Learning promises to enable robot manipulators to solve contact-rich tasks in open real world environments. One class of methods attempts to scale up experience collection through a fleet of robots (Kalashnikov et al., 2018; 2021; Ebert et al., 2021; Dasari et al., 2019; Levine et al., 2018). In contrast, we only leverage one robot, but parallelize an agent's experience by using the learned world model. Another common approach is to leverage expert demonstrations or other task priors (Pinto and Gupta, 2015; Ha and Song, 2021; Xie et al., 2019; Schoettler et al., 2019; Sivakumar et al., 2022). James and Davison (2021) and James et al. (2021) leverage a few demonstrations to increase the sample-efficiency of Q-learning by focusing the learner on important aspects of the scene. Other approaches, as in locomotion, first utilize a simulator and then transfer to the real world (Tzeng et al., 2015; Akkaya et al., 2019; OpenAI et al., 2018; Irpan et al., 2020). Our work focuses on single-robot environments where the agent must learn through a small amount of interaction with the world. Meanwhile, the Google Arm Farm line of work by Levine et al. leverages over 580k grasp attempts gathered by 7 robots over 4 months. We believe that a method such as Dreamer could benefit greatly from this scale of training data; however, it is unlikely that works such as MT-OPT/QT-OPT (Kalashnikov et al., 2018; 2021) would work well in the low-data regime that Dreamer excels in.

Model-based RL Due to its higher sample-efficiency over model-free methods, model-based RL is a promising approach to learning on real world robots (Deisenroth et al., 2013). A model-based method first learns a dynamics model, which can then be used to plan actions (Nagabandi et al., 2019; Hafner et al., 2018; Chua et al., 2018; Nagabandi et al., 2017; Becker-Ehmck et al., 2020) or serve as a simulator to learn a policy network, as in Dreamer (Hafner et al., 2019; 2020). One approach to tackle the high visual complexity of the world is to learn an action-conditioned video prediction model (Finn and Levine, 2017; Ebert et al., 2018; Finn et al., 2016). One downside of this approach is the need to directly predict high-dimensional observations, which can be computationally inefficient and easily drift. Dreamer learns a dynamics model in a latent space, allowing more efficient rollouts, and avoids relying on high-quality visual reconstructions for the policy.
Another line of work proposes to learn latent dynamics models without having to reconstruct inputs (Deng et al., 2021; Okada and Taniguchi, 2021; Bharadhwaj et al., 2022; Paster et al., 2021), which we see as a promising approach for supporting moving viewpoints in cluttered environments.
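The planning-based methods cited above share a simple recipe: sample candidate action sequences, score them under the learned dynamics model, and execute the first action of the best sequence before replanning. Below is a minimal random-shooting sketch of that recipe; the toy linear dynamics, the reward, and all dimensions are illustrative placeholders, not any cited system.

```python
import numpy as np

rng = np.random.default_rng(0)
STATE_DIM, ACTION_DIM, HORIZON, CANDIDATES = 4, 2, 10, 256
goal = np.ones(STATE_DIM)

def learned_dynamics(state, action):
    # Placeholder for a learned model f(s, a) -> s'; here a toy linear system.
    return 0.9 * state + 0.1 * np.pad(action, (0, STATE_DIM - ACTION_DIM))

def reward(state):
    return -np.sum((state - goal) ** 2)   # dense reward: negative distance to goal

def plan(state):
    """Random-shooting MPC: sample action sequences, keep the best first action."""
    best_return, best_action = -np.inf, None
    for _ in range(CANDIDATES):
        seq = rng.uniform(-1, 1, size=(HORIZON, ACTION_DIM))
        s, total = state, 0.0
        for a in seq:                      # imagine the sequence under the model
            s = learned_dynamics(s, a)
            total += reward(s)
        if total > best_return:
            best_return, best_action = total, seq[0]
    return best_action                     # execute only the first action, then replan

print(plan(np.zeros(STATE_DIM)))
```

Dreamer instead amortizes this search into an actor network trained on imagined latent rollouts, avoiding a planning loop at control time.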
D Hyperparameters
| Name                    | Symbol | Value           |
|-------------------------|--------|-----------------|
| General                 |        |                 |
| Replay capacity (FIFO)  | —      | 10^6            |
| Start learning          | —      | 10^4            |
| Batch size              | B      | 32              |
| Batch length            | T      | 32              |
| MLP size                | —      | 4 × 512         |
| Activation              | —      | LayerNorm + ELU |
| World Model             |        |                 |
| RSSM size               | —      | 512             |
| Number of latents       | —      | 32              |
| Classes per latent      | —      | 32              |
| KL balancing            | —      | 0.8             |
| Actor Critic            |        |                 |
| Imagination horizon     | H      | 15              |
| Discount                | γ      | 0.95            |
| Return lambda           | λ      | 0.95            |
| Target update interval  | —      | 100             |
| All Optimizers          |        |                 |
| Gradient clipping       | —      | 100             |
| Learning rate           | —      | 10^-4           |
| Adam epsilon            | ε      | 10^-6           |
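For readers who prefer code over tables, the same settings can be collected into a flat configuration dictionary. The sketch below simply mirrors the table; the key names are our own naming, not the schema of any released Dreamer implementation.

```python
# Hyperparameters from the table above; key names are illustrative only.
CONFIG = {
    # General
    "replay_capacity": 10**6,       # FIFO replay buffer
    "start_learning": 10**4,        # steps collected before training begins
    "batch_size": 32,               # B
    "batch_length": 32,             # T
    "mlp_size": (4, 512),           # 4 layers x 512 units
    "activation": "LayerNorm+ELU",
    # World model
    "rssm_size": 512,
    "num_latents": 32,
    "classes_per_latent": 32,
    "kl_balancing": 0.8,
    # Actor critic
    "imagination_horizon": 15,      # H
    "discount": 0.95,               # gamma
    "return_lambda": 0.95,          # lambda
    "target_update_interval": 100,
    # All optimizers
    "gradient_clipping": 100,
    "learning_rate": 1e-4,
    "adam_epsilon": 1e-6,
}
```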
", "type": "table", "image_path": "476b14497f73d983953dbe7a34cff454d210243e28b1aa97dafb54ae6a92e42f.jpg" } ] } ], "index": 2, "virtual_lines": [ { "bbox": [ 124, 102, 483, 201.0 ], "spans": [], "index": 1 }, { "bbox": [ 124, 201.0, 483, 300.0 ], "spans": [], "index": 2 }, { "bbox": [ 124, 300.0, 483, 399.0 ], "spans": [], "index": 3 } ] } ], "index": 2 }, { "type": "title", "bbox": [ 106, 418, 311, 433 ], "lines": [ { "bbox": [ 105, 418, 311, 433 ], "spans": [ { "bbox": [ 105, 418, 311, 433 ], "score": 1.0, "content": "E Environment and Hardware Details", "type": "text" } ], "index": 4 } ], "index": 4 }, { "type": "text", "bbox": [ 108, 440, 504, 464 ], "lines": [ { "bbox": [ 106, 439, 506, 453 ], "spans": [ { "bbox": [ 106, 439, 506, 453 ], "score": 1.0, "content": "For every robot setup that involved vision (UR5, XArm, Sphero), we used a RealSense D435 camera", "type": "text" } ], "index": 5 }, { "bbox": [ 106, 452, 330, 465 ], "spans": [ { "bbox": [ 106, 452, 330, 465 ], "score": 1.0, "content": "positioned to offer a fixed 3rd person view of the scene.", "type": "text" } ], "index": 6 } ], "index": 5.5 }, { "type": "text", "bbox": [ 106, 468, 505, 562 ], "lines": [ { "bbox": [ 105, 466, 505, 482 ], "spans": [ { "bbox": [ 105, 466, 505, 482 ], "score": 1.0, "content": "A1 We used the A1 quadrupedal robot by Unitree. The RL policy outputs actions at a frequency", "type": "text" } ], "index": 7 }, { "bbox": [ 106, 480, 505, 493 ], "spans": [ { "bbox": [ 106, 480, 505, 493 ], "score": 1.0, "content": "that is too high for the PD controller to track, which we overcome by lowpass filtering the action", "type": "text" } ], "index": 8 }, { "bbox": [ 105, 491, 506, 505 ], "spans": [ { "bbox": [ 105, 491, 506, 505 ], "score": 1.0, "content": "sequence. The joint range allows the legs to self-collide with the body, which can be damaging to", "type": "text" } ], "index": 9 }, { "bbox": [ 105, 503, 506, 516 ], "spans": [ { "bbox": [ 105, 503, 506, 516 ], "score": 1.0, "content": "the motors and increase battery consumption. We limited the joint range to decrease self-collisions.", "type": "text" } ], "index": 10 }, { "bbox": [ 106, 514, 505, 527 ], "spans": [ { "bbox": [ 106, 514, 505, 527 ], "score": 1.0, "content": "Finally, the EKF velocity estimator relies on foot-ground contact events to prevent significant drift in", "type": "text" } ], "index": 11 }, { "bbox": [ 106, 527, 505, 539 ], "spans": [ { "bbox": [ 106, 527, 505, 539 ], "score": 1.0, "content": "the estimates, so we employ a curriculum reward function that does not reward the robot for forward", "type": "text" } ], "index": 12 }, { "bbox": [ 106, 538, 506, 551 ], "spans": [ { "bbox": [ 106, 538, 506, 551 ], "score": 1.0, "content": "velocity until the robot is upright with extended legs. 
We also designed a shell which we 3D printed", "type": "text" } ], "index": 13 }, { "bbox": [ 105, 550, 451, 563 ], "spans": [ { "bbox": [ 105, 550, 451, 563 ], "score": 1.0, "content": "in order to better protect the cables and hardware and provide a smoother rolling over.", "type": "text" } ], "index": 14 } ], "index": 10.5 }, { "type": "text", "bbox": [ 106, 565, 505, 671 ], "lines": [ { "bbox": [ 106, 565, 505, 578 ], "spans": [ { "bbox": [ 106, 565, 505, 578 ], "score": 1.0, "content": "XArm & UR5 We utilized slanted bins to prevent objects from leaving the work area during the", "type": "text" } ], "index": 15 }, { "bbox": [ 106, 578, 506, 590 ], "spans": [ { "bbox": [ 106, 578, 506, 590 ], "score": 1.0, "content": "long-running pick and place experiments on the UR5, which is common practice Levine et al. (2018);", "type": "text" } ], "index": 16 }, { "bbox": [ 105, 588, 506, 602 ], "spans": [ { "bbox": [ 105, 588, 506, 602 ], "score": 1.0, "content": "Kalashnikov et al. (2018). We also added a partition behind the setup to keep the background constant.", "type": "text" } ], "index": 17 }, { "bbox": [ 105, 600, 505, 615 ], "spans": [ { "bbox": [ 105, 600, 505, 615 ], "score": 1.0, "content": "It would be interesting to study how a gripper-mounted camera would impact policy performance", "type": "text" } ], "index": 18 }, { "bbox": [ 106, 613, 505, 625 ], "spans": [ { "bbox": [ 106, 613, 505, 625 ], "score": 1.0, "content": "Hsu et al. (2022), however we report strong results without this design choice. For the XArm we", "type": "text" } ], "index": 19 }, { "bbox": [ 105, 624, 505, 637 ], "spans": [ { "bbox": [ 105, 624, 505, 637 ], "score": 1.0, "content": "use the uFactory xArm Gripper. For the UR5, we use the Robotiq 2F-85 parallel jaw gripper. The", "type": "text" } ], "index": 20 }, { "bbox": [ 105, 635, 505, 649 ], "spans": [ { "bbox": [ 105, 635, 505, 649 ], "score": 1.0, "content": "bin locations are predetermined and provided as part of the environment to prevent the robot from", "type": "text" } ], "index": 21 }, { "bbox": [ 106, 648, 505, 660 ], "spans": [ { "bbox": [ 106, 648, 316, 660 ], "score": 1.0, "content": "colliding with the bin. In addition, movement in the", "type": "text" }, { "bbox": [ 317, 648, 325, 658 ], "score": 0.31, "content": "\\textsf { Z }", "type": "inline_equation" }, { "bbox": [ 325, 648, 505, 660 ], "score": 1.0, "content": "axis is only enabled while holding an object", "type": "text" } ], "index": 22 }, { "bbox": [ 106, 660, 354, 672 ], "spans": [ { "bbox": [ 106, 660, 354, 672 ], "score": 1.0, "content": "and the gripper automatically opens once above the other bin.", "type": "text" } ], "index": 23 } ], "index": 19 }, { "type": "text", "bbox": [ 107, 675, 504, 722 ], "lines": [ { "bbox": [ 106, 675, 505, 688 ], "spans": [ { "bbox": [ 106, 676, 285, 688 ], "score": 1.0, "content": "Sphero We used a rectangular enclosure of", "type": "text" }, { "bbox": [ 285, 675, 335, 686 ], "score": 0.91, "content": "0 . 8 \\times 0 . 8 \\mathrm { { m ^ { 2 } } }", "type": "inline_equation" }, { "bbox": [ 336, 676, 505, 688 ], "score": 1.0, "content": "to keep the sphero robot within the camera", "type": "text" } ], "index": 24 }, { "bbox": [ 105, 687, 506, 700 ], "spans": [ { "bbox": [ 105, 687, 506, 700 ], "score": 1.0, "content": "view. 
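The action low-pass filter mentioned above can be as simple as an exponential moving average between the policy output and the previously commanded target. The sketch below illustrates this; the smoothing factor `alpha` is an illustrative choice, not a value reported here.

```python
import numpy as np

class ActionLowPassFilter:
    """Exponential moving average over policy actions to smooth PD targets."""

    def __init__(self, action_dim: int, alpha: float = 0.2):
        self.alpha = alpha                   # smoothing factor (illustrative value)
        self.prev = np.zeros(action_dim)     # last commanded joint target

    def __call__(self, action: np.ndarray) -> np.ndarray:
        # Blend the new policy output with the previous command.
        self.prev = self.alpha * action + (1 - self.alpha) * self.prev
        return self.prev                     # smoothed target sent to the PD controller

filt = ActionLowPassFilter(action_dim=12)    # 12 actuators on the A1
for raw_action in np.random.default_rng(0).uniform(-1, 1, size=(5, 12)):
    smoothed = filt(raw_action)
print(smoothed.shape)                        # (12,)
```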
XArm & UR5 We utilized slanted bins to prevent objects from leaving the work area during the long-running pick and place experiments on the UR5, which is common practice (Levine et al., 2018; Kalashnikov et al., 2018). We also added a partition behind the setup to keep the background constant. It would be interesting to study how a gripper-mounted camera would impact policy performance (Hsu et al., 2022); however, we report strong results without this design choice. For the XArm, we use the uFactory xArm Gripper. For the UR5, we use the Robotiq 2F-85 parallel jaw gripper. The bin locations are predetermined and provided as part of the environment to prevent the robot from colliding with the bin. In addition, movement in the z-axis is only enabled while holding an object, and the gripper automatically opens once above the other bin.
Sphero We used a rectangular enclosure of 0.8 × 0.8 m² to keep the Sphero robot within the camera view. We used a simple OpenCV script to estimate the L2 distance between the Sphero and the goal position, providing a dense reward for policy optimization. This positional information was not provided to the agent, which had to infer it from the raw top-down images.
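A reward script of this kind can be approximated with standard color thresholding in OpenCV: segment the robot in the top-down frame, take the centroid of the mask, and return the negative L2 distance to the goal. The sketch below assumes an HSV color range and image scale that are illustrative guesses, not the exact values used in our setup.

```python
import cv2
import numpy as np

GOAL_XY = np.array([0.4, 0.4])             # goal position in meters (illustrative)
METERS_PER_PIXEL = 0.8 / 256               # 0.8 m enclosure seen as a 256 px image

def sphero_reward(top_down_bgr: np.ndarray) -> float:
    """Dense reward: negative L2 distance between the detected robot and the goal."""
    hsv = cv2.cvtColor(top_down_bgr, cv2.COLOR_BGR2HSV)
    # Illustrative HSV range; actual thresholds depend on the robot's LED color.
    mask = cv2.inRange(hsv, (90, 80, 80), (130, 255, 255))
    ys, xs = np.nonzero(mask)
    if len(xs) == 0:
        return -float(np.linalg.norm(GOAL_XY))   # robot not detected this frame
    robot_xy = np.array([xs.mean(), ys.mean()]) * METERS_PER_PIXEL
    return -float(np.linalg.norm(robot_xy - GOAL_XY))

frame = np.zeros((256, 256, 3), dtype=np.uint8)  # dummy frame for illustration
print(sphero_reward(frame))
```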