diff --git a/.gitattributes b/.gitattributes index bed0738c7eeb449bca98b5d2f33c89a1ee56349a..aa0842af27b361dd0047608239cbaf6eb21c8097 100644 --- a/.gitattributes +++ b/.gitattributes @@ -58,3 +58,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text # Video files - compressed *.mp4 filter=lfs diff=lfs merge=lfs -text *.webm filter=lfs diff=lfs merge=lfs -text +parse/train/wK2fDDJ5VcF/wK2fDDJ5VcF_span.pdf filter=lfs diff=lfs merge=lfs -text diff --git a/README.md b/README.md new file mode 100644 index 0000000000000000000000000000000000000000..37b49ed3dce075cb9de12a55b98c7dd96414da4c --- /dev/null +++ b/README.md @@ -0,0 +1,104 @@ +--- +license: other +pretty_name: RPC-Bench +task_categories: + - question-answering +language: + - en +tags: + - research-paper + - document-understanding + - multimodal + - benchmark + - llm + - vlm +--- + +
+ +# RPC-Bench: A Fine-grained Benchmark for Research Paper Comprehension + +
+ +

+ 🌐 Project Page β€’ + πŸ’» GitHub β€’ + πŸ“– Paper β€’ + πŸ€— Paper β€’ + 🧭 ModelScope +

+ +
+ +
+ +RPC-Bench is a fine-grained benchmark for research paper comprehension. It is built from review-rebuttal exchanges of high-quality academic papers and supports both text-only and visual evaluation through complementary paper representations. + +## Data Structure + +RPC-Bench is split into `train`, `dev`, and `test` subsets. Each subset is stored in the dataset structure and recorded in `manifest.jsonl`. + +`md/` contains Markdown files parsed from each paper by MinerU. These files provide the text input for LLM-oriented evaluation. + +`parse/` contains the full MinerU parsing outputs for each paper, including structured layout and content artifacts. + +`pdf/` contains the original paper PDFs. + +`vlm/` contains page images rendered from the PDFs with PyMuPDF at 200 DPI for VLM-oriented evaluation. + +```text +RPC-Bench/ +β”œβ”€β”€ README.md +β”œβ”€β”€ manifest.jsonl +β”œβ”€β”€ parse/ +β”‚ β”œβ”€β”€ train/ +β”‚ β”‚ └── / +β”‚ β”œβ”€β”€ dev/ +β”‚ β”‚ └── / +β”‚ └── test/ +β”‚ └── / +β”œβ”€β”€ md/ +β”‚ β”œβ”€β”€ train/ +β”‚ β”‚ └── / +β”‚ β”‚ └── .md +β”‚ β”œβ”€β”€ dev/ +β”‚ β”‚ └── / +β”‚ β”‚ └── .md +β”‚ └── test/ +β”‚ └── / +β”‚ └── .md +β”œβ”€β”€ pdf/ +β”‚ β”œβ”€β”€ train/ +β”‚ β”‚ └── .pdf +β”‚ β”œβ”€β”€ dev/ +β”‚ β”‚ └── .pdf +β”‚ └── test/ +β”‚ └── .pdf +└── vlm/ + β”œβ”€β”€ train/ + β”‚ └── / + β”œβ”€β”€ dev/ + β”‚ └── / + └── test/ + └── / +``` + +## Practical Uses + +RPC-Bench can be used to try paper-centric systems that require broader document understanding rather than local snippet matching. + +- Research paper comprehension: try models on full-paper understanding, including core concepts, methods, and experimental findings. +- Long-context evaluation: try whether longer context windows or long-context architectures improve document-level reasoning. +- Multimodal reasoning: try models that combine textual evidence with page-level figures, tables, and diagrams in the original PDF layout. 
+- RAG system diagnosis: try retrieval, chunking, and evidence-fusion strategies for paper-centric workflows beyond snippet-level retrieval accuracy. + +## Citation + +```bibtex +@article{chen2026rpc, + title={RPC-Bench: A Fine-grained Benchmark for Research Paper Comprehension}, + author={Chen, Yelin and Zhang, Fanjin and Sun, Suping and Pang, Yunhe and Wang, Yuanchun and Song, Jian and Li, Xiaoyan and Hou, Lei and Zhao, Shu and Tang, Jie and others}, + journal={arXiv preprint arXiv:2601.14289}, + year={2026} +} +``` diff --git a/dataset_card.md b/dataset_card.md new file mode 100644 index 0000000000000000000000000000000000000000..37b49ed3dce075cb9de12a55b98c7dd96414da4c --- /dev/null +++ b/dataset_card.md @@ -0,0 +1,104 @@ +--- +license: other +pretty_name: RPC-Bench +task_categories: + - question-answering +language: + - en +tags: + - research-paper + - document-understanding + - multimodal + - benchmark + - llm + - vlm +--- + +
+ +# RPC-Bench: A Fine-grained Benchmark for Research Paper Comprehension + +
+ +

+ 🌐 Project Page β€’ + πŸ’» GitHub β€’ + πŸ“– Paper β€’ + πŸ€— Paper β€’ + 🧭 ModelScope +

+ +
+ +
+ +RPC-Bench is a fine-grained benchmark for research paper comprehension. It is built from review-rebuttal exchanges of high-quality academic papers and supports both text-only and visual evaluation through complementary paper representations. + +## Data Structure + +RPC-Bench is split into `train`, `dev`, and `test` subsets. Each subset is stored in the dataset structure and recorded in `manifest.jsonl`. + +`md/` contains Markdown files parsed from each paper by MinerU. These files provide the text input for LLM-oriented evaluation. + +`parse/` contains the full MinerU parsing outputs for each paper, including structured layout and content artifacts. + +`pdf/` contains the original paper PDFs. + +`vlm/` contains page images rendered from the PDFs with PyMuPDF at 200 DPI for VLM-oriented evaluation. + +```text +RPC-Bench/ +β”œβ”€β”€ README.md +β”œβ”€β”€ manifest.jsonl +β”œβ”€β”€ parse/ +β”‚ β”œβ”€β”€ train/ +β”‚ β”‚ └── / +β”‚ β”œβ”€β”€ dev/ +β”‚ β”‚ └── / +β”‚ └── test/ +β”‚ └── / +β”œβ”€β”€ md/ +β”‚ β”œβ”€β”€ train/ +β”‚ β”‚ └── / +β”‚ β”‚ └── .md +β”‚ β”œβ”€β”€ dev/ +β”‚ β”‚ └── / +β”‚ β”‚ └── .md +β”‚ └── test/ +β”‚ └── / +β”‚ └── .md +β”œβ”€β”€ pdf/ +β”‚ β”œβ”€β”€ train/ +β”‚ β”‚ └── .pdf +β”‚ β”œβ”€β”€ dev/ +β”‚ β”‚ └── .pdf +β”‚ └── test/ +β”‚ └── .pdf +└── vlm/ + β”œβ”€β”€ train/ + β”‚ └── / + β”œβ”€β”€ dev/ + β”‚ └── / + └── test/ + └── / +``` + +## Practical Uses + +RPC-Bench can be used to try paper-centric systems that require broader document understanding rather than local snippet matching. + +- Research paper comprehension: try models on full-paper understanding, including core concepts, methods, and experimental findings. +- Long-context evaluation: try whether longer context windows or long-context architectures improve document-level reasoning. +- Multimodal reasoning: try models that combine textual evidence with page-level figures, tables, and diagrams in the original PDF layout. 
+- RAG system diagnosis: try retrieval, chunking, and evidence-fusion strategies for paper-centric workflows beyond snippet-level retrieval accuracy. + +## Citation + +```bibtex +@article{chen2026rpc, + title={RPC-Bench: A Fine-grained Benchmark for Research Paper Comprehension}, + author={Chen, Yelin and Zhang, Fanjin and Sun, Suping and Pang, Yunhe and Wang, Yuanchun and Song, Jian and Li, Xiaoyan and Hou, Lei and Zhao, Shu and Tang, Jie and others}, + journal={arXiv preprint arXiv:2601.14289}, + year={2026} +} +``` diff --git a/manifest.jsonl b/manifest.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2707cf4773544403722c90a241d1f63ef63eb162 --- /dev/null +++ b/manifest.jsonl @@ -0,0 +1,6 @@ +{"id": "wK2fDDJ5VcF", "split": "train", "source_json": "/workspace/yelin/PRC-Bench/benchmark/train_new.json", "parse": "parse/train/wK2fDDJ5VcF", "md": "md/train/wK2fDDJ5VcF/wK2fDDJ5VcF.md", "pdf": "pdf/train/wK2fDDJ5VcF.pdf", "pdf_source_split": "train", "vlm": "vlm/train/wK2fDDJ5VcF"} +{"id": "fy4ZBWxYbIo", "split": "train", "source_json": "/workspace/yelin/PRC-Bench/benchmark/train_new.json", "parse": "parse/train/fy4ZBWxYbIo", "md": "md/train/fy4ZBWxYbIo/fy4ZBWxYbIo.md", "pdf": "pdf/train/fy4ZBWxYbIo.pdf", "pdf_source_split": "train", "vlm": "vlm/train/fy4ZBWxYbIo"} +{"id": "a0SRWViFYW", "split": "dev", "source_json": "/workspace/yelin/PRC-Bench/benchmark/dev_new.json", "parse": "parse/dev/a0SRWViFYW", "md": "md/dev/a0SRWViFYW/a0SRWViFYW.md", "pdf": "pdf/dev/a0SRWViFYW.pdf", "pdf_source_split": "dev", "vlm": "vlm/dev/a0SRWViFYW"} +{"id": "3RBY8fKjHeu", "split": "dev", "source_json": "/workspace/yelin/PRC-Bench/benchmark/dev_new.json", "parse": "parse/dev/3RBY8fKjHeu", "md": "md/dev/3RBY8fKjHeu/3RBY8fKjHeu.md", "pdf": "pdf/dev/3RBY8fKjHeu.pdf", "pdf_source_split": "dev", "vlm": "vlm/dev/3RBY8fKjHeu"} +{"id": "rzQGHXNReU", "split": "test", "source_json": "/workspace/yelin/PRC-Bench/benchmark/test.json", "parse": "parse/test/rzQGHXNReU", "md": 
"md/test/rzQGHXNReU/rzQGHXNReU.md", "pdf": "pdf/test/rzQGHXNReU.pdf", "pdf_source_split": "test", "vlm": "vlm/test/rzQGHXNReU"} +{"id": "TrloAXEJ2B", "split": "test", "source_json": "/workspace/yelin/PRC-Bench/benchmark/test.json", "parse": "parse/test/TrloAXEJ2B", "md": "md/test/TrloAXEJ2B/TrloAXEJ2B.md", "pdf": "pdf/test/TrloAXEJ2B.pdf", "pdf_source_split": "test", "vlm": "vlm/test/TrloAXEJ2B"} diff --git a/md/dev/3RBY8fKjHeu/3RBY8fKjHeu.md b/md/dev/3RBY8fKjHeu/3RBY8fKjHeu.md new file mode 100644 index 0000000000000000000000000000000000000000..25b944c3974c3467c8e4f1b34efa8a0bb617ba58 --- /dev/null +++ b/md/dev/3RBY8fKjHeu/3RBY8fKjHeu.md @@ -0,0 +1,243 @@ +# DayDreamer: World Models for Physical Robot Learning + +# Philipp Wu\* + +Alejandro Escontrela\* Danijar Hafner\* + +Ken Goldberg Pieter Abbeel + +University of California, Berkeley \*Equal contribution + +Abstract: To solve tasks in complex environments, robots need to learn from experience. Deep reinforcement learning is a common approach to robot learning but requires a large amount of trial and error to learn, limiting its deployment in the physical world. As a consequence, many advances in robot learning rely on simulators. On the other hand, learning inside of simulators fails to capture the complexity of the real world, is prone to simulator inaccuracies, and the resulting behaviors do not adapt to changes in the world. The Dreamer algorithm has recently shown great promise for learning from small amounts of interaction by planning within a learned world model, outperforming pure reinforcement learning in video games. Learning a world model to predict the outcomes of potential actions enables planning in imagination, reducing the amount of trial and error needed in the real environment. However, it is unknown whether Dreamer can facilitate faster learning on physical robots. In this paper, we apply Dreamer to 4 robots to learn online and directly in the real world, without any simulators. 
Dreamer trains a quadruped robot to roll off its back, stand up, and walk from scratch and without resets in only 1 hour. We then push the robot and find that Dreamer adapts within 10 minutes to withstand perturbations or quickly roll over and stand back up. On two different robotic arms, Dreamer learns to pick and place objects from camera images and sparse rewards, approaching human-level teleoperation performance. On a wheeled robot, Dreamer learns to navigate to a goal position purely from camera images, automatically resolving ambiguity about the robot orientation. Using the same hyperparameters across all experiments, we find that Dreamer is capable of online learning in the real world, which establishes a strong baseline. We release our infrastructure for future applications of world models to robot learning. Videos are available on the project website: https://danijar.com/daydreamer + +![](images/17f2d11eee9937e70f62a1993623ebccd221887d067e71919c350fa57662f4d3.jpg) +Figure 1: To study the applicability of Dreamer for sample-efficient robot learning, we apply the algorithm to learn robot locomotion, manipulation, and navigation tasks from scratch in the real world on 4 robots, without simulators. The tasks evaluate a diverse range of challenges, including continuous and discrete actions, dense and sparse rewards, proprioceptive and camera inputs, as well as sensor fusion of multiple input modalities. Learning successfully using the same hyperparameters across all experiments, Dreamer establishes a strong baseline for real world robot learning. + +# 1 Introduction + +Teaching robots to solve complex tasks in the real world is a foundational problem of robotics research. Deep reinforcement learning (RL) offers a popular approach to robot learning that enables robots to improve their behavior over time through trial and error. However, current algorithms require too much interaction with the environment to learn successful behaviors. 
Recently, modern world models have shown great promise for data-efficient learning in simulated domains and video games (Hafner et al., 2019; 2020). Learning world models from past experience enables robots to imagine the future outcomes of potential actions, reducing the amount of trial and error in the real environment needed to learn. + +While learning accurate world models can be challenging, they offer compelling properties for robot learning. By predicting future outcomes, world models allow for planning and behavior learning given only small amounts of real world interaction (Gal et al., 2016; Ebert et al., 2018). Moreover, world models summarize general dynamics knowledge about the environment that, once learned, could be reused for a wide range of downstream tasks (Sekar et al., 2020). World models also learn representations that fuse multiple sensor modalities and integrate them into latent states, reducing the need for sophisticated state estimators. Finally, world models generalize well from available offline data (Yu et al., 2021), which further accelerates learning in the real world. + +![](images/e30f877426a1aa2686b70c08629a56889403c96105b017890f8ea57b7982c4a2.jpg) +Figure 2: Dreamer follows a simple pipeline for online learning on robot hardware without simulators. The current learned policy collects experience on the robot. This experience is added to the replay buffer. The world model is trained on replayed off-policy sequences through supervised learning. An actor critic algorithm optimizes a neural network policy from imagined rollouts in the latent space of the world model. We parallelize data collection and neural network learning. + +Despite the promises of world models, learning accurate world models for the real world is an open challenge. 
In this paper, we leverage recent advances of the Dreamer world model for training a variety of robots in the most straight-forward and fundamental problem setting: online reinforcement learning in the real world, without simulators or demonstrations. As shown in Figure 2, Dreamer learns a world model from a replay buffer of past experience, learns behaviors from rollouts imagined in the latent space of the world model, and continuously interacts with the environment to explore and improve its behaviors. Our aim is to push the limits of robot learning directly in the real world and offer a robust platform to enable future work that develops the benefits of world models for robot learning. The key contributions of this paper are summarized as follows: + +β€’ Dreamer on Robots We apply Dreamer to 4 robots, demonstrating successful learning directly in the real world, without introducing new algorithms. The tasks cover a range of challenges, including different action spaces, sensory modalities, and reward structures. +β€’ Walking in 1 Hour We teach a quadruped from scratch in the real world to roll off its back, stand up, and walk in only 1 hour. Afterwards, we find that the robot adapts to being pushed within 10 minutes, learning to withstand pushes or quickly roll over and get back on its feet. +β€’ Visual Pick and Place We train robotic arms to pick and place objects from sparse rewards, which requires localizing objects from pixels and fusing images with proprioceptive inputs. The learned behavior outperforms model-free agents and approaches the performance of a human teleoperator using the same control interface as the robot. +β€’ Open Source We publicly release the software infrastructure for all our experiments, which supports different action spaces and sensory modalities, offering a flexible platform for future research of world models for robot learning in the real world. 
+ +![](images/bbd9aa6b3f541685e1ecf9dd1c4451b92904b361a6547ee2e39414769cb64de4.jpg) +Figure 3: Neural Network Training We leverage the Dreamer algorithm (Hafner et al., 2019; 2020) for fast robot learning in real world. Dreamer consists of two main neural network components, the world model and the policy. Left: The world model follows the structure of a deep Kalman filter that is trained on subsequences drawn from the replay buffer. The encoder fuses all sensory modalities into discrete codes. The decoder reconstructs the inputs from the codes, providing a rich learning signal and enabling human inspection of model predictions. A recurrent state-space model (RSSM) is trained to predict future codes given actions, without observing intermediate inputs. + +Right: The world model enables massively parallel policy optimization from imagined rollouts in the compact latent space using a large batch size, without having to reconstruct sensory inputs. Dreamer trains a policy network and value network from the imagined rollouts and a learned reward function. + +# 2 Approach + +We leverage the Dreamer algorithm (Hafner et al., 2019; 2020) for online learning on physical robots, without the need for simulators. Figure 2 shows an overview of the approach. Dreamer learns a world model from a replay buffer of past experiences, uses an actor critic algorithm to learn behaviors from trajectories predicted by the learned model, and deploys its behavior in the environment to continuously grow the replay buffer. We decouple learning updates from data collection to meet latency requirements and to enable fast training without waiting for the environment. In our implementation, a learner thread continuously trains the world model and actor critic behavior, while an actor thread in parallel computes actions for environment interaction. + +World Model Learning The world model is a deep neural network that learns to predict the environment dynamics, as shown in Figure 3 (left). 
Because sensory inputs can be large images, we predict future representations rather than future inputs. This reduces accumulating errors and enables massively parallel training with a large batch size. Thus, the world model can be thought of as a fast simulator of the environment that the robot learns autonomously, starting from a blank slate and continuously improving its model as it explores the real world. The world model is based on the Recurrent State-Space Model (RSSM; Hafner et al., 2018), which consists of four components: + +$$ +{ \begin{array} { r l r l } & { \operatorname { e n c } _ { \theta } { \big ( } s _ { t } \ { \big | } \ s _ { t - 1 } , a _ { t - 1 } , x _ { t } { \big ) } } & & { { \mathrm { D e c o d e r ~ N e t w o r k : } } \quad \operatorname* { d e c } _ { \theta } { \big ( } s _ { t } { \big ) } \approx x _ { t } } \\ & { \operatorname { d y n } _ { \theta } { \big ( } s _ { t } \ { \big | } \ s _ { t - 1 } , a _ { t - 1 } { \big ) } } & & { { \mathrm { R e w a r d ~ N e t w o r k : } } \quad \operatorname { r e w } _ { \theta } { \big ( } s _ { t + 1 } { \big ) } \approx r _ { t } } \end{array} } +$$ + +Physical robots are often equipped with multiple sensors of different modalities, such as proprioceptive joint readings, force sensors, and high-dimensional inputs such as RGB and depth camera images. The encoder network fuses all sensory inputs $x _ { t }$ together into the stochastic representations $z _ { t }$ . The dynamics model learns to predict the sequence of stochastic representations by using its recurrent state $h _ { t }$ . The decoder reconstructs the sensory inputs to provide a rich signal for learning representations and enables human inspection of model predictions. In our experiments, the robot has to discover task rewards by interacting with the real world, which the reward network learns to predict. Using manually specified rewards as a function of the decoded sensory inputs is also possible. 
We optimize all components of the world model jointly by stochastic backpropagation (Kingma and Welling, 2013; Rezende et al., 2014). + +Actor Critic Learning While the world model represents task-agnostic knowledge about the dynamics, the actor critic algorithm learns a behavior that is specific to the task at hand. As shown in Figure 3 (right), we learn behaviors from rollouts that are predicted in the latent space of the world model, without decoding observations. This enables massively parallel behavior learning with typical batch sizes of 16K on a single GPU. The actor critic algorithm consists of an actor network $\pi ( a _ { t } | s _ { t } )$ and a critic network $v ( s _ { t } )$ . + +The role of the actor network is to learn a distribution over successful actions $a _ { t }$ for each latent model state $s _ { t }$ that maximizes the sum of future predicted task rewards. The critic network learns to predict the sum of future task rewards through temporal difference learning (Sutton and Barto, 2018). This allows the algorithm to take into account rewards beyond the planning horizon of $H = 1 6$ steps to learn long-term strategies. Given a predicted trajectory of model states, the critic is trained to regress the return of the trajectory. We compute $\lambda$ -returns following Hafner et al. (2020; 2019): + +$$ +V _ { t } ^ { \lambda } \doteq r _ { t } + \gamma \Big ( ( 1 - \lambda ) v ( s _ { t + 1 } ) + \lambda V _ { t + 1 } ^ { \lambda } \Big ) , \quad V _ { H } ^ { \lambda } \doteq v ( s _ { H } ) . +$$ + +While the critic network is trained to regress the $\lambda$ -returns, the actor network is trained to maximize them. Different gradient estimators are available for computing the policy gradient for optimizing the actor, such as Reinforce (Williams, 1992) and the reparameterization trick (Kingma and Welling, 2013; Rezende et al., 2014) that directly backpropagates return gradients through the differentiable dynamics network (Henaff et al., 2019). 
Following Hafner et al. (2020), we choose reparameterization gradients for continuous control tasks and Reinforce gradients for tasks with discrete actions. In addition to maximizing returns, the actor is also incentivized to maintain high entropy to prevent collapse to a deterministic policy and maintain some amount of exploration throughout training: + +$$ +\begin{array} { r } { \mathcal { L } ( \pi ) \doteq - \operatorname { E } \bigl [ \sum _ { t = 1 } ^ { H } \ln \pi ( a _ { t } \mid s _ { t } ) \mathrm { s g } ( V _ { t } ^ { \lambda } - v ( s _ { t } ) ) + \eta \mathrm { H } \bigl [ \pi ( a _ { t } \mid s _ { t } ) \bigr ] \bigr ] } \end{array} +$$ + +We optimize the actor and critic using the Adam optimizer (Kingma and Ba, 2014). To compute the $\lambda$ -returns, we use a slowly updated copy of the critic network as common in the literature (Mnih et al., 2015; Lillicrap et al., 2015). The actor and critic gradients do not affect the world model, as this would lead to incorrect and overly optimistic model predictions. The hyperparameters are listed in Appendix D. + +# 3 Experiments + +We evaluate Dreamer on 4 robots, each with a different task, and compare its performance to appropriate algorithmic and human baselines. The experiments are representative of common robotic tasks, such as locomotion, manipulation, and navigation. The tasks pose a diverse range of challenges, including continuous and discrete actions, dense and sparse rewards, proprioceptive and image observations, and sensor fusion. The goal of the experiments is to evaluate whether the recent successes of learned world models enables sample-efficient robot learning directly in the real world. Specifically, we aim to answer the following research questions: + +β€’ Does Dreamer enable robot learning directly in the real world, without simulators? β€’ Does Dreamer succeed across various robot platforms, sensory modalities, and action spaces? 
β€’ How does the data-efficiency of Dreamer compare to previous reinforcement learning algorithms? + +Implementation We build on the official implementation of DreamerV2 (Hafner et al., 2020). We develop an asynchronous actor and learner setup, which is essential in environments with high control rates, such as the quadruped, and also accelerates learning for slower environments, such as the robot arms. The actor thread computes online actions for the robot and sends trajectories of 128 time steps to the replay buffer. The learner thread samples data from the replay buffer, updates the world model, and optimizes the policy using imagination rollouts. Policy weights are synced from the learner to the actor every 20 seconds. We use an RSSM with 256 units to speed up the training computation. We use identical hyperparameters across all experiments, enabling off-the-shelf training on different robot embodiments. + +![](images/159d86a4fe017221206965fa98efc6ce35e16bebec7536f231b04a5fa470830b.jpg) +Figure 4: A1 Quadruped Walking Starting from lying on its back with the feet in the air, Dreamer learns to roll over, stand up, and walk in 1 hour of real world training time, without simulators or resets. In contrast, SAC only learns to roll over but neither to stand up nor to walk. For SAC, we also had to help the robot out of a dead-locked leg configuration during training. On the right we show training curves for both SAC and Dreamer. The maximum reward is 14. The filled circles indicate times where the robot fell on its back, requiring the learning of a robust strategy for getting back up. After 1 hour of training, we start pushing the robot and find that it adapts its behavior within 10 minutes to withstand light pushes and quickly roll back on its feet for hard pushes. The graph shows a single training run with the shaded area indicating one standard deviation within each time bin. 
Baselines We compare to a strong learning algorithm for each of our experimental setups. The A1 quadruped robot uses continuous actions and low-dimensional inputs, allowing us to compare to SAC (Haarnoja et al., 2018a;b), a popular algorithm for data-efficient continuous control. For the visual pick and place experiments on the XArm and UR5 robots, inputs are images and proprioceptive readings and actions are discrete, suggesting algorithms from the DQN (Mnih et al., 2015) line of work as baselines. We choose Rainbow (Hessel et al., 2018) as a powerful representative of this category, an algorithm that combines many improvements of DQN. To input the proprioceptive readings, we concatenate them as broadcasted planes to the RGB channels of the image, a common practice in the literature (Schrittwieser et al., 2019). For the UR5, we additionally compare against PPO (Schulman et al., 2017), with similar modifications for fusing image and proprioceptive readings. In addition, we compare against a human operator controlling the robot arm through the robot control interface. For the Sphero navigation task, inputs are images and actions are continuous. The state-of-the-art baseline in this category is DrQv2 (Yarats et al., 2021), which uses image augmentation to increase sample-efficiency. + +# 3.1 A1 Quadruped Walking + +This high-dimensional continuous control task requires training a quadruped robot to roll over from its back, stand up, and walk forward at a fixed target velocity. Prior work in quadruped locomotion requires either extensive training in simulation under domain randomization, using recovery controllers to avoid unsafe states, or defining the action space as parameterized trajectory generators that restrict the space of motions (Rusu et al., 2016; Peng et al., 2018; Rudin et al., 2021; Lee et al., 2020; Yang et al., 2019). In contrast, we train in the end-to-end reinforcement learning setting directly on the robot, without simulators or resets. 
We use the Unitree A1 robot that consists of 12 direct drive motors. The motors are controlled at $2 0 \mathrm { H z }$ via continuous actions that represent motor angles that are realized by a PD controller on the hardware. Actions are filtered with a Butterworth filter to protect the motor from high-frequency actions. The input consists of motor angles, orientations, and angular velocities. Due to space constraints, we manually intervene when the robot has reached the end of the available training area, without modifying the joint configuration or orientation that the robot is in. + +![](images/69e7a0dc11e3a7ecd812dec526777f2a39e7aed63605587ef789903e7f57fb8c.jpg) +Figure 8: Within 10 minutes of perturbing the learned walking behavior, the robot adapts to withstanding pushes or quickly rolling over and back on its feet. + +The reward function is the sum of five terms. An upright reward is computed from the base frame up vector $\hat { z } ^ { T }$ , terms for matching the standing pose are computed from the joint angles of the hips, shoulders, and knees, and a forward velocity term is computed from the projected forward velocity $\boldsymbol { s } _ { v } \boldsymbol { x }$ and the total velocity $s _ { v }$ . Without the reward curriculum, the agent receives spurious reward values due to the velocity estimator’s dependence on foot-ground contact events. Each of the five terms is active while its preceding terms are satisfied to at least 0.7 and otherwise set to 0: + +![](images/69863294723843746383e47fe99dcd32744e499d1c526f51df4121c52ff99fe8.jpg) +Figure 5: UR5 Multi Object Visual Pick and Place This task requires learning to locate three ball objects from third-person camera images, grasp them, and move them into the other bin. The arm is free to move within and above the bins and sparse rewards are given for grasping a ball and for dropping it in the opposite bin. 
The environment requires the world model to learn multi-object dynamics in the real world and the sparse reward structure poses a challenge for policy optimization. Dreamer overcomes the challenges of visual localization and sparse rewards on this task, learning a successful strategy within a few hours of autonomous operation. + +$$ +\begin{array} { r l } { r ^ { \mathrm { u p r } } \doteq ( \hat { z } ^ { T } [ 0 , 0 , 1 ] - 1 ) / 2 } & { { } r ^ { \mathrm { h i p } } \doteq 1 - \frac 1 4 \| q ^ { \mathrm { h i p } } + 0 . 2 \| _ { 1 } \quad r ^ { \mathrm { s h o u l d e r } } \doteq 1 - \frac 1 4 \| q ^ { \mathrm { s h o u l d e r } } + 0 . 2 \| _ { 1 } } \end{array} +$$ + +$$ +\begin{array} { r l } { r ^ { \mathrm { k n e e } } \doteq 1 - \frac 1 4 \parallel q ^ { \mathrm { k n e e } } - 1 . 0 \parallel _ { 1 } } & { { } r ^ { \mathrm { v e l o c i t y } } \doteq 5 \big ( \operatorname* { m a x } ( 0 , ^ { \mathcal { B } } v _ { x } ) / \parallel ^ { \mathcal { B } } v \parallel _ { 2 } \cdot \mathrm { c l i p } ( ^ { \mathcal { B } } v _ { x } / 0 . 3 , - 1 , 1 ) + 1 \big ) } \end{array} +$$ + +As shown in Figure 4, after one hour of training, Dreamer learns to consistently flip the robot over from its back, stand up, and walk forward. In the first 5 minutes of training, the robot manages to roll off its back and land on its feet. 20 minutes later, it learns how to stand up on its feet. About 1 hour into training, the robot learns a pronking gait to walk forward at the desired velocity. After succeeding at this task, we tested the robustness of the algorithms by repeatedly knocking the robot off of its feet with a large pole, shown in Figure 8. Within 10 minutes of additional online learning, the robot adapts and withstand pushes or quickly rolls back on its feet. In comparison, SAC quickly learns to roll off its back but fails to stand up or walk given the small data budget. 
+ +# 3.2 UR5 Multi-Object Visual Pick and Place + +Common in warehouse and logistics environments, pick and place tasks require a robot manipulator to transport items from one bin into another. Figure 5 shows a successful pick and place cycle of this task. The task is challenging because of sparse rewards, the need to infer object positions from pixels, and the challenging dynamics of multiple moving objects. The sensory inputs consist of proprioceptive readings (joint angles, gripper position, end effector Cartesian position) and a 3rd person RGB image of the scene. Successfully grasping one of the 3 objects, detected by partial gripper closure, results in a $+ 1$ reward, releasing the object in the same bin gives a $- 1$ reward, and placing in the opposite bin gives a $+ 1 0$ reward. We control the UR5 robot from Universal Robotics at $2 \ \mathrm { H z }$ . Actions are discrete for moving the end effector in increments along X, Y, and $\textsf { Z }$ axes and for toggling the gripper state. Movement in the Z axis is only enabled while holding an object and the gripper automatically opens once above the correct bin. We estimate human teleoperation performance by recording 3 demonstrators for 20 minutes each, controlling the UR5 with a joystick. + +Dreamer reaches an average pick rate of 2.5 objects per minute within 8 hours. The robot initially struggles to learn as the reward signal is very sparse, but begins to gradually improve after 2 hours of training. The robot first learns to localize the objects and toggles the gripper when near an object. Over time, grasping becomes precise and the robot learns to push objects out of corners. Figure 5 shows the learning curves of Dreamer compared to Rainbow DQN, PPO, and the human baseline. Both Rainbow DQN and PPO only learn the short-sighted behavior of grasping and immediately dropping objects in the same bin. In contrast, Dreamer approaches human-level teleoperation performance after 8 hours. 
We hypothesize that Rainbow DQN and PPO fail because they require larger amounts of experience, which is not feasible for us to collect in the real world. + +# 3.3 XArm Visual Pick and Place + +While the UR5 robot is a high performance industrial robot, the XArm is an accessible low-cost 7 DOF manipulator, which we control at approximately $0.5\,\mathrm{Hz}$. Similar to Section 3.2, the task requires localizing and grasping a soft object and moving it from one bin to another and back, shown in Figure 6. We connect the object to the gripper with a string, which makes it less likely for the object to get stuck in corners at the cost of more complex dynamics. The sparse reward, discrete action space, and observation space match the UR5 setup except for the addition of depth image observations. + +![](images/dfce202941b6d7b7a3b4e91b152da625264b3b1c43837193ab53e137e11b01f3.jpg) +Figure 6: XArm Visual Pick and Place The XArm is an affordable robot arm that operates slower than the UR5. To demonstrate successful learning on this robot, we use a third-person RealSense camera with RGB and depth modalities, as well as proprioceptive inputs for the robot arm, requiring the world model to learn sensor fusion. The pick and place task uses a soft object. While soft objects would be challenging to model accurately in a simulator, Dreamer avoids this issue by directly learning on the real robot without a simulator. While Rainbow and PPO using R3M visual embeddings converge to the local optimum of grasping and ungrasping the object in the same bin, Dreamer learns a successful pick and place policy from sparse rewards in under 10 hours. + +Dreamer learns a policy that enables the XArm to achieve an average pick rate of 3.1 objects per minute in 10 hours of time, which is comparable to human performance on this task. 
Figure 6 shows that Dreamer learns to solve the task within 10 hours, whereas the Rainbow algorithm, a top model-free algorithm for discrete control from pixels, fails to learn. We additionally compare Dreamer against a PPO baseline that utilizes R3M (Nair et al., 2022) pretrained visual embeddings for the state, but notice no improvement in performance. Interestingly, we observed that Dreamer learns to sometimes use the string to pull the object out of a corner before grasping it, demonstrating multi-modal behaviors. Moreover, we observed that when lighting conditions change drastically (such as sharp shadows during sunrise), performance initially collapses but Dreamer then adapts to the changing conditions and exceeds its previous performance after a few hours of additional training, reported in Appendix A. + +# 3.4 Sphero Navigation + +We evaluate Dreamer on a visual navigation task that requires maneuvering a wheeled robot to a fixed goal location given only RGB images as input. We use the Sphero Ollie robot, a cylindrical robot with two controllable motors, which we control through continuous torque commands at $2\,\mathrm{Hz}$. Because the robot is symmetric and the robot only has access to image observations, it has to infer the heading direction from the history of observations. The robot is provided with a dense reward equal to the negative L2 distance, which is computed using an oracle vision pipeline that detects the Sphero’s position (this information is not provided to the agent). As the goal is fixed, after 100 environment steps, we end the episode and randomize the robot’s position through a sequence of high power random motor actions. + +In 2 hours, Dreamer learns to quickly and consistently navigate to the goal and stay near the goal for the remainder of the episode. As shown in Figure 7, Dreamer achieves an average distance to the goal of 0.15, measured in units of the area size and averaged across time steps. 
We find that DrQv2, a model-free algorithm specifically designed for continuous control from pixels, achieves similar performance. This result matches the simulated experiments of Yarats et al. (2021) that showed the two algorithms to perform similarly for continuous control tasks from images. + +# 4 Related Work + +Existing work on robot learning commonly leverages large amounts of simulated experience before deploying to the real world (Rusu et al., 2016; Peng et al., 2018; OpenAI et al., 2018; Lee et al., 2020; Irpan et al., 2020; Kumar et al., 2021; Siekmann et al., 2021; Escontrela et al., 2022), leverage fleets of robots to collect experience datasets (Kalashnikov et al., 2018; Dasari et al., 2019; Kalashnikov et al., 2021; Ebert et al., 2021), or rely on external information such as human expert demonstrations or task priors to achieve sample-efficient learning (Xie et al., 2019; Schoettler et al., 2019; James et al., 2021; Shah and Levine, 2022; Bohez et al., 2022; Sivakumar et al., 2022). However, designing simulated tasks and collecting expert demonstrations is time-consuming. Moreover, many of these approaches require specialized algorithms for leveraging offline experience, demonstrations, or simulator inaccuracies. In contrast, our experiments show that learning end-to-end from rewards in the physical world is feasible for a diverse range of tasks through world models. + +![](images/372acb53fc3f9ab9f178baac319f8b0cb0c4ee1ca96f374bada7397c4ec23630.jpg) +Figure 7: Sphero Navigation This task requires the Sphero robot to navigate to a goal location given a top-down RGB image as the only input. The task requires the robot to localize itself from raw pixels, to infer its orientation from the sequence of past images because it is ambiguous from a single image, and to control the robot from under-actuated motors that require building up momentum over time. Dreamer learns a successful policy on this task in under 2 hours. 
+ +Relatively few works have demonstrated end-to-end learning from scratch in the physical world. Visual Foresight (Finn et al., 2016; Finn and Levine, 2017; Ebert et al., 2018) learns a video prediction model to solve real world tasks by online planning, but is limited to short-horizon tasks and requires generating images during planning, making it computationally expensive. Yang et al. (2019; 2022) learn quadruped locomotion through a model-based approach by predicting foot placement and leveraging a domain-specific controller to achieve them. Ha et al. (2020) learn a quadruped walking policy by relying on a scripted reset policy, so the robot does not have to learn to stand up. SOLAR (Zhang et al., 2019) learns a latent dynamics model from images and demonstrates reaching and pushing with a robot arm. Nagabandi et al. (2019) learns manipulation policies by planning through a learned dynamics model from state observations. In comparison, our experiments show successful learning across 4 challenging robot tasks that cover a wide range of challenges and sensory modalities, with a single learning algorithm and hyperparameter setting. + +# 5 Discussion + +We applied Dreamer to physical robot learning, finding that modern world models enable sampleefficient robot learning for a range of tasks, from scratch in the real world and without simulators. We also find that the approach is generally applicable in that it can solve robot locomotion, manipulation, and navigation tasks without changing hyperparameters. Dreamer taught a quadruped robot to roll off the back, stand up, and walk in 1 hour from scratch, which previously required extensive training in simulation followed by transfer to the real world or parameterized trajectory generators and given reset policies. We also demonstrate learning to pick and place objects from pixels and sparse rewards on two robot arms in 8–10 hours. 
+ +Limitations While Dreamer shows promising results, learning on hardware over many hours creates wear on robots that may require human intervention or repair. Additionally, more work is required to explore the limits of Dreamer and our baselines by training for a longer time. Finally, we see tackling more challenging tasks, potentially by combining the benefits of fast real world learning with those of simulators, as an impactful future research direction. + +Acknowledgements We thank Stephen James and Justin Kerr for helpful suggestions and help with printing the protective shell of the quadruped robot. We thank Ademi Adeniji for help with setting up the XArm robot and Raven Huang for help with setting up the UR5 robot. This work was supported in part by an NSF Fellowship, NSF NRI #2024675, and the Vanier Canada Graduate Scholarship. + +References +D. Hafner, T. Lillicrap, J. Ba, and M. Norouzi. Dream to control: Learning behaviors by latent imagination. arXiv preprint arXiv:1912.01603, 2019. +D. Hafner, T. Lillicrap, M. Norouzi, and J. Ba. Mastering atari with discrete world models. arXiv preprint arXiv:2010.02193, 2020. +Y. Gal, R. McAllister, and C. E. Rasmussen. Improving pilco with bayesian neural network dynamics models. In Data-Efficient Machine Learning workshop, ICML, 2016. +F. Ebert, C. Finn, S. Dasari, A. Xie, A. Lee, and S. Levine. Visual foresight: Model-based deep reinforcement learning for vision-based robotic control. arXiv preprint arXiv:1812.00568, 2018. +R. Sekar, O. Rybkin, K. Daniilidis, P. Abbeel, D. Hafner, and D. Pathak. Planning to explore via selfsupervised world models. In International Conference on Machine Learning, pages 8583–8592. PMLR, 2020. +T. Yu, A. Kumar, R. Rafailov, A. Rajeswaran, S. Levine, and C. Finn. Combo: Conservative offline model-based policy optimization. Advances in neural information processing systems, 34: 28954–28967, 2021. +D. Hafner, T. Lillicrap, I. Fischer, R. Villegas, D. Ha, H. Lee, and J. Davidson. 
Learning latent dynamics for planning from pixels. arXiv preprint arXiv:1811.04551, 2018. +D. P. Kingma and M. Welling. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114, 2013. +D. J. Rezende, S. Mohamed, and D. Wierstra. Stochastic backpropagation and approximate inference in deep generative models. arXiv preprint arXiv:1401.4082, 2014. +R. S. Sutton and A. G. Barto. Reinforcement learning: An introduction. MIT press, 2018. +R. J. Williams. Simple statistical gradient-following algorithms for connectionist reinforcement learning. Machine learning, 8(3-4):229–256, 1992. +M. Henaff, A. Canziani, and Y. LeCun. Model-predictive policy learning with uncertainty regularization for driving in dense traffic. arXiv preprint arXiv:1901.02705, 2019. +D. P. Kingma and J. Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. +V. Mnih, K. Kavukcuoglu, D. Silver, A. A. Rusu, J. Veness, M. G. Bellemare, A. Graves, M. Riedmiller, A. K. Fidjeland, G. Ostrovski, et al. Human-level control through deep reinforcement learning. Nature, 518(7540):529, 2015. +T. P. Lillicrap, J. J. Hunt, A. Pritzel, N. Heess, T. Erez, Y. Tassa, D. Silver, and D. Wierstra. Continuous control with deep reinforcement learning. arXiv preprint arXiv:1509.02971, 2015. +T. Haarnoja, A. Zhou, P. Abbeel, and S. Levine. Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor. arXiv preprint arXiv:1801.01290, 2018a. +T. Haarnoja, A. Zhou, K. Hartikainen, G. Tucker, S. Ha, J. Tan, V. Kumar, H. Zhu, A. Gupta, P. Abbeel, et al. Soft actor-critic algorithms and applications. arXiv preprint arXiv:1812.05905, 2018b. +M. Hessel, J. Modayil, H. Van Hasselt, T. Schaul, G. Ostrovski, W. Dabney, D. Horgan, B. Piot, M. Azar, and D. Silver. Rainbow: Combining improvements in deep reinforcement learning. In Thirty-Second AAAI Conference on Artificial Intelligence, 2018. +J. Schrittwieser, I. Antonoglou, T. Hubert, K. Simonyan, L. Sifre, S. 
Schmitt, A. Guez, E. Lockhart, D. Hassabis, T. Graepel, et al. Mastering atari, go, chess and shogi by planning with a learned model. arXiv preprint arXiv:1911.08265, 2019. +J. Schulman, F. Wolski, P. Dhariwal, A. Radford, and O. Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017. +D. Yarats, R. Fergus, A. Lazaric, and L. Pinto. Mastering visual continuous control: Improved data-augmented reinforcement learning. arXiv preprint arXiv:2107.09645, 2021. +A. A. Rusu, M. Vecerik, T. RothΓΆrl, N. Heess, R. Pascanu, and R. Hadsell. Sim-to-real robot learning from pixels with progressive nets, 2016. +X. B. Peng, M. Andrychowicz, W. Zaremba, and P. Abbeel. Sim-to-real transfer of robotic control with dynamics randomization. In 2018 IEEE International Conference on Robotics and Automation (ICRA), pages 1–8, May 2018. doi:10.1109/ICRA.2018.8460528. +N. Rudin, D. Hoeller, P. Reist, and M. Hutter. Learning to walk in minutes using massively parallel deep reinforcement learning, 2021. +J. Lee, J. Hwangbo, L. Wellhausen, V. Koltun, and M. Hutter. Learning quadrupedal locomotion over challenging terrain. Science Robotics, 5(47), oct 2020. doi:10.1126/scirobotics.abc5986. URL https://doi.org/10.1126%2Fscirobotics.abc5986. +Y. Yang, K. Caluwaerts, A. Iscen, T. Zhang, J. Tan, and V. Sindhwani. Data efficient reinforcement learning for legged robots, 2019. +S. Nair, A. Rajeswaran, V. Kumar, C. Finn, and A. Gupta. R3m: A universal visual representation for robot manipulation, 2022. +OpenAI, M. Andrychowicz, B. Baker, M. Chociej, R. Jozefowicz, B. McGrew, J. Pachocki, A. Petron, M. Plappert, G. Powell, A. Ray, J. Schneider, S. Sidor, J. Tobin, P. Welinder, L. Weng, and W. Zaremba. Learning dexterous in-hand manipulation, 2018. +A. Irpan, C. Harris, J. Ibarz, K. Rao, M. Khansari, and S. Levine. Rl-cyclegan: Improving deep-rl robotics with simulation-to-real. 
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2020), 2020. +A. Kumar, Z. Fu, D. Pathak, and J. Malik. Rma: Rapid motor adaptation for legged robots, 2021. +J. Siekmann, K. Green, J. Warila, A. Fern, and J. Hurst. Blind bipedal stair traversal via sim-to-real reinforcement learning, 2021. +A. Escontrela, X. B. Peng, W. Yu, T. Zhang, A. Iscen, K. Goldberg, and P. Abbeel. Adversarial motion priors make good substitutes for complex reward functions, 2022. +D. Kalashnikov, A. Irpan, P. Pastor, J. Ibarz, A. Herzog, E. Jang, D. Quillen, E. Holly, M. Kalakrishnan, V. Vanhoucke, and S. Levine. Qt-opt: Scalable deep reinforcement learning for vision-based robotic manipulation, 2018. +S. Dasari, F. Ebert, S. Tian, S. Nair, B. Bucher, K. Schmeckpeper, S. Singh, S. Levine, and C. Finn. Robonet: Large-scale multi-robot learning, 2019. +D. Kalashnikov, J. Varley, Y. Chebotar, B. Swanson, R. Jonschkowski, C. Finn, S. Levine, and K. Hausman. Mt-opt: Continuous multi-task robotic reinforcement learning at scale, 2021. +F. Ebert, Y. Yang, K. Schmeckpeper, B. Bucher, G. Georgakis, K. Daniilidis, C. Finn, and S. Levine. Bridge data: Boosting generalization of robotic skills with cross-domain datasets, 2021. +A. Xie, F. Ebert, S. Levine, and C. Finn. Improvisation through physical understanding: Using novel objects as tools with visual foresight. arXiv preprint arXiv:1904.05538, 2019. +G. Schoettler, A. Nair, J. Luo, S. Bahl, J. A. Ojea, E. Solowjow, and S. Levine. Deep reinforcement learning for industrial insertion tasks with visual inputs and natural rewards, 2019. +S. James, K. Wada, T. Laidlow, and A. J. Davison. Coarse-to-fine q-attention: Efficient learning for visual robotic manipulation via discretisation, 2021. +D. Shah and S. Levine. Viking: Vision-based kilometer-scale navigation with geographic hints, 2022. +S. Bohez, S. Tunyasuvunakool, P. Brakel, F. Sadeghi, L. Hasenclever, Y. Tassa, E. Parisotto, J. Humplik, T. Haarnoja, R. 
Hafner, M. Wulfmeier, M. Neunert, B. Moran, N. Siegel, A. Huber, F. Romano, N. Batchelor, F. Casarini, J. Merel, R. Hadsell, and N. Heess. Imitate and repurpose: Learning reusable robot movement skills from human and animal behaviors, 2022. +A. Sivakumar, K. Shaw, and D. Pathak. Robotic telekinesis: Learning a robotic hand imitator by watching humans on youtube, 2022. +C. Finn, I. Goodfellow, and S. Levine. Unsupervised learning for physical interaction through video prediction. In Advances in neural information processing systems, pages 64–72, 2016. +C. Finn and S. Levine. Deep visual foresight for planning robot motion. In Robotics and Automation (ICRA), 2017 IEEE International Conference on, pages 2786–2793. IEEE, 2017. +Y. Yang, T. Zhang, E. Coumans, J. Tan, and B. Boots. Fast and efficient locomotion via learned gait transitions. In Conference on Robot Learning, pages 773–783. PMLR, 2022. +S. Ha, P. Xu, Z. Tan, S. Levine, and J. Tan. Learning to walk in the real world with minimal human effort. arXiv preprint arXiv:2002.08550, 2020. +M. Zhang, S. Vikram, L. Smith, P. Abbeel, M. Johnson, and S. Levine. Solar: deep structured representations for model-based reinforcement learning. In International Conference on Machine Learning, 2019. +A. Nagabandi, K. Konoglie, S. Levine, and V. Kumar. Deep dynamics models for learning dexterous manipulation, 2019. +G. I. Parisi, R. Kemker, J. L. Part, C. Kanan, and S. Wermter. Continual lifelong learning with neural networks: A review. Neural Networks, 113:54–71, 2019. ISSN 0893-6080. +T. Miki, J. Lee, J. Hwangbo, L. Wellhausen, V. Koltun, and M. Hutter. Learning robust perceptive locomotion for quadrupedal robots in the wild. Science Robotics, 7(62), jan 2022. doi:10.1126/ scirobotics.abk2822. +L. Smith, J. C. Kew, X. B. Peng, S. Ha, J. Tan, and S. Levine. Legged robots that keep on learning: Fine-tuning locomotion policies in the real world, 2021. +T.-Y. Yang, T. Zhang, L. Luu, S. Ha, J. Tan, and W. Yu. 
Safe reinforcement learning for legged locomotion, 2022. URL https://arxiv.org/abs/2203.02638. +S. Ha, P. Xu, Z. Tan, S. Levine, and J. Tan. Learning to walk in the real world with minimal human effort, 2020. URL https://arxiv.org/abs/2002.08550. +L. Smith, I. Kostrikov, and S. Levine. A walk in the park: Learning to walk in 20 minutes with model-free reinforcement learning, 2022. URL https://arxiv.org/abs/2208.07860. +S. Levine, P. Pastor, A. Krizhevsky, J. Ibarz, and D. Quillen. Learning hand-eye coordination for robotic grasping with deep learning and large-scale data collection. The International Journal of Robotics Research, 37(4-5):421–436, 2018. +L. Pinto and A. Gupta. Supersizing self-supervision: Learning to grasp from 50k tries and 700 robot hours, 2015. +H. Ha and S. Song. Flingbot: The unreasonable effectiveness of dynamic manipulation for cloth unfolding. Conference on Robot Learning, 2021. +S. James and A. J. Davison. Q-attention: Enabling efficient learning for vision-based robotic manipulation, 2021. +E. Tzeng, C. Devin, J. Hoffman, C. Finn, P. Abbeel, S. Levine, K. Saenko, and T. Darrell. Adapting deep visuomotor representations with weak pairwise constraints, 2015. +I. Akkaya, M. Andrychowicz, M. Chociej, M. Litwin, B. McGrew, A. Petron, A. Paino, M. Plappert, G. Powell, R. Ribas, et al. Solving rubik’s cube with a robot hand. arXiv preprint arXiv:1910.07113, 2019. +M. P. Deisenroth, G. Neumann, J. Peters, et al. A survey on policy search for robotics. Foundations and Trends in Robotics, 2(1–2):1–142, 2013. +K. Chua, R. Calandra, R. McAllister, and S. Levine. Deep reinforcement learning in a handful of trials using probabilistic dynamics models. In Advances in Neural Information Processing Systems, pages 4754–4765, 2018. +A. Nagabandi, G. Yang, T. Asmar, R. Pandya, G. Kahn, S. Levine, and R. S. Fearing. Learning image-conditioned dynamics models for control of under-actuated legged millirobots, 2017. +P. Becker-Ehmck, M. Karl, J. Peters, and P. 
van der Smagt. Learning to fly via deep model-based reinforcement learning. arXiv preprint arXiv:2003.08876, 2020. +F. Deng, I. Jang, and S. Ahn. Dreamerpro: Reconstruction-free model-based reinforcement learning with prototypical representations. arXiv preprint arXiv:2110.14565, 2021. +M. Okada and T. Taniguchi. Dreaming: Model-based reinforcement learning by latent imagination without reconstruction. In 2021 IEEE International Conference on Robotics and Automation (ICRA), pages 4209–4215. IEEE, 2021. +H. Bharadhwaj, M. Babaeizadeh, D. Erhan, and S. Levine. Information prioritization through empowerment in visual model-based rl. arXiv preprint arXiv:2204.08585, 2022. +K. Paster, L. E. McKinney, S. A. McIlraith, and J. Ba. Blast: Latent dynamics models from bootstrapping. In Deep RL Workshop NeurIPS 2021, 2021. +K. Hsu, M. J. Kim, R. Rafailov, J. Wu, and C. Finn. Vision-based manipulators need to also see from their hands, 2022. URL https://arxiv.org/abs/2203.12677. + +# A Adaptation + +Real world robot learning faces practical challenges such as changing environmental conditions and time varying dynamics. We found that Dreamer is able to adapt to the current environmental conditions with no change to the learning algorithm. This shows promise for using Dreamer in continual learning settings (Parisi et al., 2019). Adaptation of the quadruped to external perturbations is reported in Section 3.1 and Figure 8. + +The XArm, situated near large windows, is able to adapt and maintain performance under the presence of changing lighting conditions. The XArm experiments were conducted after sundown to keep the lighting conditions constant throughout training. Figure A.1 shows the learning curve of the XArm. As expected, the performance of the XArm drops during sunrise. However, the XArm is able to adapt to the change in lighting conditions in about 5 hours time and recover the original performance, which is faster than it would be to train from scratch. 
A careful inspection of the image observations at these times, as shown in Figure A.1, reveals that the robot received observations with strong light rays covering the scene which greatly differs from the original training observations. + +![](images/db6cf431ae9355646aa06c810c30e311d8db38009707b4dea4bad788085ac2bb.jpg) +Figure A.1: The left two images are raw observations consumed by Dreamer. The leftmost image is an image observation as seen by the XArm at night, when it was trained. The next image shows an observation during sunrise. Despite the vast difference in pixel space, the XArm is able to recover, and then surpass, the original performance in approximately 5 hours. Even after 24 hours when the lighting shifts to night time conditions, the XArm is able to maintain performance. + +# B Imagination + +![](images/24fe88bf92baa43778d9defa3450750bc0d2c910fa9c12c5902630d7c2316e1e.jpg) +Figure B.1: To introspect the policy, we can roll out trajectories in the latent space of Dreamer, then decode the images to visualize the intent of the actor network. Each row is an imagined trajectory, showing every 2nd frame. Top: Latent rollouts on the UR5 environment. Multiple objects introduce more visual complexity that the network has to model. Note the second trajectory, which shows a static orange ball becoming a green ball. Bottom: Latent rollouts on the XArm environment. + +# C Detailed Related Work + +RL for locomotion A common approach is to train RL agents from large amounts of simulated data under domain and dynamics randomization (Peng et al., 2018; Lee et al., 2020; Rudin et al., 2021; Siekmann et al., 2021; Escontrela et al., 2022; Miki et al., 2022; Kumar et al., 2021; Rusu et al., 2016; Bohez et al., 2022), then freezing the learned policy and deploying it to the real world. Smith et al. (2021) explored pre-training policies in simulation and fine-tuning them with real world data. Yang et al. 
(2019) investigate learning a dynamics model using a multi-step loss and using model predictive control to accomplish a specified task. Yang et al. (2022) train locomotion policies in the real world but require a recovery controller trained in simulation to avoid unsafe states. In contrast, we use no simulators or reset policies and directly train on the physical robot. While prior work in locomotion has successfully learned walking behaviors in the real world, these works generally required several domain-specific assumptions or pretraining with simulators. Ha et al. (2020) achieved successful walking on the Minitaur robot in 90 minutes. However, the authors manually programmed a reset policy that was used when the robot fell on its back, while in our work the robot must learn to flip over and stand up. Additionally, the Minitaur robot is simpler than the A1 as it has 8 actuators compared to 12 on the A1. In recent work, Smith et al. (2022) utilize a high update-to-data ratio (UTD) RL algorithm to learn walking from 20 minutes of robot training data. However, their work assumes the availability of a reset policy and therefore comprises of a different learning problem compared to the problem we tackle of learning to flip over and walk from scratch. Additionally, we show our approach generalizes to environments with image observations and sparse rewards. + +RL for manipulation Learning promises to enable robot manipulators to solve contact rich tasks in open real world environments. One class of methods attempts to scale up experience collection through a fleet of robots (Kalashnikov et al., 2018; 2021; Ebert et al., 2021; Dasari et al., 2019; Levine et al., 2018). In contrast, we only leverage one robot, but parallelize an agent’s experience by using the learned world model. Another common approach is to leverage expert demonstrations or other task priors (Pinto and Gupta, 2015; Ha and Song, 2021; Xie et al., 2019; Schoettler et al., 2019; Sivakumar et al., 2022). 
James and Davison (2021); James et al. (2021) leverages a few demonstrations to increase the sample-efficiency of Q learning by focusing the learner on important aspects of the scene. Other approaches, as in locomotion, first utilize a simulator, then transfer to the real world (Tzeng et al., 2015; Akkaya et al., 2019; OpenAI et al., 2018; Irpan et al., 2020). Our work focuses on single-robot environments where the agent must learn through a small amount of interaction with the world. Meanwhile, the Google Arm Farm line of work by Levine et al. leverages over $5 8 0 \mathrm { k }$ grasp attempts gathered by 7 robots and collected over 4 months. We believe that a method such as Dreamer could benefit greatly from this scale of training data, however it is unlikely that works such as MT-OPT/QT-OPT Kalashnikov et al. (2018; 2021) would work well in the low data regime that Dreamer excels in. + +Model-based RL Due to its higher sample-efficiency over model-free methods, model-based RL is a promising approach to learning on real world robots (Deisenroth et al., 2013). A model based method first learns a dynamics model, which can then be used to plan actions (Nagabandi et al., 2019; Hafner et al., 2018; Chua et al., 2018; Nagabandi et al., 2017; Becker-Ehmck et al., 2020), or be used as a simulator to learn a policy network as in Dreamer (Hafner et al., 2019; 2020). One approach to tackle the high visual complexity of the world is to learn an action conditioned video prediction model (Finn and Levine, 2017; Ebert et al., 2018; Finn et al., 2016). One downside of this approach is the need to directly predict high dimensional observations, which can be computationally inefficient and easily drift. Dreamer learns a dynamics model in a latent space, allowing more efficient rollouts and avoids relying on high quality visual reconstructions for the policy. 
Another line of work proposes to learn latent dynamics models without having to reconstruct inputs (Deng et al., 2021; Okada and Taniguchi, 2021; Bharadhwaj et al., 2022; Paster et al., 2021), which we see as a promising approach for supporting moving view points in cluttered environments. + +# D Hyperparameters + +
| Name | Symbol | Value |
| --- | --- | --- |
| **General** | | |
| Replay capacity (FIFO) | β€” | $10^6$ |
| Start learning | β€” | $10^4$ |
| Batch size | $B$ | 32 |
| Batch length | $T$ | 32 |
| MLP size | β€” | $4 \times 512$ |
| Activation | β€” | LayerNorm + ELU |
| **World Model** | | |
| RSSM size | β€” | 512 |
| Number of latents | β€” | 32 |
| Classes per latent | β€” | 32 |
| KL balancing | β€” | 0.8 |
| **Actor Critic** | | |
| Imagination horizon | $H$ | 15 |
| Discount | $\gamma$ | 0.95 |
| Return lambda | $\lambda$ | 0.95 |
| Target update interval | β€” | 100 |
| **All Optimizers** | | |
| Gradient clipping | β€” | 100 |
| Learning rate | β€” | $10^{-4}$ |
| Adam epsilon | $\epsilon$ | $10^{-6}$ |
+ +# E Environment and Hardware Details + +For every robot setup that involved vision (UR5, XArm, Sphero), we used a RealSense D435 camera positioned to offer a fixed 3rd person view of the scene. + +A1 We used the A1 quadrupedal robot by Unitree. The RL policy outputs actions at a frequency that is too high for the PD controller to track, which we overcome by lowpass filtering the action sequence. The joint range allows the legs to self-collide with the body, which can be damaging to the motors and increase battery consumption. We limited the joint range to decrease self-collisions. Finally, the EKF velocity estimator relies on foot-ground contact events to prevent significant drift in the estimates, so we employ a curriculum reward function that does not reward the robot for forward velocity until the robot is upright with extended legs. We also designed a shell which we 3D printed in order to better protect the cables and hardware and provide a smoother rolling over. + +XArm & UR5 We utilized slanted bins to prevent objects from leaving the work area during the long-running pick and place experiments on the UR5, which is common practice Levine et al. (2018); Kalashnikov et al. (2018). We also added a partition behind the setup to keep the background constant. It would be interesting to study how a gripper-mounted camera would impact policy performance Hsu et al. (2022), however we report strong results without this design choice. For the XArm we use the uFactory xArm Gripper. For the UR5, we use the Robotiq 2F-85 parallel jaw gripper. The bin locations are predetermined and provided as part of the environment to prevent the robot from colliding with the bin. In addition, movement in the $\textsf { Z }$ axis is only enabled while holding an object and the gripper automatically opens once above the other bin. + +Sphero We used a rectangular enclosure of $0 . 8 \times 0 . 8 \mathrm { { m ^ { 2 } } }$ to keep the sphero robot within the camera view. 
We used a simple OpenCV script to estimate the L2 distance between the Sphero and the goal position to provide a dense reward for policy optimization. This positional information was not provided to the agent, which it had to learn from the raw top-down images. \ No newline at end of file diff --git a/md/dev/a0SRWViFYW/a0SRWViFYW.md b/md/dev/a0SRWViFYW/a0SRWViFYW.md new file mode 100644 index 0000000000000000000000000000000000000000..8f212d0bd4046c83906c071a4a6e566f0487e0f8 --- /dev/null +++ b/md/dev/a0SRWViFYW/a0SRWViFYW.md @@ -0,0 +1,1539 @@ +# STOCHASTIC PROJECTIVE SPLITTING:SOLVING SADDLE-POINT PROBLEMS WITH MULTIPLEREGULARIZERS + +Anonymous authors Paper under double-blind review + +# ABSTRACT + +We present a new, stochastic variant of the projective splitting (PS) family of algorithms for monotone inclusion problems. It can solve min-max and noncooperative game formulations arising in applications such as robust ML without the convergence issues associated with gradient descent-ascent, the current de facto standard approach in ML applications. Our proposal is the first version of PS able to use stochastic gradient oracles. It can solve min-max games while handling multiple constraints and nonsmooth regularizers via projection and proximal operators. Unlike other stochastic splitting methods that can solve such problems, our method does not rely on a product-space reformulation of the original problem. We prove almost-sure convergence of the iterates to the solution and a convergence rate for the expected residual. By working with monotone inclusions rather than variational inequalities, our analysis avoids the drawbacks of measuring convergence through the restricted gap function. We close with numerical experiments on a distributionally robust sparse logistic regression problem. + +# 1 INTRODUCTION + +The most prominent application of optimization in ML is empirical risk minimization. However, inspired by the success of GANs (Goodfellow et al., 2014). 
, ML practitioners have developed more complicated min-max and adversarial optimization formulations (Yu et al., 2021; Kuhn et al., 2019; Shafieezadeh-Abadeh et al., 2015; Sinha et al., 2018; Lin et al., 2020; Namkoong & Duchi, 2016; Huang et al., 2017; Wadsworth et al., 2018; Zhang et al., 2018; Edwards & Storkey, 2015; Celis & Keswani, 2019). Solving these multi-player games leads to issues not seen when minimizing a single-player loss function. The competitive nature of a game leads to rotational dynamics that can cause intuitive gradient-based methods to fail to converge (Gidel et al., 2019; Daskalakis et al., 2018; Hsieh et al., 2020). + +A mathematical framework underlying both convex optimization and saddle-point problems is the monotone inclusion problem; see Ryu & Boyd (2016) for an introduction. Methods developed for monotone inclusions will converge for convex-concave, games as they are explicitly designed to handle such problems’ governing dynamics. In recent years, monotone inclusion methods and theory have started to receive attention in the ML community (Diakonikolas, 2020; Liu et al., 2021; Ryu et al., 2020; Pathak & Wainwright, 2020), with a focus on monotone variational inequalities, which form a special case of monotone inclusions (Antonakopoulos et al., 2019; Gidel et al., 2019; Daskalakis et al., 2018; Hsieh et al., 2020; Mertikopoulos et al., 2019). + +The most prevalent methods for solving min-max games in ML are variants of gradient descent-ascent (GDA). This method alternates between a gradient-descent step for the minimizing player and a gradient-ascent step for the maximizing player. Unfortunately, GDA requires additional assumptions to converge on convex-concave games, and it even fails for some simple 2D bilinear games (Gidel et al., 2019, Prop. 1). 
While there have been several approaches to modify either GDA (Chavdarova et al., 2021; Grnarova et al., 2021; Balduzzi et al., 2018) or the underlying game objective (Mescheder et al., 2018; Nagarajan & Kolter, 2017; Mescheder et al., 2017) to ensure convergence, this paper instead develops a method for solving monotone inclusions that can naturally handle game dynamics. + +Our approach builds upon the recently proposed projective splitting (PS) method with forward steps (Johnstone & Eckstein, 2020b). PS is designed specifically for solving monotone inclusions, thus does not fall prey to the convergence issues that plague GDA, at least for convex-concave games. PS is within the general class of projective splitting methods invented by Eckstein & Svaiter (2008) and developed further in Eckstein & Svaiter (2009); Alotaibi et al. (2014); Combettes & Eckstein (2018); Eckstein (2017); Johnstone & Eckstein (2019; 2021; 2020a). These methods work by creating a separating hyperplane between the current iterate and the solution and then moving closer to the solution by projecting the current iterate onto this hyperplane (see Section 3 for an overview). Other than being able to natively handle game dynamics, the primary advantage of PS is that it fully splits problems involving an arbitrary number of regularizers and constraints. β€œFull splitting” means that the method can handle multiple regularizers and constraints through their respective individual proximal and projection operators, along with the smooth terms via gradients. What makes this useful is that many of the regularizers used in ML have proximal operators that are relatively easy to compute; see for example Parikh & Boyd (2013). + +Despite these advantages, the preexisting PS framework has a significant drawback: it requires deterministic gradient oracles. This feature makes it impractical for application to large datasets for which stochastic oracles may be the only feasible option. 
+ +Contributions The primary contribution of this work is a new projective splitting algorithm that allows for a stochastic gradient oracle. We call the method stochastic projective splitting (SPS). Our method β€œfully splits” the monotone inclusion problem + +$$ +\begin{array} { r } { \mathrm { F i n d } z \in \mathbb { R } ^ { d } \mathrm { ~ s . t . ~ } 0 \in \sum _ { i = 1 } ^ { n } A _ { i } ( z ) + B ( z ) , } \end{array} +$$ + +where $B$ is monotone and $L$ -Lipschitz and each $A _ { i }$ is maximal monotone and typically set valued, usually arising from a constraint or a nonsmooth regularizer in the underlying optimization problem or game; see for example Ryu & Boyd (2016) for definitions. For some example ML applications of (1), see Section 2 and Appendix A. Here, an algorithm that β€œfully splits” (1) means one whose computational steps each involve only the individual operators $A _ { 1 } , \ldots , A _ { n } , B$ . Ours is the first method that can accomplish full splitting without a product-space reformulation that recasts (1) as a two-operator problem on a higher-dimensional space, a tactic whose disadvantages are discussed in Appendix F.7. Our method interrogates the Lipschitz operator $B$ through a stochastic oracle. Previous methods splitting (1) have either required a deterministic oracle for $B$ , or have made far more restrictive assumptions on the noise or the operators (BriceΓ±o-Arias & Combettes, 2011; Combettes & Pesquet, 2012; Malitsky & Tam, 2020; Bot et al., 2019; Van Dung & Vu, 2021) than we will require below. However, the stochastic methods of Alacaoglu et al. (2021) and BΓΆhm et al. (2020), when combined with a product-space reformulation, can solve (1) when all the $A _ { i }$ are subdifferentials of convex functions; see Section 6. + +When moving away from a deterministic gradient oracle in projective splitting, a key difficulty is that the generated hyperplanes do not guarantee separation between the solution and the current point. 
We solve this issue by relaxing the projection: we only update each iterate in the direction of the noisy projection and scale its movement by a decreasing stepsize that allows for control of the stochastic error. Using the framework of stochastic quasi-FejΓ©r monotonicity (Combettes & Pesquet, 2015), we prove almost-sure convergence of the final iterate and do not require averaging of the iterates (Theorem 1, Section 5). We also provide a non-asymptotic convergence rate for the approximation residual (Theorem 2, Section 5). + +A special case of SPS is the recently-developed Double Stepsize Extragradient Method (DSEG) (Hsieh et al., 2020). When $n = 0$ and therefore only $B$ is present in (1), DSEG and SPS coincide. Thus, our method extends DSEG to allow for regularizers and constraints. Our analysis also provides a new interpretation for DSEG as a special case of projective splitting. Our nonasymptotic convergence rate for SPS also applies to DSEG under no additional assumptions. By contrast, the original convergence rate analysis for DSEG requires either strong monotonicity or an error bound. + +We close with numerical experiments on a distributionally robust sparse logistic regression problem. This is a nonsmooth convex-concave min-max problem which can be converted to (1) with $n = 2$ set-valued operators. On this problems class, SPS compares well to the possible alternative splitting methods. + +Non-monotone problems The work of Hsieh et al. (2020) included a local convergence analysis for DSEG applied to locally monotone problems. For min-max problems, if the objective is locally convex-concave at a solution and DSEG is initialized in close proximity, then for small enough stepsizes it converges to the solution with high probability. It is possible to extend this result to SPS, along with our convergence rate analysis. This result is beyond the scope of this work, but Appendix J provides a proof sketch. 
+ +# 2 BACKGROUND ON MONOTONE INCLUSIONS + +Since they are so important to SPS, this section provides some background material regarding monotone inclusions, along with their connections to convex optimization, games, and ML. Appendix G discusses their connections to variational inequalities. For a more thorough treatment, we refer to Bauschke & Combettes (2017). See Appendix A for a longer discussion of the applications of monotone inclusions to ML along with several examples. + +Fundamentals Let $f : \mathbb { R } ^ { d } \mathbb { R } \cup \{ \infty \}$ be closed, convex, and proper (CCP). Recall that its subdifferential $\partial f$ is given by $\partial f ( x ) \ { \overset { \cdot } { = } } \ \left\{ g : f ( y ) \geq f ( x ) + g ^ { \top } { \big ( } { \bar { y - x } } { \big ) } \right\}$ . The map $\partial f$ has the property + +$$ +u \in \partial f ( x ) , v \in \partial f ( y ) \implies ( u - v ) ^ { \top } ( x - y ) \geq 0 , +$$ + +and any point-to-set map having this property is called a monotone operator. A monotone operator $T$ is called maximal if no additional points can be included in the image $T ( x )$ of any $\boldsymbol { x } ^ { \mathrm { ~ \scriptsize ~ \in ~ } \mathbb { R } ^ { d } }$ without violating the above property (Bauschke & Combettes, 2017, Def. 20.20). Subgradient maps of CCP functions are maximal (Bauschke & Combettes, 2017, Thm. 20.25). A minimizer of $f$ is any $x ^ { * }$ such that $0 \in \partial f ( x ^ { * } )$ . This is perhaps the simplest example of a monotone inclusion, the problem of finding $x$ such that $0 \in T ( x )$ , where $T$ is a monotone operator. If $f$ is smooth, then $\bar { \partial } f ( x ) = \{ \nabla f ( x ) \}$ for all $x$ , and the monotone inclusion $0 \in \partial f ( x )$ is equivalent to the first-order optimality condition $0 = \nabla f ( x )$ . + +Under certain regularity conditions (Bauschke & Combettes, 2017, Cor. 
16.5), minimizing a sum of CCP functions $f _ { 1 } , \ldots , f _ { n }$ is equivalent to solving the monotone inclusion formed from the sum of their subdifferentials: + +$$ +x ^ { * } \in \underset { x \in \mathbb { R } ^ { d } } { \arg \operatorname* { m i n } } \sum _ { i = 1 } ^ { n } f _ { i } ( x ) \iff 0 \in \sum _ { i = 1 } ^ { n } \partial f _ { i } ( x ^ { * } ) . +$$ + +As throughout this paper for all set addition operations, the summation on the right-hand side of (2) is the Minkowski sum $\textstyle \sum _ { i = 1 } ^ { n } S _ { i } = \{ \sum _ { i = 1 } ^ { n } s _ { i } \ | ^ { \cdot } s _ { i } \in S _ { i } \forall i \in { 1 . . n } \}$ . For a convex set $X$ , a constraint $x \in C$ for some convex set $C$ may be imposed by setting one of the $f _ { i }$ to be the indicator function $\iota _ { C }$ , defined by $\iota _ { C } ( x ) = 0$ for $x \in C$ and $\iota _ { C } \bar { ( } x ) = \dot { + } \infty$ for $x \not \in C$ . Indicator functions of closed convex sets are CCP (Bauschke & Combettes, 2017, Ex. 1.25), and the subgradient map of $\iota _ { C }$ is also referred to as the normal cone map $N _ { C }$ of $C$ (Bauschke & Combettes, 2017, Def. 6.37). Multiple constraints may be imposed by including multiple indicator functions in (2). + +ML applications The form (2) can be used to model ML problems with multiple constraints and/or nonsmooth regularizers, including sparse and overlapping group lasso (Jacob et al., 2009), sparse and low-rank matrix estimation problems (Richard et al., 2012), and rare feature selection (Yan & Bien, 2020); see Pedregosa & Gidel (2018) for an overview. + +Games Consider a two-player noncooperative game in which each player tries to selfishly minimize its own loss, with each loss depending on the actions of both players. 
Typically, the goal is to find a Nash equilibrium, in which neither player can improve its loss by changing strategy: + +$$ +x ^ { * } \in \arg \operatorname* { m i n } _ { x \in \Theta } F ( x , y ^ { * } ) \quad { \mathrm { a n d } } \quad y ^ { * } \in \arg \operatorname* { m i n } _ { y \in \Omega } G ( x ^ { * } , y ) . +$$ + +Assuming that the admissible strategy sets $\Theta \subseteq \mathbb { R } ^ { d _ { x } }$ and $\Omega \subseteq \mathbb { R } ^ { d _ { y } }$ are closed and convex and that $F$ and $G$ are differentiable, then writing the first-order necessary conditions for each optimization problem in (3) yields + +$$ +0 \in \left[ \begin{array} { l } { \nabla _ { x } F ( x ^ { * } , y ^ { * } ) } \\ { \nabla _ { y } G ( x ^ { * } , y ^ { * } ) } \end{array} \right] + \big ( N _ { \Theta } ( x ^ { * } ) \times N _ { \Omega } ( y ^ { * } ) \big ) . +$$ + +If $G = - F$ , then (3) is a min-max game. If $F$ is also convex in $x$ and concave in $y$ , then $B : ( x , y ) \mapsto$ $( \nabla _ { x } F ( x , y ) , - \nabla _ { y } F ( x , y ) ) ^ { \top }$ is monotone1 on $\mathbb { R } ^ { d _ { x } + d _ { y } }$ (Rockafellar, 1970). In many applications, $B$ is also Lipschitz continuous. In this situation, (4) is a monotone inclusion involving two operators $B$ and $N _ { \Theta \times \Omega }$ , with $B$ being Lipschitz. Using the simultaneous version of GDA on (3) is equivalent to applying the forward-backward method (FB) (Bauschke & Combettes, 2017, Thm. 26.14) to (4). However, convergence of FB requires that the operator $B$ be cocoercive (Bauschke & Combettes, 2017, Def. 4.10), and not merely Lipschitz (Bauschke & Combettes, 2017, Thm. 26.14). Thus, simultaneous GDA fails to converge for (3) without additional assumptions; see Gidel et al. (2019, Prop. 1) for a simple counterexample. + +Regularizers and further constraints may be imposed by adding more operators to (4). 
For example, if one wished to apply a (nonsmooth) convex regularizer $r : \mathbb{R}^{d_x} \to \mathbb{R} \cup \{+\infty\}$ to the $x$ variables and a similar regularizer $d : \mathbb{R}^{d_y} \to \mathbb{R} \cup \{+\infty\}$ to the $y$ variables, one would add the operator $A_2 : (x, y) \mapsto \partial r(x) \times \partial d(y)$ to the right-hand side of (4).
+ +Other applications of games in ML, although typically nonconvex, include generative adversarial networks (GANs) (Goodfellow et al., 2014; Arjovsky et al., 2017; Loizou et al., 2020; 2021; Mishchenko et al., 2020), fair classification (Wadsworth et al., 2018; Zhang et al., 2018; Edwards & Storkey, 2015; Celis & Keswani, 2019), and adversarial privacy (Huang et al., 2017). + +Resolvents, proximal operators, and projections A fundamental computational primitive for solving monotone inclusions is the resolvent. The resolvent of a monotone operator $A$ is defined to be $J _ { A } \overset { \cdot } { = } ( I + A ) ^ { - 1 }$ , where $I$ is the identity operator and the inverse of any operator $T$ is simply $T ^ { - 1 } : x \mapsto \{ y : T y \ni x \}$ . If $A$ is maximal monotone, then for any $\rho > 0$ , $J _ { \rho A }$ is single valued, nonexpansive, and has domain equal to $\mathbb { R } ^ { d }$ (Bauschke & Combettes, 2017, Thm. 21.1 and Prop. 23.8). Resolvents generalize proximal operators of convex functions: the proximal operator of a CCP function $f$ is + +$$ +\operatorname { p r o x } _ { \rho f } ( t ) \doteq \underset { x \in \mathbb { R } ^ { d } } { \arg \operatorname* { m i n } } \left\{ \rho f ( x ) + ( 1 / 2 ) \| x - t \| ^ { 2 } \right\} . +$$ + +It is easily proved that $\mathrm { p r o x } _ { \rho f } = \underset { - } { J } _ { \rho \partial f }$ . Like proximal operators, resolvents generalize projection onto convex sets: if $f = \iota _ { \mathcal { C } }$ , then $J _ { \rho N _ { C } } = \mathrm { p r o x } _ { \rho f } = \mathrm { p r o j } _ { \mathcal { C } }$ for any $\rho > 0$ . In many ML applications, proximal operators, and hence resolvents, are relatively straightforward to compute. For examples, see Parikh & Boyd (2013, Sec. 6). 
+ +Operator splitting methods Operator splitting methods attempt to solve monotone inclusions such as (1) by a sequence of operations that each involve only one of the operators $A _ { 1 } , \ldots , A _ { n } , B$ . Such methods are often presented in the context of convex optimization problems like (2), but typically apply more generally to monotone inclusions such as (1). In the specific context of (1), each iteration of such a method ideally handles each $A _ { i }$ via its resolvent and the Lipschitz operator $B$ by explicit (not stochastic) evaluation. This is a feasible approach if the original problem can be decomposed in such a way that the resolvents of each $A _ { i }$ are relatively inexpensive to compute, and full evaluations of $B$ are possible. Although not discussed here, more general formulations in which matrices couple the arguments of the operators can broaden the applicability of operator splitting methods. + +# 3 THE PROJECTIVE SPLITTING FRAMEWORK + +Before introducing our proposed method, we give a brief introduction to the projective splitting class of methods. + +The extended solution set Projective splitting is a primal-dual framework and operates in an extended space of primal and dual variables. Rather than directly finding a solution to (1), we find a point in the extended solution set (or Kuhn-Tucker set) + +$$ +\begin{array} { r } { \mathcal { S } \doteq \left\{ ( z , w _ { 1 } , \ldots , w _ { n + 1 } ) \ \middle | \ w _ { i } \in A _ { i } ( z ) \forall i \in 1 . . n , w _ { n + 1 } = B ( z ) , \sum _ { i = 1 } ^ { n + 1 } w _ { i } = 0 \right\} . } \end{array} +$$ + +Given $p ^ { * } = ( z ^ { * } , w _ { 1 } ^ { * } \ldots , w _ { n + 1 } ^ { * } ) \in \mathcal { S }$ , it is straightforward to see that $z ^ { * }$ solves (1). 
Conversely, given a solution $z ^ { * }$ to (1), there must exist $w _ { 1 } ^ { * } , \ldots , w _ { n + 1 } ^ { * }$ such that $( z ^ { \ast } , w _ { 1 } ^ { \ast } , \dots , w _ { n + 1 } ^ { \ast } ) \in \mathcal { S }$ . Suppose $p ^ { * } = ( z ^ { * } , w _ { 1 } ^ { * } \ldots , w _ { n + 1 } ^ { * } ) \in \mathcal { S }$ . Since $z ^ { * }$ solves (1), $z ^ { * }$ is typically referred to as a primal solution. The vectors $w _ { 1 } ^ { * } , \ldots , w _ { n + 1 } ^ { * }$ solve a dual inclusion not described here, and are therefore called a dual solution. It can be shown that $s$ is closed and convex; see for example Johnstone $\&$ Eckstein (2020b). We will assume throughout that a solution to (1) exists, therefore the set $s$ is nonempty. + +Separator-projection framework Projective splitting methods are instances of the general separator-projection algorithmic framework for locating a member of a closed convex set $s$ within a linear space $\mathcal { P }$ . Each iteration $k$ of algorithms drawn from this framework operates by finding a set $H _ { k }$ that separates the current iterate $p ^ { k } \in \mathcal { P }$ from $s$ , meaning that $s$ is entirely in the set and $p ^ { k }$ typically is not. One then attempts to β€œmove closer" to $s$ by projecting the $p ^ { k }$ onto $H _ { k }$ . In the particular case of projective splitting applied to the problem (1) using (5), we select the space $\mathcal { P }$ to be + +$$ +\begin{array} { r } { \mathcal { P } \doteq \left\{ ( z , w _ { 1 } , \ldots , w _ { n + 1 } ) \in \mathbb { R } ^ { ( n + 2 ) d } \ \Big | \ \sum _ { i = 1 } ^ { n + 1 } w _ { i } = 0 \right\} , } \end{array} +$$ + +and each separating set $H _ { k }$ to be the half space $\{ p \in { \mathcal { P } } \mid \varphi _ { k } ( p ) \leq 0 \}$ generated by an affine function $\varphi _ { k } : \mathscr { P } \mathbb { R }$ . 
The general intention is to construct $\varphi _ { k }$ such that $\varphi _ { k } \tilde { ( p ^ { k } ) } > 0$ , but $\varphi _ { k } ( p ^ { * } ) \leq 0$ for all $p ^ { * } \in { \mathcal { S } }$ . The construction employed for $\varphi _ { k }$ in the case of (1) and (5) is of the form + +$$ +\begin{array} { r } { \varphi _ { k } ( z , w _ { 1 } , \ldots , w _ { n + 1 } ) \doteq \sum _ { i = 1 } ^ { n + 1 } \langle z - x _ { i } ^ { k } , y _ { i } ^ { k } - w _ { i } \rangle } \end{array} +$$ + +for some points $( x _ { i } ^ { k } , y _ { i } ^ { k } ) \in \mathbb { R } ^ { 2 d }$ , $i \in { 1 . . ( n + 1 ) }$ , that must be carefully chosen (see below). Any function of the form (7) can be shown to be affine when restricted to $\mathcal { P }$ . As mentioned above, the standard separator-projection algorithm obtains its next iterate $p ^ { k + 1 }$ by projecting $p ^ { k }$ onto $H _ { k }$ . This calculation involves the usual projection step for a half space, namely + +$$ +p ^ { k + 1 } = p ^ { k } - \alpha _ { k } \nabla \varphi _ { k } , \quad \mathrm { ~ w h e r e ~ } \quad \alpha _ { k } = \varphi _ { k } ( p ^ { k } ) / \| \nabla \varphi _ { k } \| ^ { 2 } , +$$ + +and the gradient $\nabla \varphi _ { k }$ is computed relative to $\mathcal { P }$ , thus resulting in $p ^ { k + 1 } \ \in \ { \mathcal { P } }$ , i.e. $\nabla \varphi _ { k } \ =$ +$\left( \sum _ { i = 1 } ^ { n + 1 } y _ { i } ^ { k } , x _ { 1 } ^ { k } - { \bar { x } } ^ { k } , \dots , x _ { n + 1 } - { \bar { x } } ^ { k } \right)$ where $\begin{array} { r } { \bar { x } ^ { k } = \frac { 1 } { n + 1 } \sum _ { i = 1 } ^ { n + 1 } x _ { i } ^ { k } } \end{array}$ . + +# 4 PROPOSED METHOD + +The proposed method is given in Algorithm 1 and called Stochastic Projective Splitting (SPS). Unlike prior versions of projective splitting, SPS does not employ the stepsize $\alpha _ { k }$ of (8) that places the next iterate exactly on the hyperplane given by $\varphi _ { k } ( p ) = 0$ . 
Instead, it simply moves in the direction $- \nabla \varphi _ { k }$ with a pre-defined stepsize $\{ \alpha _ { k } \}$ . This fundamental change is required to deal with the stochastic noise on lines 6 and 8. This noise could lead to the usual choice of $\alpha _ { k }$ defined in (8) being unstable and difficult to analyze. In order to guarantee convergence, the parameters $\alpha _ { k }$ and $\rho _ { k }$ must be chosen to satisfy certain conditions given below. Note that the gradient is calculated with respect to the subspace $\mathcal { P }$ defined in (6); since the algorithm is initialized within $\mathcal { P }$ , it remains in $\mathcal { P }$ , within which $\varphi _ { k }$ the updates on lines 9-10 are equivalent to . $\boldsymbol { p } ^ { k + 1 } = \boldsymbol { p } ^ { k } - \alpha _ { k } \nabla \varphi _ { k }$ , where $\mathbf { \chi } ^ { \dot { k } } = ( z ^ { k } , w _ { 1 } ^ { k } , \dots , w _ { n + 1 } ^ { k } )$ + +Note that SPS does not explicitly evaluate $\varphi _ { k }$ , which is only used in the analysis, but it does keep track of $( x _ { i } ^ { k } , y _ { i } ^ { k } )$ for $i \in { 1 . . ( n + 1 ) }$ . The algorithm’s memory requirements scale linearly with the number of nonsmooth operators $n$ in the inclusion (1), with the simplest implementation storing $( 3 n + 5 ) d$ working-vector elements. This requirement can be reduced to $( n + 7 ) d$ through a technique discussed in Appendix H. In most applications, $n$ will be small, for example 2 or 3. + +Updating $( x _ { i } ^ { k } , y _ { i } ^ { k } )$ The variables $( x _ { i } ^ { k } , y _ { i } ^ { k } )$ are updated on lines 3-8 of Algorithm 1, in which $e ^ { k }$ and $\epsilon ^ { k }$ are $\mathbb { R } ^ { d }$ -valued random variables defined on a probability space $( \Omega , { \mathcal { F } } , P )$ . For $B$ we use a new, noisy version of the two-forward-step procedure from Johnstone & Eckstein (2020b). For each $A _ { i }$ , $i \in 1 . . 
n$ , we use the same resolvent step used in previous projective splitting papers, originating with (Eckstein & Svaiter, 2008). In the case $\epsilon ^ { k } = e ^ { k } = 0$ , the selection of the $( \bar { x _ { i } ^ { k } } , y _ { i } ^ { k } )$ is identical to that proposed by Johnstone & Eckstein (2020b), resulting in the hyperplane $\{ p : { \varphi } _ { k } ( p ) = 0 \}$ strictly separating $p ^ { k }$ from $s$ . + +SPS achieves full splitting of (1): each $A _ { i }$ is processed separately using a resolvent and the Lipschitz term $B$ is processed via a stochastic gradient oracle. When the $A _ { i }$ arise from regularizers or constraints, as discussed in Section 2, their resolvents can be readily computed so long as their respective proximal/projection operators have a convenient form. + +Noise assumptions Let $\mathcal { F } _ { k } \doteq \sigma ( p ^ { 1 } , \ldots , p ^ { k } )$ and $\mathcal { E } _ { k } \doteq \sigma ( \epsilon ^ { k } )$ . The stochastic estimators for the gradients, $r ^ { k }$ and $y _ { n + 1 } ^ { k }$ , are assumed to be unbiased, that is, the noise terms have mean 0 conditioned on the past: + +$$ +\mathbb { E } [ \epsilon ^ { k } | \mathcal { F } _ { k } ] = 0 , \quad \mathbb { E } [ e ^ { k } | \mathcal { F } _ { k } ] = 0 \quad a . s . +$$ + +We impose the following mild assumptions on the variance of the noise: + +$$ +\begin{array} { r l } & { \mathbb { E } \left[ \| \epsilon ^ { k } \| ^ { 2 } | \mathcal { F } _ { k } \right] \leq N _ { 1 } + N _ { 2 } \| B ( z ^ { k } ) \| ^ { 2 } \quad a . s . } \\ & { \mathbb { E } \left[ \| e ^ { k } \| ^ { 2 } | \mathcal { F } _ { k } , \mathcal { E } _ { k } \right] \leq N _ { 3 } + N _ { 4 } \| B ( x _ { n + 1 } ^ { k } ) \| ^ { 2 } \quad a . s . , } \end{array} +$$ + +where $0 \le N _ { 1 } , N _ { 2 } , N _ { 3 } , N _ { 4 } < \infty$ . We do not require $e ^ { k }$ and $\epsilon ^ { k }$ to be independent of one another. 
+ +Stepsize choices The stepsizes $\rho _ { k }$ and $\alpha _ { k }$ are assumed to be deterministic. A constant stepsize choice which attains a non-asymptotic convergence rate will be considered in the next section (Theorem 2). The stepsize conditions we will impose to guarantee almost-sure convergence (Theorem 1) are + +$$ +\begin{array} { r } { \sum _ { k = 1 } ^ { \infty } \alpha _ { k } \rho _ { k } = \infty , \quad \sum _ { k = 1 } ^ { \infty } \alpha _ { k } ^ { 2 } < \infty , \quad \sum _ { k = 1 } ^ { \infty } \alpha _ { k } \rho _ { k } ^ { 2 } < \infty , \mathrm { a n d } \rho _ { k } \leq \overline { \rho } < 1 / L . } \end{array} +$$ + +For example, in the case $L = 1$ , a particular choice which satisfies these constraints is + +$$ +\alpha _ { k } = k ^ { - 0 . 5 - p } \mathrm { f o r } 0 < p < 0 . 5 , \mathrm { a n d } \rho _ { k } = k ^ { - 0 . 5 + t } \mathrm { f o r } p \leq t < 0 . 5 p + 0 . 2 5 . +$$ + +For simplicity, the stepsizes $\tau$ used for the resolvent updates in lines 3-5 are fixed, but they could be allowed to vary with both $i$ and $k$ so long as they have finite positive lower and upper bounds. + +# Algorithm 1: Stochastic Projective Splitting (SPS) + +# 5 MAIN THEORETICAL RESULTS + +Theorem 1. Suppose $A _ { 1 } , \ldots , A _ { n }$ are maximal monotone, $B$ is $L$ -Lipschitz and monotone, and a solution to (1) exists. For Algorithm $I$ , suppose (9)-(12) hold. Then with probability one it holds that $z ^ { k } \to z ^ { * }$ , where $z ^ { * }$ solves (1). Further, with probability one, $x _ { i } ^ { k } \to z ^ { * }$ for $i = 1 , \ldots , n$ . + +Proof sketch Theorem 1 is proved in Appendix C, but we provide a brief sketch here. The proof begins by deriving a simple recursion inspired by the analysis of SGD (Robbins & Monro, 1951). 
Since $p ^ { k + 1 } = p ^ { k } - \alpha _ { k } \nabla \bar { \varphi } _ { k }$ , a step of projective splitting can be viewed as GD applied to the affine hyperplane generator function $\varphi _ { k }$ . Thus, for any $p ^ { * } \in \mathcal { P }$ , + +$$ +\begin{array} { r l } & { \| p ^ { k + 1 } - p ^ { * } \| ^ { 2 } = \| p ^ { k } - p ^ { * } \| ^ { 2 } - 2 \alpha _ { k } \langle \nabla \varphi _ { k } , p ^ { k } - p ^ { * } \rangle + \alpha _ { k } ^ { 2 } \| \nabla \varphi _ { k } \| ^ { 2 } } \\ & { \qquad = \| p ^ { k } - p ^ { * } \| ^ { 2 } - 2 \alpha _ { k } ( \varphi _ { k } ( p ^ { k } ) - \varphi _ { k } ( p ^ { * } ) ) + \alpha _ { k } ^ { 2 } \| \nabla \varphi _ { k } \| ^ { 2 } } \end{array} +$$ + +where in the second equation we have used that $\varphi _ { k } ( p )$ is affine on $\mathcal { P }$ . The basic strategy is to show that, for any $p ^ { * } \in { \mathcal { S } }$ , + +$$ +\begin{array} { r } { \mathbb { E } [ \| \nabla \varphi _ { k } \| ^ { 2 } | \mathcal { F } _ { k } ] \le C _ { 1 } \| p ^ { k } - p ^ { * } \| ^ { 2 } + C _ { 2 } \quad a . s . } \end{array} +$$ + +for some $C _ { 1 } , C _ { 2 } > 0$ . This condition allows one to establish stochastic quasi-FejΓ©r monotonicity (SQFM) (Combettes & Pesquet, 2015, Proposition 2.3) of the iterates to $s$ . One consequence of SQFM is that with probability one there exists a subsequence $v _ { k }$ such that $\varphi _ { v _ { k } } ( p ^ { v _ { k } } ) - \varphi _ { v _ { k } } ( p ^ { * } )$ converges to 0. Furthermore, roughly speaking, we show that $\varphi _ { k } ( p ^ { k } ) - \varphi _ { k } ( p ^ { * } )$ provides an upper bound on the following β€œapproximation residual" for SPS: + +$$ +\begin{array} { r } { G _ { k } \doteq \sum _ { i = 1 } ^ { n } \| y _ { i } ^ { k } - w _ { i } ^ { k } \| ^ { 2 } + \sum _ { i = 1 } ^ { n } \| z ^ { k } - x _ { i } ^ { k } \| ^ { 2 } + \| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \| ^ { 2 } . 
} \end{array} +$$ + +provides an approximation error for SPS, as formalized in the following lemma: + +Lemma 1. For SPS, $p ^ { k } = ( z ^ { k } , w _ { 1 } ^ { k } , \ldots , w _ { n + 1 } ^ { k } ) \in \mathcal { S }$ if and only if $G _ { k } = 0$ + +Since $y _ { i } ^ { k } \ \in \ A _ { i } ( x _ { i } ^ { k } )$ for $i \in 1 . . n$ , having $G _ { k } ~ = ~ 0$ implies that $z ^ { k } = x _ { i } ^ { k }$ , $w _ { i } ^ { k } \ = \ y _ { i } ^ { k }$ , and thus $w _ { i } ^ { k } \in A _ { i } ( z ^ { k } )$ for $i \in 1 . . n$ . Since $w _ { n + 1 } ^ { k } = B ( z ^ { k } )$ and $\textstyle \sum _ { i = 1 } ^ { n + 1 } w _ { i } ^ { k } = 0$ , it follows that $z ^ { k }$ solves (1). The reverse direction is proved in Appendix $\mathrm { D }$ . + +The quantity $G _ { k }$ generalizes the role played by the norm of the gradient in algorithms for smooth optimization. In particular, in the special case where $n = 0$ and $\bar { B } ( z ) = \nabla f ( z )$ for some smooth convex function $f$ , one has $G _ { k } = \| \bar { \nabla } f ( z ^ { k } ) \| ^ { 2 }$ . + +Combining the properties of $G _ { k }$ with other results following from SQFM (such as boundedness) will allow us to derive almost-sure convergence of the iterates to a solution of (1). + +Convergence rate We can also establish non-asymptotic convergence rates for the approximation residual $G _ { k }$ : + +Theorem 2. Fix the total iterations $K \geq 1$ of Algorithm 1 and set + +$$ +\forall k = 1 , \ldots , K : \rho _ { k } = \rho \doteq \operatorname* { m i n } \left\{ K ^ { - 1 / 4 } , 1 / 2 L \right\} \quad \ a n d \quad \alpha _ { k } = C _ { f } \rho ^ { 2 } +$$ + +for some $C _ { f } > 0$ . Suppose (9)-(11) hold. Then + +$$ +\begin{array} { r } { ( 1 / K ) { \sum } _ { j = 1 } ^ { K } \mathbb { E } [ G _ { j } ] = \mathcal { O } ( K ^ { - 1 / 4 } ) } \end{array} +$$ + +where the constants are given (along with the proof) in Appendix $E$ . 
+ +Theorem 2 implies that if we pick an iterate $J$ uniformly at random from $1 . . K$ , then the expected value of $G _ { J }$ is $\mathcal { O } ( K ^ { - 1 / 4 } )$ . As far as we know, this is the first convergence rate for a stochastic fullsplitting method solving (1) in the general discontinuous (i.e. set-valued) monotone inclusion case, and it is not clear whether it can be improved, either by a better analysis or a better method. Faster rates are certainly possible for deterministic methods under various continuity assumptions; Tseng’s method obtains $\bar { \mathcal { O } } ( K ^ { - 1 } )$ rate (Monteiro $\&$ Svaiter, 2010) and the accelerated Halpern iteration under Lipschitz continuity obtains $\mathcal { O } ( K ^ { - 2 } )$ rate (Diakonikolas, 2020). While our rate may seem slow, it is worth remembering that (1) features $n$ discontinuous operators $A _ { i }$ , so we expect rates at least as slow as nonsmooth convex optimization, but perhaps worse because (1) is far more general than convex optimization. For a different error metric, the restricted gap function, in the special case of variational inequalities, faster rates have been established in Juditsky et al. (2011) and BΓΆhm et al. (2020). However, it is unclear how to relate the restricted gap function to $G _ { k }$ , so these rates may not be directly comparable to Theorem 2. + +# 6 RELATED WORK + +Arguably the three most popular classes of operator splitting algorithms are forward-backward splitting (FB) (Combettes & Pesquet, 2011), Douglas-Rachford splitting (DR) (Lions & Mercier, 1979), and Tseng’s method (Tseng, 2000). The extragradient method (EG) is similar to Tseng’s method, but has more projection steps per iteration and only applies to variational inequalities (Korpelevich, 1977; Nemirovski, 2004; Li et al., 2021). The popular Alternating Direction Method of Multipliers (ADMM), in its standard form, is a dual application of DR (Gabay, 1983). 
The three-operator splitting method (Davis & Yin, 2017) can only be applied to (1) if $B$ is cocoercive rather than merely Lipschitz, and thus its usefulness is mostly limited to optimization applications and not games.
Another drawback of the analysis of BΓΆhm et al. (2020) is that, unlike in SPS, the resolvent (proximal) stepsizes also need to vanish. + +The method of Alacaoglu et al. (2021) applies variance reduction techniques to FRB. It only applies to finite-sum problems and requires the periodic computation of a full batch gradient, making it somewhat less flexible and scalable than our method. On the other hand, it has an accelerated ergodic rate for the restricted gap function in the variational inequality setting. We compare the empirical performance of SPS with Alacaoglu et al. (2021), BΓΆhm et al. (2020), and several deterministic methods using PSR in the numerical experiments described in Section 7. + +Additional related work is discussed in Appendix B. + +# 7 EXPERIMENTS + +We now present some numerical results on distributionally robust supervised learning (DRSL) problems. We follow the approach of Yu et al. (2021), which introduced a min-max formulation of Wasserstein DRSL. While other approaches reduce the problem to convex optimization, Yu et al. (2021) reduce it to a finite-dimensional min-max problem amenable to the use of stochastic methods on large datasets. However, unlike our proposed SPS method, the variance-reduced extragradient method that Yu et al. (2021) propose cannot handle multiple nonsmooth regularizers or constraints on the model parameters. Consequently, we consider distributionally robust sparse logistic regression (DRSLR), a problem class equivalent to that considered in Yu et al. (2021), but with an added $\ell _ { 1 }$ regularizer, a standard tool to induce sparsity. See the Appendix I for the full problem definition. + +We compared our SPS method to several methods for solving DRSLR for a collection of real datasets from the LIBSVM repository (Chang & Lin, 2011). We implemented SPS with $\alpha _ { k } = C _ { d } k ^ { - 0 . 5 1 }$ and $\rho _ { k } = C _ { d } k ^ { - 0 . 2 5 }$ and called it SPS-decay. 
We also implement SPS with the fixed stepsize given in (15) and called it SPS-fixed. We compared the method to deterministic projective splitting (Johnstone & Eckstein, 2020b) and the following methods based on PSR: Tseng’s method (Tseng, 2000; Combettes & Pesquet, 2012), the forward-reflected-backward (FRB) method (Malitsky & Tam, 2020), the stochastic Tseng (S-Tseng) method of BΓΆhm et al. (2020), and the variance-reduced stochastic FRB method (Alacaoglu et al., 2021), abbreviated FRB-VR. The S-Tseng and FRB-VR algorithms appear to be the only stochastic splitting methods other than SPS applicable to the tested problem class. + +![](images/5a3352bf1be8622af7e8437ef0309b7c8e82b213e948eae1d016add4ab16fe33.jpg) +Figure 1: Approximation residual versus running time for three LIBSVM benchmark datasets, with the markers at 10-iteration intervals. Left: epsilon, middle: SUSY, right: real-sim. For the stochastic algorithms (SPS, S-Tseng, and FRB-VR), we plot the median results over 10 trials, with unit standard deviation horizontal error bars for the running time and the vertical error bars displaying the min-to-max range of the approximation residual. The code is provided in the supplementary material. + +Figure 1 show results for three LIBSVM standard datasets: epsilon2 $m = 4 \cdot 1 0 ^ { 5 }$ , $d = 2 0 0 0 \mathrm { \Omega }$ ), SUSY (Baldi et al., 2014; Dua & Graff, 2017) $m = 2 \cdot 1 0 ^ { 6 }$ , $d = 1 8$ ), and real-sim3 ( $m = 7 2 { , } 3 0 9$ , $d = 2 0 { , } 9 5 8 _ { , }$ ). + +To measure the progress of the algorithms, we used the β€œapproximation residual” $R _ { k }$ defined in Appendix F. As with $G _ { k }$ , having $R _ { k } = 0$ implies that $z ^ { k }$ solves (1). We use $R _ { k }$ instead of $G _ { k }$ because it is also possible to compute essentially the same measure of convergence from the iterates of the other tested algorithms, establishing a fair comparison. 
Appendix F provides the details of the derivation of the residual measure for each algorithm, explores the relationship between $R _ { k }$ and $G _ { k }$ , and provides additional implementation details. + +Figure 1 plots the approximation residual versus running time for all seven algorithms under consideration. The computations were performed using Python 3.8.3 and numpy on a 2019 MacBook Pro with a 2.4GHz 8-core Intel I9 processor and 32GB of RAM . Being a stochastic method, SPS-decay seems to outperform the deterministic methods at obtaining a medium-accuracy solution quickly. It also seems to outperform the stochastic PSR-based methods S-Tseng and FRB-VR. + +# 8 CONCLUSIONS AND FUTURE WORK + +We have developed and analyzed a stochastic splitting method that can handle min-max problems with multiple regularizers and constraints. Going forward, this development should make it possible to incorporate regularizers and constraints into adversarial formulations trained from large datasets. + +Recent versions of deterministic projective splitting (Combettes & Eckstein, 2018; Johnstone & Eckstein, 2020b) allow for asynchronous and incremental operation, meaning that not all operators need to be activated at every iteration, with some calculations proceeding with stale inputs. Such characteristics make projective splitting well-suited to distributed implementations. Many of our SPS results may be extended to allow for these variations, but we leave those extensions to future work. + +# REFERENCES + +Ahmet Alacaoglu, Yura Malitsky, and Volkan Cevher. Forward-reflected-backward method with variance reduction. Computational Optimization and Applications, 2021. Available online. + +Abdullah Alotaibi, Patrick L Combettes, and Naseer Shahzad. Solving coupled composite monotone inclusions by successive FejΓ©r approximations of their Kuhn-Tucker set. SIAM Journal on Optimization, 24(4):2076–2095, 2014. + +Kimon Antonakopoulos, Veronica Belmega, and Panayotis Mertikopoulos. 
An adaptive mirrorprox method for variational inequalities with singular operators. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'AlchΓ©-Buc, E. Fox, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 32. Curran Associates, 2019. + +Martin Arjovsky, Soumith Chintala, and LΓ©on Bottou. Wasserstein generative adversarial networks. In Doina Precup and Yee Whye Teh (eds.), Proceedings of the 34th International Conference on Machine Learning, volume 70 of Proceedings of Machine Learning Research, pp. 214–223, 06–11 Aug 2017. + +Pierre Baldi, Peter Sadowski, and Daniel Whiteson. Searching for exotic particles in high-energy physics with deep learning. Nature communications, 5(1):1–9, 2014. + +David Balduzzi, Sebastien Racaniere, James Martens, Jakob Foerster, Karl Tuyls, and Thore Graepel. The mechanics of $n$ -player differentiable games. In Jennifer Dy and Andreas Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, volume 80 of Proceedings of Machine Learning Research, pp. 354–363. PMLR, 10–15 Jul 2018. + +Heinz H Bauschke and Patrick L Combettes. Convex analysis and monotone operator theory in Hilbert spaces. Springer, 2nd edition, 2017. + +Axel BΓΆhm, Michael Sedlmayer, ErnΓΆ Robert Csetnek, and Radu Ioan BoΒΈt. Two steps at a time β€” taking GAN training in stride with Tseng’s method. arXiv preprint arXiv:2006.09033, 2020. + +Radu Ioan Bot, Panayotis Mertikopoulos, Mathias Staudigl, and Phan Tu Vuong. Forward-backwardforward methods with variance reduction for stochastic variational inequalities. arXiv preprint arXiv:1902.03355, 2019. + +Luis M BriceΓ±o-Arias and Patrick L Combettes. A monotone+skew splitting model for composite monotone inclusions in duality. SIAM Journal on Optimization, 21(4):1230–1250, 2011. + +Luis M BriceΓ±o-Arias and Patrick L Combettes. Monotone operator methods for Nash equilibria in non-potential games. 
In Computational and Analytical Mathematics, volume 50 of Springer Proceedings in Mathematics and Statistics, pp. 143–159. Springer, 2013. + +L Elisa Celis and Vijay Keswani. Improved adversarial learning for fair classification. arXiv preprint arXiv:1901.10443, 2019. + +Chih-Chung Chang and Chih-Jen Lin. LIBSVM: A library for support vector machines. ACM Transactions on Intelligent Systems and Technology, 2:27:1–27:27, 2011. Software available at http://www.csie.ntu.edu.tw/\~cjlin/libsvm. + +Tatjana Chavdarova, Matteo Pagliardini, Sebastian U Stich, FranΓ§ois Fleuret, and Martin Jaggi. Taming GANs with lookahead-minmax. In International Conference on Learning Representations, 2021. URL https://openreview.net/forum?id $=$ ZW0yXJyNmoG. + +Patrick L. Combettes and Jonathan Eckstein. Asynchronous block-iterative primal-dual decomposition methods for monotone inclusions. Mathematical Programming, 168(1-2):645–672, 2018. + +Patrick L Combettes and Jean-Christophe Pesquet. Proximal splitting methods in signal processing. In H.H. Bauschke, R.S.S. Burachik, P.L. Combettes, V. Elser, D.R. Luke, and H. Wolkowicz (eds.), Fixed-Point Algorithms for Inverse Problems in Science and Engineering, pp. 185–212. Springer, 2011. + +Patrick L Combettes and Jean-Christophe Pesquet. Primal-dual splitting algorithm for solving inclusions with mixtures of composite, Lipschitzian, and parallel-sum type monotone operators. Set-Valued and variational analysis, 20(2):307–330, 2012. + +Patrick L Combettes and Jean-Christophe Pesquet. Stochastic quasi-FejΓ©r block-coordinate fixed point iterations with random sweeping. SIAM Journal on Optimization, 25(2):1221–1248, 2015. + +Constantinos Daskalakis, Andrew Ilyas, Vasilis Syrgkanis, and Haoyang Zeng. Training GANs with optimism. In International Conference on Learning Representations, 2018. URL https: //openreview.net/forum?id ${ . } =$ SJJySbbAZ. + +Damek Davis and Wotao Yin. A three-operator splitting scheme and its optimization applications. 
Set-Valued and Variational Analysis, 25(4):829–858, 2017. + +Jelena Diakonikolas. Halpern iteration for near-optimal and parameter-free monotone inclusion and strong solutions to variational inequalities. In Conference on Learning Theory, pp. 1428–1451. PMLR, 2020. + +Dheeru Dua and Casey Graff. UCI machine learning repository, 2017. URL http://archive. ics.uci.edu/ml. + +Jonathan Eckstein. A simplified form of block-iterative operator splitting and an asynchronous algorithm resembling the multi-block alternating direction method of multipliers. Journal of Optimization Theory and Applications, 173(1):155–182, 2017. + +Jonathan Eckstein and Benar Fux Svaiter. A family of projective splitting methods for the sum of two maximal monotone operators. Mathematical Programming, 111(1):173–199, 2008. + +Jonathan Eckstein and Benar Fux Svaiter. General projective splitting methods for sums of maximal monotone operators. SIAM Journal on Control and Optimization, 48(2):787–811, 2009. + +Harrison Edwards and Amos Storkey. Censoring representations with an adversary. arXiv preprint arXiv:1511.05897, 2015. + +Daniel Gabay. Applications of the method of multipliers to variational inequalities. In M. Fortin and R. Glowinski (eds.), Augmented Lagrangian Methods: Applications to the Solution of Boundary Value Problems, chapter IX, pp. 299–340. North-Holland, Amsterdam, 1983. + +Gauthier Gidel, Hugo Berard, GaΓ«tan Vignoud, Pascal Vincent, and Simon Lacoste-Julien. A variational inequality perspective on generative adversarial networks. In International Conference on Learning Representations, 2019. URL https://openreview.net/forum?id $=$ r1laEnA5Ym. + +Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In Z. Ghahramani, M. Welling, C. Cortes, N. Lawrence, and K. Q. Weinberger (eds.), Advances in Neural Information Processing Systems, volume 27. Curran Associates, 2014. 
+ +Paulina Grnarova, Yannic Kilcher, Kfir Y Levy, Aurelien Lucchi, and Thomas Hofmann. Generative minimization networks: Training GANs without competition. arXiv preprint arXiv:2103.12685, 2021. + +Patrick T Harker and Jong-Shi Pang. Finite-dimensional variational inequality and nonlinear complementarity problems: a survey of theory, algorithms and applications. Mathematical programming, 48(1):161–220, 1990. + +Yu-Guan Hsieh, Franck Iutzeler, JΓ©rΓ΄me Malick, and Panayotis Mertikopoulos. On the convergence of single-call stochastic extra-gradient methods. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'AlchΓ©-Buc, E. Fox, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 32. Curran Associates, 2019. + +Yu-Guan Hsieh, Franck Iutzeler, JΓ©rΓ΄me Malick, and Panayotis Mertikopoulos. Explore aggressively, update conservatively: Stochastic extragradient methods with variable stepsize scaling. In H. Larochelle, M. Ranzato, R. Hadsell, M. F. Balcan, and H. Lin (eds.), Advances in Neural Information Processing Systems, volume 33, pp. 16223–16234. Curran Associates, 2020. + +Chong Huang, Peter Kairouz, Xiao Chen, Lalitha Sankar, and Ram Rajagopal. Context-aware generative adversarial privacy. Entropy, 19(12):656, 2017. + +Laurent Jacob, Guillaume Obozinski, and Jean-Philippe Vert. Group lasso with overlaps and graph lasso. In LΓ©on Bottou and Michael Littman (eds.), Proceedings of the 26th International Conference on Machine Learning, pp. 433–440, Montreal, June 2009. Omnipress. + +Patrick R Johnstone and Jonathan Eckstein. Convergence rates for projective splitting. SIAM Journal on Optimization, 29(3):1931–1957, 2019. + +Patrick R Johnstone and Jonathan Eckstein. Projective splitting with forward steps only requires continuity. Optimization Letters, 14(1):229–247, 2020a. + +Patrick R Johnstone and Jonathan Eckstein. Projective splitting with forward steps. Mathematical Programming, 2020b. Published online, to appear in print. 
+ +Patrick R Johnstone and Jonathan Eckstein. Single-forward-step projective splitting: exploiting cocoercivity. Computational Optimization and Applications, 78(1):125–166, 2021. + +Anatoli Juditsky, Arkadi Nemirovski, and Claire Tauvel. Solving variational inequalities with stochastic mirror-prox algorithm. Stochastic Systems, 1(1):17–58, 2011. + +GM Korpelevich. Extragradient method for finding saddle points and other problems. Matekon, 13 (4):35–49, 1977. + +Daniel Kuhn, Peyman Mohajerin Esfahani, Viet Anh Nguyen, and Soroosh Shafieezadeh-Abadeh. Wasserstein distributionally robust optimization: Theory and applications in machine learning. In Serguei Netessine (ed.), Operations Research & Management Science in the Age of Analytics, Tutorials in Operations Research, pp. 130–166. INFORMS, 2019. + +Chris Junchi Li, Yaodong Yu, Nicolas Loizou, Gauthier Gidel, Yi Ma, Nicolas Le Roux, and Michael I Jordan. On the convergence of stochastic extragradient for bilinear games with restarted iteration averaging. arXiv preprint arXiv:2107.00464, 2021. + +Tianyi Lin, Chi Jin, and Michael Jordan. On gradient descent ascent for nonconvex-concave minimax problems. In Hal DaumΓ© III and Aarti Singh (eds.), Proceedings of the 37th International Conference on Machine Learning, volume 119 of Proceedings of Machine Learning Research, pp. 6083–6093. PMLR, 2020. + +Pierre-Louis Lions and Bertrand Mercier. Splitting algorithms for the sum of two nonlinear operators. SIAM Journal on Numerical Analysis, 16(6):964–979, 1979. + +Mingrui Liu, Hassan Rafique, Qihang Lin, and Tianbao Yang. First-order convergence theory for weakly-convex-weakly-concave min-max problems. Journal of Machine Learning Research, 22 (169):1–34, 2021. + +Nicolas Loizou, Hugo Berard, Alexia Jolicoeur-Martineau, Pascal Vincent, Simon Lacoste-Julien, and Ioannis Mitliagkas. Stochastic hamiltonian gradient methods for smooth games. In International Conference on Machine Learning, pp. 6370–6381. PMLR, 2020. 
+ +Nicolas Loizou, Hugo Berard, Gauthier Gidel, Ioannis Mitliagkas, and Simon Lacoste-Julien. Stochastic gradient descent-ascent and consensus optimization for smooth games: Convergence analysis under expected co-coercivity. arXiv preprint arXiv:2107.00052, 2021. + +Yura Malitsky and Matthew K Tam. A forward-backward splitting method for monotone inclusions without cocoercivity. SIAM Journal on Optimization, 30(2):1451–1472, 2020. + +Panayotis Mertikopoulos, Bruno Lecouat, Houssam Zenati, Chuan-Sheng Foo, Vijay Chandrasekhar, and Georgios Piliouras. Optimistic mirror descent in saddle-point problems: Going the extra(- gradient) mile. In International Conference on Learning Representations, 2019. URL https: //openreview.net/pdf?id=Bkg8jjC9KQ. + +Lars Mescheder, Sebastian Nowozin, and Andreas Geiger. The numerics of GANs. In I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 30. Curran Associates, 2017. + +Lars Mescheder, Andreas Geiger, and Sebastian Nowozin. Which training methods for GANs do actually converge? In Jennifer Dy and Andreas Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, volume 80 of Proceedings of Machine Learning Research, pp. 3481–3490. PMLR, 2018. + +Konstantin Mishchenko, Dmitry Kovalev, Egor Shulgin, Peter RichtΓ‘rik, and Yura Malitsky. Revisiting stochastic extragradient. In International Conference on Artificial Intelligence and Statistics, pp. 4573–4582. PMLR, 2020. + +Aryan Mokhtari, Asuman E Ozdaglar, and Sarath Pattathil. Convergence rate of $\mathbf { o } ( 1 / \mathrm { k } )$ for optimistic gradient and extragradient methods in smooth convex-concave saddle point problems. SIAM Journal on Optimization, 30(4):3230–3251, 2020. + +Renato DC Monteiro and Benar Fux Svaiter. On the complexity of the hybrid proximal extragradient method for the iterates and the ergodic mean. 
SIAM Journal on Optimization, 20(6):2755–2787, 2010. + +Vaishnavh Nagarajan and J. Zico Kolter. Gradient descent GAN optimization is locally stable. In I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 30. Curran Associates, 2017. + +Hongseok Namkoong and John C Duchi. Stochastic gradient methods for distributionally robust optimization with $f$ -divergences. In D. Lee, M. Sugiyama, U. Luxburg, I. Guyon, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 29. Curran Associates, 2016. + +Arkadi Nemirovski. Prox-method with rate of convergence $\mathrm { O } ( 1 / t )$ for variational inequalities with Lipschitz continuous monotone operators and smooth convex-concave saddle point problems. SIAM Journal on Optimization, 15(1):229–251, 2004. + +Yurii Nesterov. Dual extrapolation and its applications to solving variational inequalities and related problems. Mathematical Programming, 109(2):319–344, 2007. + +Neal Parikh and Stephen Boyd. Proximal algorithms. Foundations and Trends in Optimization, 1(3): 123–231, 2013. + +Reese Pathak and Martin J Wainwright. Fedsplit: an algorithmic framework for fast federated optimization. In H. Larochelle, M. Ranzato, R. Hadsell, M. F. Balcan, and H. Lin (eds.), Advances in Neural Information Processing Systems, volume 33, pp. 7057–7066. Curran Associates, Inc., 2020. URL https://proceedings.neurips.cc/paper/2020/file/ 4ebd440d99504722d80de606ea8507da-Paper.pdf. + +Fabian Pedregosa and Gauthier Gidel. Adaptive three-operator splitting. In Jennifer Dy and Andreas Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, volume 80 of Proceedings of Machine Learning Research, pp. 4085–4094. PMLR, 10–15 Jul 2018. + +Fabian Pedregosa, Kilian Fatras, and Mattia Casotto. Proximal splitting meets variance reduction. 
In Kamalika Chaudhuri and Masashi Sugiyama (eds.), Proceedings of the Twenty-Second International Conference on Artificial Intelligence and Statistics, volume 89 of Proceedings of Machine Learning Research, pp. 1–10. PMLR, 16–18 Apr 2019. + +Emile Richard, Pierre-Andre Savalle, and Nicolas Vayatis. Estimation of simultaneously sparse and low rank matrices. In John Langford and Joelle Pineau (eds.), Proceedings of the 29th International Conference on Machine Learning, pp. 1351–1358. Omnipress, 2012. + +Herbert Robbins and Sutton Monro. A stochastic approximation method. The annals of mathematical statistics, pp. 400–407, 1951. + +R Tyrrell Rockafellar. Monotone operators associated with saddle-functions and minimax problems. Nonlinear functional analysis, 18(part 1):397–407, 1970. + +Ernest K Ryu and Stephen Boyd. Primer on monotone operator methods. Appl. Comput. Math, 15(1): 3–43, 2016. + +Ernest K. Ryu, Kun Yuan, and Wotao Yin. Ode analysis of stochastic gradient methods with optimism and anchoring for minimax problems, 2020. + +Gesualdo Scutari, Francisco Facchinei, Jong-Shi Pang, and Daniel P Palomar. Real and complex monotone communication games. IEEE Transactions on Information Theory, 60(7):4197–4231, 2014. + +Soroosh Shafieezadeh-Abadeh, Peyman Mohajerin Esfahani, and Daniel Kuhn. Distributionally robust logistic regression. In Corinna Cortes, Neil D. Lawrence, Daniel D. Lee, Masashi Sugiyama, and Roman Garnett (eds.), Advances in Neural Information Processing Systems, volume 28, pp. 1576–1584. Curran Associates, 2015. + +Aman Sinha, Hongseok Namkoong, and John Duchi. Certifying some distributional robustness with principled adversarial training. In International Conference on Learning Representations, 2018. URL https://openreview.net/forum?id $=$ Hk6kPgZA-. + +Paul Tseng. A modified forward-backward splitting method for maximal monotone mappings. SIAM Journal on Control and Optimization, 38(2):431–446, 2000. + +Nguyen Van Dung and Bang Cong Vu. 
Convergence analysis of the stochastic reflected forwardbackward splitting algorithm. arXiv preprint arXiv:2102.08906, 2021. + +Christina Wadsworth, Francesca Vera, and Chris Piech. Achieving fairness through adversarial learning: an application to recidivism prediction. arXiv preprint arXiv:1807.00199, 2018. + +Xiaohan Yan and Jacob Bien. Rare feature selection in high dimensions. Journal of the American Statistical Association, 2020. Published online, to appear in print. + +Yaodong Yu, Tianyi Lin, Eric Mazumdar, and Michael I Jordan. Fast distributionally robust learning with variance reduced min-max optimization. arXiv preprint arXiv:2104.13326, 2021. + +Alp Yurtsever, Bang Cong Vu, and Volkan Cevher. Stochastic three-composite convex minimization. In D. Lee, M. Sugiyama, U. Luxburg, I. Guyon, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 29. Curran Associates, 2016. + +Brian Hu Zhang, Blake Lemoine, and Margaret Mitchell. Mitigating unwanted biases with adversarial learning. In Proceedings of the 2018 AAAI/ACM Conference on AI, Ethics, and Society, pp. 335– 340, 2018. + +# A ML APPLICATIONS OF THE MONOTONE INCLUSION (1) + +There are two main classes of applications of (1) in ML: optimization problems and saddle-point games. + +Optimization Problems In this case the monotone inclusion arises from finding the zero of a sum of subgradients of convex functions, as discussed in Section 2. It is typical in ML to solve the empirical risk minimization problem + +$$ +\operatorname* { m i n } _ { x \in \mathbb { R } ^ { d } } \frac { 1 } { m } \sum _ { j = 1 } ^ { m } f _ { j } ( x ) + \sum _ { i = 1 } ^ { n } r _ { i } ( x ) +$$ + +over a size- $m$ dataset. Usually, the gradient of the loss function $f _ { j }$ for each datapoint $j$ is Lipschitz continuous. The terms $r _ { i }$ may be regularizers used to reduce overfitting or encourage structural properties such as sparsity or low matrix rank. 
They also may represent constraints on the parameters such as nonnegativity or the being in the probability simplex. Crucially, these regularizers are rarely differentiable. The first-order necessary condition for the solution of (16) is + +$$ +0 \in \nabla f ( x ^ { * } ) + \sum _ { i = 1 } ^ { n } \partial r _ { i } ( x ^ { * } ) , +$$ + +where $\begin{array} { r } { f ( x ) \doteq \frac { 1 } { m } \sum _ { j = 1 } ^ { m } f _ { j } ( x ) } \end{array}$ , thus $\begin{array} { r } { \nabla f ( x ) \doteq \frac { 1 } { m } \sum _ { j = 1 } ^ { m } \nabla f _ { j } ( x ) } \end{array}$ . The inclusion (17) is a special case of (1), and our method may use the standard stochastic oracle for $\nabla f ( x )$ , namely + +$$ +\frac { 1 } { | \mathbf { B } | } \sum _ { j \in \mathbf { B } } \nabla f _ { j } ( z ) +$$ + +which subsamples a randomly selected minibatch of datapoints $\mathbf { B } \in \{ 1 , \dots , m \}$ . + +Games Consider the following nonsmooth Nash equilibrium problem + +$$ +x ^ { * } \in \underset { x \in \mathbb { R } ^ { d _ { x } } } { \arg \operatorname* { m i n } } F ( x , y ^ { * } ) + \underset { i = 1 } { \overset { n _ { 1 } } { \sum } } r _ { i } ( x ) \quad \mathrm { a n d } \quad y ^ { * } \in \underset { y \in \mathbb { R } ^ { d _ { y } } } { \arg \operatorname* { m i n } } G ( x ^ { * } , y ) + \underset { i = 1 } { \overset { n _ { 2 } } { \sum } } d _ { i } ( y ) . +$$ + +The terms player’s st $\scriptstyle \sum _ { i = 1 } ^ { n _ { 1 } } r _ { i } ( x )$ and e tha $\textstyle \sum _ { i = 1 } ^ { n _ { 2 } } d _ { i } ( y )$ once again represent regularizers and constrai (saddle-point) problems correspond to having $F ( x , y ) =$ $- G ( x , y )$ . 
Under appropriate convexity conditions and constraint qualifications, the solutions of (18) correspond to the solutions of the following monotone inclusion in the form of (1): + +$$ +0 \in \left[ \begin{array} { l } { \nabla _ { x } F ( x ^ { * } , y ^ { * } ) } \\ { \nabla _ { y } G ( x ^ { * } , y ^ { * } ) } \end{array} \right] + \sum _ { i = 1 } ^ { \operatorname* { m a x } \{ n _ { 1 } , n _ { 2 } \} } \left( \partial r _ { i } ( x ^ { * } ) \times \partial d _ { i } ( y ^ { * } ) \right) +$$ + +where for $i > \operatorname* { m i n } \{ n _ { 1 } , n _ { 2 } \}$ we include β€œdummy functions", either $r _ { i } ( x ) = 0$ when $n _ { 1 } < n _ { 2 }$ or $d _ { i } ( y ) = 0$ when $n _ { 1 } < n _ { 2 }$ . If the functions $F$ and $G$ arise as averages in the same we as $f$ in (16), then our method may again use a stochastic oracle for them. + +Distributionally-Robust ML One example application of (19) is distributionally-robust ML, as demonstrated in the numerical experiment in Section 7. The full problem statement is given in Appendix I. + +Lagrangian Duality Another application of (19) is constrained optimization via Lagrangian duality. Consider + +$$ +\operatorname* { m i n } _ { x \in \mathbb { R } ^ { d } } \left\{ f ( x ) + \sum _ { i = 1 } ^ { n } r _ { i } ( x ) \right\} \quad { \mathrm { s . t . } } \quad h _ { j } ( x ) \leq 0 \quad j = 1 , \ldots , p . +$$ + +As in (16), $f$ is a loss function and the $r _ { i }$ may represent regularizers and (β€œsimple”) constraints; in addition, there are $p$ functional constraints on the model parameters $x$ . Introducing Lagrange multipliers $\gamma \in \mathbb { R } ^ { p }$ , the problem can be written as + +$$ +\operatorname* { m i n } _ { x \in \mathbb { R } ^ { d } } \operatorname* { m a x } _ { \gamma \in \mathbb { R } _ { + } ^ { p } } \left\{ f ( x ) + \sum _ { i = 1 } ^ { n } r _ { i } ( x ) + \sum _ { j = 1 } ^ { p } \gamma _ { j } h _ { j } ( x ) \right\} . 
+$$ + +Under appropriate convexity conditions and constraint-qualifications, this reduces to the following inclusion in the form of (1): + +$$ +0 \in \left[ \begin{array} { c } { \nabla f ( x ) + \sum _ { j = 1 } ^ { p } \gamma _ { j } \nabla h _ { j } ( x ) } \\ { - h ( x ) } \end{array} \right] + \sum _ { i = 1 } ^ { n } \left( \partial r _ { i } ( x ^ { * } ) \times \{ 0 \} \right) +$$ + +where $h ( \boldsymbol { x } ) = [ h _ { 1 } ( \boldsymbol { x } ) , h _ { 2 } ( \boldsymbol { x } ) , \ldots , h _ { p } ( \boldsymbol { x } ) ] ^ { \top }$ . For certain choices of $h$ , such as linear or quadratic functions, the first term above is monotone and (locally) Lipschitz continuous (Alacaoglu et al., 2021). + +Bilinear Games with Many Constraints Finally, consider the bilinear saddlepoint problem subject to multiple constraints: + +$$ +\begin{array} { l l l } { \underset { x \in \mathbb { R } ^ { d } } { \operatorname* { m i n } } \underset { y \in \mathbb { R } ^ { d } } { \operatorname* { m a x } } x ^ { \top } D y } & { \mathrm { s . t . } } & { x \in \mathcal { C } _ { j } ^ { 1 } } & { j = 1 , \dots , n _ { 1 } , } \\ & { } & { y \in \mathcal { C } _ { j } ^ { 2 } } & { j = 1 , \dots , n _ { 2 } . } \end{array} +$$ + +Under some regularity conditions, this problem reduces to the inclusion + +$$ +0 \in \left[ \begin{array} { c } { D y ^ { * } } \\ { - D ^ { \top } x ^ { * } } \end{array} \right] + \sum _ { j = 1 } ^ { \operatorname* { m a x } \{ n _ { 1 } , n _ { 2 } \} } \big ( N _ { { \mathcal C } _ { j } ^ { 1 } } ( x ^ { * } ) \times N _ { { \mathcal C } _ { j } ^ { 2 } } ( y ^ { * } ) \big ) , +$$ + +where we introduce additional β€œdummy” sets $\mathcal { C } _ { j } ^ { 1 } = \mathbb { R } ^ { d }$ or $\mathcal { C } _ { j } ^ { 2 } = \mathbb { R } ^ { d }$ when $n _ { 1 } \neq n _ { 2 }$ . The first term is linear and skew symmetric, and therefore can easily be shown to be Lipschitz continuous and monotone. 
If all the constraint sets are closed and convex, then the rest of the terms are maximal monotone, then the problem is of the form (1), meaning that projective splitting may be applied, possibly using a stochastic oracle for the first term. + +# B ADDITIONAL RELATED WORK + +The preprint by Bot et al. (2019) develops a stochastic version of Tseng’s method under the requirement that the noise variance converges to 0. In ML, this could be achieved with the use of perpetually increasing batch sizes, a strategy that is impractical in many scenarios. The stochastic version of FRB proposed by Van Dung & Vu (2021) has more practical noise requirements, but has stronger assumptions on the problem which are rarely satisfied in ML applications: either uniform/strong monotonicity or a bounded domain. The papers by Yurtsever et al. (2016) and Pedregosa et al. (2019) consider stochastic variants of three-operator splitting, but require $B$ in (1) to be cocoercive, essentially restricting them to optimization problems. + +There are several alternatives to the (stochastic) extragradient method that reduce the number of gradient evaluations per iteration from two to one (Hsieh et al., 2019; Malitsky & Tam, 2020; Gidel et al., 2019). However, these methods have more stringent stepsize limits, making it unclear a priori whether they will outperform two-step methods. + +DSEG is a stochastic version of EG (Hsieh et al., 2020). The primary innovation of DSEG is using different stepsizes for the extrapolation and update steps, thereby resolving some of the convergence issues affecting stochastic EG. As noted earlier, DSEG is the special case of our SPS method in which $n = 0$ , that is, no regularizers/constraints are present in the underlying game. The analysis in (Hsieh et al., 2020) also did not consider the fixed stepsize choice given in Theorem 2. 
+
+In the context of GANs, several methods have been developed based on a variational inequality/monotone inclusion approach (Gidel et al., 2019; Daskalakis et al., 2018; Hsieh et al., 2019; 2020; BΓΆhm et al., 2020). Many of these papers point out that variational inequalities provide a principled framework for studying the GAN training problem and correcting some of the flaws in the standard method GDA.
+
+# C PROOF OF THEOREM 1
+
+# C.1 STOCHASTIC QUASI-FEJER MONOTONICITY
+
+The key to the analysis is showing that the algorithm satisfies Stochastic Quasi-Fejer Monotonicity (Combettes & Pesquet, 2015).
+
+Lemma 2 ((Combettes & Pesquet, 2015), Proposition 2.3). Suppose $p^k$ is a sequence of $\mathbb{R}^d$-valued random variables defined on a probability space $(\Omega, \mathcal{F}, P)$. Let $\mathcal{F}_k \doteq \sigma(p^1, \cdots, p^k)$. Let $F$ be a closed subset of $\mathbb{R}^d$ such that, for every $p \in F$, there exist nonnegative $\mathcal{F}_k$-measurable random variables $\chi^k(p) \geq 0$, $\eta^k(p) \geq 0$, and $\nu^k(p) \geq 0$ with $\sum_{k=1}^{\infty} \chi^k(p) < \infty$ a.s., $\sum_{k=1}^{\infty} \eta^k(p) < \infty$ a.s., and
+
+$$
+(\forall k \in \mathbb{N}) \quad \mathbb{E}[\| p^{k+1} - p \|^2 \,|\, \mathcal{F}_k] \leq (1 + \chi^k(p)) \| p^k - p \|^2 - \nu^k(p) + \eta^k(p).
+$$
+
+Then the following hold:
+
+$$
+1.\ (\forall p \in F): \quad \sum_{k=1}^{\infty} \nu^k(p) < \infty \ a.s.
+$$
+
+2. $p^k$ is bounded a.s.
+
+3. There exists $\tilde{\Omega}$ such that $P[\tilde{\Omega}] = 1$ and $\left\{ \| p^k(\omega) - p \| \right\}$ converges for every $\omega \in \tilde{\Omega}$ and $p \in F$.
+
+# C.2 IMPORTANT RECURSION FOR SPS
+
+The following lemma summarizes the key recursion satisfied by Algorithm 1, to which we will apply Lemma 2. Recall that $L$ is the Lipschitz constant of $B$.
+
+Lemma 3. For Algorithm 1, suppose (9)–(11) hold and
+
+$$
+\rho_k \leq \overline{\rho} < 1/L.
+$$
+
+Let
+
+$$
+T_k \doteq \frac{\tau}{\overline{\rho}} \sum_{i=1}^{n} \| y_i^k - w_i^k \|^2 + \frac{1}{\overline{\rho}\tau} \sum_{i=1}^{n} \| z^k - x_i^k \|^2 + 2\big(1 - \overline{\rho} L\big) \| B\big(z^k\big) - w_{n+1}^k \|^2
+$$
+
+then for all $p^* \in \mathcal{S}$, with probability one
+
+$$
+\mathbb{E}[\| p^{k+1} - p^* \|^2 \,|\, \mathcal{F}_k] \le \big(1 + C_1 \alpha_k^2 + C_3 \alpha_k \rho_k^2\big) \| p^k - p^* \|^2 - \alpha_k \rho_k T_k + C_2 \alpha_k^2 + C_4 \alpha_k \rho_k^2 \tag{21}
+$$
+
+where $C_1, \ldots, C_4$ are nonnegative constants defined in (33), (34), (48), and (49) below, respectively.
+
+Note that $T_k$ is a scaled version of the approximation residual $G_k$ defined in (14).
+
+We proceed to first prove Lemma 3 and then exploit the implications of Lemma 2. Referring to (10) and (11), let $N \doteq \operatorname*{max}_{j \in 1 \ldots 4} N_j$. To simplify the constants, we will use $N$ in place of $N_j$ for the noise variance bounds given in (10)-(11).
+
+# C.3 UPPER BOUNDING THE GRADIENT
+
+Throughout the analysis, we fix some $p^* = (z^*, w_1^*, \ldots, w_{n+1}^*) \in \mathcal{S}$. All statements are with probability one (almost surely), but for brevity we will omit this unless it needs to be emphasized.
+ +In this section, we derive appropriate upper bounds for $\| \nabla \varphi _ { k } \| ^ { 2 }$ to use in (13). We begin with $\nabla _ { z } \varphi _ { k }$ + +$$ +\begin{array} { r l r } & { } & { \displaystyle \| \nabla _ { z } \varphi _ { k } \| ^ { 2 } = \Big \| \displaystyle \sum _ { i = 1 } ^ { n + 1 } y _ { i } ^ { k } \Big \| ^ { 2 } \leq 2 \| y _ { n + 1 } ^ { k } \| ^ { 2 } + 2 \Big \| \displaystyle \sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \Big \| ^ { 2 } = 2 \Big \| B ( x _ { n + 1 } ^ { k } ) + e ^ { k } \Big \| ^ { 2 } + 2 \Big \| \displaystyle \sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \Big \| ^ { 2 } } \\ & { } & { \leq 4 \| B ( x _ { n + 1 } ^ { k } ) \| ^ { 2 } + 2 \Big \| \displaystyle \sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \Big \| ^ { 2 } + 4 \| e ^ { k } \| ^ { 2 } . } \end{array} +$$ + +Now next take expectations with respect to $\mathcal { F } _ { k }$ and $\mathcal { E } _ { k }$ , and use the bound on the variance of the noise in (11), obtaining + +$$ +\begin{array} { r l r } { { \mathbb { E } [ \| \nabla _ { z } \varphi _ { k } \| ^ { 2 } | \mathcal { F } _ { k } , \mathcal { E } _ { k } ] \leq \mathbb { E } [ 4 \| B ( x _ { n + 1 } ^ { k } ) \| ^ { 2 } + 2 \Big \| \displaystyle \sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \Big \| ^ { 2 } + 4 \| e ^ { k } \| ^ { 2 } \ \Big | \ \mathcal { F } _ { k } , \mathcal { E } _ { k } ] } } \\ & { } & { \leq 4 ( N + 1 ) \| B ( x _ { n + 1 } ^ { k } ) \| ^ { 2 } + 2 \Big \| \displaystyle \sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \Big \| ^ { 2 } + 4 N , ~ } \end{array} +$$ + +where we have used that $y _ { i } ^ { k }$ is $\mathcal { F } _ { k }$ -measurable for $i \in 1 . . n$ . 
Thus, taking expectations over $\mathcal { E } _ { k }$ conditioned on $\mathcal { F } _ { k }$ yields + +$$ +\mathbb { E } \left[ \| \nabla _ { z } \varphi _ { k } \| ^ { 2 } | \mathcal { F } _ { k } \right] \leq 4 ( N + 1 ) \mathbb { E } [ \| B ( x _ { n + 1 } ^ { k } ) \| ^ { 2 } | \mathcal { F } _ { k } ] + 2 \Big \| \sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \Big \| ^ { 2 } + 4 N . +$$ + +We will now bound the two terms on the right side of (22). + +# C.3.1 FIRST TERM IN (22) + +First, note that + +$$ +\begin{array} { r l } & { \| B ( z ^ { k } ) \| ^ { 2 } = \| B ( z ^ { k } ) - B ( z ^ { * } ) + B ( z ^ { * } ) ) \| ^ { 2 } } \\ & { \qquad \leq 2 \| B ( z ^ { k } ) - B ( z ^ { * } ) \| ^ { 2 } + 2 \| B ( z ^ { * } ) \| ^ { 2 } } \\ & { \qquad \leq 2 L ^ { 2 } \| z ^ { k } - z ^ { * } \| ^ { 2 } + 2 \| B ( z ^ { * } ) \| ^ { 2 } } \\ & { \qquad \leq 2 L ^ { 2 } \| p ^ { k } - p ^ { * } \| ^ { 2 } + 2 \| B ( z ^ { * } ) \| ^ { 2 } . } \end{array} +$$ + +Now, returning to the first term on the right of (22), we have + +$$ +\begin{array} { r l } & { \| B ( x _ { n + 1 } ^ { k } ) \| ^ { 2 } = \| B ( z ^ { k } ) + B ( x _ { n + 1 } ^ { k } ) - B ( z ^ { k } ) \| ^ { 2 } } \\ & { \qquad \leq 2 \| B ( z ^ { k } ) \| ^ { 2 } + 2 \| B ( x _ { n + 1 } ^ { k } ) - B ( z ^ { k } ) \| ^ { 2 } } \\ & { \qquad \leq 2 \| B ( z ^ { k } ) \| ^ { 2 } + 2 L ^ { 2 } \| x _ { n + 1 } ^ { k } - z ^ { k } \| ^ { 2 } } \\ & { \qquad \leq 4 L ^ { 2 } \| p ^ { k } - p ^ { * } \| ^ { 2 } + 4 \| B ( z ^ { * } ) \| ^ { 2 } + 2 L ^ { 2 } \| x _ { n + 1 } ^ { k } - z ^ { k } \| ^ { 2 } } \end{array} +$$ + +where we have used (23) to obtain (24). 
+ +For the third term in (24), we have from the calculation on line 7 of the algorithm that + +$$ +\begin{array} { r } { x _ { n + 1 } ^ { k } - z ^ { k } = - \rho _ { k } ( r ^ { k } - w _ { n + 1 } ^ { k } ) = - \rho _ { k } ( B ( z ^ { k } ) + \epsilon ^ { k } - w _ { n + 1 } ^ { k } ) , } \end{array} +$$ + +and therefore + +$$ +\begin{array} { r l } & { \| x _ { n + 1 } ^ { k } - z ^ { k } \| ^ { 2 } = \rho _ { k } ^ { 2 } \| B ( z ^ { k } ) + \epsilon ^ { k } - w _ { n + 1 } ^ { k } \| ^ { 2 } } \\ & { \qquad \leq \overline { { \rho } } ^ { 2 } \| B ( z ^ { k } ) + \epsilon ^ { k } - w _ { n + 1 } ^ { k } \| ^ { 2 } } \\ & { \qquad \leq 3 \overline { { \rho } } ^ { 2 } ( \| B ( z ^ { k } ) \| ^ { 2 } + \| \epsilon ^ { k } \| ^ { 2 } + \| w _ { n + 1 } ^ { k } \| ^ { 2 } ) . } \end{array} +$$ + +We next take expectations conditioned on $\mathcal { F } _ { k }$ and use the noise variance bound (10) to obtain + +$$ +\begin{array} { r l } & { \mathbb { E } \big [ \| x _ { n + 1 } ^ { k } - z ^ { k } \| ^ { 2 } | \mathcal { F } _ { k } \big ] \leq \mathbb { E } \big [ 3 \overline { { \rho } } ^ { 2 } \big ( \| B ( z ^ { k } ) \| ^ { 2 } + \| \epsilon ^ { k } \| ^ { 2 } + \| w _ { n + 1 } ^ { k } \| ^ { 2 } \big ) | \mathcal { F } _ { k } \big ] } \\ & { \qquad \leq 3 \overline { { \rho } } ^ { 2 } \big ( ( N + 1 ) \| B ( z ^ { k } ) \| ^ { 2 } + \| w _ { n + 1 } ^ { k } \| ^ { 2 } + N \big ) . 
} \end{array}
+$$
+
+Therefore
+
+$$
+\begin{array}{rl}
+\mathbb{E}[\| x_{n+1}^k - z^k \|^2 \,|\, \mathcal{F}_k] &\leq 6 \overline{\rho}^2 \big( (N+1) \| B(z^k) \|^2 + \| w_{n+1}^k - w_{n+1}^* \|^2 + \| w_{n+1}^* \|^2 \big) + 3 \overline{\rho}^2 N \\
+&= 6 \overline{\rho}^2 \big( (N+1) \| B(z^k) \|^2 + \| w_{n+1}^k - w_{n+1}^* \|^2 + \| B(z^*) \|^2 \big) + 3 \overline{\rho}^2 N \\
+&\leq 6 \overline{\rho}^2 \big( 2(N+1) L^2 \| p^k - p^* \|^2 + \| w_{n+1}^k - w_{n+1}^* \|^2 + (2N+3) \| B(z^*) \|^2 \big) + 3 \overline{\rho}^2 N \\
+&\leq 18 \overline{\rho}^2 (N+1) \big( (L^2+1) \| p^k - p^* \|^2 + \| B(z^*) \|^2 \big) + 3 \overline{\rho}^2 N
+\end{array}
+$$
+
+where the equality uses $w_{n+1}^* = B(z^*)$, the second inequality uses (23), and the final inequality uses $\| w_{n+1}^k - w_{n+1}^* \|^2 \leq \| p^k - p^* \|^2$. Combining (24) and (25), we arrive at
+
+$$
+\begin{array}{rl}
+\mathbb{E}\left[ \| B(x_{n+1}^k) \|^2 \,\middle|\, \mathcal{F}_k \right] &\leq 4 L^2 \left[ 1 + 9 \overline{\rho}^2 (L^2+1)(N+1) \right] \| p^k - p^* \|^2 \\
+&\quad + 4 \big( 1 + 9 \overline{\rho}^2 L^2 (N+1) \big) \| B(z^*) \|^2 + 6 \overline{\rho}^2 L^2 N.
+\end{array}
+$$
+
+# C.3.2 SECOND TERM IN (22)
+
+For $i \in 1..n$, line 5 of the algorithm may be rearranged into $y_i^k = \tau^{-1}(z^k - x_i^k) + w_i^k$, so
+
+$$
+\begin{array}{rl}
+\Big\| \displaystyle\sum_{i=1}^{n} y_i^k \Big\|^2 &= \Big\| \displaystyle\sum_{i=1}^{n} \big( \tau^{-1}(z^k - x_i^k) + w_i^k \big) \Big\|^2 \\
+&\leq 2 \Big\| \tau^{-1} \displaystyle\sum_{i=1}^{n} (z^k - x_i^k) \Big\|^2 + 2 \Big\| \displaystyle\sum_{i=1}^{n} w_i^k \Big\|^2 \\
+&\leq 2 n \tau^{-2} \displaystyle\sum_{i=1}^{n} \| z^k - x_i^k \|^2 + 2 \Big\| \displaystyle\sum_{i=1}^{n} w_i^k \Big\|^2 \\
+&\leq 4 n^2 \tau^{-2} \| z^k - z^* \|^2 + 4 n \tau^{-2} \displaystyle\sum_{i=1}^{n} \| z^* - x_i^k \|^2 + 4 n \displaystyle\sum_{i=1}^{n} \| w_i^k - w_i^* \|^2 + 4 \Big\| \displaystyle\sum_{i=1}^{n} w_i^* \Big\|^2 \\
+&{ \leq 4 n^2 (\tau^{-2} + 1) \| p^k - p^* \|^2 + 4 n \tau^{-2} \displaystyle\sum_{i=1}^{n} \| z^* - x_i^k \|^2 + 4 \Big\| \displaystyle\sum_{i=1}^{n} w_i^* \Big\|^2 .
} \end{array}
+$$
+
+By the definition of the solution set $\mathcal{S}$ in (5), $w_i^* \in A_i(z^*)$, so $z^* + \tau w_i^* \in (I + \tau A_i)(z^*)$, and since the resolvent is single-valued (Bauschke & Combettes, 2017, Cor. 23.9) we therefore obtain
+
+$$
+z^* = (I + \tau A_i)^{-1}(I + \tau A_i)(z^*) = J_{\tau A_i}(z^* + \tau w_i^*).
+$$
+
+From lines 3 and 4 of the algorithm, we also have $x_i^k = J_{\tau A_i}(z^k + \tau w_i^k)$ for $i \in 1..n$. Thus, using the nonexpansiveness of the resolvent (Bauschke & Combettes, 2017, Def. 4.1 and Cor. 23.9), we have
+
+$$
+\begin{array}{rl}
+\displaystyle\sum_{i=1}^{n} \| z^* - x_i^k \|^2 &= \displaystyle\sum_{i=1}^{n} \left\| J_{\tau A_i}(z^k + \tau w_i^k) - J_{\tau A_i}(z^* + \tau w_i^*) \right\|^2 \\
+&\leq \displaystyle\sum_{i=1}^{n} \| z^k + \tau w_i^k - z^* - \tau w_i^* \|^2 \\
+&= \displaystyle\sum_{i=1}^{n} \| z^k - z^* + \tau (w_i^k - w_i^*) \|^2 \\
+&\leq 2 n \| z^k - z^* \|^2 + 2 \tau^2 \displaystyle\sum_{i=1}^{n} \| w_i^k - w_i^* \|^2 \\
+&\leq 2 (n + \tau^2) \| p^k - p^* \|^2.
+\end{array}
+$$
+
+Combining (27) and (28) yields
+
+$$
+\Big\| \sum_{i=1}^{n} y_i^k \Big\|^2 \leq 12 n^2 \tau^{-2} (n + \tau^2) \| p^k - p^* \|^2 + 4 \Big\| \sum_{i=1}^{n} w_i^* \Big\|^2 .
+$$ + +Combining (26) and (29) with (22) yields + +$$ +\begin{array} { r l } & { \mathbb { E } \left[ \| \nabla _ { z } \varphi _ { k } \| ^ { 2 } | \mathcal { F } _ { k } \right] \le 2 4 \left[ ( 1 + 9 \overline { { \rho } } ^ { 2 } ) ( L ^ { 2 } + 1 ) ^ { 2 } ( N + 1 ) ^ { 2 } + n ^ { 2 } \tau ^ { - 2 } ( n + \tau ^ { 2 } ) \right] \| p ^ { k } - p ^ { * } \| ^ { 2 } } \\ & { \qquad + 1 6 ( N + 1 ) \big ( 1 + 9 \overline { { \rho } } ^ { 2 } L ^ { 2 } ( N + 1 ) \big ) \| B ( z ^ { * } ) \| ^ { 2 } + 8 \bigg \| \displaystyle \sum _ { i = 1 } ^ { n } w _ { i } ^ { * } \bigg \| ^ { 2 } } \\ & { \qquad + 2 4 \overline { { \rho } } ^ { 2 } L ^ { 2 } ( N + 1 ) N + 4 N . } \end{array} +$$ + +# C.3.3 DUAL GRADIENT NORM + +Considering that $\nabla \varphi _ { k }$ is taken with respect to the subspace $\mathcal { P }$ , the gradients with respect to the dual variables are β€” see for example Eckstein & Svaiter (2009) β€” for each $i \in { 1 . . ( n + 1 ) }$ , + +$$ +\begin{array} { l } { \displaystyle \| \nabla _ { w _ { i } } \varphi _ { k } \| ^ { 2 } = \left\| x _ { i } ^ { k } - \frac { 1 } { n + 1 } \sum _ { j = 1 } ^ { n + 1 } x _ { j } ^ { k } \right\| ^ { 2 } = \left\| \frac { 1 } { n + 1 } \sum _ { j = 1 } ^ { n + 1 } ( x _ { i } ^ { k } - x _ { j } ^ { k } ) \right\| ^ { 2 } } \\ { \displaystyle \leq \sum _ { j = 1 } ^ { n + 1 } \| x _ { i } ^ { k } - x _ { j } ^ { k } \| ^ { 2 } } \\ { \displaystyle \leq 2 \sum _ { j = 1 } ^ { n + 1 } \big ( \| x _ { i } ^ { k } - z ^ { k } \| ^ { 2 } + \| z ^ { k } - x _ { j } ^ { k } \| ^ { 2 } \big ) } \end{array} +$$ + +Summing this inequality for $i \in { 1 . . 
(n+1)}$ and collecting terms yields
+
+$$
+\sum_{i=1}^{n+1} \| \nabla_{w_i} \varphi_k \|^2 \leq 4 (n+1) \sum_{i=1}^{n+1} \| x_i^k - z^k \|^2,
+$$
+
+so taking expectations conditioned on $\mathcal{F}_k$ produces
+
+$$
+\begin{array}{rl}
+\displaystyle\sum_{i=1}^{n+1} \mathbb{E}[\| \nabla_{w_i} \varphi_k \|^2 \,|\, \mathcal{F}_k] &\leq 4 (n+1) \displaystyle\sum_{i=1}^{n+1} \mathbb{E}[\| x_i^k - z^k \|^2 \,|\, \mathcal{F}_k] \\
+&= 4 (n+1) \, \mathbb{E}[\| x_{n+1}^k - z^k \|^2 \,|\, \mathcal{F}_k] + 4 (n+1) \displaystyle\sum_{i=1}^{n} \| x_i^k - z^k \|^2 \\
+&\leq 4 (n+1) \, \mathbb{E}[\| x_{n+1}^k - z^k \|^2 \,|\, \mathcal{F}_k] + 8 (n+1) \displaystyle\sum_{i=1}^{n} \| z^* - x_i^k \|^2 + 8 n (n+1) \| z^k - z^* \|^2 \\
+&\leq 8 (n+1) \big( 2 \tau^2 + 3 n + 9 \overline{\rho}^2 (L^2+1)(N+1) \big) \| p^k - p^* \|^2 \\
+&\quad + 72 \overline{\rho}^2 (n+1)(N+1) \| B(z^*) \|^2 + 12 \overline{\rho}^2 (n+1) N
\end{array} +$$ + +where the final inequality employs (25) and (28). + +All told, using (30) and (31) and simplifying the constants, one obtains + +$$ +\begin{array} { r l r } { { \mathbb { E } [ \| \nabla \varphi _ { k } \| ^ { 2 } | \mathcal { F } _ { k } ] = \mathbb { E } [ \| \nabla _ { z } \varphi _ { k } \| ^ { 2 } | \mathcal { F } _ { k } ] + \sum _ { i = 1 } ^ { n + 1 } \mathbb { E } [ \| \nabla _ { w _ { i } } \varphi _ { k } \| ^ { 2 } | \mathcal { F } _ { k } ] } } \\ & { } & { \leq C _ { 1 } \| p ^ { k } - p ^ { * } \| ^ { 2 } + C _ { 2 } , } \end{array} +$$ + +where + +$$ +\begin{array} { c } { { C _ { 1 } = 2 4 ( 1 + 1 0 \overline { { { \rho } } } ^ { 2 } ) ( n + 1 ) ( L ^ { 2 } + 1 ) ^ { 2 } ( N + 1 ) ^ { 2 } } } \\ { { { } } } \\ { { + 8 ( n + 1 ) \left( 2 \tau ^ { 2 } + 6 ( n + 1 ) + 1 + 3 ( n + 1 ) ^ { 2 } \tau ^ { - 2 } \right) } } \end{array} +$$ + +and + +$$ +\begin{array} { l } { { C _ { 2 } = 1 6 ( N + 1 ) \left[ 1 + 4 { \overline { { \rho } } } ^ { 2 } ( n + 1 ) + 9 { \overline { { \rho } } } ^ { 2 } L ^ { 2 } ( N + 1 ) \right] \| B ( z ^ { * } ) \| ^ { 2 } + 8 \| \displaystyle \sum _ { i = 1 } ^ { n } w _ { i } ^ { * } \| ^ { 2 } } } \\ { { \nonumber } } \\ { { \qquad + 1 2 { \overline { { \rho } } } ^ { 2 } N ( 2 L ^ { 2 } ( N + 1 ) + n + 1 ) + 4 N . } } \end{array} +$$ + +# C.4 LOWER BOUND FOR $\varphi _ { k }$ -GAP + +Recalling (13), that is, + +$$ +\| p ^ { k + 1 } - p ^ { * } \| ^ { 2 } = \| p ^ { k } - p ^ { * } \| ^ { 2 } - 2 \alpha _ { k } ( \varphi _ { k } ( p ^ { k } ) - \varphi _ { k } ( p ^ { * } ) ) + \alpha _ { k } ^ { 2 } \| \nabla \varphi _ { k } \| ^ { 2 } . 
+$$ + +We may use the gradient bound from (32) to obtain + +$$ +\begin{array} { r } { \mathbb { E } [ \| p ^ { k + 1 } - p ^ { * } \| ^ { 2 } | \mathcal { F } _ { k } ] \le ( 1 + C _ { 1 } \alpha _ { k } ^ { 2 } ) \| p ^ { k } - p ^ { * } \| ^ { 2 } - 2 \alpha _ { k } \mathbb { E } [ \varphi _ { k } ( p ^ { k } ) - \varphi _ { k } ( p ^ { * } ) | \mathcal { F } _ { k } ] + C _ { 2 } \alpha _ { k } ^ { 2 } . } \end{array} +$$ + +We now focus on finding a lower bound for the term $\mathbb { E } [ \varphi _ { k } ( p ^ { k } ) - \varphi _ { k } ( p ^ { * } ) | \mathcal { F } _ { k } ]$ , which we call the β€œ $\varphi _ { k }$ -gap”. Recall that for $p = ( z , w _ { 1 } , \ldots , w _ { n + 1 } )$ , + +$$ +\varphi _ { k } ( p ) = \sum _ { i = 1 } ^ { n + 1 } \langle z - x _ { i } ^ { k } , y _ { i } ^ { k } - w _ { i } \rangle . +$$ + +For each $i \in { 1 . . ( n + 1 ) }$ , define $\varphi _ { i , k } ( p ) \doteq \langle z - x _ { i } ^ { k } , y _ { i } ^ { k } - w _ { i } \rangle$ . We will call $\mathbb { E } [ \varphi _ { i , k } ( p ^ { k } ) - \varphi _ { i , k } ( p ^ { * } ) \vert \mathcal { F } _ { k } ]$ the β€œ $\varphi _ { i , k }$ -gap”. Note that $\begin{array} { r } { \varphi _ { k } ( p ) = \sum _ { i = 1 } ^ { n + 1 } \varphi _ { i , k } ( p ) } \end{array}$ . + +C.5 LOWER BOUND FOR $\varphi _ { i , k }$ -GAP OVER $i \in 1 . . n$ + +For $i \in 1 . . n$ , we have from line 5 of the algorithm that + +$$ +z ^ { k } - x _ { i } ^ { k } = \tau ( y _ { i } ^ { k } - w _ { i } ^ { k } ) . +$$ + +Since $\varphi _ { i , k } ( p ^ { k } ) = \langle z ^ { k } - x _ { i } ^ { k } , y _ { i } ^ { k } - w _ { i } ^ { k } \rangle$ , one may conclude that for $i \in 1 . . n$ + +$$ +\varphi _ { i , k } ( p ^ { k } ) = \frac { \tau } { 2 } \| y _ { i } ^ { k } - w _ { i } ^ { k } \| ^ { 2 } + \frac { 1 } { 2 \tau } \| z ^ { k } - x _ { i } ^ { k } \| ^ { 2 } . +$$ + +On the other hand, for $p ^ { * } \in { \mathcal { S } }$ and $i \in 1 . . 
n$ , one also has + +$$ +- \varphi _ { i , k } \mathopen { } \mathclose \bgroup \left( p ^ { * } \aftergroup \egroup \right) = \mathopen { } \mathclose \bgroup \left. z ^ { * } - x _ { i } ^ { k } , w _ { i } ^ { * } - y _ { i } ^ { k } \aftergroup \egroup \right. \geq 0 +$$ + +by the monotonicity of $A _ { i }$ . Therefore, for $i \in 1 . . n$ , it holds that + +$$ +\varphi _ { i , k } ( p ^ { k } ) - \varphi _ { i , k } ( p ^ { * } ) \geq \frac { \tau } { 2 } \| y _ { i } ^ { k } - w _ { i } ^ { k } \| ^ { 2 } + \frac { 1 } { 2 \tau } \| z ^ { k } - x _ { i } ^ { k } \| ^ { 2 } , +$$ + +and taking expectations conditioned on $\mathcal { F } _ { k }$ leads to + +$$ +\mathbb { E } [ \varphi _ { i , k } ( p ^ { k } ) - \varphi _ { i , k } ( p ^ { * } ) | \mathcal { F } _ { k } ] \ge \frac { \tau } { 2 } \| y _ { i } ^ { k } - w _ { i } ^ { k } \| ^ { 2 } + \frac { 1 } { 2 \tau } \| z ^ { k } - x _ { i } ^ { k } \| ^ { 2 } +$$ + +where we have used that $x _ { i } ^ { k }$ and $y _ { i } ^ { k }$ are both $\mathcal { F } _ { k }$ -measurable for $i \in 1 . . n$ . + +# C.6 LOWER BOUND FOR $\varphi _ { n + 1 , k }$ -GAP + +From lines 6-7 of the algorithm, we have + +$$ +z ^ { k } - x _ { n + 1 } ^ { k } = \rho _ { k } ( B ( z ^ { k } ) - w _ { n + 1 } ^ { k } + \epsilon ^ { k } ) . 
+$$
+
+Therefore,
+
+$$
+\begin{array}{rl}
+\varphi_{n+1,k}(p^k) &= \langle z^k - x_{n+1}^k, \, y_{n+1}^k - w_{n+1}^k \rangle \\
+&\overset{(a)}{=} \langle z^k - x_{n+1}^k, \, B(x_{n+1}^k) + e^k - w_{n+1}^k \rangle \\
+&= \langle z^k - x_{n+1}^k, \, B(z^k) - w_{n+1}^k \rangle + \langle z^k - x_{n+1}^k, \, B(x_{n+1}^k) - B(z^k) \rangle + \langle z^k - x_{n+1}^k, \, e^k \rangle \\
+&= \rho_k \| B(z^k) - w_{n+1}^k \|^2 + \rho_k \langle \epsilon^k, \, B(z^k) - w_{n+1}^k \rangle + \langle z^k - x_{n+1}^k, \, B(x_{n+1}^k) - B(z^k) \rangle + \langle z^k - x_{n+1}^k, \, e^k \rangle \\
+&\geq \rho_k \| B(z^k) - w_{n+1}^k \|^2 + \rho_k \langle \epsilon^k, \, B(z^k) - w_{n+1}^k \rangle - L \| z^k - x_{n+1}^k \|^2 + \langle z^k - x_{n+1}^k, \, e^k \rangle
+\end{array}
+$$
+
+where equality (a) uses line 8 of the algorithm, the two subsequent equalities use (38), and the inequality employs the Cauchy–Schwarz inequality followed by Lipschitz continuity of $B$.
+
+On the other hand,
+
+$$
+\begin{array}{rl}
+-\varphi_{n+1,k}(p^*) &= \langle z^* - x_{n+1}^k, \, w_{n+1}^* - y_{n+1}^k \rangle \\
+&= \langle z^* - x_{n+1}^k, \, B(z^*) - B(x_{n+1}^k) \rangle + \langle x_{n+1}^k - z^*, \, e^k \rangle \\
+&\geq \langle x_{n+1}^k - z^*, \, e^k \rangle,
+\end{array}
+$$
+
+where the second equality uses line 8 of the algorithm and the inequality follows from the monotonicity of $B$.
+
+Combining (39) and (40) yields
+
+$$
+\begin{array}{rl}
+\varphi_{n+1,k}(p^k) - \varphi_{n+1,k}(p^*) &\geq \rho_k (1 - \rho_k L) \| B(z^k) - w_{n+1}^k \|^2 + \rho_k (1 - 2 \rho_k L) \langle \epsilon^k, \, B(z^k) - w_{n+1}^k \rangle \\
+&\quad + \langle z^k - x_{n+1}^k, \, e^k \rangle + \langle x_{n+1}^k - z^*, \, e^k \rangle - \rho_k^2 L \| \epsilon^k \|^2 \\
+&= \rho_k (1 - \rho_k L) \| B(z^k) - w_{n+1}^k \|^2 - \rho_k^2 L \| \epsilon^k \|^2 \\
+&\quad + \rho_k (1 - 2 \rho_k L) \langle \epsilon^k, \, B(z^k) - w_{n+1}^k \rangle + \langle z^k - z^*, \, e^k \rangle . \qquad (41)
+\end{array}
+$$
+
+Now, if we take expectations conditioned on $\mathcal{F}_k$ and use (9), we obtain
+
+$$
+\mathbb{E}\big[ \langle z^k - z^*, e^k \rangle \,\big|\, \mathcal{F}_k \big] = \langle z^k - z^*, \mathbb{E}[e^k | \mathcal{F}_k] \rangle = 0 .
+$$
+
+Similarly, (9) also yields
+
+$$
+\mathbb{E}\big[ \langle \epsilon^k, B(z^k) - w_{n+1}^k \rangle \,\big|\, \mathcal{F}_k \big] = \langle \mathbb{E}[\epsilon^k | \mathcal{F}_k], B(z^k) - w_{n+1}^k \rangle = 0 .
+$$
+
+Thus, using (42) and (43) and taking expectations of (41) yields
+
+$$
+\begin{array}{rl}
+\mathbb{E}[\varphi_{n+1,k}(p^k) - \varphi_{n+1,k}(p^*) \mid \mathcal{F}_k] &\ge \rho_k (1 - \rho_k L) \| B(z^k) - w_{n+1}^k \|^2 - \rho_k^2 L \, \mathbb{E}[\| \epsilon^k \|^2 | \mathcal{F}_k] \\
+&\ge \rho_k (1 - \overline{\rho} L) \| B(z^k) - w_{n+1}^k \|^2 - \rho_k^2 N L (1 + \| B(z^k) \|^2),
+\end{array}
+$$
+
+where in the second inequality we used (12) and the noise variance bound (10). Recall from (12) that $1 - \overline{\rho} L > 0$.
+
+Next, we remark that
+
+$$
+\begin{array}{rl}
+\| B(z^k) \|^2 &= \| B(z^k) - B(z^*) + B(z^*) \|^2 \\
+&{ \leq 2 L^2 \| z^k - z^* \|^2 + 2 \| B(z^*) \|^2 \leq 2 L^2 \| p^k - p^* \|^2 + 2 \| B(z^*) \|^2 .
} \end{array}
+$$
+
+Substituting this inequality into (44) yields
+
+$$
+\begin{array}{rl}
+\mathbb{E}[\varphi_{n+1,k}(p^k) - \varphi_{n+1,k}(p^*) | \mathcal{F}_k] &\geq \rho_k (1 - \overline{\rho} L) \| B(z^k) - w_{n+1}^k \|^2 \\
+&\quad - 2 \rho_k^2 N L^3 \| p^k - p^* \|^2 - \rho_k^2 N L (1 + 2 \| B(z^*) \|^2).
+\end{array}
+$$
+
+Finalizing the lower bound on the $\varphi_k$-gap Summing (37) over $i \in 1..n$ and using (45) yields
+
+$$
+\begin{array}{rl}
+\mathbb{E}[\varphi_k(p^k) - \varphi_k(p^*) | \mathcal{F}_k] &= \displaystyle\sum_{i=1}^{n+1} \mathbb{E}[\varphi_{i,k}(p^k) - \varphi_{i,k}(p^*) | \mathcal{F}_k] \\
+&\geq \dfrac{\tau}{2} \displaystyle\sum_{i=1}^{n} \| y_i^k - w_i^k \|^2 + \dfrac{1}{2\tau} \displaystyle\sum_{i=1}^{n} \| z^k - x_i^k \|^2 \\
+&\quad + \rho_k (1 - \overline{\rho} L) \| B(z^k) - w_{n+1}^k \|^2 - 2 \rho_k^2 N L^3 \| p^k - p^* \|^2 \\
+&\quad - \rho_k^2 N L (1 + 2 \| B(z^*) \|^2).
+\end{array}
+$$
+
+# C.7 ESTABLISHING STOCHASTIC QUASI-FEJER MONOTONICITY
+
+Returning to (35),
+
+$$
+\mathbb{E}[\| p^{k+1} - p^* \|^2 | \mathcal{F}_k] \le (1 + C_1 \alpha_k^2) \| p^k - p^* \|^2 - 2 \alpha_k \mathbb{E}[\varphi_k(p^k) - \varphi_k(p^*) | \mathcal{F}_k] + C_2 \alpha_k^2,
+$$
+
+we may now substitute (46) for the expectation on the right-hand side.
First, define + +$$ +T _ { k } \doteq \frac { \tau } { \overline { { \rho } } } \sum _ { i = 1 } ^ { n } \| y _ { i } ^ { k } - w _ { i } ^ { k } \| ^ { 2 } + \frac { 1 } { \overline { { \rho } } \tau } \sum _ { i = 1 } ^ { n } \| z ^ { k } - x _ { i } ^ { k } \| ^ { 2 } + 2 ( 1 - \overline { { \rho } } L ) \| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \| ^ { 2 } , +$$ + +after which we may use (46) in (35) to yield + +$$ +\begin{array} { r } { \mathbb { E } [ \| p ^ { k + 1 } - p ^ { * } \| ^ { 2 } | \mathcal { F } _ { k } ] \le \big ( 1 + C _ { 1 } \alpha _ { k } ^ { 2 } + C _ { 3 } \alpha _ { k } \rho _ { k } ^ { 2 } \big ) \| p ^ { k } - p ^ { * } \| ^ { 2 } - \alpha _ { k } \rho _ { k } T _ { k } + C _ { 2 } \alpha _ { k } ^ { 2 } + C _ { 4 } \alpha _ { k } \rho _ { k } ^ { 2 } } \end{array} +$$ + +here $C _ { 1 }$ and $C _ { 2 }$ are defined as before in (33) and (34) and + +$$ +\begin{array} { l } { C _ { 3 } = 4 N L ^ { 3 } } \\ { C _ { 4 } = 2 N L ( 1 + 2 \| B ( z ^ { \ast } ) \| ^ { 2 } ) . } \end{array} +$$ + +This completes the proof of Lemma 3. + +# C.8 A CONVERGENCE LEMMA + +Before establishing almost-sure convergence, we need the following lemma to derive convergence of the iterates from convergence of $T _ { k }$ defined above. Note that a more elaborate result would be needed in an infinite-dimensional setting. + +Lemma 4. 
For deterministic sequences $z ^ { k } \in \mathbb { R } ^ { ( n + 1 ) d }$ , $\{ ( w _ { i } ^ { k } ) _ { i = 1 } ^ { n + 1 } \} \ \in \ { \mathcal { P } }$ , and $\{ ( x _ { i } ^ { k } , y _ { i } ^ { k } ) _ { i = 1 } ^ { n + 1 } \} \in$ $\mathbb { R } ^ { 2 ( n + 1 ) d }$ , suppose that $y _ { i } ^ { k } \in A _ { i } ( x _ { i } ^ { k } )$ for $i \in 1 . . n$ , $\textstyle \sum _ { i = 1 } ^ { n + 1 } w _ { i } ^ { k } = 0$ , + +$$ +\xi _ { 1 } \sum _ { i = 1 } ^ { n } \| y _ { i } ^ { k } - w _ { i } ^ { k } \| ^ { 2 } + \xi _ { 2 } \sum _ { i = 1 } ^ { n } \| z ^ { k } - x _ { i } ^ { k } \| ^ { 2 } + \xi _ { 3 } \| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \| ^ { 2 } \to 0 +$$ + +for scalars $\xi _ { 1 } , \xi _ { 2 } , \xi _ { 3 } > 0$ , and $p ^ { k } \doteq ( z ^ { k } , w _ { 1 } ^ { k } , \ldots , w _ { n + 1 } ^ { k } ) \to \hat { p } \doteq ( \hat { z } , \hat { w } _ { 1 } , \ldots , \hat { w } _ { n + 1 } )$ . Then $\hat { p } \in \mathcal S$ . + +Proof. Fix any $i \in \{ 1 , \ldots , n \}$ . Since $\| y _ { i } ^ { k } - w _ { i } ^ { k } \| \to 0$ by (50) and $w _ { i } ^ { k } \to \hat { w } _ { i }$ , we also have $y _ { i } ^ { k } \to \hat { w } _ { i }$ . Similarly, (50) also implies that $\lVert z ^ { k } - x _ { i } ^ { k } \rVert \to 0$ , so from $z ^ { k } \to \hat { z }$ we also have $x _ { i } ^ { k } \to \hat { z }$ . Since $y _ { i } ^ { k } \in A _ { i } ( x _ { i } ^ { k } )$ and $( x _ { i } ^ { k } , y _ { i } ^ { k } ) \to ( \hat { z } , \hat { w } _ { i } )$ , (Bauschke & Combettes, 2017, Prop. 20.37) implies $\hat { w } _ { i } \in A _ { i } ( \hat { z } )$ . Since $i$ was arbitrary, the preceding conclusions hold for $i \in 1 . . n$ . + +Now, (50) also implies that $\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \| \to 0$ . Therefore, since $w _ { n + 1 } ^ { k } \to \hat { w } _ { n + 1 }$ , we also have $B ( z ^ { k } ) \to \hat { w } _ { n + 1 }$ . 
Much as before, since $( z ^ { k } , B ( z ^ { k } ) ) \to ( \hat { z } , \hat { w } _ { n + 1 } )$ , we may apply (Bauschke & Combettes, 2017, Prop. 20.37) to conclude that $\hat { w } _ { n + 1 } = B ( \hat { z } )$ . + +Since the linear subspace $\mathcal { P }$ defined in (6) must be closed, the limit $\left( \hat { z } , \hat { w } _ { 1 } , \dots , \hat { w } _ { n + 1 } \right)$ of $\{ ( z ^ { k } , w _ { 1 } ^ { k } , \ldots , w _ { n + 1 } ^ { k } ) \} \subset \mathcal { P }$ must be in $\mathcal { P }$ , hence $\textstyle \sum _ { i = 1 } ^ { n + 1 } { \hat { w } } _ { i } = 0$ . + +In summary, $\hat { p } = ( \hat { z } , \hat { w } _ { 1 } , \dots , \hat { w } _ { n + 1 } )$ satisfies the conditions defining membership in $\mathcal { S }$ : $\hat { w } _ { i } \in A _ { i } ( \hat { z } )$ for $i \in 1 . . n$ , $\hat { w } _ { n + 1 } = B ( \hat { z } )$ , and $\textstyle \sum _ { i = 1 } ^ { n + 1 } { \hat { w } } _ { i } = 0$ , so $\hat { p } \in \mathcal S$ . + +# C.9 FINISHING THE PROOF OF THEOREM 1 + +Given $\textstyle \sum _ { k } \alpha _ { k } ^ { 2 } < \infty$ , and $\textstyle \sum _ { k } \alpha _ { k } \rho _ { k } ^ { 2 } < \infty$ , (47) satisfies the conditions of Stochastic Quasi-Fejer Monotonicity as given in Lemma 2. By applying Lemma 2, we conclude that there exist $\Omega _ { 1 } , \Omega _ { 2 } , \Omega _ { 3 }$ such that $P [ \Omega _ { i } ] = 1$ for $i = 1 , 2 , 3$ and + +1. for all $v \in \Omega _ { 1 }$ + +$$ +\sum _ { k = 1 } ^ { \infty } \alpha _ { k } \rho _ { k } T _ { k } ( v ) < \infty , +$$ + +2. for all $v \in \Omega _ { 2 }$ , and $p ^ { * } \in { \mathcal { S } }$ , $\| p ^ { k } ( v ) - p ^ { * } \|$ converges to a finite nonnegative random variable, + +3. for all $v \in \Omega _ { 3 } , p ^ { k } ( v )$ remains bounded. + +Since $\textstyle \sum _ { k = 1 } ^ { \infty } \alpha _ { k } \rho _ { k } = \infty$ , (51) implies that for all $v \in \Omega _ { 1 }$ there exists a subsequence $q _ { k } ( v )$ such that + +$$ +T _ { q _ { k } ( v ) } \to 0 . 
+$$ + +Let $\Omega ^ { \prime } = \Omega _ { 1 } \cap \Omega _ { 2 } \cap \Omega _ { 3 }$ and note that $P [ \Omega ^ { \prime } ] = 1$ . Choose $v \in \Omega ^ { \prime }$ . Since $p ^ { k } ( v )$ remains bounded, so does $p ^ { q _ { k } ( v ) } ( v )$ for $q _ { k } ( v )$ defined above in (52). Thus there exists a subsequence $r _ { k } ( v ) \subseteq q _ { k } ( v )$ and $\hat { p } ( v ) \in \mathbb { R } ^ { ( n + 2 ) d }$ such that $p ^ { r _ { k } ( v ) } ( v ) \to \hat { p } ( v )$ . But since $T _ { q _ { k } ( v ) } \to 0$ , it also follows that $T _ { r _ { k } ( v ) } \to 0$ , that is, + +$$ +\begin{array} { r l r } { { \frac { \tau } { \overline { \rho } } \sum _ { i = 1 } ^ { n } \| y _ { i } ^ { r _ { k } ( v ) } ( v ) - w _ { i } ^ { r _ { k } ( v ) } ( v ) \| ^ { 2 } + \frac { 1 } { \overline { \rho } \tau } \sum _ { i = 1 } ^ { n } \| z ^ { r _ { k } ( v ) } ( v ) - x _ { i } ^ { r _ { k } ( v ) } ( v ) \| ^ { 2 } } } \\ & { } & { \qquad + 2 ( 1 - \overline { { \rho } } L ) \| B ( z ^ { r _ { k } ( v ) } ( v ) ) - w _ { n + 1 } ^ { r _ { k } ( v ) } ( v ) \| ^ { 2 } \to 0 . } \end{array} +$$ + +We then have from Lemma 4 that $\hat { p } ( v ) \in \mathcal { S }$ . + +Since $p ^ { r _ { k } ( v ) } ( v ) \to \hat { p } ( v )$ , it follows that $\lVert p ^ { r _ { k } ( v ) } ( v ) - \hat { p } ( v ) \rVert \to 0$ . But since $\hat { p } ( v ) \in \mathcal { S } , \| p ^ { k } ( v ) - \hat { p } ( v ) \|$ converges by point 2 above. Thus + +$$ +\operatorname* { l i m } _ { k \to \infty } \| p ^ { k } ( v ) - \hat { p } ( v ) \| = \operatorname* { l i m } _ { k \to \infty } \| p ^ { r _ { k } ( v ) } ( v ) - \hat { p } ( v ) \| = 0 . +$$ + +Therefore $p ^ { k } ( v ) \to \hat { p } ( v ) \in \mathcal { S }$ . Thus there exists $\hat { p } \in \mathcal S$ such that $p ^ { k } \to \hat { p }$ a.s., which completes the proof of Theorem 1. + +# C.10 TWO ADDITIONAL RESULTS + +In this section, we prove two additional useful results about SPS. First, that $x _ { i } ^ { k } \to \hat { z }$ (a.s.) for $i = 1 , \ldots , n$ . 
Second, that $G _ { k } \to 0$ (a.s.). + +Note that + +$$ +x _ { i } ^ { k } = J _ { \tau A _ { i } } ( z ^ { k } + \tau w _ { i } ^ { k } ) +$$ + +and since $z ^ { k }$ and $w _ { i } ^ { k }$ converge a.s., so does $x _ { i } ^ { k }$ . Consider the subsequence $q _ { k } ( v )$ such that (52) holds. Then + +$$ +z ^ { q _ { k } ( v ) } - x _ { i } ^ { q _ { k } ( v ) } \to 0 +$$ + +thus + +$$ +x _ { i } ^ { q _ { k } ( v ) } \to \hat { z } . +$$ + +Since $x _ { i } ^ { k }$ converges to some limit (a.s.), that limit must be $\hat { z }$ . + +Recall that + +$$ +\begin{array} { r } { G _ { k } \doteq \sum _ { i = 1 } ^ { n } \| y _ { i } ^ { k } - w _ { i } ^ { k } \| ^ { 2 } + \sum _ { i = 1 } ^ { n } \| z ^ { k } - x _ { i } ^ { k } \| ^ { 2 } + \| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \| ^ { 2 } . } \end{array} +$$ + +We have shown that $z ^ { k }$ and $x _ { i } ^ { k }$ share the same limit for $i = 1 , \ldots , n$ (a.s.). Therefore $z ^ { k } - x _ { i } ^ { k } \to 0$ (a.s.). Since + +$$ +y _ { i } ^ { k } - w _ { i } ^ { k } = \tau ^ { - 1 } ( z ^ { k } - x _ { i } ^ { k } ) , +$$ + +it follows that $y _ { i } ^ { k } - w _ { i } ^ { k } \to 0$ (a.s.) for $i = 1 , \ldots , n$ . Therefore + +$$ +G _ { k } \to \| B ( \hat { z } ) - \hat { w } _ { n + 1 } \| ^ { 2 } . +$$ + +But since $( \hat { z } , \hat { w } _ { 1 } , \dots , \hat { w } _ { n + 1 } ) \in \mathcal { S }$ , we have $\hat { w } _ { n + 1 } = B ( \hat { z } )$ , implying that $G _ { k } \to 0$ (a.s.). + +# D PROOF OF LEMMA 1 + +If $G _ { k } = 0$ , then + +$$ +\forall i = 1 , \ldots , n : \quad y _ { i } ^ { k } = w _ { i } ^ { k } \mathrm { ~ a n d ~ } z ^ { k } = x _ { i } ^ { k } . +$$ + +Since $y _ { i } ^ { k } \in A _ { i } ( x _ { i } ^ { k } )$ for $i = 1 , \ldots , n$ , (53) implies that + +$$ +\forall i \in 1 . . n : \quad w _ { i } ^ { k } \in A _ { i } ( z ^ { k } ) . +$$ + +Furthermore $G _ { k } = 0$ also implies that $w _ { n + 1 } ^ { k } = B ( z ^ { k } )$ . 
Finally, since $\textstyle \sum _ { i = 1 } ^ { n + 1 } w _ { i } ^ { k } = 0$ , we have that + +$$ +( z ^ { k } , w _ { 1 } ^ { k } , \ldots , w _ { n + 1 } ^ { k } ) \in { \mathcal { S } } . +$$ + +Conversely, suppose $( z ^ { k } , w _ { 1 } ^ { k } , \ldots , w _ { n + 1 } ^ { k } ) \in { \mathcal { S } }$ . The definition of $\mathcal { S }$ implies that $B ( z ^ { k } ) = w _ { n + 1 } ^ { k }$ and furthermore that $w _ { i } ^ { k } \in A _ { i } ( z ^ { k } )$ for $i \in 1 . . n$ . For any $i \in 1 . . n$ , considering line 3 of Algorithm 1, we may write $t _ { i } ^ { k } = z ^ { k } + \tau w _ { i } ^ { k } \in ( I + \tau A _ { i } ) ( z ^ { k } )$ , implying $z ^ { k } \in ( I + \tau A _ { i } ) ^ { - 1 } ( t _ { i } ^ { k } )$ . But since the resolvent $J _ { \tau A _ { i } } = ( I + \tau A _ { i } ) ^ { - 1 }$ is single-valued (Bauschke & Combettes, 2017, Prop. 23.8), we must have $z ^ { k } = ( I + \tau A _ { i } ) ^ { - 1 } ( t _ { i } ^ { k } )$ . Thus, by line 4, we have $x _ { i } ^ { k } = z ^ { k }$ . We may also derive from line 5 that + +$$ +y _ { i } ^ { k } = \tau ^ { - 1 } ( t _ { i } ^ { k } - x _ { i } ^ { k } ) = \tau ^ { - 1 } ( z ^ { k } + \tau w _ { i } ^ { k } - z ^ { k } ) = w _ { i } ^ { k } . +$$ + +Thus, since $x _ { i } ^ { k } = z ^ { k }$ and $y _ { i } ^ { k } = w _ { i } ^ { k }$ for $i = 1 , \ldots , n$ and $w _ { n + 1 } ^ { k } = B ( z ^ { k } )$ , we have that $G _ { k } = 0$ . + +# E PROOF OF THEOREM 2 + +In addition to the proof, we provide a more detailed statement of the theorem: + +Theorem 3. Fix the total iterations $K \geq 1$ of Algorithm 1 and set + +$$ +\begin{array} { l l } { { \forall k = 1 , \dots , K : } } & { { \qquad \rho _ { k } = \rho \doteq \operatorname* { m i n } \left\{ K ^ { - 1 / 4 } , \frac { 1 } { 2 L } \right\} } } \\ { { \forall k = 1 , \dots , K : } } & { { \qquad \alpha _ { k } = \alpha \doteq C _ { f } \rho ^ { 2 } } } \end{array} +$$ + +for some $C _ { f } > 0$ . Suppose (9)-(11) hold. 
Then for any $p ^ { * } \in { \mathcal { S } }$ , + +$$ +\begin{array} { l } { \displaystyle \frac { 1 } { K } \sum _ { j = 1 } ^ { K } \mathbb { E } [ G _ { j } ] \leq \frac { 8 L ^ { 3 } \exp \left( C _ { f } ( C _ { f } C _ { 1 } + C _ { 3 } ) \right) } { C _ { f } \operatorname* { m i n } \{ \tau , \tau ^ { - 1 } \} K } \left( \| p ^ { 1 } - p ^ { * } \| ^ { 2 } + \frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } \right) \mathrm { ~ } f o r ~ K < ( 2 L ) ^ { 4 } } \\ { \displaystyle \frac { 1 } { K } \sum _ { j = 1 } ^ { K } \mathbb { E } [ G _ { j } ] \leq \frac { \exp \left( C _ { f } ( C _ { f } C _ { 1 } + C _ { 3 } ) \right) } { C _ { f } \operatorname* { m i n } \{ \tau , \tau ^ { - 1 } \} K ^ { 1 / 4 } } \left( \| p ^ { 1 } - p ^ { * } \| ^ { 2 } + \frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } \right) \mathrm { ~ } f o r ~ K \geq ( 2 L ) ^ { 4 } . } \end{array} +$$ + +where $G _ { k }$ is the approximation residual defined in (14), and $C _ { 1 } , C _ { 2 } , C _ { 3 } , C _ { 4 }$ are the nonnegative constants defined in (33), (34), (48), and (49), respectively. Therefore, + +$$ +\frac { 1 } { K } \sum _ { j = 1 } ^ { K } \mathbb { E } [ G _ { j } ] = \mathcal { O } ( K ^ { - 1 / 4 } ) . +$$ + +Proof. Fix $\alpha _ { k } = \alpha$ and $\rho _ { k } = \rho$ , where $\alpha$ and $\rho$ are the respective right-hand sides of (55)-(56). Lemma 3 implies that (21) holds so long as (9)-(11) hold and the stepsize $\rho$ satisfies $\rho < L ^ { - 1 }$ . Since + +$$ +\rho = \operatorname* { m i n } \left\{ K ^ { - 1 / 4 } , \frac { 1 } { 2 L } \right\} \leq \frac { 1 } { 2 L } , +$$ + +we conclude that (21) applies. 
+ +Rewriting (21) with $\alpha _ { k } = \alpha$ and $\rho _ { k } = \rho$ , we have + +$$ +\begin{array} { r } { \mathbb { E } [ \| p ^ { k + 1 } - p ^ { * } \| ^ { 2 } | \mathcal { F } _ { k } ] \le ( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } ) \| p ^ { k } - p ^ { * } \| ^ { 2 } - \alpha \rho T _ { k } + C _ { 2 } \alpha ^ { 2 } + C _ { 4 } \alpha \rho ^ { 2 } . } \end{array} +$$ + +Therefore, taking expectations over $\mathcal { F } _ { k }$ , we have + +$$ +\begin{array} { r } { \mathbb { E } \| p ^ { k + 1 } - p ^ { * } \| ^ { 2 } \leq ( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } ) \mathbb { E } \| p ^ { k } - p ^ { * } \| ^ { 2 } - \alpha \rho \mathbb { E } T _ { k } + C _ { 2 } \alpha ^ { 2 } + C _ { 4 } \alpha \rho ^ { 2 } . } \end{array} +$$ + +Recall that + +$$ +T _ { k } \doteq \frac { \tau } { \rho } \sum _ { i = 1 } ^ { n } \| y _ { i } ^ { k } - w _ { i } ^ { k } \| ^ { 2 } + \frac { 1 } { \rho \tau } \sum _ { i = 1 } ^ { n } \| z ^ { k } - x _ { i } ^ { k } \| ^ { 2 } + 2 ( 1 - \overline { { \rho } } L ) \| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \| ^ { 2 } , +$$ + +where for the first two terms we have simply set $\rho = \overline { { \rho } }$ because the stepsize is constant. However, for the final term, we will still use an upper bound, $\overline { { \rho } }$ , on $\rho$ . In the current setting, we know that $\rho \leq ( 1 / 2 ) L ^ { - 1 }$ and therefore we may set $\overline { { \rho } } = ( 1 / 2 ) L ^ { - 1 }$ . Thus $1 - \overline { { \rho } } L = 1 / 2$ , leading to + +$$ +\rho \mathbb { E } T _ { k } = \tau \sum _ { i = 1 } ^ { n } \mathbb { E } \| y _ { i } ^ { k } - w _ { i } ^ { k } \| ^ { 2 } + \tau ^ { - 1 } \sum _ { i = 1 } ^ { n } \mathbb { E } \| z ^ { k } - x _ { i } ^ { k } \| ^ { 2 } + \rho \mathbb { E } \| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \| ^ { 2 } . 
+$$ + +Let + +$$ +U _ { k } \doteq \mathbb { E } \| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \| ^ { 2 } \qquad W _ { k } \doteq \tau \sum _ { i = 1 } ^ { n } \mathbb { E } \| y _ { i } ^ { k } - w _ { i } ^ { k } \| ^ { 2 } + \tau ^ { - 1 } \sum _ { i = 1 } ^ { n } \mathbb { E } \| z ^ { k } - x _ { i } ^ { k } \| ^ { 2 } , +$$ + +so that + +$$ +\rho \mathbb { E } T _ { k } = \rho U _ { k } + W _ { k } , +$$ + +and also let + +$$ +V _ { k } \doteq \mathbb { E } \| p ^ { k } - p ^ { * } \| ^ { 2 } . +$$ + +Using these definitions in (59) we write + +$$ +\begin{array} { r } { V _ { k + 1 } \leq \big ( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } \big ) V _ { k } - \alpha \rho U _ { k } - \alpha W _ { k } + C _ { 2 } \alpha ^ { 2 } + C _ { 4 } \alpha \rho ^ { 2 } . } \end{array} +$$ + +Therefore, + +$$ +\begin{array} { c } { { V _ { k + 1 } + \alpha \rho U _ { k } + \alpha W _ { k } \leq ( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } ) V _ { k } + C _ { 2 } \alpha ^ { 2 } + C _ { 4 } \alpha \rho ^ { 2 } } } \\ { { \Longleftrightarrow V _ { k + 1 } + \alpha \rho \displaystyle \sum _ { j = 1 } ^ { k } U _ { j } + \alpha \displaystyle \sum _ { j = 1 } ^ { k } W _ { j } \leq ( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } ) V _ { k } + \alpha \rho \displaystyle \sum _ { j = 1 } ^ { k - 1 } U _ { j } + \alpha \displaystyle \sum _ { j = 1 } ^ { k - 1 } W _ { j } } } \\ { { \qquad + C _ { 2 } \alpha ^ { 2 } + C _ { 4 } \alpha \rho ^ { 2 } } } \\ { { \leq ( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } ) \left[ V _ { k } + \alpha \rho \displaystyle \sum _ { j = 1 } ^ { k - 1 } U _ { j } + \alpha \displaystyle \sum _ { j = 1 } ^ { k - 1 } W _ { j } \right] } } \\ { { \qquad + C _ { 2 } \alpha ^ { 2 } + C _ { 4 } \alpha \rho ^ { 2 } , } } \end{array} +$$ + +where we have used that $U _ { k } , W _ { k } \ge 0$ . 
Letting + +$$ +R _ { k } = V _ { k } + \alpha \rho \sum _ { j = 1 } ^ { k - 1 } U _ { j } + \alpha \sum _ { j = 1 } ^ { k - 1 } W _ { j } , +$$ + +we then have + +$$ +R _ { k + 1 } \leq { \left( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } \right) } R _ { k } + C _ { 2 } \alpha ^ { 2 } + C _ { 4 } \alpha \rho ^ { 2 } , +$$ + +which implies + +$$ +R _ { k + 1 } \leq ( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } ) ^ { k } R _ { 1 } + ( C _ { 2 } \alpha ^ { 2 } + C _ { 4 } \alpha \rho ^ { 2 } ) \sum _ { j = 1 } ^ { k } ( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } ) ^ { k - j } . +$$ + +Now, + +$$ +\begin{array} { r l r } { { \sum _ { j = 1 } ^ { k } ( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } ) ^ { k - j } = \sum _ { j = 0 } ^ { k - 1 } ( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } ) ^ { j } } } \\ & { } & { = \frac { ( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } ) ^ { k } - 1 } { ( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } ) - 1 } } \\ & { } & { = \frac { ( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } ) ^ { k } - 1 } { C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } } } \\ & { } & { \leq \frac { ( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } ) ^ { k } } { C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } } . } \end{array} +$$ + +Therefore, + +$$ +R _ { k + 1 } \leq ( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } ) ^ { k } \left( R _ { 1 } + { \frac { C _ { 2 } \alpha ^ { 2 } + C _ { 4 } \alpha \rho ^ { 2 } } { C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } } } \right) . +$$ + +Fix the number of iterations $K \geq 1$ . Now + +$$ +\rho = \operatorname* { m i n } \left\{ K ^ { - 1 / 4 } , \frac { 1 } { 2 L } \right\} \leq \frac { 1 } { K ^ { 1 / 4 } } \leq 1 . 
+$$ + +Therefore, + +$$ +\begin{array} { l } { \displaystyle \alpha \rho \sum _ { j = 1 } ^ { K } ( U _ { j } + W _ { j } ) \leq \alpha \rho \sum _ { j = 1 } ^ { K } U _ { j } + \alpha \sum _ { j = 1 } ^ { K } W _ { j } } \\ { \leq R _ { K + 1 } } \\ { \leq ( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } ) ^ { K } \left( R _ { 1 } + \frac { C _ { 2 } \alpha ^ { 2 } + C _ { 4 } \alpha \rho ^ { 2 } } { C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } } \right) . } \end{array} +$$ + +Dividing through by $\alpha \rho K$ , we obtain + +$$ +\frac { 1 } { K } \sum _ { j = 1 } ^ { K } ( U _ { j } + W _ { j } ) \leq \frac { ( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } ) ^ { K } } { \alpha \rho K } \left( R _ { 1 } + \frac { C _ { 2 } \alpha ^ { 2 } + C _ { 4 } \alpha \rho ^ { 2 } } { C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } } \right) , +$$ + +and since $\alpha = C _ { f } \rho ^ { 2 }$ , we also have + +$$ +\frac { C _ { 2 } \alpha ^ { 2 } + C _ { 4 } \alpha \rho ^ { 2 } } { C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } } = \frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } . +$$ + +Furthermore, + +$$ +\rho \leq K ^ { - \frac { 1 } { 4 } } \implies \alpha \leq C _ { f } K ^ { - \frac { 1 } { 2 } } . 
+$$ + +Substituting these into (60) yields + +$$ +\begin{array} { r } { \displaystyle \frac { 1 } { K } \sum _ { j = 1 } ^ { K } ( U _ { j } + W _ { j } ) \leq \frac { \left( 1 + \frac { C _ { f } ( C _ { f } C _ { 1 } + C _ { 3 } ) } { K } \right) ^ { K } } { \alpha \rho K } \left( R _ { 1 } + \frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } \right) } \\ { \leq \frac { \exp ( C _ { f } ( C _ { f } C _ { 1 } + C _ { 3 } ) ) } { \alpha \rho K } \left( R _ { 1 } + \frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } \right) , } \end{array} +$$ + +where we have used that for any $t \ge 0 , 1 + t / K \le e ^ { t / K }$ , so therefore $( 1 + t / K ) ^ { K } \leq e ^ { t }$ . + +The worst-case rates in terms of $K$ occur when $\rho = K ^ { - 1 / 4 }$ and $\alpha = C _ { f } K ^ { - 1 / 2 }$ . This is the case when $K \geq ( 2 L ) ^ { 4 }$ . Substituting these into the denominator yields, for $K \geq ( 2 L ) ^ { 4 }$ , that + +$$ +\frac { 1 } { K } \sum _ { j = 1 } ^ { K } ( U _ { j } + W _ { j } ) \leq \frac { \exp ( C _ { f } ( C _ { f } C _ { 1 } + C _ { 3 } ) ) } { C _ { f } K ^ { 1 / 4 } } \left( R _ { 1 } + \frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } \right) . +$$ + +Thus, since $G _ { k } \leq \operatorname* { m a x } \{ \tau , \tau ^ { - 1 } \} \left( U _ { k } + W _ { k } \right)$ , we obtain + +$$ +\frac { 1 } { K } \sum _ { j = 1 } ^ { K } \mathbb { E } [ G _ { j } ] \leq \frac { \operatorname* { m a x } \{ \tau , \tau ^ { - 1 } \} \exp { ( C _ { f } ( C _ { f } C _ { 1 } + C _ { 3 } ) ) } } { C _ { f } K ^ { 1 / 4 } } \left( \| p ^ { 1 } - p ^ { * } \| ^ { 2 } + \frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } \right) , +$$ + +which is (58). + +When $K < ( 2 L ) ^ { 4 }$ , (57) can similarly be obtained by substituting $\rho = ( 2 L ) ^ { - 1 }$ and $\alpha = C _ { f } ( 2 L ) ^ { - 2 }$ into (61). 
β–‘ + +# F APPROXIMATION RESIDUALS + +In this section we derive the approximation residual used to assess the performance of the algorithms in the numerical experiments. This residual relies on the following product-space reformulation of (1). + +F.1 PRODUCT-SPACE REFORMULATION AND RESIDUAL PRINCIPLE + +Recall (1), the monotone inclusion we are solving: + +$$ +{ \mathrm { F i n d ~ } } z \in \mathbb { R } ^ { d } : 0 \in \sum _ { i = 1 } ^ { n } A _ { i } ( z ) + B ( z ) . +$$ + +In this section we demonstrate a β€œproduct-space” reformulation of (1) which allows us to rewrite it in a standard form involving just two operators, one maximal monotone and the other monotone and Lipschitz. This approach was pioneered in (BriceΓ±o-Arias $\&$ Combettes, 2011; Combettes & Pesquet, 2012). Along with allowing for a simple definition of an approximation residual as a measure of approximation error in solving (1), it allows one to apply operator splitting methods originally formulated for two operators to problems such as (1) for any finite $n$ . + +Observe that solving (1) is equivalent to + +$$ +\begin{array} { l l } { \mathrm { F i n d } \left( w _ { 1 } , \ldots , w _ { n } , z \right) \in \mathbb { R } ^ { \left( n + 1 \right) d } : } & { w _ { i } \in A _ { i } ( z ) , \quad i \in { 1 . . n } } \\ & { \quad \displaystyle 0 \in \sum _ { i = 1 } ^ { n } w _ { i } + B ( z ) . } \end{array} +$$ + +This formulation resembles that of the extended solution set $\mathcal { S }$ used in projective splitting, as given in (5), except that it combines the final two conditions in the definition of $\mathcal { S }$ , and thus does not need the final dual variable $w _ { n + 1 }$ . From the definition of the inverse of an operator, the above formulation is equivalent to + +$$ +\begin{array} { r l } { \mathrm { F i n d ~ } ( w _ { 1 } , \dots , w _ { n } , z ) \in \mathbb { R } ^ { ( n + 1 ) d } : } & { 0 \in A _ { i } ^ { - 1 } ( w _ { i } ) - z , \quad i \in 1 . . 
n } \\ & { 0 \in \displaystyle \sum _ { i = 1 } ^ { n } w _ { i } + B ( z ) . } \end{array} +$$ + +These conditions are in turn equivalent to finding $( w _ { 1 } , \ldots , w _ { n } , z ) \in \mathbb { R } ^ { ( n + 1 ) d }$ such that + +$$ +0 \in \mathcal { A } ( w _ { 1 } , \ldots , w _ { n } , z ) + \mathcal { B } ( w _ { 1 } , \ldots , w _ { n } , z ) , +$$ + +where $\mathcal { A }$ is the set-valued map + +$$ +\mathcal { A } ( w _ { 1 } , \dots , w _ { n } , z ) \mapsto A _ { 1 } ^ { - 1 } ( w _ { 1 } ) \times A _ { 2 } ^ { - 1 } ( w _ { 2 } ) \times \dots \times A _ { n } ^ { - 1 } ( w _ { n } ) \times \{ 0 \} +$$ + +and $\mathcal { B }$ is the single-valued operator + +$$ +\mathcal { B } ( w _ { 1 } , \dots , w _ { n } , z ) \mapsto \left[ \begin{array} { c c c c } { 0 } & { \cdots } & { 0 } & { - I } \\ { \vdots } & { \ddots } & { \vdots } & { \vdots } \\ { 0 } & { \cdots } & { 0 } & { - I } \\ { I } & { \cdots } & { I } & { 0 } \end{array} \right] \left[ \begin{array} { c } { w _ { 1 } } \\ { \vdots } \\ { w _ { n } } \\ { z } \end{array} \right] + \left[ \begin{array} { c } { 0 } \\ { \vdots } \\ { 0 } \\ { B ( z ) } \end{array} \right] . +$$ + +It is easily established that $\mathcal { B }$ is maximal monotone and Lipschitz continuous, while $\mathcal { A }$ is maximal monotone. Letting $\mathcal { T } \doteq \mathcal { A } + \mathcal { B }$ , it follows from (Bauschke & Combettes, 2017, Proposition 20.23) that $\mathcal { T }$ is maximal monotone. Thus, we have reformulated (1) as the monotone inclusion $0 \in \mathcal { T } ( q )$ for $q$ in the product space $\mathbb { R } ^ { ( n + 1 ) \bar { d } }$ . A vector $z \in \mathbb { R } ^ { d }$ solves (1) if and only if there exists $( w _ { 1 } , \dots , w _ { n } ) \in \mathbb { R } ^ { n d }$ such that $\bar { 0 } \in \mathcal { T } ( q )$ , where $q = ( w _ { 1 } , \dots , w _ { n } , z )$ . 
+ +For any pair $( q , v )$ such that $v \in \mathcal { T } ( q )$ , $\| v \| ^ { 2 }$ represents an approximation residual for $q$ in the sense that $v = 0$ implies $q$ is a solution to (62). One may take $\| v \| ^ { 2 }$ as a measure of the error of $q$ as an approximate solution to (62), and it can only be 0 if $q$ is a solution. Given two approximate solutions $q _ { 1 }$ and $q _ { 2 }$ with certificates $v _ { 1 } \in \mathcal { T } ( q _ { 1 } )$ and $v _ { 2 } \in \mathcal { T } ( q _ { 2 } )$ , we will treat $q _ { 1 }$ as a β€œbetter” approximate solution than $q _ { 2 }$ if $\| v _ { 1 } \| ^ { 2 } < \| v _ { 2 } \| ^ { 2 }$ . Doing so is somewhat analogous to the practice, common in optimization, of using the gradient $\| \nabla f ( x ) \| ^ { 2 }$ as a measure of quality of an approximate minimizer of some differentiable function $f$ . However, note that since $\mathcal { T } ( q _ { 1 } )$ is a set, there may exist elements of $\mathcal { T } ( q _ { 1 } )$ with smaller norm than $v _ { 1 }$ . Thus any given certificate $v _ { 1 }$ only corresponds to an upper bound on $\mathrm { d i s t } ^ { 2 } ( 0 , \mathcal { T } ( q _ { 1 } ) )$ . + +# F.2 APPROXIMATION RESIDUAL FOR PROJECTIVE SPLITTING + +In SPS (Algorithm 1), for $i \in 1 . . n$ , the pairs $( x _ { i } ^ { k } , y _ { i } ^ { k } )$ are chosen so that $y _ { i } ^ { k } \in A _ { i } ( x _ { i } ^ { k } )$ . This can be seen from the definition of the resolvent. Thus $x _ { i } ^ { k } \in A _ { i } ^ { - 1 } ( y _ { i } ^ { k } )$ . Observe that + +$$ +\begin{array} { r } { v ^ { k } \doteq \left[ \begin{array} { c } { x _ { 1 } ^ { k } - z ^ { k } } \\ { \vdots } \\ { x _ { n } ^ { k } - z ^ { k } } \\ { B ( z ^ { k } ) + \sum _ { i = 1 } ^ { n } y _ { i } ^ { k } } \end{array} \right] \in \mathcal { T } ( y _ { 1 } ^ { k } , \dotsc , y _ { n } ^ { k } , z ^ { k } ) . 
} \end{array} +$$ + +The approximation residual for SPS is thus + +$$ +R _ { k } \dot { = } \| v ^ { k } \| ^ { 2 } = \sum _ { i = 1 } ^ { n } \| z ^ { k } - x _ { i } ^ { k } \| ^ { 2 } + \left\| B ( z ^ { k } ) + \sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \right\| ^ { 2 } +$$ + +which is an approximation residual for $( y _ { 1 } ^ { k } , \dots , y _ { n } ^ { k } , z ^ { k } )$ in the sense defined above. We may relate $R _ { k }$ to the approximation residual $G _ { k }$ for SPS from Section 5 as follows: + +$$ +\begin{array} { r l } & { R _ { k } = \displaystyle \sum _ { i = 1 } ^ { n } \| z ^ { k } - x _ { i } ^ { k } \| ^ { 2 } + \left\| B ( z ^ { k } ) + \displaystyle \sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \right\| ^ { 2 } } \\ & { \quad = \displaystyle \sum _ { i = 1 } ^ { n } \| z ^ { k } - x _ { i } ^ { k } \| ^ { 2 } + \left\| B ( z ^ { k } ) + \displaystyle \sum _ { i = 1 } ^ { n } y _ { i } ^ { k } - \displaystyle \sum _ { i = 1 } ^ { n + 1 } w _ { i } ^ { k } \right\| ^ { 2 } } \\ & { \quad \leq \displaystyle \sum _ { i = 1 } ^ { n } \| z ^ { k } - x _ { i } ^ { k } \| ^ { 2 } + 2 \| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \| ^ { 2 } + 2 \left\| \displaystyle \sum _ { i = 1 } ^ { n } ( y _ { i } ^ { k } - w _ { i } ^ { k } ) \right\| ^ { 2 } } \\ & { \quad \leq \displaystyle \sum _ { i = 1 } ^ { n } \| z ^ { k } - x _ { i } ^ { k } \| ^ { 2 } + 2 \| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \| ^ { 2 } + 2 n \displaystyle \sum _ { i = 1 } ^ { n } \| y _ { i } ^ { k } - w _ { i } ^ { k } \| ^ { 2 } } \\ & { \quad \leq 2 n G _ { k } , } \end{array} +$$ + +where in the second equality we have used the fact that $\begin{array} { r } { \sum _ { i = 1 } ^ { n + 1 } w _ { i } ^ { k } = 0 } \end{array}$ . 
Thus, $R _ { k }$ has the same convergence rate as $G _ { k }$ given in Theorem 2. + +Note that while the certificate given in (65) focuses on the primal iterate $z ^ { k }$ , it may be changed to focus on any $x _ { i } ^ { k }$ for $i = 1 , \ldots , n$ , by using + +$$ +\boldsymbol { v } _ { i } ^ { k } \doteq \left[ \begin{array} { c } { x _ { 1 } ^ { k } - x _ { i } ^ { k } } \\ { \vdots } \\ { x _ { n } ^ { k } - x _ { i } ^ { k } } \\ { B ( x _ { i } ^ { k } ) + \sum _ { i = 1 } ^ { n } y _ { i } ^ { k } } \end{array} \right] \in \mathcal { T } ( y _ { 1 } ^ { k } , \ldots , y _ { n } ^ { k } , x _ { i } ^ { k } ) . +$$ + +The approximation residual $\| v _ { i } ^ { k } \| ^ { 2 }$ may also be shown to have the same rate as $G _ { k }$ by following similar derivations to those above for $R _ { k }$ . + +# F.3 TSENG’S METHOD + +Tseng’s method (Tseng, 2000) can be applied to (62), resulting in the following recursion with iterates $q ^ { k } , \bar { q } ^ { k } \in \mathbb { R } ^ { ( n + 1 ) d }$ : + +$$ +\begin{array} { c } { \bar { q } ^ { k } = J _ { \alpha \mathcal { A } } ( q ^ { k } - \alpha \mathcal { B } ( q ^ { k } ) ) } \\ { q ^ { k + 1 } = \bar { q } ^ { k } + \alpha \big ( \mathcal { B } ( q ^ { k } ) - \mathcal { B } ( \bar { q } ^ { k } ) \big ) , } \end{array} +$$ + +where $\mathcal { A }$ and $\mathcal { B }$ are defined in (63) and (64). The resolvent of $\mathcal { A }$ may be readily computed from the resolvents of the $A _ { i }$ using Moreau’s identity (Bauschke & Combettes, 2017, Proposition 23.20). + +Analogous to SPS, Tseng’s method has an approximation residual, which in this case is an element of $\mathcal { T } ( \bar { q } ^ { k } )$ . In particular, using the general properties of resolvent operators as applied to $J _ { \alpha \mathcal { A } }$ , we have + +$$ +\frac { 1 } { \alpha } ( q ^ { k } - \bar { q } ^ { k } ) - \mathcal { B } ( q ^ { k } ) \in \mathcal { A } ( \bar { q } ^ { k } ) . 
+$$ + +Also, rearranging (68) produces + +$$ +\frac { 1 } { \alpha } ( \bar { q } ^ { k } - q ^ { k + 1 } ) + \mathcal { B } ( q ^ { k } ) = \mathcal { B } ( \bar { q } ^ { k } ) . +$$ + +Adding these two relations produces + +$$ +\frac { 1 } { \alpha } ( q ^ { k } - q ^ { k + 1 } ) \in \mathcal { A } ( \bar { q } ^ { k } ) + \mathcal { B } ( \bar { q } ^ { k } ) = \mathcal { T } ( \bar { q } ^ { k } ) +$$ + +Therefore, + +$$ +R _ { k } ^ { \mathrm { { T s e n g } } } \doteq \frac { 1 } { \alpha ^ { 2 } } \| q ^ { k } - q ^ { k + 1 } \| ^ { 2 } +$$ + +represents a measure of the approximation error for Tseng’s method equivalent to $R _ { k }$ defined in (66) for SPS. + +# F.4 FRB + +The forward-reflected-backward method (FRB) (Malitsky & Tam, 2020) is another method that may be applied to the splitting $\mathcal { T } = \mathcal { A } + \mathcal { B }$ for $\mathcal { A }$ and $\mathcal { B }$ as defined in (63) and (64). Doing so yields recursion + +$$ +q ^ { k + 1 } = J _ { \alpha \mathcal { A } } \Big ( q ^ { k } - \alpha \big ( 2 \mathcal { B } ( q ^ { k } ) - \mathcal { B } ( q ^ { k - 1 } ) \big ) \Big ) . +$$ + +Following similar arguments to those for Tseng’s method, it can be shown that + +$$ +v _ { \mathrm { F R B } } ^ { k } \doteq \frac { 1 } { \alpha } \left( q ^ { k - 1 } - q ^ { k } \right) + \mathcal { B } ( q ^ { k } ) + \mathcal { B } ( q ^ { k - 2 } ) - 2 \mathcal { B } ( q ^ { k - 1 } ) \in \mathcal { T } ( q ^ { k } ) . +$$ + +Thus, FRB admits the following approximation residual equivalent to $R _ { k }$ for SPS: + +$$ +R _ { k } ^ { \mathrm { F R B } } \doteq \| v _ { \mathrm { F R B } } ^ { k } \| ^ { 2 } . +$$ + +Finally, we remark that the stepsizes used in both the Tseng and FRB methods can be chosen via a linesearch procedure that we do not detail here. 
+ +# F.5 STOCHASTIC TSENG METHOD + +The stochastic version of Tseng’s method of (BΓΆhm et al., 2020) (S-Tseng) may be applied to the inclusion $0 \in \mathcal { A } ( q ) + \mathcal { B } ( q )$ , since the operator $\mathcal { A }$ may be written as a subdifferential. However, unlike the deterministic Tseng method, it does not produce a valid residual. Note also that S-Tseng outputs an ergodic sequence $q _ { \mathrm { e r g } } ^ { k }$ . To construct a residual for the ergodic sequence, we compute a deterministic step of Tseng’s method according to (67)-(68), starting at $q _ { \mathrm { e r g } } ^ { k }$ . That is, letting + +$$ +\begin{array} { r l } & { \bar { q } ^ { k } = J _ { \alpha \mathcal { A } } ( q _ { \mathrm { e r g } } ^ { k } - \alpha \mathcal { B } ( q _ { \mathrm { e r g } } ^ { k } ) ) } \\ & { q ^ { k + 1 } = \bar { q } ^ { k } + \alpha ( \mathcal { B } ( q _ { \mathrm { e r g } } ^ { k } ) - \mathcal { B } ( \bar { q } ^ { k } ) ) , } \end{array} +$$ + +we can then compute essentially the same residual as in Section F.3, + +$$ +R _ { k } ^ { \mathrm { { S - T s e n g } } } \doteq \frac { 1 } { \alpha ^ { 2 } } \| q _ { \mathrm { { e r g } } } ^ { k } - q ^ { k + 1 } \| ^ { 2 } . +$$ + +To construct the stochastic oracle for S-Tseng, we assumed $\begin{array} { r } { B ( z ) = \frac { 1 } { m } \sum _ { i = 1 } ^ { m } B _ { i } ( z ) } \end{array}$ . 
Then we used + +$$ +\tilde { \mathcal { B } } ( w _ { 1 } , \dots , w _ { n } , z ) \mapsto \left[ \begin{array} { c c c c } { 0 } & { \cdots } & { 0 } & { - I } \\ { \vdots } & { \ddots } & { \vdots } & { \vdots } \\ { 0 } & { \cdots } & { 0 } & { - I } \\ { I } & { \cdots } & { I } & { 0 } \end{array} \right] \left[ \begin{array} { c } { w _ { 1 } } \\ { \vdots } \\ { w _ { n } } \\ { z } \end{array} \right] + \left[ \begin{array} { c } { 0 } \\ { \vdots } \\ { 0 } \\ { \frac { 1 } { \vert \mathbf { B } \vert } \sum _ { j \in \mathbf { B } } B _ { j } ( z ) } \end{array} \right] . +$$ + +for some minibatch $\mathbf { B } \subseteq \{ 1 , \dots , m \}$ . + +# F.6 VARIANCE-REDUCED FRB + +The FRB-VR method of Alacaoglu et al. (2021) can also be applied to $0 \in \mathcal { A } ( q ) + \mathcal { B } ( q )$ , using the same stochastic oracle $\tilde { \mathcal { B } }$ defined in (69). If we let the iterates of FRB-VR be $( q ^ { k } , p ^ { k } )$ , then line 4 of Algorithm 1 of Alacaoglu et al. (2021) can be written as + +$$ +\begin{array} { c } { \hat { q } ^ { k } = q ^ { k } - \tau ( \mathcal { B } ( p ^ { k } ) + \tilde { \mathcal { B } } ( q ^ { k } ) - \tilde { \mathcal { B } } ( p ^ { k } ) ) } \\ { q ^ { k + 1 } = J _ { \tau \mathcal { A } } ( \hat { q } ^ { k } ) . } \end{array} +$$ + +Once again, the method does not directly produce a residual, but one can be developed from the algorithm definition as follows: (71) yields $\tau ^ { - 1 } ( \hat { q } ^ { k } - q ^ { k + 1 } ) \in \mathcal { A } ( q ^ { k + 1 } )$ and hence + +$$ +\tau ^ { - 1 } ( \hat { q } ^ { k } - q ^ { k + 1 } ) + \mathcal { B } ( q ^ { k + 1 } ) \in ( \mathcal { A } + \mathcal { B } ) ( q ^ { k + 1 } ) . +$$ + +Therefore we use the residual + +$$ +R _ { k } ^ { \mathrm { F R B - V R } } = \lVert \tau ^ { - 1 } ( \hat { q } ^ { k } - q ^ { k + 1 } ) + \mathcal { B } ( q ^ { k + 1 } ) \rVert ^ { 2 } . +$$ + +Figure 2 plots $R _ { k }$ for SPS,
$R _ { k } ^ { \mathrm { T s e n g } }$ for Tseng’s method, $R _ { k } ^ { \mathrm { F R B } }$ for FRB, $R _ { k } ^ { \mathrm { S - T s e n g } }$ for S-Tseng, and $R _ { k } ^ { \mathrm { F R B - V R } }$ for FRB-VR. + +# F.7 BENEFITS AND DRAWBACKS OF THE PRODUCT SPACE REFORMULATION + +The main benefit of the product space reformulation (PSR) is that it allows one to use familiar 2-operator splitting schemes for solving $0 \in \mathcal { A } ( q ) + \mathcal { B } ( q )$ to solve the more complicated recursion (1). However, one drawback of this approach is that the operator $\mathcal { B }$ , defined in (64), combines a skew-symmetric consensus matrix with the Lipschitz operator $B$ . Treating $\mathcal { B }$ as a single operator necessitates using a single stepsize for both of its constituent operators, but the $B$ component will generally have a much larger Lipschitz constant than the skew part, necessitating a smaller stepsize than is ideal for the skew operator. This difficulty can be countered by using different stepsizes for the primal and dual components, but that strategy introduces additional tuning parameters. In other works, methods based on PSR have exhibited slower convergence than deterministic projective splitting methods (Johnstone & Eckstein, 2021; 2020b). However, in our experiments in Section 7, the performance is comparable. + +# G VARIATIONAL INEQUALITIES + +For a mapping $B : \mathbb { R } ^ { d } \mapsto \mathbb { R } ^ { d }$ and a closed and convex set $\mathcal { C }$ , the variational inequality problem (Harker & Pang, 1990) is to find $z ^ { \ast } \in \mathcal { C }$ such that + +$$ +B ( z ^ { * } ) ^ { \top } ( z - z ^ { * } ) \geq 0 , \forall z \in { \mathcal { C } } .
+$$ + +Consider the normal cone mapping discussed in Section 2 and defined as + +$$ +N _ { { \mathcal { C } } } ( x ) \doteq \{ g : g ^ { \top } ( y - x ) \le 0 \ \forall y \in { \mathcal { C } } \} +$$ + +It is easily seen that (72) is equivalent to finding $z ^ { * }$ such that $- B ( z ^ { * } ) \in N _ { \mathcal { C } } ( z ^ { * } )$ . Hence, if $B$ is monotone, (72) is equivalent to the monotone inclusion + +$$ +0 \in B ( z ^ { * } ) + N _ { \cal { C } } ( z ^ { * } ) . +$$ + +Thus, monotone variational inequalities are a special case of monotone inclusions with two operators, one of which is single-valued and the other is the normal cone map of the constraint set $\mathcal { C }$ . As a consequence, methods for monotone inclusions can be used to solve monotone variational inequality problems. The reverse, however, may not be true. For example, the analysis of the extragradient method (Korpelevich, 1977) relies on the second operator $N _ { \mathcal { C } }$ in (73) being a normal cone, as opposed to a more general monotone operator. We are not aware of any direct extension of the extragradient method’s analysis allowing a more general resolvent to be used in place of the projection map corresponding to $N _ { \mathcal { C } }$ . + +The Restricted Gap Function There is a disadvantage to pursuing convergence rates based on variational inequalities (as in BΓΆhm et al. (2020) and Alacaoglu et al. (2021)) rather than monotone inclusions. Convergence rate analyses for variational inequalities focus on the gap function: + +$$ +G _ { { \mathcal C } } ( z ) \doteq \operatorname* { s u p } _ { z ^ { \prime } \in { \mathcal C } } B ( z ^ { \prime } ) ^ { \top } ( z - z ^ { \prime } ) . +$$ + +It can be shown that $G _ { \mathcal { C } } ( z ) \geq 0$ and $G _ { \mathcal { C } } ( z ) = 0$ if and only if $z$ solves (72). 
However, (74) is meaningless for most problems, since unless $\mathcal { C }$ is compact, $G _ { \mathcal { C } } ( z )$ is typically equal to $+ \infty$ for any nonsolution (Diakonikolas, 2020). Thus researchers instead focus on the restricted gap function (Nesterov, 2007) + +$$ +G _ { { \mathcal C } _ { 2 } } ( z ) \doteq \operatorname* { s u p } _ { z ^ { \prime } \in { \mathcal C } _ { 2 } } B ( z ^ { \prime } ) ^ { \top } ( z - z ^ { \prime } ) . +$$ + +where $\mathcal { C } _ { 2 }$ is an arbitrary compact set. However, now the results are only meaningful over the set $\mathcal { C } _ { 2 }$ . Thus, $\mathcal { C } _ { 2 }$ must be chosen large enough so that the iterates of the algorithm remain in the interior of $\mathcal { C } _ { 2 }$ (BΓΆhm et al., 2020). Further, the convergence rate bound depends on the diameter of $\mathcal { C } _ { 2 }$ . For some algorithms (Mokhtari et al., 2020) a valid set is provided which bounds the iterates. However BΓΆhm et al. (2020) and Alacaoglu et al. (2021) do not provide one, although in principle it could be done so long as the ergodic sequence can be bounded almost-surely. Thus, the convergence rates depending on (75) in BΓΆhm et al. (2020) and Alacaoglu et al. (2021) are somewhat incomplete in that they depend on unknown constants. + +In contrast, rates based on the approximation residual in the monotone inclusion setting, including ours given in (57)–(58), completely avoid this pitfall. There is no need to select a compact set containing the algorithm’s iterates and the constants in our rates are all explicit or depend on standard quantities such as the initial distance to a solution. + +# H MEMORY-SAVING TECHNIQUE FOR SPS + +The variables $t _ { i } ^ { k } , x _ { i } ^ { k }$ , and $y _ { i } ^ { k }$ on lines 3-5 of SPS are stored in working variables $t$ , $x$ , and $y$ .
Another two variables $\bar { x }$ and $\bar { y }$ keep track of $\textstyle \sum _ { i = 1 } ^ { n } x _ { i } ^ { k }$ and $\textstyle \sum _ { i = 1 } ^ { n } y _ { i } ^ { k }$ . The dual variables are stored as $w _ { i }$ for $i \in 1 . . n$ and the primal variable as $z$ . Once $x = x _ { i } ^ { k }$ is computed, the $i ^ { \mathrm { { t h } } }$ dual variable $w _ { i }$ can be partially updated as $w _ { i } \gets w _ { i } - \alpha _ { k } x$ . Once all the operators have been processed, the update for each dual variable may be completed via $w _ { i } \gets w _ { i } + \alpha _ { k } ( n + 1 ) ^ { - 1 } \bar { x }$ . Also, the primal update is computed as $z \gets z - \alpha _ { k } \bar { y }$ . During the calculation loop for the $x _ { i } ^ { k } , y _ { i } ^ { k }$ , the terms in approximation residual $R _ { k }$ may also be accumulated one by one. The total number of vector elements that must be stored is $( n + 7 ) d$ . + +# I ADDITIONAL INFORMATION ABOUT THE NUMERICAL EXPERIMENTS + +We solve the following convex-concave min-max problem: + +$$ +\begin{array} { r l } { \underset { \lambda \in \mathbb { R } , \beta \in \mathbb { R } ^ { d } } { \operatorname* { m i n } } \quad \underset { \gamma \in \mathbb { R } ^ { m } } { \operatorname* { m a x } } } & { \left\{ \lambda ( \delta - \kappa ) + \displaystyle \frac { 1 } { m } \sum _ { i = 1 } ^ { m } \Psi ( \langle \hat { x } _ { i } , \beta \rangle ) + \displaystyle \frac { 1 } { m } \sum _ { i = 1 } ^ { m } \gamma _ { i } ( \hat { y } _ { i } \langle \hat { x } _ { i } , \beta \rangle - \lambda \kappa ) + c \| \beta \| _ { 1 } \right\} } \\ { \mathrm { s . t . } \quad } & { \| \beta \| _ { 2 } \leq \lambda / ( L _ { \Psi } + 1 ) \qquad \| \gamma \| _ { \infty } \leq 1 . } \end{array} +$$ + +This model is identical to that of (Yu et al., 2021, Thm. 4.3) except for the addition of the $\ell _ { 1 }$ regularization term $c \| \beta \| _ { 1 }$ , where $c \geq 0$ is a given constant.
The goal is to learn the model weights $\beta$ from a training dataset of $m$ feature vectors ${ \hat { x } } _ { i }$ and corresponding labels $\hat { y } _ { i }$ . Rather than computing the expected loss over the training set, the formulation uses, for each $\beta$ , the worst possible distribution within a Wasserstein-metric ball around the empirical distribution of the $\{ ( \hat { x } _ { i } , \hat { y } _ { i } ) \}$ , with the parameter $\delta \geq 0$ giving the diameter of the ball and the parameter $\kappa \geq 0$ specifying the relative weighting of features and labels. The variables $\gamma$ and $\lambda$ parameterize the selection of this worst-case distribution in response to the model weights $\beta$ . Finally, $\Psi$ is the logistic loss kernel $t \mapsto \log ( e ^ { t } + e ^ { - t } )$ and $L _ { \Psi } = 1$ is the corresponding Lipschitz constant. In all the experiments, we set $\delta = \kappa = 1$ and $c = 1 0 ^ { - 3 }$ . + +We now show how we converted this problem to the form (1) for our experiments. Let $z$ be a shorthand for $( \lambda , \beta , \gamma )$ , and define + +$$ +\mathcal { L } ( z ) \doteq \lambda ( \delta - \kappa ) + \frac { 1 } { m } \sum _ { i = 1 } ^ { m } \Psi ( \langle { \hat { x } _ { i } } , \beta \rangle ) + \frac { 1 } { m } \sum _ { i = 1 } ^ { m } \gamma _ { i } ( \hat { y } _ { i } \langle { \hat { x } _ { i } } , \beta \rangle - \lambda \kappa ) . 
+$$ + +The first-order necessary and sufficient conditions for the convex-concave saddlepoint problem in (76) are + +$$ +0 \in B ( z ) + A _ { 1 } ( z ) + A _ { 2 } ( z ) +$$ + +where the vector field $B ( z )$ is defined as + +$$ +\boldsymbol { B } ( z ) \doteq \left[ \begin{array} { l } { \nabla _ { \boldsymbol { \lambda } , \beta } \mathcal { L } ( z ) } \\ { - \nabla _ { \boldsymbol { \gamma } } \mathcal { L } ( z ) } \end{array} \right] , +$$ + +with + +$$ +\begin{array} { r } { \nabla _ { \lambda , \beta } \mathcal { L } ( z ) = \left[ \begin{array} { c } { \delta - \kappa ( 1 + \frac { 1 } { m } \sum _ { i = 1 } ^ { m } \gamma _ { i } ) } \\ { \frac { 1 } { m } \sum _ { i = 1 } ^ { m } \Psi ^ { \prime } ( \langle \hat { x } _ { i } , \beta \rangle ) \hat { x } _ { i } + \frac { 1 } { m } \sum _ { i = 1 } ^ { m } \gamma _ { i } \hat { y } _ { i } \hat { x } _ { i } } \end{array} \right] } \end{array} +$$ + +and + +$$ +\nabla _ { \boldsymbol { \gamma } } \mathcal { L } ( z ) = \left[ \begin{array} { c } { \frac { 1 } { m } ( \hat { y } _ { 1 } \langle \hat { x } _ { 1 } , \beta \rangle - \lambda \kappa ) } \\ { \vdots } \\ { \frac { 1 } { m } ( \hat { y } _ { m } \langle \hat { x } _ { m } , \beta \rangle - \lambda \kappa ) } \end{array} \right] . +$$ + +It is readily confirmed that $B$ defined in this manner is Lipschitz. The monotonicity of $B$ follows from its being the generalized gradient of a convex-concave saddle function (Rockafellar, 1970). 
+ +For the set-valued operators, $A _ { 1 } ( z )$ corresponds to the constraints and $A _ { 2 } ( z )$ to the nonsmooth $\ell _ { 1 }$ regularizer, and are defined as + +$$ +A _ { 1 } ( z ) \doteq N _ { \mathcal { C } _ { 1 } } ( \lambda , \beta ) \times N _ { \mathcal { C } _ { 2 } } ( \gamma ) , +$$ + +where + +$$ +\begin{array} { r } { \mathcal { C } _ { 1 } \doteq \bigl \{ ( \lambda , \beta ) : \| \beta \| _ { 2 } \le \lambda / ( L _ { \Psi } + 1 ) \bigr \} \quad \mathrm { ~ a n d ~ } \quad \mathcal { C } _ { 2 } \doteq \{ \gamma : \| \gamma \| _ { \infty } \le 1 \} , } \end{array} +$$ + +and + +$$ +A _ { 2 } ( z ) \doteq \{ \mathbf { 0 } _ { 1 \times 1 } \} \times c \partial \| \beta \| _ { 1 } \times \{ \mathbf { 0 } _ { m \times 1 } \} . +$$ + +Here, the notation ${ \bf 0 } _ { p \times 1 }$ denotes the $p$ -dimensional vector of all zeros. $\mathcal { C } _ { 1 }$ is a scaled version of the second-order cone, well known to be a closed convex set, while $\mathcal { C } _ { 2 }$ is the unit ball of the $\ell _ { \infty }$ norm, also closed and convex. Since $A _ { 1 }$ is a normal cone map of a closed convex set and $A _ { 2 }$ is the subgradient map of a closed proper convex function (the scaled 1-norm), both of these operators are maximal monotone and problem (77) is a special case of (1) for $n = 2$ . 
+ +Stochastic oracle implementation The operator $B : \mathbb { R } ^ { m + d + 1 } \mapsto \mathbb { R } ^ { m + d + 1 }$ , defined in (78), can be written as + +$$ +B ( z ) = \frac { 1 } { m } \sum _ { i = 1 } ^ { m } B _ { i } ( z ) +$$ + +where + +$$ +B _ { i } ( z ) \doteq \left[ \begin{array} { c } { \delta - \kappa ( 1 + \gamma _ { i } ) } \\ { \Psi ^ { \prime } ( \langle \hat { x } _ { i } , \beta \rangle ) \hat { x } _ { i } + \gamma _ { i } \hat { y } _ { i } \hat { x } _ { i } } \\ { \mathbf { 0 } _ { ( i - 1 ) \times 1 } } \\ { - ( \hat { y } _ { i } \langle \hat { x } _ { i } , \beta \rangle - \lambda \kappa ) } \\ { \mathbf { 0 } _ { ( m - i ) \times 1 } } \end{array} \right] . +$$ + +In our SPS experiments, the stochastic oracle for $B$ is simply $\begin{array} { r } { \tilde { B } ( z ) = \frac { 1 } { | \mathbf { B } | } \sum _ { i \in \mathbf { B } } B _ { i } ( z ) } \end{array}$ for some minibatch $\mathbf { B } \subseteq \{ 1 , \dots , m \}$ . We used a batchsize of 100. + +Resolvent computations The resolvent of $A _ { 1 }$ is readily constructed from the projection maps of the simple sets $\mathcal { C } _ { 1 }$ and $\mathcal { C } _ { 2 }$ , while the resolvent $A _ { 2 }$ involves the proximal operator of the $\ell _ { 1 }$ norm. Specifically, + +$$ +J _ { \rho A _ { 1 } } ( z ) = \left[ \begin{array} { c } { \mathrm { p r o j } _ { \mathcal { C } _ { 1 } } ( \lambda , \beta ) } \\ { \mathrm { p r o j } _ { \mathcal { C } _ { 2 } } ( \gamma ) } \end{array} \right] \quad \mathrm { a n d } \quad J _ { \rho A _ { 2 } } ( z ) = \left[ \begin{array} { c } { \mathbf { 0 } _ { 1 \times 1 } } \\ { \mathrm { p r o x } _ { \rho c \| \cdot \| _ { 1 } } ( \beta ) } \\ { \mathbf { 0 } _ { m \times 1 } } \end{array} \right] . +$$ + +The constraint $\mathcal { C } _ { 1 }$ is a scaled second-order cone and $\mathcal { C } _ { 2 }$ is the $\ell _ { \infty }$ ball, both of which have closed-form projections. 
The proximal operator of the $\ell _ { 1 }$ norm is the well-known soft-thresholding operator (Parikh & Boyd, 2013, Section 6.5.2). Therefore all resolvents in the formulation may be computed quickly and accurately. + +SPS stepsize choices For the stepsize in SPS, we ordinarily require $\rho _ { k } \le \overline { { \rho } } < 1 / L$ for the global Lipschitz constant $L$ of $B$ . However, since the global Lipschitz constant may be pessimistic, better performance can often be achieved by experimenting with larger stepsizes. If divergence is observed, then the stepsize can be decreased. This type of strategy is common for SGD and similar stochastic methods. Thus, for SPS-decay we set $\alpha _ { k } = C _ { d } k ^ { - 0 . 5 1 }$ and $\rho _ { k } = C _ { d } k ^ { - 0 . 2 5 }$ , and performed a grid search to select the best $C _ { d }$ from $\{ 0 . 1 , 0 . 5 , 1 , 5 , 1 0 \}$ , arriving at $C _ { d } = 1$ for epsilon and SUSY, and $C _ { d } = 0 . 5$ for real-sim. For SPS-fixed we used $\rho = K ^ { - 1 / 4 }$ and $\alpha = C _ { f } \rho ^ { 2 }$ , and performed a grid search to select $C _ { f }$ over $\{ 0 . 1 , 0 . 5 , 1 , 5 , 1 0 \}$ , arriving at $C _ { f } = 1$ for epsilon and real-sim, and $C _ { f } = 5$ for SUSY. The total number of iterations for SPS-fixed was chosen as follows: For the epsilon dataset, we used $K = 5 0 0 0$ , for SUSY we used $K = 2 0 0$ , and for real-sim we used $K = 1 0 0 0$ . + +![](images/c94c000fa07b86ebc660cc1004df85b2b9b8c4f0b3338fc20eb74e1d9a379c4e.jpg) +Figure 2: Approximation residual versus epoch for three LIBSVM benchmark datasets. Left: epsilon, middle: SUSY, right: real-sim. + +Parameter choices for the other algorithms All methods are initialized at the same random point. For Tseng’s method, we used the backtracking linesearch variant with an initial stepsize of 1, $\theta = 0 . 8$ , and a stepsize reduction factor of 0.7.
For FRB, we used the backtracking linesearch variant with the same settings as for Tseng’s method. For deterministic PS, we used a fixed stepsize of $0 . 9 / L$ . For the stochastic Tseng’s method of BΓΆhm et al. (2020), the stepsize $\alpha _ { k }$ must satisfy: $\textstyle \sum _ { k = 1 } ^ { \infty } { \dot { \alpha } } _ { k } = \infty$ and P∞k=1 $\textstyle \sum _ { k = 1 } ^ { \infty } \alpha _ { k } ^ { 2 } < \infty$ . So we set $\alpha _ { k } = C k ^ { - d }$ and perform a grid search over $\{ C , d \}$ k=1 in the range $[ 1 0 ^ { - 4 } , 1 0 ] \times [ 0 . 5 1 , 1 ]$ , checking $5 \times 5$ values to find the best setting for each of the three problems. The selected values are in Table 1. + +Table 1: Parameter Values for S-Tseng + +
epsilonSUSYreal-sim
C0.560.560.77
d0.60.60.55
+ +The work of BΓΆhm et al. (2020) also introduced $\mathrm { F B F p }$ , a stochastic version of Tseng’s method that reuses a previously-computed gradient and therefore only needs one additional gradient calculation per iteration. In our experiments, the performance of the two methods was about the same, so we only report the performance of stoch. Tseng’s method. + +For variance-reduced FRB, the main parameter is the probability $p$ . We hand-tuned $p$ ,arriving at $p = 0 . 0 1$ for all problems. We set the stepsize to its maximum allowed value of + +$$ +\tau = { \frac { 1 - \sqrt { 1 - p } } { 2 L } } . +$$ + +Plots versus Epoch Figure 2 plots the performance of each method versus epoch (i.e. data pass). This shows an even more dramatic benefit for the stochastic methods than the plots versus time, since at each iteration the stochastic methods only need to process small amounts of data, whereas deterministic methods must process all of it. We believe these benefits do not fully manifest themselves in the plots versus time due to overheads in each iteration of the stochastic methods, multithreading providing a boost for the deterministic methods, memory access patterns, and other practical considerations. + +Fraction of Nonzero Entries versus Running time Figure 3 plots the fraction of nonzero entries in the iterates of each method versus running time. For each method, we used output of proxckΒ·k1. We observe that our methods produce sparse intermediate iterates for two of the three problems. This is one of the benefits of proximal splitting algorithms in general, including our method. For the other problem, SUSY, no method produces sparse iterates, suggesting that $c$ should be increased if sparse solutions are desired. + +![](images/7829a04c81fd4e0356905a55be1d4c62aec1a4bb1bef565b9369bc3a3ec4170b.jpg) +Figure 3: Fraction of nonzero entries versus running time for the three datasets. Left: epsilon, middle: SUSY, right: real-sim. 
+ +# J LOCAL CONVERGENCE ON NON-MONOTONE PROBLEMS + +The work by Hsieh et al. (2020) provides a local convergence analysis for DSEG applied to locally monotone problems. Recall that DSEG is equivalent to the special case of SPS for which $n = 0$ . While extending this result to the more general setting of SPS is beyond the scope of this manuscript, we next provide a preliminary sketch of how the analysis of Hsieh et al. (2020) might be generalized to our setting. We leave a formal proof to future work. + +Sketch of assumptions and main result The first assumption needed is the existence of an isolated solution $p ^ { * } = ( z ^ { * } , w _ { 1 } ^ { * } , \ldots , w _ { n + 1 } ^ { * } ) \in \mathcal { S }$ . We then require that there exists a ball $\mathbb { B } _ { r } \big ( z ^ { * } \big )$ , centered at $z ^ { * }$ , throughout which the operator $B$ is β€œwell-behaved”, meaning that it satisfies monotonicity and Lipschitz continuity. In addition, we need each $A _ { i }$ , for $i \in 1 . . n$ , to be maximal monotone within this ball. Outside of the ball, the operators do not need to be monotone or Lipschitz. + +Following (Hsieh et al., 2020, Assumption $2 ^ { \prime }$ ), the noise variance assumptions are slightly stronger than in the monotone case. In particular, we require that $\mathbb { E } [ \| \epsilon ^ { k } \| ^ { q } \mid \mathcal { F } _ { k } ] \le N ^ { q }$ and $\mathbb { E } [ \| e ^ { k } \| ^ { q } \mid \mathcal { F } _ { k } ] \le N ^ { q }$ for some $q > 2$ . As before, the noise must be zero-mean. Finally, the stepsize requirements are also slightly stronger than (12), having the added assumption that $\textstyle \sum _ { k = 1 } ^ { \infty } \rho _ { k } ^ { q } < \infty$ . + +With these assumptions, the goal is to show that, so long as the initial point $p ^ { 1 }$ is sufficiently close to $p ^ { * }$ , then with high probability $p ^ { k }$ converges to $p ^ { * }$ .
+ +Proof strategy The initial strategy is to develop the following recursion, satisfied by SPS, that does not (yet) utilize local monotonicity or Lipschitz continuity: + +$$ +\begin{array} { r l } & { \| p ^ { k + 1 } - p ^ { * } \| ^ { 2 } \leq ( 1 + c _ { 1 } \alpha _ { k } ^ { 2 } ) \| p ^ { k } - p ^ { * } \| ^ { 2 } - c _ { 2 } \alpha _ { k } \rho _ { k } ( T _ { k } ^ { \prime } + l _ { k } + r _ { k } ) - c _ { 3 } \alpha _ { k } ( r _ { k } ^ { \prime } + q _ { k } ) } \\ & { \qquad + c _ { 1 } \alpha _ { k } ^ { 2 } \big ( \| e ^ { k } \| ^ { 2 } + \| \epsilon ^ { k } \| ^ { 2 } + c _ { 4 } \big ) + c _ { 5 } \alpha _ { k } q _ { k } ^ { \prime } } \end{array} +$$ + +for appropriate constants $c _ { 1 } \ldots c _ { 5 } \geq 0$ . In this inequality, we use + +$$ +\begin{array} { l } { \displaystyle T _ { k } ^ { \prime } \doteq \frac { \tau } { \overline { { \rho } } } \sum _ { i = 1 } ^ { n } \| y _ { i } ^ { k } - w _ { i } ^ { k } \| ^ { 2 } + \frac { 1 } { \overline { { \rho } } \tau } \sum _ { i = 1 } ^ { n } \| z ^ { k } - x _ { i } ^ { k } \| ^ { 2 } , } \\ { \displaystyle l _ { k } \doteq \sum _ { i = 1 } ^ { n } \langle z ^ { * } - x _ { i } ^ { k } , w _ { i } ^ { * } - y _ { i } ^ { k } \rangle + \big \langle z ^ { * } - x _ { n + 1 } ^ { k } , w _ { n + 1 } ^ { * } - B ( x _ { n + 1 } ^ { k } ) \big \rangle , } \\ { \displaystyle r _ { k } \doteq \big \langle \epsilon ^ { k } , B ( \tilde { x } ^ { k } ) - w _ { n + 1 } ^ { k } \big \rangle , } \\ { \displaystyle r _ { k } ^ { \prime } \doteq \big \langle z ^ { k } - z ^ { * } , e ^ { k } \big \rangle , } \\ { \displaystyle q _ { k } \doteq \big ( \rho _ { k } ^ { - 1 } - d / 2 \big ) \| \tilde { x } ^ { k } - z ^ { k } \| ^ { 2 } - \| \tilde { x } ^ { k } - z ^ { k } \| \| B ( \tilde { x } ^ { k } ) - B ( z ^ { k } ) \| , } \\ { \displaystyle q _ { k } ^ { \prime } \doteq \rho _ { k } \| \epsilon ^ { k } \| \| B x _ { n + 1 } ^ { k } - B \tilde { x } ^ { k } \| + \frac { 1 } { 2 d } \| B \tilde { x } ^ { k } - B x _ { n + 1 } ^ { k } \| ^ { 2 } , } \end{array} +$$ + +where + +$$ +\tilde { x } ^ { k } \doteq z ^ { k } - \rho _ { k } \bigl ( B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \bigr ) \qquad d \doteq \frac { 1 - \overline { { \rho } } L } { 1 + \overline { { \rho } } / 2 } , +$$ + +with $L$ being the local Lipschitz constant of $B$ on $\mathbb { B } _ { r } \big ( z ^ { * } \big )$ . The iterate $\tilde { x } ^ { k }$ is the analog of the iterate $\tilde { X } _ { t + 1 / 2 }$ used in Hsieh et al. (2020). + +The recursion (79) is derived by once again starting from (13) and following the arguments leading to (35), but this time not taking conditional expectations. In particular, the upper bounds on $\| \nabla _ { z } \varphi _ { k } \| ^ { 2 }$ and $\| \nabla _ { w _ { i } } \varphi _ { k } \| ^ { 2 }$ contribute the terms $c _ { 1 } \alpha _ { k } ^ { 2 } ( \| e ^ { k } \| ^ { 2 } + \| \epsilon ^ { k } \| ^ { 2 } + c _ { 4 } )$ and $c _ { 1 } \alpha _ { k } ^ { 2 } \| p ^ { k } - p ^ { * } \| ^ { 2 }$ . For $i \in 1 . . n$ , the β€œ$\varphi _ { i , k }$ -gap” term, $\varphi _ { i , k } ( p ^ { k } ) - \varphi _ { i , k } ( p ^ { * } )$ , is dealt with in a similar manner to Section C.5, but this time not using monotonicity as in (36). This contributes $T _ { k } ^ { \prime }$ and the first term in $l _ { k }$ . Finally, as we sketch below, the β€œ$\varphi _ { n + 1 , k }$ -gap” term contributes $r _ { k } , r _ { k } ^ { \prime } , q _ { k } , q _ { k } ^ { \prime }$ , and the last term in $l _ { k }$ .
+ +For the β€œ$\varphi _ { n + 1 , k }$ -gap”, that is, $\varphi _ { n + 1 , k } ( p ^ { k } ) - \varphi _ { n + 1 , k } ( p ^ { * } )$ , we have to depart from the analysis in Section C.6 and use an alternative argument involving $\tilde { x } ^ { k }$ . We now provide some details of this argument: in the following, we use $B z$ as shorthand for $B ( z )$ for any vector $z \in \mathbb { R } ^ { d }$ . We begin the analysis with + +$$ +\begin{array} { r l } & { \varphi _ { n + 1 , k } ( p ^ { k } ) = \langle z ^ { k } - x _ { n + 1 } ^ { k } , y _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \rangle } \\ & { \qquad = \langle z ^ { k } - x _ { n + 1 } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \rangle + \underbrace { \langle z ^ { k } - x _ { n + 1 } ^ { k } , e ^ { k } \rangle } _ { \mathrm { p a r t } \mathrm { o f } r _ { k } ^ { \prime } } . } \end{array} +$$ + +The final term will combine with the term $\langle x _ { n + 1 } ^ { k } - z ^ { * } , e ^ { k } \rangle$ coming from + +$$ +\begin{array} { r l } & { - \varphi _ { n + 1 , k } ( p ^ { * } ) = \langle z ^ { * } - x _ { n + 1 } ^ { k } , w _ { n + 1 } ^ { * } - y _ { n + 1 } ^ { k } \rangle } \\ & { \qquad = \langle z ^ { * } - x _ { n + 1 } ^ { k } , w _ { n + 1 } ^ { * } - B x _ { n + 1 } ^ { k } \rangle + \langle x _ { n + 1 } ^ { k } - z ^ { * } , e ^ { k } \rangle } \end{array} +$$ + +to yield $r _ { k } ^ { \prime }$ above. Equation (82) also yields the second term in $l _ { k }$ .
Using that $\tilde { x } ^ { k } - x _ { n + 1 } ^ { k } = \rho _ { k } \epsilon _ { k }$ , we rewrite the first term in (81) as + +$$ +\begin{array} { r l } & { \bigl \langle z ^ { k } - x _ { n + 1 } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \bigr \rangle = \bigl \langle z ^ { k } - \tilde { x } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \bigr \rangle + \bigl \langle \tilde { x } ^ { k } - x _ { n + 1 } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \bigr \rangle } \\ & { \qquad = \bigl \langle z ^ { k } - \tilde { x } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \bigr \rangle + \rho _ { k } \bigl \langle \epsilon ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \bigr \rangle \qquad } \\ & { \qquad = \bigl \langle z ^ { k } - \tilde { x } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \bigr \rangle + \rho _ { k } \bigl \langle \epsilon ^ { k } , B x _ { n + 1 } ^ { k } - B \tilde { x } ^ { k } \bigr \rangle \qquad ( 8 } \\ & { \qquad + \rho _ { k } \underbrace { \bigl \langle \epsilon ^ { k } , B \tilde { x } ^ { k } - w _ { n + 1 } ^ { k } \bigr \rangle } _ { r _ { k } } . } \end{array} +$$ + +Next, the terms in (83) admit the lower bound + +$$ +\begin{array} { r l } & { \langle z ^ { k } - { \tilde { x } } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \rangle + \rho _ { k } \langle \epsilon ^ { k } , B x _ { n + 1 } ^ { k } - B { \tilde { x } } ^ { k } \rangle } \\ & { \qquad \geq \langle z ^ { k } - { \tilde { x } } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \rangle - \underbrace { \rho _ { k } \| \epsilon ^ { k } \| \| B x _ { n + 1 } ^ { k } - B { \tilde { x } } ^ { k } \| } _ { \mathrm { ~ } } . 
} \end{array} +$$ + +Considering the first term on right-hand side of this bound, we also have + +$$ +\begin{array} { r l } { { \langle z ^ { k } - \tilde { x } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \rangle = \langle z ^ { k } - \tilde { x } ^ { k } , B \tilde { x } ^ { k } - w _ { n + 1 } ^ { k } \rangle + \langle z ^ { k } - \tilde { x } ^ { k } , B x _ { n + 1 } ^ { k } - B \tilde { x } ^ { k } \rangle } } \\ & { \geq \langle z ^ { k } - \tilde { x } ^ { k } , B \tilde { x } ^ { k } - w _ { n + 1 } ^ { k } \rangle - \displaystyle \frac { d } { 2 } \| z ^ { k } - \tilde { x } ^ { k } \| ^ { 2 } - \displaystyle \frac { 1 } { 2 d } \| B \tilde { x } ^ { k } - B x _ { n + 1 } ^ { k } \| ^ { 2 } } \end{array} +$$ + +for any $d > 0$ , using Young’s inequality. Finally, for the first two terms of the right-hand side of the above relation, we may write + +$$ +\begin{array} { r l } { { \langle z ^ { k } - \tilde { x } ^ { k } , B \tilde { x } ^ { k } - w _ { n + 1 } ^ { k } \rangle - \frac { d } { 2 } \| z ^ { k } - \tilde { x } ^ { k } \| ^ { 2 } } } \\ & { = \langle z ^ { k } - \tilde { x } ^ { k } , B z ^ { k } - w _ { n + 1 } ^ { k } \rangle + \langle z ^ { k } - \tilde { x } ^ { k } , B \tilde { x } ^ { k } - B z ^ { k } \rangle - \frac { d } { 2 } \| z ^ { k } - \tilde { x } ^ { k } \| ^ { 2 } } \\ & { \quad \quad \geq \underbrace { ( \rho _ { k } ^ { - 1 } - d / 2 ) \| z ^ { k } - \tilde { x } ^ { k } \| ^ { 2 } - \| z ^ { k } - \tilde { x } ^ { k } \| \| B \tilde { x } ^ { k } - B z ^ { k } \| } _ { q _ { k } } , } \end{array} +$$ + +where in the final inequality we use the Cauchy–Schwarz inequality and substitute $B z ^ { k } - w _ { n + 1 } ^ { k } =$ $\rho _ { k } ^ { - 1 } ( z ^ { k } - \tilde { x } ^ { k } )$ , from the definition of $\tilde { x } ^ { k }$ in (80). We have now accounted for all the terms appearing in (79). + +The recursion (79) is analogous to equation (F.7) on page 24 of Hsieh et al.
(2020) and provides the starting point for the local convergence analysis. The next step would be to derive an analog of Theorem F.1. of Hsieh et al. (2020) using (79). The following translation to the notation of Theorem F.1. could be used (note that Hsieh et al. (2020) uses $t$ for iteration counter): + +$$ +\begin{array} { r l } & { D _ { k } = \| p ^ { k } - p ^ { * } \| ^ { 2 } , } \\ & { \zeta _ { k } = c _ { 2 } \alpha _ { k } \rho _ { k } ( T _ { k } ^ { \prime } + l _ { k } ) + c _ { 3 } \alpha _ { k } q _ { k } , } \\ & { \xi _ { k } = - c _ { 2 } \alpha _ { k } \rho _ { k } r _ { k } - c _ { 3 } \alpha _ { k } r _ { k } ^ { \prime } , } \\ & { \chi _ { k } = c _ { 1 } \alpha _ { k } ^ { 2 } \big ( \| e ^ { k } \| ^ { 2 } + \| \epsilon ^ { k } \| ^ { 2 } + \| p ^ { k } - p ^ { * } \| ^ { 2 } + c _ { 4 } \big ) + c _ { 5 } \alpha _ { k } q _ { k } ^ { \prime } , } \end{array} +$$ + +and the event $E _ { \infty } ^ { \rho }$ is translated to + +$$ +\begin{array} { r } { E _ { \infty } ^ { \rho } = \left\{ x _ { n + 1 } ^ { k } \in \mathbb { B } _ { r } ( z ^ { * } ) , \tilde { x } ^ { k } \in \mathbb { B } _ { \rho r } ( z ^ { * } ) , p ^ { k } \in \mathbb { B } _ { \rho r } ( p ^ { * } ) \mathrm { ~ f o r ~ a l l ~ } k = 1 , 2 , \ldots \right\} . } \end{array} +$$ + +An analog of Theorem 2 of Hsieh et al. (2020) could then be developed based on this result. 
\ No newline at end of file diff --git a/md/test/TrloAXEJ2B/TrloAXEJ2B.md b/md/test/TrloAXEJ2B/TrloAXEJ2B.md new file mode 100644 index 0000000000000000000000000000000000000000..87d00bee4aac5a17ed7d3b8679e5d36e567fa964 --- /dev/null +++ b/md/test/TrloAXEJ2B/TrloAXEJ2B.md @@ -0,0 +1,301 @@ +# LoraHub: Efficient Cross-Task Generalization via Dynamic LoRA Composition + +Chengsong Huang $\mathbf { \Delta } \mathbf { \dag \ S \mathrm { \ s \mathrm { \ s } } }$ , Qian Liuβ€ βˆ—, Bill Yuchen $\mathbf { L i n } ^ { \bigotimes * }$ , Tianyu Pang†, Chao ${ { \mathbf { D } } { { \mathbf { u } } } ^ { \dag } }$ , Min Lin† †Sea AI Lab, Singapore Β§Washington University in St. Louis, MO, USA β™’Allen Institute for AI, Seattle, WA, USA + +# Abstract + +Low-rank adaptations (LoRA) are often employed to fine-tune large language models (LLMs) for new tasks. This paper investigates LoRA composability for cross-task generalization and introduces LoraHub, a simple framework devised for the purposive assembly of LoRA modules trained on diverse given tasks, with the objective of achieving adaptable performance on unseen tasks. With just a few examples from a new task, LoraHub can fluidly combine multiple LoRA modules, eliminating the need for human expertise and assumptions. Notably, the composition requires neither additional model parameters nor gradients. Empirical results on the Big-Bench Hard benchmark suggest that LoraHub, while not surpassing the performance of in-context learning, offers a notable performanceefficiency trade-off in few-shot scenarios by employing a significantly reduced number of tokens per example during inference. Notably, LoraHub establishes a better upper bound compared to in-context learning when paired with different demonstration examples, demonstrating its potential for future development. Our vision is to establish a platform for LoRA modules, empowering users to share their trained LoRA modules. 
This collaborative approach facilitates the seamless application of LoRA modules to novel tasks, contributing to an adaptive ecosystem. Our code is available at github.com/sail-sg/lorahub, and all the pre-trained LoRA modules are released at huggingface.co/lorahub. + +# 1 Introduction + +![](images/95e2ddec39022b4d6452e07a5b9cddb6f0b9d45a3c19a11a7273387b6b7e1205.jpg) +Figure 1: The illustration of zero-shot learning, few-shot in-context learning and few-shot LoraHub learning (ours). Note that the Compose procedure is conducted per task rather than per example. Our method achieves similar inference throughput as zero-shot learning, yet approaches the performance of in-context learning on the BIG-Bench Hard (BBH) benchmark. + +Recent progress in natural language processing (NLP) has been largely fueled by large language models (LLMs) such as OpenAI GPT (Brown et al., 2020), FLAN-T5 (Chung et al., 2022), and LLaMA (Touvron et al., 2023). These models demonstrate top-tier performance across different NLP tasks. However, their enormous parameter size presents issues regarding computational efficiency and memory usage during fine-tuning. To mitigate these challenges, Low-Rank Adaptation (LoRA) (Hu et al., 2022) has emerged as a parameterefficient fine-tuning technique (Lester et al., 2021; He et al., 2022; An et al., 2022). By reducing memory demands and computational costs, it speeds up LLM training. LoRA achieves this by freezing the base model parameters (that is, an LLM) and training a lightweight module, which regularly delivers high performance on target tasks. + +While prior research has targeted the efficiency enhancement facilitated by LoRA, there is a dearth of investigation into the inherent modularity and composability of LoRA modules. Typically, previous methods train LoRA modules to specialize in individual tasks. 
Yet, the intrinsic modularity of LoRA modules presents an intriguing research question: Would it be possible to compose LoRA modules to generalize to novel tasks in an efficient manner? In this paper, we tap into the potential of LoRA modularity for broad task generalization, going beyond single-task training to meticulously compose LoRA modules for malleable performance on unknown tasks. Crucially, our method enables an automatic assembling of LoRA modules, eliminating dependency on manual design or human expertise. With just a handful of examples from new tasks (e.g., 5), our approach can autonomously compose compatible LoRA modules without human intrusion. We do not make assumptions about which LoRA modules trained on particular tasks can be combined, allowing for flexibility in amalgamating any modules as long as they conform to the specification (e.g., using the same LLM). As our approach leverages several available LoRA modules, we refer to it as LoraHub and denote our learning method as LoraHub learning. + +To validate the efficiency of our proposed methods, we test our approaches using the widely recognized BBH benchmark with FLAN-T5 (Chung et al., 2022) serving as the base LLM. The results underline the effectiveness of the LoRA module composition for unfamiliar tasks through a few-shot LoraHub learning process. Notably, our methodology achieves an average performance that closely matches that of few-shot in-context learning, while demonstrating a superior upper bound, particularly when using different demonstration examples. Additionally, our method substantially reduces the inference cost compared to in-context learning, eliminating the requirement of examples as inputs for the LLM. With fewer tokens per example during inference, our method significantly reduces computational overhead and enables faster responses. 
It aligns with a broader research trend, where recent studies are actively exploring approaches to reduce the number of input tokens (Zhou et al., 2023; Ge et al., 2023; Chevalier et al., 2023; Jiang et al., 2023a; Li et al., 2023; Jiang et al., 2023b). Our learning procedure is also notable for its computational efficiency, using a gradient-free approach to obtain the coefficients of LoRA modules and requiring only a handful of inference steps for unseen tasks. For example, when applied to a new task in BBH, our methodology can deliver superior performance in less than a minute using a single A100 card. + +Importantly, LoraHub learning can feasibly be accomplished with a CPU-only machine, requiring proficiency solely for processing LLM inference. In our pursuit to democratize artificial intelligence, we are taking an important step forward by envisioning the establishment of the LoRA platform. The platform would serve as a marketplace where users can seamlessly share and access well-trained LoRA modules for diverse applications. LoRA providers have the flexibility to freely share or sell their modules on the platform without compromising data privacy. Users, equipped with CPU capability, can leverage trained LoRA modules contributed by others through automated distribution and composition algorithms. This platform not only cultivates a repository of reusable LoRA modules with a myriad of capabilities but also sets the stage for cooperative AI development. It empowers the community to collectively enrich the LLM’s capabilities through dynamic LoRA composition. + +# 2 Problem Statement + +Large Language Models We assume that a large language model $M _ { \theta }$ is based on Transformer architecture (Vaswani et al., 2017) and has been pre-trained on a large-scale text corpus. The model architecture can be either encoder-decoder (Raffel et al., 2020) or decoderonly (Brown et al., 2020). 
Also, $M _ { \theta }$ could have been fine-tuned with a large set of instruction-following datasets such as the Flan Collection (Longpre et al., 2023) and PromptSource (Bach et al., 2022). + +Cross-Task Generalization In real-world situations, users often desire an LLM to perform novel tasks that it has not encountered before — an ability widely known as cross-task generalization. Generally, cross-task generalization falls into two categories: zero-shot learning (Mishra et al., 2022; Sanh et al., 2022; Chung et al., 2022; OpenAI, 2022; Lin et al., 2022), which necessitates no labeled examples of the new task, and few-shot learning (Ye et al., 2021; Min et al., 2022), which demands a handful of labeled examples. Assume we have $N$ distinct upstream tasks that the LLM has been trained on, denoted as $\mathbb { T } = \{ \mathcal { T } _ { 1 } , \ldots , \mathcal { T } _ { N } \}$ . Our paper primarily focuses on the latter category, where for an unseen target task $\mathcal { T } ^ { \prime } \notin \mathbb { T }$ , users can only provide a limited set of labeled examples, $Q$ . Our aim is to modify the model $M _ { \theta }$ to adapt it to task $\mathcal { T } ^ { \prime }$ using only $Q$ . An intuitive method would be to fine-tune the weights of $M _ { \theta }$ based on $Q$ , yielding an updated model $M _ { \phi }$ with enhanced performance on $\mathcal { T } ^ { \prime }$ . However, this approach is inefficient, time-consuming, and unstable when $Q$ is small. + +LoRA Tuning LoRA (Hu et al., 2022) is a parameter-efficient fine-tuning method that facilitates the adaptation of LLMs using lightweight modules, eliminating the need for fine-tuning the entire weights. LoRA tuning involves keeping the original model weights frozen while introducing trainable low-rank decomposition matrices as adapter modules into each layer of the model. 
Compared to the base LLM, this module possesses significantly fewer trainable parameters, paving the way for rapid adaptation using minimal examples. As such, LoRA tuning presents a resource-efficient technique to quickly adapt LLMs for new tasks with restricted training data. However, traditional LoRA methods primarily concentrate on training and testing within the same tasks (Gema et al., 2023), rather than venturing into few-shot cross-task generalization. + +# 3 Methodology + +In this section, we provide an overview of our proposed method. We then explain the LoRA tuning procedure in detail. Last, we introduce the procedure of our LoraHub learning, which consists of the COMPOSE stage and the ADAPT stage. + +# 3.1 Method Overview + +As depicted in Figure 2, we initially train LoRA modules on a variety of upstream tasks. Specifically, for $N$ distinct upstream tasks, we separately train $N$ LoRA modules, each represented as $m _ { i }$ for task $\mathscr { T } _ { i } \in \mathbf { \hat { T } }$ . Subsequently, for a new task $\mathcal { T } ^ { \prime } \notin \mathbb { T } ,$ , such as Boolean Expressions represented in Figure 2, its examples $Q$ are utilized to steer the LoraHub learning process. The LoraHub learning encapsulates two main phases: the COMPOSE phase and the ADAPT phase. In the COMPOSE phase, all available LoRA modules are combined into a single integrated module $\hat { m } _ { - }$ , using $\left\{ w _ { 1 } , w _ { 2 } , \dots , w _ { N } \right\}$ as coefficients. Each $w _ { i }$ is a scalar value that can take on positive or negative values, and the combination can be done in different ways. During the ADAPT phase, the combined LoRA module $\hat { m }$ is amalgamated with the LLM $M _ { \theta }$ , and its performance on few-shot examples from the new task $\mathbf { \breve { { \mathbf { \nabla } } } } _ { \mathbf { \mathbf { \mathbf { \mathbf { \mathcal { T } } } } } ^ { \prime } }$ is assessed. 
A gradient-free algorithm is subsequently deployed to update $w$ , enhancing $\hat { m }$ ’s performance (e.g., loss) on the few-shot examples $Q$ . Finally, after iterating through $K$ steps, the best-performing LoRA module is applied to the LLM $M _ { \theta }$ , yielding the final LLM $M _ { \phi } = \mathrm { LoRA } ( M _ { \theta } , \hat { m } )$ . This serves as an effectively adjusted model for the unseen task $\mathcal { T } ^ { \prime }$ , which will then be deployed and not updated anymore. + +# 3.2 LoRA tuning on upstream tasks + +LoRA effectively minimizes the number of trainable parameters through the process of decomposing the attention weight matrix update of the LLM, denoted as $W _ { 0 } \in \mathbb { R } ^ { d \times k }$ , into low-rank matrices. In more specific terms, LoRA exhibits the updated weight matrix in the form $W _ { 0 } + \delta W = W _ { 0 } + A B$ , where $A \in \mathbb { R } ^ { d \times r }$ and $B \in \mathbb { R } ^ { r \times k }$ are trainable low-rank matrices with rank $r$ , a dimension significantly smaller than those of $d$ and $k$ . In this context, the product $A B$ defines the LoRA module $m$ , as previously elaborated. By leveraging the low-rank decomposition, LoRA substantially reduces the number of trainable parameters needed to adapt the weights of LLMs during fine-tuning. + +![](images/fdc28d30d1864590ed2196198df1e30168cf83fc2b25e930c617edf738bdbc3b.jpg) +Figure 2: Our method encompasses two stages: the COMPOSE stage and the ADAPT stage. During the COMPOSE stage, existing LoRA modules are integrated into one unified module, employing a set of coefficients, denoted as $w$ . In the ADAPT stage, the combined LoRA module is evaluated on a few examples from the unseen task. Subsequently, a gradient-free algorithm is applied to refine $w$ . After executing $K$ iterations, a highly adapted combined LoRA module is produced, which can be incorporated with the LLM to perform the intended task. 
+ +# 3.3 COMPOSE: Element-wise composition of LoRA modules + +Within the COMPOSE stage, we implement an element-wise method to combine LoRA modules. This process integrates the corresponding parameters of the LoRA modules, requiring the modules being combined to have the same rank $r$ to properly align the structures. Given that $m _ { i } = A _ { i } B _ { i }$ , the combined LoRA module $\hat { m }$ can be obtained by: + +$$ +\hat { m } = ( w _ { 1 } A _ { 1 } + w _ { 2 } A _ { 2 } + \cdots + w _ { N } A _ { N } ) ( w _ { 1 } B _ { 1 } + w _ { 2 } B _ { 2 } + \cdots + w _ { N } B _ { N } ) . +$$ + +Notably, as we show in Sec. 5, combining too many LoRA modules at once can expand the search space exponentially, which may destabilize the LoraHub learning process and prevent optimal performance. To mitigate this, we employ random selection to prune the candidate space, and more advanced pre-filtering algorithms could be explored in the future. + +# 3.4 ADAPT: Weight optimization via gradient-free methods + +During the ADAPT stage, our goal is to modify the coefficients $w$ to boost the model’s performance on the examples from an unseen task. One might think of using gradient descent to optimize $w$ , following standard backpropagation methods. However, this approach demands constructing a hypernetwork for all LoRA modules, similar to differentiable architecture search methods (Zhang et al., 2019). Constructing these hypernetworks demands substantial GPU memory and time, posing a challenge. Given that $w$ consists of a relatively small number of parameters, we opted for gradient-free methods for optimization instead of gradient descent. + +Inspired by previous work (Sun et al., 2022), we utilize a black-box optimization technique to find the optimal $w$ . 
The optimization process is steered by the cross-entropy loss, setting the goal to locate the best set $\left\{ w _ { 1 } , w _ { 2 } , \ldots , w _ { N } \right\}$ that reduces the loss $L$ on the few-shot examples $Q$ . Furthermore, we incorporate L1 regularization to penalize the sum of the absolute values of $w$ , helping to prevent obtaining extreme values. Consequently, the final objective of LoraHub is to minimize $L + \alpha \cdot \sum _ { i = 1 } ^ { N } | w _ { i } |$ , where $\alpha$ serves as a hyperparameter. + +In terms of the gradient-free method, we leverage Shiwa, a combinatorial optimization approach (Liu et al., 2020). Shiwa offers a variety of algorithms and chooses the most suitable optimization algorithm for different circumstances. In most of the forthcoming experimental setups, we primarily employ the Covariance Matrix Adaptive Evolution Strategies (CMA-ES) (Hansen & Ostermeier, 1996). CMA-ES, as a stochastic and population-based optimization algorithm, offers versatility in addressing a broad spectrum of optimization challenges. It dynamically adjusts a search distribution, which is defined by a covariance matrix. During each iteration, CMA-ES systematically updates both the mean and covariance of this distribution to optimize the target function. In our application, we employ this algorithm to mold the search space for $w$ . Ultimately, we use it to identify the optimal $w$ by evaluating their performance on the few-shot examples from an unseen task. 
The model has shown impressive abilities to perform zero-shot and few-shot learning. + +Candidate LoRA Modules Our methodology requires a compendium of LoRA modules trained on preceding tasks. For parity with FLAN, we adopt the tasks utilized to instruct FLAN-T5, thereby incorporating nearly 200 distinct tasks and their corresponding instructions. Following this, we trained several LoRA modules as potential candidates. During each experimental sequence, we randomly select 20 LoRA modules from them as the candidates for our LoraHub learning. + +Dataset and evaluation Our method is evaluated using the Big-Bench Hard (BBH) benchmark, a well-established standard that consists of multiple-choice questions from a variety of domains. The benchmark consists of 27 different tasks, which are regarded as challenging for language models. For all tasks, we employ the exact match (EM) as our evaluation metric. + +Baseline Setup To enhance the demonstration of our method’s performance, we expanded our comparisons beyond the zero-shot and in-context learning settings. We specifically chose three representative gradient-based methods for comparison: full fine-tuning (FFT), LoRA tuning (LoRA) (Hu et al., 2022), and IA3 fine-tuning (IA3) (Liu et al., 2022). For all gradient-based methods, for a fair comparison, we train for 40 epochs on the same three runs of 5 examples employed in our methods. In the case of FFT, a learning rate of 3e-5 is employed, whereas for IA3 and LoRA, we adopt a learning rate of 2e-4. We report the performance of each method on the test set at the end of training (averaged over three runs) without any model selection to avoid potential selection bias. + +# 4.2 Main results + +As shown in Table 1, our experimental results demonstrate the superior efficacy of our method in comparison to zero-shot learning while closely resembling the performance of in-context learning (ICL) in few-shot scenarios. 
This observation is derived from an average performance of three runs, each leveraging different few-shot examples. Importantly, our model utilizes an equivalent number of tokens as the zero-shot method, notably fewer than the count used by ICL. Although occasional performance fluctuations, our method consistently outperforms zero-shot learning in most tasks. In the era of LLMs, the input length is directly proportional to the inference cost, and thus LoraHub’s ability to economize on input tokens while approaching the peak performance grows increasingly significant. Moreover, as shown in Appendix Table 4, the upper bound performance of our method across these runs can surpass ICL on 18 tasks, demonstrating its potential for future development. + +Table 1: Experimental results of zero-shot learning (Zero), few-shot in-context learning (ICL), IA3 fine-tuning (IA3), LoRA tuning (LoRA), full fine-tuning (FFT) and our proposed few-shot LoraHub learning (LoraHub) on the BBH benchmark with FLAN-T5-large as the base LLM. We denote algorithmic tasks with the superscript $\ S$ following previous work (Wu et al., 2023b). Note that we employ three runs, each leveraging different 5-shot examples per task, as demonstrations for all few-shot methods. The average performance of all methods is reported below, and the best performance of each few-shot method can be found in the Appendix B. + +
TaskZeroICLavgIA3avgLoRAavgFFTavgLoraHubavg
Boolean Expressions54.059.656.256.062.255.5
Causal Judgement57.559.460.255.657.554.3
Date Understanding15.320.420.035.859.332.9
Disambiguation0.069.10.068.068.245.2
Dyck Languages1.30.94.222.219.51.0
Formal Fallacies51.355.351.553.654.052.8
Geometric Shapes6.719.614.72431.17.4
Hyperbaton6.771.849.355.377.362.8
Logical DeductionS (five objects)21.339.132.740.042.236.1
Logical DeductionS (seven objects)12.740.733.837.344.936.8
Logical DeductionS (three objects)0.051.68.553.652.945.7
Movie Recommendation62.755.861.851.566.055.3
Multistep Arithmetic0.70.70.70.20.00.4
Navigate47.345.346.248.048.047.1
Object Counting34.732.435.138.735.633.7
Penguins in a Table43.541.345.036.231.935.9
Reasoning about Colored Objects32.040.240.739.637.640.0
Ruin Names23.319.324.437.861.324.4
Salient Translation Error Detection37.347.337.116.016.236.0
Snarks50.054.253.955.666.756.9
Sports Understanding56.054.755.156.554.056.7
Temporal Sequences16.725.118.225.137.818.2
Tracking Shuffled ObjectsS (five objects)12.012.012.013.816.912.3
Tracking Shuffled Objects (seven objects)6.76.76.710.09.87.7
Tracking Shuffled ObjectsS (three objects)24.731.130.730.932.029.2
Web of Lies54.053.854.252.748.250.1
Word Sorting1.30.51.34.94.91.1
Avg Performance Per Task27.037.331.637.742.134.7
Avg Tokens Per Example111.6597.8111.6111.6111.6111.6
Gradient-based TrainingNoNoYesYesYesNo
+ +Even when compared to certain gradient-based optimization methods, our approach consistently demonstrates competitive performance. For example, as depicted in Table 1, our method exhibits a notable improvement of $3 . 1 \%$ on average in contrast to the promising IA3 method. Nevertheless, we acknowledge that our approach still falls behind LoRA tuning and full fine-tuning, especially in tasks that exhibit significant deviation from the upstream task. Taking Dyck Languages as an example, both LoraHub and ICL achieve only an average performance of nearly $1 . 0 \%$ on these tasks, while LoRA and FFT methods showcase impressive results with only 5 examples. + +# 4.3 Discussion + +LoraHub addresses the challenge of reducing inference costs by eliminating the need for processing additional tokens, resulting in a noticeable reduction in overall inference expenses. However, it introduces an inherent cost during the ADAPT stage, necessitating extra inference steps, such as the 40 steps employed in our experiments. This introduces a trade-off between choosing the ICL approach and LoraHub, with the decision typically hinging on the nature of the situation. + +For one-time ad-hoc tasks, the ICL approach should be more pragmatic due to LoraHub’s additional inference step costs. In such scenarios, where immediate, single-use solutions are preferred, the simplicity and efficiency of ICL might outweigh the benefits of potential savings offered by LoraHub. Conversely, for recurring or similar tasks, LoraHub emerges as a compelling option. Despite the added inference step cost, LoraHub’s ability to efficiently handle repetitive tasks, often occurring thousands of times, while concurrently reducing overall expenses, positions it as a viable option in such kind of situations. + +In summary, our intention is not to replace $\scriptstyle { \mathrm { I C L } } ,$ but to present LoraHub as a complementary strategy with performance-efficiency trade-offs. 
Thus, we encourage a careful consideration of specific use cases and requirements when choosing between ICL and LoraHub, recognizing that the optimal solution may vary based on the nature and frequency of the tasks at hand. + +# 5 Experimental Analysis + +In this section, we thoroughly examine the characteristics of our proposed method and uncover several insightful findings. If not specified, we use FLAN-T5-large for all analysis. + +Does composing LoRA modules extend beyond the single module’s benefits? + +We acknowledge the investigation of cross-task performance in prior work (Jang et al., 2023), which delved into the capabilities of LoRA and proposed a novel method centered around LoRA module retrieval. In order to ensure a fair comparison, we conducted an experiment where we + +Table 2: The average performance of various methods across all tasks in the benchmark BBH. + +
LoRA RetrievalLoraHub avgLoraHub best
31.734.741.2
+ +designed a LoRA retrieval mechanism based on the loss derived from few-shot examples. Specifically, we ranked all LoRA module candidates according to this loss and evaluated the best candidate on the test set of the unseen task. As depicted in Table 2, the performance of LoRA retrieval is notably impressive, positioning it as a strong baseline. However, in comparison to LoraHub, the performance of LoRA retrieval is relatively less favorable + +How effective is the gradient-free optimization method? + +To assess the effectiveness of our gradient-free optimization method in correctly identifying the most suitable LoRA module for a given downstream task, we carried out an empirical study using the WikiTableQuestions (Pasupat & Liang, 2015) (WTQ) dataset. We strategically included a LoRA module that was specifically trained on the WTQ dataset into our pool of LoRA candidate modules, which originally stemmed from tasks exclusive to the Flan Collection. Subsequently, we designated WTQ as the targeted downstream task and computed the weights consistent with the methods employed in LoraHub learning. As an end result, the WTQ-specific LoRA module was awarded the highest weight, exemplifying the algorithm’s success in recognizing it as the most relevant. Moreover, the combined LoRA module demonstrated marginal superiority over the WTQ LoRA module. This underscores the claim that the gradient-free optimization method has the ability to proficiently select the optimal upstream LoRA module for an unseen task. + +Can LoraHub work well on non-instruction-tuning models? + +In previous investigations, we primarily focused on models with zero-shot capabilities that were trained with instruction tuning. However, for models like T5 without zero-shot abilities, where training has a larger effect on parameters, it was unclear if LoraHub could still effectively manage and improve them. 
Our experiments show that although these models perform worse than FLAN-T5, LoraHub learning can still enable them to effectively generalize to unseen tasks. See Appendix C for more details. + +Will the rank of LoRA modules impact the performance of LoraHub learning? + +The parameter rank plays a crucial role in the LoRA framework, directly influencing the number of trainable parameters utilized during LoRA tuning. This prompts an intriguing question: does the variation in rank values influence the outcomes observed within the LoraHub learning? Our analysis indicates that, for FLAN-T5, the choice of rank has minimal impact. However, for T5, it still exerts some influence. Empirical findings reveal that, in comparison to rank values of 4 or 64, a rank value of 16 consistently demonstrates superior performance across different runs, both in terms of average and optimal values. Additional results are available in Appendix C. + +Do more LoRA modules lead to better results? + +In our main experiments, we randomly selected 20 LoRA modules for LoraHub learning. Therefore, we conducted experiments to investigate the effect of using different numbers of LoRA modules. The results demonstrate that as we increased the number of LoRA modules, the variance in performance increased. However, the maximum achievable performance also improved. More analysis on the variance and the detailed results can be found in Appendix H. + +How much computational resource can be saved? + +We follow the memory test settings from the LoRA-FA (Zhang et al., 2023b) study for an accurate benchmark. In this context, full fine-tuning required about 40GB of memory, whereas LoRA fine-tuning used around 34GB. Remarkably, LoraHub only utilized about 5GB of memory, illustrating its efficiency due to the inference-only mode, which eliminates the need for storing gradients and optimization states. 
+ +# 6 Related work + +Model Merging Our method substantially draws on the concept of LoRA module composition, and thus, aligns with the significant thread of research in model merging. This research focus is broadly categorized based on the ultimate objectives of model merging. + +The first category focuses on merging entire models, and the goal is to combine individually trained models to approximate the performance benefits of model ensembling or multi-task learning. Prior works (Matena & Raffel, 2021; Jin et al., 2023; Yadav et al., 2023; Wu et al., 2023a) operated under the assumption of shared model architectures. For example, Matena & Raffel (2021) amalgamates models by approximating Gaussian posterior distributions garnered from Fisher information, while Yadav et al. (2023) merges models via resolving model interferences. Another approach is merging models with different architectures. For instance, Ainsworth et al. (2023) configures weights of different models prior to their merger. Following this objective, Stoica et al. (2023) merges models operating on varying tasks by identifying common features, without requiring additional training. Unlike these works, our work focuses on merging models for better cross-task generalization. + +The second category most closely aligns with our research, stemming from a shared motivation of module composition. Various scholars have made advances in this line of research: Kingetsu et al. (2021) decomposes and recomposes modules on the basis of their functionality; Ilharco et al. (2023) proposes modulating model behavior using task vectors; Lv et al. (2023) amalgamates parameter-efficient modules weighted according to task similarity; Zhang et al. (2023a) crafts modules by employing specific arithmetic operations; Sun et al. (2023) improves few-shot performance of unseen tasks by multi-task pre-training of prompts; Chronopoulou et al. (2023) averages adapter weights intended for transfer; Ponti et al. 
(2023) focuses on jointly learning adapters and a routing function that allocates skills to each task; and Muqeeth et al. (2023) concentrates on amalgamating experts in mixture of experts models; However, these methods generally necessitate multi-task training or human prior on module selection for the downstream task. In contrast, our method does not impose any special training requirements and simply employs vanilla LoRA tuning. Additionally, the module selection for downstream tasks is entirely data-driven without human prior knowledge. This design gives the advantage of easily adding new LoRA modules for reuse, allowing our method to flexibly scale up the number of LoRA module candidates in the future. + +Mixture of Experts The Mixture of Experts (MoE) is an ensemble method, often visualized as a collection of sub-modules, or β€œexperts”, each specializing in processing different types of input data. Each expert in this system is controlled by a unique gating network, activated based on the distinct nature of the input data. For every token in these input sequences, this network identifies and engages the most suitable experts to process the data. As a result, the performance is superior compared to relying on a single, generic model for all types of input. This technique has proven instrumental in numerous domains, such as natural language processing and computer vision (Jacobs et al., 1991; Shazeer et al., 2017; Du et al., 2022; Zhang et al., 2022; Wang et al., 2022; crumb, 2023). Our methodology displays similarities to MoE, wherein upstream-trained LoRA modules can be aligned with MoE’s expert design. A noteworthy distinguishing factor is that our approach mechanism does not require any specialized manipulation of LoRAs during training while facilitating dynamic LoRA module assembly at any scale, each pre-tuned to different tasks. In contrast, MoE mandates a predetermined count of experts during both the training and testing phases. 
Recent studies on the interrelation between MoE and instruction tuning have demonstrated that the simultaneous application of both approaches enhances the effectiveness of each individually (Shen et al., 2023). + +Cross-Task generalization Recent advancements like CrossFit (Ye et al., 2021), ExT5 (Aribandi et al., 2022), FLAN (Wei et al., 2022), T0 (Sanh et al., 2022), InstructGPT (Ouyang et al., 2022), and ReCross (Lin et al., 2022) have been striving to foster a vastly multi-task model’s generalization across different tasks, very much aligned with the objectives of our research. Among this cohort, the connections of CrossFit and ReCross with LoraHub are particularly noteworthy. The CrossFit framework (Ye et al., 2021) mandates a minimal number of labeled examples of the target task for few-shot fine-tuning. However, its limitation lies in the application of task names as hard prefixes in templates, posing challenges in the task’s generalization. On the other hand, while ReCross mitigates the need for labels in few-shot examples for retrieval, it necessitates a fine-tuning process using the retrieved data. This procedure appears time-consuming when compared to LoraHub’s approach. Through the deployment of few-shot labeled examples and a gradientfree optimization process, LoraHub facilitates an iterative update of weights to compose the LoRA modules. The resultant method is more efficient and cost-effective relative to previous work. Overall, LoraHub offers a more practical and viable solution to the optimization process. + +# 7 Conclusion + +In this work, we have introduced LoraHub, a strategic framework for composing LoRA modules trained on diverse tasks in order to achieve adaptable performance on new tasks. Our approach enables the fluid combination of multiple LoRA modules using just a few examples from a novel task, without requiring additional model parameters or human expertise. 
The empirical results on the BBH benchmark demonstrate that LoraHub can effectively match the performance of in-context learning in few-shot scenarios, removing the need for in-context examples during inference. Overall, our work shows the promise of strategic LoRA composability for rapidly adapting LLMs to diverse tasks. By fostering reuse and combination of LoRA modules, we can work towards more general and adaptable LLMs while minimizing training costs. + +# Reproducibility Statement + +The authors have made great efforts to ensure the reproducibility of the empirical results reported in this paper. Firstly, the experiment settings, evaluation metrics, and datasets were described in detail in Section 4.1. Secondly, the source code and scripts implementing the proposed method and experiments will be made publicly available upon acceptance of the paper. Thirdly, pre-trained LoRA modules from this work along with their configuration files and weights will be shared. These allow reproduction without retraining the LoRA modules, enabling quick testing and verification. + +# References + +Samuel Ainsworth, Jonathan Hayase, and Siddhartha Srinivasa. Git re-basin: Merging models modulo permutation symmetries. In The Eleventh International Conference on Learning Representations, 2023. +Shengnan An, Yifei Li, Zeqi Lin, Qian Liu, Bei Chen, Qiang Fu, Weizhu Chen, Nanning Zheng, and Jian-Guang Lou. Input-tuning: Adapting unfamiliar inputs to frozen pretrained models. ArXiv preprint, 2022. +Vamsi Aribandi, Yi Tay, Tal Schuster, Jinfeng Rao, Huaixiu Steven Zheng, Sanket Vaibhav Mehta, Honglei Zhuang, Vinh Q. Tran, Dara Bahri, Jianmo Ni, Jai Prakash Gupta, Kai Hui, Sebastian Ruder, and Donald Metzler. Ext5: Towards extreme multi-task scaling for transfer learning. In Proc. of ICLR, 2022. +Stephen Bach, Victor Sanh, Zheng Xin Yong, Albert Webson, Colin Raffel, Nihal V. 
Nayak, Abheesht Sharma, Taewoon Kim, M Saiful Bari, Thibault Fevry, Zaid Alyafeai, Manan Dey, Andrea Santilli, Zhiqing Sun, Srulik Ben-david, Canwen Xu, Gunjan Chhablani, Han Wang, Jason Fries, Maged Al-shaibani, Shanya Sharma, Urmish Thakker, Khalid Almubarak, Xiangru Tang, Dragomir Radev, Mike Tian-jian Jiang, and Alexander Rush. PromptSource: An integrated development environment and repository for natural language prompts. In Proc. of ACL, 2022. +Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. Language models are few-shot learners. In Hugo Larochelle, Marc’Aurelio Ranzato, Raia Hadsell, MariaFlorina Balcan, and Hsuan-Tien Lin (eds.), Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual, 2020. + +Alexis Chevalier, Alexander Wettig, Anirudh Ajith, and Danqi Chen. Adapting language models to compress contexts. CoRR, abs/2305.14788, 2023. doi: 10.48550/ARXIV.2305. 14788. URL https://doi.org/10.48550/arXiv.2305.14788. + +Alexandra Chronopoulou, Matthew Peters, Alexander Fraser, and Jesse Dodge. AdapterSoup: Weight averaging to improve generalization of pretrained language models. In Findings of the Association for Computational Linguistics: EACL 2023, 2023. + +Hyung Won Chung, Le Hou, S. 
Longpre, Barret Zoph, Yi Tay, William Fedus, Eric Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, Albert Webson, Shixiang Shane Gu, Zhuyun Dai, Mirac Suzgun, Xinyun Chen, Aakanksha Chowdhery, Dasha Valter, Sharan Narang, Gaurav Mishra, Adams Wei Yu, Vincent Zhao, Yanping Huang, Andrew M. Dai, Hongkun Yu, Slav Petrov, Ed Huai hsin Chi, Jeff Dean, Jacob Devlin, Adam Roberts, Denny Zhou, Quoc V. Le, and Jason Wei. Scaling instruction-finetuned language models. ArXiv preprint, 2022. +crumb. Llama-2, mixutre of lora. https://crumbly.medium.com/ llama-2-molora-f5f909434711, 2023. +Nan Du, Yanping Huang, Andrew M. Dai, Simon Tong, Dmitry Lepikhin, Yuanzhong Xu, Maxim Krikun, Yanqi Zhou, Adams Wei Yu, Orhan Firat, Barret Zoph, Liam Fedus, Maarten P. Bosma, Zongwei Zhou, Tao Wang, Yu Emma Wang, Kellie Webster, Marie Pellat, Kevin Robinson, Kathleen S. Meier-Hellstern, Toju Duke, Lucas Dixon, Kun Zhang, Quoc V. Le, Yonghui Wu, Zhifeng Chen, and Claire Cui. Glam: Efficient scaling of language models with mixture-of-experts. In Kamalika Chaudhuri, Stefanie Jegelka, Le Song, Csaba Szepesvari, Gang Niu, and Sivan Sabato (eds.), Β΄ International Conference on Machine Learning, ICML 2022, 17-23 July 2022, Baltimore, Maryland, USA, Proceedings of Machine Learning Research, 2022. +Tao Ge, Jing Hu, Xun Wang, Si-Qing Chen, and Furu Wei. In-context autoencoder for context compression in a large language model. CoRR, abs/2307.06945, 2023. doi: 10. 48550/ARXIV.2307.06945. URL https://doi.org/10.48550/arXiv.2307.06945. +Aryo Pradipta Gema, Luke Daines, Pasquale Minervini, and Beatrice Alex. Parameterefficient fine-tuning of llama for the clinical domain. ArXiv preprint, 2023. +Nikolaus Hansen and Andreas Ostermeier. Adapting arbitrary normal mutation distributions in evolution strategies: the covariance matrix adaptation. Proceedings of IEEE International Conference on Evolutionary Computation, 1996. +Junxian He, Chunting Zhou, Xuezhe Ma, Taylor Berg-Kirkpatrick, and Graham Neubig. 
Towards a unified view of parameter-efficient transfer learning. In Proc. of ICLR, 2022. +Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. Lora: Low-rank adaptation of large language models. In Proc. of ICLR, 2022. +Gabriel Ilharco, Marco Tulio Ribeiro, Mitchell Wortsman, Ludwig Schmidt, Hannaneh Hajishirzi, and Ali Farhadi. Editing models with task arithmetic. In The Eleventh International Conference on Learning Representations, 2023. +Robert A. Jacobs, Michael I. Jordan, Steven J. Nowlan, and Geoffrey E. Hinton. Adaptive mixtures of local experts. Neural Computation, 1991. +Joel Jang, Seungone Kim, Seonghyeon Ye, Doyoung Kim, Lajanugen Logeswaran, Moontae Lee, Kyungjae Lee, and Minjoon Seo. Exploring the benefits of training expert language models over instruction tuning. In International Conference on Machine Learning, 2023. URL https://api.semanticscholar.org/CorpusID:256627673. +Huiqiang Jiang, Qianhui Wu, Chin-Yew Lin, Yuqing Yang, and Lili Qiu. Llmlingua: Compressing prompts for accelerated inference of large language models. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing. Association for Computational Linguistics, December 2023a. URL https://arxiv.org/abs/2310.05736. + +Huiqiang Jiang, Qianhui Wu, Xufang Luo, Dongsheng Li, Chin-Yew Lin, Yuqing Yang, and Lili Qiu. Longllmlingua: Accelerating and enhancing llms in long context scenarios via prompt compression. CoRR, abs/2310.06839, 2023b. doi: 10.48550/ARXIV.2310.06839. URL https://doi.org/10.48550/arXiv.2310.06839. + +Xisen Jin, Xiang Ren, Daniel Preotiuc-Pietro, and Pengxiang Cheng. Dataless knowledge fusion by merging weights of language models. In The Eleventh International Conference on Learning Representations, 2023. +Hiroaki Kingetsu, Kenichi Kobayashi, and Taiji Suzuki. Neural network module decomposition and recomposition. ArXiv preprint, 2021. +Brian Lester, Rami Al-Rfou, and Noah Constant. 
The power of scale for parameter-efficient prompt tuning. In Proc. of EMNLP, 2021. +Yucheng Li, Bo Dong, Chenghua Lin, and Frank Guerin. Compressing context to enhance inference efficiency of large language models. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing. Association for Computational Linguistics, December 2023. URL https://arxiv.org/abs/2310.06201. +Bill Yuchen Lin, Kangmin Tan, Chris Miller, Beiwen Tian, and Xiang Ren. Unsupervised cross-task generalization via retrieval augmentation. In NeurIPS, 2022. +Haokun Liu, Derek Tam, Mohammed Muqeeth, Jay Mohta, Tenghao Huang, Mohit Bansal, and Colin Raffel. Few-shot parameter-efficient fine-tuning is better and cheaper than incontext learning. ArXiv, abs/2205.05638, 2022. URL https://api.semanticscholar.org/ CorpusID:248693283. +Jialin Liu, A. Moreau, Mike Preuss, Baptiste Roziere, J \` erΒ΄ emy Rapin, Fabien Teytaud, and Β΄ Olivier Teytaud. Versatile black-box optimization. Proceedings of the 2020 Genetic and Evolutionary Computation Conference, 2020. +Shayne Longpre, Le Hou, Tu Vu, Albert Webson, Hyung Won Chung, Yi Tay, Denny Zhou, Quoc V. Le, Barret Zoph, Jason Wei, and Adam Roberts. The flan collection: Designing data and methods for effective instruction tuning, 2023. +Xingtai Lv, Ning Ding, Yujia Qin, Zhiyuan Liu, and Maosong Sun. Parameter-efficient weight ensembling facilitates task-level knowledge transfer. In Annual Meeting of the Association for Computational Linguistics, 2023. +Sourab Mangrulkar, Sylvain Gugger, Lysandre Debut, Younes Belkada, and Sayak Paul. Peft: State-of-the-art parameter-efficient fine-tuning methods. https://github.com/ huggingface/peft, 2022. +Michael Matena and Colin Raffel. Merging models with fisher-weighted averaging. ArXiv preprint, 2021. +Sewon Min, Mike Lewis, Luke Zettlemoyer, and Hannaneh Hajishirzi. MetaICL: Learning to learn in context. 
In Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, 2022. +Swaroop Mishra, Daniel Khashabi, Chitta Baral, and Hannaneh Hajishirzi. Cross-task generalization via natural language crowdsourcing instructions. In Proc. of ACL, 2022. +Mohammed Muqeeth, Haokun Liu, and Colin Raffel. Soft merging of experts with adaptive routing. ArXiv preprint, 2023. +OpenAI. ChatGPT. 2022. URL https://openai.com/blog/chatgpt. +Long Ouyang, Jeff Wu, Xu Jiang, Diogo Almeida, Carroll L. Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, John Schulman, Jacob Hilton, Fraser Kelton, Luke E. Miller, Maddie Simens, Amanda Askell, Peter Welinder, Paul Francis Christiano, Jan Leike, and Ryan J. Lowe. Training language models to follow instructions with human feedback. ArXiv preprint, 2022. + +Panupong Pasupat and Percy Liang. Compositional semantic parsing on semi-structured tables. In Proc. of ACL, 2015. + +Edoardo Maria Ponti, Alessandro Sordoni, Yoshua Bengio, and Siva Reddy. Combining parameter-efficient modules for task-level generalisation. In Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics, 2023. + +Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. J. Mach. Learn. Res., 2020. + +J. Rapin and O. Teytaud. Nevergrad - A gradient-free optimization platform. https:// GitHub.com/FacebookResearch/Nevergrad, 2018. + +Victor Sanh, Albert Webson, Colin Raffel, Stephen H. Bach, Lintang Sutawika, Zaid Alyafeai, Antoine Chaffin, Arnaud Stiegler, Arun Raja, Manan Dey, M Saiful Bari, Canwen Xu, Urmish Thakker, Shanya Sharma Sharma, Eliza Szczechla, Taewoon Kim, Gunjan Chhablani, Nihal V. 
Nayak, Debajyoti Datta, Jonathan Chang, Mike Tian-Jian Jiang, Han Wang, Matteo Manica, Sheng Shen, Zheng Xin Yong, Harshit Pandey, Rachel Bawden, Thomas Wang, Trishala Neeraj, Jos Rozen, Abheesht Sharma, Andrea Santilli, Thibault Fevry, Jason Alan Fries, Ryan Teehan, Teven Le Scao, Stella Biderman, Leo Gao, Β΄ Thomas Wolf, and Alexander M. Rush. Multitask prompted training enables zero-shot task generalization. In Proc. of ICLR, 2022. + +Noam Shazeer, Azalia Mirhoseini, Krzysztof Maziarz, Andy Davis, Quoc V. Le, Geoffrey E. Hinton, and Jeff Dean. Outrageously large neural networks: The sparsely-gated mixtureof-experts layer. In Proc. of ICLR, 2017. + +Sheng Shen, Le Hou, Yanqi Zhou, Nan Du, Shayne Longpre, Jason Wei, Hyung Won Chung, Barret Zoph, William Fedus, Xinyun Chen, Tu Vu, Yuexin Wu, Wuyang Chen, Albert Webson, Yunxuan Li, Vincent Zhao, Hongkun Yu, Kurt Keutzer, Trevor Darrell, and Denny Zhou. Mixture-of-experts meets instruction tuning:a winning combination for large language models, 2023. + +George Stoica, Daniel Bolya, Jakob Bjorner, Taylor Hearn, and Judy Hoffman. Zipit! merging models from different tasks without training. arXiv, 2023. + +Tianxiang Sun, Yunfan Shao, Hong Qian, Xuanjing Huang, and Xipeng Qiu. Black-box tuning for language-model-as-a-service. In Kamalika Chaudhuri, Stefanie Jegelka, Le Song, Csaba Szepesvari, Gang Niu, and Sivan Sabato (eds.), Β΄ International Conference on Machine Learning, ICML 2022, 17-23 July 2022, Baltimore, Maryland, USA, Proceedings of Machine Learning Research, 2022. + +Tianxiang Sun, Zhengfu He, Qin Zhu, Xipeng Qiu, and Xuanjing Huang. Multitask pretraining of modular prompt for Chinese few-shot learning. In Proc. of ACL, 2023. + +Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothee Lacroix, Baptiste Rozi Β΄ ere, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien \` Rodriguez, Armand Joulin, Edouard Grave, and Guillaume Lample. 
Llama: Open and efficient foundation language models. ArXiv preprint, 2023. + +Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vishwanathan, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, 2017. + +Yaqing Wang, Sahaj Agarwal, Subhabrata Mukherjee, Xiaodong Liu, Jing Gao, Ahmed Hassan Awadallah, and Jianfeng Gao. AdaMix: Mixture-of-adaptations for parameter-efficient model tuning. In Proc. of EMNLP, 2022. + +Jason Wei, Maarten Bosma, Vincent Y. Zhao, Kelvin Guu, Adams Wei Yu, Brian Lester, Nan Du, Andrew M. Dai, and Quoc V. Le. Finetuned language models are zero-shot learners. In Proc. of ICLR, 2022. +Chengyue Wu, Teng Wang, Yixiao Ge, Zeyu Lu, Ruisong Zhou, Ying Shan, and Ping Luo. $\pi$ -tuning: Transferring multimodal foundation models with optimal multi-task interpolation. In Andreas Krause, Emma Brunskill, Kyunghyun Cho, Barbara Engelhardt, Sivan Sabato, and Jonathan Scarlett (eds.), International Conference on Machine Learning, ICML 2023, 23-29 July 2023, Honolulu, Hawaii, USA, volume 202 of Proceedings of Machine Learning Research, pp. 37713–37727. PMLR, 2023a. URL https://proceedings.mlr. press/v202/wu23t.html. +Shijie Wu, Ozan Irsoy, Steven Lu, Vadim Dabravolski, Mark Dredze, Sebastian Gehrmann, Prabhanjan Kambadur, David S. Rosenberg, and Gideon Mann. Bloomberggpt: A large language model for finance. CoRR, abs/2303.17564, 2023b. doi: 10.48550/arXiv.2303. 17564. URL https://doi.org/10.48550/arXiv.2303.17564. +Prateek Yadav, Derek Tam, Leshem Choshen, Colin Raffel, and Mohit Bansal. TIESmerging: Resolving interference when merging models. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. 
URL https://openreview.net/forum?id= xtaX3WyCj1. +Qinyuan Ye, Bill Yuchen Lin, and Xiang Ren. CrossFit: A few-shot learning challenge for cross-task generalization in NLP. In Proc. of EMNLP, 2021. +Chris Zhang, Mengye Ren, and Raquel Urtasun. Graph hypernetworks for neural architecture search. In Proc. of ICLR, 2019. +Fan Zhang, Duyu Tang, Yong Dai, Cong Zhou, Shuangzhi Wu, and Shuming Shi. Skillnetnlu: A sparsely activated model for general-purpose natural language understanding, 2022. +Jinghan Zhang, Shiqi Chen, Junteng Liu, and Junxian He. Composing parameter-efficient modules with arithmetic operations. ArXiv preprint, 2023a. +Longteng Zhang, Lin Zhang, Shaohuai Shi, Xiaowen Chu, and Bo Li. Lora-fa: Memory-efficient low-rank adaptation for large language models fine-tuning. ArXiv, abs/2308.03303, 2023b. URL https://api.semanticscholar.org/CorpusID:260683267. +Wangchunshu Zhou, Yuchen Eleanor Jiang, Ryan Cotterell, and Mrinmaya Sachan. Efficient prompting via dynamic in-context learning. CoRR, abs/2305.11170, 2023. doi: 10.48550/ARXIV.2305.11170. URL https://doi.org/10.48550/arXiv.2305.11170. + +Table 3: The top five beneficial LoRA modules for BBH tasks and their associated upstream tasks, the average weight values and the average performance on all BBH tasks. + +
RankDataset: TaskWeightPerfTask Description
1WIQA: Last Process0.7228.1 Identifying the last step of a given process.
2RACE: Is this the Right Answer0.6830.8Determining if given answer is correct.
3WIQA: First Process0.6328.1 Identifying the first step of a given process.
4AdversarialQA: BiDAF0.6125.1Answering adversarial questions created by a model-in-the-loop process.
5WebQuestions: What is the Answer0.5827.0 Answering questions based on given information.
+ +# A More Analysis + +Which LoRA modules are most effective for BBH tasks? + +We hypothesized that the amalgamation of LoRA modules could incorporate skills and insights from a variety of specific tasks. To evaluate this, we examined the extent of influence a single LoRA module had amongst all tasks from the BBH benchmark. We measured the impact of each isolated task by calculating the average absolute weight. The top five modules, presented in Table 3, were found to have substantial influence, as indicated by their maximum average weights, which suggested that they were notably more effective in cross-task transfer. Remarkably, a common feature among these top five modules was their association with tasks requiring reading comprehension and reasoning skillsβ€”attributes indicative of higher cognitive complexity. However, it is worth noting that none of the modules exhibited consistent improvement across all BBH tasks, as reflected in their average performance on all BBH tasks, which did not show a significant improvement compared to the original FLAN-T5-large, except for the Rank 2. The results underscore the advantages of composing diverse modules in LoraHub. + +How effective is the gradient-free optimization method? + +To assess the effectiveness of our gradient-free optimization method in correctly identifying the most suitable LoRA module for a given downstream task, we carried out an empirical study using the WikiTableQuestions (Pasupat & Liang, 2015) (WTQ) dataset. We strategically included a LoRA module that was specifically trained on the WTQ dataset into our pool of LoRA candidate modules, which originally stemmed from tasks exclusive to the Flan Collection. Subsequently, we designated WTQ as the targeted downstream task and computed the weights consistent with the methods employed in LoraHub learning. 
As an end result, the WTQ-specific LoRA module was awarded the highest weight, exemplifying the algorithm’s success in recognizing it as the most relevant. Moreover, the combined LoRA module demonstrated marginal superiority over the WTQ LoRA module. This underscores the claim that the gradient-free optimization method has the ability to proficiently select the optimal upstream LoRA module for an unseen task. + +# B Result of Best Results + +As shown in Table 4, compared to gradient-based parameter-efficient training methods like LoRA and IA3, our approach demonstrates superior performance in terms of best results over experimental runs. While it exhibits a noticeable lag behind the fully fine-tuning (FFT) method, which updates all parameters during training, this observation suggests that our proposed method has a promising upper limit. We anticipate that future research efforts can contribute to accelerating the optimization speed and further enhancing the efficacy of our approach. + +Table 4: Experimental results of several few-shot methods, including in-context learning (ICL), IA3 fine-tuning (IA3), LoRA tuning (LoRA), full fine-tuning (FFT) and our LoraHub learning (LoraHub) on the BBH benchmark with FLAN-T5-large as the base LLM. We denote algorithmic tasks with the superscript $\ S$ following previous work (Wu et al., 2023b). Note that we use 5 examples per task as the demonstration for all methods. The best (best) performance is reported as the maximum value obtained across three runs. + +
TaskICLbestIA3bestLoRAbestFFTbestLoraHubbest
Boolean Expressions62.758.060.765.360.7
Causal Judgement59.862.157.560.963.2
Date Understanding21.320.740.767.345.3
Disambiguation69.30.068.770.768.0
Dyck Languages2.04.725.333.32.7
Formal Fallacies59.352.056.756.059.3
Geometric Shapes20.015.328.739.318.7
Hyperbaton72.749.357.382.072.7
Logical DeductionS (five objects)39.332.741.343.340.0
Logical DeductionS (seven objects)42.034.042.746.046.0
Logical DeductionS (three objects)52.78.756.760.752.7
Movie Recommendation56.762.064.570.762.0
Multistep Arithmetic0.70.70.70.01.3
Navigate46.747.350.750.051.3
Object Counting34.735.342.038.036.7
Penguins in a Table43.545.741.337.047.8
Reasoning about Colored Objects41.341.340.738.744.7
Ruin Names20.725.342.066.028.7
Salient Translation Error Detection48.037.317.321.342.7
Snarks55.156.459.069.261.5
Sports Understanding56.755.358.758.762.7
Temporal Sequences26.718.731.348.721.3
Tracking Shuffled ObjectsS (five objects)12.012.016.020.016.7
Tracking Shuffled ObjectsS (seven objects)6.76.712.010.015.3
Tracking Shuffled ObjectsS (three objects)31.330.732.036.031.3
Web of Lies54.054.755.354.057.3
Word Sorting0.71.35.36.01.3
Best Performance (Average)38.432.140.946.241.2
 + +# C Result of non-instruction-tuned models + +Table 5: Comparison among different ranks for few-shot LoraHub learning with the backbone T5-large (Raffel et al., 2020) on the BBH benchmark. Note that the T5-large model achieved $0.0\%$ on all tasks under the zero-shot setting except Dyck Languages, where it scored $0.67\%$. + +
Task ↓ Rank β†’4avg4best16avg16best64avg64best
Boolean Expressions52.13 57.3350.6758.0047.4758.00
Causal Judgement52.4155.1749.6654.0250.8054.02
Date Understanding0.402.0014.4029.334.5310.00
Disambiguation10.0031.3326.9342.001.734.67
Dyck Languages0.400.670.400.670.402.00
Formal Fallacies48.4054.0046.9351.3346.9350.00
Geometric Shapes0.000.006.5332.671.477.33
Hyperbaton30.1350.0039.07 57.3332.9348.00
Logical DeductionS (five objects)5.2014.678.8019.331.336.67
Logical DeductionS (seven objects)6.4017.339.3319.333.4716.00
Logical DeductionS (three objects)14.4032.0021.7334.676.9315.33
Movie Recommendation7.0718.677.8722.001.206.00
Multistep Arithmetic two0.000.000.000.000.000.00
Navigate49.6054.6752.2756.6749.8752.00
Object Counting7.2018.0016.0021.3313.7326.67
Penguins ina Table6.5213.0410.4317.390.432.17
Reasoning about Colored Objects6.2710.005.0716.670.532.67
Ruin Names7.7313.3313.2028.005.7315.33
Salient Translation Error Detection0.000.001.738.670.000.00
Snarks21.2842.3149.4960.2616.1538.46
Sports Understanding46.5358.6746.8058.6746.5358.67
Temporal Sequences3.0713.336.5326.672.4012.00
Tracking Shuffled ObjectsS (five objects)5.2014.004.139.330.130.67
Tracking Shuffled ObjectsS (seven objects)2.6710.002.8014.003.208.00
Tracking Shuffled ObjectsS (three objects)3.7317.3316.2734.675.8726.67
Web of Lies48.5354.0054.0057.3354.6756.00
Word Sorting0.400.670.130.670.000.00
Average Performance per Task16.1424.1720.7830.7314.7621.43
+ +# D Result of larger model + +Table 6: Experimental results of zero-shot learning (Zero) and our few-shot LoraHub learning (LoraHub) on the BBH benchmark with FLAN-T5-xl as the base LLM. Note that we use 5 examples per task as the demonstration for both ICL and LoraHub. The average $( a v g )$ performance of LoraHub is computed over 5 runs with different random seeds, while the best (best) performance is reported as the maximum value obtained across these runs. We can see the trend of the results are similar to FLAN-T5-large. + +
TaskZeroLoraHub avgLoraHub best
Boolean Expressions52.058.763.3
Causal Judgement62.153.859.8
Date Understanding38.037.638.0
Disambiguation Qa0.020.5 54.7
Dyck Languages1.30.92.0
Formal Fallacies56.056.056.0
Geometric Shapes8.717.528.0
Hyperbaton45.353.556.7
Logical DeductionS (five objects)1.342.748.7
Logical DeductionS (seven objects)8.744.350.0
Logical DeductionS (three objects)0.756.461.3
Movie Recommendation2.062.866.0
Multistep Arithmetic Two0.00.40.7
Navigate50.750.750.7
Object Counting39.340.748.0
Penguins In A Table17.440.945.7
Reasoning About Colored Objects46.747.350.7
Ruin Names18.035.644.7
Salient Translation Error Detection44.745.148.7
Snarks60.360.861.5
Sports Understanding56.751.353.3
Temporal Sequences21.321.522.0
Tracking Shuffled ObjectsS (five objects)3.39.913.3
Tracking Shuffled ObjectsS (seven objects)5.37.38.7
Tracking Shuffled ObjectsS (three objects)7.321.731.3
Web Of Lies54.747.148.7
Word Sorting1.31.52.0
Average Performance per Task25.836.541.3
 + +# E Improving the Robustness of LoraHub + +In order to enhance the robustness of LoraHub, we explored a straightforward approach in the selection of LoRA module candidates. Specifically, we first identified 20 LoRA module candidates with the lowest loss on the few-shot examples. Our findings indicate a slight improvement in overall performance after applying the pre-filtering strategy. Since the primary instability in our approach arises from the selection of LoRA candidates, this method chooses a fixed set of LoRA candidates to ensure the stability of our approach. + +Table 7: The experimental results of loss-based pre-filtering. + +
TaskLoraHubavgLoraHubfilter
Boolean Expressions55.560.00
Causal Judgement54.352.9
Date Understanding32.933.3
Disambiguation45.262.7
Dyck Languages1.00.0
Formal Fallacies52.854.0
Geometric Shapes7.44.0
Hyperbaton62.864.0
Logical DeductionS (five objects)36.137.3
Logical DeductionS (seven objects)36.822.0
Logical DeductionS (three objects)45.756.0
Movie Recommendation55.368.0
Multistep Arithmetic0.40.7
Navigate47.149.3
Object Counting33.738.7
Penguins in a Table35.937.0
Reasoning about Colored Objects40.033.3
Ruin Names24.422.0
Salient Translation Error Detection36.024.0
Snarks56.952.66
Sports Understanding56.758.0
Temporal Sequences18.227.3
Tracking Shuffled ObjectsS (five objects)12.311.3
Tracking Shuffled ObjectsS (seven objects)7.78.0
Tracking Shuffled ObjectsS (three objects)29.232.7
Web of Lies50.146.0
Word Sorting1.11.3
34.735.4
Avg Performance Per Task
+ +# F Performance on General Important Task + +In our research, we have identified specific LoRA modules that exhibit significant impact when integrated into merged LoRAs. Our focus lies in assessing the performance of the top five task-related LoRAs on the BBH benchmark. The results indicate that these top LoRAs perform similarly or even worse than zero-shot in most cases. Only one of them stands out as significantly better than zero-shot. However, it’s worth noting that this performance is not as impressive as Lorahub. These findings support the idea that the merging process can improve overall performance. + +Table 8: Detailed experimental results of top five LoRA modules shown in Table 3 on BBH tasks. + +
TaskWIQA: LastRACE: RightWIQA: FirstADQAWebQA
Boolean Expressions52.6758.0052.6754.6753.33
Causal Judgement55.1763.2255.1757.4757.47
Date Understanding17.3319.3317.3316.6715.33
Disambiguation0.000.000.000.000.00
Dyck Languages0.670.670.671.331.33
Formal Fallacies51.3351.3351.3351.3351.33
Geometric Shapes8.0013.338.006.677.33
Hyperbaton16.6744.0016.671.336.00
Logical DeductionS (five objects)23.3328.0023.3319.3320.67
Logical DeductionS (seven objects)22.0026.0022.0010.6712.00
Logical DeductionS (three objects)0.679.330.670.000.00
Movie Recommendation63.3362.6763.3356.6763.33
Multistep Arithmetic0.670.670.670.670.67
Navigate47.3350.0047.3347.3347.33
Object Counting34.6734.0034.6735.3335.33
Penguins in a Table45.6541.3045.6539.1343.48
Reasoning about Colored Objects40.0037.3340.0031.3330.67
Ruin Names22.0021.3322.0017.3322.67
Salient Translation Error Detection36.6734.6736.6732.6737.33
Snarks52.5655.1352.5647.4452.56
Sports Understanding56.0058.6756.0055.3355.33
Temporal Sequences16.6717.3316.6712.6717.33
Tracking Shuffled ObjectsS (five objects)12.0012.0012.0010.6712.00
Tracking Shuffled ObjectsS (seven objects)6.676.676.676.676.67
Tracking Shuffled ObjectsS (three objects)20.6730.6720.6710.6725.33
Web of Lies54.6754.0054.6754.0054.00
Word Sorting1.331.331.331.331.33
Avg Performance per Task β–³ FLAN-T5-large28.10 1.1030.78 3.7828.10 1.1025.14 -1.8627.04 0.04
+ +![](images/f08459cc633da4d25e332908058acffc5a55cf3fadee5264d074582bf20749f5.jpg) +Figure 3: The influence of number of LoRA modules on 15 tasks from BBH, and each box is obtained from 5 separate runs. The horizontal axis shows the number of LoRA modules to be composed in LoraHub learning. + +# G Implementation details + +We implemented LoRA tuning using the Huggingface PEFT library (Mangrulkar et al., 2022), with the rank being set as 16. The gradient-free method was implemented using the open-source Nevergrad optimization library (Rapin & Teytaud, 2018), with a constraint that the absolute value of LoRA weights should not exceed 1.5. Originally, all coefficients of LoRA modules were set at zero. + +In our standard settings, we set the maximum number of iterations $K$ as 40. The same 5 examples were used during our LoraHub learning and the few-shot in-context learning. The hyperparameter $\alpha$ is set as 0.05. Regarding the hyperparameters for training candidate LoRA modules, we maintained consistency across all modules, setting the batch size at 64, the learning rate at $1 e - 4 ,$ and the number of training epochs at 10. + +# H Influence of Number of LoRA modules + +As shown in Figure 3, with an increase in the number of LoRA module candidates, there is a corresponding increase in the performance variance. Based on our in-depth analysis, the primary source of variance is not related to gradient-free optimization algorithms but rather associated with the LoRA candidate modules. In other words, once the candidates are determined, random seeds have minimal impact on the final performance. Hence, we posit that the observed instability primarily arises from the inherent challenge of balancing the quantity and quality of the LoRA module candidates. + +# I The Impact of Threshold + +In this section, we omitted the threshold in our implementation, and the results are summarized in Table 9. 
Our observations indicate that the removal of the threshold had minimal impact on the majority of tasks, underscoring the robustness of the gradient-free optimization algorithm itself in most cases. The algorithm efficiently identified reasonable ranges even without specific upper and lower bounds. However, three tasks, namely Date Understanding, Disambiguation and Hyperbaton, exhibited notable effects. The resulting performance decline led to an average decrease of $1.2\%$ compared to the setting with threshold. + +This highlights the significance of establishing a reasonable threshold to mitigate extreme scenarios. + +Table 9: The comparison between LoraHub and LoraHub without threshold. + +
TaskLoraHubavg with thresholdLoraHubavg without threshold
Boolean Expressions55.554.0
Causal Judgement54.354.8
Date Understanding32.917.7
Disambiguation45.240.6
Dyck Languages1.01.1
Formal Fallacies52.851.7
Geometric Shapes7.46.7
Hyperbaton62.855.5
Logical DeductionS (five objects)36.136.5
Logical DeductionS (seven objects)36.835.6
Logical DeductionS (three objects)45.749.9
Movie Recommendation55.359.3
Multistep Arithmetic0.40.7
Navigate47.147.6
Object Counting33.734.7
Penguins in a Table35.933.8
Reasoning about Colored Objects40.037.9
Ruin Names24.424.0
Salient Translation Error Detection36.037.1
Snarks56.951.6
Sports Understanding56.755.9
Temporal Sequences18.216.7
Tracking Shuffled ObjectsS (five objects)12.312.3
Tracking Shuffled ObjectsS (seven objects)7.78.5
Tracking Shuffled ObjectsS (three objects)29.229.8
Web of Lies50.150.3
Word Sorting1.11.3
Avg Performance Per Task34.733.5
\ No newline at end of file diff --git a/md/test/rzQGHXNReU/rzQGHXNReU.md b/md/test/rzQGHXNReU/rzQGHXNReU.md new file mode 100644 index 0000000000000000000000000000000000000000..58f20823a82596d380f3afb6d592f4f45df796e3 --- /dev/null +++ b/md/test/rzQGHXNReU/rzQGHXNReU.md @@ -0,0 +1,203 @@ +# RAFT: Adapting Language Model to Domain Specific RAG + +Tianjun Zhang \* +Department of Computer Science +UC Berkeley +Berkeley, CA 94720, USA +{tianjunz}@berkeley.edu +Shishir G. Patil, Naman Jain, Sheng Shen +Department of Computer Science +UC Berkeley +Berkeley, CA 94720, USA +{shishirpatil,naman_jain,sheng.s}@berkeley.edu +Matei Zaharia, Ion Stoica, Joseph E. Gonzalez +Department of Computer Science +UC Berkeley +Berkeley, CA 94720, USA +{matei,istoica,jegonzal}@berkeley.edu + +# Abstract + +Pretraining Large Language Models (LLMs) on large corpora of textual data is now a standard paradigm. When using these LLMs for many downstream applications, it is common to additionally incorporate new information into the pretrained model either through RAG-based-prompting, or finetuning. However, the best methodology to incorporate information remains an open question. In this paper, we present Retrieval Augmented Fine Tuning (RAFT), a training recipe which improves the model’s ability to answer questions in "open-book" in-domain settings. In training RAFT, given a question, and a set of retrieved documents, we train the model to ignore those documents that don’t help in answering the question, which we call, distractor documents. RAFT accomplishes this by citing verbatim the right sequence from the relevant document to help answer the question. This coupled with RAFT’s chain-of-thought-style response helps improve the model’s ability to reason. In domain specific RAG, RAFT consistently improves the model’s performance across PubMed, HotpotQA, and Gorilla datasets, presenting a post-training recipe to improve pre-trained LLMs to in-domain RAG. 
+ +# 1 Introduction + +Trained on vast quantities of public data, Large Language Models LLMs have achieved significant advances in a wide range of general knowledge reasoning tasks Brown et al. (2020); Wei et al. (2022). However, increasingly LLMs are being employed in specialized domains to support tasks ranging from code completion for specific software frameworks to question answering on specific document collections (e.g., legal or medical documents). In these settings, general knowledge reasoning is less critical and instead the primary goal is to maximize accuracy based on a given set of documents. Indeed, adapting LLMs to the specialized domains (e.g., recent news, enterprise private documents, or program resources constructed after the training cutoff) is essential to many emerging applications (Vu et al., 2023; Lazaridou et al., 2022) and is the focus of this work. + +This paper studies the following question – How do we adapt pre-trained LLMs for Retrieval Augmented Generation (RAG) in specialized domains? + +When it comes to adapting LLMs to specialized domains, we consider the following two candidates: in-context learning through Retrieval-Augmented Generation (RAG) and supervised fine-tuning. RAG based methods allow the LLM to reference the documents when answering questions. However, RAG based in-context learning methods fail to leverage the learning opportunity afforded by the fixed domain setting and early access to the test documents. Alternatively, supervised fine-tuning offers the opportunity to learn more general patterns in the documents and better align to end tasks and user preferences Zhou et al. (2023). However, existing fine-tuning based approaches either fail to leverage the documents at test time (don’t incorporate RAG) or fail to account for the imperfections in retrieval process during training. 
+ +![](images/f0db1ef1b057bc1bc9295a8166a6d296edcd745858acff236281a68693676d87.jpg) +Figure 1: How best to prepare for an Exam?(a) Fine-tuning based approaches implement "studying" by either directly "memorizing" the input documents or answering practice QA without referencing the documents. (b) Alternatively, in-context retrieval methods fail to leverage the learning opportunity afforded by the fixed domain and are equivalent to taking an open-book exam without studying. In contrast, our approach (c) RAFT leverages fine-tuning with question-answer pairs while referencing the documents in a simulated imperfect retrieval setting β€” thereby effectively preparing for the open-book exam setting. + +We can draw an analogy to an open-book exam. Existing in-context retrieval methods are equivalent to taking an open-book exam without studying. Alternatively, existing finetuning based approaches implement β€œstudying" by either directly β€œmemorizing" Xiong et al. (2023) the input documents or answering practice questions Wang et al. (2022) without referencing the documents. While these approaches leverage in-domain learning they fail to prepare for the open-book nature of the test setting. + +In this paper, we study how to combine instruction fine-tuning (IFT) with retrieval augmented generation (RAG). We propose a novel adaptation strategy – Retrieval-Augmented Fine Tuning (RAFT). RAFT specifically addresses the challenge of fine-tuning LLMs to both incorporate domain knowledge while also improving in-domain RAG performance. RAFT aims to not only enable models to learn domain-specific knowledge through fine-tuning, but also to ensure robustness against distracting retrieved information. This is achieved by training the models to understand the dynamics between the question (prompt), the domain-specific documents retrieved, and the right answer. 
Going back to our analogy to the open book exam, our approach is analogous to studying for an open-book exam by recognizing relevant, and irrelevant retrieved documents. + +In RAFT, we train the model to answer the question (Q) from Document(s) $( \mathrm { D ^ { * } } )$ to generate answer $( \mathrm { A } ^ { * } )$ , where $\mathsf { A } ^ { * }$ includes chain-of-thought reasoning Wei et al. (2022); Anthropic (2023), and in the presence of distractor documents $( D _ { k } )$ . We explain the methodology in Section 3 and analyze the sensitivity to the number of distractor documents $( k )$ at train- and test- time in Section 5. RAFT consistently outperforms Supervised-finetuning both withand without- RAG across PubMed Dernoncourt & Lee (2017), HotPot QA Yang et al. (2018), and HuggingFace Hub, Torch Hub, and Tensorflow Hub Gorilla datasets Patil et al. (2023), presenting a novel, yet simple technique to improve pre-trained LLMs for in-domain RAG. Our code is available at https://github.com/ShishirPatil/gorilla. + +# 2 LLMs for Open-Book Exam + +To understand our goal better, we expand on our analogy between training an LLM with the real-world setting of prepararing for an exam. + +Closed-Book Exam A closed book exam often refers to the scenario where the LLMs do not have access to any additional documents or references to answer the questions during the exam. For LLMs, this is equivalent to the scenario, for example, in which the LLM is used as a chatbot. In this scenario the LLM draws from the knowledge baked in during pre-training and supervised-finetuning to respond to the users’ prompt. + +![](images/d54b49279897f74e204c908fc173727448ff6a0c168d92f2063727d1a38456d9.jpg) +Figure 2: Overview of our RAFT method. 
The top-left figure depicts our approach of adapting LLMs to reading solution from a set of positive and distractor documents in contrast to standard RAG setup where models are trained based on the retriever outputs, which is a mixture of both memorization and reading. At test time, all methods follow the standard RAG setting, provided with a top-k retrieved documents in the context. + +Open Book Exam In contrast, we liken the open-book exam setting to the scenario in which the LLM can refer to external sources of information (e.g., a website or a book chapter). In such scenarios, typically, the LLM is paired with retriever which retrieves $^ { \prime } \mathbf { k } ^ { \prime }$ documents (or specific segments of the document) which are appended to the users’ prompt. It is only through these documents retrieved that the LLM gains access to β€œdomain-specific information”. As a result, we argue that the LLM’s performance in these settings, where it is trained as a general-purpose LLM is largely dependent on the quality of the retriever and how accurately the retriever can identify the most relevant piece of information. + +Domain-Specific Open-Book Exam In this paper, we focus on the narrower but increasingly popular domain than the general open book exam, which we call the domain-specific open-book exam. Here, we know apriori the domain in which the LLM will be tested. The LLM can respond to the users’ prompt using use any and all information from this specific domain, which it has been fine-tuned on. Examples of domain specific examples include enterprise documents, code repositories belonging to an organization, etc. In all these scenarios, the LLM will be used to respond to the questions, whose answers can be found within a collection of documents. The retrieval technique itself has little to no-impact on the mechanism (though it may impact the accuracy). 
This paper studies the domain-specific open-book setting and how to adapt a pretrained LLM to this specific domain, including how to make it more robust to a varying number of retrieved documents and distractors. + +# 3 RAFT + +In this section, we present RAFT, a novel way of training LLMs for domain-specific openbook exams. We first introduce the classical technique of supervised fine-tuning, followed with the key takeaways from our experiments. Then, we introduce RAFT , a modified version of general instruction tuning. Lastly, we provide an overview of the experiments to expect in the later sections. + +# Supervised Finetuning + +Consider the supervised fine-tuning (SFT) setting for a Question-Answer dataset. The formulation consists of the Dataset $( \bar { D } )$ from which a set of Question (Q) and corresponding answer $( A )$ pairs are derived or already available. In the classical SFT setting, the model is trained to improve it’s ability to answer the questions based on it’s knowledge - obtained either during pre-training, or during the SFT training phase. The model so trained can also + +Figure 3: RAFT prompt to help LLM evaluate its own generated reasoning and answers, contrasting them with the correct reasoning and answers. The LLM is prompted to identify errors in its reasoning and extract key insights for improvement. This figure specifically represents the β€˜GenerateExplanationβ€˜ step in the RAFT algorithm (Section 3). + +be used at test-time with Retrieval Augmented Generation (RAG) setting, where additional documents can be introduced in the prompt to help the model answer the question. 
This can be represented as follows: + +{Train: $\mathbf{Q} \to \mathbf{A}\}$, {0-shot Inference: $\mathbf{Q} \to \mathbf{A}\}$, {RAG Inference: $\mathbf{Q} + \mathbf{D} \to \mathbf{A}\}$ + +RAFT: Retrieval Augmented Fine-Tuning (RAFT), presents a novel recipe to prepare finetuning data to tailor the models for domain-specific open-book setting, equivalent to in-domain RAG. In RAFT, we prepare the training data such that each data point contains a question $(Q)$ , a set of documents $(D_{k})$ , and a corresponding Chain-of-thought style answer $(A^{*})$ generated from one of the document $(D^{*})$ . We differentiate between two types of documents: β€˜golden’ documents $(D^{*})$ i.e. the documents from which the answer to the question can be deduced, and β€˜distractor’ documents $(D_{i})$ that do not contain answer-relevant information. As an implementation detail, the β€˜golden’ document doesn’t need to be a single document, but can be more than one document, as is the case in HotpotQA Yang et al. (2018). Then, for $P$ fraction of the questions $(q_{i})$ in the dataset, we retain the golden document $(d_{i}^{*})$ along with distractor documents $(d_{k-1})$ . For $(1-P)$ fraction of the questions $(q_{i})$ in the dataset, we include no golden document and only include distractor documents $(d_{k})$ . We then fine-tune the language model using standard supervised training (SFT) technique, training it to generate answers from the provided documents and question. Fig. 2 illustrates the high-level design principle for RAFT. + +We demonstrate that our RAG approach trains the model to perform better RAG on the set of documents it is trained on i.e., in-domain. By removing the golden documents in some instances, we are compelling the model to memorize answers instead of deriving them from the context. 
The training data for RAFT is as follows, and an example training data can be seen in Fig. 3: + +$\mathbf{P}\%$ of data: $\mathbf{Q} + \mathbf{D}^{*} + \mathbf{D}_{1} + \mathbf{D}_{2} + \ldots + \mathbf{D}_{k} \to \mathbf{A}^{*}$ $(1-\mathbf{P})\%$ of data: $\mathbf{Q} + \mathbf{D}_{1} + \mathbf{D}_{2} + \ldots + \mathbf{D}_{k} \to \mathbf{A}^{*}$ + +Subsequently, for the test scenario, the model is provided with the Q and top-k documents retrieved by the RAG pipeline. Note that RAFT is independent of the retriever used. + +A key factor in enhancing training quality is the generation of a reasoning process, such as Chain-of-Thought, to explain the provided answers. The RAFT approach is similar: we demonstrate that creating a full reasoning chain and, in addition, clearly citing sources enhances the model’s accuracy in answering questions. In Fig. 3, we illustrate this setup. Generating the training data in this fashion involves presenting the model with a question, context, and verified answers, and then requesting it to form a reasoning chain that appropriately references the original context. + +For all the datasets in our experiments, we generate the answers using the technique described above. Note that the Gorilla APIBench dataset already includes reasoning in the answers. We provide an example of the generation step in Fig. 3; the detailed reasoning answer includes a citation from the original context inside ##begin_quote## and ##end_quote## as well as the detailed explanation on how to reach the conclusion based on the citations. We demonstrate that adding detailed reasoning paragraphs can help boost the model’s performance in our experiment section. 
+ +Table 1: RAFT improves RAG performance for all specialized domains: Across PubMed, HotPot, HuggingFace, Torch Hub, and Tensorflow Hub, we see that Domain-specific Finetuning significantly improves the performance of the base model, and RAFT consistently outperforms the existing domain-specific finetuning method with or without RAG. This suggests the need to train the model with context. We compare our model with LLaMA finetuning recipes, and provide GPT-3.5 for reference. + +
PubMedHotPotHuggingFaceTorch HubTensorFlow
GPT-3.5 + RAG71.6041.529.0860.2165.59
LLaMA2-7B56.50.540.2200
LLaMA2-7B + RAG58.80.0326.4308.6043.06
DSF59.76.3861.0684.9486.56
DSF + RAG71.64.4142.5982.8060.29
RAFT (LLaMA2-7B)73.3035.2874.0084.9586.86
+ +# 4 Evaluation + +We design our experiments to study how well RAFT performs compared to various baselines. We find that the RAFT-7B model (a finetuned version of LlaMA-2) is better at reading and extracting information from in-domain documents, than domain-specific finetuned model, and general-purpose model with RAG. As an ablation, we also demonstrate how important it is for the model to learn with Chain-of-Thought responses. In this section, we will first introduce all the datasets we used in the experiments, then all the baseline model/fine-tuning techniques that we benchmark against. + +Datasets In our experiments, we use the following datasets to evaluate our model and all baselines. We selected these datasets to represent both popular and diverse domains including Wikipedia, Coding/API documents, and question-answering on medical documents. Natural Questions (NQ) Kwiatkowski et al. (2019), Trivia QA Joshi et al. (2017) and HotpotQA Yang et al. (2018) are the open-domain question-answers based on Wikipedia, mainly focused on common knowledge (e.g., movies, sports, etc). HuggingFace, Torch Hub, and TensorFlow Hub are from the APIBench Patil et al. (2023) proposed in the Gorilla paper. These benchmarks measure how to generate the correct, functional, and executable API calls based on the documentation. PubMed QA Jin et al. (2019) is a question-answering dataset tailored only for biomedical-research question-answering. It mainly focuses on answering medical and biology questions based on a given set of documents. We would like to highlight that $( \mathrm { N Q } ,$ Trivia $\{ \hat { \mathrm { Q A } } ,$ and HotpotQA) are relatively general domain whereas the latter two domains are on domain-specific documents. 
+ +Baselines We consider the following baselines for our experiments: + +β€’ LlaMA2-7B-chat model with 0-shot prompting: this is the commonly used instruction-finetuned model for QA tasks, where we provide clearly written instructions, but no reference documentation. +β€’ LlaMA2-7B-chat model with RAG (Llama2 + RAG): similar to the previous setting, except here we include reference documents. This is a popular technique when dealing with domain-specific QA tasks. +β€’ Domain-Specific Finetuning with 0-shot prompting (DSF): Standard supervised finetuning, without documents in context. We find that it is mostly useful to align the answering style of the model as well as get familiar with the domain context. +β€’ Domain-Specific Finetuning with RAG $(\mathrm{DSF} + \mathrm{RAG})$: Equip a domain-specific finetuned-model with external knowledge using RAG. So, for the β€œknowledge” the model does not know, it can still refer to the context. + +Table 2: Ablation on Chain-of-Thought: The numbers of RAFT and RAFT without CoT. Results on various datasets show that adding CoT can significantly improve the performance of the finetuned model. With gains of $9.66\%$ and $14.93\%$ in the Hotpot QA and HuggingFace datasets respectively. + +
PubMedHotpotQAHuggingFaceTorch HubTensorFlow
RAFT w/o CoT68.3025.6259.0786.5683.21
RAFT73.3035.2874.0084.9586.86
+ +# 4.1 Results + +Using the above datasets and baselines, we evaluate our model RAFT and demonstrate the effectiveness of RAFT in Tab. 1. We see that RAFT consistently and significantly outperforms the baselines. Compared with the base Llama-2 instruction-tuned model, RAFT with RAG does much better in terms of extracting information as well as being robust towards distractors. The gain can be as big as $3 5 . 2 5 \%$ on Hotpot QA and $7 6 . 3 5 \%$ on Torch Hub evaluation. Compared with DSF on the specific dataset, our model does better at relying on the provided context to solve the problem. RAFT does much better on the tasks like Hotpot and HuggingFace datasets $( 3 0 . { \bar { 8 } } 7 \%$ on Hotpot and $3 1 . 4 1 \%$ on HuggingFace). Note that for PubMed QA, since it is a binary yes/no question, we don’t observe significant gains when we compare our model with $\mathrm { D } \mathbf { \dot { S } } \mathbf { \dot { F } } + \mathbf { R } \mathbf { A } \mathbf { G }$ . Even compared with a much larger and better model GPT-3.5, RAFT demonstrates significant advantages. + +Overall, the LLaMA-7B model, both with and without the RAG, performs poorly due to its answering style not aligning with the ground truth. By applying domain-specific tuning, we significantly enhance its performance. This process enables the model to learn and adopt the appropriate style of answering. However, introducing RAG to a domain-specifically fine-tuned (DSF) model doesn’t invariably lead to better outcomes. This might indicate that the model lacks training in context processing and extracting useful information from it. By incorporating our method, RAFT , we train the model not only to match its answering style with that required but also to improve its document processing capabilities. Consequently, our approach outperforms all others. 
+ +# 4.2 Effect of CoT + +We also conduct an analysis to evaluate the effectiveness of the Chain-of-Thought approach in enhancing the model’s performance. As indicated in Table 2, simply providing the answer to a question may not always be adequate. This approach can lead to a rapid decrease in loss, resulting in the model beginning to overfit. Incorporating a reasoning chain that not only guides the model to the answer but also enriches the model’s understanding can improve the overall accuracy and prevent overfitting to concise answers. In our experiments, integrating the Chain-of-Thought significantly enhances training robustness. We employ GPT-4-1106 to generate our Chain-of-Thought prompts and include an example of the prompt we used in Figure 3. + +# 4.3 Qualitative Analysis + +To illustrate the potential advantages of RAFT over the domain-specifically fine-tuned (DSF) approach, we present a comparative example in Figure 4. This example qualitatively demonstrates a scenario where the DSF model becomes confused by a question asking for the identity of a screenwriter. Instead of providing the correct name, it mistakenly cites one of the films written by the screenwriter. In contrast, the RAFT model accurately answers the question. This discrepancy suggests that training a model solely with question-answer pairs may impair its ability to derive relevant context from provided documents. The comparison underscores the importance of incorporating both standard instructional tuning and context comprehension into the training dataset to preserve and enhance the model’s ability to process text effectively. + +# HotPot QA + +Question: What screenwriter with credits for β€œEvolution” co-wrote a film starring Nicolas Cage and TΓ©a Leoni? +Documents: . . . David Weissman is a screenwriter and director. His film credits include β€œThe Family Man” (2000), β€œEvolution” (2001), and β€œWhen in Rome” (2010). 
The Family Man is a 2000 American romantic comedy-drama film directed by Brett Ratner, written by David Diamond and David Weissman, and starring Nicolas Cage and TΓ©a Leoni. + +![](images/0bcc3d38e5c0488526bd52fb267fd171f556aebd1617289e7668d2a9205a03f4.jpg) +Figure 4: Comparison of RAFT and DSF: On the HotPot QA dataset, we can see that DSF model extracts the wrong information from the context when the question is asking who is the screen writer and it answers a film name. RAFT manages to get the accurate results . + +4.4 Should we train the LLM always with the golden context for RAG? + +In our exploration of whether large language models (LLMs) should always be trained with the golden context for Retrieval-Augmented Generation (RAG), we address a key question: what proportion $( \mathrm { p \% ) }$ of the training data should include golden documents? Intuitively, one might assume that for effective training in reading and extracting information from context (e.g., RAG tasks), the golden document should always be included during training $\mathrm { ( P = 1 0 0 \% }$ ). However, our findings challenge this assumption: incorporating a portion of the training data without the golden document in the context $\mathrm { ( P = 8 0 \% }$ ) appears to enhance the model’s performance on RAG tasks. + +Figure 5 presents our investigation into the hyperparameter $\mathrm { P \% }$ , which represents the percentage of training instances that should include golden documents. We find that the optimal proportion varies across datasets, with $\mathrm { P \% }$ ranging from $4 0 \%$ , $6 0 \% ,$ and $1 0 0 \%$ . This indicates that training your LLM without the correct corresponding context at times can be beneficial for the downstream task of answering questions related to the documents. In our training setup, we include four distractor documents alongside the golden document, and at test time, we maintain this format by providing the golden document with four distractors. 
Our findings suggest that, for domain-specific RAG tasks, including a certain percentage of training data without the golden documents in the context proves to be advantageous. + +# 5 RAFT Generalizes to Top-K RAG + +We now study another important problem: How does the number of distractor documents in RAFT affect the model’s performance when augmented with top-k RAG results during evaluation? Previous research has highlighted the vulnerability of LLMs to irrelevant text (see studies (Shi et al., 2023a; Weston & Sukhbaatar, 2023; Liu et al., 2023)). This issue is particularly critical for LLMs $^ +$ RAG since top-k RAG is frequently employed at test time to ensure high recall. Such a scenario necessitates the model to have the ability to discern and disregard irrelevant content, focusing solely on pertinent information. + +![](images/8e086fb8cb885d22dfb048538cf47a1ac09dfb1873a60ba60c02530cd07d066d.jpg) +Figure 5: How many golden documents to involve? We study the hyperparameter $\mathrm { P \% }$ where it indicates how much portion of training data is with golden document. Results on NQ, TQA and HotpotQA suggest that mixing some amount of data that the golden document is not put in the context is helpful for in-domain RAG. + +# 5.1 Making Model Robust to top-K RAG + +To tackle the challenge of enhancing large language models’ (LLMs) ability to sift through irrelevant text within the retrieval pipeline, our analysis revealed that training solely with golden (highly relevant) documents can inadvertently diminish the model’s ability to discern and disregard irrelevant information. To address this, our algorithm, RAFT , adopts a strategy that integrates golden documents with a mix of irrelevant ones. 
This methodology prompts us to investigate the ideal fraction of distractor (irrelevant) documents to incorporate throughout the training process and to assess how well this training approach adapts to different volumes of documents encountered by the Retrieval-Augmented Generation (RAG) during the test phase. Our aim is to refine the balance between relevant and irrelevant information to strenghten the model’s efficiency in identifying and utilizing pertinent content. Notice that Sec 4.4 looked what what $\mathrm { P \% }$ of training data should include distractors, while in this section, we study test-time scenarios. + +Training with Distractor Documents To enhance the robustness of LLMs against irrelevant text in retrieved documents, we adopted a finetuning approach that incorporates both golden (highly relevant) documents and distractor (irrelevant) documents. The model was trained with varying numbers of distractor documents, but consistently evaluated using the top-3 documents obtained from the retriever - not to be confused with $p$ . Our findings, detailed in Fig. 6, reveal that finetuning with only the golden document frequently results in inferior performance compared to configurations that include a greater number of distractor documents. As we can see in the figure, the better performance for Natural Questions is training with $D ^ { * } + 3 D$ and it is $D ^ { * } + 1 D$ documents with Hotpot QA. This insight has been particularly beneficial for our algorithm, RAFT . In our experiments, we consistently employ a training setup consisting of one golden document alongside four distractor documents. + +Generalization to a variable number of test-time documents. We extended our research to examine the impact of different quantities of test-time documents on the model’s performance. 
Specifically, our experiments focused on assessing how models, trained with varying numbers of distractor documents, respond to changes in the number of documents presented at test time. The results, illustrated in Fig. 6, confirm that the inclusion of distractor documents during training indeed makes the model more resilient to fluctuations in the number of documents encountered during testing. This ability to maintain consistent performance despite variations in test-time document numbers further validates the robustness of our approach, RAFT . This finding underscores the importance of a well-calibrated training environment to prepare the model for a range of scenarios it may encounter in real-world. + +# 6 Related Works + +Retrieval-Augmented Language Models Retrieval-Augmented Language Models (RALMs) enhance LLMs by integrating a retrieval module that sources relevant information from external knowledge bases, significantly improving performance across various NLP tasks, including language modeling (Guu et al., 2020; Borgeaud et al., 2022; Khandelwal et al., + +![](images/12b8e955ae9a0307c0a7f13890daa53d74edc9d6d0f2d3be9e950103c883cfdd.jpg) +Figure 6: Test-Time Documents Varying: To analyze how robust RAFT is to varying number of test-time documents, we study three domains – NQ, Trivia QA and HotPot QA. In ${ \mathrm { N Q } } ,$ we find that training with 4 documents leads to optimal performance, and this changes to 3 and 2 for for Trivia QA and HotPot QA respectively. However, we see that training with only golden documents leads to poor performance. + +2019; Shi et al., 2023d; Lin et al., 2023b; Shi et al., 2023c; Asai et al., 2023; Xu et al., 2023; Wang et al., 2023) and open-domain question answering (Izacard et al., 2023; Lewis et al., 2020). 
For instance, Atlas (Izacard et al., 2023) fine-tunes T5 models with the retriever, treating documents as latent variables, while RETRO (Borgeaud et al., 2022) modifies the decoder-only architecture to include retrieved texts and conducts pre-training from scratch. kNN-LM (Khandelwal et al., 2019) interpolates between the LM’s next token distribution and distributions computed from retrieved tokens at inference. (Shi et al., 2023d; Ram et al., 2023) assume black-box access to an LLM, combining it with either off-the-shelf or fine-tuned retriever. + +Memorization A key question around large neural language models is whether they truly β€œunderstand” text (Feldman, 2020; Power et al., 2022) or simply rely on surface pattern memorization (Carlini et al., 2019; TΓ€nzer et al., 2022). (Feldman, 2020; Carlini et al., 2019; 2022) develop methodologies to quantify the extent of memorization in neural models. (Brown et al., 2020; Power et al., 2022; Liu et al., 2022) further explored how memorization impacts the models’ generalization capabilities. (Carlini et al., 2021; Shi et al., 2023b) demonstrated the ability of language models to memorize and regurgitate training data, raising significant privacy concerns (Kandpal et al., 2022; Pan et al., 2020). + +Finetuning for RAG More recently, several papers have been exploring the idea of finetuning a pretrained LLM to be better at RAG tasks (Lin et al., 2023a; Wang et al., 2023; Xu et al., 2023; Liu et al., 2024). These works focus on constructing a combination of finetuning dataset for RAG and train a model to perform well on these tasks. In particular, in their settings, at test time, the domain or documents can be different than the training time; whereas our paper studies a slightly opposite scenario where we only care about testing the LLM on the same set of documents. 
+ +# 7 Conclusion + +RAFT is a training strategy designed to enhance the model’s performance in answering questions within a specific domain, in "open-book" settings. We highlight several crucial design decisions, such as training the model alongside distractor documents, organizing the dataset so a portion lacks golden documents in their context, and formulating answers in a chain-of-thought manner with direct quotations from the relevant text. Our evaluations on PubMed, HotpotQA, and Gorilla API Bench underline RAFT’s significant potential. + +# References + +Anthropic. Prompt engineering for claude’s long context window. 2023. + +Asai, A., Wu, Z., Wang, Y., Sil, A., and Hajishirzi, H. Self-rag: Learning to retrieve, generate, and critique through self-reflection. arXiv preprint arXiv:2310.11511, 2023. + +Borgeaud, S., Mensch, A., Hoffmann, J., Cai, T., Rutherford, E., Millican, K., Van Den Driessche, G. B., Lespiau, J.-B., Damoc, B., Clark, A., et al. Improving language models by retrieving from trillions of tokens. In International conference on machine learning, pp. 2206–2240. PMLR, 2022. +Brown, T., Mann, B., Ryder, N., Subbiah, M., Kaplan, J. D., Dhariwal, P., Neelakantan, A., Shyam, P., Sastry, G., Askell, A., et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877–1901, 2020. +Carlini, N., Liu, C., Erlingsson, Ú., Kos, J., and Song, D. The secret sharer: Evaluating and testing unintended memorization in neural networks. In 28th USENIX Security Symposium (USENIX Security 19), pp. 267–284, 2019. +Carlini, N., Tramer, F., Wallace, E., Jagielski, M., Herbert-Voss, A., Lee, K., Roberts, A., Brown, T., Song, D., Erlingsson, U., et al. Extracting training data from large language models. In 30th USENIX Security Symposium (USENIX Security 21), pp. 2633–2650, 2021. +Carlini, N., Ippolito, D., Jagielski, M., Lee, K., Tramer, F., and Zhang, C. Quantifying memorization across neural language models. 
In The Eleventh International Conference on Learning Representations, 2022. +Dernoncourt, F. and Lee, J. Y. Pubmed 200k rct: a dataset for sequential sentence classification in medical abstracts. arXiv preprint arXiv:1710.06071, 2017. +Feldman, V. Does learning require memorization? a short tale about a long tail. In Proceedings of the 52nd Annual ACM SIGACT Symposium on Theory of Computing, pp. 954–959, 2020. +Guu, K., Lee, K., Tung, Z., Pasupat, P., and Chang, M. Retrieval augmented language model pre-training. In International conference on machine learning, pp. 3929–3938. PMLR, 2020. +Izacard, G., Lewis, P., Lomeli, M., Hosseini, L., Petroni, F., Schick, T., Dwivedi-Yu, J., Joulin, A., Riedel, S., and Grave, E. Atlas: Few-shot learning with retrieval augmented language models. Journal of Machine Learning Research, 24(251):1–43, 2023. URL http: //jmlr.org/papers/v24/23-0037.html. +Jin, Q., Dhingra, B., Liu, Z., Cohen, W. W., and Lu, X. Pubmedqa: A dataset for biomedical research question answering. arXiv preprint arXiv:1909.06146, 2019. +Joshi, M., Choi, E., Weld, D. S., and Zettlemoyer, L. Triviaqa: A large scale distantly supervised challenge dataset for reading comprehension. arXiv preprint arXiv:1705.03551, 2017. +Kandpal, N., Wallace, E., and Raffel, C. Deduplicating training data mitigates privacy risks in language models. In International Conference on Machine Learning, pp. 10697–10707. PMLR, 2022. +Khandelwal, U., Levy, O., Jurafsky, D., Zettlemoyer, L., and Lewis, M. Generalization through memorization: Nearest neighbor language models. arXiv preprint arXiv:1911.00172, 2019. +Kwiatkowski, T., Palomaki, J., Redfield, O., Collins, M., Parikh, A., Alberti, C., Epstein, D., Polosukhin, I., Devlin, J., Lee, K., et al. Natural questions: a benchmark for question answering research. Transactions of the Association for Computational Linguistics, 7:453–466, 2019. +Lazaridou, A., Gribovskaya, E., Stokowiec, W., and Grigorev, N. 
Internet-augmented language models through few-shot prompting for open-domain question answering. arXiv preprint arXiv:2203.05115, 2022. +Lewis, P., Perez, E., Piktus, A., Petroni, F., Karpukhin, V., Goyal, N., KΓΌttler, H., Lewis, M., Yih, W.-t., RocktΓ€schel, T., et al. Retrieval-augmented generation for knowledge-intensive nlp tasks. Advances in Neural Information Processing Systems, 33:9459–9474, 2020. +Lin, X. V., Chen, X., Chen, M., Shi, W., Lomeli, M., James, R., Rodriguez, P., Kahn, J., Szilvasy, G., Lewis, M., et al. Ra-dit: Retrieval-augmented dual instruction tuning. arXiv preprint arXiv:2310.01352, 2023a. +Lin, X. V., Chen, X., Chen, M., Shi, W., Lomeli, M., James, R., Rodriguez, P., Kahn, J., Szilvasy, G., Lewis, M., et al. Ra-dit: Retrieval-augmented dual instruction tuning. arXiv preprint arXiv:2310.01352, 2023b. +Liu, N. F., Lin, K., Hewitt, J., Paranjape, A., Bevilacqua, M., Petroni, F., and Liang, P. Lost in the middle: How language models use long contexts. arXiv preprint arXiv:2307.03172, 2023. +Liu, Z., Kitouni, O., Nolte, N. S., Michaud, E., Tegmark, M., and Williams, M. Towards understanding grokking: An effective theory of representation learning. Advances in Neural Information Processing Systems, 35:34651–34663, 2022. +Liu, Z., Ping, W., Roy, R., Xu, P., Shoeybi, M., and Catanzaro, B. Chatqa: Building gpt-4 level conversational qa models. arXiv preprint arXiv:2401.10225, 2024. +Pan, X., Zhang, M., Ji, S., and Yang, M. Privacy risks of general-purpose language models. In 2020 IEEE Symposium on Security and Privacy (SP), pp. 1314–1331. IEEE, 2020. +Patil, S. G., Zhang, T., Wang, X., and Gonzalez, J. E. Gorilla: Large language model connected with massive apis. arXiv preprint arXiv:2305.15334, 2023. +Power, A., Burda, Y., Edwards, H., Babuschkin, I., and Misra, V. Grokking: Generalization beyond overfitting on small algorithmic datasets. arXiv preprint arXiv:2201.02177, 2022. 
+Ram, O., Levine, Y., Dalmedigos, I., Muhlgay, D., Shashua, A., Leyton-Brown, K., and Shoham, Y. In-context retrieval-augmented language models. arXiv preprint arXiv:2302.00083, 2023. +Shi, F., Chen, X., Misra, K., Scales, N., Dohan, D., Chi, E. H., SchΓ€rli, N., and Zhou, D. Large language models can be easily distracted by irrelevant context. In International Conference on Machine Learning, pp. 31210–31227. PMLR, 2023a. +Shi, W., Ajith, A., Xia, M., Huang, Y., Liu, D., Blevins, T., Chen, D., and Zettlemoyer, L. Detecting pretraining data from large language models. arXiv preprint arXiv:2310.16789, 2023b. +Shi, W., Min, S., Lomeli, M., Zhou, C., Li, M., Lin, V., Smith, N. A., Zettlemoyer, L., Yih, S., and Lewis, M. In-context pretraining: Language modeling beyond document boundaries. arXiv preprint arXiv:2310.10638, 2023c. +Shi, W., Min, S., Yasunaga, M., Seo, M., James, R., Lewis, M., Zettlemoyer, L., and Yih, W.-t. Replug: Retrieval-augmented black-box language models. arXiv preprint arXiv:2301.12652, 2023d. +TΓ€nzer, M., Ruder, S., and Rei, M. Memorisation versus generalisation in pre-trained language models. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 7564–7578, 2022. +Vu, T., Iyyer, M., Wang, X., Constant, N., Wei, J., Wei, J., Tar, C., Sung, Y.-H., Zhou, D., Le, Q., et al. Freshllms: Refreshing large language models with search engine augmentation. arXiv preprint arXiv:2310.03214, 2023. +Wang, B., Ping, W., McAfee, L., Xu, P., Li, B., Shoeybi, M., and Catanzaro, B. Instructretro: Instruction tuning post retrieval-augmented pretraining. arXiv preprint arXiv:2310.07713, 2023. +Wang, Y., Kordi, Y., Mishra, S., Liu, A., Smith, N. A., Khashabi, D., and Hajishirzi, H. Self-instruct: Aligning language models with self-generated instructions. arXiv preprint arXiv:2212.10560, 2022. +Wei, J., Wang, X., Schuurmans, D., Bosma, M., Xia, F., Chi, E., Le, Q. V., Zhou, D., et al. 
Chain-of-thought prompting elicits reasoning in large language models. Advances in Neural Information Processing Systems, 35:24824–24837, 2022. +Weston, J. and Sukhbaatar, S. System 2 attention (is something you might need too). arXiv preprint arXiv:2311.11829, 2023. +Xiong, W., Liu, J., Molybog, I., Zhang, H., Bhargava, P., Hou, R., Martin, L., Rungta, R., Sankararaman, K. A., Oguz, B., et al. Effective long-context scaling of foundation models. arXiv preprint arXiv:2309.16039, 2023. +Xu, P., Ping, W., Wu, X., McAfee, L., Zhu, C., Liu, Z., Subramanian, S., Bakhturina, E., Shoeybi, M., and Catanzaro, B. Retrieval meets long context large language models. arXiv preprint arXiv:2310.03025, 2023. +Yang, Z., Qi, P., Zhang, S., Bengio, Y., Cohen, W. W., Salakhutdinov, R., and Manning, C. D. Hotpotqa: A dataset for diverse, explainable multi-hop question answering. arXiv preprint arXiv:1809.09600, 2018. +Zhou, C., Liu, P., Xu, P., Iyer, S., Sun, J., Mao, Y., Ma, X., Efrat, A., Yu, P., Yu, L., et al. Lima: Less is more for alignment. arXiv preprint arXiv:2305.11206, 2023. \ No newline at end of file diff --git a/md/train/fy4ZBWxYbIo/fy4ZBWxYbIo.md b/md/train/fy4ZBWxYbIo/fy4ZBWxYbIo.md new file mode 100644 index 0000000000000000000000000000000000000000..8ce8283677ad2d9e9a1dca0a1b9c12bbe840df5c --- /dev/null +++ b/md/train/fy4ZBWxYbIo/fy4ZBWxYbIo.md @@ -0,0 +1,239 @@ +# A Workflow for Offline Model-Free Robotic Reinforcement Learning + +Aviral Kumar?,1, Anikait Singh?,1, Stephen $\mathbf { T i a n } ^ { 1 }$ , Chelsea $\mathbf { F i n n ^ { 2 } }$ , Sergey Levine1 1 UC Berkeley, 2 Stanford University (βˆ— Equal Contribution) aviralk@berkeley.edu, asap7772@berkeley.edu + +Abstract: Offline reinforcement learning (RL) enables learning control policies by utilizing only prior experience, without any online interaction. This can allow robots to acquire generalizable skills from large and diverse datasets, without any costly or unsafe online data collection. 
Despite recent algorithmic advances in offline RL, applying these methods to real-world problems has proven challenging. Although offline RL methods can learn from prior data, there is no clear and well-understood process for making various design choices, from model architecture to algorithm hyperparameters, without actually evaluating the learned policies online. In this paper, our aim is to develop a practical workflow for using offline RL analogous to the relatively well-understood workflows for supervised learning problems. To this end, we devise a set of metrics and conditions that can be tracked over the course of offline training, and can inform the practitioner about how the algorithm and model architecture should be adjusted to improve final performance. Our workflow is derived from a conceptual understanding of the behavior of conservative offline RL algorithms and cross-validation in supervised learning. We demonstrate the efficacy of this workflow in producing effective policies without any online tuning, both in several simulated robotic learning scenarios and for three tasks on two distinct real robots, focusing on learning manipulation skills with raw image observations with sparse binary rewards. Explanatory video and additional content can be found at sites.google.com/view/offline-rl-workflow. + +Keywords: workflow, offline RL, offline tuning + +# 1 Introduction + +Offline reinforcement learning (RL) can in principle make it possible to convert existing large datasets of robotic experience into effective policies, without the need for costly or dangerous online interaction for each training run. While offline RL algorithms have improved significantly [1, 2, 3, 4, 5], applying such methods to real-world robotic control problems presents a number of major challenges. 
In standard online RL, any intermediate policy found during training is executed in the environment to collect more experience, which naturally allows for an evaluation of the policy performance. This ability to evaluate intermediate policies lets practitioners use β€œbrute + +![](images/93b250387a2633b12ccbb56017457a77edb5b8bb68d22310145c16c8258e9664.jpg) +Figure 1: Our proposed workflow aims to detect overfitting and underfitting, and provides guidelines for addressing these issues via policy selection, regularization, and architecture design. We evaluate this workflow on two real-world robotic systems and simulation domains, and we find it to be effective. + +force” to evaluate the effects of various design factors, such as model capacity and expressivity, the number of training steps, and so forth, and facilitates comparatively straightforward tuning. In contrast, offline RL methods do not have access to real-world on-policy rollouts for evaluating the learned policy. Thus, in order for these methods to be truly practical for real-world applications, we not only require effective algorithms, but also an effective workflow: a set of protocols and metrics that can be used to reliably and consistently adjust model capacity, regularization, etc in offline RL to obtain policies with good performance, without requiring real-world rollouts for tuning. + +A number of prior works have studied model selection in offline RL by utilizing off-policy evaluation (OPE) methods [6] to estimate policy performance. These methods can be based either on model or value learning [7, 8, 9, 10] or importance sampling [6, 11, 12, 13]. However, developing reliable OPE methods is itself an open problem, and modern OPE methods themselves suffer from hyperparameter selection challenges (see Fu et al. [14] for an empirical study). 
Moreover, accurate off-policy evaluation is likely not necessary to simply tune algorithms for best performance – we do not need a precise estimate of how good our policy is, but rather a workflow that enables us to best improve it by adjusting various algorithm hyperparameters. + +In this paper, we devise a practical workflow for selecting regularizers, model architectures, and policy checkpoints for offline RL methods in robotic learning settings. We focus on a specific class of conservative offline RL algorithms [15, 2] that regularize the Q-function, but also show that our workflow can be effectively applied to policy constraint methods [16]. Our aim is not to focus on complete off-policy evaluation or to devise a new approach for off-policy evaluation, but rather to adopt a strategy similar to the one in supervised learning. Analogously to how supervised learning practitioners can detect overfitting and underfitting by tracking training and validation losses, and then adjust hyperparameters based on these metrics, our workflow (see Figure 1 for a schematic) first defines and characterizes overfitting and underfitting, proposes metrics and conditions that users can track to determine if an offline RL exhibits overfitting or underfitting, and then utilizes these metrics to inform design decisions pertaining to neural net architectures, regularization, and early stopping. This protocol is intended to act as a β€œuser’s manual” for a practitioner, with guidelines for how to modify algorithm parameters for best results without real-world evaluation rollouts. + +The primary contribution of this paper is a simple yet effective workflow for robotic offline RL. We propose metrics and protocols to assist practitioners in selecting policy checkpoints, regularization parameters, and model architectures for conservative offline RL algorithms such as CQL [2] and BRAC [16]. 
We empirically verify the efficacy of our proposed workflow on simulated robotic manipulation problems as well as three real-world robotic manipulation problems on two different robots, with diverse objects, pixel observations, and sparse binary reward supervision. Experimentally, we evaluate our method on two real-world robots (the Sawyer and WidowX robots), and one realistic simulated tasks. Our approach is effective in all of these cases, and on two tasks with the Sawyer robot that initially fail completely, our workflow improves the success rate to $70 \%$ . + +# 2 Preliminaries, Background, and Definitions + +The goal in RL is to optimize the infinite horizon discounted return $\begin{array} { r } { R = \sum _ { t = 0 } ^ { \infty } \gamma ^ { t } r ( \mathbf { s } _ { t } , \mathbf { a } _ { t } ) } \end{array}$ , where $r ( s , a )$ represents the reward function evaluated at a state-action pair $( \mathbf { s } , \mathbf { a } )$ . We operate in the offline RL setting and are provided with a fixed dataset $\mathcal { D } = \{ ( \mathbf { s } , \mathbf { a } , r ( \mathbf { s } , \mathbf { a } ) , \mathbf { s } ^ { \prime } ) \}$ , consisting of transition tuples obtained from rollouts under a behavior policy $\pi _ { \beta } ( \mathbf { a } | \mathbf { s } )$ . Our goal is to obtain the best possible policy by only training on this fixed offline dataset $\mathcal { D }$ , with no access to online rollouts. We focus on conservative offline RL algorithms that modify the $\mathrm { Q }$ -function to penalize distributional shift, with most experiments on CQL [2], though we also adapt our workflow to BRAC [16] in Appendix F.1. + +Conservative Q-learning (CQL). 
The actor-critic formulation of CQL trains a Q-function $Q_{\theta}(\mathbf{s}, \mathbf{a})$ with a separate policy $\pi_{\phi}(\mathbf{a}|\mathbf{s})$, which maximizes the expected Q-value $\mathbb{E}_{\mathbf{s} \sim \mathcal{D}, \mathbf{a} \sim \pi_{\phi}}\left[Q_{\theta}(\mathbf{s}, \mathbf{a})\right]$ like other standard actor-critic deep RL methods [17, 18, 19]. However, in addition to the standard TD error $\mathcal{L}_{\mathrm{TD}}(\theta)$ (in blue below), CQL applies a regularizer $\mathcal{R}(\theta)$ (in red below) to prevent overestimation of Q-values for out-of-distribution (OOD) actions. This term minimizes the Q-values under a distribution $\mu(\mathbf{a}|\mathbf{s})$, which is automatically chosen to pick actions $\mathbf{a}$ with high Q-values $Q_{\theta}(\mathbf{s}, \mathbf{a})$, and counterbalances this term by maximizing the values of the actions in the dataset:

$$
\min_{\theta} \;\; \alpha \underbrace{\left( \mathbb{E}_{\mathbf{s} \sim \mathcal{D},\, \mathbf{a} \sim \mu(\cdot|\mathbf{s})}\left[ Q_{\theta}(\mathbf{s}, \mathbf{a}) \right] - \mathbb{E}_{\mathbf{s}, \mathbf{a} \sim \mathcal{D}}\left[ Q_{\theta}(\mathbf{s}, \mathbf{a}) \right] \right)}_{\mathcal{R}(\theta)} \; + \; \underbrace{\frac{1}{2}\, \mathbb{E}_{\mathbf{s}, \mathbf{a}, \mathbf{s}' \sim \mathcal{D}}\left[ \left( Q_{\theta}(\mathbf{s}, \mathbf{a}) - B^{\pi}\bar{Q}(\mathbf{s}, \mathbf{a}) \right)^{2} \right]}_{\mathcal{L}_{\mathrm{TD}}(\theta)} \tag{1}
$$

where $B^{\pi}\bar{Q}(\mathbf{s}, \mathbf{a})$ is the Bellman backup operator with a delayed target Q-function, $\bar{Q}$: $B^{\pi}\bar{Q}(\mathbf{s}, \mathbf{a}) :=$ $r(\mathbf{s}, \mathbf{a}) + \gamma \mathbb{E}_{\mathbf{a}' \sim \pi(\mathbf{a}'|\mathbf{s}')}[\bar{Q}(\mathbf{s}', \mathbf{a}'
) ]$ . In practice, CQL computes $\mu ( \mathbf { a } | \mathbf { s } )$ using actions sampled from the policy $\pi _ { \phi } ( \mathbf { a } | \mathbf { s } )$ . More discussion of CQL is in Appendix B. In this paper, we will utilize CQL as a base algorithm that our workflow intends to tune, but we also extend it to BRAC. + +Overfitting and underfitting in CQL. Conservative offline RL algorithms [2, 20] like CQL can be sensitive to design choices, including number of gradient steps for training [21, 22] and network capacity. These challenges are also present in supervised learning, but supervised learning methods benefit from a simple and powerful workflow that involves using training error and validation error to characterize overfitting and underfitting. A practitioner can then make tuning choices based on these characterizations. To derive an analogous workflow for offline RL, we first ask: what do overfitting and underfitting actually mean for the case of conservative offline RL? + +To define overfitting and underfitting generically for any conservative offline RL method, we consider an abstract optimization formulation for such methods [2]: + +$$ +\pi ^ { * } : = \arg \operatorname* { m a x } _ { \pi } ~ J _ { \mathcal { D } } ( \pi ) - \alpha D ( \pi , \pi _ { \beta } ) +$$ + +(Conservative offline RL). + +$J _ { \mathcal { D } } ( \pi )$ denotes the average return of policy $\pi$ in the empirical MDP induced by the transitions in the offline dataset $\mathcal { D }$ , and $D ( \pi , \pi _ { \beta } )$ denotes a closeness constraint to the behavior policy, effectively applied by the offline RL method. Our definition of conservative offline RL requires that this + +Table 1: Summary of train error, test error and our definitions of overfitting and underfitting in supervised learning and conservative offline RL methods. We will propose metrics to measure these phenomena in a purely offline manner and recommend how to tune the underlying method accordingly. + +
| Quantity | Supervised Learning | Conservative Offline RL |
| --- | --- | --- |
| Test error | Loss $\mathcal{L}$ evaluated on test data, $\mathcal{D}_{\text{test}}$ | Performance of policy, $J(\pi)$ |
| Train error | Loss $\mathcal{L}$ evaluated on train data, $\mathcal{D}_{\text{train}}$ | Objective in Equations 2, 1 |
| Overfitting | $\mathcal{L}(\mathcal{D}_{\text{train}})$ low, $\mathcal{L}(\mathcal{D}_{\text{val}})$ high, where $\mathcal{D}_{\text{val}}$ is a validation set drawn i.i.d. as $\mathcal{D}_{\text{train}}$ | Training objective in Equation 1 is extremely low, low value of $J(\pi)$ |
| Underfitting | high value of train error $\mathcal{L}(\mathcal{D}_{\text{train}})$ | Training objective in Equation 1 is extremely high, low value of $J(\pi)$ |
+ +divergence be computed in expectation over the state visitation distribution of the learned policy $\pi$ in the empirical MDP as discussed in Appendix F.1. For example, Equation 1 translates to utilizing $\begin{array} { r } { D _ { \mathrm { C Q L } } ( p , \bar { q } ) : = \sum _ { \mathbf { x } } p ( \mathbf { x } ) ( p ( \mathbf { x } ) / q ( \mathbf { x } ) - \bar { 1 } ) } \end{array}$ in Equation 2 (see Theorem 3.5 in Kumar et al. [2] for a proof). The training loss is discussed in Equations 1 and 2 and the test loss is equal to the negative of the actual return $J ( \pi )$ of the learned policy. Analogously to supervised learning, we can use the notion of train and test error to define overfitting and underfitting in offline RL, as discussed in Table 1. However, note that the conditions summarized in Table 1 are not measurable completely offline. Precisely estimating if a run of an offline RL method overfits or underfits requires evaluating the learned policy via interaction with the real-world environment. In Section 3, our goal will be to devise offline metrics for characterizing overfitting that do not have this requirement. We will tailor our study specifically towards CQL, though we extend it to BRAC in Appendix F.1. A similar procedure could be devised for other offline RL methods, but we leave this for future work. + +# 3 Detecting Overfitting and Underfitting in Conservative Offline RL + +In standard supervised learning, we can determine if a method overfits or underfits by comparing the training loss to the same loss function evaluated on a held-out validation dataset, which serves as a β€œproxy” test dataset. In contrast, the return of the learned policy $J ( \pi )$ in RL does not have a direct proxy that can be computed offline. Thus, our goal is to identify offline metrics and conditions that allow us to measure overfitting and underfitting in conservative offline RL, with a focus on CQL. 
We also adapt these conditions to BRAC [16], a policy-constraint method in Appendix F.2. + +Detecting overfitting in CQL. Our definition of overfitting (Table 1) corresponds to a low value for the training loss (Equation 1), but poor actual policy performance $J ( \pi )$ . To detect this, we analyze the time series of the estimated Q-values averaged over the dataset samples $( \mathbf { s } , \mathbf { a } , r , \mathbf { s } ^ { \prime } ) \in \mathcal { D }$ over the course of training with a large number of gradient steps. A run is labeled as overfitting if we see that the expected dataset Q-value exhibits a non-monotonic trend: if the average Q-values first increase and then decrease as shown in the figure on the right. Additionally, we would see that training loss in Equation 1 eventually becomes very low. Why do we see such a trend in the average dataset $\mathbf { Q }$ -value? Since CQL selectively penalizes the average Qvalue under the distribution $\mu ( \mathbf { a } | \mathbf { s } )$ supported on actions with large Q-values, we would expect the Q-values on states from the dataset s $\sim \mathcal { D }$ and the learned $\mathbf { a } \sim \pi ( \cdot | \mathbf { s } )$ to be small since the policy is trained to maximize the Q-function as well. This in turn would lead to an eventual reduction in the average Q-value on dataset actions, $\mathbb { E } _ { \mathbf { s } , \mathbf { a } \sim \mathcal { D } } [ Q _ { \theta } ( \mathbf { s } , \mathbf { a } ) ]$ . This would be visible after sufficiently many steps of training, when values have propagated via Bellman backups in Equation 1 giving rise to the non-monotonic trend. If such a trend is observed, this raises two questions, as we discuss next. 
+ +![](images/a834916e9152742e6ff0bc3376e3cee94d030ab397761d3b7ccebf41e7204020.jpg) + +What does a low average $\varrho$ -value $\mathbb { E } _ { \mathbf { s } , \mathbf { a } \sim \mathcal { D } } [ Q _ { \theta } ( \mathbf { s } , \mathbf { a } ) ]$ imply about $J ( \pi )$ ? We show in Appendix A that, in principle, CQL training (Equation 1) should never learn Q-values smaller than the dataset Monte-Carlo return, and the $\mathrm { Q }$ -values should increase unless the learned policy $\pi$ is better than $\pi _ { \beta }$ . Intuitively, this is because the objective in Equation 1 aims to also maximize the average dataset + +Q-value and thus the Q-values for the behavior policy are not underestimated in expectation. Now, if the policy optimizer finds a policy that attains a smaller learned Q-value than the dataset return, the policy can always be updated further towards the behavior policy so as to raise the Q-value. Therefore, Q-values can only decrease when the policy found by CQL is better than the behavior policy. We formalize this intuition in Appendix A in Theorem A.1. Thus, a low $\mathrm { Q }$ -value on $( \mathbf { s } , \mathbf { a } ) \in$ $\mathcal { D }$ indicates that the Q-function predicts extremely small Q-values on actions sampled from $\mu ( \mathbf { a } | \mathbf { s } )$ . Typically, this would mean the highest Q-value actions a at a state $\mathbf { s } \in \mathcal { D }$ are those sampled from the offline dataset, drawn from the behavior policy. Thus, policy optimization, which aims to maximize the Q-value, would make $\pi ( \mathbf { a } | \mathbf { s } )$ closer to the behavior policy $\pi _ { \beta } ( \mathbf { a } | \mathbf { s } )$ on $\mathbf { s } \in \mathcal { D }$ . + +Which training checkpoint is likely to attain the best policy performance? Tracking overfitting in supervised learning is important for selecting the best-performing checkpoint, before overfitting becomes severe. 
Analogously, we can compare the average dataset Q-value across different checkpoints within the same run to pick the best policy. Since CQL aims to increase the average dataset Q-value (Equation 1), we would expect Q-values to initially increase, until learning starts to overfit and the average dataset $\mathrm { Q }$ -value starts decreasing. We should therefore select the latest checkpoint that corresponds to a peak in the estimated dataset Q-value. A visual illustration of this idea is shown in the figure on the previous page, where the checkpoint marked by the green line is recommended to be chosen. In summary, (a) to detect overfitting we can track: + +Metric 3.1 (Overfitting). A low average data $Q$ -value $\mathbb { E } _ { \mathbf { s } , \mathbf { a } \sim \mathcal { D } } [ Q _ { \theta } ( \mathbf { s } , \mathbf { a } ) ]$ that decreases with more gradient steps on Equation 1 indicates that the offline RL algorithm is overfitting. + +and (b) further, given a run that exhibits overfitting, our principle for policy selection is given by: + +Guideline 3.1 (Policy selection). If a run overfits (per Metric 3.1), select the checkpoint that attains the highest average dataset $Q$ -value before overfitting for deployment. + +Finally, for actor-critic algorithms [18] that update the actor slower than the critic, the next policy checkpoint after the peak in the average dataset Q-value appears must be selected. In most of our experiments, we find that simply utilizing the policy checkpoint at the point of the peak in the Qvalue also leads to good results making this a rare concern, but in some cases, utilizing the next checkpoint after the Q-value peak performs better empirically. + +Detecting underfitting in CQL. Next, we turn to devising a procedure to detect underfitting. As summarized in Table 1, underfitting occurs when the RL algorithm is unable to minimize the training objective in Equation 1 effectively. 
Therefore, large values for the TD error, the CQL regularizer, or both imply underfitting. A large value for the CQL regularizer, ${ \mathcal { R } } ( \theta )$ , indicates an overestimation of Q-values relative to their true value [2] and thus, unlike the overfitting regime, we would not expect the average learned $\mathrm { Q }$ -value to decrease with more training. Thus, one approach to predict underfitting is to track both the TD error, ${ \mathcal { L } } _ { \mathrm { T D } } ( \theta )$ , and the CQL regularizer, ${ \mathcal { R } } ( \theta )$ , and check if the value of even one of these quantities is large. More discussion is provided in Appendix A. + +![](images/4b1e144e763ab56eff446401eff0f116a6cc810b779d08af3c7a767883b102d8.jpg) + +How do we determine if the $\mathbf { \nabla } ^ { T D }$ error and the CQL regularizer are β€œlarge”? In order to determine if the error of a particular run is large, we can rerun the base CQL algorithm but with models of higher capacity, which does not necessarily correspond to the function approximator size, as we will discuss in Section 4. For each model, we record the corresponding training errors and check if the training TD error and CQL regularizer value are reduced with capacity increase. If increasing capacity leads to a reduction in the loss without exhibiting the overfitting signs described previously, then we are in an underfitting regime. Another approach to answer the question is to utilize the value of the TD error $\left( \mathcal { L } _ { \mathrm { T D } } ( \theta ) \right)$ and the task horizon $( 1 / ( 1 - \gamma ) )$ to estimate the overall error in the learned Q-values against the actual Q-value, which is equal to $\dot { \mathcal { L } } _ { \mathrm { T D } } ( \theta ) / ( 1 - \gamma )$ [23] (see Appendix A). 
If this overall error spans the range of allowed Q-values on the task – which could be inferred based on the structure of the reward function in the task – then we can say that the algorithm is underfitting. + +Metric 3.2 (Underfitting). Compute the values of the training $T D$ error, ${ \mathcal { L } } _ { \mathrm { T D } } ( \theta )$ and CQL regularizer, ${ \mathcal { R } } ( \theta )$ for the current run and another identical run with increased model capacity. If the training errors reduce with increasing model capacity, the original run was underfitting. + +# 4 Addressing Overfitting and Underfitting in Conservative Offline RL + +The typical workflow for supervised learning not only identifies overfitting and underfitting, but also guides the practitioner how to adjust their method so as to alleviate it (e.g., by modifying regularization or model capacity), thus improving performance. Can we devise similar guidelines to address overfitting and underfitting with conservative offline RL? Here, we discuss some ways to adjust regularization and model capacity to alleviate these phenomena. + +Capacity-decreasing regularization for overfitting. As we observed in Section 3, the mechanism behind extremely low $\mathbf { Q }$ -values on the dataset is that CQL training minimizes $\mathrm { Q }$ -values on actions sampled from $\mu ( \mathbf { a } | \mathbf { s } )$ . Two possible approaches to preventing over-minimization of these values are (1) applying regularization such as dropout [24] on Q-function layers, similar to supervised learning, and (2) enforcing that representations of the learned Q-function match a pre-specified target for all state-action tuples. For (2), we can apply techniques such as a variational information bottleneck (VIB) [25, 26] regularizer on the learned representations, $\phi ( \mathbf { s } )$ . Formally, let $( \mathbf { s } , \mathbf { a } )$ denote a stateaction pair. 
Instead of predicting a deterministic $\phi ( \mathbf { s } ) \in \mathbb { R } ^ { d }$ (Figure 10), we modify the Q-network to predict two distinct vectors, $\phi _ { m } ( \mathbf { s } ) \in \mathbb { R } ^ { d }$ and $\phi _ { \Sigma } ( \mathbf { s } ) \in \mathbb { R } ^ { d }$ , and sample $\phi ( \mathbf { s } )$ randomly from a Gaussian centered at $\phi _ { m }$ with covariance $\phi _ { \Sigma }$ , i.e., $\phi ( \mathbf { s } ) \sim \mathcal { N } ( \phi _ { m } ( \mathbf { s } ) , \mathrm { d i a g } ( \phi _ { \Sigma } ( \mathbf { s } ) )$ . VIB then regularizes $\mathcal { N } ( \phi _ { m } ( \mathbf { s } ) , \mathrm { d i a g } ( \phi _ { \Sigma } ( \mathbf { s } ) )$ to be close to a prior distribution, $\mathcal { N } ( 0 , \mathbb { I } )$ : + +$$ +\operatorname* { m i n } _ { \theta } \ \mathcal { L } _ { \mathrm { C Q L } } ( \theta ) + \beta \mathbb { E } _ { \mathrm { s } \sim \mathcal { D } } \left[ \mathrm { D } _ { \mathrm { K L } } \left( \mathcal { N } ( \phi _ { m } ( \mathbf { s } ) , \mathrm { d i a g } ( \phi _ { \Sigma } ( \mathbf { s } ) ) ) \ | | \mathcal { N } ( 0 , \mathbb { I } ) \right) \right] \quad ( \mathrm { V I B ~ r e g u l a r i z e r } ) , +$$ + +Guideline 4.1. To address overfitting, we recommend using some form of capacity-decreasing regularization on the $Q$ -function, such as dropout or the VIB regularizer shown in Equation 3. + +Capacity-increasing techniques for underfitting. To address underfitting, we need to increase model capacity to improve optimization of the training objective. Analogous to supervised learning, model capacity can be increased by using more expressive neural nets (e.g., ResNets [27], transformers [28]) for representing the learned policy. We use ResNets in our experiments (Figure 10). 
However, the RL setting presents an additional challenge with capacity: while larger models in principle have more capacity, recent work [29, 21, 22] has shown that utilizing larger networks to represent Q-functions does not always improve its capacity in practice, because TD-based RL methods introduce an β€œimplicit under-parameterization” effect that can result in aliased (i.e., similar) internal representations for different state-action inputs, even for very large neural networks that can express the true Q-function effectively. To address this issue, these works apply a β€œcapacityincreasing” regularizer to Q-function training. For instance, we can use the DR3 regularizer [22], which penalizes the dot product of $\phi ( \mathbf { s } )$ and $\phi ( \mathbf { s } ^ { \prime } )$ for a transition $( \mathbf { s } , \mathbf { a } , \mathbf { s } ^ { \prime } ) \in \mathcal { D }$ , and hence reduces aliasing. This objective is given by: + +$$ +\operatorname* { m i n } _ { \theta } \ \mathcal { L } _ { \mathrm { C Q L } } ( \theta ) + \beta \mathbb { E } _ { { \mathbf s } , { \mathbf a } , { \mathbf s } ^ { \prime } \sim \mathcal { D } } \left[ \left| \phi ( { \mathbf s } ) ^ { \top } \phi ( { \mathbf s } ^ { \prime } ) \right| \right] \qquad ( { \mathrm { D R 3 ~ r e g u l a r i z e r ~ } } [ 2 2 ] ) , +$$ + +Guideline 4.2. To address underfitting, we recommend using some capacity-increasing regularization on the Q-function and the policy either in conjunction or separately. Examples: (1) bigger policy networks (e.g., ResNets), (2) DR3 regularizer on the Q-network. + +# 5 Evaluation of Our Workflow Metrics and Protocols in Simulation + +Next, we empirically validate the workflow proposed in Sections 3 and 4 on a suite of simulated robotic manipulation domains that mimic real-robot scenarios, from image observations with sparse binary rewards. 
We will examine how applying the workflow in Section 3 to detect overfitting or underfitting and then utilizing the strategies in Section 4 affects the performance of offline RL methods. An improved performance would indicate the efficacy of our workflow in making successful design decisions without any online tuning. + +![](images/feadf98282550c14dd6939d9b1aaebdb526316856a1034070ef48af1c82a1480.jpg) + +Experimental setup. We use the environments from Singh et al. [3] to design offline RL tasks and datasets that we use for our empirical analysis. We consider two tasks: (1) a pick and place task and (2) a grasping object from a drawer task. Examples of trajectories in both of these simulated domains are shown in Figure 2 and are detailed in Appendix D. Briefly, the pick and place task consists of a 6-DoF WidowX robot in front of a tray with an object. The goal is to put the object inside the tray. A non-zero reward of $+ 1$ is provided only when the object has been placed in the box. The offline dataset for this task consists of trajectories that grasp an object with a $3 5 \%$ success and other trajectories that place an object with a $40 \%$ success. Our second task is a grasping from drawer task where the WidowX robot is placed in front of a drawer and multiple objects. The robot can open or close the drawer, grasp objects from inside the drawer or on the table, and place them anywhere in the scene. The goal is to close the top drawer, then open the bottom drawer and take the object out. Only if the object has been taken out, a reward of $+ 1$ is obtained. The offline dataset consists of trajectories with a $3 0 { - } 4 0 \%$ success rate for opening and closing a drawer and other trajectories with only $40 \%$ placing success. We use $\alpha = 1 . 0$ for CQL training in all experiments, which is directly taken from prior work [3], without any tuning. 
However, too low or too high $\alpha$ values will inhibit the effectiveness of regular CQL and we first need to tune $\alpha$ as discussed in Appendix G. More details are provided in Appendix D. + +![](images/7149b62299170ecd7a26a8a110d939daa58337b186bfd1671932f83c2c649963.jpg) +Figure 3: Policy performance (Top) and average dataset Q-values of CQL (bottom) with varying number of trajectories. Vertical bands indicate regions around the peak in average $\mathrm { Q }$ -value and observe that these regions correspond to policies with good actual performance. + +Scenario #1: Variable amount of training data. Our first scenario consists of the simulated tasks discussed above with a variable number of trajectories in the training data (50, 100, 500, 10000). We run CQL and track metrics 3.1 and 3.2 in each case. Observe in Figure 3 (bottom) that with fewer trajectories, the average dataset Q-value $\mathbb { E } _ { \mathbf { s } , \mathbf { a } \sim \mathcal { D } } [ Q _ { \theta } ( \mathbf { s } , \mathbf { a } ) ]$ first rises, and then drops. This matches the description of overfitting in Section 3. Observe in Figure 4 (left) that, at the same time, the value of the CQL regularizer is very low, which is not consistent with what we expect of underfitting. Thus, we can conclude that these conditions exhibit overfitting, especially with 50 and 100 trajectories. The vertical dashed lines indicate the checkpoints that would be selected for evaluation per Guideline 3.1. We further visualize the performance of the chosen checkpoints against the actual return of each intermediate policy in Figure 3 (top). Note that this value is obtained by rolling out the learned policy, and would not be available in a realistic offline RL setting, but is provided only for analysis. Selecting the checkpoint based on Guideline 3.1 leads us to select a model with close to the peak performance over the training process, validating the efficacy of Guideline 3.1. 
+ +Since we detected overfitting by following our workflow, we now aim to address it by using the VIB regularizer in the setting with 100 trajectories. As shown in Figure 4 (right), applying this regularizer not only alleviates the drop in Q-values after many training steps, but allows us to pick later checkpoints in training which perform better than base CQL on both the tasks. This validates that overfitting, as detected via our workflow, can be effectively mitigated by decreasing capacity, in this case by using VIB. We evaluate dropout, $\ell _ { 1 }$ and $\ell _ { 2 }$ regularization schemes in Appendix J. + +![](images/c8b831ad68078d018a8733761f0da0593a9224d909869ecb852f0ffd8ee06f24.jpg) +Figure 4: Left: CQL regularizer attains low values, especially with 50 and 100 trajectories in the pick and place task, Right: Using VIB mitigates overfitting, giving rise to a stable trend in $\mathrm { Q }$ -values and better performance which does not degrade with more training steps. + +Scenario #2: Multiple training objects. Our second test scenario consists of the pick and place task, modified to include a variable number of object types (1, 5, 10, 20, 35). Handling more objects requires higher capacity, since each object has a different shape and appearance. In each case, CQL is provided with 5000 trajectories. Following our workflow from Section 3, we first compute the average dataset Q-value and the training TD error. We observe in Figure 5 that, unlike in Scenario #1, Q-values do not generally decrease when trained for many steps, suggesting that the Q-function is likely not overfitting. To check for underfitting, we visualize the training TD error and find that, with 10, 20 and 35 objects, TD error magnitudes are in the range of [1.0, 2.0], which suggests a overall Q-value error of [30.0, 60.0] since the task horizon is 30. 
On an absolute scale, this error magnitude is large: since the rewards are $_ { 0 / 1 }$ , the range of difference between actual Q-values for any two policies is at most 30, which suggests that the error magnitude in the runs in Figure 5 are high. Hence, we conclude that this scenario generally exhibits underfitting with more objects. Indeed this trend is reflected in the policy performance that we plot for analysis in Figure 5: note that the policy return decreases with an increased number of objects, and the policy performance initially increases and saturates at a suboptimal value. + +![](images/abb86c89607619cd53397449a92fc47f01701017a73f7df2599b8c654f14d0ca.jpg) +Figure 5: Performance (left), TD error (middle) and average dataset Qvalues (right) for the pick and place task with a variable number of objects. Note that while the learned Q-values increase and stabilize, the TD error values in scenarios with more than 10 objects are large (1.0-2.0). Correspondingly, the performance generally decreases as the number of objects increases. + +![](images/b4ce278afd05bdaaa57ddb722d6767ecad2d352a23720d7d7f144183cc608616.jpg) +Figure 6: Correcting underfitting by applying our workflow for 35 objects. + +To address underfitting in the multi-object case, we apply the proposed capacity-increasing measures to the 35-object task (results for 10 and 20 object settings are in Appendix I). We use a more expressive ResNet architecture for the policy and the DR3 regularizer for the Q-function together. Observe in the figure on the right that this combination (shown in red) improves policy performance in this setting (compared to green), which validates our workflow protocol for addressing underfitting. + +# 6 Tuning CQL for Real-World Robotic Manipulation + +Having evaluated the efficacy of our proposed workflow in simulation, we now utilize our workflow to tune CQL for real-world robotic manipulation. 
We test in two setups that require the robot to learn from sparse binary rewards and image observations. The settings differ in robot platform, task specification, and dataset size. Additional results and robot videos are at the following website: https://sites. google.com/view/offline-rl-workflow + +Sawyer manipulation tasks [30]. First, we train a + +![](images/566e1138148ac542d1676690513ab2bab4af3cbfec1257dc70a864019d00681c.jpg) +Figure 7: Real-world tasks. Successful rollouts of CQL tuned with our workflow from Sections 3 & 4. Top to bottom: Sawyer lid on pot, Sawyer drawer opening, WidowX pick-place task. + +Sawyer robot in a tabletop setting to perform two tasks: (1) placing the lid onto a pot and (2) opening a drawer. The robot must perform these tasks in the presence of visual distractor objects, as shown in Figure 7. We directly use the dataset of 100 trajectories for each task collected by Khazatsky et al. [30] for our experiments so as to mimic the real-world use case of leveraging existing data with offline RL. We use four-dimensional actions with 3D end-effector velocity control in xyz-space and 1D gripper open/close action. More details regarding the setup are provided in Appendix D. + +We run default CQL on these tasks and track the average Q-value, TD error, and CQL regularizer value. As shown in Figure 8, the average Q-value does not decrease over training, and the TD error (and CQL regularizer shown in Appendix E.2) is large. Per our discussion in Section 3, this indicates underfitting. Following our guidelines from Section 4, we utilize a more expressive ResNet policy (Figure 10), which increases the number of total convolutional layers from 3 to 9. We observe that this reduces the values of both the TD error Figure 8 and CQL regularizer (Appendix E.2) on both tasks. We + +![](images/d5317090e1ad32bc153c1daf1e4d3802dfab85d171f13b7f15890473be1358cc.jpg) +Figure 8: Average Q-value and TD error on Sawyer tasks as model capacity increases. 
Q-values increase over training with lower capacity, ruling out overfitting, and increasing model capacity leads to a reduction in TD error, indicating the presence of underfitting.
We then evaluate our policy selection scheme, which in this case suggests deploying checkpoint 50, the immediate checkpoint after the peak in Q-values. To see if this checkpoint is effective, we evaluate the performance of a few other policy checkpoints (for analysis only) and plot this performance trend in Figure 9 (right) as a dashed line. Observe that indeed the checkpoint found by our workflow attains the highest success rate (7/9) compared to other checkpoints, which only succeed $\leq 4 / 9$ times. + +Since overfitting is detected, we now turn to addressing overfitting by adding the VIB regularizer (Equation 3) during training. As shown in Figure 9 (left), the Q-values obtained after the addition of this regularizer (shown in brown; labeled β€œQ-values (VIB)”) are now stable and do not decrease over the course of training and so we can choose any policy for evaluation. We evaluate multiple policies, for visualization pur + +
Real-world WidowX pick and place
MethodEpoch5075100200
CQL7/94/94/92/9
CQL + VIB3/98/97/97/9
+ +Table 2: Performance of various policy checkpoints of CQL and $\mathrm { C Q L + V I B }$ on the real WidowX pick and place task (bold entry denotes the checkpoint selected by our workflow). Note that when overfitting is corrected via VIB, multiple checkpoints perform well. + +poses only, in Figure 9 (middle), we find that all of them attain a $\geq 7 / 9$ success, comparable or better than the base CQL algorithm (Figure 9 (right)). This indicates that addressing overfitting not only leads to some gains in performance but also greatly simplifies policy selection as all checkpoints perform similarly and well. Table 2 summarizes these results below, where the bold entries denote the checkpoints found by our policy selection rule. These results indicate the effectiveness of our workflow in tuning CQL by addressing overfitting and underfitting on multiple real robot platforms. + +# 7 Discussion + +While offline RL algorithms have improved significantly, applying these methods to real-world robotic domains is still challenging due to little guidance on tuning them. In this paper, we devise a workflow for algorithms such as CQL and BRAC, which consists of a set of metrics and conditions that can be tracked by a practitioner over the course of offline training to detect overfitting and underfitting, and recommendations to addresses the observed challenges. Applying our workflow both in simulation and the real world shows strong performance benefits. While our proposed workflow is an initial step towards practical robotic offline RL and is based on our best conceptual understanding of certain offline RL algorithms, these guidelines are heuristic. To some extent this is unavoidable, since a workflow is a set of guidelines and recommendations, rather than a rigid algorithm. Regardless of how theoretically justified it is, in the end, its value is determined by its ability to produce good results. 
We believe the breadth of tasks considered, which consist of two different real robots and multiple simulated tasks, indicates its broad applicability. However, deriving theoretical guarantees regarding workflows of this type is an important direction for future research. + +# Acknowledgements + +We thank Ilya Kostrikov, Avi Singh, Ashvin Nair, Alexander Khazatsky, Albert Yu, Jedrzej Orbik, and Jonathan Yang for their help with setting up and debugging various aspects of the experimental setup as well as for providing us with offline datasets we could test our workflow on. We thank Dibya Ghosh, anonymous reviewers, and the area chair from CoRL for constructive feedback on an earlier version of this paper. AK thanks George Tucker and Rishabh Agarwal for valuable discussions. This research was funded by the DARPA Assued Autonomy Program and compute support from Google and Microsoft Azure. + +# References + +[1] D. Kalashnikov, J. Varley, Y. Chebotar, B. Swanson, R. Jonschkowski, C. Finn, S. Levine, and K. Hausman. Mt-opt: Continuous multi-task robotic reinforcement learning at scale. arXiv preprint arXiv:2104.08212, 2021. +[2] A. Kumar, A. Zhou, G. Tucker, and S. Levine. Conservative q-learning for offline reinforcement learning. arXiv preprint arXiv:2006.04779, 2020. +[3] A. Singh, A. Yu, J. Yang, J. Zhang, A. Kumar, and S. Levine. Cog: Connecting new skills to past experience with offline reinforcement learning. arXiv preprint arXiv:2010.14500, 2020. +[4] Y. Chebotar, K. Hausman, Y. Lu, T. Xiao, D. Kalashnikov, J. Varley, A. Irpan, B. Eysenbach, R. Julian, C. Finn, and S. Levine. Actionable models: Unsupervised offline reinforcement learning of robotic skills. arXiv preprint arXiv:2104.07749, 2021. +[5] D. Kalashnikov, A. Irpan, P. Pastor, J. Ibarz, A. Herzog, E. Jang, D. Quillen, E. Holly, M. Kalakrishnan, V. Vanhoucke, et al. Scalable deep reinforcement learning for vision-based robotic manipulation. In Conference on Robot Learning, pages 651–673. PMLR, 2018. 
+[6] D. Precup. Eligibility traces for off-policy policy evaluation. Computer Science Department Faculty Publication Series, page 80, 2000. +[7] I. Kostrikov and O. Nachum. Statistical bootstrapping for uncertainty estimation in off-policy evaluation. arXiv preprint arXiv:2007.13609, 2020. +[8] C. Paduraru. Off-policy evaluation in Markov decision processes. PhD thesis, Ph. D. Dissertation. McGill University, 2012. +[9] T. L. Paine, C. Paduraru, A. Michi, C. Gulcehre, K. Zolna, A. Novikov, Z. Wang, and N. de Freitas. Hyperparameter selection for offline reinforcement learning. arXiv preprint arXiv:2007.09055, 2020. +[10] O. Nachum and B. Dai. Reinforcement learning via fenchel-rockafellar duality. arXiv preprint arXiv:2001.01866, 2020. +[11] P. Thomas, G. Theocharous, and M. Ghavamzadeh. High confidence policy improvement. In International Conference on Machine Learning, pages 2380–2388, 2015. +[12] P. S. Thomas, G. Theocharous, and M. Ghavamzadeh. High-confidence off-policy evaluation. In Twenty-Ninth AAAI Conference on Artificial Intelligence, 2015. +[13] N. Jiang and L. Li. Doubly robust off-policy value evaluation for reinforcement learning. arXiv preprint arXiv:1511.03722, 2015. +[14] J. Fu, M. Norouzi, O. Nachum, G. Tucker, ziyu wang, A. Novikov, M. Yang, M. R. Zhang, Y. Chen, A. Kumar, C. Paduraru, S. Levine, and T. Paine. Benchmarks for deep off-policy evaluation. In International Conference on Learning Representations, 2021. URL https: //openreview.net/forum?id=kWSeGEeHvF8. +[15] S. Levine, A. Kumar, G. Tucker, and J. Fu. Offline reinforcement learning: Tutorial, review, and perspectives on open problems. arXiv preprint arXiv:2005.01643, 2020. +[16] Y. Wu, G. Tucker, and O. Nachum. Behavior regularized offline reinforcement learning. arXiv preprint arXiv:1911.11361, 2019. +[17] T. P. Lillicrap, J. J. Hunt, A. Pritzel, N. Heess, T. Erez, Y. Tassa, D. Silver, and D. Wierstra. Continuous control with deep reinforcement learning. 
arXiv preprint arXiv:1509.02971, 2015. +[18] S. Fujimoto, H. Van Hoof, and D. Meger. Addressing function approximation error in actorcritic methods. arXiv preprint arXiv:1802.09477, 2018. +[19] T. Haarnoja, A. Zhou, P. Abbeel, and S. Levine. Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor. arXiv preprint arXiv:1801.01290, 2018. +[20] I. Kostrikov, J. Tompson, R. Fergus, and O. Nachum. Offline reinforcement learning with fisher divergence critic regularization. arXiv preprint arXiv:2103.08050, 2021. +[21] A. Kumar, R. Agarwal, D. Ghosh, and S. Levine. Implicit under-parameterization inhibits data-efficient deep reinforcement learning. In International Conference on Learning Representations, 2021. URL https://openreview.net/forum?id=O9bnihsFfXU. +[22] A. Kumar, R. Agarwal, A. Courville, T. Ma, G. Tucker, and S. Levine. Value-based deep reinforcement learning requires explicit regularization. In RL for Real Life Workshop & Overparameterization: Pitfalls and Opportunities Workshop, ICML, 2021. URL https: //drive.google.com/file/d/1Fg43H5oagQp-ksjpWBf_aDYEzAFMVJm6/view. +[23] R. Munos. Error bounds for approximate policy iteration. In Proceedings of the Twentieth International Conference on International Conference on Machine Learning, ICML’03, page 560–567. AAAI Press, 2003. ISBN 1577351894. +[24] N. Srivastava, G. Hinton, A. Krizhevsky, I. Sutskever, and R. Salakhutdinov. Dropout: a simple way to prevent neural networks from overfitting. The journal of machine learning research, 15 (1):1929–1958, 2014. +[25] A. A. Alemi, I. Fischer, J. V. Dillon, and K. Murphy. Deep variational information bottleneck. arXiv preprint arXiv:1612.00410, 2016. +[26] A. Achille and S. Soatto. Emergence of invariance and disentanglement in deep representations. The Journal of Machine Learning Research, 19(1):1947–1980, 2018. +[27] K. He, X. Zhang, S. Ren, and J. Sun. Deep residual learning for image recognition. 
In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770–778, 2016. +[28] A. Vaswani, N. Shazeer, N. Parmar, J. Uszkoreit, L. Jones, A. N. Gomez, L. Kaiser, and I. Polosukhin. Attention is all you need. arXiv preprint arXiv:1706.03762, 2017. +[29] D. Ghosh and M. G. Bellemare. Representations for stable off-policy reinforcement learning. arXiv preprint arXiv:2007.05520, 2020. +[30] A. Khazatsky, A. Nair, D. Jing, and S. Levine. What can i do here? learning new skills by imagining visual affordances. arXiv preprint arXiv:2106.00671, 2021. +[31] D. Kalashnikov, A. Irpan, P. Pastor, J. Ibarz, A. Herzog, E. Jang, D. Quillen, E. Holly, M. Kalakrishnan, V. Vanhoucke, et al. Scalable deep reinforcement learning for vision-based robotic manipulation. In Conference on Robot Learning, pages 651–673, 2018. +[32] A. Zeng, S. Song, S. Welker, J. Lee, A. Rodriguez, and T. Funkhouser. Learning synergies between pushing and grasping with self-supervised deep reinforcement learning. 2018. +[33] OpenAI. Learning dexterous in-hand manipulation. In arXiv preprint arXiv:1808.00177, 2018. +[34] H. van Hoof, T. Hermans, G. Neumann, and J. Peters. Learning robot in-hand manipulation with tactile features. 2015. +[35] A. Rajeswaran, V. Kumar, A. Gupta, G. Vezzani, J. Schulman, E. Todorov, and S. Levine. Learning complex dexterous manipulation with deep reinforcement learning and demonstrations. In RSS, 2018. +[36] V. Kumar, A. Gupta, E. Todorov, and S. Levine. Learning dexterous manipulation policies from experience and imitation. CoRR, abs/1611.05095, 2016. +[37] C. Schenck and D. Fox. Visual closed-loop control for pouring liquids. In International Conference on Robotics and Automation (ICRA), 2017. +[38] A. Yahya, A. Li, M. Kalakrishnan, Y. Chebotar, and S. Levine. Collective robot reinforcement learning with distributed asynchronous guided policy search. In IROS, 2017. +[39] J. Matas, S. James, and A. J. Davison. 
Sim-to-real reinforcement learning for deformable object manipulation. In Conference on Robot Learning (CoRL), 2018. +[40] R. Julian, B. Swanson, G. S. Sukhatme, S. Levine, C. Finn, and K. Hausman. Efficient adaptation for end-to-end vision-based robotic manipulation. arXiv arXiv:2004.10190, 2020. +[41] S. Cabi, S. G. Colmenarejo, A. Novikov, K. Konyushkova, S. Reed, R. Jeong, K. ZoΕ‚na, Y. Ay- Λ™ tar, D. Budden, M. Vecerik, et al. A framework for data-driven robotics. arXiv preprint arXiv:1909.12200, 2019. +[42] C. Finn and S. Levine. Deep visual foresight for planning robot motion. In 2017 IEEE International Conference on Robotics and Automation (ICRA), pages 2786–2793. IEEE, 2017. +[43] F. Ebert, C. Finn, S. Dasari, A. Xie, A. Lee, and S. Levine. Visual foresight: Model-based deep reinforcement learning for vision-based robotic control. arXiv preprint arXiv:1812.00568, 2018. +[44] A. Xie, F. Ebert, S. Levine, and C. Finn. Improvisation through physical understanding: Using novel objects as tools with visual foresight. Robotics: Science and Systems (RSS), 2019. +[45] Y. Hristov, A. Lascarides, and S. Ramamoorthy. Interpretable latent spaces for learning from demonstration. arXiv preprint arXiv:1807.06583, 2018. +[46] S. Tian, S. Nair, F. Ebert, S. Dasari, B. Eysenbach, C. Finn, and S. Levine. Model-based visual planning with self-supervised functional distances. arXiv preprint arXiv:2012.15373, 2020. +[47] S. Young, D. Gandhi, S. Tulsiani, A. Gupta, P. Abbeel, and L. Pinto. Visual imitation made easy. arXiv e-prints, pages arXiv–2008, 2020. +[48] E. Johns. Coarse-to-fine imitation learning: Robot manipulation from a single demonstration. arXiv preprint arXiv:2105.06411, 2021. +[49] A. Mandlekar, F. Ramos, B. Boots, S. Savarese, L. Fei-Fei, A. Garg, and D. Fox. Iris: Implicit reinforcement without interaction at scale for learning control from offline robot manipulation data. In 2020 IEEE International Conference on Robotics and Automation (ICRA), pages 4414–4420. 
IEEE, 2020. +[50] A. Mandlekar, D. Xu, R. MartΒ΄Δ±n-MartΒ΄Δ±n, S. Savarese, and L. Fei-Fei. Learning to generalize across long-horizon tasks from human demonstrations, 2020. +[51] S. Lange, T. Gabel, and M. Riedmiller. Batch reinforcement learning. In Reinforcement learning, pages 45–73. Springer, 2012. +[52] S. Fujimoto, D. Meger, and D. Precup. Off-policy deep reinforcement learning without exploration. arXiv preprint arXiv:1812.02900, 2018. +[53] A. Kumar, J. Fu, M. Soh, G. Tucker, and S. Levine. Stabilizing off-policy q-learning via bootstrapping error reduction. In Advances in Neural Information Processing Systems, pages 11761–11771, 2019. +[54] X. B. Peng, A. Kumar, G. Zhang, and S. Levine. Advantage-weighted regression: Simple and scalable off-policy reinforcement learning. arXiv preprint arXiv:1910.00177, 2019. +[55] N. Jaques, A. Ghandeharioun, J. H. Shen, C. Ferguson, A. Lapedriza, N. Jones, S. Gu, and R. Picard. Way off-policy batch deep reinforcement learning of implicit human preferences in dialog. arXiv preprint arXiv:1907.00456, 2019. +[56] A. Nair, M. Dalal, A. Gupta, and S. Levine. Accelerating online reinforcement learning with offline datasets. arXiv preprint arXiv:2006.09359, 2020. +[57] R. Fakoor, J. Mueller, P. Chaudhari, and A. J. Smola. Continuous doubly constrained batch reinforcement learning. arXiv preprint arXiv:2102.09225, 2021. +[58] T. Yu, G. Thomas, L. Yu, S. Ermon, J. Zou, S. Levine, C. Finn, and T. Ma. Mopo: Model-based offline policy optimization. arXiv preprint arXiv:2005.13239, 2020. +[59] R. Kidambi, A. Rajeswaran, P. Netrapalli, and T. Joachims. Morel: Model-based offline reinforcement learning. arXiv preprint arXiv:2005.05951, 2020. +[60] R. Rafailov, T. Yu, A. Rajeswaran, and C. Finn. Offline reinforcement learning from images with latent space models. Learning for Decision Making and Control (L4DC), 2021. +[61] D. Precup, R. S. Sutton, and S. Dasgupta. Off-policy temporal-difference learning with function approximation. 
In ICML, pages 417–424, 2001. +[62] C. Voloshin, H. M. Le, N. Jiang, and Y. Yue. Empirical study of off-policy policy evaluation for reinforcement learning. arXiv preprint arXiv:1911.06854, 2019. +[63] O. Nachum, Y. Chow, B. Dai, and L. Li. Dualdice: Behavior-agnostic estimation of discounted stationary distribution corrections. In Advances in Neural Information Processing Systems, pages 2315–2325, 2019. +[64] R. Qin, S. Gao, X. Zhang, Z. Xu, S. Huang, Z. Li, W. Zhang, and Y. Yu. Neorl: A near real-world benchmark for offline reinforcement learning. arXiv preprint arXiv:2102.00714, 2021. +[65] T. Haarnoja, H. Tang, P. Abbeel, and S. Levine. Reinforcement learning with deep energybased policies. In International Conference on Machine Learning (ICML), 2017. +[66] X. B. Peng, A. Kumar, G. Zhang, and S. Levine. Advantage-weighted regression: Simple and scalable off-policy reinforcement learning. arXiv preprint arXiv:1910.00177, 2019. +[67] S. Fujimoto and S. S. Gu. A minimalist approach to offline reinforcement learning. arXiv preprint arXiv:2106.06860, 2021. \ No newline at end of file diff --git a/md/train/wK2fDDJ5VcF/wK2fDDJ5VcF.md b/md/train/wK2fDDJ5VcF/wK2fDDJ5VcF.md new file mode 100644 index 0000000000000000000000000000000000000000..2cb4b74de683c55101ff90c3a8910dd4e5da079c --- /dev/null +++ b/md/train/wK2fDDJ5VcF/wK2fDDJ5VcF.md @@ -0,0 +1,163 @@ +# Learning to Walk in Minutes Using Massively Parallel Deep Reinforcement Learning + +Nikita Rudin ETH Zurich and NVIDIA rudinn@ethz.ch + +David Hoeller ETH Zurich and NVIDIA dhoeller@ethz.ch + +Philipp Reist NVIDIA preist@nvidia.com + +Marco Hutter ETH Zurich mahutter@ethz.com + +Abstract: In this work, we present and study a training set-up that achieves fast policy generation for real-world robotic tasks by using massive parallelism on a single workstation GPU. 
We analyze and discuss the impact of different training algorithm components in the massively parallel regime on the final policy performance and training times. In addition, we present a novel game-inspired curriculum that is well suited for training with thousands of simulated robots in parallel. We evaluate the approach by training the quadrupedal robot ANYmal to walk on challenging terrain. The parallel approach allows training policies for flat terrain in under four minutes, and in twenty minutes for uneven terrain. This represents a speedup of multiple orders of magnitude compared to previous work. Finally, we transfer the policies to the real robot to validate the approach. We open-source our training code to help accelerate further research in the field of learned legged locomotion: https://leggedrobotics.github.io/legged_gym/. + +Keywords: Reinforcement Learning, Legged Robots, Sim-to-real + +![](images/b6480bd30beb6d1b9762af85125a32c3c8067d4548699e684f5c674891d50b92.jpg) +Figure 1: Thousands of robots learning to walk in simulation. + +# 1 Introduction + +Deep reinforcement learning (DRL) is proving to be a powerful tool for robotics. Tasks such as legged locomotion [1], manipulation [2], and navigation [3], have been solved using these new tools, and research continues to keep adding more and more challenging tasks to the list. The amount of data required to train a policy increases with the task complexity. For this reason, most work focuses on training in simulation before transferring to real robots. We have reached a point where multiple days or even weeks are needed to fully train an agent with current simulators. For example, OpenAI’s block reorientation task was trained for up to 14 days and their Rubik’s cube solving policy took several months to train [4]. 
The problem is exacerbated by the fact that deep reinforcement learning requires hyper-parameter tuning to obtain a suitable solution which requires sequentially rerunning time-consuming training. Reducing training times using massively parallel approaches such as presented here can therefore help improve the quality and time-to-deployment of DRL policies, as a training setup can be iterated on more often in the same time frame. + +In this paper, we examine the effects of massive parallelism for on-policy DRL algorithms and present considerations in how the standard RL formulation and the most commonly used hyperparameters should be adapted to learn efficiently in the highly parallel regime. Additionally, we present a novel game-inspired curriculum which automatically adapts the task difficulty to the performance of the policy. The proposed curriculum architecture is straightforward to implement, does not require tuning, and is well suited for the massively parallel regime. Common robotic simulators such as Mujoco [5], Bullet [6], or Raisim [7] feature efficient multi-body dynamics implementations. However, they have been developed to run on CPUs with only a reduced amount of parallelism. In this work, we use NVIDIA’s Isaac Gym simulation environment [8], which runs both the simulation and training on the GPU and is capable of simulating thousands of robots in parallel. + +The massively parallel training regime has been explored before [4, 9] in the context of distributed systems with a network of thousands of CPUs each running a separate instance of the simulation. The parallelization was achieved by averaging the gradients between the different workers without reducing the number of samples provided by each agent. This results in large batch sizes of millions of samples for each policy update which improves the learning dynamics, but does not optimize the overall training time. 
In parallel, recent works have aimed to increase the simulation throughput and reduce training times of standard DRL benchmark tasks. A framework combining parallel simulation with multi-GPU training [10] was proposed to achieve fast training using hundreds of parallel agents. In the context of visual navigation, large batch simulation has been used to increase the training throughput [11]. Furthermore, GPU accelerated physics simulation has been shown to significantly improve the training time of the Humanoid running task [12]. A differentiable simulator running on Google’s TPUs has also been shown to greatly accelerate the training of multiple tasks [13]. We build upon [10, 12] by pushing the parallelization further, optimizing the training algorithm, and applying the approach to a challenging real-world robotics task. + +Perceptive and dynamic locomotion for legged robots in unstructured environments is a demanding task that, until recently, had only been partially demonstrated with complex model-based approaches [14, 15]. Learning-based approaches are emerging as a promising alternative. For quadrupeds, DRL has been used to train blind policies robust to highly uneven ground [16] (12 hours of training). Perceptive locomotion over challenging terrain has been achieved by combining learning with optimal control techniques [17, 18] (82 and 88 hours of training) and recently, a fully learned approach has shown great robustness in this setting [19] (120 hours of training). Similarly, bipedal robots have also been trained to walk blindly on stairs [20] (training time not reported). With our approach we can train a perceptive policy in under 20 minutes on a single GPU, with the complexity of simto-real transfer to the hardware, which increases the performance and robustness requirements and provides clear validation of the overall approach. 
Training such behaviors in minutes opens up new exciting possibilities ranging from automatic tuning to customized training using scans of particular environments. + +# 2 Massively Parallel Reinforcement Learning + +Current (on-policy) reinforcement learning algorithms are divided into two parts: data collection and policy update. The policy update, which corresponds to back-propagation for neural networks, is easily performed in parallel on the GPU. Parallelizing data collection is not as straightforward. Each step consists of policy inference, simulation, reward, and observation calculation. Current popular pipelines have the simulation and reward/observation calculation computed on the CPU, making the GPU unsuitable for policy inference because of communication bottle-necks. Data transfer over PCIe is known to be the weakest link of GPU acceleration, and can be as much as 50 times slower than the GPU processing time alone [21]. Furthermore, with CPU data collection, a large amount of data must be sent to the GPU for each policy update, slowing down the overall process. Limited parallelization can be achieved by using multiple CPU cores and spawning many processes, each running the simulation for one agent. However, the number of agents is quickly limited by the number of cores and other issues such as memory usage. We explore the potential of massive parallelism with Isaac Gym’s end-to-end data collection and policy updates on the GPU, significantly reducing data copying and improving simulation throughput. + +# 2.1 Simulation Throughput + +The main factor affecting the total simulation throughput is the number of robots simulated in parallel. Modern GPUs can handle tens of thousands of parallel instructions. Similarly, IsaacGym’s PhysX engine can process thousands of robots in a single simulation and all other computations of our pipeline are vectorized to scale favorably with the number of robots. 
Using a single simulation with thousands of robots presents some new challenges. For example, a single common terrain mesh must be used, and it cannot be easily changed at each reset. We circumvent this problem by creating the whole mesh with all terrain types and levels tiled side by side. We change the terrain level of the robots by physically moving them on the mesh. In supplementary material, we show the computational time of different parts of the pipeline, examine how these times scale with the number of robots, and provide other techniques to optimize the simulation throughput. + +# 2.2 DRL Algorithm + +We build upon a custom implementation of the Proximal Policy Optimization (PPO) algorithm [22]. Our implementation is designed to perform every operation and store all the data on the GPU. In order to efficiently learn from thousands of robots in parallel, we perform some essential modifications to the algorithm and change some of the commonly used hyper-parameter values. + +# 2.2.1 Hyper-Parameters Modification + +In an on-policy algorithm such as PPO, a fixed policy collects a selected amount of data before doing the next policy update. This batch size, $B$ , is a crucial hyper-parameter for successful learning. With too little data, the gradients will be too noisy, and the algorithm will not learn effectively. With too much data, the samples become repetitive, and the algorithm cannot extract more information from them. These samples represent wasted simulation time and slow down the overall training. We have $B = n _ { r o b o t s } n _ { s t e p s }$ , where $ { n _ { s t e p s } }$ is the number of steps each robot takes per policy update and $n _ { r o b o t s }$ the number of robots simulated in parallel. 
Since we increase $n _ { r o b o t s }$ by a few orders of magnitude, we must choose a small $n _ { s t e p s }$ to keep $B$ reasonable and hence optimize training times, which is a setting that has not been extensively explored for on-policy reinforcement learning algorithms. It turns out that we can not choose $n _ { s t e p s }$ to be arbitrarily low. The algorithm requires trajectories with coherent temporal information to learn effectively. Even though, in theory, information of single steps could be used, we find that the algorithm fails to converge to the optimal solution below a certain threshold. This can be explained by the fact that we use Generalized Advantage Estimation (GAE) [23], which requires rewards from multiple time steps to be effective. For our task, we find that the algorithm struggles when we provide fewer than 25 consecutive steps, corresponding to $0 . 5 \mathrm { s }$ of simulated time. It is important to distinguish $ { n _ { s t e p s } }$ from the maximum episode length leading to a time-out and a reset, which we define as $2 0 \mathrm { s }$ . The environments are reset when they reach this maximum length and not after each iteration, meaning that a single episode can cover many policy updates. This limits the total number of robots training in parallel, and consequently, prohibits us from using the full computational capabilities of the GPU. + +The mini-batch size represents the size of the chunks in which the batch size is split to perform backpropagation. We find that having mini-batch sizes much larger than what is usually considered best practice is beneficial for our massively parallel use case. We use mini-batches of tens of thousands of samples and observe that it stabilizes the learning process without increasing the total training time. + +# 2.2.2 Reset Handling + +During training, the robots must be reset whenever they fall, and also after some time to keep them exploring new trajectories and terrains. 
The PPO algorithm includes a critic predicting an infinite horizon sum of future discounted rewards. Resets break this infinite horizon assumption and can lead to inferior critic performance if not handled carefully. Resets based on failure or reaching a goal are not a problem because the critic can predict them. However, a reset based on a time out can not be predicted (we do not provide episode time in the observations). The solution is to distinguish the two termination modes and augment the reward with the expected infinite sum of discounted future rewards in a time-out case. In other words, we bootstrap the target of the critic with its own prediction. This solution has been discussed in [24], but interestingly, this distinction is not part of the widely used Gym environment interface [25] and is ignored by popular implementations such as Stable-Baselines $[ 2 6 ] ^ { 1 }$ . After investigating multiple implementations, we conclude that this important detail is often avoided by assuming that the environments either never time out or only on the very last step of a batch collection. In our case, with few robot steps per batch, we can not make such an assumption since a meaningful episode length covers the collection of many batches. We modify the standard Gym interface to detect time-outs and implement the bootstrapping solution. In supplementary material, we show the effect of this solution on the total reward as well as the critic loss. + +![](images/9b48083b251353c43d2e71ff8968b79f99600e9e05461c3dd723d12ace8ceddd.jpg) +Figure 2: Terrain types used for training and testing in simulation. (a) Randomly rough terrain with variations of $0 . 1 \mathrm { m }$ . (b) Sloped terrain with an inclination of $2 5 \mathrm { d e g }$ . (c) Stairs with a width of $0 . 3 \mathrm { m }$ and height of $\mathrm { 0 . 2 m }$ . (d) Randomized, discrete obstacles with heights of up to $\pm 0 . 2 \mathrm { m }$ . 
+ +# 3 Task Description + +A quadruped robot must learn to walk across challenging terrain, including uneven surfaces, slopes, stairs, and obstacles, while following base-heading and linear-velocity commands. We conduct most of the simulation and real-world deployment experiments on the ANYbotics ANYmal C robot. However, in simulation, we demonstrate the broader applicability of the approach by additionally training policies for ANYmal B, ANYmal C with an attached arm, and the Unitree A1 robots. + +# 3.1 Game-Inspired Curriculum + +The terrains are selected to be representative of real-world environments. We create five types of procedurally generated terrains presented in Fig. 2: flat, sloped, randomly rough, discrete obstacles, and stairs. The terrains are tiled squares with $8 \mathrm { m }$ sides. The robots start at the center of the terrain and are given randomized heading and velocity commands (kept constant for the duration of an episode) pushing them to walk across the terrain. Slopes and stairs are organized in pyramids to allow traversability in all directions. + +Previous works have shown the benefits of using an automated curriculum of task difficulty to learn complex locomotion policies [28, 29, 16]. Similarly, we find that it is essential to first train the policy on less challenging terrain before progressively increasing the complexity. We adopt a solution inspired by [16], but replace the particle filter approach with a new game-inspired automatic curriculum. All robots are assigned a terrain type and a level that represents the difficulty of that terrain. For stairs and randomized obstacles, we gradually increase the step height from $5 \mathrm { c m }$ to $2 0 \mathrm { c m }$ . Sloped terrain inclination is increased from 0 deg to 25 deg. If a robot manages to walk past the borders of its terrain, its level is increased, and at the next reset, it will start on more difficult terrain. 
However, if at the end of an episode it moved by less than half of the distance required by its target velocity, its level is reduced again. Robots solving the highest level are looped back to a randomly selected level to increase the diversity and avoid catastrophic forgetting. This approach has the advantage of training the robots at a level of difficulty tailored to their performance without requiring any external tuning. It adapts the difficulty level for each terrain type individually and provides us with visual and quantitative feedback on the progress of the training. When the robots have reached the final level and are evenly spread across all terrains due to looping back, we can conclude they have fully learned to solve the task. + +![](images/ecaf8198256af450c96e9b482c8d3d3a06909d2addf9a2530c761fb4193fd419.jpg) +Figure 3: 4000 robots progressing through the terrains with automatic curriculum, after 500 (top) and 1000 (bottom) policy updates. The robots start the training session on the first row (closest to the camera) and progressively reach harder terrains. + +The proposed curriculum structure is well suited for the massively parallel regime. With thousands of robots we can directly use their current progress in the curriculum as the distribution of the policy’s performance, and do not need learn it with a generator network [30]. Furthermore, our method doesn’t require tuning and is straightforward to implement in a parallel manner with nearzero processing cost. We remove the computational overhead of re-sampling and re-generating new terrains needed for the particle filter approach. + +Fig. 3 shows robots progressing through the terrains at two different stages of the training process. On complex terrain types, the robots require more training iterations to reach the highest levels. 
The distribution of robots after 500 iterations shows that while the policy is able to cross sloped terrains and to go down stairs, climbing stairs and traversing obstacles requires more training iterations. However, after 1000 iterations, the robots have reached the most challenging level for all terrain types and are spread across the map. We train for a total for 1500 iterations to let the policy converge to its highest performance. + +# 3.2 Observations, Actions, and Rewards + +The policy receives proprioceptive measurements of the robot as well as terrain information around the robot’s base. The observations are composed of: base linear and angular velocities, measurement of the gravity vector, joint positions and velocities, the previous actions selected by the policy, and finally, 108 measurements of the terrain sampled from a grid around the robot’s base. Each measurement is the distance from the terrain surface to the robot’s base height. + +The total reward is a weighted sum of nine terms, detailed in supplementary material. The main terms encourage the robot to follow the commanded velocities while avoiding undesired base velocities along other axes. In order to create a smoother, more natural motion, we also penalize joint torques, joint accelerations, joint target changes, and collisions. Contacts with the knees, shanks or between the feet and a vertical surface are considered collisions, while contacts with the base are considered crashes and lead to resets. Finally, we add an additional reward term encouraging the robot to take longer steps, which results in a more visually appealing behavior. We train a single policy with the same rewards for all terrains. + +The actions are interpreted as desired joint positions sent to the motors. There, a PD controller produces motor torques. In contrast to other works [16, 20], neither the reward function nor the action space has any gait-dependent elements. 
+ +# 3.3 Sim-to-Real Additions + +In order to make the trained policies amenable for sim-to-real transfer, we randomize the friction of the ground, add noise to the observations and randomly push the robots during the episode to teach them a more stable stance. Each robot has a friction coefficient sampled uniformly in [0.5, 1.25]. The pushes happen every $1 0 \mathrm { s }$ . The robots’ base is accelerated up to $\pm 1 \mathrm { m } / \mathrm { s }$ in both $\mathbf { X }$ and y directions. The amount of noise is based on real data measured on the robot and is detailed in supplementary material. + +The ANYmal robot uses series elastic actuators with fairly complex dynamics, which are hard to model in simulation. For this reason and following the methodology of previous work [1], we use a neural network to compute torques from joint position commands. However, we simplify the inputs of the model. Instead of concatenating past measurements at fixed time steps and sending all of that information to a standard feed-forward network, we only provide the current measurements to an LSTM network. A potential drawback of this set-up is that the policy does not have the temporal information of the actuators as in previous work. We have experimented with various ways of providing that information through memory mechanisms for the policy but found that it does not improve the final performance. + +# 4 Results + +# 4.1 Effects of Massive Parallelism + +In this section, we study the effects of the number of parallel robots on the final performance of the policy. In order to use the total reward as a single representative metric, we have to remove the curriculum, otherwise a more performant policy sees its task difficulty increase and consequently a decrease in the total reward. As such, we simplify the task by reducing the maximum step size of stairs and obstacles and directly train robots on the full range of difficulties. 
+ +We begin by setting a baseline with $n _ { r o b o t s } = 2 0 0 0 0$ and $n _ { s t e p s } = 5 0$ , resulting in a batch size of 1M samples. Using this very large batch size results in the best policy but at the cost of a relatively long training time. + +We then conduct experiments in which we increase the number of robots while keeping the batch size constant. As a result, the number of steps each robot takes per policy update decreases. In this case, the training time decreases with a higher number of robots, but the policy performance drops if that number is too high. We start from 128 robots corresponding to the level of parallelization of previous CPU implementations and increase that number up to 16384, which is close to the maximum amount of robots we could simulate on rough terrain with Isaac Gym running on a single workstation GPU. + +In Fig. 4, we compare these results with the baseline, which allows us to select the most favorable trade-off between policy performance and training time. We see two interesting effects at play. First, when the number of robots is too high, the performance drops sharply, which can be explained by the time horizon of each robot becoming too small. As expected, with larger batch sizes, the overall reward is higher, and the time horizon effect is shifted, meaning that we can use more robots before seeing the drop. On the other hand, below a certain threshold, we see a slow decrease in performance with fewer robots. We believe this is explained by the fact that the samples are very similar with many steps per robot because of the relatively small time steps between them. This means that for the same amount of samples, there is less diversity in the data. In other words, with a low number of robots, we are further from the standard assumption that the samples are independent and identically distributed, which seems to have a noticeable effect on the training process. 
In terms of training time, we see a nearly linear scaling up to 4000 robots, after which simulation throughput gains slow down. As such, we can conclude that increasing the number of robots is beneficial for both final performance and training time, but there is an upper limit on this number after which an on-policy algorithm cannot learn effectively. Increasing the batch size to values much larger than what is typically used in similar works seems highly beneficial. Unfortunately, it also scales the training time so it is a trade-off that must be balanced. From the third plot we can conclude that using 2048 to 4096 robots with a batch size of $\approx 1 0 0 k$ or $\approx 2 0 0 k$ provides the best trade-off for this specific task. + +![](images/b511ee0c2aca987f5fa8e39a0d14f3ee54098cf431491e0a271f167d3bdcff6f.jpg) +Figure 4: (a) Average and standard deviation (over 5 runs) of the total reward of an episode after 1500 policy updates for different number of robots and 3 different batch sizes. The ideal case of a batch size of 1M samples with 20000 robots is shown in red. (b) Total training time for the same experiments. (c) Reward dependency on total training time. Colors represent the number of robots, while shapes show the batch size (circles: 49152, crosses: 98304, triangles: 196608). Points in the upper left part of the graph (highlighted in green) represent the most desirable configuration. + +![](images/f15e005bc1e3d3db64d82f1f7796d15a227032608677590845862fc30d3d7da3.jpg) +Figure 5: Success rate of the tested policy on increasing terrain complexities. Robots start in the center of the terrain and are given a forward velocity command of $0 . 7 5 \mathrm { m } / \mathrm { s }$ , and a side velocity command randomized within $[ - 0 . 1 , 0 . 1 ] \mathrm { m } / \mathrm { s }$ . (a) Success rate for climbing stairs, descending stairs and traversing discrete obstacles. (b) Success rate for climbing and descending sloped terrains. 
+ +![](images/4eab7c74f6cac548026b3ff28e7f10ab6832f65e03b74ce577a9f1ea90202882.jpg) +Figure 6: ANYmal C with a fixed arm, ANYmal B, A1 and Cassie in simulation. + +# 4.2 Simulation + +For our simulation and deployment experiments, we use a policy trained with 4096 robots and a batch size of 98304, which we train for 1500 policy updates in under 20 minutes2. We begin by measuring the performance of our trained policy in simulation. To that end, we perform robustness and traversability tests. For each terrain type, we command the robots to traverse the representative difficulty of the terrain at high forward velocity and measure the success rate. A success is defined as managing to cross the terrain while avoiding any contacts on the robot’s base. Fig. 5 shows the results for the different terrains. For stairs, we see a nearly $1 0 0 \%$ success rate for steps up to $\mathrm { 0 . 2 m }$ , which is the hardest stair difficulty we train on and close to the kinematic limits of our robot. Randomized obstacles seem to be more demanding, with the success rate decreasing steadily. We must note that in this case, the largest step is double the reported height since neighboring obstacles can have positive and negative heights. In the case of slopes, we can observe that after $2 5 \mathrm { d e g }$ the robots are not able to climb anymore but still learn to slide down with a moderate success rate. + +Given our relatively simple rewards and action space, the policy is free to adopt any gait and behavior. Interestingly, it always converges to a trotting gait, but there are often artifacts in the behavior, such as a dragging leg or unreasonably high or low base heights. After tuning of the reward weights, we can obtain a policy that respects all our constraints and can be transferred to the physical robot. + +To verify the generalizability of the approach, we train policies for multiple robots with the same set-up. 
We use the ANYmal C robot with a fixed robotic arm, which adds about $2 0 \%$ of additional weight, and the ANYmal B robot, which has comparable dimensions but modified kinematic and dynamic properties. In these two cases, we can retrain a policy without any modifications to the rewards or algorithm hyper-parameters and obtain a very similar performance. Next, we use the Unitree A1 robot, which has smaller dimensions, four times lower weight, and a different leg configuration. In this case, we remove the actuator model of the ANYdrive motors, reduce PD gains and the torque penalties, and change the default joint configurations. We can train a dynamic policy that learns to solve the same terrains even with the reduced size of the robot. Finally, we apply our approach to Agility Robotics’ bipedal robot Cassie. We find that an additional reward encouraging standing on a single foot is necessary to achieve a walking gait. With this addition, we are able to train the robot on the same terrains as its quadrupedal counterparts. Fig. 6 shows the different robots. + +![](images/f0dd8a9ddc78c32773c604659ba9f56cbd659b68c879ccbb91f00fe6ccb27b59.jpg) +Figure 7: Locomotion policy, trained in under $2 0 \mathrm { { m i n } }$ , deployed on the physical robot. + +# 4.3 Sim-to-real Transfer + +On the physical robot, our policy is fixed. We compute the observations from the robot’s sensors, feed them to the policy, and directly send the produced actions as target joint positions to the motors. We do not apply any additional filtering or constraint satisfaction checks. The terrain height measurements are queried from an elevation map that the robot is building from Lidar scans. + +Unfortunately, this height map is far from perfect, which results in a decrease in robustness between simulation and reality. We observe that these issues mainly occur at high velocities and therefore reduce the maximum linear velocity commands to $0 . 
6 \mathrm { m } / \mathrm { s }$ for policies deployed on the hardware. The robot can walk up and down stairs and handles obstacles in a dynamic manner. We show samples of these experiments in Fig. 7 and in the supplementary video. To overcome issues with imperfect terrain mapping or state estimation drift, the authors of [19] implemented a teacher-student set-up, which provided outstanding robustness even in adverse conditions. As part of future work, we plan to merge the two approaches. + +# 5 Conclusion + +In this work, we demonstrated that a complex real-world robotics task can be trained in minutes with an on-policy deep reinforcement learning algorithm. Using an end-to-end GPU pipeline with thousands of robots simulated in parallel, combined with our proposed curriculum structure, we showed that the training time can be reduced by multiple orders of magnitude compared to previous work. We discussed multiple modifications to the learning algorithm and the standard hyper-parameters required to use the massively parallel regime effectively. Using our fast training pipeline, we performed many training runs, simplified the set-up, and kept only essential components. We showed that the task can be solved using simple observation and action spaces as well as relatively straightforward rewards without encouraging particular gaits or providing motion primitives. + +The purpose of this work is not to obtain the absolute best-performing policy with the highest robustness. For that use case, many other techniques can be incorporated into the pipeline. We aim to show that a policy can be trained in record time with our set-up while still being usable on the real hardware. We wish to shift other researchers’ perspective on the required training time for a real-world application, and hope that our work can serve as a reference for future research. We expect many other tasks to benefit from the massively parallel regime. 
By reducing the training time of these future robotic tasks, we can greatly accelerate the developments in this field. + +# Acknowledgments + +We would like to thank Mayank Mittal, Joonho Lee, Takahiro Miki, and Peter Werner for their valuable suggestions and help with hardware experiments as well as the Isaac Gym and PhysX teams for their continuous support. + +# References + +[1] J. Hwangbo, J. Lee, A. Dosovitskiy, D. Bellicoso, V. Tsounis, V. Koltun, and M. Hutter. Learning agile and dynamic motor skills for legged robots. Science Robotics, 4(26), 2019. +[2] S. Gu, E. Holly, T. Lillicrap, and S. Levine. Deep reinforcement learning for robotic manipulation with asynchronous off-policy updates. In IEEE International Conference on Robotics and Automation (ICRA), May 2017. +[3] G. Kahn, A. Villaflor, B. Ding, P. Abbeel, and S. Levine. Self-supervised deep reinforcement learning with generalized computation graphs for robot navigation. In IEEE International Conference on Robotics and Automation (ICRA), 2018. +[4] OpenAI, I. Akkaya, M. Andrychowicz, M. Chociej, M. Litwin, B. McGrew, A. Petron, A. Paino, M. Plappert, G. Powell, R. Ribas, J. Schneider, N. Tezak, J. Tworek, P. Welinder, L. Weng, Q. Yuan, W. Zaremba, and L. Zhang. Solving rubik’s cube with a robot hand, 2019. +[5] E. Todorov, T. Erez, and Y. Tassa. Mujoco: A physics engine for model-based control. In IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), 2012. +[6] E. Coumans and Y. Bai. Pybullet, a python module for physics simulation for games, robotics and machine learning. http://pybullet.org, 2016–2021. +[7] J. Hwangbo, J. Lee, and M. Hutter. Per-contact iteration method for solving contact dynamics. IEEE Robotics and Automation Letters, 3(2), 2018. URL www.raisim.com. +[8] V. Makoviychuk, L. Wawrzyniak, Y. Guo, M. Lu, K. Storey, M. Macklin, D. Hoeller, N. Rudin, A. Allshire, A. Handa, and G. State. Isaac gym: High performance GPU based physics simulation for robot learning. 
In Conference on Neural Information Processing Systems (NeurIPS) Datasets and Benchmarks Track, 2021. +[9] N. Heess, D. TB, S. Sriram, J. Lemmon, J. Merel, G. Wayne, Y. Tassa, T. Erez, Z. Wang, S. M. A. Eslami, M. A. Riedmiller, and D. Silver. Emergence of locomotion behaviours in rich environments. CoRR, abs/1707.02286, 2017. +[10] A. Stooke and P. Abbeel. Accelerated methods for deep reinforcement learning. CoRR, abs/1803.02811, 2018. +[11] B. Shacklett, E. Wijmans, A. Petrenko, M. Savva, D. Batra, V. Koltun, and K. Fatahalian. Large batch simulation for deep reinforcement learning. In International Conference on Learning Representations (ICLR), 2021. +[12] J. Liang, V. Makoviychuk, A. Handa, N. Chentanez, M. Macklin, and D. Fox. Gpu-accelerated robotic simulation for distributed reinforcement learning. In Conference on Robot Learning (CoRL), 2018. +[13] C. D. Freeman, E. Frey, A. Raichuk, S. Girgin, I. Mordatch, and O. Bachem. Brax - a differentiable physics engine for large scale rigid body simulation. In 35th Conference on Neural Information Processing Systems (NeurIPS) Datasets and Benchmarks Track, 2021. +[14] A. Bouman, M. F. Ginting, N. Alatur, M. Palieri, D. D. Fan, T. Touma, T. Pailevanian, S.- K. Kim, K. Otsu, J. Burdick, and A.-a. Agha-Mohammadi. Autonomous spot: Long-range autonomous exploration of extreme environments with legged locomotion. In IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), 2020. +[15] C. Gehring, P. Fankhauser, L. Isler, R. Diethelm, S. Bachmann, M. Potz, L. Gerstenberg, and M. Hutter. Anymal in the field: Solving industrial inspection of an offshore hvdc platform with a quadrupedal robot. In Field and Service Robotics, 2021. +[16] J. Lee, J. Hwangbo, L. Wellhausen, V. Koltun, and M. Hutter. Learning quadrupedal locomotion over challenging terrain. Science Robotics, 5(47), 2020. +[17] V. Tsounis, M. Alge, J. Lee, F. Farshidian, and M. Hutter. 
Deepgait: Planning and control of quadrupedal gaits using deep reinforcement learning. IEEE Robotics and Automation Letters, PP, 03 2020. +[18] S. Gangapurwala, M. Geisert, R. Orsolino, M. Fallon, and I. Havoutis. Real-time trajectory adaptation for quadrupedal locomotion using deep reinforcement learning. In IEEE International Conference on Robotics and Automation (ICRA), 2021. +[19] T. Miki, J. Lee, L. Wellhausen, V. Koltun, and M. Hutter. Wild anymal: Robust zero-shot perceptive locomotion. Submitted to Science Robotics, 2021. +[20] J. Siekmann, K. Green, J. Warila, A. Fern, and J. W. Hurst. Blind bipedal stair traversal via sim-to-real reinforcement learning. CoRR, abs/2105.08328, 2021. +[21] C. Gregg and K. Hazelwood. Where is the data? why you cannot debate cpu vs. gpu performance without the answer. In IEEE International Symposium on Performance Analysis of Systems and Software (ISPASS), 2011. +[22] J. Schulman, F. Wolski, P. Dhariwal, A. Radford, and O. Klimov. Proximal policy optimization algorithms. CoRR, abs/1707.06347, 2017. +[23] J. Schulman, P. Moritz, S. Levine, M. Jordan, and P. Abbeel. High-dimensional continuous control using generalized advantage estimation. In Proceedings of the International Conference on Learning Representations (ICLR), 2016. +[24] F. Pardo, A. Tavakoli, V. Levdik, and P. Kormushev. Time limits in reinforcement learning. CoRR, abs/1712.00378, 2017. +[25] G. Brockman, V. Cheung, L. Pettersson, J. Schneider, J. Schulman, J. Tang, and W. Zaremba. Openai gym, 2016. +[26] A. Hill, A. Raffin, M. Ernestus, A. Gleave, A. Kanervisto, R. Traore, P. Dhariwal, C. Hesse, O. Klimov, A. Nichol, M. Plappert, A. Radford, J. Schulman, S. Sidor, and Y. Wu. Stable baselines. https://github.com/hill-a/stable-baselines, 2018. +[27] J. Achiam. Spinning up in deep reinforcement learning, 2018. URL https://spinningup. openai.com/en/latest/. +[28] R. Wang, J. Lehman, J. Clune, and K. O. Stanley. 
Paired open-ended trailblazer (POET): endlessly generating increasingly complex and diverse learning environments and their solutions. CoRR, abs/1901.01753, 2019. +[29] Z. Xie, H. Y. Ling, N. H. Kim, and M. van de Panne. Allsteps: Curriculum-driven learning of stepping stone skills. Proceedings of ACM SIGGRAPH / Eurographics Symposium on Computer Animation, 2020. +[30] C. Florensa, D. Held, X. Geng, and P. Abbeel. Automatic goal generation for reinforcement learning agents. In Proceedings of the 35th International Conference on Machine Learning (ICML), volume 80 of Proceedings of Machine Learning Research, 2018. \ No newline at end of file diff --git a/parse/dev/3RBY8fKjHeu/3RBY8fKjHeu.md b/parse/dev/3RBY8fKjHeu/3RBY8fKjHeu.md new file mode 100644 index 0000000000000000000000000000000000000000..25b944c3974c3467c8e4f1b34efa8a0bb617ba58 --- /dev/null +++ b/parse/dev/3RBY8fKjHeu/3RBY8fKjHeu.md @@ -0,0 +1,243 @@ +# DayDreamer: World Models for Physical Robot Learning + +# Philipp Wu\* + +Alejandro Escontrela\* Danijar Hafner\* + +Ken Goldberg Pieter Abbeel + +University of California, Berkeley \*Equal contribution + +Abstract: To solve tasks in complex environments, robots need to learn from experience. Deep reinforcement learning is a common approach to robot learning but requires a large amount of trial and error to learn, limiting its deployment in the physical world. As a consequence, many advances in robot learning rely on simulators. On the other hand, learning inside of simulators fails to capture the complexity of the real world, is prone to simulator inaccuracies, and the resulting behaviors do not adapt to changes in the world. The Dreamer algorithm has recently shown great promise for learning from small amounts of interaction by planning within a learned world model, outperforming pure reinforcement learning in video games. 
Learning a world model to predict the outcomes of potential actions enables planning in imagination, reducing the amount of trial and error needed in the real environment. However, it is unknown whether Dreamer can facilitate faster learning on physical robots. In this paper, we apply Dreamer to 4 robots to learn online and directly in the real world, without any simulators. Dreamer trains a quadruped robot to roll off its back, stand up, and walk from scratch and without resets in only 1 hour. We then push the robot and find that Dreamer adapts within 10 minutes to withstand perturbations or quickly roll over and stand back up. On two different robotic arms, Dreamer learns to pick and place objects from camera images and sparse rewards, approaching human-level teleoperation performance. On a wheeled robot, Dreamer learns to navigate to a goal position purely from camera images, automatically resolving ambiguity about the robot orientation. Using the same hyperparameters across all experiments, we find that Dreamer is capable of online learning in the real world, which establishes a strong baseline. We release our infrastructure for future applications of world models to robot learning. Videos are available on the project website: https://danijar.com/daydreamer + +![](images/17f2d11eee9937e70f62a1993623ebccd221887d067e71919c350fa57662f4d3.jpg) +Figure 1: To study the applicability of Dreamer for sample-efficient robot learning, we apply the algorithm to learn robot locomotion, manipulation, and navigation tasks from scratch in the real world on 4 robots, without simulators. The tasks evaluate a diverse range of challenges, including continuous and discrete actions, dense and sparse rewards, proprioceptive and camera inputs, as well as sensor fusion of multiple input modalities. Learning successfully using the same hyperparameters across all experiments, Dreamer establishes a strong baseline for real world robot learning. 
+ +# 1 Introduction + +Teaching robots to solve complex tasks in the real world is a foundational problem of robotics research. Deep reinforcement learning (RL) offers a popular approach to robot learning that enables robots to improve their behavior over time through trial and error. However, current algorithms require too much interaction with the environment to learn successful behaviors. Recently, modern world models have shown great promise for data efficient learning in simulated domains and video games (Hafner et al., 2019; 2020). Learning world models from past experience enables robots to imagine the future outcomes of potential actions, reducing the amount of trial and error in the real environment needed to learn. + +While learning accurate world models can be challenging, they offer compelling properties for robot learning. By predicting future outcomes, world models allow for planning and behavior learning given only small amounts of real world interaction (Gal et al., 2016; Ebert et al., 2018). Moreover, world models summarize general dynamics knowledge about the environment that, once learned, could be reused for a wide range of downstream tasks (Sekar et al., 2020). World models also learn representations that fuse multiple sensor modalities and integrate them into latent states, reducing the need for sophisticated state estimators. Finally, world models generalize well from available offline data (Yu et al., 2021), which further accelerates learning in the real world. + +![](images/e30f877426a1aa2686b70c08629a56889403c96105b017890f8ea57b7982c4a2.jpg) +Figure 2: Dreamer follows a simple pipeline for online learning on robot hardware without simulators. The current learned policy collects experience on the robot. This experience is added to the replay buffer. The world model is trained on replayed off-policy sequences through supervised learning. 
An actor critic algorithm optimizes a neural network policy from imagined rollouts in the latent space of the world model. We parallelize data collection and neural network learning. + +Despite the promises of world models, learning accurate world models for the real world is an open challenge. In this paper, we leverage recent advances of the Dreamer world model for training a variety of robots in the most straight-forward and fundamental problem setting: online reinforcement learning in the real world, without simulators or demonstrations. As shown in Figure 2, Dreamer learns a world model from a replay buffer of past experience, learns behaviors from rollouts imagined in the latent space of the world model, and continuously interacts with the environment to explore and improve its behaviors. Our aim is to push the limits of robot learning directly in the real world and offer a robust platform to enable future work that develops the benefits of world models for robot learning. The key contributions of this paper are summarized as follows: + +• Dreamer on Robots We apply Dreamer to 4 robots, demonstrating successful learning directly in the real world, without introducing new algorithms. The tasks cover a range of challenges, including different action spaces, sensory modalities, and reward structures. +• Walking in 1 Hour We teach a quadruped from scratch in the real world to roll off its back, stand up, and walk in only 1 hour. Afterwards, we find that the robot adapts to being pushed within 10 minutes, learning to withstand pushes or quickly roll over and get back on its feet. +• Visual Pick and Place We train robotic arms to pick and place objects from sparse rewards, which requires localizing objects from pixels and fusing images with proprioceptive inputs. The learned behavior outperforms model-free agents and approaches the performance of a human teleoperator using the same control interface as the robot. 
+β€’ Open Source We publicly release the software infrastructure for all our experiments, which supports different action spaces and sensory modalities, offering a flexible platform for future research of world models for robot learning in the real world. + +![](images/bbd9aa6b3f541685e1ecf9dd1c4451b92904b361a6547ee2e39414769cb64de4.jpg) +Figure 3: Neural Network Training We leverage the Dreamer algorithm (Hafner et al., 2019; 2020) for fast robot learning in real world. Dreamer consists of two main neural network components, the world model and the policy. Left: The world model follows the structure of a deep Kalman filter that is trained on subsequences drawn from the replay buffer. The encoder fuses all sensory modalities into discrete codes. The decoder reconstructs the inputs from the codes, providing a rich learning signal and enabling human inspection of model predictions. A recurrent state-space model (RSSM) is trained to predict future codes given actions, without observing intermediate inputs. + +Right: The world model enables massively parallel policy optimization from imagined rollouts in the compact latent space using a large batch size, without having to reconstruct sensory inputs. Dreamer trains a policy network and value network from the imagined rollouts and a learned reward function. + +# 2 Approach + +We leverage the Dreamer algorithm (Hafner et al., 2019; 2020) for online learning on physical robots, without the need for simulators. Figure 2 shows an overview of the approach. Dreamer learns a world model from a replay buffer of past experiences, uses an actor critic algorithm to learn behaviors from trajectories predicted by the learned model, and deploys its behavior in the environment to continuously grow the replay buffer. We decouple learning updates from data collection to meet latency requirements and to enable fast training without waiting for the environment. 
In our implementation, a learner thread continuously trains the world model and actor critic behavior, while an actor thread in parallel computes actions for environment interaction. + +World Model Learning The world model is a deep neural network that learns to predict the environment dynamics, as shown in Figure 3 (left). Because sensory inputs can be large images, we predict future representations rather than future inputs. This reduces accumulating errors and enables massively parallel training with a large batch size. Thus, the world model can be thought of as a fast simulator of the environment that the robot learns autonomously, starting from a blank slate and continuously improving its model as it explores the real world. The world model is based on the Recurrent State-Space Model (RSSM; Hafner et al., 2018), which consists of four components: + +$$ +\begin{array}{rlrl} \text{Encoder network:} & \operatorname{enc}_\theta\big(s_t \,\big|\, s_{t-1}, a_{t-1}, x_t\big) & \quad \text{Decoder network:} & \operatorname{dec}_\theta\big(s_t\big) \approx x_t \\ \text{Dynamics network:} & \operatorname{dyn}_\theta\big(s_t \,\big|\, s_{t-1}, a_{t-1}\big) & \quad \text{Reward network:} & \operatorname{rew}_\theta\big(s_{t+1}\big) \approx r_t \end{array} +$$ + +Physical robots are often equipped with multiple sensors of different modalities, such as proprioceptive joint readings, force sensors, and high-dimensional inputs such as RGB and depth camera images. The encoder network fuses all sensory inputs $x _ { t }$ together into the stochastic representations $z _ { t }$ . The dynamics model learns to predict the sequence of stochastic representations by using its recurrent state $h _ { t }$ . 
The decoder reconstructs the sensory inputs to provide a rich signal for learning representations and enables human inspection of model predictions. In our experiments, the robot has to discover task rewards by interacting with the real world, which the reward network learns to predict. Using manually specified rewards as a function of the decoded sensory inputs is also possible. We optimize all components of the world model jointly by stochastic backpropagation (Kingma and Welling, 2013; Rezende et al., 2014). + +Actor Critic Learning While the world model represents task-agnostic knowledge about the dynamics, the actor critic algorithm learns a behavior that is specific to the task at hand. As shown in Figure 3 (right), we learn behaviors from rollouts that are predicted in the latent space of the world model, without decoding observations. This enables massively parallel behavior learning with typical batch sizes of 16K on a single GPU. The actor critic algorithm consists of an actor network $\pi ( a _ { t } | s _ { t } )$ and a critic network $v ( s _ { t } )$ . + +The role of the actor network is to learn a distribution over successful actions $a _ { t }$ for each latent model state $s _ { t }$ that maximizes the sum of future predicted task rewards. The critic network learns to predict the sum of future task rewards through temporal difference learning (Sutton and Barto, 2018). This allows the algorithm to take into account rewards beyond the planning horizon of $H = 1 6$ steps to learn long-term strategies. Given a predicted trajectory of model states, the critic is trained to regress the return of the trajectory. We compute $\lambda$ -returns following Hafner et al. (2020; 2019): + +$$ +V _ { t } ^ { \lambda } \doteq r _ { t } + \gamma \Big ( ( 1 - \lambda ) v ( s _ { t + 1 } ) + \lambda V _ { t + 1 } ^ { \lambda } \Big ) , \quad V _ { H } ^ { \lambda } \doteq v ( s _ { H } ) . 
+$$ + +While the critic network is trained to regress the $\lambda$ -returns, the actor network is trained to maximize them. Different gradient estimators are available for computing the policy gradient for optimizing the actor, such as Reinforce (Williams, 1992) and the reparameterization trick (Kingma and Welling, 2013; Rezende et al., 2014) that directly backpropagates return gradients through the differentiable dynamics network (Henaff et al., 2019). Following Hafner et al. (2020), we choose reparameterization gradients for continuous control tasks and Reinforce gradients for tasks with discrete actions. In addition to maximizing returns, the actor is also incentivized to maintain high entropy to prevent collapse to a deterministic policy and maintain some amount of exploration throughout training: + +$$ +\begin{array} { r } { \mathcal { L } ( \pi ) \doteq - \operatorname { E } \bigl [ \sum _ { t = 1 } ^ { H } \ln \pi ( a _ { t } \mid s _ { t } ) \mathrm { s g } ( V _ { t } ^ { \lambda } - v ( s _ { t } ) ) + \eta \mathrm { H } \bigl [ \pi ( a _ { t } \mid s _ { t } ) \bigr ] \bigr ] } \end{array} +$$ + +We optimize the actor and critic using the Adam optimizer (Kingma and Ba, 2014). To compute the $\lambda$ -returns, we use a slowly updated copy of the critic network as common in the literature (Mnih et al., 2015; Lillicrap et al., 2015). The actor and critic gradients do not affect the world model, as this would lead to incorrect and overly optimistic model predictions. The hyperparameters are listed in Appendix D. + +# 3 Experiments + +We evaluate Dreamer on 4 robots, each with a different task, and compare its performance to appropriate algorithmic and human baselines. The experiments are representative of common robotic tasks, such as locomotion, manipulation, and navigation. The tasks pose a diverse range of challenges, including continuous and discrete actions, dense and sparse rewards, proprioceptive and image observations, and sensor fusion. 
The goal of the experiments is to evaluate whether the recent successes of learned world models enables sample-efficient robot learning directly in the real world. Specifically, we aim to answer the following research questions: + +β€’ Does Dreamer enable robot learning directly in the real world, without simulators? β€’ Does Dreamer succeed across various robot platforms, sensory modalities, and action spaces? β€’ How does the data-efficiency of Dreamer compare to previous reinforcement learning algorithms? + +Implementation We build on the official implementation of DreamerV2 (Hafner et al., 2020). We develop an asynchronous actor and learner setup, which is essential in environments with high control rates, such as the quadruped, and also accelerates learning for slower environments, such as the robot arms. The actor thread computes online actions for the robot and sends trajectories of 128 time steps to the replay buffer. The learner thread samples data from the replay buffer, updates the world model, and optimizes the policy using imagination rollouts. Policy weights are synced from the learner to the actor every 20 seconds. We use an RSSM with 256 units to speed up the training computation. We use identical hyperparameters across all experiments, enabling off-the-shelf training on different robot embodiments. + +![](images/159d86a4fe017221206965fa98efc6ce35e16bebec7536f231b04a5fa470830b.jpg) +Figure 4: A1 Quadruped Walking Starting from lying on its back with the feet in the air, Dreamer learns to roll over, stand up, and walk in 1 hour of real world training time, without simulators or resets. In contrast, SAC only learns to roll over but neither to stand up nor to walk. For SAC, we also had to help the robot out of a dead-locked leg configuration during training. On the right we show training curves for both SAC and Dreamer. The maximum reward is 14. 
The filled circles indicate times where the robot fell on its back, requiring the learning of a robust strategy for getting back up. After 1 hour of training, we start pushing the robot and find that it adapts its behavior within 10 minutes to withstand light pushes and quickly roll back on its feet for hard pushes. The graph shows a single training run with the shaded area indicating one standard deviation within each time bin. + +Baselines We compare to a strong learning algorithm for each of our experimental setups. The A1 quadruped robot uses continuous actions and low-dimensional inputs, allowing us to compare to SAC (Haarnoja et al., 2018a;b), a popular algorithm for data-efficient continuous control. For the visual pick and place experiments on the XArm and UR5 robots, inputs are images and proprioceptive readings and actions are discrete, suggesting algorithms from the DQN (Mnih et al., 2015) line of work as baselines. We choose Rainbow (Hessel et al., 2018) as a powerful representative of this category, an algorithm that combines many improvements of DQN. To input the proprioceptive readings, we concatenate them as broadcasted planes to the RGB channels of the image, a common practice in the literature (Schrittwieser et al., 2019). For the UR5, we additionally compare against PPO (Schulman et al., 2017), with similar modifications for fusing image and proprioceptive readings. In addition, we compare against a human operator controlling the robot arm through the robot control interface. For the Sphero navigation task, inputs are images and actions are continuous. The state-ofthe-art baseline in this category is DrQv2 (Yarats et al., 2021), which uses image augmentation to increase sample-efficiency. + +# 3.1 A1 Quadruped Walking + +This high-dimensional continuous control task requires training a quadruped robot to roll over from its back, stand up, and walk forward at a fixed target velocity. 
Prior work in quadruped locomotion requires either extensive training in simulation under domain randomization, using recovery controllers to avoid unsafe states, or defining the action space as parameterized trajectory generators that restrict the space of motions (Rusu et al., 2016; Peng et al., 2018; Rudin et al., 2021; Lee et al., 2020; Yang et al., 2019). In contrast, we train in the end-to-end reinforcement learning setting directly on the robot, without simulators or resets. We use the Unitree A1 robot that consists of 12 direct drive motors. The motors are controlled at $2 0 \mathrm { H z }$ via continuous actions that represent motor angles that are realized by a PD controller on the hardware. Actions are filtered with a Butterworth filter to protect the motor from high-frequency actions. The input consists of motor angles, orientations, and angular velocities. Due to space constraints, we manually intervene when the robot has reached the end of the available training area, without modifying the joint configuration or orientation that the robot is in. + +![](images/69e7a0dc11e3a7ecd812dec526777f2a39e7aed63605587ef789903e7f57fb8c.jpg) +Figure 8: Within 10 minutes of perturbing the learned walking behavior, the robot adapts to withstanding pushes or quickly rolling over and back on its feet. + +The reward function is the sum of five terms. An upright reward is computed from the base frame up vector $\hat { z } ^ { T }$ , terms for matching the standing pose are computed from the joint angles of the hips, shoulders, and knees, and a forward velocity term is computed from the projected forward velocity $\boldsymbol { s } _ { v } \boldsymbol { x }$ and the total velocity $s _ { v }$ . Without the reward curriculum, the agent receives spurious reward values due to the velocity estimator’s dependence on foot-ground contact events. 
Each of the five terms is active while its preceding terms are satisfied to at least 0.7 and otherwise set to 0: + +![](images/69863294723843746383e47fe99dcd32744e499d1c526f51df4121c52ff99fe8.jpg) +Figure 5: UR5 Multi Object Visual Pick and Place This task requires learning to locate three ball objects from third-person camera images, grasp them, and move them into the other bin. The arm is free to move within and above the bins and sparse rewards are given for grasping a ball and for dropping it in the opposite bin. The environment requires the world model to learn multi-object dynamics in the real world and the sparse reward structure poses a challenge for policy optimization. Dreamer overcomes the challenges of visual localization and sparse rewards on this task, learning a successful strategy within a few hours of autonomous operation. + +$$ +\begin{array} { r l } { r ^ { \mathrm { u p r } } \doteq ( \hat { z } ^ { T } [ 0 , 0 , 1 ] - 1 ) / 2 } & { { } r ^ { \mathrm { h i p } } \doteq 1 - \frac 1 4 \| q ^ { \mathrm { h i p } } + 0 . 2 \| _ { 1 } \quad r ^ { \mathrm { s h o u l d e r } } \doteq 1 - \frac 1 4 \| q ^ { \mathrm { s h o u l d e r } } + 0 . 2 \| _ { 1 } } \end{array} +$$ + +$$ +\begin{array} { r l } { r ^ { \mathrm { k n e e } } \doteq 1 - \frac 1 4 \parallel q ^ { \mathrm { k n e e } } - 1 . 0 \parallel _ { 1 } } & { { } r ^ { \mathrm { v e l o c i t y } } \doteq 5 \big ( \operatorname* { m a x } ( 0 , ^ { \mathcal { B } } v _ { x } ) / \parallel ^ { \mathcal { B } } v \parallel _ { 2 } \cdot \mathrm { c l i p } ( ^ { \mathcal { B } } v _ { x } / 0 . 3 , - 1 , 1 ) + 1 \big ) } \end{array} +$$ + +As shown in Figure 4, after one hour of training, Dreamer learns to consistently flip the robot over from its back, stand up, and walk forward. In the first 5 minutes of training, the robot manages to roll off its back and land on its feet. 20 minutes later, it learns how to stand up on its feet. 
About 1 hour into training, the robot learns a pronking gait to walk forward at the desired velocity. After succeeding at this task, we tested the robustness of the algorithms by repeatedly knocking the robot off of its feet with a large pole, shown in Figure 8. Within 10 minutes of additional online learning, the robot adapts and withstands pushes or quickly rolls back on its feet. In comparison, SAC quickly learns to roll off its back but fails to stand up or walk given the small data budget. + +# 3.2 UR5 Multi-Object Visual Pick and Place + +Common in warehouse and logistics environments, pick and place tasks require a robot manipulator to transport items from one bin into another. Figure 5 shows a successful pick and place cycle of this task. The task is challenging because of sparse rewards, the need to infer object positions from pixels, and the challenging dynamics of multiple moving objects. The sensory inputs consist of proprioceptive readings (joint angles, gripper position, end effector Cartesian position) and a 3rd person RGB image of the scene. Successfully grasping one of the 3 objects, detected by partial gripper closure, results in a $+ 1$ reward, releasing the object in the same bin gives a $- 1$ reward, and placing in the opposite bin gives a $+ 1 0$ reward. We control the UR5 robot from Universal Robotics at $2 \ \mathrm { H z }$ . Actions are discrete for moving the end effector in increments along X, Y, and $\textsf { Z }$ axes and for toggling the gripper state. Movement in the Z axis is only enabled while holding an object and the gripper automatically opens once above the correct bin. We estimate human teleoperation performance by recording 3 demonstrators for 20 minutes each, controlling the UR5 with a joystick. + +Dreamer reaches an average pick rate of 2.5 objects per minute within 8 hours. The robot initially struggles to learn as the reward signal is very sparse, but begins to gradually improve after 2 hours of training. 
The robot first learns to localize the objects and toggles the gripper when near an object. Over time, grasping becomes precise and the robot learns to push objects out of corners. Figure 5 shows the learning curves of Dreamer compared to Rainbow DQN, PPO, and the human baseline. Both Rainbow DQN and PPO only learn the short-sighted behavior of grasping and immediately dropping objects in the same bin. In contrast, Dreamer approaches human-level teleoperation performance after 8 hours. We hypothesize that Rainbow DQN and PPO fail because they require larger amounts of experience, which is not feasible for us to collect in the real world. + +# 3.3 XArm Visual Pick and Place + +While the UR5 robot is a high performance industrial robot, the XArm is an accessible low-cost 7 DOF manipulator, which we control at approximately $0 . 5 \ : \mathrm { H z }$ . Similar to Section 3.2, the task requires localizing and grasping a soft object and moving it from one bin to another and back, shown in Figure 6. We connect the object to the gripper with a string, which makes it less likely for the object to get stuck in corners at the cost of more complex dynamics. The sparse reward, discrete action space, and observation space match the UR5 setup except for the addition of depth image observations. + +![](images/dfce202941b6d7b7a3b4e91b152da625264b3b1c43837193ab53e137e11b01f3.jpg) +Figure 6: XArm Visual Pick and Place The XArm is an affordable robot arm that operates slower than the UR5. To demonstrate successful learning on this robot, we use a third-person RealSense camera with RGB and depth modalities, as well as proprioceptive inputs for the robot arm, requiring the world model to learn sensor fusion. The pick and place task uses a soft object. While soft objects would be challenging to model accurately in a simulator, Dreamer avoids this issue by directly learning on the real robot without a simulator. 
While Rainbow and PPO using R3M visual embeddings converge to the local optimum of grasping and ungrasping the object in the same bin, Dreamer learns a successful pick and place policy from sparse rewards in under 10 hours. + +Dreamer learns a policy that enables the XArm to achieve an average pick rate of 3.1 objects per minute in 10 hours of time, which is comparable to human performance on this task. Figure 6 shows that Dreamer learns to solve the task within 10 hours, whereas the Rainbow algorithm, a top model-free algorithm for discrete control from pixels, fails to learn. We additionally compare Dreamer against a PPO baseline that utilizes R3M (Nair et al., 2022) pretrained visual embeddings for the state, but notice no improvement in performance. Interestingly, we observed that Dreamer learns to sometimes use the string to pull the object out of a corner before grasping it, demonstrating multi-modal behaviors. Moreover, we observed that when lighting conditions change drastically (such as sharp shadows during sunrise), performance initially collapses but Dreamer then adapts to the changing conditions and exceeds its previous performance after a few hours of additional training, reported in Appendix A. + +# 3.4 Sphero Navigation + +We evaluate Dreamer on a visual navigation task that requires maneuvering a wheeled robot to a fixed goal location given only RGB images as input. We use the Sphero Ollie robot, a cylindrical robot with two controllable motors, which we control through continuous torque commands at $2 \ : \mathrm { H z }$. Because the robot is symmetric and the robot only has access to image observations, it has to infer the heading direction from the history of observations. The robot is provided with a dense reward equal to the negative L2 distance, which is computed using an oracle vision pipeline that detects the Sphero's position (this information is not provided to the agent). 
As the goal is fixed, after 100 environment steps, we end the episode and randomize the robot’s position through a sequence of high power random motor actions. + +In 2 hours, Dreamer learns to quickly and consistently navigate to the goal and stay near the goal for the remainder of the episode. As shown in Figure 7, Dreamer achieves an average distance to the goal of 0.15, measured in units of the area size and averaged across time steps. We find that DrQv2, a model-free algorithm specifically designed to continuous control from pixels, achieves similar performance. This result matches the simulated experiments of Yarats et al. (2021) that showed the two algorithms to perform similarly for continuous control tasks from images. + +# 4 Related Work + +Existing work on robot learning commonly leverages large amounts of simulated experience before deploying to the real world (Rusu et al., 2016; Peng et al., 2018; OpenAI et al., 2018; Lee et al., 2020; Irpan et al., 2020; Kumar et al., 2021; Siekmann et al., 2021; Escontrela et al., 2022), leverage fleets of robots to collect experience datasets (Kalashnikov et al., 2018; Dasari et al., 2019; Kalashnikov et al., 2021; Ebert et al., 2021), or rely on external information such as human expert demonstrations or task priors to achieve sample-efficient learning (Xie et al., 2019; Schoettler et al., 2019; James et al., 2021; Shah and Levine, 2022; Bohez et al., 2022; Sivakumar et al., 2022). However, designing simulated tasks and collecting expert demonstrations is time-consuming. Moreover, many of these approaches require specialized algorithms for leveraging offline experience, demonstrations, or simulator inaccuracies. In contrast, our experiments show that learning end-to-end from rewards in the physical world is feasible for a diverse range of tasks through world models. 
+ +![](images/372acb53fc3f9ab9f178baac319f8b0cb0c4ee1ca96f374bada7397c4ec23630.jpg) +Figure 7: Sphero Navigation This task requires the Sphero robot to navigate to a goal location given a top-down RGB image as the only input. The task requires the robot to localize itself from raw pixels, to infer its orientation from the sequence of past images because it is ambiguous from a single image, and to control the robot from under-actuated motors that require building up momentum over time. Dreamer learns a successful policy on this task in under 2 hours. + +Relatively few works have demonstrated end-to-end learning from scratch in the physical world. Visual Foresight (Finn et al., 2016; Finn and Levine, 2017; Ebert et al., 2018) learns a video prediction model to solve real world tasks by online planning, but is limited to short-horizon tasks and requires generating images during planning, making it computationally expensive. Yang et al. (2019; 2022) learn quadruped locomotion through a model-based approach by predicting foot placement and leveraging a domain-specific controller to achieve them. Ha et al. (2020) learn a quadruped walking policy by relying on a scripted reset policy, so the robot does not have to learn to stand up. SOLAR (Zhang et al., 2019) learns a latent dynamics model from images and demonstrates reaching and pushing with a robot arm. Nagabandi et al. (2019) learns manipulation policies by planning through a learned dynamics model from state observations. In comparison, our experiments show successful learning across 4 challenging robot tasks that cover a wide range of challenges and sensory modalities, with a single learning algorithm and hyperparameter setting. + +# 5 Discussion + +We applied Dreamer to physical robot learning, finding that modern world models enable sampleefficient robot learning for a range of tasks, from scratch in the real world and without simulators. 
We also find that the approach is generally applicable in that it can solve robot locomotion, manipulation, and navigation tasks without changing hyperparameters. Dreamer taught a quadruped robot to roll off the back, stand up, and walk in 1 hour from scratch, which previously required extensive training in simulation followed by transfer to the real world or parameterized trajectory generators and given reset policies. We also demonstrate learning to pick and place objects from pixels and sparse rewards on two robot arms in 8–10 hours. + +Limitations While Dreamer shows promising results, learning on hardware over many hours creates wear on robots that may require human intervention or repair. Additionally, more work is required to explore the limits of Dreamer and our baselines by training for a longer time. Finally, we see tackling more challenging tasks, potentially by combining the benefits of fast real world learning with those of simulators, as an impactful future research direction. + +Acknowledgements We thank Stephen James and Justin Kerr for helpful suggestions and help with printing the protective shell of the quadruped robot. We thank Ademi Adeniji for help with setting up the XArm robot and Raven Huang for help with setting up the UR5 robot. This work was supported in part by an NSF Fellowship, NSF NRI #2024675, and the Vanier Canada Graduate Scholarship. + +References +D. Hafner, T. Lillicrap, J. Ba, and M. Norouzi. Dream to control: Learning behaviors by latent imagination. arXiv preprint arXiv:1912.01603, 2019. +D. Hafner, T. Lillicrap, M. Norouzi, and J. Ba. Mastering atari with discrete world models. arXiv preprint arXiv:2010.02193, 2020. +Y. Gal, R. McAllister, and C. E. Rasmussen. Improving pilco with bayesian neural network dynamics models. In Data-Efficient Machine Learning workshop, ICML, 2016. +F. Ebert, C. Finn, S. Dasari, A. Xie, A. Lee, and S. Levine. 
Visual foresight: Model-based deep reinforcement learning for vision-based robotic control. arXiv preprint arXiv:1812.00568, 2018. +R. Sekar, O. Rybkin, K. Daniilidis, P. Abbeel, D. Hafner, and D. Pathak. Planning to explore via selfsupervised world models. In International Conference on Machine Learning, pages 8583–8592. PMLR, 2020. +T. Yu, A. Kumar, R. Rafailov, A. Rajeswaran, S. Levine, and C. Finn. Combo: Conservative offline model-based policy optimization. Advances in neural information processing systems, 34: 28954–28967, 2021. +D. Hafner, T. Lillicrap, I. Fischer, R. Villegas, D. Ha, H. Lee, and J. Davidson. Learning latent dynamics for planning from pixels. arXiv preprint arXiv:1811.04551, 2018. +D. P. Kingma and M. Welling. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114, 2013. +D. J. Rezende, S. Mohamed, and D. Wierstra. Stochastic backpropagation and approximate inference in deep generative models. arXiv preprint arXiv:1401.4082, 2014. +R. S. Sutton and A. G. Barto. Reinforcement learning: An introduction. MIT press, 2018. +R. J. Williams. Simple statistical gradient-following algorithms for connectionist reinforcement learning. Machine learning, 8(3-4):229–256, 1992. +M. Henaff, A. Canziani, and Y. LeCun. Model-predictive policy learning with uncertainty regularization for driving in dense traffic. arXiv preprint arXiv:1901.02705, 2019. +D. P. Kingma and J. Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. +V. Mnih, K. Kavukcuoglu, D. Silver, A. A. Rusu, J. Veness, M. G. Bellemare, A. Graves, M. Riedmiller, A. K. Fidjeland, G. Ostrovski, et al. Human-level control through deep reinforcement learning. Nature, 518(7540):529, 2015. +T. P. Lillicrap, J. J. Hunt, A. Pritzel, N. Heess, T. Erez, Y. Tassa, D. Silver, and D. Wierstra. Continuous control with deep reinforcement learning. arXiv preprint arXiv:1509.02971, 2015. +T. Haarnoja, A. Zhou, P. Abbeel, and S. Levine. 
Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor. arXiv preprint arXiv:1801.01290, 2018a. +T. Haarnoja, A. Zhou, K. Hartikainen, G. Tucker, S. Ha, J. Tan, V. Kumar, H. Zhu, A. Gupta, P. Abbeel, et al. Soft actor-critic algorithms and applications. arXiv preprint arXiv:1812.05905, 2018b. +M. Hessel, J. Modayil, H. Van Hasselt, T. Schaul, G. Ostrovski, W. Dabney, D. Horgan, B. Piot, M. Azar, and D. Silver. Rainbow: Combining improvements in deep reinforcement learning. In Thirty-Second AAAI Conference on Artificial Intelligence, 2018. +J. Schrittwieser, I. Antonoglou, T. Hubert, K. Simonyan, L. Sifre, S. Schmitt, A. Guez, E. Lockhart, D. Hassabis, T. Graepel, et al. Mastering atari, go, chess and shogi by planning with a learned model. arXiv preprint arXiv:1911.08265, 2019. +J. Schulman, F. Wolski, P. Dhariwal, A. Radford, and O. Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017. +D. Yarats, R. Fergus, A. Lazaric, and L. Pinto. Mastering visual continuous control: Improved data-augmented reinforcement learning. arXiv preprint arXiv:2107.09645, 2021. +A. A. Rusu, M. Vecerik, T. RothΓΆrl, N. Heess, R. Pascanu, and R. Hadsell. Sim-to-real robot learning from pixels with progressive nets, 2016. +X. B. Peng, M. Andrychowicz, W. Zaremba, and P. Abbeel. Sim-to-real transfer of robotic control with dynamics randomization. In 2018 IEEE International Conference on Robotics and Automation (ICRA), pages 1–8, May 2018. doi:10.1109/ICRA.2018.8460528. +N. Rudin, D. Hoeller, P. Reist, and M. Hutter. Learning to walk in minutes using massively parallel deep reinforcement learning, 2021. +J. Lee, J. Hwangbo, L. Wellhausen, V. Koltun, and M. Hutter. Learning quadrupedal locomotion over challenging terrain. Science Robotics, 5(47), oct 2020. doi:10.1126/scirobotics.abc5986. URL https://doi.org/10.1126%2Fscirobotics.abc5986. +Y. Yang, K. Caluwaerts, A. Iscen, T. Zhang, J. Tan, and V. Sindhwani. 
Data efficient reinforcement learning for legged robots, 2019. +S. Nair, A. Rajeswaran, V. Kumar, C. Finn, and A. Gupta. R3m: A universal visual representation for robot manipulation, 2022. +OpenAI, M. Andrychowicz, B. Baker, M. Chociej, R. Jozefowicz, B. McGrew, J. Pachocki, A. Petron, M. Plappert, G. Powell, A. Ray, J. Schneider, S. Sidor, J. Tobin, P. Welinder, L. Weng, and W. Zaremba. Learning dexterous in-hand manipulation, 2018. +A. Irpan, C. Harris, J. Ibarz, K. Rao, M. Khansari, and S. Levine. Rl-cyclegan: Improving deep-rl robotics with simulation-to-real. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2020), 2020. +A. Kumar, Z. Fu, D. Pathak, and J. Malik. Rma: Rapid motor adaptation for legged robots, 2021. +J. Siekmann, K. Green, J. Warila, A. Fern, and J. Hurst. Blind bipedal stair traversal via sim-to-real reinforcement learning, 2021. +A. Escontrela, X. B. Peng, W. Yu, T. Zhang, A. Iscen, K. Goldberg, and P. Abbeel. Adversarial motion priors make good substitutes for complex reward functions, 2022. +D. Kalashnikov, A. Irpan, P. Pastor, J. Ibarz, A. Herzog, E. Jang, D. Quillen, E. Holly, M. Kalakrishnan, V. Vanhoucke, and S. Levine. Qt-opt: Scalable deep reinforcement learning for vision-based robotic manipulation, 2018. +S. Dasari, F. Ebert, S. Tian, S. Nair, B. Bucher, K. Schmeckpeper, S. Singh, S. Levine, and C. Finn. Robonet: Large-scale multi-robot learning, 2019. +D. Kalashnikov, J. Varley, Y. Chebotar, B. Swanson, R. Jonschkowski, C. Finn, S. Levine, and K. Hausman. Mt-opt: Continuous multi-task robotic reinforcement learning at scale, 2021. +F. Ebert, Y. Yang, K. Schmeckpeper, B. Bucher, G. Georgakis, K. Daniilidis, C. Finn, and S. Levine. Bridge data: Boosting generalization of robotic skills with cross-domain datasets, 2021. +A. Xie, F. Ebert, S. Levine, and C. Finn. Improvisation through physical understanding: Using novel objects as tools with visual foresight. arXiv preprint arXiv:1904.05538, 2019. 
+G. Schoettler, A. Nair, J. Luo, S. Bahl, J. A. Ojea, E. Solowjow, and S. Levine. Deep reinforcement learning for industrial insertion tasks with visual inputs and natural rewards, 2019. +S. James, K. Wada, T. Laidlow, and A. J. Davison. Coarse-to-fine q-attention: Efficient learning for visual robotic manipulation via discretisation, 2021. +D. Shah and S. Levine. Viking: Vision-based kilometer-scale navigation with geographic hints, 2022. +S. Bohez, S. Tunyasuvunakool, P. Brakel, F. Sadeghi, L. Hasenclever, Y. Tassa, E. Parisotto, J. Humplik, T. Haarnoja, R. Hafner, M. Wulfmeier, M. Neunert, B. Moran, N. Siegel, A. Huber, F. Romano, N. Batchelor, F. Casarini, J. Merel, R. Hadsell, and N. Heess. Imitate and repurpose: Learning reusable robot movement skills from human and animal behaviors, 2022. +A. Sivakumar, K. Shaw, and D. Pathak. Robotic telekinesis: Learning a robotic hand imitator by watching humans on youtube, 2022. +C. Finn, I. Goodfellow, and S. Levine. Unsupervised learning for physical interaction through video prediction. In Advances in neural information processing systems, pages 64–72, 2016. +C. Finn and S. Levine. Deep visual foresight for planning robot motion. In Robotics and Automation (ICRA), 2017 IEEE International Conference on, pages 2786–2793. IEEE, 2017. +Y. Yang, T. Zhang, E. Coumans, J. Tan, and B. Boots. Fast and efficient locomotion via learned gait transitions. In Conference on Robot Learning, pages 773–783. PMLR, 2022. +S. Ha, P. Xu, Z. Tan, S. Levine, and J. Tan. Learning to walk in the real world with minimal human effort. arXiv preprint arXiv:2002.08550, 2020. +M. Zhang, S. Vikram, L. Smith, P. Abbeel, M. Johnson, and S. Levine. Solar: deep structured representations for model-based reinforcement learning. In International Conference on Machine Learning, 2019. +A. Nagabandi, K. Konoglie, S. Levine, and V. Kumar. Deep dynamics models for learning dexterous manipulation, 2019. +G. I. Parisi, R. Kemker, J. L. Part, C. Kanan, and S. 
Wermter. Continual lifelong learning with neural networks: A review. Neural Networks, 113:54–71, 2019. ISSN 0893-6080. +T. Miki, J. Lee, J. Hwangbo, L. Wellhausen, V. Koltun, and M. Hutter. Learning robust perceptive locomotion for quadrupedal robots in the wild. Science Robotics, 7(62), jan 2022. doi:10.1126/ scirobotics.abk2822. +L. Smith, J. C. Kew, X. B. Peng, S. Ha, J. Tan, and S. Levine. Legged robots that keep on learning: Fine-tuning locomotion policies in the real world, 2021. +T.-Y. Yang, T. Zhang, L. Luu, S. Ha, J. Tan, and W. Yu. Safe reinforcement learning for legged locomotion, 2022. URL https://arxiv.org/abs/2203.02638. +S. Ha, P. Xu, Z. Tan, S. Levine, and J. Tan. Learning to walk in the real world with minimal human effort, 2020. URL https://arxiv.org/abs/2002.08550. +L. Smith, I. Kostrikov, and S. Levine. A walk in the park: Learning to walk in 20 minutes with model-free reinforcement learning, 2022. URL https://arxiv.org/abs/2208.07860. +S. Levine, P. Pastor, A. Krizhevsky, J. Ibarz, and D. Quillen. Learning hand-eye coordination for robotic grasping with deep learning and large-scale data collection. The International Journal of Robotics Research, 37(4-5):421–436, 2018. +L. Pinto and A. Gupta. Supersizing self-supervision: Learning to grasp from 50k tries and 700 robot hours, 2015. +H. Ha and S. Song. Flingbot: The unreasonable effectiveness of dynamic manipulation for cloth unfolding. Conference on Robot Learning, 2021. +S. James and A. J. Davison. Q-attention: Enabling efficient learning for vision-based robotic manipulation, 2021. +E. Tzeng, C. Devin, J. Hoffman, C. Finn, P. Abbeel, S. Levine, K. Saenko, and T. Darrell. Adapting deep visuomotor representations with weak pairwise constraints, 2015. +I. Akkaya, M. Andrychowicz, M. Chociej, M. Litwin, B. McGrew, A. Petron, A. Paino, M. Plappert, G. Powell, R. Ribas, et al. Solving rubik’s cube with a robot hand. arXiv preprint arXiv:1910.07113, 2019. +M. P. Deisenroth, G. Neumann, J. 
Peters, et al. A survey on policy search for robotics. Foundations and Trends in Robotics, 2(1–2):1–142, 2013. +K. Chua, R. Calandra, R. McAllister, and S. Levine. Deep reinforcement learning in a handful of trials using probabilistic dynamics models. In Advances in Neural Information Processing Systems, pages 4754–4765, 2018. +A. Nagabandi, G. Yang, T. Asmar, R. Pandya, G. Kahn, S. Levine, and R. S. Fearing. Learning image-conditioned dynamics models for control of under-actuated legged millirobots, 2017. +P. Becker-Ehmck, M. Karl, J. Peters, and P. van der Smagt. Learning to fly via deep model-based reinforcement learning. arXiv preprint arXiv:2003.08876, 2020. +F. Deng, I. Jang, and S. Ahn. Dreamerpro: Reconstruction-free model-based reinforcement learning with prototypical representations. arXiv preprint arXiv:2110.14565, 2021. +M. Okada and T. Taniguchi. Dreaming: Model-based reinforcement learning by latent imagination without reconstruction. In 2021 IEEE International Conference on Robotics and Automation (ICRA), pages 4209–4215. IEEE, 2021. +H. Bharadhwaj, M. Babaeizadeh, D. Erhan, and S. Levine. Information prioritization through empowerment in visual model-based rl. arXiv preprint arXiv:2204.08585, 2022. +K. Paster, L. E. McKinney, S. A. McIlraith, and J. Ba. Blast: Latent dynamics models from bootstrapping. In Deep RL Workshop NeurIPS 2021, 2021. +K. Hsu, M. J. Kim, R. Rafailov, J. Wu, and C. Finn. Vision-based manipulators need to also see from their hands, 2022. URL https://arxiv.org/abs/2203.12677. + +# A Adaptation + +Real world robot learning faces practical challenges such as changing environmental conditions and time varying dynamics. We found that Dreamer is able to adapt to the current environmental conditions with no change to the learning algorithm. This shows promise for using Dreamer in continual learning settings (Parisi et al., 2019). Adaptation of the quadruped to external perturbations is reported in Section 3.1 and Figure 8. 
+ +The XArm, situated near large windows, is able to adapt and maintain performance under the presence of changing lighting conditions. The XArm experiments were conducted after sundown to keep the lighting conditions constant throughout training. Figure A.1 shows the learning curve of the XArm. As expected, the performance of the XArm drops during sunrise. However, the XArm is able to adapt to the change in lighting conditions in about 5 hours time and recover the original performance, which is faster than it would be to train from scratch. A careful inspection of the image observations at these times, as shown in Figure A.1, reveals that the robot received observations with strong light rays covering the scene which greatly differs from the original training observations. + +![](images/db6cf431ae9355646aa06c810c30e311d8db38009707b4dea4bad788085ac2bb.jpg) +Figure A.1: The left two images are raw observations consumed by Dreamer. The leftmost image is an image observation as seen by the XArm at night, when it was trained. The next image shows an observation during sunrise. Despite the vast difference in pixel space, the XArm is able to recover, and then surpass, the original performance in approximately 5 hours. Even after 24 hours when the lighting shifts to night time conditions, the XArm is able to maintain performance. + +# B Imagination + +![](images/24fe88bf92baa43778d9defa3450750bc0d2c910fa9c12c5902630d7c2316e1e.jpg) +Figure B.1: To introspect the policy, we can roll out trajectories in the latent space of Dreamer, then decode the images to visualize the intent of the actor network. Each row is an imagined trajectory, showing every 2nd frame. Top: Latent rollouts on the UR5 environment. Multiple objects introduce more visual complexity that the network has to model. Note the second trajectory, which shows a static orange ball becoming a green ball. Bottom: Latent rollouts on the XArm environment. 
+ +# C Detailed Related Work + +RL for locomotion A common approach is to train RL agents from large amounts of simulated data under domain and dynamics randomization (Peng et al., 2018; Lee et al., 2020; Rudin et al., 2021; Siekmann et al., 2021; Escontrela et al., 2022; Miki et al., 2022; Kumar et al., 2021; Rusu et al., 2016; Bohez et al., 2022), then freezing the learned policy and deploying it to the real world. Smith et al. (2021) explored pre-training policies in simulation and fine-tuning them with real world data. Yang et al. (2019) investigate learning a dynamics model using a multi-step loss and using model predictive control to accomplish a specified task. Yang et al. (2022) train locomotion policies in the real world but require a recovery controller trained in simulation to avoid unsafe states. In contrast, we use no simulators or reset policies and directly train on the physical robot. While prior work in locomotion has successfully learned walking behaviors in the real world, these works generally required several domain-specific assumptions or pretraining with simulators. Ha et al. (2020) achieved successful walking on the Minitaur robot in 90 minutes. However, the authors manually programmed a reset policy that was used when the robot fell on its back, while in our work the robot must learn to flip over and stand up. Additionally, the Minitaur robot is simpler than the A1 as it has 8 actuators compared to 12 on the A1. In recent work, Smith et al. (2022) utilize a high update-to-data ratio (UTD) RL algorithm to learn walking from 20 minutes of robot training data. However, their work assumes the availability of a reset policy and therefore comprises of a different learning problem compared to the problem we tackle of learning to flip over and walk from scratch. Additionally, we show our approach generalizes to environments with image observations and sparse rewards. 
+ +RL for manipulation Learning promises to enable robot manipulators to solve contact rich tasks in open real world environments. One class of methods attempts to scale up experience collection through a fleet of robots (Kalashnikov et al., 2018; 2021; Ebert et al., 2021; Dasari et al., 2019; Levine et al., 2018). In contrast, we only leverage one robot, but parallelize an agent’s experience by using the learned world model. Another common approach is to leverage expert demonstrations or other task priors (Pinto and Gupta, 2015; Ha and Song, 2021; Xie et al., 2019; Schoettler et al., 2019; Sivakumar et al., 2022). James and Davison (2021); James et al. (2021) leverages a few demonstrations to increase the sample-efficiency of Q learning by focusing the learner on important aspects of the scene. Other approaches, as in locomotion, first utilize a simulator, then transfer to the real world (Tzeng et al., 2015; Akkaya et al., 2019; OpenAI et al., 2018; Irpan et al., 2020). Our work focuses on single-robot environments where the agent must learn through a small amount of interaction with the world. Meanwhile, the Google Arm Farm line of work by Levine et al. leverages over $5 8 0 \mathrm { k }$ grasp attempts gathered by 7 robots and collected over 4 months. We believe that a method such as Dreamer could benefit greatly from this scale of training data, however it is unlikely that works such as MT-OPT/QT-OPT Kalashnikov et al. (2018; 2021) would work well in the low data regime that Dreamer excels in. + +Model-based RL Due to its higher sample-efficiency over model-free methods, model-based RL is a promising approach to learning on real world robots (Deisenroth et al., 2013). 
A model based method first learns a dynamics model, which can then be used to plan actions (Nagabandi et al., 2019; Hafner et al., 2018; Chua et al., 2018; Nagabandi et al., 2017; Becker-Ehmck et al., 2020), or be used as a simulator to learn a policy network as in Dreamer (Hafner et al., 2019; 2020). One approach to tackle the high visual complexity of the world is to learn an action conditioned video prediction model (Finn and Levine, 2017; Ebert et al., 2018; Finn et al., 2016). One downside of this approach is the need to directly predict high dimensional observations, which can be computationally inefficient and easily drift. Dreamer learns a dynamics model in a latent space, allowing more efficient rollouts and avoids relying on high quality visual reconstructions for the policy. Another line of work proposes to learn latent dynamics models without having to reconstruct inputs (Deng et al., 2021; Okada and Taniguchi, 2021; Bharadhwaj et al., 2022; Paster et al., 2021), which we see as a promising approach for supporting moving view points in cluttered environments. + +# D Hyperparameters + +
| Name | Symbol | Value |
| --- | --- | --- |
| **General** | | |
| Replay capacity (FIFO) | — | 10^6 |
| Start learning | — | 10^4 |
| Batch size | B | 32 |
| Batch length | T | 32 |
| MLP size | — | 4 × 512 |
| Activation | — | LayerNorm + ELU |
| **World Model** | | |
| RSSM size | — | 512 |
| Number of latents | — | 32 |
| Classes per latent | — | 32 |
| KL balancing | — | 0.8 |
| **Actor Critic** | | |
| Imagination horizon | H | 15 |
| Discount | γ | 0.95 |
| Return lambda | λ | 0.95 |
| Target update interval | — | 100 |
| **All Optimizers** | | |
| Gradient clipping | — | 100 |
| Learning rate | — | 10^-4 |
| Adam epsilon | ε | 10^-6 |
+ +# E Environment and Hardware Details + +For every robot setup that involved vision (UR5, XArm, Sphero), we used a RealSense D435 camera positioned to offer a fixed 3rd person view of the scene. + +A1 We used the A1 quadrupedal robot by Unitree. The RL policy outputs actions at a frequency that is too high for the PD controller to track, which we overcome by lowpass filtering the action sequence. The joint range allows the legs to self-collide with the body, which can be damaging to the motors and increase battery consumption. We limited the joint range to decrease self-collisions. Finally, the EKF velocity estimator relies on foot-ground contact events to prevent significant drift in the estimates, so we employ a curriculum reward function that does not reward the robot for forward velocity until the robot is upright with extended legs. We also designed a shell which we 3D printed in order to better protect the cables and hardware and provide a smoother rolling over. + +XArm & UR5 We utilized slanted bins to prevent objects from leaving the work area during the long-running pick and place experiments on the UR5, which is common practice Levine et al. (2018); Kalashnikov et al. (2018). We also added a partition behind the setup to keep the background constant. It would be interesting to study how a gripper-mounted camera would impact policy performance Hsu et al. (2022), however we report strong results without this design choice. For the XArm we use the uFactory xArm Gripper. For the UR5, we use the Robotiq 2F-85 parallel jaw gripper. The bin locations are predetermined and provided as part of the environment to prevent the robot from colliding with the bin. In addition, movement in the $\textsf { Z }$ axis is only enabled while holding an object and the gripper automatically opens once above the other bin. + +Sphero We used a rectangular enclosure of $0 . 8 \times 0 . 8 \mathrm { { m ^ { 2 } } }$ to keep the sphero robot within the camera view. 
We used a simple OpenCV script to estimate the L2 distance between the Sphero and the goal position to provide a dense reward for policy optimization. This positional information was not provided to the agent, which it had to learn from the raw top-down images. \ No newline at end of file diff --git a/parse/dev/3RBY8fKjHeu/3RBY8fKjHeu_content_list.json b/parse/dev/3RBY8fKjHeu/3RBY8fKjHeu_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..24f87faa27a2f288218c456040b5b57f05a356b3 --- /dev/null +++ b/parse/dev/3RBY8fKjHeu/3RBY8fKjHeu_content_list.json @@ -0,0 +1,995 @@ +[ + { + "type": "text", + "text": "DayDreamer: World Models for Physical Robot Learning ", + "text_level": 1, + "bbox": [ + 303, + 99, + 692, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Philipp Wu\\* ", + "text_level": 1, + "bbox": [ + 282, + 176, + 372, + 191 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Alejandro Escontrela\\* Danijar Hafner\\* ", + "bbox": [ + 406, + 176, + 718, + 190 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Ken Goldberg Pieter Abbeel ", + "bbox": [ + 383, + 203, + 614, + 218 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "University of California, Berkeley \\*Equal contribution ", + "bbox": [ + 388, + 229, + 609, + 243 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "", + "bbox": [ + 434, + 250, + 563, + 263 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract: To solve tasks in complex environments, robots need to learn from experience. Deep reinforcement learning is a common approach to robot learning but requires a large amount of trial and error to learn, limiting its deployment in the physical world. As a consequence, many advances in robot learning rely on simulators. 
On the other hand, learning inside of simulators fails to capture the complexity of the real world, is prone to simulator inaccuracies, and the resulting behaviors do not adapt to changes in the world. The Dreamer algorithm has recently shown great promise for learning from small amounts of interaction by planning within a learned world model, outperforming pure reinforcement learning in video games. Learning a world model to predict the outcomes of potential actions enables planning in imagination, reducing the amount of trial and error needed in the real environment. However, it is unknown whether Dreamer can facilitate faster learning on physical robots. In this paper, we apply Dreamer to 4 robots to learn online and directly in the real world, without any simulators. Dreamer trains a quadruped robot to roll off its back, stand up, and walk from scratch and without resets in only 1 hour. We then push the robot and find that Dreamer adapts within 10 minutes to withstand perturbations or quickly roll over and stand back up. On two different robotic arms, Dreamer learns to pick and place objects from camera images and sparse rewards, approaching human-level teleoperation performance. On a wheeled robot, Dreamer learns to navigate to a goal position purely from camera images, automatically resolving ambiguity about the robot orientation. Using the same hyperparameters across all experiments, we find that Dreamer is capable of online learning in the real world, which establishes a strong baseline. We release our infrastructure for future applications of world models to robot learning. 
Videos are available on the project website: https://danijar.com/daydreamer ", + "bbox": [ + 232, + 292, + 766, + 659 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/17f2d11eee9937e70f62a1993623ebccd221887d067e71919c350fa57662f4d3.jpg", + "image_caption": [ + "Figure 1: To study the applicability of Dreamer for sample-efficient robot learning, we apply the algorithm to learn robot locomotion, manipulation, and navigation tasks from scratch in the real world on 4 robots, without simulators. The tasks evaluate a diverse range of challenges, including continuous and discrete actions, dense and sparse rewards, proprioceptive and camera inputs, as well as sensor fusion of multiple input modalities. Learning successfully using the same hyperparameters across all experiments, Dreamer establishes a strong baseline for real world robot learning. " + ], + "image_footnote": [], + "bbox": [ + 174, + 684, + 823, + 816 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction ", + "text_level": 1, + "bbox": [ + 174, + 90, + 310, + 106 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Teaching robots to solve complex tasks in the real world is a foundational problem of robotics research. Deep reinforcement learning (RL) offers a popular approach to robot learning that enables robots to improve their behavior over time through trial and error. However, current algorithms require too much interaction with the environment to learn successful behaviors. Recently, modern world models have shown great promise for data efficient learning in simulated domains and video games (Hafner et al., 2019; 2020). Learning world models from past experience enables robots to imagine the future outcomes of potential actions, reducing the amount of trial and error in the real environment needed to learn. 
", + "bbox": [ + 174, + 126, + 550, + 318 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "While learning accurate world models can be challenging, they offer compelling properties for robot learning. By predicting future outcomes, world models allow for planning and behavior learning given only small amounts of real world interaction (Gal et al., 2016; Ebert et al., 2018). Moreover, world models summarize general dynamics knowledge about the environment that, once learned, could be reused for a wide range of downstream tasks (Sekar et al., 2020). World models also learn representations that fuse multiple sensor modalities and integrate them into latent states, reducing the need for sophisticated state estimators. Finally, world models generalize well from available offline data (Yu et al., 2021), which further accelerates learning in the real world. ", + "bbox": [ + 174, + 330, + 549, + 536 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/e30f877426a1aa2686b70c08629a56889403c96105b017890f8ea57b7982c4a2.jpg", + "image_caption": [ + "Figure 2: Dreamer follows a simple pipeline for online learning on robot hardware without simulators. The current learned policy collects experience on the robot. This experience is added to the replay buffer. The world model is trained on replayed off-policy sequences through supervised learning. An actor critic algorithm optimizes a neural network policy from imagined rollouts in the latent space of the world model. We parallelize data collection and neural network learning. " + ], + "image_footnote": [], + "bbox": [ + 562, + 128, + 826, + 294 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Despite the promises of world models, learning accurate world models for the real world is a open challenge. 
In this paper, we leverage recent advances of the Dreamer world model for training a variety of robots in the most straight-forward and fundamental problem setting: online reinforcement learning in the real world, without simulators or demonstrations. As shown in Figure 2, Dreamer learns a world model from a replay buffer of past experience, learns behaviors from rollouts imagined in the latent space of the world model, and continuously interacts with the environment to explore and improve its behaviors. Our aim is to push the limits of robot learning directly in the real world and offer a robust platform to enable future work that develops the benefits of world models for robot learning. The key contributions of this paper are summarized as follows: ", + "bbox": [ + 173, + 549, + 825, + 683 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "β€’ Dreamer on Robots We apply Dreamer to 4 robots, demonstrating successful learning directly in the real world, without introducing new algorithms. The tasks cover a range of challenges, including different action spaces, sensory modalities, and reward structures. \nβ€’ Walking in 1 Hour We teach a quadruped from scratch in the real world to roll off its back, stand up, and walk in only 1 hour. Afterwards, we find that the robot adapts to being pushed within 10 minutes, learning to withstand pushes or quickly roll over and get back on its feet. \nβ€’ Visual Pick and Place We train robotic arms to pick and place objects from sparse rewards, which requires localizing objects from pixels and fusing images with proprioceptive inputs. The learned behavior outperforms model-free agents and approaches the performance of a human teleoperator using the same control interface as the robot. 
\nβ€’ Open Source We publicly release the software infrastructure for all our experiments, which supports different action spaces and sensory modalities, offering a flexible platform for future research of world models for robot learning in the real world. ", + "bbox": [ + 173, + 698, + 826, + 905 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/bbd9aa6b3f541685e1ecf9dd1c4451b92904b361a6547ee2e39414769cb64de4.jpg", + "image_caption": [ + "Figure 3: Neural Network Training We leverage the Dreamer algorithm (Hafner et al., 2019; 2020) for fast robot learning in real world. Dreamer consists of two main neural network components, the world model and the policy. Left: The world model follows the structure of a deep Kalman filter that is trained on subsequences drawn from the replay buffer. The encoder fuses all sensory modalities into discrete codes. The decoder reconstructs the inputs from the codes, providing a rich learning signal and enabling human inspection of model predictions. A recurrent state-space model (RSSM) is trained to predict future codes given actions, without observing intermediate inputs. " + ], + "image_footnote": [], + "bbox": [ + 169, + 65, + 826, + 272 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Right: The world model enables massively parallel policy optimization from imagined rollouts in the compact latent space using a large batch size, without having to reconstruct sensory inputs. Dreamer trains a policy network and value network from the imagined rollouts and a learned reward function. ", + "bbox": [ + 174, + 381, + 826, + 438 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Approach ", + "text_level": 1, + "bbox": [ + 174, + 448, + 289, + 465 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We leverage the Dreamer algorithm (Hafner et al., 2019; 2020) for online learning on physical robots, without the need for simulators. Figure 2 shows an overview of the approach. 
Dreamer learns a world model from a replay buffer of past experiences, uses an actor critic algorithm to learn behaviors from trajectories predicted by the learned model, and deploys its behavior in the environment to continuously grow the replay buffer. We decouple learning updates from data collection to meet latency requirements and to enable fast training without waiting for the environment. In our implementation, a learner thread continuously trains the world model and actor critic behavior, while an actor thread in parallel computes actions for environment interaction. ", + "bbox": [ + 173, + 476, + 825, + 594 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "World Model Learning The world model is a deep neural network that learns to predict the environment dynamics, as shown in Figure 3 (left). Because sensory inputs can be large images, we predict future representations rather than future inputs. This reduces accumulating errors and enables massively parallel training with a large batch size. Thus, the world model can be thought of as a fast simulator of the environment that the robot learns autonomously, starting from a blank slate and continuously improving its model as it explores the real world. 
The world model is based on the Recurrent State-Space Model (RSSM; Hafner et al., 2018), which consists of four components: ", + "bbox": [ + 173, + 599, + 825, + 703 + ], + "page_idx": 2 + }, + { + "type": "equation", + "img_path": "images/bd74dd0968c35c4dc8ffeba6cc51d9f109521ca66008a561e30a7fcccfe148a6.jpg", + "text": "$$\n{ \\begin{array} { r l r l } & { \\operatorname { e n c } _ { \\theta } { \\big ( } s _ { t } \\ { \\big | } \\ s _ { t - 1 } , a _ { t - 1 } , x _ { t } { \\big ) } } & & { { \\mathrm { D e c o d e r ~ N e t w o r k : } } \\quad \\operatorname* { d e c } _ { \\theta } { \\big ( } s _ { t } { \\big ) } \\approx x _ { t } } \\\\ & { \\operatorname { d y n } _ { \\theta } { \\big ( } s _ { t } \\ { \\big | } \\ s _ { t - 1 } , a _ { t - 1 } { \\big ) } } & & { { \\mathrm { R e w a r d ~ N e t w o r k : } } \\quad \\operatorname { r e w } _ { \\theta } { \\big ( } s _ { t + 1 } { \\big ) } \\approx r _ { t } } \\end{array} }\n$$", + "text_format": "latex", + "bbox": [ + 328, + 708, + 787, + 747 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Physical robots are often equipped with multiple sensors of different modalities, such as proprioceptive joint readings, force sensors, and high-dimensional inputs such as RGB and depth camera images. The encoder network fuses all sensory inputs $x _ { t }$ together into the stochastic representations $z _ { t }$ . The dynamics model learns to predict the sequence of stochastic representations by using its recurrent state $h _ { t }$ . The decoder reconstructs the sensory inputs to provide a rich signal for learning representations and enables human inspection of model predictions. In our experiments, the robot has to discover task rewards by interacting with the real world, which the reward network learns to predict. Using manually specified rewards as a function of the decoded sensory inputs is also possible. 
We optimize all components of the world model jointly by stochastic backpropagation (Kingma and Welling, 2013; Rezende et al., 2014). ", + "bbox": [ + 173, + 751, + 826, + 898 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Actor Critic Learning While the world model represents task-agnostic knowledge about the dynamics, the actor critic algorithm learns a behavior that is specific to the task at hand. As shown in Figure 3 (right), we learn behaviors from rollouts that are predicted in the latent space of the world model, without decoding observations. This enables massively parallel behavior learning with typical batch sizes of 16K on a single GPU. The actor critic algorithm consists of an actor network $\\pi ( a _ { t } | s _ { t } )$ and a critic network $v ( s _ { t } )$ . ", + "bbox": [ + 173, + 90, + 825, + 180 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The role of the actor network is to learn a distribution over successful actions $a _ { t }$ for each latent model state $s _ { t }$ that maximizes the sum of future predicted task rewards. The critic network learns to predict the sum of future task rewards through temporal difference learning (Sutton and Barto, 2018). This allows the algorithm to take into account rewards beyond the planning horizon of $H = 1 6$ steps to learn long-term strategies. Given a predicted trajectory of model states, the critic is trained to regress the return of the trajectory. We compute $\\lambda$ -returns following Hafner et al. 
(2020; 2019): ", + "bbox": [ + 173, + 188, + 825, + 277 + ], + "page_idx": 3 + }, + { + "type": "equation", + "img_path": "images/bced559cf73caae8b60315e212417c82fda960dadfbf4c1dc28fc8ea7be19aee.jpg", + "text": "$$\nV _ { t } ^ { \\lambda } \\doteq r _ { t } + \\gamma \\Big ( ( 1 - \\lambda ) v ( s _ { t + 1 } ) + \\lambda V _ { t + 1 } ^ { \\lambda } \\Big ) , \\quad V _ { H } ^ { \\lambda } \\doteq v ( s _ { H } ) .\n$$", + "text_format": "latex", + "bbox": [ + 305, + 281, + 691, + 309 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "While the critic network is trained to regress the $\\lambda$ -returns, the actor network is trained to maximize them. Different gradient estimators are available for computing the policy gradient for optimizing the actor, such as Reinforce (Williams, 1992) and the reparameterization trick (Kingma and Welling, 2013; Rezende et al., 2014) that directly backpropagates return gradients through the differentiable dynamics network (Henaff et al., 2019). Following Hafner et al. (2020), we choose reparameterization gradients for continuous control tasks and Reinforce gradients for tasks with discrete actions. 
In addition to maximizing returns, the actor is also incentivized to maintain high entropy to prevent collapse to a deterministic policy and maintain some amount of exploration throughout training: ", + "bbox": [ + 173, + 314, + 825, + 433 + ], + "page_idx": 3 + }, + { + "type": "equation", + "img_path": "images/29586778ea39548465206a088f601c3e740302c103692a574d7a70d8188b3983.jpg", + "text": "$$\n\\begin{array} { r } { \\mathcal { L } ( \\pi ) \\doteq - \\operatorname { E } \\bigl [ \\sum _ { t = 1 } ^ { H } \\ln \\pi ( a _ { t } \\mid s _ { t } ) \\mathrm { s g } ( V _ { t } ^ { \\lambda } - v ( s _ { t } ) ) + \\eta \\mathrm { H } \\bigl [ \\pi ( a _ { t } \\mid s _ { t } ) \\bigr ] \\bigr ] } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 271, + 440, + 725, + 462 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We optimize the actor and critic using the Adam optimizer (Kingma and Ba, 2014). To compute the $\\lambda$ -returns, we use a slowly updated copy of the critic network as common in the literature (Mnih et al., 2015; Lillicrap et al., 2015). The actor and critic gradients do not affect the world model, as this would lead to incorrect and overly optimistic model predictions. The hyperparameters are listed in Appendix D. ", + "bbox": [ + 174, + 465, + 825, + 540 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 Experiments ", + "text_level": 1, + "bbox": [ + 174, + 564, + 312, + 582 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We evaluate Dreamer on 4 robots, each with a different task, and compare its performance to appropriate algorithmic and human baselines. The experiments are representative of common robotic tasks, such as locomotion, manipulation, and navigation. The tasks pose a diverse range of challenges, including continuous and discrete actions, dense and sparse rewards, proprioceptive and image observations, and sensor fusion. 
The goal of the experiments is to evaluate whether the recent successes of learned world models enables sample-efficient robot learning directly in the real world. Specifically, we aim to answer the following research questions: ", + "bbox": [ + 173, + 597, + 826, + 700 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "β€’ Does Dreamer enable robot learning directly in the real world, without simulators? β€’ Does Dreamer succeed across various robot platforms, sensory modalities, and action spaces? β€’ How does the data-efficiency of Dreamer compare to previous reinforcement learning algorithms? ", + "bbox": [ + 173, + 712, + 826, + 767 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Implementation We build on the official implementation of DreamerV2 (Hafner et al., 2020). We develop an asynchronous actor and learner setup, which is essential in environments with high control rates, such as the quadruped, and also accelerates learning for slower environments, such as the robot arms. The actor thread computes online actions for the robot and sends trajectories of 128 time steps to the replay buffer. The learner thread samples data from the replay buffer, updates the world model, and optimizes the policy using imagination rollouts. Policy weights are synced from the learner to the actor every 20 seconds. We use an RSSM with 256 units to speed up the training computation. We use identical hyperparameters across all experiments, enabling off-the-shelf training on different robot embodiments. ", + "bbox": [ + 173, + 779, + 826, + 911 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/159d86a4fe017221206965fa98efc6ce35e16bebec7536f231b04a5fa470830b.jpg", + "image_caption": [ + "Figure 4: A1 Quadruped Walking Starting from lying on its back with the feet in the air, Dreamer learns to roll over, stand up, and walk in 1 hour of real world training time, without simulators or resets. 
In contrast, SAC only learns to roll over but neither to stand up nor to walk. For SAC, we also had to help the robot out of a dead-locked leg configuration during training. On the right we show training curves for both SAC and Dreamer. The maximum reward is 14. The filled circles indicate times where the robot fell on its back, requiring the learning of a robust strategy for getting back up. After 1 hour of training, we start pushing the robot and find that it adapts its behavior within 10 minutes to withstand light pushes and quickly roll back on its feet for hard pushes. The graph shows a single training run with the shaded area indicating one standard deviation within each time bin. " + ], + "image_footnote": [], + "bbox": [ + 173, + 71, + 818, + 180 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Baselines We compare to a strong learning algorithm for each of our experimental setups. The A1 quadruped robot uses continuous actions and low-dimensional inputs, allowing us to compare to SAC (Haarnoja et al., 2018a;b), a popular algorithm for data-efficient continuous control. For the visual pick and place experiments on the XArm and UR5 robots, inputs are images and proprioceptive readings and actions are discrete, suggesting algorithms from the DQN (Mnih et al., 2015) line of work as baselines. We choose Rainbow (Hessel et al., 2018) as a powerful representative of this category, an algorithm that combines many improvements of DQN. To input the proprioceptive readings, we concatenate them as broadcasted planes to the RGB channels of the image, a common practice in the literature (Schrittwieser et al., 2019). For the UR5, we additionally compare against PPO (Schulman et al., 2017), with similar modifications for fusing image and proprioceptive readings. In addition, we compare against a human operator controlling the robot arm through the robot control interface. For the Sphero navigation task, inputs are images and actions are continuous. 
The state-ofthe-art baseline in this category is DrQv2 (Yarats et al., 2021), which uses image augmentation to increase sample-efficiency. ", + "bbox": [ + 173, + 338, + 826, + 545 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.1 A1 Quadruped Walking ", + "text_level": 1, + "bbox": [ + 174, + 559, + 382, + 574 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "This high-dimensional continuous control task requires training a quadruped robot to roll over from its back, stand up, and walk forward at a fixed target velocity. Prior work in quadruped locomotion requires either extensive training in simulation under domain randomization, using recovery controllers to avoid unsafe states, or defining the action space as parameterized trajectory generators that restrict the space of motions (Rusu et al., 2016; Peng et al., 2018; Rudin et al., 2021; Lee et al., 2020; Yang et al., 2019). In contrast, we train in the end-to-end reinforcement learning setting directly on the robot, without simulators or resets. We use the Unitree A1 robot that consists of 12 direct drive motors. The motors are controlled at $2 0 \\mathrm { H z }$ via continuous actions that represent motor angles that are realized by a PD controller on the hardware. Actions are filtered with a Butterworth filter to protect the motor from high-frequency actions. The input consists of motor angles, orientations, and angular velocities. Due to space constraints, we manually intervene when the robot has reached the end of the available training area, without modifying the joint configuration or orientation that the robot is in. ", + "bbox": [ + 174, + 579, + 549, + 768 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/69e7a0dc11e3a7ecd812dec526777f2a39e7aed63605587ef789903e7f57fb8c.jpg", + "image_caption": [ + "Figure 8: Within 10 minutes of perturbing the learned walking behavior, the robot adapts to withstanding pushes or quickly rolling over and back on its feet. 
" + ], + "image_footnote": [], + "bbox": [ + 562, + 580, + 823, + 695 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "", + "bbox": [ + 174, + 770, + 825, + 843 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The reward function is the sum of five terms. An upright reward is computed from the base frame up vector $\\hat { z } ^ { T }$ , terms for matching the standing pose are computed from the joint angles of the hips, shoulders, and knees, and a forward velocity term is computed from the projected forward velocity $\\boldsymbol { s } _ { v } \\boldsymbol { x }$ and the total velocity $s _ { v }$ . Without the reward curriculum, the agent receives spurious reward values due to the velocity estimator’s dependence on foot-ground contact events. Each of the five terms is active while its preceding terms are satisfied to at least 0.7 and otherwise set to 0: ", + "bbox": [ + 174, + 853, + 825, + 911 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/69863294723843746383e47fe99dcd32744e499d1c526f51df4121c52ff99fe8.jpg", + "image_caption": [ + "Figure 5: UR5 Multi Object Visual Pick and Place This task requires learning to locate three ball objects from third-person camera images, grasp them, and move them into the other bin. The arm is free to move within and above the bins and sparse rewards are given for grasping a ball and for dropping it in the opposite bin. The environment requires the world model to learn multi-object dynamics in the real world and the sparse reward structure poses a challenge for policy optimization. Dreamer overcomes the challenges of visual localization and sparse rewards on this task, learning a successful strategy within a few hours of autonomous operation. 
" + ], + "image_footnote": [], + "bbox": [ + 173, + 73, + 815, + 179 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "", + "bbox": [ + 174, + 309, + 825, + 339 + ], + "page_idx": 5 + }, + { + "type": "equation", + "img_path": "images/d59a106b3ad7e1f46762af4cb6c79589191a386db6dd18da2381b810f0e72627.jpg", + "text": "$$\n\\begin{array} { r l } { r ^ { \\mathrm { u p r } } \\doteq ( \\hat { z } ^ { T } [ 0 , 0 , 1 ] - 1 ) / 2 } & { { } r ^ { \\mathrm { h i p } } \\doteq 1 - \\frac 1 4 \\| q ^ { \\mathrm { h i p } } + 0 . 2 \\| _ { 1 } \\quad r ^ { \\mathrm { s h o u l d e r } } \\doteq 1 - \\frac 1 4 \\| q ^ { \\mathrm { s h o u l d e r } } + 0 . 2 \\| _ { 1 } } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 200, + 344, + 779, + 364 + ], + "page_idx": 5 + }, + { + "type": "equation", + "img_path": "images/d3f8da6b396f76d54da62528b9e91defa6543c22e48371ec0f3e6d668424058f.jpg", + "text": "$$\n\\begin{array} { r l } { r ^ { \\mathrm { k n e e } } \\doteq 1 - \\frac 1 4 \\parallel q ^ { \\mathrm { k n e e } } - 1 . 0 \\parallel _ { 1 } } & { { } r ^ { \\mathrm { v e l o c i t y } } \\doteq 5 \\big ( \\operatorname* { m a x } ( 0 , ^ { \\mathcal { B } } v _ { x } ) / \\parallel ^ { \\mathcal { B } } v \\parallel _ { 2 } \\cdot \\mathrm { c l i p } ( ^ { \\mathcal { B } } v _ { x } / 0 . 3 , - 1 , 1 ) + 1 \\big ) } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 199, + 371, + 777, + 391 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "As shown in Figure 4, after one hour of training, Dreamer learns to consistently flip the robot over from its back, stand up, and walk forward. In the first 5 minutes of training, the robot manages to roll off its back and land on its feet. 20 minutes later, it learns how to stand up on its feet. About 1 hour into training, the robot learns a pronking gait to walk forward at the desired velocity. 
After succeeding at this task, we tested the robustness of the algorithms by repeatedly knocking the robot off of its feet with a large pole, shown in Figure 8. Within 10 minutes of additional online learning, the robot adapts and withstand pushes or quickly rolls back on its feet. In comparison, SAC quickly learns to roll off its back but fails to stand up or walk given the small data budget. ", + "bbox": [ + 173, + 396, + 826, + 515 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.2 UR5 Multi-Object Visual Pick and Place ", + "text_level": 1, + "bbox": [ + 173, + 525, + 495, + 540 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Common in warehouse and logistics environments, pick and place tasks require a robot manipulator to transport items from one bin into another. Figure 5 shows a successful pick and place cycle of this task. The task is challenging because of sparse rewards, the need to infer object positions from pixels, and the challenging dynamics of multiple moving objects. The sensory inputs consist of proprioceptive readings (joint angles, gripper position, end effector Cartesian position) and a 3rd person RGB image of the scene. Successfully grasping one of the 3 objects, detected by partial gripper closure, results in a $+ 1$ reward, releasing the object in the same bin gives a $- 1$ reward, and placing in the opposite bin gives a $+ 1 0$ reward. We control the UR5 robot from Universal Robotics at $2 \\ \\mathrm { H z }$ . Actions are discrete for moving the end effector in increments along X, Y, and $\\textsf { Z }$ axes and for toggling the gripper state. Movement in the Z axis is only enabled while holding an object and the gripper automatically opens once above the correct bin. We estimate human teleoperation performance by recording 3 demonstrators for 20 minutes each, controlling the UR5 with a joystick. 
", + "bbox": [ + 173, + 541, + 826, + 717 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Dreamer reaches an average pick rate of 2.5 objects per minute within 8 hours. The robot initially struggles to learn as the reward signal is very sparse, but begins to gradually improve after 2 hours of training. The robot first learns to localize the objects and toggles the gripper when near an object. Over time, grasping becomes precise and the robot learns to push objects out of corners. Figure 5 shows the learning curves of Dreamer compared to Rainbow DQN, PPO, and the human baseline. Both Rainbow DQN and PPO only learn the short-sighted behavior of grasping and immediately dropping objects in the same bin. In contrast, Dreamer approaches human-level teleoperation performance after 8 hours. We hypothesize that Rainbow DQN and PPO fail because they require larger amounts of experience, which is not feasible for us to collect in the real world. ", + "bbox": [ + 173, + 722, + 825, + 856 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.3 XArm Visual Pick and Place ", + "text_level": 1, + "bbox": [ + 174, + 866, + 411, + 881 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "While the UR5 robot is a high performance industrial robot, the XArm is an accessible low-cost 7 DOF manipulation, which we control at approximately $0 . 5 \\ : \\mathrm { H z }$ . Similar to Section 3.2, the task requires localizing and grasping a soft object and moving it from one bin to another and back, shown in Figure 6. We connect the object to the gripper with a string, which makes it less likely for the object to get stuck in corners at the cost of more complex dynamics. The sparse reward, discrete action space, and observation space match the UR5 setup except for the addition of depth image observations. 
", + "bbox": [ + 173, + 882, + 821, + 911 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/dfce202941b6d7b7a3b4e91b152da625264b3b1c43837193ab53e137e11b01f3.jpg", + "image_caption": [ + "Figure 6: XArm Visual Pick and Place The XArm is an affordable robot arm that operates slower than the UR5. To demonstrate successful learning on this robot, we use a third-person RealSense camera with RGB and depth modalities, as well as proprioceptive inputs for the robot arm, requiring the world model to learn sensor fusion. The pick and place task uses a soft object. While soft objects would be challenging to model accurately in a simulator, Dreamer avoids this issue by directly learning on the real robot without a simulator. While Rainbow and PPO using R3M visual embeddings converge to the local optimum of grasping and ungrasping the object in the same bin, Dreamer learns a successful pick and place policy from sparse rewards in under 10 hours. " + ], + "image_footnote": [], + "bbox": [ + 174, + 66, + 816, + 172 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "", + "bbox": [ + 174, + 318, + 825, + 391 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Dreamer learns a policy that enables the XArm to achieve an average pick rate of 3.1 objects per minute in 10 hours of time, which is comparable to human performance on this task. Figure 6 shows that Dreamer learns to solve the task within 10 hours, whereas the Rainbow algorithm, a top model-free algorithm for discrete control from pixels, fails to learn. We additionally compare Dreamer against a PPO baseline that utilizes R3M (Nair et al., 2022) pretrained visual embeddings for the state, but notice no improvement in performance. Interestingly, we observed that Dreamer learns to sometimes use the string to pull the object out of a corner before grasping it, demonstrating multi-modal behaviors. 
Moreover, we observed that when lighting conditions change drastically (such as sharp shadows during sunrise), performance initially collapses but Dreamer then adapts to the changing conditions and exceeds its previous performance after a few hours of additional training, reported in Appendix A. ", + "bbox": [ + 173, + 397, + 825, + 560 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.4 Sphero Navigation ", + "text_level": 1, + "bbox": [ + 174, + 570, + 343, + 585 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We evaluate Dreamer on a visual navigation task that requires maneuvering a wheeled robot to a fixed goal location given only RGB images as input. We use the Sphero Ollie robot, a cylindrical robot with two controllable motors, which we control through continuous torque commands at $2 \\ : \\mathrm { H z }$ Because the robot is symmetric and the robot only has access to image observations, it has to infer the heading direction from the history of observations. The robot is provided with a dense reward equal to the negative L2 distance, which is computed using a oracle vision pipeline that detects the Sphero’s position (this information is not provided to the agent). As the goal is fixed, after 100 environment steps, we end the episode and randomize the robot’s position through a sequence of high power random motor actions. ", + "bbox": [ + 174, + 587, + 825, + 718 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In 2 hours, Dreamer learns to quickly and consistently navigate to the goal and stay near the goal for the remainder of the episode. As shown in Figure 7, Dreamer achieves an average distance to the goal of 0.15, measured in units of the area size and averaged across time steps. We find that DrQv2, a model-free algorithm specifically designed to continuous control from pixels, achieves similar performance. This result matches the simulated experiments of Yarats et al. 
(2021) that showed the two algorithms to perform similarly for continuous control tasks from images. ", + "bbox": [ + 174, + 726, + 825, + 814 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4 Related Work ", + "text_level": 1, + "bbox": [ + 174, + 829, + 321, + 845 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Existing work on robot learning commonly leverages large amounts of simulated experience before deploying to the real world (Rusu et al., 2016; Peng et al., 2018; OpenAI et al., 2018; Lee et al., 2020; Irpan et al., 2020; Kumar et al., 2021; Siekmann et al., 2021; Escontrela et al., 2022), leverage fleets of robots to collect experience datasets (Kalashnikov et al., 2018; Dasari et al., 2019; Kalashnikov et al., 2021; Ebert et al., 2021), or rely on external information such as human expert demonstrations or task priors to achieve sample-efficient learning (Xie et al., 2019; Schoettler et al., 2019; James et al., 2021; Shah and Levine, 2022; Bohez et al., 2022; Sivakumar et al., 2022). However, designing simulated tasks and collecting expert demonstrations is time-consuming. Moreover, many of these approaches require specialized algorithms for leveraging offline experience, demonstrations, or simulator inaccuracies. In contrast, our experiments show that learning end-to-end from rewards in the physical world is feasible for a diverse range of tasks through world models. ", + "bbox": [ + 174, + 853, + 825, + 911 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/372acb53fc3f9ab9f178baac319f8b0cb0c4ee1ca96f374bada7397c4ec23630.jpg", + "image_caption": [ + "Figure 7: Sphero Navigation This task requires the Sphero robot to navigate to a goal location given a top-down RGB image as the only input. 
The task requires the robot to localize itself from raw pixels, to infer its orientation from the sequence of past images because it is ambiguous from a single image, and to control the robot from under-actuated motors that require building up momentum over time. Dreamer learns a successful policy on this task in under 2 hours. " + ], + "image_footnote": [], + "bbox": [ + 173, + 73, + 813, + 179 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "", + "bbox": [ + 174, + 282, + 825, + 386 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Relatively few works have demonstrated end-to-end learning from scratch in the physical world. Visual Foresight (Finn et al., 2016; Finn and Levine, 2017; Ebert et al., 2018) learns a video prediction model to solve real world tasks by online planning, but is limited to short-horizon tasks and requires generating images during planning, making it computationally expensive. Yang et al. (2019; 2022) learn quadruped locomotion through a model-based approach by predicting foot placement and leveraging a domain-specific controller to achieve them. Ha et al. (2020) learn a quadruped walking policy by relying on a scripted reset policy, so the robot does not have to learn to stand up. SOLAR (Zhang et al., 2019) learns a latent dynamics model from images and demonstrates reaching and pushing with a robot arm. Nagabandi et al. (2019) learns manipulation policies by planning through a learned dynamics model from state observations. In comparison, our experiments show successful learning across 4 challenging robot tasks that cover a wide range of challenges and sensory modalities, with a single learning algorithm and hyperparameter setting. 
", + "bbox": [ + 174, + 398, + 825, + 575 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5 Discussion ", + "text_level": 1, + "bbox": [ + 174, + 598, + 292, + 614 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We applied Dreamer to physical robot learning, finding that modern world models enable sampleefficient robot learning for a range of tasks, from scratch in the real world and without simulators. We also find that the approach is generally applicable in that it can solve robot locomotion, manipulation, and navigation tasks without changing hyperparameters. Dreamer taught a quadruped robot to roll off the back, stand up, and walk in 1 hour from scratch, which previously required extensive training in simulation followed by transfer to the real world or parameterized trajectory generators and given reset policies. We also demonstrate learning to pick and place objects from pixels and sparse rewards on two robot arms in 8–10 hours. ", + "bbox": [ + 173, + 633, + 825, + 752 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Limitations While Dreamer shows promising results, learning on hardware over many hours creates wear on robots that may require human intervention or repair. Additionally, more work is required to explore the limits of Dreamer and our baselines by training for a longer time. Finally, we see tackling more challenging tasks, potentially by combining the benefits of fast real world learning with those of simulators, as an impactful future research direction. ", + "bbox": [ + 174, + 766, + 825, + 839 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements We thank Stephen James and Justin Kerr for helpful suggestions and help with printing the protective shell of the quadruped robot. We thank Ademi Adeniji for help with setting up the XArm robot and Raven Huang for help with setting up the UR5 robot. 
This work was supported in part by an NSF Fellowship, NSF NRI #2024675, and the Vanier Canada Graduate Scholarship. ", + "bbox": [ + 176, + 853, + 823, + 911 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References \nD. Hafner, T. Lillicrap, J. Ba, and M. Norouzi. Dream to control: Learning behaviors by latent imagination. arXiv preprint arXiv:1912.01603, 2019. \nD. Hafner, T. Lillicrap, M. Norouzi, and J. Ba. Mastering atari with discrete world models. arXiv preprint arXiv:2010.02193, 2020. \nY. Gal, R. McAllister, and C. E. Rasmussen. Improving pilco with bayesian neural network dynamics models. In Data-Efficient Machine Learning workshop, ICML, 2016. \nF. Ebert, C. Finn, S. Dasari, A. Xie, A. Lee, and S. Levine. Visual foresight: Model-based deep reinforcement learning for vision-based robotic control. arXiv preprint arXiv:1812.00568, 2018. \nR. Sekar, O. Rybkin, K. Daniilidis, P. Abbeel, D. Hafner, and D. Pathak. Planning to explore via selfsupervised world models. In International Conference on Machine Learning, pages 8583–8592. PMLR, 2020. \nT. Yu, A. Kumar, R. Rafailov, A. Rajeswaran, S. Levine, and C. Finn. Combo: Conservative offline model-based policy optimization. Advances in neural information processing systems, 34: 28954–28967, 2021. \nD. Hafner, T. Lillicrap, I. Fischer, R. Villegas, D. Ha, H. Lee, and J. Davidson. Learning latent dynamics for planning from pixels. arXiv preprint arXiv:1811.04551, 2018. \nD. P. Kingma and M. Welling. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114, 2013. \nD. J. Rezende, S. Mohamed, and D. Wierstra. Stochastic backpropagation and approximate inference in deep generative models. arXiv preprint arXiv:1401.4082, 2014. \nR. S. Sutton and A. G. Barto. Reinforcement learning: An introduction. MIT press, 2018. \nR. J. Williams. Simple statistical gradient-following algorithms for connectionist reinforcement learning. Machine learning, 8(3-4):229–256, 1992. \nM. Henaff, A. Canziani, and Y. LeCun. 
Model-predictive policy learning with uncertainty regularization for driving in dense traffic. arXiv preprint arXiv:1901.02705, 2019. \nD. P. Kingma and J. Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. \nV. Mnih, K. Kavukcuoglu, D. Silver, A. A. Rusu, J. Veness, M. G. Bellemare, A. Graves, M. Riedmiller, A. K. Fidjeland, G. Ostrovski, et al. Human-level control through deep reinforcement learning. Nature, 518(7540):529, 2015. \nT. P. Lillicrap, J. J. Hunt, A. Pritzel, N. Heess, T. Erez, Y. Tassa, D. Silver, and D. Wierstra. Continuous control with deep reinforcement learning. arXiv preprint arXiv:1509.02971, 2015. \nT. Haarnoja, A. Zhou, P. Abbeel, and S. Levine. Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor. arXiv preprint arXiv:1801.01290, 2018a. \nT. Haarnoja, A. Zhou, K. Hartikainen, G. Tucker, S. Ha, J. Tan, V. Kumar, H. Zhu, A. Gupta, P. Abbeel, et al. Soft actor-critic algorithms and applications. arXiv preprint arXiv:1812.05905, 2018b. \nM. Hessel, J. Modayil, H. Van Hasselt, T. Schaul, G. Ostrovski, W. Dabney, D. Horgan, B. Piot, M. Azar, and D. Silver. Rainbow: Combining improvements in deep reinforcement learning. In Thirty-Second AAAI Conference on Artificial Intelligence, 2018. \nJ. Schrittwieser, I. Antonoglou, T. Hubert, K. Simonyan, L. Sifre, S. Schmitt, A. Guez, E. Lockhart, D. Hassabis, T. Graepel, et al. Mastering atari, go, chess and shogi by planning with a learned model. arXiv preprint arXiv:1911.08265, 2019. \nJ. Schulman, F. Wolski, P. Dhariwal, A. Radford, and O. Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017. \nD. Yarats, R. Fergus, A. Lazaric, and L. Pinto. Mastering visual continuous control: Improved data-augmented reinforcement learning. arXiv preprint arXiv:2107.09645, 2021. \nA. A. Rusu, M. Vecerik, T. RothΓΆrl, N. Heess, R. Pascanu, and R. Hadsell. 
Sim-to-real robot learning from pixels with progressive nets, 2016. \nX. B. Peng, M. Andrychowicz, W. Zaremba, and P. Abbeel. Sim-to-real transfer of robotic control with dynamics randomization. In 2018 IEEE International Conference on Robotics and Automation (ICRA), pages 1–8, May 2018. doi:10.1109/ICRA.2018.8460528. \nN. Rudin, D. Hoeller, P. Reist, and M. Hutter. Learning to walk in minutes using massively parallel deep reinforcement learning, 2021. \nJ. Lee, J. Hwangbo, L. Wellhausen, V. Koltun, and M. Hutter. Learning quadrupedal locomotion over challenging terrain. Science Robotics, 5(47), oct 2020. doi:10.1126/scirobotics.abc5986. URL https://doi.org/10.1126%2Fscirobotics.abc5986. \nY. Yang, K. Caluwaerts, A. Iscen, T. Zhang, J. Tan, and V. Sindhwani. Data efficient reinforcement learning for legged robots, 2019. \nS. Nair, A. Rajeswaran, V. Kumar, C. Finn, and A. Gupta. R3m: A universal visual representation for robot manipulation, 2022. \nOpenAI, M. Andrychowicz, B. Baker, M. Chociej, R. Jozefowicz, B. McGrew, J. Pachocki, A. Petron, M. Plappert, G. Powell, A. Ray, J. Schneider, S. Sidor, J. Tobin, P. Welinder, L. Weng, and W. Zaremba. Learning dexterous in-hand manipulation, 2018. \nA. Irpan, C. Harris, J. Ibarz, K. Rao, M. Khansari, and S. Levine. Rl-cyclegan: Improving deep-rl robotics with simulation-to-real. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2020), 2020. \nA. Kumar, Z. Fu, D. Pathak, and J. Malik. Rma: Rapid motor adaptation for legged robots, 2021. \nJ. Siekmann, K. Green, J. Warila, A. Fern, and J. Hurst. Blind bipedal stair traversal via sim-to-real reinforcement learning, 2021. \nA. Escontrela, X. B. Peng, W. Yu, T. Zhang, A. Iscen, K. Goldberg, and P. Abbeel. Adversarial motion priors make good substitutes for complex reward functions, 2022. \nD. Kalashnikov, A. Irpan, P. Pastor, J. Ibarz, A. Herzog, E. Jang, D. Quillen, E. Holly, M. Kalakrishnan, V. Vanhoucke, and S. Levine. 
Qt-opt: Scalable deep reinforcement learning for vision-based robotic manipulation, 2018. \nS. Dasari, F. Ebert, S. Tian, S. Nair, B. Bucher, K. Schmeckpeper, S. Singh, S. Levine, and C. Finn. Robonet: Large-scale multi-robot learning, 2019. \nD. Kalashnikov, J. Varley, Y. Chebotar, B. Swanson, R. Jonschkowski, C. Finn, S. Levine, and K. Hausman. Mt-opt: Continuous multi-task robotic reinforcement learning at scale, 2021. \nF. Ebert, Y. Yang, K. Schmeckpeper, B. Bucher, G. Georgakis, K. Daniilidis, C. Finn, and S. Levine. Bridge data: Boosting generalization of robotic skills with cross-domain datasets, 2021. \nA. Xie, F. Ebert, S. Levine, and C. Finn. Improvisation through physical understanding: Using novel objects as tools with visual foresight. arXiv preprint arXiv:1904.05538, 2019. \nG. Schoettler, A. Nair, J. Luo, S. Bahl, J. A. Ojea, E. Solowjow, and S. Levine. Deep reinforcement learning for industrial insertion tasks with visual inputs and natural rewards, 2019. \nS. James, K. Wada, T. Laidlow, and A. J. Davison. Coarse-to-fine q-attention: Efficient learning for visual robotic manipulation via discretisation, 2021. \nD. Shah and S. Levine. Viking: Vision-based kilometer-scale navigation with geographic hints, 2022. \nS. Bohez, S. Tunyasuvunakool, P. Brakel, F. Sadeghi, L. Hasenclever, Y. Tassa, E. Parisotto, J. Humplik, T. Haarnoja, R. Hafner, M. Wulfmeier, M. Neunert, B. Moran, N. Siegel, A. Huber, F. Romano, N. Batchelor, F. Casarini, J. Merel, R. Hadsell, and N. Heess. Imitate and repurpose: Learning reusable robot movement skills from human and animal behaviors, 2022. \nA. Sivakumar, K. Shaw, and D. Pathak. Robotic telekinesis: Learning a robotic hand imitator by watching humans on youtube, 2022. \nC. Finn, I. Goodfellow, and S. Levine. Unsupervised learning for physical interaction through video prediction. In Advances in neural information processing systems, pages 64–72, 2016. \nC. Finn and S. Levine. 
Deep visual foresight for planning robot motion. In Robotics and Automation (ICRA), 2017 IEEE International Conference on, pages 2786–2793. IEEE, 2017. \nY. Yang, T. Zhang, E. Coumans, J. Tan, and B. Boots. Fast and efficient locomotion via learned gait transitions. In Conference on Robot Learning, pages 773–783. PMLR, 2022. \nS. Ha, P. Xu, Z. Tan, S. Levine, and J. Tan. Learning to walk in the real world with minimal human effort. arXiv preprint arXiv:2002.08550, 2020. \nM. Zhang, S. Vikram, L. Smith, P. Abbeel, M. Johnson, and S. Levine. Solar: deep structured representations for model-based reinforcement learning. In International Conference on Machine Learning, 2019. \nA. Nagabandi, K. Konoglie, S. Levine, and V. Kumar. Deep dynamics models for learning dexterous manipulation, 2019. \nG. I. Parisi, R. Kemker, J. L. Part, C. Kanan, and S. Wermter. Continual lifelong learning with neural networks: A review. Neural Networks, 113:54–71, 2019. ISSN 0893-6080. \nT. Miki, J. Lee, J. Hwangbo, L. Wellhausen, V. Koltun, and M. Hutter. Learning robust perceptive locomotion for quadrupedal robots in the wild. Science Robotics, 7(62), jan 2022. doi:10.1126/ scirobotics.abk2822. \nL. Smith, J. C. Kew, X. B. Peng, S. Ha, J. Tan, and S. Levine. Legged robots that keep on learning: Fine-tuning locomotion policies in the real world, 2021. \nT.-Y. Yang, T. Zhang, L. Luu, S. Ha, J. Tan, and W. Yu. Safe reinforcement learning for legged locomotion, 2022. URL https://arxiv.org/abs/2203.02638. \nS. Ha, P. Xu, Z. Tan, S. Levine, and J. Tan. Learning to walk in the real world with minimal human effort, 2020. URL https://arxiv.org/abs/2002.08550. \nL. Smith, I. Kostrikov, and S. Levine. A walk in the park: Learning to walk in 20 minutes with model-free reinforcement learning, 2022. URL https://arxiv.org/abs/2208.07860. \nS. Levine, P. Pastor, A. Krizhevsky, J. Ibarz, and D. Quillen. Learning hand-eye coordination for robotic grasping with deep learning and large-scale data collection. 
The International Journal of Robotics Research, 37(4-5):421–436, 2018. \nL. Pinto and A. Gupta. Supersizing self-supervision: Learning to grasp from 50k tries and 700 robot hours, 2015. \nH. Ha and S. Song. Flingbot: The unreasonable effectiveness of dynamic manipulation for cloth unfolding. Conference on Robot Learning, 2021. \nS. James and A. J. Davison. Q-attention: Enabling efficient learning for vision-based robotic manipulation, 2021. \nE. Tzeng, C. Devin, J. Hoffman, C. Finn, P. Abbeel, S. Levine, K. Saenko, and T. Darrell. Adapting deep visuomotor representations with weak pairwise constraints, 2015. \nI. Akkaya, M. Andrychowicz, M. Chociej, M. Litwin, B. McGrew, A. Petron, A. Paino, M. Plappert, G. Powell, R. Ribas, et al. Solving rubik’s cube with a robot hand. arXiv preprint arXiv:1910.07113, 2019. \nM. P. Deisenroth, G. Neumann, J. Peters, et al. A survey on policy search for robotics. Foundations and Trends in Robotics, 2(1–2):1–142, 2013. \nK. Chua, R. Calandra, R. McAllister, and S. Levine. Deep reinforcement learning in a handful of trials using probabilistic dynamics models. In Advances in Neural Information Processing Systems, pages 4754–4765, 2018. \nA. Nagabandi, G. Yang, T. Asmar, R. Pandya, G. Kahn, S. Levine, and R. S. Fearing. Learning image-conditioned dynamics models for control of under-actuated legged millirobots, 2017. \nP. Becker-Ehmck, M. Karl, J. Peters, and P. van der Smagt. Learning to fly via deep model-based reinforcement learning. arXiv preprint arXiv:2003.08876, 2020. \nF. Deng, I. Jang, and S. Ahn. Dreamerpro: Reconstruction-free model-based reinforcement learning with prototypical representations. arXiv preprint arXiv:2110.14565, 2021. \nM. Okada and T. Taniguchi. Dreaming: Model-based reinforcement learning by latent imagination without reconstruction. In 2021 IEEE International Conference on Robotics and Automation (ICRA), pages 4209–4215. IEEE, 2021. \nH. Bharadhwaj, M. Babaeizadeh, D. Erhan, and S. Levine. 
Information prioritization through empowerment in visual model-based rl. arXiv preprint arXiv:2204.08585, 2022. \nK. Paster, L. E. McKinney, S. A. McIlraith, and J. Ba. Blast: Latent dynamics models from bootstrapping. In Deep RL Workshop NeurIPS 2021, 2021. \nK. Hsu, M. J. Kim, R. Rafailov, J. Wu, and C. Finn. Vision-based manipulators need to also see from their hands, 2022. URL https://arxiv.org/abs/2203.12677. ", + "bbox": [ + 171, + 90, + 828, + 917 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "", + "bbox": [ + 168, + 55, + 828, + 920 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "", + "bbox": [ + 169, + 90, + 828, + 916 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "", + "bbox": [ + 168, + 85, + 828, + 647 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "A Adaptation ", + "text_level": 1, + "bbox": [ + 174, + 90, + 303, + 107 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Real world robot learning faces practical challenges such as changing environmental conditions and time varying dynamics. We found that Dreamer is able to adapt to the current environmental conditions with no change to the learning algorithm. This shows promise for using Dreamer in continual learning settings (Parisi et al., 2019). Adaptation of the quadruped to external perturbations is reported in Section 3.1 and Figure 8. ", + "bbox": [ + 174, + 117, + 825, + 191 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The XArm, situated near large windows, is able to adapt and maintain performance under the presence of changing lighting conditions. The XArm experiments were conducted after sundown to keep the lighting conditions constant throughout training. Figure A.1 shows the learning curve of the XArm. As expected, the performance of the XArm drops during sunrise. 
However, the XArm is able to adapt to the change in lighting conditions in about 5 hours time and recover the original performance, which is faster than it would be to train from scratch. A careful inspection of the image observations at these times, as shown in Figure A.1, reveals that the robot received observations with strong light rays covering the scene which greatly differs from the original training observations. ", + "bbox": [ + 173, + 196, + 826, + 315 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/db6cf431ae9355646aa06c810c30e311d8db38009707b4dea4bad788085ac2bb.jpg", + "image_caption": [ + "Figure A.1: The left two images are raw observations consumed by Dreamer. The leftmost image is an image observation as seen by the XArm at night, when it was trained. The next image shows an observation during sunrise. Despite the vast difference in pixel space, the XArm is able to recover, and then surpass, the original performance in approximately 5 hours. Even after 24 hours when the lighting shifts to night time conditions, the XArm is able to maintain performance. " + ], + "image_footnote": [], + "bbox": [ + 173, + 327, + 813, + 440 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "B Imagination ", + "text_level": 1, + "bbox": [ + 174, + 545, + 310, + 563 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/24fe88bf92baa43778d9defa3450750bc0d2c910fa9c12c5902630d7c2316e1e.jpg", + "image_caption": [ + "Figure B.1: To introspect the policy, we can roll out trajectories in the latent space of Dreamer, then decode the images to visualize the intent of the actor network. Each row is an imagined trajectory, showing every 2nd frame. Top: Latent rollouts on the UR5 environment. Multiple objects introduce more visual complexity that the network has to model. Note the second trajectory, which shows a static orange ball becoming a green ball. Bottom: Latent rollouts on the XArm environment. 
" + ], + "image_footnote": [], + "bbox": [ + 179, + 578, + 818, + 820 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "C Detailed Related Work ", + "text_level": 1, + "bbox": [ + 174, + 89, + 400, + 107 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "RL for locomotion A common approach is to train RL agents from large amounts of simulated data under domain and dynamics randomization (Peng et al., 2018; Lee et al., 2020; Rudin et al., 2021; Siekmann et al., 2021; Escontrela et al., 2022; Miki et al., 2022; Kumar et al., 2021; Rusu et al., 2016; Bohez et al., 2022), then freezing the learned policy and deploying it to the real world. Smith et al. (2021) explored pre-training policies in simulation and fine-tuning them with real world data. Yang et al. (2019) investigate learning a dynamics model using a multi-step loss and using model predictive control to accomplish a specified task. Yang et al. (2022) train locomotion policies in the real world but require a recovery controller trained in simulation to avoid unsafe states. In contrast, we use no simulators or reset policies and directly train on the physical robot. While prior work in locomotion has successfully learned walking behaviors in the real world, these works generally required several domain-specific assumptions or pretraining with simulators. Ha et al. (2020) achieved successful walking on the Minitaur robot in 90 minutes. However, the authors manually programmed a reset policy that was used when the robot fell on its back, while in our work the robot must learn to flip over and stand up. Additionally, the Minitaur robot is simpler than the A1 as it has 8 actuators compared to 12 on the A1. In recent work, Smith et al. (2022) utilize a high update-to-data ratio (UTD) RL algorithm to learn walking from 20 minutes of robot training data. 
However, their work assumes the availability of a reset policy and therefore comprises of a different learning problem compared to the problem we tackle of learning to flip over and walk from scratch. Additionally, we show our approach generalizes to environments with image observations and sparse rewards. ", + "bbox": [ + 173, + 117, + 825, + 397 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "RL for manipulation Learning promises to enable robot manipulators to solve contact rich tasks in open real world environments. One class of methods attempts to scale up experience collection through a fleet of robots (Kalashnikov et al., 2018; 2021; Ebert et al., 2021; Dasari et al., 2019; Levine et al., 2018). In contrast, we only leverage one robot, but parallelize an agent’s experience by using the learned world model. Another common approach is to leverage expert demonstrations or other task priors (Pinto and Gupta, 2015; Ha and Song, 2021; Xie et al., 2019; Schoettler et al., 2019; Sivakumar et al., 2022). James and Davison (2021); James et al. (2021) leverages a few demonstrations to increase the sample-efficiency of Q learning by focusing the learner on important aspects of the scene. Other approaches, as in locomotion, first utilize a simulator, then transfer to the real world (Tzeng et al., 2015; Akkaya et al., 2019; OpenAI et al., 2018; Irpan et al., 2020). Our work focuses on single-robot environments where the agent must learn through a small amount of interaction with the world. Meanwhile, the Google Arm Farm line of work by Levine et al. leverages over $5 8 0 \\mathrm { k }$ grasp attempts gathered by 7 robots and collected over 4 months. We believe that a method such as Dreamer could benefit greatly from this scale of training data, however it is unlikely that works such as MT-OPT/QT-OPT Kalashnikov et al. (2018; 2021) would work well in the low data regime that Dreamer excels in. 
", + "bbox": [ + 173, + 404, + 825, + 638 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Model-based RL Due to its higher sample-efficiency over model-free methods, model-based RL is a promising approach to learning on real world robots (Deisenroth et al., 2013). A model based method first learns a dynamics model, which can then be used to plan actions (Nagabandi et al., 2019; Hafner et al., 2018; Chua et al., 2018; Nagabandi et al., 2017; Becker-Ehmck et al., 2020), or be used as a simulator to learn a policy network as in Dreamer (Hafner et al., 2019; 2020). One approach to tackle the high visual complexity of the world is to learn an action conditioned video prediction model (Finn and Levine, 2017; Ebert et al., 2018; Finn et al., 2016). One downside of this approach is the need to directly predict high dimensional observations, which can be computationally inefficient and easily drift. Dreamer learns a dynamics model in a latent space, allowing more efficient rollouts and avoids relying on high quality visual reconstructions for the policy. Another line of work proposes to learn latent dynamics models without having to reconstruct inputs (Deng et al., 2021; Okada and Taniguchi, 2021; Bharadhwaj et al., 2022; Paster et al., 2021), which we see as a promising approach for supporting moving view points in cluttered environments. ", + "bbox": [ + 173, + 645, + 826, + 837 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "D Hyperparameters ", + "text_level": 1, + "bbox": [ + 173, + 89, + 359, + 108 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/476b14497f73d983953dbe7a34cff454d210243e28b1aa97dafb54ae6a92e42f.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
NameSymbolValue
General
Replay capacity (FIFO)Start learningBatch sizeBatch lengthMLP sizeActivationBT10610432324Γ— 512LayerNorm+ELU
World Model
RSSM sizeNumber of latentsClasses per latentKL balancing51232320.8
Actor Critic
Imagination horizonDiscountReturn lambdaTarget update intervalH?150.950.95100
All Optimizers
Gradient clippingLearning rateAdam epsilonE10010-410-6
", + "bbox": [ + 202, + 128, + 789, + 503 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "E Environment and Hardware Details ", + "text_level": 1, + "bbox": [ + 173, + 527, + 508, + 546 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "For every robot setup that involved vision (UR5, XArm, Sphero), we used a RealSense D435 camera positioned to offer a fixed 3rd person view of the scene. ", + "bbox": [ + 176, + 555, + 823, + 585 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "A1 We used the A1 quadrupedal robot by Unitree. The RL policy outputs actions at a frequency that is too high for the PD controller to track, which we overcome by lowpass filtering the action sequence. The joint range allows the legs to self-collide with the body, which can be damaging to the motors and increase battery consumption. We limited the joint range to decrease self-collisions. Finally, the EKF velocity estimator relies on foot-ground contact events to prevent significant drift in the estimates, so we employ a curriculum reward function that does not reward the robot for forward velocity until the robot is upright with extended legs. We also designed a shell which we 3D printed in order to better protect the cables and hardware and provide a smoother rolling over. ", + "bbox": [ + 173, + 590, + 825, + 709 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "XArm & UR5 We utilized slanted bins to prevent objects from leaving the work area during the long-running pick and place experiments on the UR5, which is common practice Levine et al. (2018); Kalashnikov et al. (2018). We also added a partition behind the setup to keep the background constant. It would be interesting to study how a gripper-mounted camera would impact policy performance Hsu et al. (2022), however we report strong results without this design choice. For the XArm we use the uFactory xArm Gripper. For the UR5, we use the Robotiq 2F-85 parallel jaw gripper. 
The bin locations are predetermined and provided as part of the environment to prevent the robot from colliding with the bin. In addition, movement in the $\\textsf { Z }$ axis is only enabled while holding an object and the gripper automatically opens once above the other bin. ", + "bbox": [ + 173, + 713, + 825, + 847 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Sphero We used a rectangular enclosure of $0 . 8 \\times 0 . 8 \\mathrm { { m ^ { 2 } } }$ to keep the sphero robot within the camera view. We used a simple OpenCV script to estimate the L2 distance between the Sphero and the goal position to provide a dense reward for policy optimization. This positional information was not provided to the agent, which it had to learn from the raw top-down images. ", + "bbox": [ + 174, + 852, + 823, + 911 + ], + "page_idx": 14 + } +] \ No newline at end of file diff --git a/parse/dev/3RBY8fKjHeu/3RBY8fKjHeu_middle.json b/parse/dev/3RBY8fKjHeu/3RBY8fKjHeu_middle.json new file mode 100644 index 0000000000000000000000000000000000000000..15d9e3c7747c2266f1ab2676997026819e918ab0 --- /dev/null +++ b/parse/dev/3RBY8fKjHeu/3RBY8fKjHeu_middle.json @@ -0,0 +1,33734 @@ +{ + "pdf_info": [ + { + "preproc_blocks": [ + { + "type": "title", + "bbox": [ + 186, + 79, + 424, + 121 + ], + "lines": [ + { + "bbox": [ + 184, + 80, + 426, + 99 + ], + "spans": [ + { + "bbox": [ + 184, + 80, + 426, + 99 + ], + "score": 1.0, + "content": "DayDreamer: World Models for", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 211, + 100, + 400, + 123 + ], + "spans": [ + { + "bbox": [ + 211, + 100, + 400, + 123 + ], + "score": 1.0, + "content": "Physical Robot Learning", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 0.5 + }, + { + "type": "title", + "bbox": [ + 173, + 140, + 228, + 152 + ], + "lines": [ + { + "bbox": [ + 172, + 139, + 230, + 154 + ], + "spans": [ + { + "bbox": [ + 172, + 139, + 230, + 154 + ], + "score": 1.0, + "content": "Philipp Wu*", + "type": 
"text" + } + ], + "index": 2 + } + ], + "index": 2 + }, + { + "type": "text", + "bbox": [ + 249, + 140, + 440, + 151 + ], + "lines": [ + { + "bbox": [ + 248, + 139, + 441, + 154 + ], + "spans": [ + { + "bbox": [ + 248, + 139, + 348, + 154 + ], + "score": 1.0, + "content": "Alejandro Escontrela*", + "type": "text" + }, + { + "bbox": [ + 365, + 139, + 441, + 153 + ], + "score": 1.0, + "content": "Danijar Hafner*", + "type": "text" + } + ], + "index": 3 + } + ], + "index": 3 + }, + { + "type": "text", + "bbox": [ + 235, + 161, + 376, + 173 + ], + "lines": [ + { + "bbox": [ + 233, + 159, + 378, + 176 + ], + "spans": [ + { + "bbox": [ + 233, + 159, + 299, + 176 + ], + "score": 1.0, + "content": "Ken Goldberg", + "type": "text" + }, + { + "bbox": [ + 316, + 160, + 378, + 174 + ], + "score": 1.0, + "content": "Pieter Abbeel", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 4 + }, + { + "type": "text", + "bbox": [ + 238, + 182, + 373, + 193 + ], + "lines": [ + { + "bbox": [ + 236, + 180, + 375, + 196 + ], + "spans": [ + { + "bbox": [ + 236, + 180, + 375, + 196 + ], + "score": 1.0, + "content": "University of California, Berkeley", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 5 + }, + { + "type": "text", + "bbox": [ + 266, + 198, + 345, + 209 + ], + "lines": [ + { + "bbox": [ + 264, + 196, + 347, + 211 + ], + "spans": [ + { + "bbox": [ + 264, + 196, + 347, + 211 + ], + "score": 1.0, + "content": "*Equal contribution", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 6 + }, + { + "type": "text", + "bbox": [ + 142, + 232, + 469, + 522 + ], + "lines": [ + { + "bbox": [ + 142, + 231, + 469, + 243 + ], + "spans": [ + { + "bbox": [ + 142, + 231, + 469, + 243 + ], + "score": 1.0, + "content": "Abstract: To solve tasks in complex environments, robots need to learn from", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 141, + 243, + 469, + 256 + ], + "spans": [ + { + "bbox": [ + 141, + 243, + 469, + 256 + ], + "score": 1.0, + 
"content": "experience. Deep reinforcement learning is a common approach to robot learning", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 142, + 254, + 469, + 266 + ], + "spans": [ + { + "bbox": [ + 142, + 254, + 469, + 266 + ], + "score": 1.0, + "content": "but requires a large amount of trial and error to learn, limiting its deployment in", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 141, + 265, + 469, + 279 + ], + "spans": [ + { + "bbox": [ + 141, + 265, + 469, + 279 + ], + "score": 1.0, + "content": "the physical world. As a consequence, many advances in robot learning rely on", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 141, + 277, + 469, + 290 + ], + "spans": [ + { + "bbox": [ + 141, + 277, + 469, + 290 + ], + "score": 1.0, + "content": "simulators. On the other hand, learning inside of simulators fails to capture the", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 141, + 289, + 469, + 302 + ], + "spans": [ + { + "bbox": [ + 141, + 289, + 469, + 302 + ], + "score": 1.0, + "content": "complexity of the real world, is prone to simulator inaccuracies, and the resulting", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 141, + 301, + 469, + 313 + ], + "spans": [ + { + "bbox": [ + 141, + 301, + 469, + 313 + ], + "score": 1.0, + "content": "behaviors do not adapt to changes in the world. 
The Dreamer algorithm has recently", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 141, + 312, + 469, + 326 + ], + "spans": [ + { + "bbox": [ + 141, + 312, + 469, + 326 + ], + "score": 1.0, + "content": "shown great promise for learning from small amounts of interaction by planning", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 141, + 324, + 469, + 336 + ], + "spans": [ + { + "bbox": [ + 141, + 324, + 469, + 336 + ], + "score": 1.0, + "content": "within a learned world model, outperforming pure reinforcement learning in video", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 142, + 336, + 469, + 348 + ], + "spans": [ + { + "bbox": [ + 142, + 336, + 469, + 348 + ], + "score": 1.0, + "content": "games. Learning a world model to predict the outcomes of potential actions enables", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 141, + 348, + 469, + 359 + ], + "spans": [ + { + "bbox": [ + 141, + 348, + 469, + 359 + ], + "score": 1.0, + "content": "planning in imagination, reducing the amount of trial and error needed in the real", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 141, + 359, + 469, + 372 + ], + "spans": [ + { + "bbox": [ + 141, + 359, + 469, + 372 + ], + "score": 1.0, + "content": "environment. However, it is unknown whether Dreamer can facilitate faster learning", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 141, + 372, + 469, + 382 + ], + "spans": [ + { + "bbox": [ + 141, + 372, + 469, + 382 + ], + "score": 1.0, + "content": "on physical robots. In this paper, we apply Dreamer to 4 robots to learn online", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 141, + 383, + 470, + 395 + ], + "spans": [ + { + "bbox": [ + 141, + 383, + 470, + 395 + ], + "score": 1.0, + "content": "and directly in the real world, without any simulators. 
Dreamer trains a quadruped", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 141, + 394, + 469, + 407 + ], + "spans": [ + { + "bbox": [ + 141, + 394, + 469, + 407 + ], + "score": 1.0, + "content": "robot to roll off its back, stand up, and walk from scratch and without resets in only", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 141, + 406, + 469, + 418 + ], + "spans": [ + { + "bbox": [ + 141, + 406, + 469, + 418 + ], + "score": 1.0, + "content": "1 hour. We then push the robot and find that Dreamer adapts within 10 minutes to", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 142, + 419, + 469, + 429 + ], + "spans": [ + { + "bbox": [ + 142, + 419, + 469, + 429 + ], + "score": 1.0, + "content": "withstand perturbations or quickly roll over and stand back up. On two different", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 141, + 429, + 469, + 441 + ], + "spans": [ + { + "bbox": [ + 141, + 429, + 469, + 441 + ], + "score": 1.0, + "content": "robotic arms, Dreamer learns to pick and place objects from camera images and", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 141, + 442, + 470, + 453 + ], + "spans": [ + { + "bbox": [ + 141, + 442, + 470, + 453 + ], + "score": 1.0, + "content": "sparse rewards, approaching human-level teleoperation performance. On a wheeled", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 141, + 451, + 470, + 466 + ], + "spans": [ + { + "bbox": [ + 141, + 451, + 470, + 466 + ], + "score": 1.0, + "content": "robot, Dreamer learns to navigate to a goal position purely from camera images,", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 141, + 464, + 470, + 477 + ], + "spans": [ + { + "bbox": [ + 141, + 464, + 470, + 477 + ], + "score": 1.0, + "content": "automatically resolving ambiguity about the robot orientation. 
Using the same", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 141, + 477, + 469, + 488 + ], + "spans": [ + { + "bbox": [ + 141, + 477, + 469, + 488 + ], + "score": 1.0, + "content": "hyperparameters across all experiments, we find that Dreamer is capable of online", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 141, + 487, + 469, + 500 + ], + "spans": [ + { + "bbox": [ + 141, + 487, + 469, + 500 + ], + "score": 1.0, + "content": "learning in the real world, which establishes a strong baseline. We release our", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 141, + 498, + 469, + 513 + ], + "spans": [ + { + "bbox": [ + 141, + 498, + 469, + 513 + ], + "score": 1.0, + "content": "infrastructure for future applications of world models to robot learning. Videos are", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 141, + 510, + 433, + 524 + ], + "spans": [ + { + "bbox": [ + 141, + 510, + 433, + 524 + ], + "score": 1.0, + "content": "available on the project website: https://danijar.com/daydreamer", + "type": "text" + } + ], + "index": 31 + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 107, + 542, + 504, + 647 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 107, + 542, + 504, + 647 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 107, + 542, + 504, + 647 + ], + "spans": [ + { + "bbox": [ + 107, + 542, + 504, + 647 + ], + "score": 0.973, + "type": "image", + "image_path": "17f2d11eee9937e70f62a1993623ebccd221887d067e71919c350fa57662f4d3.jpg" + } + ] + } + ], + "index": 33, + "virtual_lines": [ + { + "bbox": [ + 107, + 542, + 504, + 577.0 + ], + "spans": [], + "index": 32 + }, + { + "bbox": [ + 107, + 577.0, + 504, + 612.0 + ], + "spans": [], + "index": 33 + }, + { + "bbox": [ + 107, + 612.0, + 504, + 647.0 + ], + "spans": [], + "index": 34 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 653, + 505, + 721 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 
105, + 653, + 505, + 666 + ], + "spans": [ + { + "bbox": [ + 105, + 653, + 505, + 666 + ], + "score": 1.0, + "content": "Figure 1: To study the applicability of Dreamer for sample-efficient robot learning, we apply the", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 106, + 664, + 505, + 676 + ], + "spans": [ + { + "bbox": [ + 106, + 664, + 505, + 676 + ], + "score": 1.0, + "content": "algorithm to learn robot locomotion, manipulation, and navigation tasks from scratch in the real", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 104, + 674, + 505, + 689 + ], + "spans": [ + { + "bbox": [ + 104, + 674, + 505, + 689 + ], + "score": 1.0, + "content": "world on 4 robots, without simulators. The tasks evaluate a diverse range of challenges, including", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 106, + 687, + 506, + 700 + ], + "spans": [ + { + "bbox": [ + 106, + 687, + 506, + 700 + ], + "score": 1.0, + "content": "continuous and discrete actions, dense and sparse rewards, proprioceptive and camera inputs, as well", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 698, + 506, + 711 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 506, + 711 + ], + "score": 1.0, + "content": "as sensor fusion of multiple input modalities. 
Learning successfully using the same hyperparameters", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 709, + 470, + 722 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 470, + 722 + ], + "score": 1.0, + "content": "across all experiments, Dreamer establishes a strong baseline for real world robot learning.", + "type": "text" + } + ], + "index": 40 + } + ], + "index": 37.5 + } + ], + "index": 35.25 + } + ], + "page_idx": 0, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 106, + 732, + 377, + 743 + ], + "lines": [ + { + "bbox": [ + 106, + 732, + 378, + 744 + ], + "spans": [ + { + "bbox": [ + 106, + 732, + 378, + 744 + ], + "score": 1.0, + "content": "6th Conference on Robot Learning (CoRL 2022), Auckland, New Zealand.", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "title", + "bbox": [ + 186, + 79, + 424, + 121 + ], + "lines": [ + { + "bbox": [ + 184, + 80, + 426, + 99 + ], + "spans": [ + { + "bbox": [ + 184, + 80, + 426, + 99 + ], + "score": 1.0, + "content": "DayDreamer: World Models for", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 211, + 100, + 400, + 123 + ], + "spans": [ + { + "bbox": [ + 211, + 100, + 400, + 123 + ], + "score": 1.0, + "content": "Physical Robot Learning", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 0.5 + }, + { + "type": "title", + "bbox": [ + 173, + 140, + 228, + 152 + ], + "lines": [ + { + "bbox": [ + 172, + 139, + 230, + 154 + ], + "spans": [ + { + "bbox": [ + 172, + 139, + 230, + 154 + ], + "score": 1.0, + "content": "Philipp Wu*", + "type": "text" + } + ], + "index": 2 + } + ], + "index": 2 + }, + { + "type": "text", + "bbox": [ + 249, + 140, + 440, + 151 + ], + "lines": [ + { + "bbox": [ + 248, + 139, + 441, + 154 + ], + "spans": [ + { + "bbox": [ + 248, + 139, + 348, + 154 + ], + "score": 1.0, + "content": "Alejandro Escontrela*", + "type": "text" + }, + { + "bbox": [ + 365, + 139, + 441, + 153 + ], + 
"score": 1.0, + "content": "Danijar Hafner*", + "type": "text" + } + ], + "index": 3 + } + ], + "index": 3, + "bbox_fs": [ + 248, + 139, + 441, + 154 + ] + }, + { + "type": "text", + "bbox": [ + 235, + 161, + 376, + 173 + ], + "lines": [ + { + "bbox": [ + 233, + 159, + 378, + 176 + ], + "spans": [ + { + "bbox": [ + 233, + 159, + 299, + 176 + ], + "score": 1.0, + "content": "Ken Goldberg", + "type": "text" + }, + { + "bbox": [ + 316, + 160, + 378, + 174 + ], + "score": 1.0, + "content": "Pieter Abbeel", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 4, + "bbox_fs": [ + 233, + 159, + 378, + 176 + ] + }, + { + "type": "text", + "bbox": [ + 238, + 182, + 373, + 193 + ], + "lines": [ + { + "bbox": [ + 236, + 180, + 375, + 196 + ], + "spans": [ + { + "bbox": [ + 236, + 180, + 375, + 196 + ], + "score": 1.0, + "content": "University of California, Berkeley", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 264, + 196, + 347, + 211 + ], + "spans": [ + { + "bbox": [ + 264, + 196, + 347, + 211 + ], + "score": 1.0, + "content": "*Equal contribution", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 5, + "bbox_fs": [ + 236, + 180, + 375, + 196 + ] + }, + { + "type": "text", + "bbox": [ + 266, + 198, + 345, + 209 + ], + "lines": [], + "index": 6, + "bbox_fs": [ + 264, + 196, + 347, + 211 + ], + "lines_deleted": true + }, + { + "type": "text", + "bbox": [ + 142, + 232, + 469, + 522 + ], + "lines": [ + { + "bbox": [ + 142, + 231, + 469, + 243 + ], + "spans": [ + { + "bbox": [ + 142, + 231, + 469, + 243 + ], + "score": 1.0, + "content": "Abstract: To solve tasks in complex environments, robots need to learn from", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 141, + 243, + 469, + 256 + ], + "spans": [ + { + "bbox": [ + 141, + 243, + 469, + 256 + ], + "score": 1.0, + "content": "experience. 
Deep reinforcement learning is a common approach to robot learning", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 142, + 254, + 469, + 266 + ], + "spans": [ + { + "bbox": [ + 142, + 254, + 469, + 266 + ], + "score": 1.0, + "content": "but requires a large amount of trial and error to learn, limiting its deployment in", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 141, + 265, + 469, + 279 + ], + "spans": [ + { + "bbox": [ + 141, + 265, + 469, + 279 + ], + "score": 1.0, + "content": "the physical world. As a consequence, many advances in robot learning rely on", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 141, + 277, + 469, + 290 + ], + "spans": [ + { + "bbox": [ + 141, + 277, + 469, + 290 + ], + "score": 1.0, + "content": "simulators. On the other hand, learning inside of simulators fails to capture the", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 141, + 289, + 469, + 302 + ], + "spans": [ + { + "bbox": [ + 141, + 289, + 469, + 302 + ], + "score": 1.0, + "content": "complexity of the real world, is prone to simulator inaccuracies, and the resulting", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 141, + 301, + 469, + 313 + ], + "spans": [ + { + "bbox": [ + 141, + 301, + 469, + 313 + ], + "score": 1.0, + "content": "behaviors do not adapt to changes in the world. 
The Dreamer algorithm has recently", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 141, + 312, + 469, + 326 + ], + "spans": [ + { + "bbox": [ + 141, + 312, + 469, + 326 + ], + "score": 1.0, + "content": "shown great promise for learning from small amounts of interaction by planning", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 141, + 324, + 469, + 336 + ], + "spans": [ + { + "bbox": [ + 141, + 324, + 469, + 336 + ], + "score": 1.0, + "content": "within a learned world model, outperforming pure reinforcement learning in video", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 142, + 336, + 469, + 348 + ], + "spans": [ + { + "bbox": [ + 142, + 336, + 469, + 348 + ], + "score": 1.0, + "content": "games. Learning a world model to predict the outcomes of potential actions enables", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 141, + 348, + 469, + 359 + ], + "spans": [ + { + "bbox": [ + 141, + 348, + 469, + 359 + ], + "score": 1.0, + "content": "planning in imagination, reducing the amount of trial and error needed in the real", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 141, + 359, + 469, + 372 + ], + "spans": [ + { + "bbox": [ + 141, + 359, + 469, + 372 + ], + "score": 1.0, + "content": "environment. However, it is unknown whether Dreamer can facilitate faster learning", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 141, + 372, + 469, + 382 + ], + "spans": [ + { + "bbox": [ + 141, + 372, + 469, + 382 + ], + "score": 1.0, + "content": "on physical robots. In this paper, we apply Dreamer to 4 robots to learn online", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 141, + 383, + 470, + 395 + ], + "spans": [ + { + "bbox": [ + 141, + 383, + 470, + 395 + ], + "score": 1.0, + "content": "and directly in the real world, without any simulators. 
Dreamer trains a quadruped", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 141, + 394, + 469, + 407 + ], + "spans": [ + { + "bbox": [ + 141, + 394, + 469, + 407 + ], + "score": 1.0, + "content": "robot to roll off its back, stand up, and walk from scratch and without resets in only", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 141, + 406, + 469, + 418 + ], + "spans": [ + { + "bbox": [ + 141, + 406, + 469, + 418 + ], + "score": 1.0, + "content": "1 hour. We then push the robot and find that Dreamer adapts within 10 minutes to", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 142, + 419, + 469, + 429 + ], + "spans": [ + { + "bbox": [ + 142, + 419, + 469, + 429 + ], + "score": 1.0, + "content": "withstand perturbations or quickly roll over and stand back up. On two different", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 141, + 429, + 469, + 441 + ], + "spans": [ + { + "bbox": [ + 141, + 429, + 469, + 441 + ], + "score": 1.0, + "content": "robotic arms, Dreamer learns to pick and place objects from camera images and", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 141, + 442, + 470, + 453 + ], + "spans": [ + { + "bbox": [ + 141, + 442, + 470, + 453 + ], + "score": 1.0, + "content": "sparse rewards, approaching human-level teleoperation performance. On a wheeled", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 141, + 451, + 470, + 466 + ], + "spans": [ + { + "bbox": [ + 141, + 451, + 470, + 466 + ], + "score": 1.0, + "content": "robot, Dreamer learns to navigate to a goal position purely from camera images,", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 141, + 464, + 470, + 477 + ], + "spans": [ + { + "bbox": [ + 141, + 464, + 470, + 477 + ], + "score": 1.0, + "content": "automatically resolving ambiguity about the robot orientation. 
Using the same", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 141, + 477, + 469, + 488 + ], + "spans": [ + { + "bbox": [ + 141, + 477, + 469, + 488 + ], + "score": 1.0, + "content": "hyperparameters across all experiments, we find that Dreamer is capable of online", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 141, + 487, + 469, + 500 + ], + "spans": [ + { + "bbox": [ + 141, + 487, + 469, + 500 + ], + "score": 1.0, + "content": "learning in the real world, which establishes a strong baseline. We release our", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 141, + 498, + 469, + 513 + ], + "spans": [ + { + "bbox": [ + 141, + 498, + 469, + 513 + ], + "score": 1.0, + "content": "infrastructure for future applications of world models to robot learning. Videos are", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 141, + 510, + 433, + 524 + ], + "spans": [ + { + "bbox": [ + 141, + 510, + 433, + 524 + ], + "score": 1.0, + "content": "available on the project website: https://danijar.com/daydreamer", + "type": "text" + } + ], + "index": 31 + } + ], + "index": 19, + "bbox_fs": [ + 141, + 231, + 470, + 524 + ] + }, + { + "type": "image", + "bbox": [ + 107, + 542, + 504, + 647 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 107, + 542, + 504, + 647 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 107, + 542, + 504, + 647 + ], + "spans": [ + { + "bbox": [ + 107, + 542, + 504, + 647 + ], + "score": 0.973, + "type": "image", + "image_path": "17f2d11eee9937e70f62a1993623ebccd221887d067e71919c350fa57662f4d3.jpg" + } + ] + } + ], + "index": 33, + "virtual_lines": [ + { + "bbox": [ + 107, + 542, + 504, + 577.0 + ], + "spans": [], + "index": 32 + }, + { + "bbox": [ + 107, + 577.0, + 504, + 612.0 + ], + "spans": [], + "index": 33 + }, + { + "bbox": [ + 107, + 612.0, + 504, + 647.0 + ], + "spans": [], + "index": 34 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 653, + 505, + 721 + ], + 
"group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 653, + 505, + 666 + ], + "spans": [ + { + "bbox": [ + 105, + 653, + 505, + 666 + ], + "score": 1.0, + "content": "Figure 1: To study the applicability of Dreamer for sample-efficient robot learning, we apply the", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 106, + 664, + 505, + 676 + ], + "spans": [ + { + "bbox": [ + 106, + 664, + 505, + 676 + ], + "score": 1.0, + "content": "algorithm to learn robot locomotion, manipulation, and navigation tasks from scratch in the real", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 104, + 674, + 505, + 689 + ], + "spans": [ + { + "bbox": [ + 104, + 674, + 505, + 689 + ], + "score": 1.0, + "content": "world on 4 robots, without simulators. The tasks evaluate a diverse range of challenges, including", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 106, + 687, + 506, + 700 + ], + "spans": [ + { + "bbox": [ + 106, + 687, + 506, + 700 + ], + "score": 1.0, + "content": "continuous and discrete actions, dense and sparse rewards, proprioceptive and camera inputs, as well", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 698, + 506, + 711 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 506, + 711 + ], + "score": 1.0, + "content": "as sensor fusion of multiple input modalities. 
Learning successfully using the same hyperparameters", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 709, + 470, + 722 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 470, + 722 + ], + "score": 1.0, + "content": "across all experiments, Dreamer establishes a strong baseline for real world robot learning.", + "type": "text" + } + ], + "index": 40 + } + ], + "index": 37.5 + } + ], + "index": 35.25 + } + ] + }, + { + "preproc_blocks": [ + { + "type": "title", + "bbox": [ + 107, + 72, + 190, + 84 + ], + "lines": [ + { + "bbox": [ + 105, + 70, + 192, + 87 + ], + "spans": [ + { + "bbox": [ + 105, + 70, + 192, + 87 + ], + "score": 1.0, + "content": "1 Introduction", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 107, + 100, + 337, + 252 + ], + "lines": [ + { + "bbox": [ + 106, + 101, + 336, + 112 + ], + "spans": [ + { + "bbox": [ + 106, + 101, + 336, + 112 + ], + "score": 1.0, + "content": "Teaching robots to solve complex tasks in the real world", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 112, + 337, + 124 + ], + "spans": [ + { + "bbox": [ + 105, + 112, + 337, + 124 + ], + "score": 1.0, + "content": "is a foundational problem of robotics research. Deep rein-", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 124, + 337, + 136 + ], + "spans": [ + { + "bbox": [ + 106, + 124, + 337, + 136 + ], + "score": 1.0, + "content": "forcement learning (RL) offers a popular approach to robot", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 136, + 337, + 147 + ], + "spans": [ + { + "bbox": [ + 106, + 136, + 337, + 147 + ], + "score": 1.0, + "content": "learning that enables robots to improve their behavior over", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 148, + 336, + 158 + ], + "spans": [ + { + "bbox": [ + 106, + 148, + 336, + 158 + ], + "score": 1.0, + "content": "time through trial and error. 
However, current algorithms", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 159, + 337, + 170 + ], + "spans": [ + { + "bbox": [ + 105, + 159, + 337, + 170 + ], + "score": 1.0, + "content": "require too much interaction with the environment to learn", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 171, + 336, + 182 + ], + "spans": [ + { + "bbox": [ + 106, + 171, + 336, + 182 + ], + "score": 1.0, + "content": "successful behaviors. Recently, modern world models", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 182, + 336, + 194 + ], + "spans": [ + { + "bbox": [ + 106, + 182, + 336, + 194 + ], + "score": 1.0, + "content": "have shown great promise for data efficient learning in", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 194, + 338, + 206 + ], + "spans": [ + { + "bbox": [ + 106, + 194, + 338, + 206 + ], + "score": 1.0, + "content": "simulated domains and video games (Hafner et al., 2019;", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 205, + 338, + 218 + ], + "spans": [ + { + "bbox": [ + 106, + 205, + 338, + 218 + ], + "score": 1.0, + "content": "2020). 
Learning world models from past experience en-", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 217, + 337, + 228 + ], + "spans": [ + { + "bbox": [ + 106, + 217, + 337, + 228 + ], + "score": 1.0, + "content": "ables robots to imagine the future outcomes of potential", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 229, + 337, + 240 + ], + "spans": [ + { + "bbox": [ + 106, + 229, + 337, + 240 + ], + "score": 1.0, + "content": "actions, reducing the amount of trial and error in the real", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 241, + 225, + 252 + ], + "spans": [ + { + "bbox": [ + 106, + 241, + 225, + 252 + ], + "score": 1.0, + "content": "environment needed to learn.", + "type": "text" + } + ], + "index": 13 + } + ], + "index": 7 + }, + { + "type": "text", + "bbox": [ + 107, + 262, + 336, + 425 + ], + "lines": [ + { + "bbox": [ + 105, + 260, + 338, + 275 + ], + "spans": [ + { + "bbox": [ + 105, + 260, + 338, + 275 + ], + "score": 1.0, + "content": "While learning accurate world models can be challenging,", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 274, + 336, + 286 + ], + "spans": [ + { + "bbox": [ + 106, + 274, + 336, + 286 + ], + "score": 1.0, + "content": "they offer compelling properties for robot learning. 
By", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 286, + 338, + 297 + ], + "spans": [ + { + "bbox": [ + 106, + 286, + 338, + 297 + ], + "score": 1.0, + "content": "predicting future outcomes, world models allow for plan-", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 297, + 338, + 308 + ], + "spans": [ + { + "bbox": [ + 106, + 297, + 338, + 308 + ], + "score": 1.0, + "content": "ning and behavior learning given only small amounts of", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 308, + 338, + 320 + ], + "spans": [ + { + "bbox": [ + 105, + 308, + 338, + 320 + ], + "score": 1.0, + "content": "real world interaction (Gal et al., 2016; Ebert et al., 2018).", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 106, + 320, + 337, + 332 + ], + "spans": [ + { + "bbox": [ + 106, + 320, + 337, + 332 + ], + "score": 1.0, + "content": "Moreover, world models summarize general dynamics", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 106, + 333, + 337, + 344 + ], + "spans": [ + { + "bbox": [ + 106, + 333, + 337, + 344 + ], + "score": 1.0, + "content": "knowledge about the environment that, once learned, could", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 343, + 337, + 355 + ], + "spans": [ + { + "bbox": [ + 105, + 343, + 337, + 355 + ], + "score": 1.0, + "content": "be reused for a wide range of downstream tasks (Sekar", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 106, + 356, + 337, + 367 + ], + "spans": [ + { + "bbox": [ + 106, + 356, + 337, + 367 + ], + "score": 1.0, + "content": "et al., 2020). 
World models also learn representations", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 106, + 367, + 336, + 379 + ], + "spans": [ + { + "bbox": [ + 106, + 367, + 336, + 379 + ], + "score": 1.0, + "content": "that fuse multiple sensor modalities and integrate them", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 378, + 336, + 390 + ], + "spans": [ + { + "bbox": [ + 106, + 378, + 336, + 390 + ], + "score": 1.0, + "content": "into latent states, reducing the need for sophisticated state", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 106, + 391, + 336, + 402 + ], + "spans": [ + { + "bbox": [ + 106, + 391, + 336, + 402 + ], + "score": 1.0, + "content": "estimators. Finally, world models generalize well from", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 106, + 402, + 338, + 414 + ], + "spans": [ + { + "bbox": [ + 106, + 402, + 338, + 414 + ], + "score": 1.0, + "content": "available offline data (Yu et al., 2021), which further ac-", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 106, + 414, + 249, + 425 + ], + "spans": [ + { + "bbox": [ + 106, + 414, + 249, + 425 + ], + "score": 1.0, + "content": "celerates learning in the real world.", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 20.5 + }, + { + "type": "image", + "bbox": [ + 344, + 102, + 506, + 233 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 344, + 102, + 506, + 233 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 344, + 102, + 506, + 233 + ], + "spans": [ + { + "bbox": [ + 344, + 102, + 506, + 233 + ], + "score": 0.973, + "type": "image", + "image_path": "e30f877426a1aa2686b70c08629a56889403c96105b017890f8ea57b7982c4a2.jpg" + } + ] + } + ], + "index": 32.5, + "virtual_lines": [ + { + "bbox": [ + 344, + 102, + 506, + 115.1 + ], + "spans": [], + "index": 28 + }, + { + "bbox": [ + 344, + 115.1, + 506, + 128.2 + ], + "spans": [], + "index": 29 + }, + { + "bbox": [ + 344, + 128.2, + 506, + 
141.29999999999998 + ], + "spans": [], + "index": 30 + }, + { + "bbox": [ + 344, + 141.29999999999998, + 506, + 154.39999999999998 + ], + "spans": [], + "index": 31 + }, + { + "bbox": [ + 344, + 154.39999999999998, + 506, + 167.49999999999997 + ], + "spans": [], + "index": 32 + }, + { + "bbox": [ + 344, + 167.49999999999997, + 506, + 180.59999999999997 + ], + "spans": [], + "index": 33 + }, + { + "bbox": [ + 344, + 180.59999999999997, + 506, + 193.69999999999996 + ], + "spans": [], + "index": 34 + }, + { + "bbox": [ + 344, + 193.69999999999996, + 506, + 206.79999999999995 + ], + "spans": [], + "index": 35 + }, + { + "bbox": [ + 344, + 206.79999999999995, + 506, + 219.89999999999995 + ], + "spans": [], + "index": 36 + }, + { + "bbox": [ + 344, + 219.89999999999995, + 506, + 232.99999999999994 + ], + "spans": [], + "index": 37 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 344, + 243, + 505, + 389 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 343, + 242, + 505, + 255 + ], + "spans": [ + { + "bbox": [ + 343, + 242, + 505, + 255 + ], + "score": 1.0, + "content": "Figure 2: Dreamer follows a simple", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 343, + 254, + 506, + 266 + ], + "spans": [ + { + "bbox": [ + 343, + 254, + 506, + 266 + ], + "score": 1.0, + "content": "pipeline for online learning on robot", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 343, + 265, + 506, + 276 + ], + "spans": [ + { + "bbox": [ + 343, + 265, + 506, + 276 + ], + "score": 1.0, + "content": "hardware without simulators. 
The cur-", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 343, + 276, + 506, + 288 + ], + "spans": [ + { + "bbox": [ + 343, + 276, + 506, + 288 + ], + "score": 1.0, + "content": "rent learned policy collects experience", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 343, + 288, + 506, + 299 + ], + "spans": [ + { + "bbox": [ + 343, + 288, + 506, + 299 + ], + "score": 1.0, + "content": "on the robot. This experience is added", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 343, + 299, + 505, + 311 + ], + "spans": [ + { + "bbox": [ + 343, + 299, + 505, + 311 + ], + "score": 1.0, + "content": "to the replay buffer. The world model is", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 343, + 309, + 506, + 322 + ], + "spans": [ + { + "bbox": [ + 343, + 309, + 506, + 322 + ], + "score": 1.0, + "content": "trained on replayed off-policy sequences", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 343, + 321, + 506, + 333 + ], + "spans": [ + { + "bbox": [ + 343, + 321, + 506, + 333 + ], + "score": 1.0, + "content": "through supervised learning. An actor", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 343, + 332, + 506, + 344 + ], + "spans": [ + { + "bbox": [ + 343, + 332, + 506, + 344 + ], + "score": 1.0, + "content": "critic algorithm optimizes a neural net-", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 343, + 344, + 505, + 356 + ], + "spans": [ + { + "bbox": [ + 343, + 344, + 505, + 356 + ], + "score": 1.0, + "content": "work policy from imagined rollouts in", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 343, + 354, + 506, + 366 + ], + "spans": [ + { + "bbox": [ + 343, + 354, + 506, + 366 + ], + "score": 1.0, + "content": "the latent space of the world model. 
We", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 343, + 366, + 507, + 377 + ], + "spans": [ + { + "bbox": [ + 343, + 366, + 507, + 377 + ], + "score": 1.0, + "content": "parallelize data collection and neural net-", + "type": "text" + } + ], + "index": 49 + }, + { + "bbox": [ + 343, + 376, + 405, + 390 + ], + "spans": [ + { + "bbox": [ + 343, + 376, + 405, + 390 + ], + "score": 1.0, + "content": "work learning.", + "type": "text" + } + ], + "index": 50 + } + ], + "index": 44 + } + ], + "index": 38.25 + }, + { + "type": "text", + "bbox": [ + 106, + 435, + 505, + 541 + ], + "lines": [ + { + "bbox": [ + 105, + 435, + 505, + 449 + ], + "spans": [ + { + "bbox": [ + 105, + 435, + 505, + 449 + ], + "score": 1.0, + "content": "Despite the promises of world models, learning accurate world models for the real world is a open", + "type": "text" + } + ], + "index": 51 + }, + { + "bbox": [ + 105, + 446, + 506, + 460 + ], + "spans": [ + { + "bbox": [ + 105, + 446, + 506, + 460 + ], + "score": 1.0, + "content": "challenge. In this paper, we leverage recent advances of the Dreamer world model for training a", + "type": "text" + } + ], + "index": 52 + }, + { + "bbox": [ + 106, + 460, + 505, + 471 + ], + "spans": [ + { + "bbox": [ + 106, + 460, + 505, + 471 + ], + "score": 1.0, + "content": "variety of robots in the most straight-forward and fundamental problem setting: online reinforcement", + "type": "text" + } + ], + "index": 53 + }, + { + "bbox": [ + 106, + 471, + 505, + 483 + ], + "spans": [ + { + "bbox": [ + 106, + 471, + 505, + 483 + ], + "score": 1.0, + "content": "learning in the real world, without simulators or demonstrations. 
As shown in Figure 2, Dreamer", + "type": "text" + } + ], + "index": 54 + }, + { + "bbox": [ + 106, + 483, + 506, + 495 + ], + "spans": [ + { + "bbox": [ + 106, + 483, + 506, + 495 + ], + "score": 1.0, + "content": "learns a world model from a replay buffer of past experience, learns behaviors from rollouts imagined", + "type": "text" + } + ], + "index": 55 + }, + { + "bbox": [ + 106, + 495, + 505, + 506 + ], + "spans": [ + { + "bbox": [ + 106, + 495, + 505, + 506 + ], + "score": 1.0, + "content": "in the latent space of the world model, and continuously interacts with the environment to explore", + "type": "text" + } + ], + "index": 56 + }, + { + "bbox": [ + 106, + 506, + 505, + 517 + ], + "spans": [ + { + "bbox": [ + 106, + 506, + 505, + 517 + ], + "score": 1.0, + "content": "and improve its behaviors. Our aim is to push the limits of robot learning directly in the real world", + "type": "text" + } + ], + "index": 57 + }, + { + "bbox": [ + 106, + 518, + 506, + 530 + ], + "spans": [ + { + "bbox": [ + 106, + 518, + 506, + 530 + ], + "score": 1.0, + "content": "and offer a robust platform to enable future work that develops the benefits of world models for robot", + "type": "text" + } + ], + "index": 58 + }, + { + "bbox": [ + 106, + 529, + 399, + 541 + ], + "spans": [ + { + "bbox": [ + 106, + 529, + 399, + 541 + ], + "score": 1.0, + "content": "learning. 
The key contributions of this paper are summarized as follows:", + "type": "text" + } + ], + "index": 59 + } + ], + "index": 55 + }, + { + "type": "text", + "bbox": [ + 106, + 553, + 506, + 717 + ], + "lines": [ + { + "bbox": [ + 106, + 554, + 504, + 566 + ], + "spans": [ + { + "bbox": [ + 106, + 554, + 504, + 566 + ], + "score": 1.0, + "content": "β€’ Dreamer on Robots We apply Dreamer to 4 robots, demonstrating successful learning directly", + "type": "text" + } + ], + "index": 60 + }, + { + "bbox": [ + 115, + 564, + 506, + 578 + ], + "spans": [ + { + "bbox": [ + 115, + 564, + 506, + 578 + ], + "score": 1.0, + "content": "in the real world, without introducing new algorithms. The tasks cover a range of challenges,", + "type": "text" + } + ], + "index": 61 + }, + { + "bbox": [ + 115, + 577, + 421, + 589 + ], + "spans": [ + { + "bbox": [ + 115, + 577, + 421, + 589 + ], + "score": 1.0, + "content": "including different action spaces, sensory modalities, and reward structures.", + "type": "text" + } + ], + "index": 62 + }, + { + "bbox": [ + 106, + 593, + 506, + 605 + ], + "spans": [ + { + "bbox": [ + 106, + 593, + 506, + 605 + ], + "score": 1.0, + "content": "β€’ Walking in 1 Hour We teach a quadruped from scratch in the real world to roll off its back,", + "type": "text" + } + ], + "index": 63 + }, + { + "bbox": [ + 115, + 604, + 505, + 617 + ], + "spans": [ + { + "bbox": [ + 115, + 604, + 505, + 617 + ], + "score": 1.0, + "content": "stand up, and walk in only 1 hour. 
Afterwards, we find that the robot adapts to being pushed within", + "type": "text" + } + ], + "index": 64 + }, + { + "bbox": [ + 115, + 617, + 457, + 628 + ], + "spans": [ + { + "bbox": [ + 115, + 617, + 457, + 628 + ], + "score": 1.0, + "content": "10 minutes, learning to withstand pushes or quickly roll over and get back on its feet.", + "type": "text" + } + ], + "index": 65 + }, + { + "bbox": [ + 106, + 631, + 506, + 644 + ], + "spans": [ + { + "bbox": [ + 106, + 631, + 506, + 644 + ], + "score": 1.0, + "content": "β€’ Visual Pick and Place We train robotic arms to pick and place objects from sparse rewards,", + "type": "text" + } + ], + "index": 66 + }, + { + "bbox": [ + 115, + 644, + 505, + 656 + ], + "spans": [ + { + "bbox": [ + 115, + 644, + 505, + 656 + ], + "score": 1.0, + "content": "which requires localizing objects from pixels and fusing images with proprioceptive inputs. The", + "type": "text" + } + ], + "index": 67 + }, + { + "bbox": [ + 115, + 654, + 505, + 668 + ], + "spans": [ + { + "bbox": [ + 115, + 654, + 505, + 668 + ], + "score": 1.0, + "content": "learned behavior outperforms model-free agents and approaches the performance of a human", + "type": "text" + } + ], + "index": 68 + }, + { + "bbox": [ + 115, + 667, + 347, + 679 + ], + "spans": [ + { + "bbox": [ + 115, + 667, + 347, + 679 + ], + "score": 1.0, + "content": "teleoperator using the same control interface as the robot.", + "type": "text" + } + ], + "index": 69 + }, + { + "bbox": [ + 107, + 683, + 505, + 695 + ], + "spans": [ + { + "bbox": [ + 107, + 683, + 505, + 695 + ], + "score": 1.0, + "content": "β€’ Open Source We publicly release the software infrastructure for all our experiments, which", + "type": "text" + } + ], + "index": 70 + }, + { + "bbox": [ + 115, + 694, + 506, + 707 + ], + "spans": [ + { + "bbox": [ + 115, + 694, + 506, + 707 + ], + "score": 1.0, + "content": "supports different action spaces and sensory modalities, offering a flexible platform for future", + "type": 
"text" + } + ], + "index": 71 + }, + { + "bbox": [ + 116, + 707, + 362, + 718 + ], + "spans": [ + { + "bbox": [ + 116, + 707, + 362, + 718 + ], + "score": 1.0, + "content": "research of world models for robot learning in the real world.", + "type": "text" + } + ], + "index": 72 + } + ], + "index": 66 + } + ], + "page_idx": 1, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 302, + 741, + 309, + 750 + ], + "lines": [ + { + "bbox": [ + 301, + 740, + 310, + 753 + ], + "spans": [ + { + "bbox": [ + 301, + 740, + 310, + 753 + ], + "score": 1.0, + "content": "2", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "title", + "bbox": [ + 107, + 72, + 190, + 84 + ], + "lines": [ + { + "bbox": [ + 105, + 70, + 192, + 87 + ], + "spans": [ + { + "bbox": [ + 105, + 70, + 192, + 87 + ], + "score": 1.0, + "content": "1 Introduction", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 107, + 100, + 337, + 252 + ], + "lines": [ + { + "bbox": [ + 106, + 101, + 336, + 112 + ], + "spans": [ + { + "bbox": [ + 106, + 101, + 336, + 112 + ], + "score": 1.0, + "content": "Teaching robots to solve complex tasks in the real world", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 112, + 337, + 124 + ], + "spans": [ + { + "bbox": [ + 105, + 112, + 337, + 124 + ], + "score": 1.0, + "content": "is a foundational problem of robotics research. 
Deep rein-", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 124, + 337, + 136 + ], + "spans": [ + { + "bbox": [ + 106, + 124, + 337, + 136 + ], + "score": 1.0, + "content": "forcement learning (RL) offers a popular approach to robot", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 136, + 337, + 147 + ], + "spans": [ + { + "bbox": [ + 106, + 136, + 337, + 147 + ], + "score": 1.0, + "content": "learning that enables robots to improve their behavior over", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 148, + 336, + 158 + ], + "spans": [ + { + "bbox": [ + 106, + 148, + 336, + 158 + ], + "score": 1.0, + "content": "time through trial and error. However, current algorithms", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 159, + 337, + 170 + ], + "spans": [ + { + "bbox": [ + 105, + 159, + 337, + 170 + ], + "score": 1.0, + "content": "require too much interaction with the environment to learn", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 171, + 336, + 182 + ], + "spans": [ + { + "bbox": [ + 106, + 171, + 336, + 182 + ], + "score": 1.0, + "content": "successful behaviors. Recently, modern world models", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 182, + 336, + 194 + ], + "spans": [ + { + "bbox": [ + 106, + 182, + 336, + 194 + ], + "score": 1.0, + "content": "have shown great promise for data efficient learning in", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 194, + 338, + 206 + ], + "spans": [ + { + "bbox": [ + 106, + 194, + 338, + 206 + ], + "score": 1.0, + "content": "simulated domains and video games (Hafner et al., 2019;", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 205, + 338, + 218 + ], + "spans": [ + { + "bbox": [ + 106, + 205, + 338, + 218 + ], + "score": 1.0, + "content": "2020). 
Learning world models from past experience en-", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 217, + 337, + 228 + ], + "spans": [ + { + "bbox": [ + 106, + 217, + 337, + 228 + ], + "score": 1.0, + "content": "ables robots to imagine the future outcomes of potential", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 229, + 337, + 240 + ], + "spans": [ + { + "bbox": [ + 106, + 229, + 337, + 240 + ], + "score": 1.0, + "content": "actions, reducing the amount of trial and error in the real", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 241, + 225, + 252 + ], + "spans": [ + { + "bbox": [ + 106, + 241, + 225, + 252 + ], + "score": 1.0, + "content": "environment needed to learn.", + "type": "text" + } + ], + "index": 13 + } + ], + "index": 7, + "bbox_fs": [ + 105, + 101, + 338, + 252 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 262, + 336, + 425 + ], + "lines": [ + { + "bbox": [ + 105, + 260, + 338, + 275 + ], + "spans": [ + { + "bbox": [ + 105, + 260, + 338, + 275 + ], + "score": 1.0, + "content": "While learning accurate world models can be challenging,", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 274, + 336, + 286 + ], + "spans": [ + { + "bbox": [ + 106, + 274, + 336, + 286 + ], + "score": 1.0, + "content": "they offer compelling properties for robot learning. 
By", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 286, + 338, + 297 + ], + "spans": [ + { + "bbox": [ + 106, + 286, + 338, + 297 + ], + "score": 1.0, + "content": "predicting future outcomes, world models allow for plan-", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 297, + 338, + 308 + ], + "spans": [ + { + "bbox": [ + 106, + 297, + 338, + 308 + ], + "score": 1.0, + "content": "ning and behavior learning given only small amounts of", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 308, + 338, + 320 + ], + "spans": [ + { + "bbox": [ + 105, + 308, + 338, + 320 + ], + "score": 1.0, + "content": "real world interaction (Gal et al., 2016; Ebert et al., 2018).", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 106, + 320, + 337, + 332 + ], + "spans": [ + { + "bbox": [ + 106, + 320, + 337, + 332 + ], + "score": 1.0, + "content": "Moreover, world models summarize general dynamics", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 106, + 333, + 337, + 344 + ], + "spans": [ + { + "bbox": [ + 106, + 333, + 337, + 344 + ], + "score": 1.0, + "content": "knowledge about the environment that, once learned, could", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 343, + 337, + 355 + ], + "spans": [ + { + "bbox": [ + 105, + 343, + 337, + 355 + ], + "score": 1.0, + "content": "be reused for a wide range of downstream tasks (Sekar", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 106, + 356, + 337, + 367 + ], + "spans": [ + { + "bbox": [ + 106, + 356, + 337, + 367 + ], + "score": 1.0, + "content": "et al., 2020). 
World models also learn representations", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 106, + 367, + 336, + 379 + ], + "spans": [ + { + "bbox": [ + 106, + 367, + 336, + 379 + ], + "score": 1.0, + "content": "that fuse multiple sensor modalities and integrate them", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 378, + 336, + 390 + ], + "spans": [ + { + "bbox": [ + 106, + 378, + 336, + 390 + ], + "score": 1.0, + "content": "into latent states, reducing the need for sophisticated state", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 106, + 391, + 336, + 402 + ], + "spans": [ + { + "bbox": [ + 106, + 391, + 336, + 402 + ], + "score": 1.0, + "content": "estimators. Finally, world models generalize well from", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 106, + 402, + 338, + 414 + ], + "spans": [ + { + "bbox": [ + 106, + 402, + 338, + 414 + ], + "score": 1.0, + "content": "available offline data (Yu et al., 2021), which further ac-", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 106, + 414, + 249, + 425 + ], + "spans": [ + { + "bbox": [ + 106, + 414, + 249, + 425 + ], + "score": 1.0, + "content": "celerates learning in the real world.", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 20.5, + "bbox_fs": [ + 105, + 260, + 338, + 425 + ] + }, + { + "type": "image", + "bbox": [ + 344, + 102, + 506, + 233 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 344, + 102, + 506, + 233 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 344, + 102, + 506, + 233 + ], + "spans": [ + { + "bbox": [ + 344, + 102, + 506, + 233 + ], + "score": 0.973, + "type": "image", + "image_path": "e30f877426a1aa2686b70c08629a56889403c96105b017890f8ea57b7982c4a2.jpg" + } + ] + } + ], + "index": 32.5, + "virtual_lines": [ + { + "bbox": [ + 344, + 102, + 506, + 115.1 + ], + "spans": [], + "index": 28 + }, + { + "bbox": [ + 344, + 115.1, + 506, + 128.2 + ], + "spans": [], + "index": 29 + 
}, + { + "bbox": [ + 344, + 128.2, + 506, + 141.29999999999998 + ], + "spans": [], + "index": 30 + }, + { + "bbox": [ + 344, + 141.29999999999998, + 506, + 154.39999999999998 + ], + "spans": [], + "index": 31 + }, + { + "bbox": [ + 344, + 154.39999999999998, + 506, + 167.49999999999997 + ], + "spans": [], + "index": 32 + }, + { + "bbox": [ + 344, + 167.49999999999997, + 506, + 180.59999999999997 + ], + "spans": [], + "index": 33 + }, + { + "bbox": [ + 344, + 180.59999999999997, + 506, + 193.69999999999996 + ], + "spans": [], + "index": 34 + }, + { + "bbox": [ + 344, + 193.69999999999996, + 506, + 206.79999999999995 + ], + "spans": [], + "index": 35 + }, + { + "bbox": [ + 344, + 206.79999999999995, + 506, + 219.89999999999995 + ], + "spans": [], + "index": 36 + }, + { + "bbox": [ + 344, + 219.89999999999995, + 506, + 232.99999999999994 + ], + "spans": [], + "index": 37 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 344, + 243, + 505, + 389 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 343, + 242, + 505, + 255 + ], + "spans": [ + { + "bbox": [ + 343, + 242, + 505, + 255 + ], + "score": 1.0, + "content": "Figure 2: Dreamer follows a simple", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 343, + 254, + 506, + 266 + ], + "spans": [ + { + "bbox": [ + 343, + 254, + 506, + 266 + ], + "score": 1.0, + "content": "pipeline for online learning on robot", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 343, + 265, + 506, + 276 + ], + "spans": [ + { + "bbox": [ + 343, + 265, + 506, + 276 + ], + "score": 1.0, + "content": "hardware without simulators. 
The cur-", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 343, + 276, + 506, + 288 + ], + "spans": [ + { + "bbox": [ + 343, + 276, + 506, + 288 + ], + "score": 1.0, + "content": "rent learned policy collects experience", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 343, + 288, + 506, + 299 + ], + "spans": [ + { + "bbox": [ + 343, + 288, + 506, + 299 + ], + "score": 1.0, + "content": "on the robot. This experience is added", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 343, + 299, + 505, + 311 + ], + "spans": [ + { + "bbox": [ + 343, + 299, + 505, + 311 + ], + "score": 1.0, + "content": "to the replay buffer. The world model is", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 343, + 309, + 506, + 322 + ], + "spans": [ + { + "bbox": [ + 343, + 309, + 506, + 322 + ], + "score": 1.0, + "content": "trained on replayed off-policy sequences", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 343, + 321, + 506, + 333 + ], + "spans": [ + { + "bbox": [ + 343, + 321, + 506, + 333 + ], + "score": 1.0, + "content": "through supervised learning. An actor", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 343, + 332, + 506, + 344 + ], + "spans": [ + { + "bbox": [ + 343, + 332, + 506, + 344 + ], + "score": 1.0, + "content": "critic algorithm optimizes a neural net-", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 343, + 344, + 505, + 356 + ], + "spans": [ + { + "bbox": [ + 343, + 344, + 505, + 356 + ], + "score": 1.0, + "content": "work policy from imagined rollouts in", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 343, + 354, + 506, + 366 + ], + "spans": [ + { + "bbox": [ + 343, + 354, + 506, + 366 + ], + "score": 1.0, + "content": "the latent space of the world model. 
We", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 343, + 366, + 507, + 377 + ], + "spans": [ + { + "bbox": [ + 343, + 366, + 507, + 377 + ], + "score": 1.0, + "content": "parallelize data collection and neural net-", + "type": "text" + } + ], + "index": 49 + }, + { + "bbox": [ + 343, + 376, + 405, + 390 + ], + "spans": [ + { + "bbox": [ + 343, + 376, + 405, + 390 + ], + "score": 1.0, + "content": "work learning.", + "type": "text" + } + ], + "index": 50 + } + ], + "index": 44 + } + ], + "index": 38.25 + }, + { + "type": "text", + "bbox": [ + 106, + 435, + 505, + 541 + ], + "lines": [ + { + "bbox": [ + 105, + 435, + 505, + 449 + ], + "spans": [ + { + "bbox": [ + 105, + 435, + 505, + 449 + ], + "score": 1.0, + "content": "Despite the promises of world models, learning accurate world models for the real world is a open", + "type": "text" + } + ], + "index": 51 + }, + { + "bbox": [ + 105, + 446, + 506, + 460 + ], + "spans": [ + { + "bbox": [ + 105, + 446, + 506, + 460 + ], + "score": 1.0, + "content": "challenge. In this paper, we leverage recent advances of the Dreamer world model for training a", + "type": "text" + } + ], + "index": 52 + }, + { + "bbox": [ + 106, + 460, + 505, + 471 + ], + "spans": [ + { + "bbox": [ + 106, + 460, + 505, + 471 + ], + "score": 1.0, + "content": "variety of robots in the most straight-forward and fundamental problem setting: online reinforcement", + "type": "text" + } + ], + "index": 53 + }, + { + "bbox": [ + 106, + 471, + 505, + 483 + ], + "spans": [ + { + "bbox": [ + 106, + 471, + 505, + 483 + ], + "score": 1.0, + "content": "learning in the real world, without simulators or demonstrations. 
As shown in Figure 2, Dreamer", + "type": "text" + } + ], + "index": 54 + }, + { + "bbox": [ + 106, + 483, + 506, + 495 + ], + "spans": [ + { + "bbox": [ + 106, + 483, + 506, + 495 + ], + "score": 1.0, + "content": "learns a world model from a replay buffer of past experience, learns behaviors from rollouts imagined", + "type": "text" + } + ], + "index": 55 + }, + { + "bbox": [ + 106, + 495, + 505, + 506 + ], + "spans": [ + { + "bbox": [ + 106, + 495, + 505, + 506 + ], + "score": 1.0, + "content": "in the latent space of the world model, and continuously interacts with the environment to explore", + "type": "text" + } + ], + "index": 56 + }, + { + "bbox": [ + 106, + 506, + 505, + 517 + ], + "spans": [ + { + "bbox": [ + 106, + 506, + 505, + 517 + ], + "score": 1.0, + "content": "and improve its behaviors. Our aim is to push the limits of robot learning directly in the real world", + "type": "text" + } + ], + "index": 57 + }, + { + "bbox": [ + 106, + 518, + 506, + 530 + ], + "spans": [ + { + "bbox": [ + 106, + 518, + 506, + 530 + ], + "score": 1.0, + "content": "and offer a robust platform to enable future work that develops the benefits of world models for robot", + "type": "text" + } + ], + "index": 58 + }, + { + "bbox": [ + 106, + 529, + 399, + 541 + ], + "spans": [ + { + "bbox": [ + 106, + 529, + 399, + 541 + ], + "score": 1.0, + "content": "learning. 
The key contributions of this paper are summarized as follows:", + "type": "text" + } + ], + "index": 59 + } + ], + "index": 55, + "bbox_fs": [ + 105, + 435, + 506, + 541 + ] + }, + { + "type": "list", + "bbox": [ + 106, + 553, + 506, + 717 + ], + "lines": [ + { + "bbox": [ + 106, + 554, + 504, + 566 + ], + "spans": [ + { + "bbox": [ + 106, + 554, + 504, + 566 + ], + "score": 1.0, + "content": "β€’ Dreamer on Robots We apply Dreamer to 4 robots, demonstrating successful learning directly", + "type": "text" + } + ], + "index": 60, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 564, + 506, + 578 + ], + "spans": [ + { + "bbox": [ + 115, + 564, + 506, + 578 + ], + "score": 1.0, + "content": "in the real world, without introducing new algorithms. The tasks cover a range of challenges,", + "type": "text" + } + ], + "index": 61 + }, + { + "bbox": [ + 115, + 577, + 421, + 589 + ], + "spans": [ + { + "bbox": [ + 115, + 577, + 421, + 589 + ], + "score": 1.0, + "content": "including different action spaces, sensory modalities, and reward structures.", + "type": "text" + } + ], + "index": 62, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 593, + 506, + 605 + ], + "spans": [ + { + "bbox": [ + 106, + 593, + 506, + 605 + ], + "score": 1.0, + "content": "β€’ Walking in 1 Hour We teach a quadruped from scratch in the real world to roll off its back,", + "type": "text" + } + ], + "index": 63, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 604, + 505, + 617 + ], + "spans": [ + { + "bbox": [ + 115, + 604, + 505, + 617 + ], + "score": 1.0, + "content": "stand up, and walk in only 1 hour. 
Afterwards, we find that the robot adapts to being pushed within", + "type": "text" + } + ], + "index": 64 + }, + { + "bbox": [ + 115, + 617, + 457, + 628 + ], + "spans": [ + { + "bbox": [ + 115, + 617, + 457, + 628 + ], + "score": 1.0, + "content": "10 minutes, learning to withstand pushes or quickly roll over and get back on its feet.", + "type": "text" + } + ], + "index": 65, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 631, + 506, + 644 + ], + "spans": [ + { + "bbox": [ + 106, + 631, + 506, + 644 + ], + "score": 1.0, + "content": "β€’ Visual Pick and Place We train robotic arms to pick and place objects from sparse rewards,", + "type": "text" + } + ], + "index": 66, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 644, + 505, + 656 + ], + "spans": [ + { + "bbox": [ + 115, + 644, + 505, + 656 + ], + "score": 1.0, + "content": "which requires localizing objects from pixels and fusing images with proprioceptive inputs. The", + "type": "text" + } + ], + "index": 67 + }, + { + "bbox": [ + 115, + 654, + 505, + 668 + ], + "spans": [ + { + "bbox": [ + 115, + 654, + 505, + 668 + ], + "score": 1.0, + "content": "learned behavior outperforms model-free agents and approaches the performance of a human", + "type": "text" + } + ], + "index": 68 + }, + { + "bbox": [ + 115, + 667, + 347, + 679 + ], + "spans": [ + { + "bbox": [ + 115, + 667, + 347, + 679 + ], + "score": 1.0, + "content": "teleoperator using the same control interface as the robot.", + "type": "text" + } + ], + "index": 69, + "is_list_end_line": true + }, + { + "bbox": [ + 107, + 683, + 505, + 695 + ], + "spans": [ + { + "bbox": [ + 107, + 683, + 505, + 695 + ], + "score": 1.0, + "content": "β€’ Open Source We publicly release the software infrastructure for all our experiments, which", + "type": "text" + } + ], + "index": 70, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 694, + 506, + 707 + ], + "spans": [ + { + "bbox": [ + 115, + 694, + 506, + 707 + ], + "score": 1.0, + 
"content": "supports different action spaces and sensory modalities, offering a flexible platform for future", + "type": "text" + } + ], + "index": 71 + }, + { + "bbox": [ + 116, + 707, + 362, + 718 + ], + "spans": [ + { + "bbox": [ + 116, + 707, + 362, + 718 + ], + "score": 1.0, + "content": "research of world models for robot learning in the real world.", + "type": "text" + } + ], + "index": 72, + "is_list_end_line": true + } + ], + "index": 66, + "bbox_fs": [ + 106, + 554, + 506, + 718 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "image", + "bbox": [ + 104, + 52, + 506, + 216 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 104, + 52, + 506, + 216 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 104, + 52, + 506, + 216 + ], + "spans": [ + { + "bbox": [ + 104, + 52, + 506, + 216 + ], + "score": 0.972, + "type": "image", + "image_path": "bbd9aa6b3f541685e1ecf9dd1c4451b92904b361a6547ee2e39414769cb64de4.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 104, + 52, + 506, + 106.66666666666666 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 104, + 106.66666666666666, + 506, + 161.33333333333331 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 104, + 161.33333333333331, + 506, + 215.99999999999997 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 222, + 506, + 302 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 222, + 506, + 236 + ], + "spans": [ + { + "bbox": [ + 105, + 222, + 506, + 236 + ], + "score": 1.0, + "content": "Figure 3: Neural Network Training We leverage the Dreamer algorithm (Hafner et al., 2019;", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 233, + 507, + 247 + ], + "spans": [ + { + "bbox": [ + 105, + 233, + 507, + 247 + ], + "score": 1.0, + "content": "2020) for fast robot learning in real world. 
Dreamer consists of two main neural network components,", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 245, + 505, + 258 + ], + "spans": [ + { + "bbox": [ + 106, + 245, + 505, + 258 + ], + "score": 1.0, + "content": "the world model and the policy. Left: The world model follows the structure of a deep Kalman", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 255, + 505, + 269 + ], + "spans": [ + { + "bbox": [ + 105, + 255, + 505, + 269 + ], + "score": 1.0, + "content": "filter that is trained on subsequences drawn from the replay buffer. The encoder fuses all sensory", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 267, + 506, + 280 + ], + "spans": [ + { + "bbox": [ + 105, + 267, + 506, + 280 + ], + "score": 1.0, + "content": "modalities into discrete codes. The decoder reconstructs the inputs from the codes, providing a", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 280, + 505, + 291 + ], + "spans": [ + { + "bbox": [ + 106, + 280, + 505, + 291 + ], + "score": 1.0, + "content": "rich learning signal and enabling human inspection of model predictions. 
A recurrent state-space", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 290, + 507, + 303 + ], + "spans": [ + { + "bbox": [ + 105, + 290, + 507, + 303 + ], + "score": 1.0, + "content": "model (RSSM) is trained to predict future codes given actions, without observing intermediate inputs.", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 6 + } + ], + "index": 3.5 + }, + { + "type": "text", + "bbox": [ + 107, + 302, + 506, + 347 + ], + "lines": [ + { + "bbox": [ + 105, + 300, + 506, + 313 + ], + "spans": [ + { + "bbox": [ + 105, + 300, + 506, + 313 + ], + "score": 1.0, + "content": "Right: The world model enables massively parallel policy optimization from imagined rollouts", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 313, + 507, + 325 + ], + "spans": [ + { + "bbox": [ + 105, + 313, + 507, + 325 + ], + "score": 1.0, + "content": "in the compact latent space using a large batch size, without having to reconstruct sensory inputs.", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 324, + 505, + 336 + ], + "spans": [ + { + "bbox": [ + 106, + 324, + 505, + 336 + ], + "score": 1.0, + "content": "Dreamer trains a policy network and value network from the imagined rollouts and a learned", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 336, + 180, + 346 + ], + "spans": [ + { + "bbox": [ + 105, + 336, + 180, + 346 + ], + "score": 1.0, + "content": "reward function.", + "type": "text" + } + ], + "index": 13 + } + ], + "index": 11.5 + }, + { + "type": "title", + "bbox": [ + 107, + 355, + 177, + 369 + ], + "lines": [ + { + "bbox": [ + 104, + 353, + 178, + 372 + ], + "spans": [ + { + "bbox": [ + 104, + 353, + 178, + 372 + ], + "score": 1.0, + "content": "2 Approach", + "type": "text" + } + ], + "index": 14 + } + ], + "index": 14 + }, + { + "type": "text", + "bbox": [ + 106, + 377, + 505, + 471 + ], + "lines": [ + { + "bbox": [ + 105, + 376, + 506, + 391 + ], + "spans": [ + { + 
"bbox": [ + 105, + 376, + 506, + 391 + ], + "score": 1.0, + "content": "We leverage the Dreamer algorithm (Hafner et al., 2019; 2020) for online learning on physical robots,", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 389, + 506, + 402 + ], + "spans": [ + { + "bbox": [ + 106, + 389, + 506, + 402 + ], + "score": 1.0, + "content": "without the need for simulators. Figure 2 shows an overview of the approach. Dreamer learns a", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 401, + 505, + 414 + ], + "spans": [ + { + "bbox": [ + 106, + 401, + 505, + 414 + ], + "score": 1.0, + "content": "world model from a replay buffer of past experiences, uses an actor critic algorithm to learn behaviors", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 106, + 412, + 505, + 425 + ], + "spans": [ + { + "bbox": [ + 106, + 412, + 505, + 425 + ], + "score": 1.0, + "content": "from trajectories predicted by the learned model, and deploys its behavior in the environment", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 106, + 424, + 505, + 437 + ], + "spans": [ + { + "bbox": [ + 106, + 424, + 505, + 437 + ], + "score": 1.0, + "content": "to continuously grow the replay buffer. We decouple learning updates from data collection to", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 435, + 505, + 448 + ], + "spans": [ + { + "bbox": [ + 105, + 435, + 505, + 448 + ], + "score": 1.0, + "content": "meet latency requirements and to enable fast training without waiting for the environment. 
In our", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 448, + 505, + 460 + ], + "spans": [ + { + "bbox": [ + 106, + 448, + 505, + 460 + ], + "score": 1.0, + "content": "implementation, a learner thread continuously trains the world model and actor critic behavior, while", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 460, + 397, + 471 + ], + "spans": [ + { + "bbox": [ + 105, + 460, + 397, + 471 + ], + "score": 1.0, + "content": "an actor thread in parallel computes actions for environment interaction.", + "type": "text" + } + ], + "index": 22 + } + ], + "index": 18.5 + }, + { + "type": "text", + "bbox": [ + 106, + 475, + 505, + 557 + ], + "lines": [ + { + "bbox": [ + 106, + 474, + 505, + 488 + ], + "spans": [ + { + "bbox": [ + 106, + 474, + 505, + 488 + ], + "score": 1.0, + "content": "World Model Learning The world model is a deep neural network that learns to predict the", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 487, + 505, + 500 + ], + "spans": [ + { + "bbox": [ + 105, + 487, + 505, + 500 + ], + "score": 1.0, + "content": "environment dynamics, as shown in Figure 3 (left). Because sensory inputs can be large images, we", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 499, + 505, + 511 + ], + "spans": [ + { + "bbox": [ + 105, + 499, + 505, + 511 + ], + "score": 1.0, + "content": "predict future representations rather than future inputs. This reduces accumulating errors and enables", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 510, + 506, + 523 + ], + "spans": [ + { + "bbox": [ + 105, + 510, + 506, + 523 + ], + "score": 1.0, + "content": "massively parallel training with a large batch size. 
Thus, the world model can be thought of as a", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 106, + 522, + 505, + 534 + ], + "spans": [ + { + "bbox": [ + 106, + 522, + 505, + 534 + ], + "score": 1.0, + "content": "fast simulator of the environment that the robot learns autonomously, starting from a blank slate and", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 106, + 534, + 505, + 545 + ], + "spans": [ + { + "bbox": [ + 106, + 534, + 505, + 545 + ], + "score": 1.0, + "content": "continuously improving its model as it explores the real world. The world model is based on the", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 546, + 488, + 558 + ], + "spans": [ + { + "bbox": [ + 106, + 546, + 488, + 558 + ], + "score": 1.0, + "content": "Recurrent State-Space Model (RSSM; Hafner et al., 2018), which consists of four components:", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 26 + }, + { + "type": "interline_equation", + "bbox": [ + 201, + 561, + 482, + 592 + ], + "lines": [ + { + "bbox": [ + 201, + 561, + 482, + 592 + ], + "spans": [ + { + "bbox": [ + 201, + 561, + 482, + 592 + ], + "score": 0.74, + "content": "{ \\begin{array} { r l r l } & { \\operatorname { e n c } _ { \\theta } { \\big ( } s _ { t } \\ { \\big | } \\ s _ { t - 1 } , a _ { t - 1 } , x _ { t } { \\big ) } } & & { { \\mathrm { D e c o d e r ~ N e t w o r k : } } \\quad \\operatorname* { d e c } _ { \\theta } { \\big ( } s _ { t } { \\big ) } \\approx x _ { t } } \\\\ & { \\operatorname { d y n } _ { \\theta } { \\big ( } s _ { t } \\ { \\big | } \\ s _ { t - 1 } , a _ { t - 1 } { \\big ) } } & & { { \\mathrm { R e w a r d ~ N e t w o r k : } } \\quad \\operatorname { r e w } _ { \\theta } { \\big ( } s _ { t + 1 } { \\big ) } \\approx r _ { t } } \\end{array} }", + "type": "interline_equation", + "image_path": "bd74dd0968c35c4dc8ffeba6cc51d9f109521ca66008a561e30a7fcccfe148a6.jpg" + } + ] + } + ], + "index": 31, + "virtual_lines": [ 
+ { + "bbox": [ + 201, + 561, + 482, + 571.3333333333334 + ], + "spans": [], + "index": 30 + }, + { + "bbox": [ + 201, + 571.3333333333334, + 482, + 581.6666666666667 + ], + "spans": [], + "index": 31 + }, + { + "bbox": [ + 201, + 581.6666666666667, + 482, + 592.0000000000001 + ], + "spans": [], + "index": 32 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 595, + 506, + 712 + ], + "lines": [ + { + "bbox": [ + 105, + 595, + 506, + 608 + ], + "spans": [ + { + "bbox": [ + 105, + 595, + 506, + 608 + ], + "score": 1.0, + "content": "Physical robots are often equipped with multiple sensors of different modalities, such as proprioceptive", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 104, + 606, + 507, + 620 + ], + "spans": [ + { + "bbox": [ + 104, + 606, + 507, + 620 + ], + "score": 1.0, + "content": "joint readings, force sensors, and high-dimensional inputs such as RGB and depth camera images.", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 618, + 505, + 631 + ], + "spans": [ + { + "bbox": [ + 105, + 618, + 289, + 631 + ], + "score": 1.0, + "content": "The encoder network fuses all sensory inputs", + "type": "text" + }, + { + "bbox": [ + 289, + 621, + 299, + 630 + ], + "score": 0.84, + "content": "x _ { t }", + "type": "inline_equation" + }, + { + "bbox": [ + 299, + 618, + 473, + 631 + ], + "score": 1.0, + "content": "together into the stochastic representations", + "type": "text" + }, + { + "bbox": [ + 473, + 621, + 482, + 630 + ], + "score": 0.84, + "content": "z _ { t }", + "type": "inline_equation" + }, + { + "bbox": [ + 483, + 618, + 505, + 631 + ], + "score": 1.0, + "content": ". 
The", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 106, + 630, + 505, + 643 + ], + "spans": [ + { + "bbox": [ + 106, + 630, + 505, + 643 + ], + "score": 1.0, + "content": "dynamics model learns to predict the sequence of stochastic representations by using its recurrent state", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 106, + 642, + 505, + 655 + ], + "spans": [ + { + "bbox": [ + 106, + 642, + 117, + 653 + ], + "score": 0.86, + "content": "h _ { t }", + "type": "inline_equation" + }, + { + "bbox": [ + 117, + 642, + 505, + 655 + ], + "score": 1.0, + "content": ". The decoder reconstructs the sensory inputs to provide a rich signal for learning representations", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 106, + 654, + 506, + 666 + ], + "spans": [ + { + "bbox": [ + 106, + 654, + 506, + 666 + ], + "score": 1.0, + "content": "and enables human inspection of model predictions. In our experiments, the robot has to discover", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 664, + 506, + 679 + ], + "spans": [ + { + "bbox": [ + 105, + 664, + 506, + 679 + ], + "score": 1.0, + "content": "task rewards by interacting with the real world, which the reward network learns to predict. Using", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 106, + 678, + 505, + 689 + ], + "spans": [ + { + "bbox": [ + 106, + 678, + 505, + 689 + ], + "score": 1.0, + "content": "manually specified rewards as a function of the decoded sensory inputs is also possible. 
We optimize", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 106, + 689, + 506, + 701 + ], + "spans": [ + { + "bbox": [ + 106, + 689, + 506, + 701 + ], + "score": 1.0, + "content": "all components of the world model jointly by stochastic backpropagation (Kingma and Welling, 2013;", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 700, + 196, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 700, + 196, + 712 + ], + "score": 1.0, + "content": "Rezende et al., 2014).", + "type": "text" + } + ], + "index": 42 + } + ], + "index": 37.5 + } + ], + "page_idx": 2, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 302, + 741, + 309, + 750 + ], + "lines": [ + { + "bbox": [ + 301, + 740, + 310, + 752 + ], + "spans": [ + { + "bbox": [ + 301, + 740, + 310, + 752 + ], + "score": 1.0, + "content": "3", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "image", + "bbox": [ + 104, + 52, + 506, + 216 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 104, + 52, + 506, + 216 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 104, + 52, + 506, + 216 + ], + "spans": [ + { + "bbox": [ + 104, + 52, + 506, + 216 + ], + "score": 0.972, + "type": "image", + "image_path": "bbd9aa6b3f541685e1ecf9dd1c4451b92904b361a6547ee2e39414769cb64de4.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 104, + 52, + 506, + 106.66666666666666 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 104, + 106.66666666666666, + 506, + 161.33333333333331 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 104, + 161.33333333333331, + 506, + 215.99999999999997 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 222, + 506, + 302 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 222, + 506, + 236 + ], + "spans": [ + { + "bbox": [ + 105, + 222, + 506, + 236 + ], + "score": 1.0, + "content": "Figure 3: Neural 
Network Training We leverage the Dreamer algorithm (Hafner et al., 2019;", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 233, + 507, + 247 + ], + "spans": [ + { + "bbox": [ + 105, + 233, + 507, + 247 + ], + "score": 1.0, + "content": "2020) for fast robot learning in real world. Dreamer consists of two main neural network components,", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 245, + 505, + 258 + ], + "spans": [ + { + "bbox": [ + 106, + 245, + 505, + 258 + ], + "score": 1.0, + "content": "the world model and the policy. Left: The world model follows the structure of a deep Kalman", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 255, + 505, + 269 + ], + "spans": [ + { + "bbox": [ + 105, + 255, + 505, + 269 + ], + "score": 1.0, + "content": "filter that is trained on subsequences drawn from the replay buffer. The encoder fuses all sensory", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 267, + 506, + 280 + ], + "spans": [ + { + "bbox": [ + 105, + 267, + 506, + 280 + ], + "score": 1.0, + "content": "modalities into discrete codes. The decoder reconstructs the inputs from the codes, providing a", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 280, + 505, + 291 + ], + "spans": [ + { + "bbox": [ + 106, + 280, + 505, + 291 + ], + "score": 1.0, + "content": "rich learning signal and enabling human inspection of model predictions. 
A recurrent state-space", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 290, + 507, + 303 + ], + "spans": [ + { + "bbox": [ + 105, + 290, + 507, + 303 + ], + "score": 1.0, + "content": "model (RSSM) is trained to predict future codes given actions, without observing intermediate inputs.", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 6 + } + ], + "index": 3.5 + }, + { + "type": "text", + "bbox": [ + 107, + 302, + 506, + 347 + ], + "lines": [ + { + "bbox": [ + 105, + 300, + 506, + 313 + ], + "spans": [ + { + "bbox": [ + 105, + 300, + 506, + 313 + ], + "score": 1.0, + "content": "Right: The world model enables massively parallel policy optimization from imagined rollouts", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 313, + 507, + 325 + ], + "spans": [ + { + "bbox": [ + 105, + 313, + 507, + 325 + ], + "score": 1.0, + "content": "in the compact latent space using a large batch size, without having to reconstruct sensory inputs.", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 324, + 505, + 336 + ], + "spans": [ + { + "bbox": [ + 106, + 324, + 505, + 336 + ], + "score": 1.0, + "content": "Dreamer trains a policy network and value network from the imagined rollouts and a learned", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 336, + 180, + 346 + ], + "spans": [ + { + "bbox": [ + 105, + 336, + 180, + 346 + ], + "score": 1.0, + "content": "reward function.", + "type": "text" + } + ], + "index": 13 + } + ], + "index": 11.5, + "bbox_fs": [ + 105, + 300, + 507, + 346 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 355, + 177, + 369 + ], + "lines": [ + { + "bbox": [ + 104, + 353, + 178, + 372 + ], + "spans": [ + { + "bbox": [ + 104, + 353, + 178, + 372 + ], + "score": 1.0, + "content": "2 Approach", + "type": "text" + } + ], + "index": 14 + } + ], + "index": 14 + }, + { + "type": "text", + "bbox": [ + 106, + 377, + 505, + 471 + ], + "lines": [ + { + "bbox": [ + 
105, + 376, + 506, + 391 + ], + "spans": [ + { + "bbox": [ + 105, + 376, + 506, + 391 + ], + "score": 1.0, + "content": "We leverage the Dreamer algorithm (Hafner et al., 2019; 2020) for online learning on physical robots,", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 389, + 506, + 402 + ], + "spans": [ + { + "bbox": [ + 106, + 389, + 506, + 402 + ], + "score": 1.0, + "content": "without the need for simulators. Figure 2 shows an overview of the approach. Dreamer learns a", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 401, + 505, + 414 + ], + "spans": [ + { + "bbox": [ + 106, + 401, + 505, + 414 + ], + "score": 1.0, + "content": "world model from a replay buffer of past experiences, uses an actor critic algorithm to learn behaviors", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 106, + 412, + 505, + 425 + ], + "spans": [ + { + "bbox": [ + 106, + 412, + 505, + 425 + ], + "score": 1.0, + "content": "from trajectories predicted by the learned model, and deploys its behavior in the environment", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 106, + 424, + 505, + 437 + ], + "spans": [ + { + "bbox": [ + 106, + 424, + 505, + 437 + ], + "score": 1.0, + "content": "to continuously grow the replay buffer. We decouple learning updates from data collection to", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 435, + 505, + 448 + ], + "spans": [ + { + "bbox": [ + 105, + 435, + 505, + 448 + ], + "score": 1.0, + "content": "meet latency requirements and to enable fast training without waiting for the environment. 
In our", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 448, + 505, + 460 + ], + "spans": [ + { + "bbox": [ + 106, + 448, + 505, + 460 + ], + "score": 1.0, + "content": "implementation, a learner thread continuously trains the world model and actor critic behavior, while", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 460, + 397, + 471 + ], + "spans": [ + { + "bbox": [ + 105, + 460, + 397, + 471 + ], + "score": 1.0, + "content": "an actor thread in parallel computes actions for environment interaction.", + "type": "text" + } + ], + "index": 22 + } + ], + "index": 18.5, + "bbox_fs": [ + 105, + 376, + 506, + 471 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 475, + 505, + 557 + ], + "lines": [ + { + "bbox": [ + 106, + 474, + 505, + 488 + ], + "spans": [ + { + "bbox": [ + 106, + 474, + 505, + 488 + ], + "score": 1.0, + "content": "World Model Learning The world model is a deep neural network that learns to predict the", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 487, + 505, + 500 + ], + "spans": [ + { + "bbox": [ + 105, + 487, + 505, + 500 + ], + "score": 1.0, + "content": "environment dynamics, as shown in Figure 3 (left). Because sensory inputs can be large images, we", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 499, + 505, + 511 + ], + "spans": [ + { + "bbox": [ + 105, + 499, + 505, + 511 + ], + "score": 1.0, + "content": "predict future representations rather than future inputs. This reduces accumulating errors and enables", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 510, + 506, + 523 + ], + "spans": [ + { + "bbox": [ + 105, + 510, + 506, + 523 + ], + "score": 1.0, + "content": "massively parallel training with a large batch size. 
Thus, the world model can be thought of as a", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 106, + 522, + 505, + 534 + ], + "spans": [ + { + "bbox": [ + 106, + 522, + 505, + 534 + ], + "score": 1.0, + "content": "fast simulator of the environment that the robot learns autonomously, starting from a blank slate and", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 106, + 534, + 505, + 545 + ], + "spans": [ + { + "bbox": [ + 106, + 534, + 505, + 545 + ], + "score": 1.0, + "content": "continuously improving its model as it explores the real world. The world model is based on the", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 546, + 488, + 558 + ], + "spans": [ + { + "bbox": [ + 106, + 546, + 488, + 558 + ], + "score": 1.0, + "content": "Recurrent State-Space Model (RSSM; Hafner et al., 2018), which consists of four components:", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 26, + "bbox_fs": [ + 105, + 474, + 506, + 558 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 201, + 561, + 482, + 592 + ], + "lines": [ + { + "bbox": [ + 201, + 561, + 482, + 592 + ], + "spans": [ + { + "bbox": [ + 201, + 561, + 482, + 592 + ], + "score": 0.74, + "content": "{ \\begin{array} { r l r l } & { \\operatorname { e n c } _ { \\theta } { \\big ( } s _ { t } \\ { \\big | } \\ s _ { t - 1 } , a _ { t - 1 } , x _ { t } { \\big ) } } & & { { \\mathrm { D e c o d e r ~ N e t w o r k : } } \\quad \\operatorname* { d e c } _ { \\theta } { \\big ( } s _ { t } { \\big ) } \\approx x _ { t } } \\\\ & { \\operatorname { d y n } _ { \\theta } { \\big ( } s _ { t } \\ { \\big | } \\ s _ { t - 1 } , a _ { t - 1 } { \\big ) } } & & { { \\mathrm { R e w a r d ~ N e t w o r k : } } \\quad \\operatorname { r e w } _ { \\theta } { \\big ( } s _ { t + 1 } { \\big ) } \\approx r _ { t } } \\end{array} }", + "type": "interline_equation", + "image_path": "bd74dd0968c35c4dc8ffeba6cc51d9f109521ca66008a561e30a7fcccfe148a6.jpg" + } + 
] + } + ], + "index": 31, + "virtual_lines": [ + { + "bbox": [ + 201, + 561, + 482, + 571.3333333333334 + ], + "spans": [], + "index": 30 + }, + { + "bbox": [ + 201, + 571.3333333333334, + 482, + 581.6666666666667 + ], + "spans": [], + "index": 31 + }, + { + "bbox": [ + 201, + 581.6666666666667, + 482, + 592.0000000000001 + ], + "spans": [], + "index": 32 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 595, + 506, + 712 + ], + "lines": [ + { + "bbox": [ + 105, + 595, + 506, + 608 + ], + "spans": [ + { + "bbox": [ + 105, + 595, + 506, + 608 + ], + "score": 1.0, + "content": "Physical robots are often equipped with multiple sensors of different modalities, such as proprioceptive", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 104, + 606, + 507, + 620 + ], + "spans": [ + { + "bbox": [ + 104, + 606, + 507, + 620 + ], + "score": 1.0, + "content": "joint readings, force sensors, and high-dimensional inputs such as RGB and depth camera images.", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 618, + 505, + 631 + ], + "spans": [ + { + "bbox": [ + 105, + 618, + 289, + 631 + ], + "score": 1.0, + "content": "The encoder network fuses all sensory inputs", + "type": "text" + }, + { + "bbox": [ + 289, + 621, + 299, + 630 + ], + "score": 0.84, + "content": "x _ { t }", + "type": "inline_equation" + }, + { + "bbox": [ + 299, + 618, + 473, + 631 + ], + "score": 1.0, + "content": "together into the stochastic representations", + "type": "text" + }, + { + "bbox": [ + 473, + 621, + 482, + 630 + ], + "score": 0.84, + "content": "z _ { t }", + "type": "inline_equation" + }, + { + "bbox": [ + 483, + 618, + 505, + 631 + ], + "score": 1.0, + "content": ". 
The", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 106, + 630, + 505, + 643 + ], + "spans": [ + { + "bbox": [ + 106, + 630, + 505, + 643 + ], + "score": 1.0, + "content": "dynamics model learns to predict the sequence of stochastic representations by using its recurrent state", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 106, + 642, + 505, + 655 + ], + "spans": [ + { + "bbox": [ + 106, + 642, + 117, + 653 + ], + "score": 0.86, + "content": "h _ { t }", + "type": "inline_equation" + }, + { + "bbox": [ + 117, + 642, + 505, + 655 + ], + "score": 1.0, + "content": ". The decoder reconstructs the sensory inputs to provide a rich signal for learning representations", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 106, + 654, + 506, + 666 + ], + "spans": [ + { + "bbox": [ + 106, + 654, + 506, + 666 + ], + "score": 1.0, + "content": "and enables human inspection of model predictions. In our experiments, the robot has to discover", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 664, + 506, + 679 + ], + "spans": [ + { + "bbox": [ + 105, + 664, + 506, + 679 + ], + "score": 1.0, + "content": "task rewards by interacting with the real world, which the reward network learns to predict. Using", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 106, + 678, + 505, + 689 + ], + "spans": [ + { + "bbox": [ + 106, + 678, + 505, + 689 + ], + "score": 1.0, + "content": "manually specified rewards as a function of the decoded sensory inputs is also possible. 
We optimize", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 106, + 689, + 506, + 701 + ], + "spans": [ + { + "bbox": [ + 106, + 689, + 506, + 701 + ], + "score": 1.0, + "content": "all components of the world model jointly by stochastic backpropagation (Kingma and Welling, 2013;", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 700, + 196, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 700, + 196, + 712 + ], + "score": 1.0, + "content": "Rezende et al., 2014).", + "type": "text" + } + ], + "index": 42 + } + ], + "index": 37.5, + "bbox_fs": [ + 104, + 595, + 507, + 712 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 72, + 505, + 143 + ], + "lines": [ + { + "bbox": [ + 106, + 73, + 505, + 85 + ], + "spans": [ + { + "bbox": [ + 106, + 73, + 505, + 85 + ], + "score": 1.0, + "content": "Actor Critic Learning While the world model represents task-agnostic knowledge about the", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 84, + 505, + 96 + ], + "spans": [ + { + "bbox": [ + 106, + 84, + 505, + 96 + ], + "score": 1.0, + "content": "dynamics, the actor critic algorithm learns a behavior that is specific to the task at hand. As shown in", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 96, + 505, + 108 + ], + "spans": [ + { + "bbox": [ + 106, + 96, + 505, + 108 + ], + "score": 1.0, + "content": "Figure 3 (right), we learn behaviors from rollouts that are predicted in the latent space of the world", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 107, + 506, + 120 + ], + "spans": [ + { + "bbox": [ + 105, + 107, + 506, + 120 + ], + "score": 1.0, + "content": "model, without decoding observations. 
This enables massively parallel behavior learning with typical", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 118, + 505, + 132 + ], + "spans": [ + { + "bbox": [ + 105, + 118, + 470, + 132 + ], + "score": 1.0, + "content": "batch sizes of 16K on a single GPU. The actor critic algorithm consists of an actor network", + "type": "text" + }, + { + "bbox": [ + 470, + 119, + 505, + 131 + ], + "score": 0.93, + "content": "\\pi ( a _ { t } | s _ { t } )", + "type": "inline_equation" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 129, + 215, + 145 + ], + "spans": [ + { + "bbox": [ + 105, + 129, + 188, + 145 + ], + "score": 1.0, + "content": "and a critic network", + "type": "text" + }, + { + "bbox": [ + 189, + 131, + 210, + 143 + ], + "score": 0.93, + "content": "v ( s _ { t } )", + "type": "inline_equation" + }, + { + "bbox": [ + 211, + 129, + 215, + 145 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 2.5 + }, + { + "type": "text", + "bbox": [ + 106, + 149, + 505, + 220 + ], + "lines": [ + { + "bbox": [ + 106, + 150, + 505, + 162 + ], + "spans": [ + { + "bbox": [ + 106, + 150, + 408, + 162 + ], + "score": 1.0, + "content": "The role of the actor network is to learn a distribution over successful actions", + "type": "text" + }, + { + "bbox": [ + 409, + 152, + 419, + 161 + ], + "score": 0.85, + "content": "a _ { t }", + "type": "inline_equation" + }, + { + "bbox": [ + 419, + 150, + 505, + 162 + ], + "score": 1.0, + "content": "for each latent model", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 162, + 505, + 174 + ], + "spans": [ + { + "bbox": [ + 106, + 162, + 127, + 174 + ], + "score": 1.0, + "content": "state", + "type": "text" + }, + { + "bbox": [ + 127, + 163, + 136, + 173 + ], + "score": 0.86, + "content": "s _ { t }", + "type": "inline_equation" + }, + { + "bbox": [ + 137, + 162, + 505, + 174 + ], + "score": 1.0, + "content": "that maximizes the sum of future 
predicted task rewards. The critic network learns to predict", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 174, + 505, + 186 + ], + "spans": [ + { + "bbox": [ + 106, + 174, + 505, + 186 + ], + "score": 1.0, + "content": "the sum of future task rewards through temporal difference learning (Sutton and Barto, 2018). This", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 185, + 505, + 198 + ], + "spans": [ + { + "bbox": [ + 106, + 185, + 437, + 198 + ], + "score": 1.0, + "content": "allows the algorithm to take into account rewards beyond the planning horizon of", + "type": "text" + }, + { + "bbox": [ + 437, + 185, + 471, + 195 + ], + "score": 0.9, + "content": "H = 1 6", + "type": "inline_equation" + }, + { + "bbox": [ + 471, + 185, + 505, + 198 + ], + "score": 1.0, + "content": "steps to", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 196, + 506, + 210 + ], + "spans": [ + { + "bbox": [ + 105, + 196, + 506, + 210 + ], + "score": 1.0, + "content": "learn long-term strategies. Given a predicted trajectory of model states, the critic is trained to regress", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 208, + 457, + 221 + ], + "spans": [ + { + "bbox": [ + 105, + 208, + 268, + 221 + ], + "score": 1.0, + "content": "the return of the trajectory. We compute", + "type": "text" + }, + { + "bbox": [ + 268, + 209, + 275, + 218 + ], + "score": 0.82, + "content": "\\lambda", + "type": "inline_equation" + }, + { + "bbox": [ + 276, + 208, + 457, + 221 + ], + "score": 1.0, + "content": "-returns following Hafner et al. 
(2020; 2019):", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 8.5 + }, + { + "type": "interline_equation", + "bbox": [ + 187, + 223, + 423, + 245 + ], + "lines": [ + { + "bbox": [ + 187, + 223, + 423, + 245 + ], + "spans": [ + { + "bbox": [ + 187, + 223, + 423, + 245 + ], + "score": 0.93, + "content": "V _ { t } ^ { \\lambda } \\doteq r _ { t } + \\gamma \\Big ( ( 1 - \\lambda ) v ( s _ { t + 1 } ) + \\lambda V _ { t + 1 } ^ { \\lambda } \\Big ) , \\quad V _ { H } ^ { \\lambda } \\doteq v ( s _ { H } ) .", + "type": "interline_equation", + "image_path": "bced559cf73caae8b60315e212417c82fda960dadfbf4c1dc28fc8ea7be19aee.jpg" + } + ] + } + ], + "index": 12, + "virtual_lines": [ + { + "bbox": [ + 187, + 223, + 423, + 245 + ], + "spans": [], + "index": 12 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 249, + 505, + 343 + ], + "lines": [ + { + "bbox": [ + 105, + 249, + 505, + 262 + ], + "spans": [ + { + "bbox": [ + 105, + 249, + 300, + 262 + ], + "score": 1.0, + "content": "While the critic network is trained to regress the", + "type": "text" + }, + { + "bbox": [ + 300, + 250, + 307, + 259 + ], + "score": 0.83, + "content": "\\lambda", + "type": "inline_equation" + }, + { + "bbox": [ + 307, + 249, + 505, + 262 + ], + "score": 1.0, + "content": "-returns, the actor network is trained to maximize", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 261, + 505, + 274 + ], + "spans": [ + { + "bbox": [ + 105, + 261, + 505, + 274 + ], + "score": 1.0, + "content": "them. 
Different gradient estimators are available for computing the policy gradient for optimizing", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 272, + 506, + 286 + ], + "spans": [ + { + "bbox": [ + 105, + 272, + 506, + 286 + ], + "score": 1.0, + "content": "the actor, such as Reinforce (Williams, 1992) and the reparameterization trick (Kingma and Welling,", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 284, + 505, + 297 + ], + "spans": [ + { + "bbox": [ + 105, + 284, + 505, + 297 + ], + "score": 1.0, + "content": "2013; Rezende et al., 2014) that directly backpropagates return gradients through the differentiable", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 296, + 505, + 309 + ], + "spans": [ + { + "bbox": [ + 106, + 296, + 505, + 309 + ], + "score": 1.0, + "content": "dynamics network (Henaff et al., 2019). Following Hafner et al. (2020), we choose reparameterization", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 307, + 506, + 321 + ], + "spans": [ + { + "bbox": [ + 105, + 307, + 506, + 321 + ], + "score": 1.0, + "content": "gradients for continuous control tasks and Reinforce gradients for tasks with discrete actions. 
In", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 319, + 506, + 333 + ], + "spans": [ + { + "bbox": [ + 105, + 319, + 506, + 333 + ], + "score": 1.0, + "content": "addition to maximizing returns, the actor is also incentivized to maintain high entropy to prevent", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 106, + 330, + 492, + 345 + ], + "spans": [ + { + "bbox": [ + 106, + 330, + 492, + 345 + ], + "score": 1.0, + "content": "collapse to a deterministic policy and maintain some amount of exploration throughout training:", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 16.5 + }, + { + "type": "interline_equation", + "bbox": [ + 166, + 349, + 444, + 366 + ], + "lines": [ + { + "bbox": [ + 166, + 349, + 444, + 366 + ], + "spans": [ + { + "bbox": [ + 166, + 349, + 444, + 366 + ], + "score": 0.9, + "content": "\\begin{array} { r } { \\mathcal { L } ( \\pi ) \\doteq - \\operatorname { E } \\bigl [ \\sum _ { t = 1 } ^ { H } \\ln \\pi ( a _ { t } \\mid s _ { t } ) \\mathrm { s g } ( V _ { t } ^ { \\lambda } - v ( s _ { t } ) ) + \\eta \\mathrm { H } \\bigl [ \\pi ( a _ { t } \\mid s _ { t } ) \\bigr ] \\bigr ] } \\end{array}", + "type": "interline_equation", + "image_path": "29586778ea39548465206a088f601c3e740302c103692a574d7a70d8188b3983.jpg" + } + ] + } + ], + "index": 21, + "virtual_lines": [ + { + "bbox": [ + 166, + 349, + 444, + 366 + ], + "spans": [], + "index": 21 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 369, + 505, + 428 + ], + "lines": [ + { + "bbox": [ + 105, + 369, + 505, + 381 + ], + "spans": [ + { + "bbox": [ + 105, + 369, + 505, + 381 + ], + "score": 1.0, + "content": "We optimize the actor and critic using the Adam optimizer (Kingma and Ba, 2014). 
To compute the", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 107, + 380, + 505, + 394 + ], + "spans": [ + { + "bbox": [ + 107, + 382, + 113, + 391 + ], + "score": 0.74, + "content": "\\lambda", + "type": "inline_equation" + }, + { + "bbox": [ + 114, + 380, + 505, + 394 + ], + "score": 1.0, + "content": "-returns, we use a slowly updated copy of the critic network as common in the literature (Mnih", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 393, + 505, + 405 + ], + "spans": [ + { + "bbox": [ + 105, + 393, + 505, + 405 + ], + "score": 1.0, + "content": "et al., 2015; Lillicrap et al., 2015). The actor and critic gradients do not affect the world model, as", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 403, + 505, + 416 + ], + "spans": [ + { + "bbox": [ + 105, + 403, + 505, + 416 + ], + "score": 1.0, + "content": "this would lead to incorrect and overly optimistic model predictions. The hyperparameters are listed", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 416, + 171, + 428 + ], + "spans": [ + { + "bbox": [ + 105, + 416, + 171, + 428 + ], + "score": 1.0, + "content": "in Appendix D.", + "type": "text" + } + ], + "index": 26 + } + ], + "index": 24 + }, + { + "type": "title", + "bbox": [ + 107, + 447, + 191, + 461 + ], + "lines": [ + { + "bbox": [ + 103, + 445, + 193, + 465 + ], + "spans": [ + { + "bbox": [ + 103, + 445, + 193, + 465 + ], + "score": 1.0, + "content": "3 Experiments", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 27 + }, + { + "type": "text", + "bbox": [ + 106, + 473, + 506, + 555 + ], + "lines": [ + { + "bbox": [ + 105, + 473, + 506, + 486 + ], + "spans": [ + { + "bbox": [ + 105, + 473, + 506, + 486 + ], + "score": 1.0, + "content": "We evaluate Dreamer on 4 robots, each with a different task, and compare its performance to", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 485, + 505, + 498 + ], + "spans": [ + { + 
"bbox": [ + 105, + 485, + 505, + 498 + ], + "score": 1.0, + "content": "appropriate algorithmic and human baselines. The experiments are representative of common robotic", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 495, + 506, + 511 + ], + "spans": [ + { + "bbox": [ + 105, + 495, + 506, + 511 + ], + "score": 1.0, + "content": "tasks, such as locomotion, manipulation, and navigation. The tasks pose a diverse range of challenges,", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 508, + 505, + 522 + ], + "spans": [ + { + "bbox": [ + 105, + 508, + 505, + 522 + ], + "score": 1.0, + "content": "including continuous and discrete actions, dense and sparse rewards, proprioceptive and image", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 519, + 506, + 533 + ], + "spans": [ + { + "bbox": [ + 105, + 519, + 506, + 533 + ], + "score": 1.0, + "content": "observations, and sensor fusion. The goal of the experiments is to evaluate whether the recent", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 106, + 533, + 506, + 544 + ], + "spans": [ + { + "bbox": [ + 106, + 533, + 506, + 544 + ], + "score": 1.0, + "content": "successes of learned world models enables sample-efficient robot learning directly in the real world.", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 106, + 544, + 365, + 557 + ], + "spans": [ + { + "bbox": [ + 106, + 544, + 365, + 557 + ], + "score": 1.0, + "content": "Specifically, we aim to answer the following research questions:", + "type": "text" + } + ], + "index": 34 + } + ], + "index": 31 + }, + { + "type": "text", + "bbox": [ + 106, + 564, + 506, + 608 + ], + "lines": [ + { + "bbox": [ + 104, + 564, + 449, + 577 + ], + "spans": [ + { + "bbox": [ + 104, + 564, + 449, + 577 + ], + "score": 1.0, + "content": "β€’ Does Dreamer enable robot learning directly in the real world, without simulators?", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 
105, + 579, + 493, + 593 + ], + "spans": [ + { + "bbox": [ + 105, + 579, + 493, + 593 + ], + "score": 1.0, + "content": "β€’ Does Dreamer succeed across various robot platforms, sensory modalities, and action spaces?", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 595, + 506, + 609 + ], + "spans": [ + { + "bbox": [ + 105, + 595, + 506, + 609 + ], + "score": 1.0, + "content": "β€’ How does the data-efficiency of Dreamer compare to previous reinforcement learning algorithms?", + "type": "text" + } + ], + "index": 37 + } + ], + "index": 36 + }, + { + "type": "text", + "bbox": [ + 106, + 617, + 506, + 722 + ], + "lines": [ + { + "bbox": [ + 106, + 617, + 505, + 630 + ], + "spans": [ + { + "bbox": [ + 106, + 617, + 505, + 630 + ], + "score": 1.0, + "content": "Implementation We build on the official implementation of DreamerV2 (Hafner et al., 2020). We", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 106, + 629, + 506, + 641 + ], + "spans": [ + { + "bbox": [ + 106, + 629, + 506, + 641 + ], + "score": 1.0, + "content": "develop an asynchronous actor and learner setup, which is essential in environments with high control", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 106, + 641, + 505, + 653 + ], + "spans": [ + { + "bbox": [ + 106, + 641, + 505, + 653 + ], + "score": 1.0, + "content": "rates, such as the quadruped, and also accelerates learning for slower environments, such as the robot", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 651, + 506, + 666 + ], + "spans": [ + { + "bbox": [ + 105, + 651, + 506, + 666 + ], + "score": 1.0, + "content": "arms. The actor thread computes online actions for the robot and sends trajectories of 128 time steps", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 106, + 664, + 506, + 676 + ], + "spans": [ + { + "bbox": [ + 106, + 664, + 506, + 676 + ], + "score": 1.0, + "content": "to the replay buffer. 
The learner thread samples data from the replay buffer, updates the world model,", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 106, + 675, + 505, + 688 + ], + "spans": [ + { + "bbox": [ + 106, + 675, + 505, + 688 + ], + "score": 1.0, + "content": "and optimizes the policy using imagination rollouts. Policy weights are synced from the learner to", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 106, + 687, + 506, + 700 + ], + "spans": [ + { + "bbox": [ + 106, + 687, + 506, + 700 + ], + "score": 1.0, + "content": "the actor every 20 seconds. We use an RSSM with 256 units to speed up the training computation.", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 698, + 506, + 713 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 506, + 713 + ], + "score": 1.0, + "content": "We use identical hyperparameters across all experiments, enabling off-the-shelf training on different", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 710, + 189, + 722 + ], + "spans": [ + { + "bbox": [ + 105, + 710, + 189, + 722 + ], + "score": 1.0, + "content": "robot embodiments.", + "type": "text" + } + ], + "index": 46 + } + ], + "index": 42 + } + ], + "page_idx": 3, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 302, + 741, + 308, + 750 + ], + "lines": [ + { + "bbox": [ + 302, + 741, + 310, + 752 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 310, + 752 + ], + "score": 1.0, + "content": "4", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 72, + 505, + 143 + ], + "lines": [ + { + "bbox": [ + 106, + 73, + 505, + 85 + ], + "spans": [ + { + "bbox": [ + 106, + 73, + 505, + 85 + ], + "score": 1.0, + "content": "Actor Critic Learning While the world model represents task-agnostic knowledge about the", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 84, + 505, + 96 + ], + "spans": [ + { + "bbox": 
[ + 106, + 84, + 505, + 96 + ], + "score": 1.0, + "content": "dynamics, the actor critic algorithm learns a behavior that is specific to the task at hand. As shown in", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 96, + 505, + 108 + ], + "spans": [ + { + "bbox": [ + 106, + 96, + 505, + 108 + ], + "score": 1.0, + "content": "Figure 3 (right), we learn behaviors from rollouts that are predicted in the latent space of the world", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 107, + 506, + 120 + ], + "spans": [ + { + "bbox": [ + 105, + 107, + 506, + 120 + ], + "score": 1.0, + "content": "model, without decoding observations. This enables massively parallel behavior learning with typical", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 118, + 505, + 132 + ], + "spans": [ + { + "bbox": [ + 105, + 118, + 470, + 132 + ], + "score": 1.0, + "content": "batch sizes of 16K on a single GPU. The actor critic algorithm consists of an actor network", + "type": "text" + }, + { + "bbox": [ + 470, + 119, + 505, + 131 + ], + "score": 0.93, + "content": "\\pi ( a _ { t } | s _ { t } )", + "type": "inline_equation" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 129, + 215, + 145 + ], + "spans": [ + { + "bbox": [ + 105, + 129, + 188, + 145 + ], + "score": 1.0, + "content": "and a critic network", + "type": "text" + }, + { + "bbox": [ + 189, + 131, + 210, + 143 + ], + "score": 0.93, + "content": "v ( s _ { t } )", + "type": "inline_equation" + }, + { + "bbox": [ + 211, + 129, + 215, + 145 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 2.5, + "bbox_fs": [ + 105, + 73, + 506, + 145 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 149, + 505, + 220 + ], + "lines": [ + { + "bbox": [ + 106, + 150, + 505, + 162 + ], + "spans": [ + { + "bbox": [ + 106, + 150, + 408, + 162 + ], + "score": 1.0, + "content": "The role of the actor network is to learn a distribution over 
successful actions", + "type": "text" + }, + { + "bbox": [ + 409, + 152, + 419, + 161 + ], + "score": 0.85, + "content": "a _ { t }", + "type": "inline_equation" + }, + { + "bbox": [ + 419, + 150, + 505, + 162 + ], + "score": 1.0, + "content": "for each latent model", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 162, + 505, + 174 + ], + "spans": [ + { + "bbox": [ + 106, + 162, + 127, + 174 + ], + "score": 1.0, + "content": "state", + "type": "text" + }, + { + "bbox": [ + 127, + 163, + 136, + 173 + ], + "score": 0.86, + "content": "s _ { t }", + "type": "inline_equation" + }, + { + "bbox": [ + 137, + 162, + 505, + 174 + ], + "score": 1.0, + "content": "that maximizes the sum of future predicted task rewards. The critic network learns to predict", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 174, + 505, + 186 + ], + "spans": [ + { + "bbox": [ + 106, + 174, + 505, + 186 + ], + "score": 1.0, + "content": "the sum of future task rewards through temporal difference learning (Sutton and Barto, 2018). This", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 185, + 505, + 198 + ], + "spans": [ + { + "bbox": [ + 106, + 185, + 437, + 198 + ], + "score": 1.0, + "content": "allows the algorithm to take into account rewards beyond the planning horizon of", + "type": "text" + }, + { + "bbox": [ + 437, + 185, + 471, + 195 + ], + "score": 0.9, + "content": "H = 1 6", + "type": "inline_equation" + }, + { + "bbox": [ + 471, + 185, + 505, + 198 + ], + "score": 1.0, + "content": "steps to", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 196, + 506, + 210 + ], + "spans": [ + { + "bbox": [ + 105, + 196, + 506, + 210 + ], + "score": 1.0, + "content": "learn long-term strategies. 
Given a predicted trajectory of model states, the critic is trained to regress", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 208, + 457, + 221 + ], + "spans": [ + { + "bbox": [ + 105, + 208, + 268, + 221 + ], + "score": 1.0, + "content": "the return of the trajectory. We compute", + "type": "text" + }, + { + "bbox": [ + 268, + 209, + 275, + 218 + ], + "score": 0.82, + "content": "\\lambda", + "type": "inline_equation" + }, + { + "bbox": [ + 276, + 208, + 457, + 221 + ], + "score": 1.0, + "content": "-returns following Hafner et al. (2020; 2019):", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 8.5, + "bbox_fs": [ + 105, + 150, + 506, + 221 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 187, + 223, + 423, + 245 + ], + "lines": [ + { + "bbox": [ + 187, + 223, + 423, + 245 + ], + "spans": [ + { + "bbox": [ + 187, + 223, + 423, + 245 + ], + "score": 0.93, + "content": "V _ { t } ^ { \\lambda } \\doteq r _ { t } + \\gamma \\Big ( ( 1 - \\lambda ) v ( s _ { t + 1 } ) + \\lambda V _ { t + 1 } ^ { \\lambda } \\Big ) , \\quad V _ { H } ^ { \\lambda } \\doteq v ( s _ { H } ) .", + "type": "interline_equation", + "image_path": "bced559cf73caae8b60315e212417c82fda960dadfbf4c1dc28fc8ea7be19aee.jpg" + } + ] + } + ], + "index": 12, + "virtual_lines": [ + { + "bbox": [ + 187, + 223, + 423, + 245 + ], + "spans": [], + "index": 12 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 249, + 505, + 343 + ], + "lines": [ + { + "bbox": [ + 105, + 249, + 505, + 262 + ], + "spans": [ + { + "bbox": [ + 105, + 249, + 300, + 262 + ], + "score": 1.0, + "content": "While the critic network is trained to regress the", + "type": "text" + }, + { + "bbox": [ + 300, + 250, + 307, + 259 + ], + "score": 0.83, + "content": "\\lambda", + "type": "inline_equation" + }, + { + "bbox": [ + 307, + 249, + 505, + 262 + ], + "score": 1.0, + "content": "-returns, the actor network is trained to maximize", + "type": "text" + } + ], + "index": 13 + }, + { + 
"bbox": [ + 105, + 261, + 505, + 274 + ], + "spans": [ + { + "bbox": [ + 105, + 261, + 505, + 274 + ], + "score": 1.0, + "content": "them. Different gradient estimators are available for computing the policy gradient for optimizing", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 272, + 506, + 286 + ], + "spans": [ + { + "bbox": [ + 105, + 272, + 506, + 286 + ], + "score": 1.0, + "content": "the actor, such as Reinforce (Williams, 1992) and the reparameterization trick (Kingma and Welling,", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 284, + 505, + 297 + ], + "spans": [ + { + "bbox": [ + 105, + 284, + 505, + 297 + ], + "score": 1.0, + "content": "2013; Rezende et al., 2014) that directly backpropagates return gradients through the differentiable", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 296, + 505, + 309 + ], + "spans": [ + { + "bbox": [ + 106, + 296, + 505, + 309 + ], + "score": 1.0, + "content": "dynamics network (Henaff et al., 2019). Following Hafner et al. (2020), we choose reparameterization", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 307, + 506, + 321 + ], + "spans": [ + { + "bbox": [ + 105, + 307, + 506, + 321 + ], + "score": 1.0, + "content": "gradients for continuous control tasks and Reinforce gradients for tasks with discrete actions. 
In", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 319, + 506, + 333 + ], + "spans": [ + { + "bbox": [ + 105, + 319, + 506, + 333 + ], + "score": 1.0, + "content": "addition to maximizing returns, the actor is also incentivized to maintain high entropy to prevent", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 106, + 330, + 492, + 345 + ], + "spans": [ + { + "bbox": [ + 106, + 330, + 492, + 345 + ], + "score": 1.0, + "content": "collapse to a deterministic policy and maintain some amount of exploration throughout training:", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 16.5, + "bbox_fs": [ + 105, + 249, + 506, + 345 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 166, + 349, + 444, + 366 + ], + "lines": [ + { + "bbox": [ + 166, + 349, + 444, + 366 + ], + "spans": [ + { + "bbox": [ + 166, + 349, + 444, + 366 + ], + "score": 0.9, + "content": "\\begin{array} { r } { \\mathcal { L } ( \\pi ) \\doteq - \\operatorname { E } \\bigl [ \\sum _ { t = 1 } ^ { H } \\ln \\pi ( a _ { t } \\mid s _ { t } ) \\mathrm { s g } ( V _ { t } ^ { \\lambda } - v ( s _ { t } ) ) + \\eta \\mathrm { H } \\bigl [ \\pi ( a _ { t } \\mid s _ { t } ) \\bigr ] \\bigr ] } \\end{array}", + "type": "interline_equation", + "image_path": "29586778ea39548465206a088f601c3e740302c103692a574d7a70d8188b3983.jpg" + } + ] + } + ], + "index": 21, + "virtual_lines": [ + { + "bbox": [ + 166, + 349, + 444, + 366 + ], + "spans": [], + "index": 21 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 369, + 505, + 428 + ], + "lines": [ + { + "bbox": [ + 105, + 369, + 505, + 381 + ], + "spans": [ + { + "bbox": [ + 105, + 369, + 505, + 381 + ], + "score": 1.0, + "content": "We optimize the actor and critic using the Adam optimizer (Kingma and Ba, 2014). 
To compute the", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 107, + 380, + 505, + 394 + ], + "spans": [ + { + "bbox": [ + 107, + 382, + 113, + 391 + ], + "score": 0.74, + "content": "\\lambda", + "type": "inline_equation" + }, + { + "bbox": [ + 114, + 380, + 505, + 394 + ], + "score": 1.0, + "content": "-returns, we use a slowly updated copy of the critic network as common in the literature (Mnih", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 393, + 505, + 405 + ], + "spans": [ + { + "bbox": [ + 105, + 393, + 505, + 405 + ], + "score": 1.0, + "content": "et al., 2015; Lillicrap et al., 2015). The actor and critic gradients do not affect the world model, as", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 403, + 505, + 416 + ], + "spans": [ + { + "bbox": [ + 105, + 403, + 505, + 416 + ], + "score": 1.0, + "content": "this would lead to incorrect and overly optimistic model predictions. The hyperparameters are listed", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 416, + 171, + 428 + ], + "spans": [ + { + "bbox": [ + 105, + 416, + 171, + 428 + ], + "score": 1.0, + "content": "in Appendix D.", + "type": "text" + } + ], + "index": 26 + } + ], + "index": 24, + "bbox_fs": [ + 105, + 369, + 505, + 428 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 447, + 191, + 461 + ], + "lines": [ + { + "bbox": [ + 103, + 445, + 193, + 465 + ], + "spans": [ + { + "bbox": [ + 103, + 445, + 193, + 465 + ], + "score": 1.0, + "content": "3 Experiments", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 27 + }, + { + "type": "text", + "bbox": [ + 106, + 473, + 506, + 555 + ], + "lines": [ + { + "bbox": [ + 105, + 473, + 506, + 486 + ], + "spans": [ + { + "bbox": [ + 105, + 473, + 506, + 486 + ], + "score": 1.0, + "content": "We evaluate Dreamer on 4 robots, each with a different task, and compare its performance to", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 
485, + 505, + 498 + ], + "spans": [ + { + "bbox": [ + 105, + 485, + 505, + 498 + ], + "score": 1.0, + "content": "appropriate algorithmic and human baselines. The experiments are representative of common robotic", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 495, + 506, + 511 + ], + "spans": [ + { + "bbox": [ + 105, + 495, + 506, + 511 + ], + "score": 1.0, + "content": "tasks, such as locomotion, manipulation, and navigation. The tasks pose a diverse range of challenges,", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 508, + 505, + 522 + ], + "spans": [ + { + "bbox": [ + 105, + 508, + 505, + 522 + ], + "score": 1.0, + "content": "including continuous and discrete actions, dense and sparse rewards, proprioceptive and image", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 519, + 506, + 533 + ], + "spans": [ + { + "bbox": [ + 105, + 519, + 506, + 533 + ], + "score": 1.0, + "content": "observations, and sensor fusion. The goal of the experiments is to evaluate whether the recent", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 106, + 533, + 506, + 544 + ], + "spans": [ + { + "bbox": [ + 106, + 533, + 506, + 544 + ], + "score": 1.0, + "content": "successes of learned world models enables sample-efficient robot learning directly in the real world.", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 106, + 544, + 365, + 557 + ], + "spans": [ + { + "bbox": [ + 106, + 544, + 365, + 557 + ], + "score": 1.0, + "content": "Specifically, we aim to answer the following research questions:", + "type": "text" + } + ], + "index": 34 + } + ], + "index": 31, + "bbox_fs": [ + 105, + 473, + 506, + 557 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 564, + 506, + 608 + ], + "lines": [ + { + "bbox": [ + 104, + 564, + 449, + 577 + ], + "spans": [ + { + "bbox": [ + 104, + 564, + 449, + 577 + ], + "score": 1.0, + "content": "β€’ Does Dreamer enable robot learning directly in the real 
world, without simulators?", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 579, + 493, + 593 + ], + "spans": [ + { + "bbox": [ + 105, + 579, + 493, + 593 + ], + "score": 1.0, + "content": "β€’ Does Dreamer succeed across various robot platforms, sensory modalities, and action spaces?", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 595, + 506, + 609 + ], + "spans": [ + { + "bbox": [ + 105, + 595, + 506, + 609 + ], + "score": 1.0, + "content": "β€’ How does the data-efficiency of Dreamer compare to previous reinforcement learning algorithms?", + "type": "text" + } + ], + "index": 37 + } + ], + "index": 36, + "bbox_fs": [ + 104, + 564, + 506, + 609 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 617, + 506, + 722 + ], + "lines": [ + { + "bbox": [ + 106, + 617, + 505, + 630 + ], + "spans": [ + { + "bbox": [ + 106, + 617, + 505, + 630 + ], + "score": 1.0, + "content": "Implementation We build on the official implementation of DreamerV2 (Hafner et al., 2020). We", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 106, + 629, + 506, + 641 + ], + "spans": [ + { + "bbox": [ + 106, + 629, + 506, + 641 + ], + "score": 1.0, + "content": "develop an asynchronous actor and learner setup, which is essential in environments with high control", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 106, + 641, + 505, + 653 + ], + "spans": [ + { + "bbox": [ + 106, + 641, + 505, + 653 + ], + "score": 1.0, + "content": "rates, such as the quadruped, and also accelerates learning for slower environments, such as the robot", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 651, + 506, + 666 + ], + "spans": [ + { + "bbox": [ + 105, + 651, + 506, + 666 + ], + "score": 1.0, + "content": "arms. 
The actor thread computes online actions for the robot and sends trajectories of 128 time steps", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 106, + 664, + 506, + 676 + ], + "spans": [ + { + "bbox": [ + 106, + 664, + 506, + 676 + ], + "score": 1.0, + "content": "to the replay buffer. The learner thread samples data from the replay buffer, updates the world model,", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 106, + 675, + 505, + 688 + ], + "spans": [ + { + "bbox": [ + 106, + 675, + 505, + 688 + ], + "score": 1.0, + "content": "and optimizes the policy using imagination rollouts. Policy weights are synced from the learner to", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 106, + 687, + 506, + 700 + ], + "spans": [ + { + "bbox": [ + 106, + 687, + 506, + 700 + ], + "score": 1.0, + "content": "the actor every 20 seconds. We use an RSSM with 256 units to speed up the training computation.", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 698, + 506, + 713 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 506, + 713 + ], + "score": 1.0, + "content": "We use identical hyperparameters across all experiments, enabling off-the-shelf training on different", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 710, + 189, + 722 + ], + "spans": [ + { + "bbox": [ + 105, + 710, + 189, + 722 + ], + "score": 1.0, + "content": "robot embodiments.", + "type": "text" + } + ], + "index": 46 + } + ], + "index": 42, + "bbox_fs": [ + 105, + 617, + 506, + 722 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 57, + 501, + 143 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 106, + 57, + 501, + 143 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 106, + 57, + 501, + 143 + ], + "spans": [ + { + "bbox": [ + 106, + 57, + 501, + 143 + ], + "score": 0.964, + "type": "image", + "image_path": 
"159d86a4fe017221206965fa98efc6ce35e16bebec7536f231b04a5fa470830b.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 106, + 57, + 501, + 85.66666666666667 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 106, + 85.66666666666667, + 501, + 114.33333333333334 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 106, + 114.33333333333334, + 501, + 143.0 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 156, + 505, + 258 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 106, + 157, + 506, + 169 + ], + "spans": [ + { + "bbox": [ + 106, + 157, + 506, + 169 + ], + "score": 1.0, + "content": "Figure 4: A1 Quadruped Walking Starting from lying on its back with the feet in the air, Dreamer", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 168, + 506, + 180 + ], + "spans": [ + { + "bbox": [ + 106, + 168, + 506, + 180 + ], + "score": 1.0, + "content": "learns to roll over, stand up, and walk in 1 hour of real world training time, without simulators or", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 180, + 505, + 191 + ], + "spans": [ + { + "bbox": [ + 105, + 180, + 505, + 191 + ], + "score": 1.0, + "content": "resets. In contrast, SAC only learns to roll over but neither to stand up nor to walk. For SAC, we also", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 189, + 506, + 204 + ], + "spans": [ + { + "bbox": [ + 104, + 189, + 506, + 204 + ], + "score": 1.0, + "content": "had to help the robot out of a dead-locked leg configuration during training. On the right we show", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 202, + 505, + 214 + ], + "spans": [ + { + "bbox": [ + 106, + 202, + 505, + 214 + ], + "score": 1.0, + "content": "training curves for both SAC and Dreamer. The maximum reward is 14. 
The filled circles indicate", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 213, + 506, + 226 + ], + "spans": [ + { + "bbox": [ + 105, + 213, + 506, + 226 + ], + "score": 1.0, + "content": "times where the robot fell on its back, requiring the learning of a robust strategy for getting back", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 224, + 506, + 237 + ], + "spans": [ + { + "bbox": [ + 105, + 224, + 506, + 237 + ], + "score": 1.0, + "content": "up. After 1 hour of training, we start pushing the robot and find that it adapts its behavior within 10", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 236, + 505, + 248 + ], + "spans": [ + { + "bbox": [ + 105, + 236, + 505, + 248 + ], + "score": 1.0, + "content": "minutes to withstand light pushes and quickly roll back on its feet for hard pushes. The graph shows", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 247, + 494, + 259 + ], + "spans": [ + { + "bbox": [ + 105, + 247, + 494, + 259 + ], + "score": 1.0, + "content": "a single training run with the shaded area indicating one standard deviation within each time bin.", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 7 + } + ], + "index": 4.0 + }, + { + "type": "text", + "bbox": [ + 106, + 268, + 506, + 432 + ], + "lines": [ + { + "bbox": [ + 105, + 268, + 505, + 281 + ], + "spans": [ + { + "bbox": [ + 105, + 268, + 505, + 281 + ], + "score": 1.0, + "content": "Baselines We compare to a strong learning algorithm for each of our experimental setups. 
The A1", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 281, + 505, + 292 + ], + "spans": [ + { + "bbox": [ + 105, + 281, + 505, + 292 + ], + "score": 1.0, + "content": "quadruped robot uses continuous actions and low-dimensional inputs, allowing us to compare to SAC", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 293, + 505, + 304 + ], + "spans": [ + { + "bbox": [ + 106, + 293, + 505, + 304 + ], + "score": 1.0, + "content": "(Haarnoja et al., 2018a;b), a popular algorithm for data-efficient continuous control. For the visual", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 303, + 506, + 317 + ], + "spans": [ + { + "bbox": [ + 105, + 303, + 506, + 317 + ], + "score": 1.0, + "content": "pick and place experiments on the XArm and UR5 robots, inputs are images and proprioceptive", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 315, + 506, + 327 + ], + "spans": [ + { + "bbox": [ + 105, + 315, + 506, + 327 + ], + "score": 1.0, + "content": "readings and actions are discrete, suggesting algorithms from the DQN (Mnih et al., 2015) line of", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 327, + 505, + 339 + ], + "spans": [ + { + "bbox": [ + 105, + 327, + 505, + 339 + ], + "score": 1.0, + "content": "work as baselines. We choose Rainbow (Hessel et al., 2018) as a powerful representative of this", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 339, + 505, + 351 + ], + "spans": [ + { + "bbox": [ + 105, + 339, + 505, + 351 + ], + "score": 1.0, + "content": "category, an algorithm that combines many improvements of DQN. 
To input the proprioceptive", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 351, + 505, + 363 + ], + "spans": [ + { + "bbox": [ + 105, + 351, + 505, + 363 + ], + "score": 1.0, + "content": "readings, we concatenate them as broadcasted planes to the RGB channels of the image, a common", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 106, + 362, + 506, + 375 + ], + "spans": [ + { + "bbox": [ + 106, + 362, + 506, + 375 + ], + "score": 1.0, + "content": "practice in the literature (Schrittwieser et al., 2019). For the UR5, we additionally compare against", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 372, + 506, + 387 + ], + "spans": [ + { + "bbox": [ + 105, + 372, + 506, + 387 + ], + "score": 1.0, + "content": "PPO (Schulman et al., 2017), with similar modifications for fusing image and proprioceptive readings.", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 385, + 506, + 398 + ], + "spans": [ + { + "bbox": [ + 105, + 385, + 506, + 398 + ], + "score": 1.0, + "content": "In addition, we compare against a human operator controlling the robot arm through the robot control", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 106, + 397, + 506, + 410 + ], + "spans": [ + { + "bbox": [ + 106, + 397, + 506, + 410 + ], + "score": 1.0, + "content": "interface. For the Sphero navigation task, inputs are images and actions are continuous. 
The state-of-", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 408, + 506, + 421 + ], + "spans": [ + { + "bbox": [ + 106, + 408, + 506, + 421 + ], + "score": 1.0, + "content": "the-art baseline in this category is DrQv2 (Yarats et al., 2021), which uses image augmentation to", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 420, + 216, + 434 + ], + "spans": [ + { + "bbox": [ + 105, + 420, + 216, + 434 + ], + "score": 1.0, + "content": "increase sample-efficiency.", + "type": "text" + } + ], + "index": 25 + } + ], + "index": 18.5 + }, + { + "type": "title", + "bbox": [ + 107, + 443, + 234, + 455 + ], + "lines": [ + { + "bbox": [ + 105, + 441, + 235, + 458 + ], + "spans": [ + { + "bbox": [ + 105, + 441, + 235, + 458 + ], + "score": 1.0, + "content": "3.1 A1 Quadruped Walking", + "type": "text" + } + ], + "index": 26 + } + ], + "index": 26 + }, + { + "type": "text", + "bbox": [ + 107, + 459, + 336, + 609 + ], + "lines": [ + { + "bbox": [ + 106, + 458, + 337, + 470 + ], + "spans": [ + { + "bbox": [ + 106, + 458, + 337, + 470 + ], + "score": 1.0, + "content": "This high-dimensional continuous control task requires", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 106, + 471, + 336, + 481 + ], + "spans": [ + { + "bbox": [ + 106, + 471, + 336, + 481 + ], + "score": 1.0, + "content": "training a quadruped robot to roll over from its back, stand", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 482, + 337, + 493 + ], + "spans": [ + { + "bbox": [ + 106, + 482, + 337, + 493 + ], + "score": 1.0, + "content": "up, and walk forward at a fixed target velocity. 
Prior work", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 493, + 337, + 506 + ], + "spans": [ + { + "bbox": [ + 105, + 493, + 337, + 506 + ], + "score": 1.0, + "content": "in quadruped locomotion requires either extensive training", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 504, + 337, + 518 + ], + "spans": [ + { + "bbox": [ + 105, + 504, + 337, + 518 + ], + "score": 1.0, + "content": "in simulation under domain randomization, using recovery", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 106, + 516, + 336, + 528 + ], + "spans": [ + { + "bbox": [ + 106, + 516, + 336, + 528 + ], + "score": 1.0, + "content": "controllers to avoid unsafe states, or defining the action", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 106, + 529, + 337, + 540 + ], + "spans": [ + { + "bbox": [ + 106, + 529, + 337, + 540 + ], + "score": 1.0, + "content": "space as parameterized trajectory generators that restrict", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 106, + 540, + 337, + 552 + ], + "spans": [ + { + "bbox": [ + 106, + 540, + 337, + 552 + ], + "score": 1.0, + "content": "the space of motions (Rusu et al., 2016; Peng et al., 2018;", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 106, + 551, + 336, + 563 + ], + "spans": [ + { + "bbox": [ + 106, + 551, + 336, + 563 + ], + "score": 1.0, + "content": "Rudin et al., 2021; Lee et al., 2020; Yang et al., 2019). 
In", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 563, + 337, + 576 + ], + "spans": [ + { + "bbox": [ + 105, + 563, + 337, + 576 + ], + "score": 1.0, + "content": "contrast, we train in the end-to-end reinforcement learning", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 106, + 576, + 338, + 587 + ], + "spans": [ + { + "bbox": [ + 106, + 576, + 338, + 587 + ], + "score": 1.0, + "content": "setting directly on the robot, without simulators or resets.", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 106, + 587, + 336, + 598 + ], + "spans": [ + { + "bbox": [ + 106, + 587, + 336, + 598 + ], + "score": 1.0, + "content": "We use the Unitree A1 robot that consists of 12 direct drive", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 106, + 599, + 336, + 610 + ], + "spans": [ + { + "bbox": [ + 106, + 599, + 251, + 610 + ], + "score": 1.0, + "content": "motors. The motors are controlled at", + "type": "text" + }, + { + "bbox": [ + 251, + 599, + 276, + 609 + ], + "score": 0.7, + "content": "2 0 \\mathrm { H z }", + "type": "inline_equation" + }, + { + "bbox": [ + 277, + 599, + 336, + 610 + ], + "score": 1.0, + "content": "via continuous", + "type": "text" + } + ], + "index": 39 + } + ], + "index": 33 + }, + { + "type": "image", + "bbox": [ + 344, + 460, + 504, + 551 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 344, + 460, + 504, + 551 + ], + "group_id": 1, + "lines": [ + { + "bbox": [ + 344, + 460, + 504, + 551 + ], + "spans": [ + { + "bbox": [ + 344, + 460, + 504, + 551 + ], + "score": 0.971, + "type": "image", + "image_path": "69e7a0dc11e3a7ecd812dec526777f2a39e7aed63605587ef789903e7f57fb8c.jpg" + } + ] + } + ], + "index": 43, + "virtual_lines": [ + { + "bbox": [ + 344, + 460, + 504, + 473.0 + ], + "spans": [], + "index": 40 + }, + { + "bbox": [ + 344, + 473.0, + 504, + 486.0 + ], + "spans": [], + "index": 41 + }, + { + "bbox": [ + 344, + 486.0, + 504, + 499.0 + ], + "spans": [], + 
"index": 42 + }, + { + "bbox": [ + 344, + 499.0, + 504, + 512.0 + ], + "spans": [], + "index": 43 + }, + { + "bbox": [ + 344, + 512.0, + 504, + 525.0 + ], + "spans": [], + "index": 44 + }, + { + "bbox": [ + 344, + 525.0, + 504, + 538.0 + ], + "spans": [], + "index": 45 + }, + { + "bbox": [ + 344, + 538.0, + 504, + 551.0 + ], + "spans": [], + "index": 46 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 344, + 556, + 505, + 602 + ], + "group_id": 1, + "lines": [ + { + "bbox": [ + 343, + 556, + 506, + 568 + ], + "spans": [ + { + "bbox": [ + 343, + 556, + 506, + 568 + ], + "score": 1.0, + "content": "Figure 8: Within 10 minutes of perturb-", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 343, + 568, + 505, + 580 + ], + "spans": [ + { + "bbox": [ + 343, + 568, + 505, + 580 + ], + "score": 1.0, + "content": "ing the learned walking behavior, the", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 343, + 579, + 505, + 591 + ], + "spans": [ + { + "bbox": [ + 343, + 579, + 505, + 591 + ], + "score": 1.0, + "content": "robot adapts to withstanding pushes or", + "type": "text" + } + ], + "index": 49 + }, + { + "bbox": [ + 343, + 590, + 505, + 602 + ], + "spans": [ + { + "bbox": [ + 343, + 590, + 505, + 602 + ], + "score": 1.0, + "content": "quickly rolling over and back on its feet.", + "type": "text" + } + ], + "index": 50 + } + ], + "index": 48.5 + } + ], + "index": 45.75 + }, + { + "type": "text", + "bbox": [ + 107, + 610, + 505, + 668 + ], + "lines": [ + { + "bbox": [ + 106, + 610, + 505, + 622 + ], + "spans": [ + { + "bbox": [ + 106, + 610, + 505, + 622 + ], + "score": 1.0, + "content": "actions that represent motor angles that are realized by a PD controller on the hardware. 
Actions", + "type": "text" + } + ], + "index": 51 + }, + { + "bbox": [ + 105, + 621, + 506, + 635 + ], + "spans": [ + { + "bbox": [ + 105, + 621, + 506, + 635 + ], + "score": 1.0, + "content": "are filtered with a Butterworth filter to protect the motor from high-frequency actions. The input", + "type": "text" + } + ], + "index": 52 + }, + { + "bbox": [ + 106, + 634, + 504, + 646 + ], + "spans": [ + { + "bbox": [ + 106, + 634, + 504, + 646 + ], + "score": 1.0, + "content": "consists of motor angles, orientations, and angular velocities. Due to space constraints, we manually", + "type": "text" + } + ], + "index": 53 + }, + { + "bbox": [ + 105, + 644, + 505, + 658 + ], + "spans": [ + { + "bbox": [ + 105, + 644, + 505, + 658 + ], + "score": 1.0, + "content": "intervene when the robot has reached the end of the available training area, without modifying the", + "type": "text" + } + ], + "index": 54 + }, + { + "bbox": [ + 105, + 657, + 317, + 669 + ], + "spans": [ + { + "bbox": [ + 105, + 657, + 317, + 669 + ], + "score": 1.0, + "content": "joint configuration or orientation that the robot is in.", + "type": "text" + } + ], + "index": 55 + } + ], + "index": 53 + }, + { + "type": "text", + "bbox": [ + 107, + 676, + 505, + 722 + ], + "lines": [ + { + "bbox": [ + 105, + 675, + 505, + 689 + ], + "spans": [ + { + "bbox": [ + 105, + 675, + 505, + 689 + ], + "score": 1.0, + "content": "The reward function is the sum of five terms. 
An upright reward is computed from the base frame", + "type": "text" + } + ], + "index": 56 + }, + { + "bbox": [ + 105, + 686, + 507, + 700 + ], + "spans": [ + { + "bbox": [ + 105, + 686, + 146, + 700 + ], + "score": 1.0, + "content": "up vector", + "type": "text" + }, + { + "bbox": [ + 147, + 687, + 159, + 698 + ], + "score": 0.87, + "content": "\\hat { z } ^ { T }", + "type": "inline_equation" + }, + { + "bbox": [ + 159, + 686, + 507, + 700 + ], + "score": 1.0, + "content": ", terms for matching the standing pose are computed from the joint angles of the hips,", + "type": "text" + } + ], + "index": 57 + }, + { + "bbox": [ + 105, + 699, + 505, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 699, + 505, + 712 + ], + "score": 1.0, + "content": "shoulders, and knees, and a forward velocity term is computed from the projected forward velocity", + "type": "text" + } + ], + "index": 58 + }, + { + "bbox": [ + 107, + 708, + 505, + 725 + ], + "spans": [ + { + "bbox": [ + 107, + 710, + 122, + 721 + ], + "score": 0.89, + "content": "\\boldsymbol { s } _ { v } \\boldsymbol { x }", + "type": "inline_equation" + }, + { + "bbox": [ + 122, + 708, + 213, + 725 + ], + "score": 1.0, + "content": "and the total velocity", + "type": "text" + }, + { + "bbox": [ + 213, + 710, + 224, + 721 + ], + "score": 0.87, + "content": "s _ { v }", + "type": "inline_equation" + }, + { + "bbox": [ + 224, + 708, + 505, + 725 + ], + "score": 1.0, + "content": ". 
Without the reward curriculum, the agent receives spurious reward", + "type": "text" + } + ], + "index": 59 + } + ], + "index": 57.5 + } + ], + "page_idx": 4, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 302, + 741, + 308, + 750 + ], + "lines": [ + { + "bbox": [ + 302, + 740, + 309, + 753 + ], + "spans": [ + { + "bbox": [ + 302, + 740, + 309, + 753 + ], + "score": 1.0, + "content": "5", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 57, + 501, + 143 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 106, + 57, + 501, + 143 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 106, + 57, + 501, + 143 + ], + "spans": [ + { + "bbox": [ + 106, + 57, + 501, + 143 + ], + "score": 0.964, + "type": "image", + "image_path": "159d86a4fe017221206965fa98efc6ce35e16bebec7536f231b04a5fa470830b.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 106, + 57, + 501, + 85.66666666666667 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 106, + 85.66666666666667, + 501, + 114.33333333333334 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 106, + 114.33333333333334, + 501, + 143.0 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 156, + 505, + 258 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 106, + 157, + 506, + 169 + ], + "spans": [ + { + "bbox": [ + 106, + 157, + 506, + 169 + ], + "score": 1.0, + "content": "Figure 4: A1 Quadruped Walking Starting from lying on its back with the feet in the air, Dreamer", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 168, + 506, + 180 + ], + "spans": [ + { + "bbox": [ + 106, + 168, + 506, + 180 + ], + "score": 1.0, + "content": "learns to roll over, stand up, and walk in 1 hour of real world training time, without simulators or", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 180, + 505, + 191 + 
], + "spans": [ + { + "bbox": [ + 105, + 180, + 505, + 191 + ], + "score": 1.0, + "content": "resets. In contrast, SAC only learns to roll over but neither to stand up nor to walk. For SAC, we also", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 189, + 506, + 204 + ], + "spans": [ + { + "bbox": [ + 104, + 189, + 506, + 204 + ], + "score": 1.0, + "content": "had to help the robot out of a dead-locked leg configuration during training. On the right we show", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 202, + 505, + 214 + ], + "spans": [ + { + "bbox": [ + 106, + 202, + 505, + 214 + ], + "score": 1.0, + "content": "training curves for both SAC and Dreamer. The maximum reward is 14. The filled circles indicate", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 213, + 506, + 226 + ], + "spans": [ + { + "bbox": [ + 105, + 213, + 506, + 226 + ], + "score": 1.0, + "content": "times where the robot fell on its back, requiring the learning of a robust strategy for getting back", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 224, + 506, + 237 + ], + "spans": [ + { + "bbox": [ + 105, + 224, + 506, + 237 + ], + "score": 1.0, + "content": "up. After 1 hour of training, we start pushing the robot and find that it adapts its behavior within 10", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 236, + 505, + 248 + ], + "spans": [ + { + "bbox": [ + 105, + 236, + 505, + 248 + ], + "score": 1.0, + "content": "minutes to withstand light pushes and quickly roll back on its feet for hard pushes. 
The graph shows", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 247, + 494, + 259 + ], + "spans": [ + { + "bbox": [ + 105, + 247, + 494, + 259 + ], + "score": 1.0, + "content": "a single training run with the shaded area indicating one standard deviation within each time bin.", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 7 + } + ], + "index": 4.0 + }, + { + "type": "text", + "bbox": [ + 106, + 268, + 506, + 432 + ], + "lines": [ + { + "bbox": [ + 105, + 268, + 505, + 281 + ], + "spans": [ + { + "bbox": [ + 105, + 268, + 505, + 281 + ], + "score": 1.0, + "content": "Baselines We compare to a strong learning algorithm for each of our experimental setups. The A1", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 281, + 505, + 292 + ], + "spans": [ + { + "bbox": [ + 105, + 281, + 505, + 292 + ], + "score": 1.0, + "content": "quadruped robot uses continuous actions and low-dimensional inputs, allowing us to compare to SAC", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 293, + 505, + 304 + ], + "spans": [ + { + "bbox": [ + 106, + 293, + 505, + 304 + ], + "score": 1.0, + "content": "(Haarnoja et al., 2018a;b), a popular algorithm for data-efficient continuous control. 
For the visual", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 303, + 506, + 317 + ], + "spans": [ + { + "bbox": [ + 105, + 303, + 506, + 317 + ], + "score": 1.0, + "content": "pick and place experiments on the XArm and UR5 robots, inputs are images and proprioceptive", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 315, + 506, + 327 + ], + "spans": [ + { + "bbox": [ + 105, + 315, + 506, + 327 + ], + "score": 1.0, + "content": "readings and actions are discrete, suggesting algorithms from the DQN (Mnih et al., 2015) line of", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 327, + 505, + 339 + ], + "spans": [ + { + "bbox": [ + 105, + 327, + 505, + 339 + ], + "score": 1.0, + "content": "work as baselines. We choose Rainbow (Hessel et al., 2018) as a powerful representative of this", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 339, + 505, + 351 + ], + "spans": [ + { + "bbox": [ + 105, + 339, + 505, + 351 + ], + "score": 1.0, + "content": "category, an algorithm that combines many improvements of DQN. To input the proprioceptive", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 351, + 505, + 363 + ], + "spans": [ + { + "bbox": [ + 105, + 351, + 505, + 363 + ], + "score": 1.0, + "content": "readings, we concatenate them as broadcasted planes to the RGB channels of the image, a common", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 106, + 362, + 506, + 375 + ], + "spans": [ + { + "bbox": [ + 106, + 362, + 506, + 375 + ], + "score": 1.0, + "content": "practice in the literature (Schrittwieser et al., 2019). 
For the UR5, we additionally compare against", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 372, + 506, + 387 + ], + "spans": [ + { + "bbox": [ + 105, + 372, + 506, + 387 + ], + "score": 1.0, + "content": "PPO (Schulman et al., 2017), with similar modifications for fusing image and proprioceptive readings.", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 385, + 506, + 398 + ], + "spans": [ + { + "bbox": [ + 105, + 385, + 506, + 398 + ], + "score": 1.0, + "content": "In addition, we compare against a human operator controlling the robot arm through the robot control", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 106, + 397, + 506, + 410 + ], + "spans": [ + { + "bbox": [ + 106, + 397, + 506, + 410 + ], + "score": 1.0, + "content": "interface. For the Sphero navigation task, inputs are images and actions are continuous. The state-of-", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 408, + 506, + 421 + ], + "spans": [ + { + "bbox": [ + 106, + 408, + 506, + 421 + ], + "score": 1.0, + "content": "the-art baseline in this category is DrQv2 (Yarats et al., 2021), which uses image augmentation to", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 420, + 216, + 434 + ], + "spans": [ + { + "bbox": [ + 105, + 420, + 216, + 434 + ], + "score": 1.0, + "content": "increase sample-efficiency.", + "type": "text" + } + ], + "index": 25 + } + ], + "index": 18.5, + "bbox_fs": [ + 105, + 268, + 506, + 434 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 443, + 234, + 455 + ], + "lines": [ + { + "bbox": [ + 105, + 441, + 235, + 458 + ], + "spans": [ + { + "bbox": [ + 105, + 441, + 235, + 458 + ], + "score": 1.0, + "content": "3.1 A1 Quadruped Walking", + "type": "text" + } + ], + "index": 26 + } + ], + "index": 26 + }, + { + "type": "text", + "bbox": [ + 107, + 459, + 336, + 609 + ], + "lines": [ + { + "bbox": [ + 106, + 458, + 337, + 470 + ], + "spans": [ + { + "bbox": [ + 
106, + 458, + 337, + 470 + ], + "score": 1.0, + "content": "This high-dimensional continuous control task requires", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 106, + 471, + 336, + 481 + ], + "spans": [ + { + "bbox": [ + 106, + 471, + 336, + 481 + ], + "score": 1.0, + "content": "training a quadruped robot to roll over from its back, stand", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 482, + 337, + 493 + ], + "spans": [ + { + "bbox": [ + 106, + 482, + 337, + 493 + ], + "score": 1.0, + "content": "up, and walk forward at a fixed target velocity. Prior work", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 493, + 337, + 506 + ], + "spans": [ + { + "bbox": [ + 105, + 493, + 337, + 506 + ], + "score": 1.0, + "content": "in quadruped locomotion requires either extensive training", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 504, + 337, + 518 + ], + "spans": [ + { + "bbox": [ + 105, + 504, + 337, + 518 + ], + "score": 1.0, + "content": "in simulation under domain randomization, using recovery", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 106, + 516, + 336, + 528 + ], + "spans": [ + { + "bbox": [ + 106, + 516, + 336, + 528 + ], + "score": 1.0, + "content": "controllers to avoid unsafe states, or defining the action", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 106, + 529, + 337, + 540 + ], + "spans": [ + { + "bbox": [ + 106, + 529, + 337, + 540 + ], + "score": 1.0, + "content": "space as parameterized trajectory generators that restrict", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 106, + 540, + 337, + 552 + ], + "spans": [ + { + "bbox": [ + 106, + 540, + 337, + 552 + ], + "score": 1.0, + "content": "the space of motions (Rusu et al., 2016; Peng et al., 2018;", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 106, + 551, + 336, + 563 + ], + "spans": [ + { + "bbox": [ + 106, + 551, + 336, + 563 + ], + "score": 
1.0, + "content": "Rudin et al., 2021; Lee et al., 2020; Yang et al., 2019). In", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 563, + 337, + 576 + ], + "spans": [ + { + "bbox": [ + 105, + 563, + 337, + 576 + ], + "score": 1.0, + "content": "contrast, we train in the end-to-end reinforcement learning", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 106, + 576, + 338, + 587 + ], + "spans": [ + { + "bbox": [ + 106, + 576, + 338, + 587 + ], + "score": 1.0, + "content": "setting directly on the robot, without simulators or resets.", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 106, + 587, + 336, + 598 + ], + "spans": [ + { + "bbox": [ + 106, + 587, + 336, + 598 + ], + "score": 1.0, + "content": "We use the Unitree A1 robot that consists of 12 direct drive", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 106, + 599, + 336, + 610 + ], + "spans": [ + { + "bbox": [ + 106, + 599, + 251, + 610 + ], + "score": 1.0, + "content": "motors. The motors are controlled at", + "type": "text" + }, + { + "bbox": [ + 251, + 599, + 276, + 609 + ], + "score": 0.7, + "content": "2 0 \\mathrm { H z }", + "type": "inline_equation" + }, + { + "bbox": [ + 277, + 599, + 336, + 610 + ], + "score": 1.0, + "content": "via continuous", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 106, + 610, + 505, + 622 + ], + "spans": [ + { + "bbox": [ + 106, + 610, + 505, + 622 + ], + "score": 1.0, + "content": "actions that represent motor angles that are realized by a PD controller on the hardware. Actions", + "type": "text" + } + ], + "index": 51 + }, + { + "bbox": [ + 105, + 621, + 506, + 635 + ], + "spans": [ + { + "bbox": [ + 105, + 621, + 506, + 635 + ], + "score": 1.0, + "content": "are filtered with a Butterworth filter to protect the motor from high-frequency actions. 
The input", + "type": "text" + } + ], + "index": 52 + }, + { + "bbox": [ + 106, + 634, + 504, + 646 + ], + "spans": [ + { + "bbox": [ + 106, + 634, + 504, + 646 + ], + "score": 1.0, + "content": "consists of motor angles, orientations, and angular velocities. Due to space constraints, we manually", + "type": "text" + } + ], + "index": 53 + }, + { + "bbox": [ + 105, + 644, + 505, + 658 + ], + "spans": [ + { + "bbox": [ + 105, + 644, + 505, + 658 + ], + "score": 1.0, + "content": "intervene when the robot has reached the end of the available training area, without modifying the", + "type": "text" + } + ], + "index": 54 + }, + { + "bbox": [ + 105, + 657, + 317, + 669 + ], + "spans": [ + { + "bbox": [ + 105, + 657, + 317, + 669 + ], + "score": 1.0, + "content": "joint configuration or orientation that the robot is in.", + "type": "text" + } + ], + "index": 55 + } + ], + "index": 33, + "bbox_fs": [ + 105, + 458, + 338, + 610 + ] + }, + { + "type": "image", + "bbox": [ + 344, + 460, + 504, + 551 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 344, + 460, + 504, + 551 + ], + "group_id": 1, + "lines": [ + { + "bbox": [ + 344, + 460, + 504, + 551 + ], + "spans": [ + { + "bbox": [ + 344, + 460, + 504, + 551 + ], + "score": 0.971, + "type": "image", + "image_path": "69e7a0dc11e3a7ecd812dec526777f2a39e7aed63605587ef789903e7f57fb8c.jpg" + } + ] + } + ], + "index": 43, + "virtual_lines": [ + { + "bbox": [ + 344, + 460, + 504, + 473.0 + ], + "spans": [], + "index": 40 + }, + { + "bbox": [ + 344, + 473.0, + 504, + 486.0 + ], + "spans": [], + "index": 41 + }, + { + "bbox": [ + 344, + 486.0, + 504, + 499.0 + ], + "spans": [], + "index": 42 + }, + { + "bbox": [ + 344, + 499.0, + 504, + 512.0 + ], + "spans": [], + "index": 43 + }, + { + "bbox": [ + 344, + 512.0, + 504, + 525.0 + ], + "spans": [], + "index": 44 + }, + { + "bbox": [ + 344, + 525.0, + 504, + 538.0 + ], + "spans": [], + "index": 45 + }, + { + "bbox": [ + 344, + 538.0, + 504, + 551.0 + ], + "spans": [], + 
"index": 46 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 344, + 556, + 505, + 602 + ], + "group_id": 1, + "lines": [ + { + "bbox": [ + 343, + 556, + 506, + 568 + ], + "spans": [ + { + "bbox": [ + 343, + 556, + 506, + 568 + ], + "score": 1.0, + "content": "Figure 8: Within 10 minutes of perturb-", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 343, + 568, + 505, + 580 + ], + "spans": [ + { + "bbox": [ + 343, + 568, + 505, + 580 + ], + "score": 1.0, + "content": "ing the learned walking behavior, the", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 343, + 579, + 505, + 591 + ], + "spans": [ + { + "bbox": [ + 343, + 579, + 505, + 591 + ], + "score": 1.0, + "content": "robot adapts to withstanding pushes or", + "type": "text" + } + ], + "index": 49 + }, + { + "bbox": [ + 343, + 590, + 505, + 602 + ], + "spans": [ + { + "bbox": [ + 343, + 590, + 505, + 602 + ], + "score": 1.0, + "content": "quickly rolling over and back on its feet.", + "type": "text" + } + ], + "index": 50 + } + ], + "index": 48.5 + } + ], + "index": 45.75 + }, + { + "type": "text", + "bbox": [ + 107, + 610, + 505, + 668 + ], + "lines": [], + "index": 53, + "bbox_fs": [ + 105, + 610, + 506, + 669 + ], + "lines_deleted": true + }, + { + "type": "text", + "bbox": [ + 107, + 676, + 505, + 722 + ], + "lines": [ + { + "bbox": [ + 105, + 675, + 505, + 689 + ], + "spans": [ + { + "bbox": [ + 105, + 675, + 505, + 689 + ], + "score": 1.0, + "content": "The reward function is the sum of five terms. 
An upright reward is computed from the base frame", + "type": "text" + } + ], + "index": 56 + }, + { + "bbox": [ + 105, + 686, + 507, + 700 + ], + "spans": [ + { + "bbox": [ + 105, + 686, + 146, + 700 + ], + "score": 1.0, + "content": "up vector", + "type": "text" + }, + { + "bbox": [ + 147, + 687, + 159, + 698 + ], + "score": 0.87, + "content": "\\hat { z } ^ { T }", + "type": "inline_equation" + }, + { + "bbox": [ + 159, + 686, + 507, + 700 + ], + "score": 1.0, + "content": ", terms for matching the standing pose are computed from the joint angles of the hips,", + "type": "text" + } + ], + "index": 57 + }, + { + "bbox": [ + 105, + 699, + 505, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 699, + 505, + 712 + ], + "score": 1.0, + "content": "shoulders, and knees, and a forward velocity term is computed from the projected forward velocity", + "type": "text" + } + ], + "index": 58 + }, + { + "bbox": [ + 107, + 708, + 505, + 725 + ], + "spans": [ + { + "bbox": [ + 107, + 710, + 122, + 721 + ], + "score": 0.89, + "content": "\\boldsymbol { s } _ { v } \\boldsymbol { x }", + "type": "inline_equation" + }, + { + "bbox": [ + 122, + 708, + 213, + 725 + ], + "score": 1.0, + "content": "and the total velocity", + "type": "text" + }, + { + "bbox": [ + 213, + 710, + 224, + 721 + ], + "score": 0.87, + "content": "s _ { v }", + "type": "inline_equation" + }, + { + "bbox": [ + 224, + 708, + 505, + 725 + ], + "score": 1.0, + "content": ". Without the reward curriculum, the agent receives spurious reward", + "type": "text" + } + ], + "index": 59 + }, + { + "bbox": [ + 106, + 245, + 505, + 258 + ], + "spans": [ + { + "bbox": [ + 106, + 245, + 505, + 258 + ], + "score": 1.0, + "content": "values due to the velocity estimator’s dependence on foot-ground contact events. 
Each of the five", + "type": "text", + "cross_page": true + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 257, + 466, + 270 + ], + "spans": [ + { + "bbox": [ + 105, + 257, + 466, + 270 + ], + "score": 1.0, + "content": "terms is active while its preceding terms are satisfied to at least 0.7 and otherwise set to 0:", + "type": "text", + "cross_page": true + } + ], + "index": 11 + } + ], + "index": 57.5, + "bbox_fs": [ + 105, + 675, + 507, + 725 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 58, + 499, + 142 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 106, + 58, + 499, + 142 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 106, + 58, + 499, + 142 + ], + "spans": [ + { + "bbox": [ + 106, + 58, + 499, + 142 + ], + "score": 0.958, + "type": "image", + "image_path": "69863294723843746383e47fe99dcd32744e499d1c526f51df4121c52ff99fe8.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 106, + 58, + 499, + 86.0 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 106, + 86.0, + 499, + 114.0 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 106, + 114.0, + 499, + 142.0 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 156, + 506, + 236 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 157, + 505, + 169 + ], + "spans": [ + { + "bbox": [ + 105, + 157, + 505, + 169 + ], + "score": 1.0, + "content": "Figure 5: UR5 Multi Object Visual Pick and Place This task requires learning to locate three", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 167, + 506, + 182 + ], + "spans": [ + { + "bbox": [ + 105, + 167, + 506, + 182 + ], + "score": 1.0, + "content": "ball objects from third-person camera images, grasp them, and move them into the other bin. 
The", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 179, + 506, + 192 + ], + "spans": [ + { + "bbox": [ + 105, + 179, + 506, + 192 + ], + "score": 1.0, + "content": "arm is free to move within and above the bins and sparse rewards are given for grasping a ball and", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 191, + 506, + 203 + ], + "spans": [ + { + "bbox": [ + 106, + 191, + 506, + 203 + ], + "score": 1.0, + "content": "for dropping it in the opposite bin. The environment requires the world model to learn multi-object", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 202, + 506, + 215 + ], + "spans": [ + { + "bbox": [ + 106, + 202, + 506, + 215 + ], + "score": 1.0, + "content": "dynamics in the real world and the sparse reward structure poses a challenge for policy optimization.", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 213, + 506, + 226 + ], + "spans": [ + { + "bbox": [ + 106, + 213, + 506, + 226 + ], + "score": 1.0, + "content": "Dreamer overcomes the challenges of visual localization and sparse rewards on this task, learning a", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 225, + 365, + 237 + ], + "spans": [ + { + "bbox": [ + 105, + 225, + 365, + 237 + ], + "score": 1.0, + "content": "successful strategy within a few hours of autonomous operation.", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 6 + } + ], + "index": 3.5 + }, + { + "type": "text", + "bbox": [ + 107, + 245, + 505, + 269 + ], + "lines": [ + { + "bbox": [ + 106, + 245, + 505, + 258 + ], + "spans": [ + { + "bbox": [ + 106, + 245, + 505, + 258 + ], + "score": 1.0, + "content": "values due to the velocity estimator’s dependence on foot-ground contact events. 
Each of the five", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 257, + 466, + 270 + ], + "spans": [ + { + "bbox": [ + 105, + 257, + 466, + 270 + ], + "score": 1.0, + "content": "terms is active while its preceding terms are satisfied to at least 0.7 and otherwise set to 0:", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 10.5 + }, + { + "type": "interline_equation", + "bbox": [ + 123, + 273, + 477, + 289 + ], + "lines": [ + { + "bbox": [ + 123, + 273, + 477, + 289 + ], + "spans": [ + { + "bbox": [ + 123, + 273, + 477, + 289 + ], + "score": 0.82, + "content": "\\begin{array} { r l } { r ^ { \\mathrm { u p r } } \\doteq ( \\hat { z } ^ { T } [ 0 , 0 , 1 ] - 1 ) / 2 } & { { } r ^ { \\mathrm { h i p } } \\doteq 1 - \\frac 1 4 \\| q ^ { \\mathrm { h i p } } + 0 . 2 \\| _ { 1 } \\quad r ^ { \\mathrm { s h o u l d e r } } \\doteq 1 - \\frac 1 4 \\| q ^ { \\mathrm { s h o u l d e r } } + 0 . 2 \\| _ { 1 } } \\end{array}", + "type": "interline_equation", + "image_path": "d59a106b3ad7e1f46762af4cb6c79589191a386db6dd18da2381b810f0e72627.jpg" + } + ] + } + ], + "index": 12, + "virtual_lines": [ + { + "bbox": [ + 123, + 273, + 477, + 289 + ], + "spans": [], + "index": 12 + } + ] + }, + { + "type": "interline_equation", + "bbox": [ + 122, + 294, + 476, + 310 + ], + "lines": [ + { + "bbox": [ + 122, + 294, + 476, + 310 + ], + "spans": [ + { + "bbox": [ + 122, + 294, + 476, + 310 + ], + "score": 0.85, + "content": "\\begin{array} { r l } { r ^ { \\mathrm { k n e e } } \\doteq 1 - \\frac 1 4 \\parallel q ^ { \\mathrm { k n e e } } - 1 . 0 \\parallel _ { 1 } } & { { } r ^ { \\mathrm { v e l o c i t y } } \\doteq 5 \\big ( \\operatorname* { m a x } ( 0 , ^ { \\mathcal { B } } v _ { x } ) / \\parallel ^ { \\mathcal { B } } v \\parallel _ { 2 } \\cdot \\mathrm { c l i p } ( ^ { \\mathcal { B } } v _ { x } / 0 . 
3 , - 1 , 1 ) + 1 \\big ) } \\end{array}", + "type": "interline_equation", + "image_path": "d3f8da6b396f76d54da62528b9e91defa6543c22e48371ec0f3e6d668424058f.jpg" + } + ] + } + ], + "index": 13, + "virtual_lines": [ + { + "bbox": [ + 122, + 294, + 476, + 310 + ], + "spans": [], + "index": 13 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 314, + 506, + 408 + ], + "lines": [ + { + "bbox": [ + 105, + 313, + 506, + 327 + ], + "spans": [ + { + "bbox": [ + 105, + 313, + 506, + 327 + ], + "score": 1.0, + "content": "As shown in Figure 4, after one hour of training, Dreamer learns to consistently flip the robot over", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 326, + 505, + 338 + ], + "spans": [ + { + "bbox": [ + 106, + 326, + 505, + 338 + ], + "score": 1.0, + "content": "from its back, stand up, and walk forward. In the first 5 minutes of training, the robot manages to", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 338, + 505, + 349 + ], + "spans": [ + { + "bbox": [ + 106, + 338, + 505, + 349 + ], + "score": 1.0, + "content": "roll off its back and land on its feet. 20 minutes later, it learns how to stand up on its feet. About", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 349, + 506, + 362 + ], + "spans": [ + { + "bbox": [ + 104, + 349, + 506, + 362 + ], + "score": 1.0, + "content": "1 hour into training, the robot learns a pronking gait to walk forward at the desired velocity. 
After", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 361, + 506, + 374 + ], + "spans": [ + { + "bbox": [ + 105, + 361, + 506, + 374 + ], + "score": 1.0, + "content": "succeeding at this task, we tested the robustness of the algorithms by repeatedly knocking the robot", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 371, + 506, + 387 + ], + "spans": [ + { + "bbox": [ + 104, + 371, + 506, + 387 + ], + "score": 1.0, + "content": "off of its feet with a large pole, shown in Figure 8. Within 10 minutes of additional online learning,", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 106, + 384, + 505, + 397 + ], + "spans": [ + { + "bbox": [ + 106, + 384, + 505, + 397 + ], + "score": 1.0, + "content": "the robot adapts and withstand pushes or quickly rolls back on its feet. In comparison, SAC quickly", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 396, + 433, + 409 + ], + "spans": [ + { + "bbox": [ + 105, + 396, + 433, + 409 + ], + "score": 1.0, + "content": "learns to roll off its back but fails to stand up or walk given the small data budget.", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 17.5 + }, + { + "type": "title", + "bbox": [ + 106, + 416, + 303, + 428 + ], + "lines": [ + { + "bbox": [ + 105, + 415, + 303, + 430 + ], + "spans": [ + { + "bbox": [ + 105, + 415, + 303, + 430 + ], + "score": 1.0, + "content": "3.2 UR5 Multi-Object Visual Pick and Place", + "type": "text" + } + ], + "index": 22 + } + ], + "index": 22 + }, + { + "type": "text", + "bbox": [ + 106, + 429, + 506, + 568 + ], + "lines": [ + { + "bbox": [ + 106, + 428, + 505, + 441 + ], + "spans": [ + { + "bbox": [ + 106, + 428, + 505, + 441 + ], + "score": 1.0, + "content": "Common in warehouse and logistics environments, pick and place tasks require a robot manipulator", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 440, + 505, + 452 + ], + "spans": [ + { + "bbox": [ + 106, + 440, + 
505, + 452 + ], + "score": 1.0, + "content": "to transport items from one bin into another. Figure 5 shows a successful pick and place cycle of", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 451, + 505, + 464 + ], + "spans": [ + { + "bbox": [ + 105, + 451, + 505, + 464 + ], + "score": 1.0, + "content": "this task. The task is challenging because of sparse rewards, the need to infer object positions from", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 106, + 464, + 506, + 476 + ], + "spans": [ + { + "bbox": [ + 106, + 464, + 506, + 476 + ], + "score": 1.0, + "content": "pixels, and the challenging dynamics of multiple moving objects. The sensory inputs consist of", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 475, + 506, + 488 + ], + "spans": [ + { + "bbox": [ + 105, + 475, + 506, + 488 + ], + "score": 1.0, + "content": "proprioceptive readings (joint angles, gripper position, end effector Cartesian position) and a 3rd", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 487, + 506, + 501 + ], + "spans": [ + { + "bbox": [ + 105, + 487, + 506, + 501 + ], + "score": 1.0, + "content": "person RGB image of the scene. 
Successfully grasping one of the 3 objects, detected by partial", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 498, + 506, + 511 + ], + "spans": [ + { + "bbox": [ + 105, + 498, + 218, + 511 + ], + "score": 1.0, + "content": "gripper closure, results in a", + "type": "text" + }, + { + "bbox": [ + 218, + 499, + 232, + 509 + ], + "score": 0.85, + "content": "+ 1", + "type": "inline_equation" + }, + { + "bbox": [ + 232, + 498, + 440, + 511 + ], + "score": 1.0, + "content": "reward, releasing the object in the same bin gives a", + "type": "text" + }, + { + "bbox": [ + 441, + 499, + 455, + 509 + ], + "score": 0.48, + "content": "- 1", + "type": "inline_equation" + }, + { + "bbox": [ + 455, + 498, + 506, + 511 + ], + "score": 1.0, + "content": "reward, and", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 509, + 506, + 524 + ], + "spans": [ + { + "bbox": [ + 105, + 509, + 245, + 524 + ], + "score": 1.0, + "content": "placing in the opposite bin gives a", + "type": "text" + }, + { + "bbox": [ + 245, + 510, + 264, + 521 + ], + "score": 0.87, + "content": "+ 1 0", + "type": "inline_equation" + }, + { + "bbox": [ + 265, + 509, + 506, + 524 + ], + "score": 1.0, + "content": "reward. We control the UR5 robot from Universal Robotics", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 521, + 506, + 534 + ], + "spans": [ + { + "bbox": [ + 105, + 521, + 117, + 534 + ], + "score": 1.0, + "content": "at", + "type": "text" + }, + { + "bbox": [ + 117, + 522, + 138, + 532 + ], + "score": 0.54, + "content": "2 \\ \\mathrm { H z }", + "type": "inline_equation" + }, + { + "bbox": [ + 139, + 521, + 474, + 534 + ], + "score": 1.0, + "content": ". 
Actions are discrete for moving the end effector in increments along X, Y, and", + "type": "text" + }, + { + "bbox": [ + 475, + 522, + 483, + 532 + ], + "score": 0.31, + "content": "\\textsf { Z }", + "type": "inline_equation" + }, + { + "bbox": [ + 484, + 521, + 506, + 534 + ], + "score": 1.0, + "content": "axes", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 106, + 533, + 506, + 546 + ], + "spans": [ + { + "bbox": [ + 106, + 533, + 506, + 546 + ], + "score": 1.0, + "content": "and for toggling the gripper state. Movement in the Z axis is only enabled while holding an object", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 106, + 545, + 506, + 558 + ], + "spans": [ + { + "bbox": [ + 106, + 545, + 506, + 558 + ], + "score": 1.0, + "content": "and the gripper automatically opens once above the correct bin. We estimate human teleoperation", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 556, + 507, + 569 + ], + "spans": [ + { + "bbox": [ + 105, + 556, + 507, + 569 + ], + "score": 1.0, + "content": "performance by recording 3 demonstrators for 20 minutes each, controlling the UR5 with a joystick.", + "type": "text" + } + ], + "index": 34 + } + ], + "index": 28.5 + }, + { + "type": "text", + "bbox": [ + 106, + 572, + 505, + 678 + ], + "lines": [ + { + "bbox": [ + 105, + 572, + 506, + 586 + ], + "spans": [ + { + "bbox": [ + 105, + 572, + 506, + 586 + ], + "score": 1.0, + "content": "Dreamer reaches an average pick rate of 2.5 objects per minute within 8 hours. 
The robot initially", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 106, + 585, + 506, + 598 + ], + "spans": [ + { + "bbox": [ + 106, + 585, + 506, + 598 + ], + "score": 1.0, + "content": "struggles to learn as the reward signal is very sparse, but begins to gradually improve after 2 hours of", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 596, + 505, + 609 + ], + "spans": [ + { + "bbox": [ + 105, + 596, + 505, + 609 + ], + "score": 1.0, + "content": "training. The robot first learns to localize the objects and toggles the gripper when near an object. Over", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 106, + 609, + 505, + 621 + ], + "spans": [ + { + "bbox": [ + 106, + 609, + 505, + 621 + ], + "score": 1.0, + "content": "time, grasping becomes precise and the robot learns to push objects out of corners. Figure 5 shows", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 619, + 506, + 633 + ], + "spans": [ + { + "bbox": [ + 105, + 619, + 506, + 633 + ], + "score": 1.0, + "content": "the learning curves of Dreamer compared to Rainbow DQN, PPO, and the human baseline. Both", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 104, + 630, + 505, + 646 + ], + "spans": [ + { + "bbox": [ + 104, + 630, + 505, + 646 + ], + "score": 1.0, + "content": "Rainbow DQN and PPO only learn the short-sighted behavior of grasping and immediately dropping", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 643, + 505, + 656 + ], + "spans": [ + { + "bbox": [ + 105, + 643, + 505, + 656 + ], + "score": 1.0, + "content": "objects in the same bin. In contrast, Dreamer approaches human-level teleoperation performance", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 654, + 505, + 668 + ], + "spans": [ + { + "bbox": [ + 105, + 654, + 505, + 668 + ], + "score": 1.0, + "content": "after 8 hours. 
We hypothesize that Rainbow DQN and PPO fail because they require larger amounts", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 106, + 667, + 385, + 679 + ], + "spans": [ + { + "bbox": [ + 106, + 667, + 385, + 679 + ], + "score": 1.0, + "content": "of experience, which is not feasible for us to collect in the real world.", + "type": "text" + } + ], + "index": 43 + } + ], + "index": 39 + }, + { + "type": "title", + "bbox": [ + 107, + 686, + 252, + 698 + ], + "lines": [ + { + "bbox": [ + 105, + 685, + 253, + 699 + ], + "spans": [ + { + "bbox": [ + 105, + 685, + 253, + 699 + ], + "score": 1.0, + "content": "3.3 XArm Visual Pick and Place", + "type": "text" + } + ], + "index": 44 + } + ], + "index": 44 + }, + { + "type": "text", + "bbox": [ + 106, + 699, + 503, + 722 + ], + "lines": [ + { + "bbox": [ + 105, + 698, + 505, + 711 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 505, + 711 + ], + "score": 1.0, + "content": "While the UR5 robot is a high performance industrial robot, the XArm is an accessible low-cost", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 711, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 711, + 342, + 723 + ], + "score": 1.0, + "content": "7 DOF manipulation, which we control at approximately", + "type": "text" + }, + { + "bbox": [ + 342, + 711, + 371, + 721 + ], + "score": 0.6, + "content": "0 . 5 \\ : \\mathrm { H z }", + "type": "inline_equation" + }, + { + "bbox": [ + 371, + 711, + 505, + 723 + ], + "score": 1.0, + "content": ". 
Similar to Section 3.2, the task", + "type": "text" + } + ], + "index": 46 + } + ], + "index": 45.5 + } + ], + "page_idx": 5, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 302, + 742, + 309, + 750 + ], + "lines": [ + { + "bbox": [ + 302, + 741, + 310, + 752 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 310, + 752 + ], + "score": 1.0, + "content": "6", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 58, + 499, + 142 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 106, + 58, + 499, + 142 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 106, + 58, + 499, + 142 + ], + "spans": [ + { + "bbox": [ + 106, + 58, + 499, + 142 + ], + "score": 0.958, + "type": "image", + "image_path": "69863294723843746383e47fe99dcd32744e499d1c526f51df4121c52ff99fe8.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 106, + 58, + 499, + 86.0 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 106, + 86.0, + 499, + 114.0 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 106, + 114.0, + 499, + 142.0 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 156, + 506, + 236 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 157, + 505, + 169 + ], + "spans": [ + { + "bbox": [ + 105, + 157, + 505, + 169 + ], + "score": 1.0, + "content": "Figure 5: UR5 Multi Object Visual Pick and Place This task requires learning to locate three", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 167, + 506, + 182 + ], + "spans": [ + { + "bbox": [ + 105, + 167, + 506, + 182 + ], + "score": 1.0, + "content": "ball objects from third-person camera images, grasp them, and move them into the other bin. 
The", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 179, + 506, + 192 + ], + "spans": [ + { + "bbox": [ + 105, + 179, + 506, + 192 + ], + "score": 1.0, + "content": "arm is free to move within and above the bins and sparse rewards are given for grasping a ball and", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 191, + 506, + 203 + ], + "spans": [ + { + "bbox": [ + 106, + 191, + 506, + 203 + ], + "score": 1.0, + "content": "for dropping it in the opposite bin. The environment requires the world model to learn multi-object", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 202, + 506, + 215 + ], + "spans": [ + { + "bbox": [ + 106, + 202, + 506, + 215 + ], + "score": 1.0, + "content": "dynamics in the real world and the sparse reward structure poses a challenge for policy optimization.", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 213, + 506, + 226 + ], + "spans": [ + { + "bbox": [ + 106, + 213, + 506, + 226 + ], + "score": 1.0, + "content": "Dreamer overcomes the challenges of visual localization and sparse rewards on this task, learning a", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 225, + 365, + 237 + ], + "spans": [ + { + "bbox": [ + 105, + 225, + 365, + 237 + ], + "score": 1.0, + "content": "successful strategy within a few hours of autonomous operation.", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 6 + } + ], + "index": 3.5 + }, + { + "type": "text", + "bbox": [ + 107, + 245, + 505, + 269 + ], + "lines": [], + "index": 10.5, + "bbox_fs": [ + 105, + 245, + 505, + 270 + ], + "lines_deleted": true + }, + { + "type": "interline_equation", + "bbox": [ + 123, + 273, + 477, + 289 + ], + "lines": [ + { + "bbox": [ + 123, + 273, + 477, + 289 + ], + "spans": [ + { + "bbox": [ + 123, + 273, + 477, + 289 + ], + "score": 0.82, + "content": "\\begin{array} { r l } { r ^ { \\mathrm { u p r } } \\doteq ( \\hat { z } ^ { T } [ 0 , 0 , 1 ] - 1 ) / 2 
} & { { } r ^ { \\mathrm { h i p } } \\doteq 1 - \\frac 1 4 \\| q ^ { \\mathrm { h i p } } + 0 . 2 \\| _ { 1 } \\quad r ^ { \\mathrm { s h o u l d e r } } \\doteq 1 - \\frac 1 4 \\| q ^ { \\mathrm { s h o u l d e r } } + 0 . 2 \\| _ { 1 } } \\end{array}", + "type": "interline_equation", + "image_path": "d59a106b3ad7e1f46762af4cb6c79589191a386db6dd18da2381b810f0e72627.jpg" + } + ] + } + ], + "index": 12, + "virtual_lines": [ + { + "bbox": [ + 123, + 273, + 477, + 289 + ], + "spans": [], + "index": 12 + } + ] + }, + { + "type": "interline_equation", + "bbox": [ + 122, + 294, + 476, + 310 + ], + "lines": [ + { + "bbox": [ + 122, + 294, + 476, + 310 + ], + "spans": [ + { + "bbox": [ + 122, + 294, + 476, + 310 + ], + "score": 0.85, + "content": "\\begin{array} { r l } { r ^ { \\mathrm { k n e e } } \\doteq 1 - \\frac 1 4 \\parallel q ^ { \\mathrm { k n e e } } - 1 . 0 \\parallel _ { 1 } } & { { } r ^ { \\mathrm { v e l o c i t y } } \\doteq 5 \\big ( \\operatorname* { m a x } ( 0 , ^ { \\mathcal { B } } v _ { x } ) / \\parallel ^ { \\mathcal { B } } v \\parallel _ { 2 } \\cdot \\mathrm { c l i p } ( ^ { \\mathcal { B } } v _ { x } / 0 . 
3 , - 1 , 1 ) + 1 \\big ) } \\end{array}", + "type": "interline_equation", + "image_path": "d3f8da6b396f76d54da62528b9e91defa6543c22e48371ec0f3e6d668424058f.jpg" + } + ] + } + ], + "index": 13, + "virtual_lines": [ + { + "bbox": [ + 122, + 294, + 476, + 310 + ], + "spans": [], + "index": 13 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 314, + 506, + 408 + ], + "lines": [ + { + "bbox": [ + 105, + 313, + 506, + 327 + ], + "spans": [ + { + "bbox": [ + 105, + 313, + 506, + 327 + ], + "score": 1.0, + "content": "As shown in Figure 4, after one hour of training, Dreamer learns to consistently flip the robot over", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 326, + 505, + 338 + ], + "spans": [ + { + "bbox": [ + 106, + 326, + 505, + 338 + ], + "score": 1.0, + "content": "from its back, stand up, and walk forward. In the first 5 minutes of training, the robot manages to", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 338, + 505, + 349 + ], + "spans": [ + { + "bbox": [ + 106, + 338, + 505, + 349 + ], + "score": 1.0, + "content": "roll off its back and land on its feet. 20 minutes later, it learns how to stand up on its feet. About", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 349, + 506, + 362 + ], + "spans": [ + { + "bbox": [ + 104, + 349, + 506, + 362 + ], + "score": 1.0, + "content": "1 hour into training, the robot learns a pronking gait to walk forward at the desired velocity. 
After", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 361, + 506, + 374 + ], + "spans": [ + { + "bbox": [ + 105, + 361, + 506, + 374 + ], + "score": 1.0, + "content": "succeeding at this task, we tested the robustness of the algorithms by repeatedly knocking the robot", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 371, + 506, + 387 + ], + "spans": [ + { + "bbox": [ + 104, + 371, + 506, + 387 + ], + "score": 1.0, + "content": "off of its feet with a large pole, shown in Figure 8. Within 10 minutes of additional online learning,", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 106, + 384, + 505, + 397 + ], + "spans": [ + { + "bbox": [ + 106, + 384, + 505, + 397 + ], + "score": 1.0, + "content": "the robot adapts and withstand pushes or quickly rolls back on its feet. In comparison, SAC quickly", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 396, + 433, + 409 + ], + "spans": [ + { + "bbox": [ + 105, + 396, + 433, + 409 + ], + "score": 1.0, + "content": "learns to roll off its back but fails to stand up or walk given the small data budget.", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 17.5, + "bbox_fs": [ + 104, + 313, + 506, + 409 + ] + }, + { + "type": "title", + "bbox": [ + 106, + 416, + 303, + 428 + ], + "lines": [ + { + "bbox": [ + 105, + 415, + 303, + 430 + ], + "spans": [ + { + "bbox": [ + 105, + 415, + 303, + 430 + ], + "score": 1.0, + "content": "3.2 UR5 Multi-Object Visual Pick and Place", + "type": "text" + } + ], + "index": 22 + } + ], + "index": 22 + }, + { + "type": "text", + "bbox": [ + 106, + 429, + 506, + 568 + ], + "lines": [ + { + "bbox": [ + 106, + 428, + 505, + 441 + ], + "spans": [ + { + "bbox": [ + 106, + 428, + 505, + 441 + ], + "score": 1.0, + "content": "Common in warehouse and logistics environments, pick and place tasks require a robot manipulator", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 440, + 505, + 452 + 
], + "spans": [ + { + "bbox": [ + 106, + 440, + 505, + 452 + ], + "score": 1.0, + "content": "to transport items from one bin into another. Figure 5 shows a successful pick and place cycle of", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 451, + 505, + 464 + ], + "spans": [ + { + "bbox": [ + 105, + 451, + 505, + 464 + ], + "score": 1.0, + "content": "this task. The task is challenging because of sparse rewards, the need to infer object positions from", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 106, + 464, + 506, + 476 + ], + "spans": [ + { + "bbox": [ + 106, + 464, + 506, + 476 + ], + "score": 1.0, + "content": "pixels, and the challenging dynamics of multiple moving objects. The sensory inputs consist of", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 475, + 506, + 488 + ], + "spans": [ + { + "bbox": [ + 105, + 475, + 506, + 488 + ], + "score": 1.0, + "content": "proprioceptive readings (joint angles, gripper position, end effector Cartesian position) and a 3rd", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 487, + 506, + 501 + ], + "spans": [ + { + "bbox": [ + 105, + 487, + 506, + 501 + ], + "score": 1.0, + "content": "person RGB image of the scene. 
Successfully grasping one of the 3 objects, detected by partial", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 498, + 506, + 511 + ], + "spans": [ + { + "bbox": [ + 105, + 498, + 218, + 511 + ], + "score": 1.0, + "content": "gripper closure, results in a", + "type": "text" + }, + { + "bbox": [ + 218, + 499, + 232, + 509 + ], + "score": 0.85, + "content": "+ 1", + "type": "inline_equation" + }, + { + "bbox": [ + 232, + 498, + 440, + 511 + ], + "score": 1.0, + "content": "reward, releasing the object in the same bin gives a", + "type": "text" + }, + { + "bbox": [ + 441, + 499, + 455, + 509 + ], + "score": 0.48, + "content": "- 1", + "type": "inline_equation" + }, + { + "bbox": [ + 455, + 498, + 506, + 511 + ], + "score": 1.0, + "content": "reward, and", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 509, + 506, + 524 + ], + "spans": [ + { + "bbox": [ + 105, + 509, + 245, + 524 + ], + "score": 1.0, + "content": "placing in the opposite bin gives a", + "type": "text" + }, + { + "bbox": [ + 245, + 510, + 264, + 521 + ], + "score": 0.87, + "content": "+ 1 0", + "type": "inline_equation" + }, + { + "bbox": [ + 265, + 509, + 506, + 524 + ], + "score": 1.0, + "content": "reward. We control the UR5 robot from Universal Robotics", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 521, + 506, + 534 + ], + "spans": [ + { + "bbox": [ + 105, + 521, + 117, + 534 + ], + "score": 1.0, + "content": "at", + "type": "text" + }, + { + "bbox": [ + 117, + 522, + 138, + 532 + ], + "score": 0.54, + "content": "2 \\ \\mathrm { H z }", + "type": "inline_equation" + }, + { + "bbox": [ + 139, + 521, + 474, + 534 + ], + "score": 1.0, + "content": ". 
Actions are discrete for moving the end effector in increments along X, Y, and", + "type": "text" + }, + { + "bbox": [ + 475, + 522, + 483, + 532 + ], + "score": 0.31, + "content": "\\textsf { Z }", + "type": "inline_equation" + }, + { + "bbox": [ + 484, + 521, + 506, + 534 + ], + "score": 1.0, + "content": "axes", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 106, + 533, + 506, + 546 + ], + "spans": [ + { + "bbox": [ + 106, + 533, + 506, + 546 + ], + "score": 1.0, + "content": "and for toggling the gripper state. Movement in the Z axis is only enabled while holding an object", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 106, + 545, + 506, + 558 + ], + "spans": [ + { + "bbox": [ + 106, + 545, + 506, + 558 + ], + "score": 1.0, + "content": "and the gripper automatically opens once above the correct bin. We estimate human teleoperation", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 556, + 507, + 569 + ], + "spans": [ + { + "bbox": [ + 105, + 556, + 507, + 569 + ], + "score": 1.0, + "content": "performance by recording 3 demonstrators for 20 minutes each, controlling the UR5 with a joystick.", + "type": "text" + } + ], + "index": 34 + } + ], + "index": 28.5, + "bbox_fs": [ + 105, + 428, + 507, + 569 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 572, + 505, + 678 + ], + "lines": [ + { + "bbox": [ + 105, + 572, + 506, + 586 + ], + "spans": [ + { + "bbox": [ + 105, + 572, + 506, + 586 + ], + "score": 1.0, + "content": "Dreamer reaches an average pick rate of 2.5 objects per minute within 8 hours. 
The robot initially", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 106, + 585, + 506, + 598 + ], + "spans": [ + { + "bbox": [ + 106, + 585, + 506, + 598 + ], + "score": 1.0, + "content": "struggles to learn as the reward signal is very sparse, but begins to gradually improve after 2 hours of", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 596, + 505, + 609 + ], + "spans": [ + { + "bbox": [ + 105, + 596, + 505, + 609 + ], + "score": 1.0, + "content": "training. The robot first learns to localize the objects and toggles the gripper when near an object. Over", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 106, + 609, + 505, + 621 + ], + "spans": [ + { + "bbox": [ + 106, + 609, + 505, + 621 + ], + "score": 1.0, + "content": "time, grasping becomes precise and the robot learns to push objects out of corners. Figure 5 shows", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 619, + 506, + 633 + ], + "spans": [ + { + "bbox": [ + 105, + 619, + 506, + 633 + ], + "score": 1.0, + "content": "the learning curves of Dreamer compared to Rainbow DQN, PPO, and the human baseline. Both", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 104, + 630, + 505, + 646 + ], + "spans": [ + { + "bbox": [ + 104, + 630, + 505, + 646 + ], + "score": 1.0, + "content": "Rainbow DQN and PPO only learn the short-sighted behavior of grasping and immediately dropping", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 643, + 505, + 656 + ], + "spans": [ + { + "bbox": [ + 105, + 643, + 505, + 656 + ], + "score": 1.0, + "content": "objects in the same bin. In contrast, Dreamer approaches human-level teleoperation performance", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 654, + 505, + 668 + ], + "spans": [ + { + "bbox": [ + 105, + 654, + 505, + 668 + ], + "score": 1.0, + "content": "after 8 hours. 
We hypothesize that Rainbow DQN and PPO fail because they require larger amounts", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 106, + 667, + 385, + 679 + ], + "spans": [ + { + "bbox": [ + 106, + 667, + 385, + 679 + ], + "score": 1.0, + "content": "of experience, which is not feasible for us to collect in the real world.", + "type": "text" + } + ], + "index": 43 + } + ], + "index": 39, + "bbox_fs": [ + 104, + 572, + 506, + 679 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 686, + 252, + 698 + ], + "lines": [ + { + "bbox": [ + 105, + 685, + 253, + 699 + ], + "spans": [ + { + "bbox": [ + 105, + 685, + 253, + 699 + ], + "score": 1.0, + "content": "3.3 XArm Visual Pick and Place", + "type": "text" + } + ], + "index": 44 + } + ], + "index": 44 + }, + { + "type": "text", + "bbox": [ + 106, + 699, + 503, + 722 + ], + "lines": [ + { + "bbox": [ + 105, + 698, + 505, + 711 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 505, + 711 + ], + "score": 1.0, + "content": "While the UR5 robot is a high performance industrial robot, the XArm is an accessible low-cost", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 711, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 711, + 342, + 723 + ], + "score": 1.0, + "content": "7 DOF manipulation, which we control at approximately", + "type": "text" + }, + { + "bbox": [ + 342, + 711, + 371, + 721 + ], + "score": 0.6, + "content": "0 . 5 \\ : \\mathrm { H z }", + "type": "inline_equation" + }, + { + "bbox": [ + 371, + 711, + 505, + 723 + ], + "score": 1.0, + "content": ". 
Similar to Section 3.2, the task", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 105, + 252, + 506, + 266 + ], + "spans": [ + { + "bbox": [ + 105, + 252, + 506, + 266 + ], + "score": 1.0, + "content": "requires localizing and grasping a soft object and moving it from one bin to another and back, shown", + "type": "text", + "cross_page": true + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 263, + 505, + 277 + ], + "spans": [ + { + "bbox": [ + 105, + 263, + 505, + 277 + ], + "score": 1.0, + "content": "in Figure 6. We connect the object to the gripper with a string, which makes it less likely for the", + "type": "text", + "cross_page": true + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 275, + 505, + 289 + ], + "spans": [ + { + "bbox": [ + 106, + 275, + 505, + 289 + ], + "score": 1.0, + "content": "object to get stuck in corners at the cost of more complex dynamics. The sparse reward, discrete", + "type": "text", + "cross_page": true + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 286, + 505, + 302 + ], + "spans": [ + { + "bbox": [ + 105, + 286, + 505, + 302 + ], + "score": 1.0, + "content": "action space, and observation space match the UR5 setup except for the addition of depth image", + "type": "text", + "cross_page": true + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 300, + 162, + 311 + ], + "spans": [ + { + "bbox": [ + 106, + 300, + 162, + 311 + ], + "score": 1.0, + "content": "observations.", + "type": "text", + "cross_page": true + } + ], + "index": 15 + } + ], + "index": 45.5, + "bbox_fs": [ + 105, + 698, + 505, + 723 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 53, + 500, + 137 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 107, + 53, + 500, + 137 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 107, + 53, + 500, + 137 + ], + "spans": [ + { + "bbox": [ + 107, + 53, + 500, + 137 + ], + "score": 0.957, + "type": "image", + "image_path": 
"dfce202941b6d7b7a3b4e91b152da625264b3b1c43837193ab53e137e11b01f3.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 107, + 53, + 500, + 81.0 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 107, + 81.0, + 500, + 109.0 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 107, + 109.0, + 500, + 137.0 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 151, + 505, + 243 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 106, + 151, + 506, + 165 + ], + "spans": [ + { + "bbox": [ + 106, + 151, + 506, + 165 + ], + "score": 1.0, + "content": "Figure 6: XArm Visual Pick and Place The XArm is an affordable robot arm that operates slower", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 164, + 505, + 176 + ], + "spans": [ + { + "bbox": [ + 106, + 164, + 505, + 176 + ], + "score": 1.0, + "content": "than the UR5. To demonstrate successful learning on this robot, we use a third-person RealSense", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 174, + 505, + 189 + ], + "spans": [ + { + "bbox": [ + 105, + 174, + 505, + 189 + ], + "score": 1.0, + "content": "camera with RGB and depth modalities, as well as proprioceptive inputs for the robot arm, requiring", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 186, + 505, + 199 + ], + "spans": [ + { + "bbox": [ + 106, + 186, + 505, + 199 + ], + "score": 1.0, + "content": "the world model to learn sensor fusion. The pick and place task uses a soft object. 
While soft", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 196, + 506, + 211 + ], + "spans": [ + { + "bbox": [ + 105, + 196, + 506, + 211 + ], + "score": 1.0, + "content": "objects would be challenging to model accurately in a simulator, Dreamer avoids this issue by", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 208, + 505, + 221 + ], + "spans": [ + { + "bbox": [ + 106, + 208, + 505, + 221 + ], + "score": 1.0, + "content": "directly learning on the real robot without a simulator. While Rainbow and PPO using R3M visual", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 220, + 506, + 233 + ], + "spans": [ + { + "bbox": [ + 105, + 220, + 506, + 233 + ], + "score": 1.0, + "content": "embeddings converge to the local optimum of grasping and ungrasping the object in the same bin,", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 231, + 466, + 244 + ], + "spans": [ + { + "bbox": [ + 106, + 231, + 466, + 244 + ], + "score": 1.0, + "content": "Dreamer learns a successful pick and place policy from sparse rewards in under 10 hours.", + "type": "text" + } + ], + "index": 10 + } + ], + "index": 6.5 + } + ], + "index": 3.75 + }, + { + "type": "text", + "bbox": [ + 107, + 252, + 505, + 310 + ], + "lines": [ + { + "bbox": [ + 105, + 252, + 506, + 266 + ], + "spans": [ + { + "bbox": [ + 105, + 252, + 506, + 266 + ], + "score": 1.0, + "content": "requires localizing and grasping a soft object and moving it from one bin to another and back, shown", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 263, + 505, + 277 + ], + "spans": [ + { + "bbox": [ + 105, + 263, + 505, + 277 + ], + "score": 1.0, + "content": "in Figure 6. 
We connect the object to the gripper with a string, which makes it less likely for the", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 275, + 505, + 289 + ], + "spans": [ + { + "bbox": [ + 106, + 275, + 505, + 289 + ], + "score": 1.0, + "content": "object to get stuck in corners at the cost of more complex dynamics. The sparse reward, discrete", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 286, + 505, + 302 + ], + "spans": [ + { + "bbox": [ + 105, + 286, + 505, + 302 + ], + "score": 1.0, + "content": "action space, and observation space match the UR5 setup except for the addition of depth image", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 300, + 162, + 311 + ], + "spans": [ + { + "bbox": [ + 106, + 300, + 162, + 311 + ], + "score": 1.0, + "content": "observations.", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 13 + }, + { + "type": "text", + "bbox": [ + 106, + 315, + 505, + 444 + ], + "lines": [ + { + "bbox": [ + 106, + 315, + 506, + 329 + ], + "spans": [ + { + "bbox": [ + 106, + 315, + 506, + 329 + ], + "score": 1.0, + "content": "Dreamer learns a policy that enables the XArm to achieve an average pick rate of 3.1 objects per", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 328, + 506, + 340 + ], + "spans": [ + { + "bbox": [ + 106, + 328, + 506, + 340 + ], + "score": 1.0, + "content": "minute in 10 hours of time, which is comparable to human performance on this task. 
Figure 6", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 338, + 506, + 352 + ], + "spans": [ + { + "bbox": [ + 105, + 338, + 506, + 352 + ], + "score": 1.0, + "content": "shows that Dreamer learns to solve the task within 10 hours, whereas the Rainbow algorithm, a", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 350, + 506, + 364 + ], + "spans": [ + { + "bbox": [ + 105, + 350, + 506, + 364 + ], + "score": 1.0, + "content": "top model-free algorithm for discrete control from pixels, fails to learn. We additionally compare", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 362, + 505, + 376 + ], + "spans": [ + { + "bbox": [ + 105, + 362, + 505, + 376 + ], + "score": 1.0, + "content": "Dreamer against a PPO baseline that utilizes R3M (Nair et al., 2022) pretrained visual embeddings", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 374, + 506, + 387 + ], + "spans": [ + { + "bbox": [ + 106, + 374, + 506, + 387 + ], + "score": 1.0, + "content": "for the state, but notice no improvement in performance. Interestingly, we observed that Dreamer", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 385, + 505, + 399 + ], + "spans": [ + { + "bbox": [ + 105, + 385, + 505, + 399 + ], + "score": 1.0, + "content": "learns to sometimes use the string to pull the object out of a corner before grasping it, demonstrating", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 397, + 505, + 410 + ], + "spans": [ + { + "bbox": [ + 105, + 397, + 505, + 410 + ], + "score": 1.0, + "content": "multi-modal behaviors. 
Moreover, we observed that when lighting conditions change drastically", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 410, + 504, + 421 + ], + "spans": [ + { + "bbox": [ + 106, + 410, + 504, + 421 + ], + "score": 1.0, + "content": "(such as sharp shadows during sunrise), performance initially collapses but Dreamer then adapts to", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 419, + 507, + 435 + ], + "spans": [ + { + "bbox": [ + 105, + 419, + 507, + 435 + ], + "score": 1.0, + "content": "the changing conditions and exceeds its previous performance after a few hours of additional training,", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 106, + 432, + 206, + 446 + ], + "spans": [ + { + "bbox": [ + 106, + 432, + 206, + 446 + ], + "score": 1.0, + "content": "reported in Appendix A.", + "type": "text" + } + ], + "index": 26 + } + ], + "index": 21 + }, + { + "type": "title", + "bbox": [ + 107, + 452, + 210, + 464 + ], + "lines": [ + { + "bbox": [ + 105, + 451, + 210, + 466 + ], + "spans": [ + { + "bbox": [ + 105, + 451, + 210, + 466 + ], + "score": 1.0, + "content": "3.4 Sphero Navigation", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 27 + }, + { + "type": "text", + "bbox": [ + 107, + 465, + 505, + 569 + ], + "lines": [ + { + "bbox": [ + 106, + 465, + 506, + 478 + ], + "spans": [ + { + "bbox": [ + 106, + 465, + 506, + 478 + ], + "score": 1.0, + "content": "We evaluate Dreamer on a visual navigation task that requires maneuvering a wheeled robot to a", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 477, + 505, + 489 + ], + "spans": [ + { + "bbox": [ + 106, + 477, + 505, + 489 + ], + "score": 1.0, + "content": "fixed goal location given only RGB images as input. 
We use the Sphero Ollie robot, a cylindrical", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 106, + 488, + 503, + 501 + ], + "spans": [ + { + "bbox": [ + 106, + 489, + 482, + 501 + ], + "score": 1.0, + "content": "robot with two controllable motors, which we control through continuous torque commands at", + "type": "text" + }, + { + "bbox": [ + 483, + 488, + 503, + 499 + ], + "score": 0.56, + "content": "2 \\ : \\mathrm { H z }", + "type": "inline_equation" + } + ], + "index": 30 + }, + { + "bbox": [ + 106, + 500, + 505, + 513 + ], + "spans": [ + { + "bbox": [ + 106, + 500, + 505, + 513 + ], + "score": 1.0, + "content": "Because the robot is symmetric and the robot only has access to image observations, it has to infer", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 106, + 511, + 505, + 524 + ], + "spans": [ + { + "bbox": [ + 106, + 511, + 505, + 524 + ], + "score": 1.0, + "content": "the heading direction from the history of observations. The robot is provided with a dense reward", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 106, + 524, + 505, + 536 + ], + "spans": [ + { + "bbox": [ + 106, + 524, + 505, + 536 + ], + "score": 1.0, + "content": "equal to the negative L2 distance, which is computed using a oracle vision pipeline that detects the", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 535, + 505, + 547 + ], + "spans": [ + { + "bbox": [ + 105, + 535, + 505, + 547 + ], + "score": 1.0, + "content": "Sphero’s position (this information is not provided to the agent). 
As the goal is fixed, after 100", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 547, + 505, + 560 + ], + "spans": [ + { + "bbox": [ + 105, + 547, + 505, + 560 + ], + "score": 1.0, + "content": "environment steps, we end the episode and randomize the robot’s position through a sequence of high", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 560, + 226, + 570 + ], + "spans": [ + { + "bbox": [ + 105, + 560, + 226, + 570 + ], + "score": 1.0, + "content": "power random motor actions.", + "type": "text" + } + ], + "index": 36 + } + ], + "index": 32 + }, + { + "type": "text", + "bbox": [ + 107, + 575, + 505, + 645 + ], + "lines": [ + { + "bbox": [ + 105, + 574, + 506, + 588 + ], + "spans": [ + { + "bbox": [ + 105, + 574, + 506, + 588 + ], + "score": 1.0, + "content": "In 2 hours, Dreamer learns to quickly and consistently navigate to the goal and stay near the goal for", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 106, + 587, + 505, + 599 + ], + "spans": [ + { + "bbox": [ + 106, + 587, + 505, + 599 + ], + "score": 1.0, + "content": "the remainder of the episode. As shown in Figure 7, Dreamer achieves an average distance to the", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 597, + 507, + 612 + ], + "spans": [ + { + "bbox": [ + 105, + 597, + 507, + 612 + ], + "score": 1.0, + "content": "goal of 0.15, measured in units of the area size and averaged across time steps. We find that DrQv2,", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 611, + 505, + 623 + ], + "spans": [ + { + "bbox": [ + 105, + 611, + 505, + 623 + ], + "score": 1.0, + "content": "a model-free algorithm specifically designed to continuous control from pixels, achieves similar", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 622, + 505, + 635 + ], + "spans": [ + { + "bbox": [ + 105, + 622, + 505, + 635 + ], + "score": 1.0, + "content": "performance. 
This result matches the simulated experiments of Yarats et al. (2021) that showed the", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 106, + 633, + 419, + 647 + ], + "spans": [ + { + "bbox": [ + 106, + 633, + 419, + 647 + ], + "score": 1.0, + "content": "two algorithms to perform similarly for continuous control tasks from images.", + "type": "text" + } + ], + "index": 42 + } + ], + "index": 39.5 + }, + { + "type": "title", + "bbox": [ + 107, + 657, + 197, + 670 + ], + "lines": [ + { + "bbox": [ + 105, + 656, + 198, + 672 + ], + "spans": [ + { + "bbox": [ + 105, + 656, + 198, + 672 + ], + "score": 1.0, + "content": "4 Related Work", + "type": "text" + } + ], + "index": 43 + } + ], + "index": 43 + }, + { + "type": "text", + "bbox": [ + 107, + 676, + 505, + 722 + ], + "lines": [ + { + "bbox": [ + 105, + 675, + 505, + 688 + ], + "spans": [ + { + "bbox": [ + 105, + 675, + 505, + 688 + ], + "score": 1.0, + "content": "Existing work on robot learning commonly leverages large amounts of simulated experience before", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 106, + 687, + 505, + 698 + ], + "spans": [ + { + "bbox": [ + 106, + 687, + 505, + 698 + ], + "score": 1.0, + "content": "deploying to the real world (Rusu et al., 2016; Peng et al., 2018; OpenAI et al., 2018; Lee et al., 2020;", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 698, + 506, + 711 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 506, + 711 + ], + "score": 1.0, + "content": "Irpan et al., 2020; Kumar et al., 2021; Siekmann et al., 2021; Escontrela et al., 2022), leverage fleets", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 106, + 711, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 106, + 711, + 505, + 722 + ], + "score": 1.0, + "content": "of robots to collect experience datasets (Kalashnikov et al., 2018; Dasari et al., 2019; Kalashnikov", + "type": "text" + } + ], + "index": 47 + } + ], + "index": 45.5 + } + ], + "page_idx": 
6, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 302, + 741, + 309, + 750 + ], + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 753 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 753 + ], + "score": 1.0, + "content": "7", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 53, + 500, + 137 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 107, + 53, + 500, + 137 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 107, + 53, + 500, + 137 + ], + "spans": [ + { + "bbox": [ + 107, + 53, + 500, + 137 + ], + "score": 0.957, + "type": "image", + "image_path": "dfce202941b6d7b7a3b4e91b152da625264b3b1c43837193ab53e137e11b01f3.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 107, + 53, + 500, + 81.0 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 107, + 81.0, + 500, + 109.0 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 107, + 109.0, + 500, + 137.0 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 151, + 505, + 243 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 106, + 151, + 506, + 165 + ], + "spans": [ + { + "bbox": [ + 106, + 151, + 506, + 165 + ], + "score": 1.0, + "content": "Figure 6: XArm Visual Pick and Place The XArm is an affordable robot arm that operates slower", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 164, + 505, + 176 + ], + "spans": [ + { + "bbox": [ + 106, + 164, + 505, + 176 + ], + "score": 1.0, + "content": "than the UR5. 
To demonstrate successful learning on this robot, we use a third-person RealSense", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 174, + 505, + 189 + ], + "spans": [ + { + "bbox": [ + 105, + 174, + 505, + 189 + ], + "score": 1.0, + "content": "camera with RGB and depth modalities, as well as proprioceptive inputs for the robot arm, requiring", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 186, + 505, + 199 + ], + "spans": [ + { + "bbox": [ + 106, + 186, + 505, + 199 + ], + "score": 1.0, + "content": "the world model to learn sensor fusion. The pick and place task uses a soft object. While soft", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 196, + 506, + 211 + ], + "spans": [ + { + "bbox": [ + 105, + 196, + 506, + 211 + ], + "score": 1.0, + "content": "objects would be challenging to model accurately in a simulator, Dreamer avoids this issue by", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 208, + 505, + 221 + ], + "spans": [ + { + "bbox": [ + 106, + 208, + 505, + 221 + ], + "score": 1.0, + "content": "directly learning on the real robot without a simulator. 
While Rainbow and PPO using R3M visual", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 220, + 506, + 233 + ], + "spans": [ + { + "bbox": [ + 105, + 220, + 506, + 233 + ], + "score": 1.0, + "content": "embeddings converge to the local optimum of grasping and ungrasping the object in the same bin,", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 231, + 466, + 244 + ], + "spans": [ + { + "bbox": [ + 106, + 231, + 466, + 244 + ], + "score": 1.0, + "content": "Dreamer learns a successful pick and place policy from sparse rewards in under 10 hours.", + "type": "text" + } + ], + "index": 10 + } + ], + "index": 6.5 + } + ], + "index": 3.75 + }, + { + "type": "text", + "bbox": [ + 107, + 252, + 505, + 310 + ], + "lines": [], + "index": 13, + "bbox_fs": [ + 105, + 252, + 506, + 311 + ], + "lines_deleted": true + }, + { + "type": "text", + "bbox": [ + 106, + 315, + 505, + 444 + ], + "lines": [ + { + "bbox": [ + 106, + 315, + 506, + 329 + ], + "spans": [ + { + "bbox": [ + 106, + 315, + 506, + 329 + ], + "score": 1.0, + "content": "Dreamer learns a policy that enables the XArm to achieve an average pick rate of 3.1 objects per", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 328, + 506, + 340 + ], + "spans": [ + { + "bbox": [ + 106, + 328, + 506, + 340 + ], + "score": 1.0, + "content": "minute in 10 hours of time, which is comparable to human performance on this task. Figure 6", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 338, + 506, + 352 + ], + "spans": [ + { + "bbox": [ + 105, + 338, + 506, + 352 + ], + "score": 1.0, + "content": "shows that Dreamer learns to solve the task within 10 hours, whereas the Rainbow algorithm, a", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 350, + 506, + 364 + ], + "spans": [ + { + "bbox": [ + 105, + 350, + 506, + 364 + ], + "score": 1.0, + "content": "top model-free algorithm for discrete control from pixels, fails to learn. 
We additionally compare", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 362, + 505, + 376 + ], + "spans": [ + { + "bbox": [ + 105, + 362, + 505, + 376 + ], + "score": 1.0, + "content": "Dreamer against a PPO baseline that utilizes R3M (Nair et al., 2022) pretrained visual embeddings", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 374, + 506, + 387 + ], + "spans": [ + { + "bbox": [ + 106, + 374, + 506, + 387 + ], + "score": 1.0, + "content": "for the state, but notice no improvement in performance. Interestingly, we observed that Dreamer", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 385, + 505, + 399 + ], + "spans": [ + { + "bbox": [ + 105, + 385, + 505, + 399 + ], + "score": 1.0, + "content": "learns to sometimes use the string to pull the object out of a corner before grasping it, demonstrating", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 397, + 505, + 410 + ], + "spans": [ + { + "bbox": [ + 105, + 397, + 505, + 410 + ], + "score": 1.0, + "content": "multi-modal behaviors. 
Moreover, we observed that when lighting conditions change drastically", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 410, + 504, + 421 + ], + "spans": [ + { + "bbox": [ + 106, + 410, + 504, + 421 + ], + "score": 1.0, + "content": "(such as sharp shadows during sunrise), performance initially collapses but Dreamer then adapts to", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 419, + 507, + 435 + ], + "spans": [ + { + "bbox": [ + 105, + 419, + 507, + 435 + ], + "score": 1.0, + "content": "the changing conditions and exceeds its previous performance after a few hours of additional training,", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 106, + 432, + 206, + 446 + ], + "spans": [ + { + "bbox": [ + 106, + 432, + 206, + 446 + ], + "score": 1.0, + "content": "reported in Appendix A.", + "type": "text" + } + ], + "index": 26 + } + ], + "index": 21, + "bbox_fs": [ + 105, + 315, + 507, + 446 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 452, + 210, + 464 + ], + "lines": [ + { + "bbox": [ + 105, + 451, + 210, + 466 + ], + "spans": [ + { + "bbox": [ + 105, + 451, + 210, + 466 + ], + "score": 1.0, + "content": "3.4 Sphero Navigation", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 27 + }, + { + "type": "text", + "bbox": [ + 107, + 465, + 505, + 569 + ], + "lines": [ + { + "bbox": [ + 106, + 465, + 506, + 478 + ], + "spans": [ + { + "bbox": [ + 106, + 465, + 506, + 478 + ], + "score": 1.0, + "content": "We evaluate Dreamer on a visual navigation task that requires maneuvering a wheeled robot to a", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 477, + 505, + 489 + ], + "spans": [ + { + "bbox": [ + 106, + 477, + 505, + 489 + ], + "score": 1.0, + "content": "fixed goal location given only RGB images as input. 
We use the Sphero Ollie robot, a cylindrical", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 106, + 488, + 503, + 501 + ], + "spans": [ + { + "bbox": [ + 106, + 489, + 482, + 501 + ], + "score": 1.0, + "content": "robot with two controllable motors, which we control through continuous torque commands at", + "type": "text" + }, + { + "bbox": [ + 483, + 488, + 503, + 499 + ], + "score": 0.56, + "content": "2 \\ : \\mathrm { H z }", + "type": "inline_equation" + } + ], + "index": 30 + }, + { + "bbox": [ + 106, + 500, + 505, + 513 + ], + "spans": [ + { + "bbox": [ + 106, + 500, + 505, + 513 + ], + "score": 1.0, + "content": "Because the robot is symmetric and the robot only has access to image observations, it has to infer", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 106, + 511, + 505, + 524 + ], + "spans": [ + { + "bbox": [ + 106, + 511, + 505, + 524 + ], + "score": 1.0, + "content": "the heading direction from the history of observations. The robot is provided with a dense reward", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 106, + 524, + 505, + 536 + ], + "spans": [ + { + "bbox": [ + 106, + 524, + 505, + 536 + ], + "score": 1.0, + "content": "equal to the negative L2 distance, which is computed using a oracle vision pipeline that detects the", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 535, + 505, + 547 + ], + "spans": [ + { + "bbox": [ + 105, + 535, + 505, + 547 + ], + "score": 1.0, + "content": "Sphero’s position (this information is not provided to the agent). 
As the goal is fixed, after 100", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 547, + 505, + 560 + ], + "spans": [ + { + "bbox": [ + 105, + 547, + 505, + 560 + ], + "score": 1.0, + "content": "environment steps, we end the episode and randomize the robot’s position through a sequence of high", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 560, + 226, + 570 + ], + "spans": [ + { + "bbox": [ + 105, + 560, + 226, + 570 + ], + "score": 1.0, + "content": "power random motor actions.", + "type": "text" + } + ], + "index": 36 + } + ], + "index": 32, + "bbox_fs": [ + 105, + 465, + 506, + 570 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 575, + 505, + 645 + ], + "lines": [ + { + "bbox": [ + 105, + 574, + 506, + 588 + ], + "spans": [ + { + "bbox": [ + 105, + 574, + 506, + 588 + ], + "score": 1.0, + "content": "In 2 hours, Dreamer learns to quickly and consistently navigate to the goal and stay near the goal for", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 106, + 587, + 505, + 599 + ], + "spans": [ + { + "bbox": [ + 106, + 587, + 505, + 599 + ], + "score": 1.0, + "content": "the remainder of the episode. As shown in Figure 7, Dreamer achieves an average distance to the", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 597, + 507, + 612 + ], + "spans": [ + { + "bbox": [ + 105, + 597, + 507, + 612 + ], + "score": 1.0, + "content": "goal of 0.15, measured in units of the area size and averaged across time steps. 
We find that DrQv2,", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 611, + 505, + 623 + ], + "spans": [ + { + "bbox": [ + 105, + 611, + 505, + 623 + ], + "score": 1.0, + "content": "a model-free algorithm specifically designed to continuous control from pixels, achieves similar", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 622, + 505, + 635 + ], + "spans": [ + { + "bbox": [ + 105, + 622, + 505, + 635 + ], + "score": 1.0, + "content": "performance. This result matches the simulated experiments of Yarats et al. (2021) that showed the", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 106, + 633, + 419, + 647 + ], + "spans": [ + { + "bbox": [ + 106, + 633, + 419, + 647 + ], + "score": 1.0, + "content": "two algorithms to perform similarly for continuous control tasks from images.", + "type": "text" + } + ], + "index": 42 + } + ], + "index": 39.5, + "bbox_fs": [ + 105, + 574, + 507, + 647 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 657, + 197, + 670 + ], + "lines": [ + { + "bbox": [ + 105, + 656, + 198, + 672 + ], + "spans": [ + { + "bbox": [ + 105, + 656, + 198, + 672 + ], + "score": 1.0, + "content": "4 Related Work", + "type": "text" + } + ], + "index": 43 + } + ], + "index": 43 + }, + { + "type": "text", + "bbox": [ + 107, + 676, + 505, + 722 + ], + "lines": [ + { + "bbox": [ + 105, + 675, + 505, + 688 + ], + "spans": [ + { + "bbox": [ + 105, + 675, + 505, + 688 + ], + "score": 1.0, + "content": "Existing work on robot learning commonly leverages large amounts of simulated experience before", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 106, + 687, + 505, + 698 + ], + "spans": [ + { + "bbox": [ + 106, + 687, + 505, + 698 + ], + "score": 1.0, + "content": "deploying to the real world (Rusu et al., 2016; Peng et al., 2018; OpenAI et al., 2018; Lee et al., 2020;", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 698, + 506, + 711 + ], + "spans": [ + { + 
"bbox": [ + 105, + 698, + 506, + 711 + ], + "score": 1.0, + "content": "Irpan et al., 2020; Kumar et al., 2021; Siekmann et al., 2021; Escontrela et al., 2022), leverage fleets", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 106, + 711, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 106, + 711, + 505, + 722 + ], + "score": 1.0, + "content": "of robots to collect experience datasets (Kalashnikov et al., 2018; Dasari et al., 2019; Kalashnikov", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 106, + 225, + 505, + 236 + ], + "spans": [ + { + "bbox": [ + 106, + 225, + 505, + 236 + ], + "score": 1.0, + "content": "et al., 2021; Ebert et al., 2021), or rely on external information such as human expert demonstrations", + "type": "text", + "cross_page": true + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 236, + 506, + 249 + ], + "spans": [ + { + "bbox": [ + 105, + 236, + 506, + 249 + ], + "score": 1.0, + "content": "or task priors to achieve sample-efficient learning (Xie et al., 2019; Schoettler et al., 2019; James", + "type": "text", + "cross_page": true + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 246, + 505, + 261 + ], + "spans": [ + { + "bbox": [ + 105, + 246, + 505, + 261 + ], + "score": 1.0, + "content": "et al., 2021; Shah and Levine, 2022; Bohez et al., 2022; Sivakumar et al., 2022). However, designing", + "type": "text", + "cross_page": true + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 259, + 505, + 271 + ], + "spans": [ + { + "bbox": [ + 106, + 259, + 505, + 271 + ], + "score": 1.0, + "content": "simulated tasks and collecting expert demonstrations is time-consuming. 
Moreover, many of these", + "type": "text", + "cross_page": true + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 271, + 506, + 284 + ], + "spans": [ + { + "bbox": [ + 105, + 271, + 506, + 284 + ], + "score": 1.0, + "content": "approaches require specialized algorithms for leveraging offline experience, demonstrations, or", + "type": "text", + "cross_page": true + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 282, + 506, + 295 + ], + "spans": [ + { + "bbox": [ + 105, + 282, + 506, + 295 + ], + "score": 1.0, + "content": "simulator inaccuracies. In contrast, our experiments show that learning end-to-end from rewards in", + "type": "text", + "cross_page": true + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 295, + 427, + 306 + ], + "spans": [ + { + "bbox": [ + 106, + 295, + 427, + 306 + ], + "score": 1.0, + "content": "the physical world is feasible for a diverse range of tasks through world models.", + "type": "text", + "cross_page": true + } + ], + "index": 14 + } + ], + "index": 45.5, + "bbox_fs": [ + 105, + 675, + 506, + 722 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 58, + 498, + 142 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 106, + 58, + 498, + 142 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 106, + 58, + 498, + 142 + ], + "spans": [ + { + "bbox": [ + 106, + 58, + 498, + 142 + ], + "score": 0.945, + "type": "image", + "image_path": "372acb53fc3f9ab9f178baac319f8b0cb0c4ee1ca96f374bada7397c4ec23630.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 106, + 58, + 498, + 86.0 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 106, + 86.0, + 498, + 114.0 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 106, + 114.0, + 498, + 142.0 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 156, + 505, + 213 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 156, + 505, + 169 + ], + "spans": [ + { + 
"bbox": [ + 105, + 156, + 505, + 169 + ], + "score": 1.0, + "content": "Figure 7: Sphero Navigation This task requires the Sphero robot to navigate to a goal location", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 168, + 505, + 181 + ], + "spans": [ + { + "bbox": [ + 105, + 168, + 505, + 181 + ], + "score": 1.0, + "content": "given a top-down RGB image as the only input. The task requires the robot to localize itself from raw", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 179, + 505, + 192 + ], + "spans": [ + { + "bbox": [ + 105, + 179, + 505, + 192 + ], + "score": 1.0, + "content": "pixels, to infer its orientation from the sequence of past images because it is ambiguous from a single", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 190, + 506, + 204 + ], + "spans": [ + { + "bbox": [ + 105, + 190, + 506, + 204 + ], + "score": 1.0, + "content": "image, and to control the robot from under-actuated motors that require building up momentum over", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 201, + 389, + 214 + ], + "spans": [ + { + "bbox": [ + 106, + 201, + 389, + 214 + ], + "score": 1.0, + "content": "time. 
Dreamer learns a successful policy on this task in under 2 hours.", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 5 + } + ], + "index": 3.0 + }, + { + "type": "text", + "bbox": [ + 107, + 224, + 505, + 306 + ], + "lines": [ + { + "bbox": [ + 106, + 225, + 505, + 236 + ], + "spans": [ + { + "bbox": [ + 106, + 225, + 505, + 236 + ], + "score": 1.0, + "content": "et al., 2021; Ebert et al., 2021), or rely on external information such as human expert demonstrations", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 236, + 506, + 249 + ], + "spans": [ + { + "bbox": [ + 105, + 236, + 506, + 249 + ], + "score": 1.0, + "content": "or task priors to achieve sample-efficient learning (Xie et al., 2019; Schoettler et al., 2019; James", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 246, + 505, + 261 + ], + "spans": [ + { + "bbox": [ + 105, + 246, + 505, + 261 + ], + "score": 1.0, + "content": "et al., 2021; Shah and Levine, 2022; Bohez et al., 2022; Sivakumar et al., 2022). However, designing", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 259, + 505, + 271 + ], + "spans": [ + { + "bbox": [ + 106, + 259, + 505, + 271 + ], + "score": 1.0, + "content": "simulated tasks and collecting expert demonstrations is time-consuming. Moreover, many of these", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 271, + 506, + 284 + ], + "spans": [ + { + "bbox": [ + 105, + 271, + 506, + 284 + ], + "score": 1.0, + "content": "approaches require specialized algorithms for leveraging offline experience, demonstrations, or", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 282, + 506, + 295 + ], + "spans": [ + { + "bbox": [ + 105, + 282, + 506, + 295 + ], + "score": 1.0, + "content": "simulator inaccuracies. 
In contrast, our experiments show that learning end-to-end from rewards in", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 295, + 427, + 306 + ], + "spans": [ + { + "bbox": [ + 106, + 295, + 427, + 306 + ], + "score": 1.0, + "content": "the physical world is feasible for a diverse range of tasks through world models.", + "type": "text" + } + ], + "index": 14 + } + ], + "index": 11 + }, + { + "type": "text", + "bbox": [ + 107, + 316, + 505, + 456 + ], + "lines": [ + { + "bbox": [ + 106, + 316, + 506, + 329 + ], + "spans": [ + { + "bbox": [ + 106, + 316, + 506, + 329 + ], + "score": 1.0, + "content": "Relatively few works have demonstrated end-to-end learning from scratch in the physical world.", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 327, + 505, + 340 + ], + "spans": [ + { + "bbox": [ + 106, + 327, + 505, + 340 + ], + "score": 1.0, + "content": "Visual Foresight (Finn et al., 2016; Finn and Levine, 2017; Ebert et al., 2018) learns a video prediction", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 339, + 505, + 352 + ], + "spans": [ + { + "bbox": [ + 106, + 339, + 505, + 352 + ], + "score": 1.0, + "content": "model to solve real world tasks by online planning, but is limited to short-horizon tasks and requires", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 351, + 505, + 364 + ], + "spans": [ + { + "bbox": [ + 105, + 351, + 505, + 364 + ], + "score": 1.0, + "content": "generating images during planning, making it computationally expensive. Yang et al. 
(2019; 2022)", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 106, + 362, + 505, + 375 + ], + "spans": [ + { + "bbox": [ + 106, + 362, + 505, + 375 + ], + "score": 1.0, + "content": "learn quadruped locomotion through a model-based approach by predicting foot placement and", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 374, + 505, + 388 + ], + "spans": [ + { + "bbox": [ + 105, + 374, + 505, + 388 + ], + "score": 1.0, + "content": "leveraging a domain-specific controller to achieve them. Ha et al. (2020) learn a quadruped walking", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 386, + 505, + 399 + ], + "spans": [ + { + "bbox": [ + 105, + 386, + 505, + 399 + ], + "score": 1.0, + "content": "policy by relying on a scripted reset policy, so the robot does not have to learn to stand up. SOLAR", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 106, + 398, + 505, + 411 + ], + "spans": [ + { + "bbox": [ + 106, + 398, + 505, + 411 + ], + "score": 1.0, + "content": "(Zhang et al., 2019) learns a latent dynamics model from images and demonstrates reaching and", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 409, + 505, + 423 + ], + "spans": [ + { + "bbox": [ + 105, + 409, + 505, + 423 + ], + "score": 1.0, + "content": "pushing with a robot arm. Nagabandi et al. (2019) learns manipulation policies by planning through", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 420, + 506, + 433 + ], + "spans": [ + { + "bbox": [ + 105, + 420, + 506, + 433 + ], + "score": 1.0, + "content": "a learned dynamics model from state observations. 
In comparison, our experiments show successful", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 433, + 506, + 446 + ], + "spans": [ + { + "bbox": [ + 105, + 433, + 506, + 446 + ], + "score": 1.0, + "content": "learning across 4 challenging robot tasks that cover a wide range of challenges and sensory modalities,", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 443, + 349, + 458 + ], + "spans": [ + { + "bbox": [ + 105, + 443, + 349, + 458 + ], + "score": 1.0, + "content": "with a single learning algorithm and hyperparameter setting.", + "type": "text" + } + ], + "index": 26 + } + ], + "index": 20.5 + }, + { + "type": "title", + "bbox": [ + 107, + 474, + 179, + 487 + ], + "lines": [ + { + "bbox": [ + 104, + 472, + 181, + 490 + ], + "spans": [ + { + "bbox": [ + 104, + 472, + 181, + 490 + ], + "score": 1.0, + "content": "5 Discussion", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 27 + }, + { + "type": "text", + "bbox": [ + 106, + 502, + 505, + 596 + ], + "lines": [ + { + "bbox": [ + 106, + 503, + 506, + 515 + ], + "spans": [ + { + "bbox": [ + 106, + 503, + 506, + 515 + ], + "score": 1.0, + "content": "We applied Dreamer to physical robot learning, finding that modern world models enable sample-", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 515, + 505, + 527 + ], + "spans": [ + { + "bbox": [ + 106, + 515, + 505, + 527 + ], + "score": 1.0, + "content": "efficient robot learning for a range of tasks, from scratch in the real world and without simulators. 
We", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 106, + 526, + 506, + 539 + ], + "spans": [ + { + "bbox": [ + 106, + 526, + 506, + 539 + ], + "score": 1.0, + "content": "also find that the approach is generally applicable in that it can solve robot locomotion, manipulation,", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 106, + 538, + 505, + 551 + ], + "spans": [ + { + "bbox": [ + 106, + 538, + 505, + 551 + ], + "score": 1.0, + "content": "and navigation tasks without changing hyperparameters. Dreamer taught a quadruped robot to roll", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 548, + 505, + 564 + ], + "spans": [ + { + "bbox": [ + 105, + 548, + 505, + 564 + ], + "score": 1.0, + "content": "off the back, stand up, and walk in 1 hour from scratch, which previously required extensive training", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 561, + 505, + 574 + ], + "spans": [ + { + "bbox": [ + 105, + 561, + 505, + 574 + ], + "score": 1.0, + "content": "in simulation followed by transfer to the real world or parameterized trajectory generators and given", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 106, + 574, + 504, + 585 + ], + "spans": [ + { + "bbox": [ + 106, + 574, + 504, + 585 + ], + "score": 1.0, + "content": "reset policies. 
We also demonstrate learning to pick and place objects from pixels and sparse rewards", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 106, + 585, + 241, + 596 + ], + "spans": [ + { + "bbox": [ + 106, + 585, + 241, + 596 + ], + "score": 1.0, + "content": "on two robot arms in 8–10 hours.", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 31.5 + }, + { + "type": "text", + "bbox": [ + 107, + 607, + 505, + 665 + ], + "lines": [ + { + "bbox": [ + 105, + 606, + 505, + 620 + ], + "spans": [ + { + "bbox": [ + 105, + 606, + 505, + 620 + ], + "score": 1.0, + "content": "Limitations While Dreamer shows promising results, learning on hardware over many hours", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 619, + 505, + 631 + ], + "spans": [ + { + "bbox": [ + 105, + 619, + 505, + 631 + ], + "score": 1.0, + "content": "creates wear on robots that may require human intervention or repair. Additionally, more work is", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 630, + 506, + 644 + ], + "spans": [ + { + "bbox": [ + 105, + 630, + 506, + 644 + ], + "score": 1.0, + "content": "required to explore the limits of Dreamer and our baselines by training for a longer time. 
Finally, we", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 641, + 505, + 657 + ], + "spans": [ + { + "bbox": [ + 105, + 641, + 505, + 657 + ], + "score": 1.0, + "content": "see tackling more challenging tasks, potentially by combining the benefits of fast real world learning", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 106, + 653, + 372, + 666 + ], + "spans": [ + { + "bbox": [ + 106, + 653, + 372, + 666 + ], + "score": 1.0, + "content": "with those of simulators, as an impactful future research direction.", + "type": "text" + } + ], + "index": 40 + } + ], + "index": 38 + }, + { + "type": "text", + "bbox": [ + 108, + 676, + 504, + 722 + ], + "lines": [ + { + "bbox": [ + 105, + 675, + 505, + 689 + ], + "spans": [ + { + "bbox": [ + 105, + 675, + 505, + 689 + ], + "score": 1.0, + "content": "Acknowledgements We thank Stephen James and Justin Kerr for helpful suggestions and help with", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 686, + 506, + 701 + ], + "spans": [ + { + "bbox": [ + 105, + 686, + 506, + 701 + ], + "score": 1.0, + "content": "printing the protective shell of the quadruped robot. We thank Ademi Adeniji for help with setting up", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 698, + 505, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 505, + 712 + ], + "score": 1.0, + "content": "the XArm robot and Raven Huang for help with setting up the UR5 robot. 
This work was supported", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 711, + 498, + 724 + ], + "spans": [ + { + "bbox": [ + 105, + 711, + 498, + 724 + ], + "score": 1.0, + "content": "in part by an NSF Fellowship, NSF NRI #2024675, and the Vanier Canada Graduate Scholarship.", + "type": "text" + } + ], + "index": 44 + } + ], + "index": 42.5 + } + ], + "page_idx": 7, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 302, + 742, + 308, + 750 + ], + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 752 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 752 + ], + "score": 1.0, + "content": "8", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 58, + 498, + 142 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 106, + 58, + 498, + 142 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 106, + 58, + 498, + 142 + ], + "spans": [ + { + "bbox": [ + 106, + 58, + 498, + 142 + ], + "score": 0.945, + "type": "image", + "image_path": "372acb53fc3f9ab9f178baac319f8b0cb0c4ee1ca96f374bada7397c4ec23630.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 106, + 58, + 498, + 86.0 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 106, + 86.0, + 498, + 114.0 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 106, + 114.0, + 498, + 142.0 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 156, + 505, + 213 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 156, + 505, + 169 + ], + "spans": [ + { + "bbox": [ + 105, + 156, + 505, + 169 + ], + "score": 1.0, + "content": "Figure 7: Sphero Navigation This task requires the Sphero robot to navigate to a goal location", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 168, + 505, + 181 + ], + "spans": [ + { + "bbox": [ + 105, + 168, + 505, + 181 + ], + "score": 1.0, + "content": "given a 
top-down RGB image as the only input. The task requires the robot to localize itself from raw", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 179, + 505, + 192 + ], + "spans": [ + { + "bbox": [ + 105, + 179, + 505, + 192 + ], + "score": 1.0, + "content": "pixels, to infer its orientation from the sequence of past images because it is ambiguous from a single", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 190, + 506, + 204 + ], + "spans": [ + { + "bbox": [ + 105, + 190, + 506, + 204 + ], + "score": 1.0, + "content": "image, and to control the robot from under-actuated motors that require building up momentum over", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 201, + 389, + 214 + ], + "spans": [ + { + "bbox": [ + 106, + 201, + 389, + 214 + ], + "score": 1.0, + "content": "time. Dreamer learns a successful policy on this task in under 2 hours.", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 5 + } + ], + "index": 3.0 + }, + { + "type": "text", + "bbox": [ + 107, + 224, + 505, + 306 + ], + "lines": [], + "index": 11, + "bbox_fs": [ + 105, + 225, + 506, + 306 + ], + "lines_deleted": true + }, + { + "type": "text", + "bbox": [ + 107, + 316, + 505, + 456 + ], + "lines": [ + { + "bbox": [ + 106, + 316, + 506, + 329 + ], + "spans": [ + { + "bbox": [ + 106, + 316, + 506, + 329 + ], + "score": 1.0, + "content": "Relatively few works have demonstrated end-to-end learning from scratch in the physical world.", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 327, + 505, + 340 + ], + "spans": [ + { + "bbox": [ + 106, + 327, + 505, + 340 + ], + "score": 1.0, + "content": "Visual Foresight (Finn et al., 2016; Finn and Levine, 2017; Ebert et al., 2018) learns a video prediction", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 339, + 505, + 352 + ], + "spans": [ + { + "bbox": [ + 106, + 339, + 505, + 352 + ], + "score": 1.0, + "content": "model to solve real 
world tasks by online planning, but is limited to short-horizon tasks and requires", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 351, + 505, + 364 + ], + "spans": [ + { + "bbox": [ + 105, + 351, + 505, + 364 + ], + "score": 1.0, + "content": "generating images during planning, making it computationally expensive. Yang et al. (2019; 2022)", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 106, + 362, + 505, + 375 + ], + "spans": [ + { + "bbox": [ + 106, + 362, + 505, + 375 + ], + "score": 1.0, + "content": "learn quadruped locomotion through a model-based approach by predicting foot placement and", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 374, + 505, + 388 + ], + "spans": [ + { + "bbox": [ + 105, + 374, + 505, + 388 + ], + "score": 1.0, + "content": "leveraging a domain-specific controller to achieve them. Ha et al. (2020) learn a quadruped walking", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 386, + 505, + 399 + ], + "spans": [ + { + "bbox": [ + 105, + 386, + 505, + 399 + ], + "score": 1.0, + "content": "policy by relying on a scripted reset policy, so the robot does not have to learn to stand up. SOLAR", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 106, + 398, + 505, + 411 + ], + "spans": [ + { + "bbox": [ + 106, + 398, + 505, + 411 + ], + "score": 1.0, + "content": "(Zhang et al., 2019) learns a latent dynamics model from images and demonstrates reaching and", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 409, + 505, + 423 + ], + "spans": [ + { + "bbox": [ + 105, + 409, + 505, + 423 + ], + "score": 1.0, + "content": "pushing with a robot arm. Nagabandi et al. 
(2019) learns manipulation policies by planning through", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 420, + 506, + 433 + ], + "spans": [ + { + "bbox": [ + 105, + 420, + 506, + 433 + ], + "score": 1.0, + "content": "a learned dynamics model from state observations. In comparison, our experiments show successful", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 433, + 506, + 446 + ], + "spans": [ + { + "bbox": [ + 105, + 433, + 506, + 446 + ], + "score": 1.0, + "content": "learning across 4 challenging robot tasks that cover a wide range of challenges and sensory modalities,", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 443, + 349, + 458 + ], + "spans": [ + { + "bbox": [ + 105, + 443, + 349, + 458 + ], + "score": 1.0, + "content": "with a single learning algorithm and hyperparameter setting.", + "type": "text" + } + ], + "index": 26 + } + ], + "index": 20.5, + "bbox_fs": [ + 105, + 316, + 506, + 458 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 474, + 179, + 487 + ], + "lines": [ + { + "bbox": [ + 104, + 472, + 181, + 490 + ], + "spans": [ + { + "bbox": [ + 104, + 472, + 181, + 490 + ], + "score": 1.0, + "content": "5 Discussion", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 27 + }, + { + "type": "text", + "bbox": [ + 106, + 502, + 505, + 596 + ], + "lines": [ + { + "bbox": [ + 106, + 503, + 506, + 515 + ], + "spans": [ + { + "bbox": [ + 106, + 503, + 506, + 515 + ], + "score": 1.0, + "content": "We applied Dreamer to physical robot learning, finding that modern world models enable sample-", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 515, + 505, + 527 + ], + "spans": [ + { + "bbox": [ + 106, + 515, + 505, + 527 + ], + "score": 1.0, + "content": "efficient robot learning for a range of tasks, from scratch in the real world and without simulators. 
We", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 106, + 526, + 506, + 539 + ], + "spans": [ + { + "bbox": [ + 106, + 526, + 506, + 539 + ], + "score": 1.0, + "content": "also find that the approach is generally applicable in that it can solve robot locomotion, manipulation,", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 106, + 538, + 505, + 551 + ], + "spans": [ + { + "bbox": [ + 106, + 538, + 505, + 551 + ], + "score": 1.0, + "content": "and navigation tasks without changing hyperparameters. Dreamer taught a quadruped robot to roll", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 548, + 505, + 564 + ], + "spans": [ + { + "bbox": [ + 105, + 548, + 505, + 564 + ], + "score": 1.0, + "content": "off the back, stand up, and walk in 1 hour from scratch, which previously required extensive training", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 561, + 505, + 574 + ], + "spans": [ + { + "bbox": [ + 105, + 561, + 505, + 574 + ], + "score": 1.0, + "content": "in simulation followed by transfer to the real world or parameterized trajectory generators and given", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 106, + 574, + 504, + 585 + ], + "spans": [ + { + "bbox": [ + 106, + 574, + 504, + 585 + ], + "score": 1.0, + "content": "reset policies. 
We also demonstrate learning to pick and place objects from pixels and sparse rewards", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 106, + 585, + 241, + 596 + ], + "spans": [ + { + "bbox": [ + 106, + 585, + 241, + 596 + ], + "score": 1.0, + "content": "on two robot arms in 8–10 hours.", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 31.5, + "bbox_fs": [ + 105, + 503, + 506, + 596 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 607, + 505, + 665 + ], + "lines": [ + { + "bbox": [ + 105, + 606, + 505, + 620 + ], + "spans": [ + { + "bbox": [ + 105, + 606, + 505, + 620 + ], + "score": 1.0, + "content": "Limitations While Dreamer shows promising results, learning on hardware over many hours", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 619, + 505, + 631 + ], + "spans": [ + { + "bbox": [ + 105, + 619, + 505, + 631 + ], + "score": 1.0, + "content": "creates wear on robots that may require human intervention or repair. Additionally, more work is", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 630, + 506, + 644 + ], + "spans": [ + { + "bbox": [ + 105, + 630, + 506, + 644 + ], + "score": 1.0, + "content": "required to explore the limits of Dreamer and our baselines by training for a longer time. 
Finally, we", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 641, + 505, + 657 + ], + "spans": [ + { + "bbox": [ + 105, + 641, + 505, + 657 + ], + "score": 1.0, + "content": "see tackling more challenging tasks, potentially by combining the benefits of fast real world learning", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 106, + 653, + 372, + 666 + ], + "spans": [ + { + "bbox": [ + 106, + 653, + 372, + 666 + ], + "score": 1.0, + "content": "with those of simulators, as an impactful future research direction.", + "type": "text" + } + ], + "index": 40 + } + ], + "index": 38, + "bbox_fs": [ + 105, + 606, + 506, + 666 + ] + }, + { + "type": "text", + "bbox": [ + 108, + 676, + 504, + 722 + ], + "lines": [ + { + "bbox": [ + 105, + 675, + 505, + 689 + ], + "spans": [ + { + "bbox": [ + 105, + 675, + 505, + 689 + ], + "score": 1.0, + "content": "Acknowledgements We thank Stephen James and Justin Kerr for helpful suggestions and help with", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 686, + 506, + 701 + ], + "spans": [ + { + "bbox": [ + 105, + 686, + 506, + 701 + ], + "score": 1.0, + "content": "printing the protective shell of the quadruped robot. We thank Ademi Adeniji for help with setting up", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 698, + 505, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 505, + 712 + ], + "score": 1.0, + "content": "the XArm robot and Raven Huang for help with setting up the UR5 robot. 
This work was supported", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 711, + 498, + 724 + ], + "spans": [ + { + "bbox": [ + 105, + 711, + 498, + 724 + ], + "score": 1.0, + "content": "in part by an NSF Fellowship, NSF NRI #2024675, and the Vanier Canada Graduate Scholarship.", + "type": "text" + } + ], + "index": 44 + } + ], + "index": 42.5, + "bbox_fs": [ + 105, + 675, + 506, + 724 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 105, + 72, + 507, + 727 + ], + "lines": [ + { + "bbox": [ + 106, + 70, + 165, + 87 + ], + "spans": [ + { + "bbox": [ + 106, + 70, + 165, + 87 + ], + "score": 1.0, + "content": "References", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 95, + 506, + 111 + ], + "spans": [ + { + "bbox": [ + 105, + 95, + 506, + 111 + ], + "score": 1.0, + "content": "D. Hafner, T. Lillicrap, J. Ba, and M. Norouzi. Dream to control: Learning behaviors by latent", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 115, + 108, + 332, + 121 + ], + "spans": [ + { + "bbox": [ + 115, + 108, + 332, + 121 + ], + "score": 1.0, + "content": "imagination. arXiv preprint arXiv:1912.01603, 2019.", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 128, + 505, + 142 + ], + "spans": [ + { + "bbox": [ + 105, + 128, + 505, + 142 + ], + "score": 1.0, + "content": "D. Hafner, T. Lillicrap, M. Norouzi, and J. Ba. Mastering atari with discrete world models. arXiv", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 113, + 140, + 255, + 154 + ], + "spans": [ + { + "bbox": [ + 113, + 140, + 255, + 154 + ], + "score": 1.0, + "content": "preprint arXiv:2010.02193, 2020.", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 159, + 506, + 176 + ], + "spans": [ + { + "bbox": [ + 104, + 159, + 506, + 176 + ], + "score": 1.0, + "content": "Y. Gal, R. McAllister, and C. E. Rasmussen. 
Improving pilco with bayesian neural network dynamics", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 114, + 172, + 396, + 188 + ], + "spans": [ + { + "bbox": [ + 114, + 172, + 396, + 188 + ], + "score": 1.0, + "content": "models. In Data-Efficient Machine Learning workshop, ICML, 2016.", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 193, + 505, + 208 + ], + "spans": [ + { + "bbox": [ + 105, + 193, + 505, + 208 + ], + "score": 1.0, + "content": "F. Ebert, C. Finn, S. Dasari, A. Xie, A. Lee, and S. Levine. Visual foresight: Model-based deep", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 114, + 206, + 505, + 220 + ], + "spans": [ + { + "bbox": [ + 114, + 206, + 505, + 220 + ], + "score": 1.0, + "content": "reinforcement learning for vision-based robotic control. arXiv preprint arXiv:1812.00568, 2018.", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 227, + 506, + 240 + ], + "spans": [ + { + "bbox": [ + 105, + 227, + 506, + 240 + ], + "score": 1.0, + "content": "R. Sekar, O. Rybkin, K. Daniilidis, P. Abbeel, D. Hafner, and D. Pathak. Planning to explore via self-", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 115, + 238, + 506, + 252 + ], + "spans": [ + { + "bbox": [ + 115, + 238, + 506, + 252 + ], + "score": 1.0, + "content": "supervised world models. In International Conference on Machine Learning, pages 8583–8592.", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 115, + 250, + 174, + 263 + ], + "spans": [ + { + "bbox": [ + 115, + 250, + 174, + 263 + ], + "score": 1.0, + "content": "PMLR, 2020.", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 271, + 505, + 284 + ], + "spans": [ + { + "bbox": [ + 105, + 271, + 505, + 284 + ], + "score": 1.0, + "content": "T. Yu, A. Kumar, R. Rafailov, A. Rajeswaran, S. Levine, and C. Finn. 
Combo: Conservative", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 114, + 281, + 507, + 298 + ], + "spans": [ + { + "bbox": [ + 114, + 281, + 507, + 298 + ], + "score": 1.0, + "content": "offline model-based policy optimization. Advances in neural information processing systems, 34:", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 115, + 294, + 201, + 307 + ], + "spans": [ + { + "bbox": [ + 115, + 294, + 201, + 307 + ], + "score": 1.0, + "content": "28954–28967, 2021.", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 314, + 507, + 330 + ], + "spans": [ + { + "bbox": [ + 104, + 314, + 507, + 330 + ], + "score": 1.0, + "content": "D. Hafner, T. Lillicrap, I. Fischer, R. Villegas, D. Ha, H. Lee, and J. Davidson. Learning latent", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 114, + 327, + 424, + 342 + ], + "spans": [ + { + "bbox": [ + 114, + 327, + 424, + 342 + ], + "score": 1.0, + "content": "dynamics for planning from pixels. arXiv preprint arXiv:1811.04551, 2018.", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 346, + 507, + 363 + ], + "spans": [ + { + "bbox": [ + 104, + 346, + 507, + 363 + ], + "score": 1.0, + "content": "D. P. Kingma and M. Welling. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114,", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 115, + 358, + 143, + 375 + ], + "spans": [ + { + "bbox": [ + 115, + 358, + 143, + 375 + ], + "score": 1.0, + "content": "2013.", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 381, + 505, + 395 + ], + "spans": [ + { + "bbox": [ + 105, + 381, + 505, + 395 + ], + "score": 1.0, + "content": "D. J. Rezende, S. Mohamed, and D. Wierstra. 
Stochastic backpropagation and approximate inference", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 114, + 393, + 385, + 407 + ], + "spans": [ + { + "bbox": [ + 114, + 393, + 385, + 407 + ], + "score": 1.0, + "content": "in deep generative models. arXiv preprint arXiv:1401.4082, 2014.", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 414, + 466, + 427 + ], + "spans": [ + { + "bbox": [ + 105, + 414, + 466, + 427 + ], + "score": 1.0, + "content": "R. S. Sutton and A. G. Barto. Reinforcement learning: An introduction. MIT press, 2018.", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 435, + 506, + 449 + ], + "spans": [ + { + "bbox": [ + 105, + 435, + 506, + 449 + ], + "score": 1.0, + "content": "R. J. Williams. Simple statistical gradient-following algorithms for connectionist reinforcement", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 115, + 447, + 324, + 460 + ], + "spans": [ + { + "bbox": [ + 115, + 447, + 324, + 460 + ], + "score": 1.0, + "content": "learning. Machine learning, 8(3-4):229–256, 1992.", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 104, + 466, + 506, + 483 + ], + "spans": [ + { + "bbox": [ + 104, + 466, + 506, + 483 + ], + "score": 1.0, + "content": "M. Henaff, A. Canziani, and Y. LeCun. Model-predictive policy learning with uncertainty", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 113, + 479, + 447, + 493 + ], + "spans": [ + { + "bbox": [ + 113, + 479, + 447, + 493 + ], + "score": 1.0, + "content": "regularization for driving in dense traffic. arXiv preprint arXiv:1901.02705, 2019.", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 500, + 505, + 513 + ], + "spans": [ + { + "bbox": [ + 105, + 500, + 505, + 513 + ], + "score": 1.0, + "content": "D. P. Kingma and J. Ba. Adam: A method for stochastic optimization. 
arXiv preprint arXiv:1412.6980,", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 114, + 512, + 144, + 526 + ], + "spans": [ + { + "bbox": [ + 114, + 512, + 144, + 526 + ], + "score": 1.0, + "content": "2014.", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 531, + 507, + 548 + ], + "spans": [ + { + "bbox": [ + 105, + 531, + 507, + 548 + ], + "score": 1.0, + "content": "V. Mnih, K. Kavukcuoglu, D. Silver, A. A. Rusu, J. Veness, M. G. Bellemare, A. Graves,", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 113, + 541, + 507, + 561 + ], + "spans": [ + { + "bbox": [ + 113, + 541, + 507, + 561 + ], + "score": 1.0, + "content": "M. Riedmiller, A. K. Fidjeland, G. Ostrovski, et al. Human-level control through deep", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 115, + 556, + 335, + 570 + ], + "spans": [ + { + "bbox": [ + 115, + 556, + 335, + 570 + ], + "score": 1.0, + "content": "reinforcement learning. Nature, 518(7540):529, 2015.", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 104, + 576, + 506, + 592 + ], + "spans": [ + { + "bbox": [ + 104, + 576, + 506, + 592 + ], + "score": 1.0, + "content": "T. P. Lillicrap, J. J. Hunt, A. Pritzel, N. Heess, T. Erez, Y. Tassa, D. Silver, and D. Wierstra. Continuous", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 114, + 588, + 449, + 604 + ], + "spans": [ + { + "bbox": [ + 114, + 588, + 449, + 604 + ], + "score": 1.0, + "content": "control with deep reinforcement learning. arXiv preprint arXiv:1509.02971, 2015.", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 104, + 609, + 506, + 625 + ], + "spans": [ + { + "bbox": [ + 104, + 609, + 506, + 625 + ], + "score": 1.0, + "content": "T. Haarnoja, A. Zhou, P. Abbeel, and S. Levine. 
Soft actor-critic: Off-policy maximum entropy deep", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 113, + 621, + 471, + 637 + ], + "spans": [ + { + "bbox": [ + 113, + 621, + 471, + 637 + ], + "score": 1.0, + "content": "reinforcement learning with a stochastic actor. arXiv preprint arXiv:1801.01290, 2018a.", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 104, + 642, + 507, + 658 + ], + "spans": [ + { + "bbox": [ + 104, + 642, + 507, + 658 + ], + "score": 1.0, + "content": "T. Haarnoja, A. Zhou, K. Hartikainen, G. Tucker, S. Ha, J. Tan, V. Kumar, H. Zhu, A. Gupta,", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 114, + 655, + 506, + 668 + ], + "spans": [ + { + "bbox": [ + 114, + 655, + 506, + 668 + ], + "score": 1.0, + "content": "P. Abbeel, et al. Soft actor-critic algorithms and applications. arXiv preprint arXiv:1812.05905,", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 115, + 664, + 148, + 681 + ], + "spans": [ + { + "bbox": [ + 115, + 664, + 148, + 681 + ], + "score": 1.0, + "content": "2018b.", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 687, + 507, + 700 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 507, + 700 + ], + "score": 1.0, + "content": "M. Hessel, J. Modayil, H. Van Hasselt, T. Schaul, G. Ostrovski, W. Dabney, D. Horgan, B. Piot,", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 114, + 698, + 507, + 714 + ], + "spans": [ + { + "bbox": [ + 114, + 698, + 507, + 714 + ], + "score": 1.0, + "content": "M. Azar, and D. Silver. Rainbow: Combining improvements in deep reinforcement learning. 
In", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 115, + 711, + 374, + 724 + ], + "spans": [ + { + "bbox": [ + 115, + 711, + 374, + 724 + ], + "score": 1.0, + "content": "Thirty-Second AAAI Conference on Artificial Intelligence, 2018.", + "type": "text" + } + ], + "index": 40 + } + ], + "index": 20 + } + ], + "page_idx": 8, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 302, + 741, + 309, + 750 + ], + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 752 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 752 + ], + "score": 1.0, + "content": "9", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "list", + "bbox": [ + 105, + 72, + 507, + 727 + ], + "lines": [ + { + "bbox": [ + 106, + 70, + 165, + 87 + ], + "spans": [ + { + "bbox": [ + 106, + 70, + 165, + 87 + ], + "score": 1.0, + "content": "References", + "type": "text" + } + ], + "index": 0, + "is_list_start_line": true, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 95, + 506, + 111 + ], + "spans": [ + { + "bbox": [ + 105, + 95, + 506, + 111 + ], + "score": 1.0, + "content": "D. Hafner, T. Lillicrap, J. Ba, and M. Norouzi. Dream to control: Learning behaviors by latent", + "type": "text" + } + ], + "index": 1, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 108, + 332, + 121 + ], + "spans": [ + { + "bbox": [ + 115, + 108, + 332, + 121 + ], + "score": 1.0, + "content": "imagination. arXiv preprint arXiv:1912.01603, 2019.", + "type": "text" + } + ], + "index": 2, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 128, + 505, + 142 + ], + "spans": [ + { + "bbox": [ + 105, + 128, + 505, + 142 + ], + "score": 1.0, + "content": "D. Hafner, T. Lillicrap, M. Norouzi, and J. Ba. Mastering atari with discrete world models. 
arXiv", + "type": "text" + } + ], + "index": 3, + "is_list_start_line": true + }, + { + "bbox": [ + 113, + 140, + 255, + 154 + ], + "spans": [ + { + "bbox": [ + 113, + 140, + 255, + 154 + ], + "score": 1.0, + "content": "preprint arXiv:2010.02193, 2020.", + "type": "text" + } + ], + "index": 4, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 159, + 506, + 176 + ], + "spans": [ + { + "bbox": [ + 104, + 159, + 506, + 176 + ], + "score": 1.0, + "content": "Y. Gal, R. McAllister, and C. E. Rasmussen. Improving pilco with bayesian neural network dynamics", + "type": "text" + } + ], + "index": 5, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 172, + 396, + 188 + ], + "spans": [ + { + "bbox": [ + 114, + 172, + 396, + 188 + ], + "score": 1.0, + "content": "models. In Data-Efficient Machine Learning workshop, ICML, 2016.", + "type": "text" + } + ], + "index": 6, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 193, + 505, + 208 + ], + "spans": [ + { + "bbox": [ + 105, + 193, + 505, + 208 + ], + "score": 1.0, + "content": "F. Ebert, C. Finn, S. Dasari, A. Xie, A. Lee, and S. Levine. Visual foresight: Model-based deep", + "type": "text" + } + ], + "index": 7, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 206, + 505, + 220 + ], + "spans": [ + { + "bbox": [ + 114, + 206, + 505, + 220 + ], + "score": 1.0, + "content": "reinforcement learning for vision-based robotic control. arXiv preprint arXiv:1812.00568, 2018.", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 227, + 506, + 240 + ], + "spans": [ + { + "bbox": [ + 105, + 227, + 506, + 240 + ], + "score": 1.0, + "content": "R. Sekar, O. Rybkin, K. Daniilidis, P. Abbeel, D. Hafner, and D. Pathak. Planning to explore via self-", + "type": "text" + } + ], + "index": 9, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 238, + 506, + 252 + ], + "spans": [ + { + "bbox": [ + 115, + 238, + 506, + 252 + ], + "score": 1.0, + "content": "supervised world models. 
In International Conference on Machine Learning, pages 8583–8592.", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 115, + 250, + 174, + 263 + ], + "spans": [ + { + "bbox": [ + 115, + 250, + 174, + 263 + ], + "score": 1.0, + "content": "PMLR, 2020.", + "type": "text" + } + ], + "index": 11, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 271, + 505, + 284 + ], + "spans": [ + { + "bbox": [ + 105, + 271, + 505, + 284 + ], + "score": 1.0, + "content": "T. Yu, A. Kumar, R. Rafailov, A. Rajeswaran, S. Levine, and C. Finn. Combo: Conservative", + "type": "text" + } + ], + "index": 12, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 281, + 507, + 298 + ], + "spans": [ + { + "bbox": [ + 114, + 281, + 507, + 298 + ], + "score": 1.0, + "content": "offline model-based policy optimization. Advances in neural information processing systems, 34:", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 115, + 294, + 201, + 307 + ], + "spans": [ + { + "bbox": [ + 115, + 294, + 201, + 307 + ], + "score": 1.0, + "content": "28954–28967, 2021.", + "type": "text" + } + ], + "index": 14, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 314, + 507, + 330 + ], + "spans": [ + { + "bbox": [ + 104, + 314, + 507, + 330 + ], + "score": 1.0, + "content": "D. Hafner, T. Lillicrap, I. Fischer, R. Villegas, D. Ha, H. Lee, and J. Davidson. Learning latent", + "type": "text" + } + ], + "index": 15, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 327, + 424, + 342 + ], + "spans": [ + { + "bbox": [ + 114, + 327, + 424, + 342 + ], + "score": 1.0, + "content": "dynamics for planning from pixels. arXiv preprint arXiv:1811.04551, 2018.", + "type": "text" + } + ], + "index": 16, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 346, + 507, + 363 + ], + "spans": [ + { + "bbox": [ + 104, + 346, + 507, + 363 + ], + "score": 1.0, + "content": "D. P. Kingma and M. Welling. Auto-encoding variational bayes. 
arXiv preprint arXiv:1312.6114,", + "type": "text" + } + ], + "index": 17, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 358, + 143, + 375 + ], + "spans": [ + { + "bbox": [ + 115, + 358, + 143, + 375 + ], + "score": 1.0, + "content": "2013.", + "type": "text" + } + ], + "index": 18, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 381, + 505, + 395 + ], + "spans": [ + { + "bbox": [ + 105, + 381, + 505, + 395 + ], + "score": 1.0, + "content": "D. J. Rezende, S. Mohamed, and D. Wierstra. Stochastic backpropagation and approximate inference", + "type": "text" + } + ], + "index": 19, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 393, + 385, + 407 + ], + "spans": [ + { + "bbox": [ + 114, + 393, + 385, + 407 + ], + "score": 1.0, + "content": "in deep generative models. arXiv preprint arXiv:1401.4082, 2014.", + "type": "text" + } + ], + "index": 20, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 414, + 466, + 427 + ], + "spans": [ + { + "bbox": [ + 105, + 414, + 466, + 427 + ], + "score": 1.0, + "content": "R. S. Sutton and A. G. Barto. Reinforcement learning: An introduction. MIT press, 2018.", + "type": "text" + } + ], + "index": 21, + "is_list_start_line": true, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 435, + 506, + 449 + ], + "spans": [ + { + "bbox": [ + 105, + 435, + 506, + 449 + ], + "score": 1.0, + "content": "R. J. Williams. Simple statistical gradient-following algorithms for connectionist reinforcement", + "type": "text" + } + ], + "index": 22, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 447, + 324, + 460 + ], + "spans": [ + { + "bbox": [ + 115, + 447, + 324, + 460 + ], + "score": 1.0, + "content": "learning. Machine learning, 8(3-4):229–256, 1992.", + "type": "text" + } + ], + "index": 23, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 466, + 506, + 483 + ], + "spans": [ + { + "bbox": [ + 104, + 466, + 506, + 483 + ], + "score": 1.0, + "content": "M. Henaff, A. Canziani, and Y. 
LeCun. Model-predictive policy learning with uncertainty", + "type": "text" + } + ], + "index": 24, + "is_list_start_line": true + }, + { + "bbox": [ + 113, + 479, + 447, + 493 + ], + "spans": [ + { + "bbox": [ + 113, + 479, + 447, + 493 + ], + "score": 1.0, + "content": "regularization for driving in dense traffic. arXiv preprint arXiv:1901.02705, 2019.", + "type": "text" + } + ], + "index": 25, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 500, + 505, + 513 + ], + "spans": [ + { + "bbox": [ + 105, + 500, + 505, + 513 + ], + "score": 1.0, + "content": "D. P. Kingma and J. Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980,", + "type": "text" + } + ], + "index": 26, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 512, + 144, + 526 + ], + "spans": [ + { + "bbox": [ + 114, + 512, + 144, + 526 + ], + "score": 1.0, + "content": "2014.", + "type": "text" + } + ], + "index": 27, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 531, + 507, + 548 + ], + "spans": [ + { + "bbox": [ + 105, + 531, + 507, + 548 + ], + "score": 1.0, + "content": "V. Mnih, K. Kavukcuoglu, D. Silver, A. A. Rusu, J. Veness, M. G. Bellemare, A. Graves,", + "type": "text" + } + ], + "index": 28, + "is_list_start_line": true + }, + { + "bbox": [ + 113, + 541, + 507, + 561 + ], + "spans": [ + { + "bbox": [ + 113, + 541, + 507, + 561 + ], + "score": 1.0, + "content": "M. Riedmiller, A. K. Fidjeland, G. Ostrovski, et al. Human-level control through deep", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 115, + 556, + 335, + 570 + ], + "spans": [ + { + "bbox": [ + 115, + 556, + 335, + 570 + ], + "score": 1.0, + "content": "reinforcement learning. Nature, 518(7540):529, 2015.", + "type": "text" + } + ], + "index": 30, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 576, + 506, + 592 + ], + "spans": [ + { + "bbox": [ + 104, + 576, + 506, + 592 + ], + "score": 1.0, + "content": "T. P. Lillicrap, J. J. Hunt, A. Pritzel, N. 
Heess, T. Erez, Y. Tassa, D. Silver, and D. Wierstra. Continuous", + "type": "text" + } + ], + "index": 31, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 588, + 449, + 604 + ], + "spans": [ + { + "bbox": [ + 114, + 588, + 449, + 604 + ], + "score": 1.0, + "content": "control with deep reinforcement learning. arXiv preprint arXiv:1509.02971, 2015.", + "type": "text" + } + ], + "index": 32, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 609, + 506, + 625 + ], + "spans": [ + { + "bbox": [ + 104, + 609, + 506, + 625 + ], + "score": 1.0, + "content": "T. Haarnoja, A. Zhou, P. Abbeel, and S. Levine. Soft actor-critic: Off-policy maximum entropy deep", + "type": "text" + } + ], + "index": 33, + "is_list_start_line": true + }, + { + "bbox": [ + 113, + 621, + 471, + 637 + ], + "spans": [ + { + "bbox": [ + 113, + 621, + 471, + 637 + ], + "score": 1.0, + "content": "reinforcement learning with a stochastic actor. arXiv preprint arXiv:1801.01290, 2018a.", + "type": "text" + } + ], + "index": 34, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 642, + 507, + 658 + ], + "spans": [ + { + "bbox": [ + 104, + 642, + 507, + 658 + ], + "score": 1.0, + "content": "T. Haarnoja, A. Zhou, K. Hartikainen, G. Tucker, S. Ha, J. Tan, V. Kumar, H. Zhu, A. Gupta,", + "type": "text" + } + ], + "index": 35, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 655, + 506, + 668 + ], + "spans": [ + { + "bbox": [ + 114, + 655, + 506, + 668 + ], + "score": 1.0, + "content": "P. Abbeel, et al. Soft actor-critic algorithms and applications. 
arXiv preprint arXiv:1812.05905,", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 115, + 664, + 148, + 681 + ], + "spans": [ + { + "bbox": [ + 115, + 664, + 148, + 681 + ], + "score": 1.0, + "content": "2018b.", + "type": "text" + } + ], + "index": 37, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 687, + 507, + 700 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 507, + 700 + ], + "score": 1.0, + "content": "M. Hessel, J. Modayil, H. Van Hasselt, T. Schaul, G. Ostrovski, W. Dabney, D. Horgan, B. Piot,", + "type": "text" + } + ], + "index": 38, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 698, + 507, + 714 + ], + "spans": [ + { + "bbox": [ + 114, + 698, + 507, + 714 + ], + "score": 1.0, + "content": "M. Azar, and D. Silver. Rainbow: Combining improvements in deep reinforcement learning. In", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 115, + 711, + 374, + 724 + ], + "spans": [ + { + "bbox": [ + 115, + 711, + 374, + 724 + ], + "score": 1.0, + "content": "Thirty-Second AAAI Conference on Artificial Intelligence, 2018.", + "type": "text" + } + ], + "index": 40, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 73, + 506, + 86 + ], + "spans": [ + { + "bbox": [ + 105, + 73, + 506, + 86 + ], + "score": 1.0, + "content": "J. Schrittwieser, I. Antonoglou, T. Hubert, K. Simonyan, L. Sifre, S. Schmitt, A. Guez, E. Lockhart,", + "type": "text", + "cross_page": true + } + ], + "index": 0, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 83, + 506, + 97 + ], + "spans": [ + { + "bbox": [ + 115, + 83, + 506, + 97 + ], + "score": 1.0, + "content": "D. Hassabis, T. Graepel, et al. Mastering atari, go, chess and shogi by planning with a learned", + "type": "text", + "cross_page": true + } + ], + "index": 1 + }, + { + "bbox": [ + 114, + 96, + 310, + 109 + ], + "spans": [ + { + "bbox": [ + 114, + 96, + 310, + 109 + ], + "score": 1.0, + "content": "model. 
arXiv preprint arXiv:1911.08265, 2019.", + "type": "text", + "cross_page": true + } + ], + "index": 2, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 115, + 506, + 130 + ], + "spans": [ + { + "bbox": [ + 105, + 115, + 506, + 130 + ], + "score": 1.0, + "content": "J. Schulman, F. Wolski, P. Dhariwal, A. Radford, and O. Klimov. Proximal policy optimization", + "type": "text", + "cross_page": true + } + ], + "index": 3, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 127, + 328, + 142 + ], + "spans": [ + { + "bbox": [ + 115, + 127, + 328, + 142 + ], + "score": 1.0, + "content": "algorithms. arXiv preprint arXiv:1707.06347, 2017.", + "type": "text", + "cross_page": true + } + ], + "index": 4, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 147, + 506, + 161 + ], + "spans": [ + { + "bbox": [ + 105, + 147, + 506, + 161 + ], + "score": 1.0, + "content": "D. Yarats, R. Fergus, A. Lazaric, and L. Pinto. Mastering visual continuous control: Improved", + "type": "text", + "cross_page": true + } + ], + "index": 5, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 159, + 441, + 173 + ], + "spans": [ + { + "bbox": [ + 115, + 159, + 441, + 173 + ], + "score": 1.0, + "content": "data-augmented reinforcement learning. arXiv preprint arXiv:2107.09645, 2021.", + "type": "text", + "cross_page": true + } + ], + "index": 6, + "is_list_end_line": true + }, + { + "bbox": [ + 103, + 176, + 506, + 194 + ], + "spans": [ + { + "bbox": [ + 103, + 176, + 506, + 194 + ], + "score": 1.0, + "content": "A. A. Rusu, M. Vecerik, T. RothΓΆrl, N. Heess, R. Pascanu, and R. Hadsell. 
Sim-to-real robot learning", + "type": "text", + "cross_page": true + } + ], + "index": 7, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 190, + 279, + 205 + ], + "spans": [ + { + "bbox": [ + 115, + 190, + 279, + 205 + ], + "score": 1.0, + "content": "from pixels with progressive nets, 2016.", + "type": "text", + "cross_page": true + } + ], + "index": 8, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 210, + 506, + 224 + ], + "spans": [ + { + "bbox": [ + 105, + 210, + 506, + 224 + ], + "score": 1.0, + "content": "X. B. Peng, M. Andrychowicz, W. Zaremba, and P. Abbeel. Sim-to-real transfer of robotic control", + "type": "text", + "cross_page": true + } + ], + "index": 9, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 222, + 505, + 234 + ], + "spans": [ + { + "bbox": [ + 115, + 222, + 505, + 234 + ], + "score": 1.0, + "content": "with dynamics randomization. In 2018 IEEE International Conference on Robotics and Automation", + "type": "text", + "cross_page": true + } + ], + "index": 10 + }, + { + "bbox": [ + 114, + 234, + 381, + 248 + ], + "spans": [ + { + "bbox": [ + 114, + 234, + 381, + 248 + ], + "score": 1.0, + "content": "(ICRA), pages 1–8, May 2018. doi:10.1109/ICRA.2018.8460528.", + "type": "text", + "cross_page": true + } + ], + "index": 11, + "is_list_end_line": true + }, + { + "bbox": [ + 103, + 251, + 506, + 268 + ], + "spans": [ + { + "bbox": [ + 103, + 251, + 506, + 268 + ], + "score": 1.0, + "content": "N. Rudin, D. Hoeller, P. Reist, and M. Hutter. 
Learning to walk in minutes using massively parallel", + "type": "text", + "cross_page": true + } + ], + "index": 12, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 265, + 260, + 279 + ], + "spans": [ + { + "bbox": [ + 115, + 265, + 260, + 279 + ], + "score": 1.0, + "content": "deep reinforcement learning, 2021.", + "type": "text", + "cross_page": true + } + ], + "index": 13, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 285, + 506, + 299 + ], + "spans": [ + { + "bbox": [ + 105, + 285, + 506, + 299 + ], + "score": 1.0, + "content": "J. Lee, J. Hwangbo, L. Wellhausen, V. Koltun, and M. Hutter. Learning quadrupedal locomotion over", + "type": "text", + "cross_page": true + } + ], + "index": 14, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 295, + 507, + 311 + ], + "spans": [ + { + "bbox": [ + 114, + 295, + 507, + 311 + ], + "score": 1.0, + "content": "challenging terrain. Science Robotics, 5(47), oct 2020. doi:10.1126/scirobotics.abc5986. URL", + "type": "text", + "cross_page": true + } + ], + "index": 15 + }, + { + "bbox": [ + 115, + 308, + 357, + 322 + ], + "spans": [ + { + "bbox": [ + 115, + 308, + 357, + 322 + ], + "score": 1.0, + "content": "https://doi.org/10.1126%2Fscirobotics.abc5986.", + "type": "text", + "cross_page": true + } + ], + "index": 16, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 327, + 506, + 343 + ], + "spans": [ + { + "bbox": [ + 105, + 327, + 506, + 343 + ], + "score": 1.0, + "content": "Y. Yang, K. Caluwaerts, A. Iscen, T. Zhang, J. Tan, and V. Sindhwani. 
Data efficient reinforcement", + "type": "text", + "cross_page": true + } + ], + "index": 17, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 340, + 251, + 353 + ], + "spans": [ + { + "bbox": [ + 115, + 340, + 251, + 353 + ], + "score": 1.0, + "content": "learning for legged robots, 2019.", + "type": "text", + "cross_page": true + } + ], + "index": 18, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 358, + 506, + 374 + ], + "spans": [ + { + "bbox": [ + 104, + 358, + 506, + 374 + ], + "score": 1.0, + "content": "S. Nair, A. Rajeswaran, V. Kumar, C. Finn, and A. Gupta. R3m: A universal visual representation for", + "type": "text", + "cross_page": true + } + ], + "index": 19, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 373, + 222, + 385 + ], + "spans": [ + { + "bbox": [ + 115, + 373, + 222, + 385 + ], + "score": 1.0, + "content": "robot manipulation, 2022.", + "type": "text", + "cross_page": true + } + ], + "index": 20, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 392, + 506, + 405 + ], + "spans": [ + { + "bbox": [ + 106, + 392, + 506, + 405 + ], + "score": 1.0, + "content": "OpenAI, M. Andrychowicz, B. Baker, M. Chociej, R. Jozefowicz, B. McGrew, J. Pachocki, A. Petron,", + "type": "text", + "cross_page": true + } + ], + "index": 21, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 403, + 505, + 417 + ], + "spans": [ + { + "bbox": [ + 115, + 403, + 505, + 417 + ], + "score": 1.0, + "content": "M. Plappert, G. Powell, A. Ray, J. Schneider, S. Sidor, J. Tobin, P. Welinder, L. Weng, and", + "type": "text", + "cross_page": true + } + ], + "index": 22 + }, + { + "bbox": [ + 115, + 415, + 366, + 429 + ], + "spans": [ + { + "bbox": [ + 115, + 415, + 366, + 429 + ], + "score": 1.0, + "content": "W. Zaremba. 
Learning dexterous in-hand manipulation, 2018.", + "type": "text", + "cross_page": true + } + ], + "index": 23, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 434, + 506, + 448 + ], + "spans": [ + { + "bbox": [ + 105, + 434, + 506, + 448 + ], + "score": 1.0, + "content": "A. Irpan, C. Harris, J. Ibarz, K. Rao, M. Khansari, and S. Levine. Rl-cyclegan: Improving deep-rl", + "type": "text", + "cross_page": true + } + ], + "index": 24, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 446, + 506, + 461 + ], + "spans": [ + { + "bbox": [ + 114, + 446, + 506, + 461 + ], + "score": 1.0, + "content": "robotics with simulation-to-real. In Proceedings of the IEEE Conference on Computer Vision and", + "type": "text", + "cross_page": true + } + ], + "index": 25 + }, + { + "bbox": [ + 115, + 459, + 281, + 471 + ], + "spans": [ + { + "bbox": [ + 115, + 459, + 281, + 471 + ], + "score": 1.0, + "content": "Pattern Recognition (CVPR 2020), 2020.", + "type": "text", + "cross_page": true + } + ], + "index": 26, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 478, + 496, + 490 + ], + "spans": [ + { + "bbox": [ + 105, + 478, + 496, + 490 + ], + "score": 1.0, + "content": "A. Kumar, Z. Fu, D. Pathak, and J. Malik. Rma: Rapid motor adaptation for legged robots, 2021.", + "type": "text", + "cross_page": true + } + ], + "index": 27, + "is_list_start_line": true + }, + { + "bbox": [ + 105, + 497, + 506, + 512 + ], + "spans": [ + { + "bbox": [ + 105, + 497, + 506, + 512 + ], + "score": 1.0, + "content": "J. Siekmann, K. Green, J. Warila, A. Fern, and J. Hurst. 
Blind bipedal stair traversal via sim-to-real", + "type": "text", + "cross_page": true + } + ], + "index": 28, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 511, + 239, + 523 + ], + "spans": [ + { + "bbox": [ + 115, + 511, + 239, + 523 + ], + "score": 1.0, + "content": "reinforcement learning, 2021.", + "type": "text", + "cross_page": true + } + ], + "index": 29, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 531, + 505, + 542 + ], + "spans": [ + { + "bbox": [ + 105, + 531, + 505, + 542 + ], + "score": 1.0, + "content": "A. Escontrela, X. B. Peng, W. Yu, T. Zhang, A. Iscen, K. Goldberg, and P. Abbeel. Adversarial", + "type": "text", + "cross_page": true + } + ], + "index": 30, + "is_list_start_line": true + }, + { + "bbox": [ + 113, + 541, + 411, + 554 + ], + "spans": [ + { + "bbox": [ + 113, + 541, + 411, + 554 + ], + "score": 1.0, + "content": "motion priors make good substitutes for complex reward functions, 2022.", + "type": "text", + "cross_page": true + } + ], + "index": 31, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 560, + 506, + 575 + ], + "spans": [ + { + "bbox": [ + 105, + 560, + 506, + 575 + ], + "score": 1.0, + "content": "D. Kalashnikov, A. Irpan, P. Pastor, J. Ibarz, A. Herzog, E. Jang, D. Quillen, E. Holly, M. Kalakrishnan,", + "type": "text", + "cross_page": true + } + ], + "index": 32, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 572, + 506, + 587 + ], + "spans": [ + { + "bbox": [ + 115, + 572, + 506, + 587 + ], + "score": 1.0, + "content": "V. Vanhoucke, and S. Levine. 
Qt-opt: Scalable deep reinforcement learning for vision-based robotic", + "type": "text", + "cross_page": true + } + ], + "index": 33 + }, + { + "bbox": [ + 115, + 585, + 199, + 597 + ], + "spans": [ + { + "bbox": [ + 115, + 585, + 199, + 597 + ], + "score": 1.0, + "content": "manipulation, 2018.", + "type": "text", + "cross_page": true + } + ], + "index": 34, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 604, + 507, + 618 + ], + "spans": [ + { + "bbox": [ + 105, + 604, + 507, + 618 + ], + "score": 1.0, + "content": "S. Dasari, F. Ebert, S. Tian, S. Nair, B. Bucher, K. Schmeckpeper, S. Singh, S. Levine, and C. Finn.", + "type": "text", + "cross_page": true + } + ], + "index": 35, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 617, + 317, + 629 + ], + "spans": [ + { + "bbox": [ + 115, + 617, + 317, + 629 + ], + "score": 1.0, + "content": "Robonet: Large-scale multi-robot learning, 2019.", + "type": "text", + "cross_page": true + } + ], + "index": 36, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 636, + 506, + 648 + ], + "spans": [ + { + "bbox": [ + 105, + 636, + 506, + 648 + ], + "score": 1.0, + "content": "D. Kalashnikov, J. Varley, Y. Chebotar, B. Swanson, R. Jonschkowski, C. Finn, S. Levine, and", + "type": "text", + "cross_page": true + } + ], + "index": 37, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 648, + 481, + 662 + ], + "spans": [ + { + "bbox": [ + 115, + 648, + 481, + 662 + ], + "score": 1.0, + "content": "K. Hausman. Mt-opt: Continuous multi-task robotic reinforcement learning at scale, 2021.", + "type": "text", + "cross_page": true + } + ], + "index": 38, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 667, + 507, + 681 + ], + "spans": [ + { + "bbox": [ + 105, + 667, + 507, + 681 + ], + "score": 1.0, + "content": "F. Ebert, Y. Yang, K. Schmeckpeper, B. Bucher, G. Georgakis, K. Daniilidis, C. Finn, and S. 
Levine.", + "type": "text", + "cross_page": true + } + ], + "index": 39, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 679, + 470, + 693 + ], + "spans": [ + { + "bbox": [ + 115, + 679, + 470, + 693 + ], + "score": 1.0, + "content": "Bridge data: Boosting generalization of robotic skills with cross-domain datasets, 2021.", + "type": "text", + "cross_page": true + } + ], + "index": 40, + "is_list_end_line": true + }, + { + "bbox": [ + 103, + 698, + 507, + 713 + ], + "spans": [ + { + "bbox": [ + 103, + 698, + 507, + 713 + ], + "score": 1.0, + "content": "A. Xie, F. Ebert, S. Levine, and C. Finn. Improvisation through physical understanding: Using novel", + "type": "text", + "cross_page": true + } + ], + "index": 41, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 711, + 430, + 725 + ], + "spans": [ + { + "bbox": [ + 115, + 711, + 430, + 725 + ], + "score": 1.0, + "content": "objects as tools with visual foresight. arXiv preprint arXiv:1904.05538, 2019.", + "type": "text", + "cross_page": true + } + ], + "index": 42, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 72, + 506, + 86 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 506, + 86 + ], + "score": 1.0, + "content": "G. Schoettler, A. Nair, J. Luo, S. Bahl, J. A. Ojea, E. Solowjow, and S. Levine. Deep reinforcement", + "type": "text", + "cross_page": true + } + ], + "index": 0, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 84, + 446, + 98 + ], + "spans": [ + { + "bbox": [ + 115, + 84, + 446, + 98 + ], + "score": 1.0, + "content": "learning for industrial insertion tasks with visual inputs and natural rewards, 2019.", + "type": "text", + "cross_page": true + } + ], + "index": 1, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 105, + 506, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 105, + 506, + 118 + ], + "score": 1.0, + "content": "S. James, K. Wada, T. Laidlow, and A. J. Davison. 
Coarse-to-fine q-attention: Efficient learning for", + "type": "text", + "cross_page": true + } + ], + "index": 2, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 118, + 326, + 129 + ], + "spans": [ + { + "bbox": [ + 115, + 118, + 326, + 129 + ], + "score": 1.0, + "content": "visual robotic manipulation via discretisation, 2021.", + "type": "text", + "cross_page": true + } + ], + "index": 3, + "is_list_end_line": true + }, + { + "bbox": [ + 103, + 136, + 507, + 152 + ], + "spans": [ + { + "bbox": [ + 103, + 136, + 507, + 152 + ], + "score": 1.0, + "content": "D. Shah and S. Levine. Viking: Vision-based kilometer-scale navigation with geographic hints, 2022.", + "type": "text", + "cross_page": true + } + ], + "index": 4, + "is_list_start_line": true + }, + { + "bbox": [ + 105, + 158, + 506, + 171 + ], + "spans": [ + { + "bbox": [ + 105, + 158, + 506, + 171 + ], + "score": 1.0, + "content": "S. Bohez, S. Tunyasuvunakool, P. Brakel, F. Sadeghi, L. Hasenclever, Y. Tassa, E. Parisotto,", + "type": "text", + "cross_page": true + } + ], + "index": 5, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 169, + 507, + 183 + ], + "spans": [ + { + "bbox": [ + 114, + 169, + 507, + 183 + ], + "score": 1.0, + "content": "J. Humplik, T. Haarnoja, R. Hafner, M. Wulfmeier, M. Neunert, B. Moran, N. Siegel, A. Huber,", + "type": "text", + "cross_page": true + } + ], + "index": 6 + }, + { + "bbox": [ + 113, + 179, + 508, + 196 + ], + "spans": [ + { + "bbox": [ + 113, + 179, + 508, + 196 + ], + "score": 1.0, + "content": "F. Romano, N. Batchelor, F. Casarini, J. Merel, R. Hadsell, and N. Heess. 
Imitate and repurpose:", + "type": "text", + "cross_page": true + } + ], + "index": 7 + }, + { + "bbox": [ + 115, + 193, + 447, + 207 + ], + "spans": [ + { + "bbox": [ + 115, + 193, + 447, + 207 + ], + "score": 1.0, + "content": "Learning reusable robot movement skills from human and animal behaviors, 2022.", + "type": "text", + "cross_page": true + } + ], + "index": 8, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 212, + 505, + 228 + ], + "spans": [ + { + "bbox": [ + 105, + 212, + 505, + 228 + ], + "score": 1.0, + "content": "A. Sivakumar, K. Shaw, and D. Pathak. Robotic telekinesis: Learning a robotic hand imitator by", + "type": "text", + "cross_page": true + } + ], + "index": 9, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 225, + 264, + 238 + ], + "spans": [ + { + "bbox": [ + 115, + 225, + 264, + 238 + ], + "score": 1.0, + "content": "watching humans on youtube, 2022.", + "type": "text", + "cross_page": true + } + ], + "index": 10, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 245, + 506, + 259 + ], + "spans": [ + { + "bbox": [ + 105, + 245, + 506, + 259 + ], + "score": 1.0, + "content": "C. Finn, I. Goodfellow, and S. Levine. Unsupervised learning for physical interaction through video", + "type": "text", + "cross_page": true + } + ], + "index": 11, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 257, + 464, + 271 + ], + "spans": [ + { + "bbox": [ + 114, + 257, + 464, + 271 + ], + "score": 1.0, + "content": "prediction. In Advances in neural information processing systems, pages 64–72, 2016.", + "type": "text", + "cross_page": true + } + ], + "index": 12, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 276, + 506, + 292 + ], + "spans": [ + { + "bbox": [ + 104, + 276, + 506, + 292 + ], + "score": 1.0, + "content": "C. Finn and S. Levine. Deep visual foresight for planning robot motion. 
In Robotics and Automation", + "type": "text", + "cross_page": true + } + ], + "index": 13, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 289, + 441, + 304 + ], + "spans": [ + { + "bbox": [ + 114, + 289, + 441, + 304 + ], + "score": 1.0, + "content": "(ICRA), 2017 IEEE International Conference on, pages 2786–2793. IEEE, 2017.", + "type": "text", + "cross_page": true + } + ], + "index": 14, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 308, + 506, + 325 + ], + "spans": [ + { + "bbox": [ + 105, + 308, + 506, + 325 + ], + "score": 1.0, + "content": "Y. Yang, T. Zhang, E. Coumans, J. Tan, and B. Boots. Fast and efficient locomotion via learned gait", + "type": "text", + "cross_page": true + } + ], + "index": 15, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 322, + 425, + 335 + ], + "spans": [ + { + "bbox": [ + 115, + 322, + 425, + 335 + ], + "score": 1.0, + "content": "transitions. In Conference on Robot Learning, pages 773–783. PMLR, 2022.", + "type": "text", + "cross_page": true + } + ], + "index": 16, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 342, + 506, + 356 + ], + "spans": [ + { + "bbox": [ + 105, + 342, + 506, + 356 + ], + "score": 1.0, + "content": "S. Ha, P. Xu, Z. Tan, S. Levine, and J. Tan. Learning to walk in the real world with minimal human", + "type": "text", + "cross_page": true + } + ], + "index": 17, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 353, + 308, + 369 + ], + "spans": [ + { + "bbox": [ + 115, + 353, + 308, + 369 + ], + "score": 1.0, + "content": "effort. arXiv preprint arXiv:2002.08550, 2020.", + "type": "text", + "cross_page": true + } + ], + "index": 18, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 375, + 505, + 387 + ], + "spans": [ + { + "bbox": [ + 106, + 375, + 505, + 387 + ], + "score": 1.0, + "content": "M. Zhang, S. Vikram, L. Smith, P. Abbeel, M. Johnson, and S. Levine. 
Solar: deep structured", + "type": "text", + "cross_page": true + } + ], + "index": 19, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 387, + 506, + 400 + ], + "spans": [ + { + "bbox": [ + 115, + 387, + 506, + 400 + ], + "score": 1.0, + "content": "representations for model-based reinforcement learning. In International Conference on Machine", + "type": "text", + "cross_page": true + } + ], + "index": 20 + }, + { + "bbox": [ + 114, + 398, + 182, + 411 + ], + "spans": [ + { + "bbox": [ + 114, + 398, + 182, + 411 + ], + "score": 1.0, + "content": "Learning, 2019.", + "type": "text", + "cross_page": true + } + ], + "index": 21, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 418, + 506, + 432 + ], + "spans": [ + { + "bbox": [ + 105, + 418, + 506, + 432 + ], + "score": 1.0, + "content": "A. Nagabandi, K. Konoglie, S. Levine, and V. Kumar. Deep dynamics models for learning dexterous", + "type": "text", + "cross_page": true + } + ], + "index": 22, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 430, + 199, + 444 + ], + "spans": [ + { + "bbox": [ + 115, + 430, + 199, + 444 + ], + "score": 1.0, + "content": "manipulation, 2019.", + "type": "text", + "cross_page": true + } + ], + "index": 23, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 450, + 505, + 464 + ], + "spans": [ + { + "bbox": [ + 105, + 450, + 505, + 464 + ], + "score": 1.0, + "content": "G. I. Parisi, R. Kemker, J. L. Part, C. Kanan, and S. Wermter. Continual lifelong learning with neural", + "type": "text", + "cross_page": true + } + ], + "index": 24, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 461, + 419, + 475 + ], + "spans": [ + { + "bbox": [ + 115, + 461, + 419, + 475 + ], + "score": 1.0, + "content": "networks: A review. Neural Networks, 113:54–71, 2019. 
ISSN 0893-6080.", + "type": "text", + "cross_page": true + } + ], + "index": 25, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 482, + 506, + 496 + ], + "spans": [ + { + "bbox": [ + 105, + 482, + 506, + 496 + ], + "score": 1.0, + "content": "T. Miki, J. Lee, J. Hwangbo, L. Wellhausen, V. Koltun, and M. Hutter. Learning robust perceptive", + "type": "text", + "cross_page": true + } + ], + "index": 26, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 493, + 506, + 508 + ], + "spans": [ + { + "bbox": [ + 114, + 493, + 506, + 508 + ], + "score": 1.0, + "content": "locomotion for quadrupedal robots in the wild. Science Robotics, 7(62), jan 2022. doi:10.1126/", + "type": "text", + "cross_page": true + } + ], + "index": 27 + }, + { + "bbox": [ + 115, + 506, + 201, + 519 + ], + "spans": [ + { + "bbox": [ + 115, + 506, + 201, + 519 + ], + "score": 1.0, + "content": "scirobotics.abk2822.", + "type": "text", + "cross_page": true + } + ], + "index": 28, + "is_list_end_line": true + }, + { + "bbox": [ + 103, + 524, + 506, + 542 + ], + "spans": [ + { + "bbox": [ + 103, + 524, + 506, + 542 + ], + "score": 1.0, + "content": "L. Smith, J. C. Kew, X. B. Peng, S. Ha, J. Tan, and S. Levine. Legged robots that keep on learning:", + "type": "text", + "cross_page": true + } + ], + "index": 29, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 538, + 342, + 552 + ], + "spans": [ + { + "bbox": [ + 115, + 538, + 342, + 552 + ], + "score": 1.0, + "content": "Fine-tuning locomotion policies in the real world, 2021.", + "type": "text", + "cross_page": true + } + ], + "index": 30, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 557, + 506, + 573 + ], + "spans": [ + { + "bbox": [ + 105, + 557, + 506, + 573 + ], + "score": 1.0, + "content": "T.-Y. Yang, T. Zhang, L. Luu, S. Ha, J. Tan, and W. Yu. 
Safe reinforcement learning for legged", + "type": "text", + "cross_page": true + } + ], + "index": 31, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 570, + 387, + 584 + ], + "spans": [ + { + "bbox": [ + 115, + 570, + 387, + 584 + ], + "score": 1.0, + "content": "locomotion, 2022. URL https://arxiv.org/abs/2203.02638.", + "type": "text", + "cross_page": true + } + ], + "index": 32, + "is_list_end_line": true + }, + { + "bbox": [ + 103, + 589, + 506, + 606 + ], + "spans": [ + { + "bbox": [ + 103, + 589, + 506, + 606 + ], + "score": 1.0, + "content": "S. Ha, P. Xu, Z. Tan, S. Levine, and J. Tan. Learning to walk in the real world with minimal human", + "type": "text", + "cross_page": true + } + ], + "index": 33, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 603, + 363, + 616 + ], + "spans": [ + { + "bbox": [ + 115, + 603, + 363, + 616 + ], + "score": 1.0, + "content": "effort, 2020. URL https://arxiv.org/abs/2002.08550.", + "type": "text", + "cross_page": true + } + ], + "index": 34, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 623, + 506, + 636 + ], + "spans": [ + { + "bbox": [ + 105, + 623, + 506, + 636 + ], + "score": 1.0, + "content": "L. Smith, I. Kostrikov, and S. Levine. A walk in the park: Learning to walk in 20 minutes with", + "type": "text", + "cross_page": true + } + ], + "index": 35, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 635, + 478, + 649 + ], + "spans": [ + { + "bbox": [ + 115, + 635, + 478, + 649 + ], + "score": 1.0, + "content": "model-free reinforcement learning, 2022. URL https://arxiv.org/abs/2208.07860.", + "type": "text", + "cross_page": true + } + ], + "index": 36, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 655, + 506, + 669 + ], + "spans": [ + { + "bbox": [ + 105, + 655, + 506, + 669 + ], + "score": 1.0, + "content": "S. Levine, P. Pastor, A. Krizhevsky, J. Ibarz, and D. Quillen. 
Learning hand-eye coordination for", + "type": "text", + "cross_page": true + } + ], + "index": 37, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 666, + 507, + 682 + ], + "spans": [ + { + "bbox": [ + 114, + 666, + 507, + 682 + ], + "score": 1.0, + "content": "robotic grasping with deep learning and large-scale data collection. The International Journal of", + "type": "text", + "cross_page": true + } + ], + "index": 38 + }, + { + "bbox": [ + 114, + 678, + 294, + 692 + ], + "spans": [ + { + "bbox": [ + 114, + 678, + 294, + 692 + ], + "score": 1.0, + "content": "Robotics Research, 37(4-5):421–436, 2018.", + "type": "text", + "cross_page": true + } + ], + "index": 39, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 699, + 506, + 713 + ], + "spans": [ + { + "bbox": [ + 105, + 699, + 506, + 713 + ], + "score": 1.0, + "content": "L. Pinto and A. Gupta. Supersizing self-supervision: Learning to grasp from 50k tries and 700 robot", + "type": "text", + "cross_page": true + } + ], + "index": 40, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 711, + 169, + 724 + ], + "spans": [ + { + "bbox": [ + 114, + 711, + 169, + 724 + ], + "score": 1.0, + "content": "hours, 2015.", + "type": "text", + "cross_page": true + } + ], + "index": 41, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 73, + 505, + 87 + ], + "spans": [ + { + "bbox": [ + 105, + 73, + 505, + 87 + ], + "score": 1.0, + "content": "H. Ha and S. Song. Flingbot: The unreasonable effectiveness of dynamic manipulation for cloth", + "type": "text", + "cross_page": true + } + ], + "index": 0, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 84, + 315, + 98 + ], + "spans": [ + { + "bbox": [ + 115, + 84, + 315, + 98 + ], + "score": 1.0, + "content": "unfolding. 
Conference on Robot Learning, 2021.", + "type": "text", + "cross_page": true + } + ], + "index": 1, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 103, + 505, + 119 + ], + "spans": [ + { + "bbox": [ + 104, + 103, + 505, + 119 + ], + "score": 1.0, + "content": "S. James and A. J. Davison. Q-attention: Enabling efficient learning for vision-based robotic", + "type": "text", + "cross_page": true + } + ], + "index": 2, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 116, + 200, + 129 + ], + "spans": [ + { + "bbox": [ + 115, + 116, + 200, + 129 + ], + "score": 1.0, + "content": "manipulation, 2021.", + "type": "text", + "cross_page": true + } + ], + "index": 3, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 134, + 505, + 150 + ], + "spans": [ + { + "bbox": [ + 104, + 134, + 505, + 150 + ], + "score": 1.0, + "content": "E. Tzeng, C. Devin, J. Hoffman, C. Finn, P. Abbeel, S. Levine, K. Saenko, and T. Darrell. Adapting", + "type": "text", + "cross_page": true + } + ], + "index": 4, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 147, + 402, + 162 + ], + "spans": [ + { + "bbox": [ + 115, + 147, + 402, + 162 + ], + "score": 1.0, + "content": "deep visuomotor representations with weak pairwise constraints, 2015.", + "type": "text", + "cross_page": true + } + ], + "index": 5, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 166, + 506, + 181 + ], + "spans": [ + { + "bbox": [ + 104, + 166, + 506, + 181 + ], + "score": 1.0, + "content": "I. Akkaya, M. Andrychowicz, M. Chociej, M. Litwin, B. McGrew, A. Petron, A. Paino, M. Plappert,", + "type": "text", + "cross_page": true + } + ], + "index": 6, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 178, + 507, + 192 + ], + "spans": [ + { + "bbox": [ + 115, + 178, + 507, + 192 + ], + "score": 1.0, + "content": "G. Powell, R. Ribas, et al. Solving rubik’s cube with a robot hand. 
arXiv preprint arXiv:1910.07113,", + "type": "text", + "cross_page": true + } + ], + "index": 7 + }, + { + "bbox": [ + 115, + 190, + 142, + 203 + ], + "spans": [ + { + "bbox": [ + 115, + 190, + 142, + 203 + ], + "score": 1.0, + "content": "2019.", + "type": "text", + "cross_page": true + } + ], + "index": 8, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 208, + 505, + 223 + ], + "spans": [ + { + "bbox": [ + 105, + 208, + 505, + 223 + ], + "score": 1.0, + "content": "M. P. Deisenroth, G. Neumann, J. Peters, et al. A survey on policy search for robotics. Foundations", + "type": "text", + "cross_page": true + } + ], + "index": 9, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 221, + 298, + 235 + ], + "spans": [ + { + "bbox": [ + 115, + 221, + 298, + 235 + ], + "score": 1.0, + "content": "and Trends in Robotics, 2(1–2):1–142, 2013.", + "type": "text", + "cross_page": true + } + ], + "index": 10, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 241, + 506, + 255 + ], + "spans": [ + { + "bbox": [ + 105, + 241, + 506, + 255 + ], + "score": 1.0, + "content": "K. Chua, R. Calandra, R. McAllister, and S. Levine. Deep reinforcement learning in a handful of", + "type": "text", + "cross_page": true + } + ], + "index": 11, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 252, + 507, + 268 + ], + "spans": [ + { + "bbox": [ + 114, + 252, + 507, + 268 + ], + "score": 1.0, + "content": "trials using probabilistic dynamics models. In Advances in Neural Information Processing Systems,", + "type": "text", + "cross_page": true + } + ], + "index": 12 + }, + { + "bbox": [ + 114, + 264, + 218, + 278 + ], + "spans": [ + { + "bbox": [ + 114, + 264, + 218, + 278 + ], + "score": 1.0, + "content": "pages 4754–4765, 2018.", + "type": "text", + "cross_page": true + } + ], + "index": 13, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 282, + 506, + 299 + ], + "spans": [ + { + "bbox": [ + 104, + 282, + 506, + 299 + ], + "score": 1.0, + "content": "A. 
Nagabandi, G. Yang, T. Asmar, R. Pandya, G. Kahn, S. Levine, and R. S. Fearing. Learning", + "type": "text", + "cross_page": true + } + ], + "index": 14, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 295, + 487, + 310 + ], + "spans": [ + { + "bbox": [ + 114, + 295, + 487, + 310 + ], + "score": 1.0, + "content": "image-conditioned dynamics models for control of under-actuated legged millirobots, 2017.", + "type": "text", + "cross_page": true + } + ], + "index": 15, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 314, + 505, + 329 + ], + "spans": [ + { + "bbox": [ + 104, + 314, + 505, + 329 + ], + "score": 1.0, + "content": "P. Becker-Ehmck, M. Karl, J. Peters, and P. van der Smagt. Learning to fly via deep model-based", + "type": "text", + "cross_page": true + } + ], + "index": 16, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 327, + 376, + 340 + ], + "spans": [ + { + "bbox": [ + 114, + 327, + 376, + 340 + ], + "score": 1.0, + "content": "reinforcement learning. arXiv preprint arXiv:2003.08876, 2020.", + "type": "text", + "cross_page": true + } + ], + "index": 17, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 345, + 505, + 360 + ], + "spans": [ + { + "bbox": [ + 104, + 345, + 505, + 360 + ], + "score": 1.0, + "content": "F. Deng, I. Jang, and S. Ahn. Dreamerpro: Reconstruction-free model-based reinforcement learning", + "type": "text", + "cross_page": true + } + ], + "index": 18, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 358, + 416, + 372 + ], + "spans": [ + { + "bbox": [ + 115, + 358, + 416, + 372 + ], + "score": 1.0, + "content": "with prototypical representations. arXiv preprint arXiv:2110.14565, 2021.", + "type": "text", + "cross_page": true + } + ], + "index": 19, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 377, + 505, + 392 + ], + "spans": [ + { + "bbox": [ + 105, + 377, + 505, + 392 + ], + "score": 1.0, + "content": "M. Okada and T. Taniguchi. 
Dreaming: Model-based reinforcement learning by latent imagination", + "type": "text", + "cross_page": true + } + ], + "index": 20, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 389, + 506, + 403 + ], + "spans": [ + { + "bbox": [ + 114, + 389, + 506, + 403 + ], + "score": 1.0, + "content": "without reconstruction. In 2021 IEEE International Conference on Robotics and Automation", + "type": "text", + "cross_page": true + } + ], + "index": 21 + }, + { + "bbox": [ + 115, + 402, + 275, + 414 + ], + "spans": [ + { + "bbox": [ + 115, + 402, + 275, + 414 + ], + "score": 1.0, + "content": "(ICRA), pages 4209–4215. IEEE, 2021.", + "type": "text", + "cross_page": true + } + ], + "index": 22, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 420, + 505, + 435 + ], + "spans": [ + { + "bbox": [ + 105, + 420, + 505, + 435 + ], + "score": 1.0, + "content": "H. Bharadhwaj, M. Babaeizadeh, D. Erhan, and S. Levine. Information prioritization through", + "type": "text", + "cross_page": true + } + ], + "index": 23, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 433, + 439, + 446 + ], + "spans": [ + { + "bbox": [ + 115, + 433, + 439, + 446 + ], + "score": 1.0, + "content": "empowerment in visual model-based rl. arXiv preprint arXiv:2204.08585, 2022.", + "type": "text", + "cross_page": true + } + ], + "index": 24, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 452, + 505, + 467 + ], + "spans": [ + { + "bbox": [ + 105, + 452, + 505, + 467 + ], + "score": 1.0, + "content": "K. Paster, L. E. McKinney, S. A. McIlraith, and J. Ba. Blast: Latent dynamics models from", + "type": "text", + "cross_page": true + } + ], + "index": 25, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 464, + 355, + 478 + ], + "spans": [ + { + "bbox": [ + 115, + 464, + 355, + 478 + ], + "score": 1.0, + "content": "bootstrapping. 
In Deep RL Workshop NeurIPS 2021, 2021.", + "type": "text", + "cross_page": true + } + ], + "index": 26, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 482, + 506, + 499 + ], + "spans": [ + { + "bbox": [ + 105, + 482, + 506, + 499 + ], + "score": 1.0, + "content": "K. Hsu, M. J. Kim, R. Rafailov, J. Wu, and C. Finn. Vision-based manipulators need to also see from", + "type": "text", + "cross_page": true + } + ], + "index": 27, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 495, + 387, + 509 + ], + "spans": [ + { + "bbox": [ + 115, + 495, + 387, + 509 + ], + "score": 1.0, + "content": "their hands, 2022. URL https://arxiv.org/abs/2203.12677.", + "type": "text", + "cross_page": true + } + ], + "index": 28, + "is_list_end_line": true + } + ], + "index": 20, + "bbox_fs": [ + 104, + 70, + 507, + 724 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 103, + 44, + 507, + 729 + ], + "lines": [ + { + "bbox": [ + 105, + 73, + 506, + 86 + ], + "spans": [ + { + "bbox": [ + 105, + 73, + 506, + 86 + ], + "score": 1.0, + "content": "J. Schrittwieser, I. Antonoglou, T. Hubert, K. Simonyan, L. Sifre, S. Schmitt, A. Guez, E. Lockhart,", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 115, + 83, + 506, + 97 + ], + "spans": [ + { + "bbox": [ + 115, + 83, + 506, + 97 + ], + "score": 1.0, + "content": "D. Hassabis, T. Graepel, et al. Mastering atari, go, chess and shogi by planning with a learned", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 114, + 96, + 310, + 109 + ], + "spans": [ + { + "bbox": [ + 114, + 96, + 310, + 109 + ], + "score": 1.0, + "content": "model. arXiv preprint arXiv:1911.08265, 2019.", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 115, + 506, + 130 + ], + "spans": [ + { + "bbox": [ + 105, + 115, + 506, + 130 + ], + "score": 1.0, + "content": "J. Schulman, F. Wolski, P. Dhariwal, A. Radford, and O. Klimov. 
Proximal policy optimization", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 115, + 127, + 328, + 142 + ], + "spans": [ + { + "bbox": [ + 115, + 127, + 328, + 142 + ], + "score": 1.0, + "content": "algorithms. arXiv preprint arXiv:1707.06347, 2017.", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 147, + 506, + 161 + ], + "spans": [ + { + "bbox": [ + 105, + 147, + 506, + 161 + ], + "score": 1.0, + "content": "D. Yarats, R. Fergus, A. Lazaric, and L. Pinto. Mastering visual continuous control: Improved", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 115, + 159, + 441, + 173 + ], + "spans": [ + { + "bbox": [ + 115, + 159, + 441, + 173 + ], + "score": 1.0, + "content": "data-augmented reinforcement learning. arXiv preprint arXiv:2107.09645, 2021.", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 103, + 176, + 506, + 194 + ], + "spans": [ + { + "bbox": [ + 103, + 176, + 506, + 194 + ], + "score": 1.0, + "content": "A. A. Rusu, M. Vecerik, T. RothΓΆrl, N. Heess, R. Pascanu, and R. Hadsell. Sim-to-real robot learning", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 115, + 190, + 279, + 205 + ], + "spans": [ + { + "bbox": [ + 115, + 190, + 279, + 205 + ], + "score": 1.0, + "content": "from pixels with progressive nets, 2016.", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 210, + 506, + 224 + ], + "spans": [ + { + "bbox": [ + 105, + 210, + 506, + 224 + ], + "score": 1.0, + "content": "X. B. Peng, M. Andrychowicz, W. Zaremba, and P. Abbeel. Sim-to-real transfer of robotic control", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 115, + 222, + 505, + 234 + ], + "spans": [ + { + "bbox": [ + 115, + 222, + 505, + 234 + ], + "score": 1.0, + "content": "with dynamics randomization. 
In 2018 IEEE International Conference on Robotics and Automation", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 114, + 234, + 381, + 248 + ], + "spans": [ + { + "bbox": [ + 114, + 234, + 381, + 248 + ], + "score": 1.0, + "content": "(ICRA), pages 1–8, May 2018. doi:10.1109/ICRA.2018.8460528.", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 103, + 251, + 506, + 268 + ], + "spans": [ + { + "bbox": [ + 103, + 251, + 506, + 268 + ], + "score": 1.0, + "content": "N. Rudin, D. Hoeller, P. Reist, and M. Hutter. Learning to walk in minutes using massively parallel", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 115, + 265, + 260, + 279 + ], + "spans": [ + { + "bbox": [ + 115, + 265, + 260, + 279 + ], + "score": 1.0, + "content": "deep reinforcement learning, 2021.", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 285, + 506, + 299 + ], + "spans": [ + { + "bbox": [ + 105, + 285, + 506, + 299 + ], + "score": 1.0, + "content": "J. Lee, J. Hwangbo, L. Wellhausen, V. Koltun, and M. Hutter. Learning quadrupedal locomotion over", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 114, + 295, + 507, + 311 + ], + "spans": [ + { + "bbox": [ + 114, + 295, + 507, + 311 + ], + "score": 1.0, + "content": "challenging terrain. Science Robotics, 5(47), oct 2020. doi:10.1126/scirobotics.abc5986. URL", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 115, + 308, + 357, + 322 + ], + "spans": [ + { + "bbox": [ + 115, + 308, + 357, + 322 + ], + "score": 1.0, + "content": "https://doi.org/10.1126%2Fscirobotics.abc5986.", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 327, + 506, + 343 + ], + "spans": [ + { + "bbox": [ + 105, + 327, + 506, + 343 + ], + "score": 1.0, + "content": "Y. Yang, K. Caluwaerts, A. Iscen, T. Zhang, J. Tan, and V. Sindhwani. 
Data efficient reinforcement", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 115, + 340, + 251, + 353 + ], + "spans": [ + { + "bbox": [ + 115, + 340, + 251, + 353 + ], + "score": 1.0, + "content": "learning for legged robots, 2019.", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 358, + 506, + 374 + ], + "spans": [ + { + "bbox": [ + 104, + 358, + 506, + 374 + ], + "score": 1.0, + "content": "S. Nair, A. Rajeswaran, V. Kumar, C. Finn, and A. Gupta. R3m: A universal visual representation for", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 115, + 373, + 222, + 385 + ], + "spans": [ + { + "bbox": [ + 115, + 373, + 222, + 385 + ], + "score": 1.0, + "content": "robot manipulation, 2022.", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 392, + 506, + 405 + ], + "spans": [ + { + "bbox": [ + 106, + 392, + 506, + 405 + ], + "score": 1.0, + "content": "OpenAI, M. Andrychowicz, B. Baker, M. Chociej, R. Jozefowicz, B. McGrew, J. Pachocki, A. Petron,", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 115, + 403, + 505, + 417 + ], + "spans": [ + { + "bbox": [ + 115, + 403, + 505, + 417 + ], + "score": 1.0, + "content": "M. Plappert, G. Powell, A. Ray, J. Schneider, S. Sidor, J. Tobin, P. Welinder, L. Weng, and", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 115, + 415, + 366, + 429 + ], + "spans": [ + { + "bbox": [ + 115, + 415, + 366, + 429 + ], + "score": 1.0, + "content": "W. Zaremba. Learning dexterous in-hand manipulation, 2018.", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 434, + 506, + 448 + ], + "spans": [ + { + "bbox": [ + 105, + 434, + 506, + 448 + ], + "score": 1.0, + "content": "A. Irpan, C. Harris, J. Ibarz, K. Rao, M. Khansari, and S. Levine. 
Rl-cyclegan: Improving deep-rl", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 114, + 446, + 506, + 461 + ], + "spans": [ + { + "bbox": [ + 114, + 446, + 506, + 461 + ], + "score": 1.0, + "content": "robotics with simulation-to-real. In Proceedings of the IEEE Conference on Computer Vision and", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 115, + 459, + 281, + 471 + ], + "spans": [ + { + "bbox": [ + 115, + 459, + 281, + 471 + ], + "score": 1.0, + "content": "Pattern Recognition (CVPR 2020), 2020.", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 478, + 496, + 490 + ], + "spans": [ + { + "bbox": [ + 105, + 478, + 496, + 490 + ], + "score": 1.0, + "content": "A. Kumar, Z. Fu, D. Pathak, and J. Malik. Rma: Rapid motor adaptation for legged robots, 2021.", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 497, + 506, + 512 + ], + "spans": [ + { + "bbox": [ + 105, + 497, + 506, + 512 + ], + "score": 1.0, + "content": "J. Siekmann, K. Green, J. Warila, A. Fern, and J. Hurst. Blind bipedal stair traversal via sim-to-real", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 115, + 511, + 239, + 523 + ], + "spans": [ + { + "bbox": [ + 115, + 511, + 239, + 523 + ], + "score": 1.0, + "content": "reinforcement learning, 2021.", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 531, + 505, + 542 + ], + "spans": [ + { + "bbox": [ + 105, + 531, + 505, + 542 + ], + "score": 1.0, + "content": "A. Escontrela, X. B. Peng, W. Yu, T. Zhang, A. Iscen, K. Goldberg, and P. Abbeel. 
Adversarial", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 113, + 541, + 411, + 554 + ], + "spans": [ + { + "bbox": [ + 113, + 541, + 411, + 554 + ], + "score": 1.0, + "content": "motion priors make good substitutes for complex reward functions, 2022.", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 560, + 506, + 575 + ], + "spans": [ + { + "bbox": [ + 105, + 560, + 506, + 575 + ], + "score": 1.0, + "content": "D. Kalashnikov, A. Irpan, P. Pastor, J. Ibarz, A. Herzog, E. Jang, D. Quillen, E. Holly, M. Kalakrishnan,", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 115, + 572, + 506, + 587 + ], + "spans": [ + { + "bbox": [ + 115, + 572, + 506, + 587 + ], + "score": 1.0, + "content": "V. Vanhoucke, and S. Levine. Qt-opt: Scalable deep reinforcement learning for vision-based robotic", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 115, + 585, + 199, + 597 + ], + "spans": [ + { + "bbox": [ + 115, + 585, + 199, + 597 + ], + "score": 1.0, + "content": "manipulation, 2018.", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 604, + 507, + 618 + ], + "spans": [ + { + "bbox": [ + 105, + 604, + 507, + 618 + ], + "score": 1.0, + "content": "S. Dasari, F. Ebert, S. Tian, S. Nair, B. Bucher, K. Schmeckpeper, S. Singh, S. Levine, and C. Finn.", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 115, + 617, + 317, + 629 + ], + "spans": [ + { + "bbox": [ + 115, + 617, + 317, + 629 + ], + "score": 1.0, + "content": "Robonet: Large-scale multi-robot learning, 2019.", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 636, + 506, + 648 + ], + "spans": [ + { + "bbox": [ + 105, + 636, + 506, + 648 + ], + "score": 1.0, + "content": "D. Kalashnikov, J. Varley, Y. Chebotar, B. Swanson, R. Jonschkowski, C. Finn, S. 
Levine, and", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 115, + 648, + 481, + 662 + ], + "spans": [ + { + "bbox": [ + 115, + 648, + 481, + 662 + ], + "score": 1.0, + "content": "K. Hausman. Mt-opt: Continuous multi-task robotic reinforcement learning at scale, 2021.", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 667, + 507, + 681 + ], + "spans": [ + { + "bbox": [ + 105, + 667, + 507, + 681 + ], + "score": 1.0, + "content": "F. Ebert, Y. Yang, K. Schmeckpeper, B. Bucher, G. Georgakis, K. Daniilidis, C. Finn, and S. Levine.", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 115, + 679, + 470, + 693 + ], + "spans": [ + { + "bbox": [ + 115, + 679, + 470, + 693 + ], + "score": 1.0, + "content": "Bridge data: Boosting generalization of robotic skills with cross-domain datasets, 2021.", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 103, + 698, + 507, + 713 + ], + "spans": [ + { + "bbox": [ + 103, + 698, + 507, + 713 + ], + "score": 1.0, + "content": "A. Xie, F. Ebert, S. Levine, and C. Finn. Improvisation through physical understanding: Using novel", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 115, + 711, + 430, + 725 + ], + "spans": [ + { + "bbox": [ + 115, + 711, + 430, + 725 + ], + "score": 1.0, + "content": "objects as tools with visual foresight. 
arXiv preprint arXiv:1904.05538, 2019.", + "type": "text" + } + ], + "index": 42 + } + ], + "index": 21 + } + ], + "page_idx": 9, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 300, + 741, + 311, + 751 + ], + "lines": [ + { + "bbox": [ + 299, + 740, + 313, + 754 + ], + "spans": [ + { + "bbox": [ + 299, + 740, + 313, + 754 + ], + "score": 1.0, + "content": "10", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "list", + "bbox": [ + 103, + 44, + 507, + 729 + ], + "lines": [], + "index": 21, + "bbox_fs": [ + 103, + 73, + 507, + 725 + ], + "lines_deleted": true + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 104, + 72, + 507, + 726 + ], + "lines": [ + { + "bbox": [ + 105, + 72, + 506, + 86 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 506, + 86 + ], + "score": 1.0, + "content": "G. Schoettler, A. Nair, J. Luo, S. Bahl, J. A. Ojea, E. Solowjow, and S. Levine. Deep reinforcement", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 115, + 84, + 446, + 98 + ], + "spans": [ + { + "bbox": [ + 115, + 84, + 446, + 98 + ], + "score": 1.0, + "content": "learning for industrial insertion tasks with visual inputs and natural rewards, 2019.", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 105, + 506, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 105, + 506, + 118 + ], + "score": 1.0, + "content": "S. James, K. Wada, T. Laidlow, and A. J. Davison. Coarse-to-fine q-attention: Efficient learning for", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 115, + 118, + 326, + 129 + ], + "spans": [ + { + "bbox": [ + 115, + 118, + 326, + 129 + ], + "score": 1.0, + "content": "visual robotic manipulation via discretisation, 2021.", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 103, + 136, + 507, + 152 + ], + "spans": [ + { + "bbox": [ + 103, + 136, + 507, + 152 + ], + "score": 1.0, + "content": "D. Shah and S. Levine. 
Viking: Vision-based kilometer-scale navigation with geographic hints, 2022.", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 158, + 506, + 171 + ], + "spans": [ + { + "bbox": [ + 105, + 158, + 506, + 171 + ], + "score": 1.0, + "content": "S. Bohez, S. Tunyasuvunakool, P. Brakel, F. Sadeghi, L. Hasenclever, Y. Tassa, E. Parisotto,", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 114, + 169, + 507, + 183 + ], + "spans": [ + { + "bbox": [ + 114, + 169, + 507, + 183 + ], + "score": 1.0, + "content": "J. Humplik, T. Haarnoja, R. Hafner, M. Wulfmeier, M. Neunert, B. Moran, N. Siegel, A. Huber,", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 113, + 179, + 508, + 196 + ], + "spans": [ + { + "bbox": [ + 113, + 179, + 508, + 196 + ], + "score": 1.0, + "content": "F. Romano, N. Batchelor, F. Casarini, J. Merel, R. Hadsell, and N. Heess. Imitate and repurpose:", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 115, + 193, + 447, + 207 + ], + "spans": [ + { + "bbox": [ + 115, + 193, + 447, + 207 + ], + "score": 1.0, + "content": "Learning reusable robot movement skills from human and animal behaviors, 2022.", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 212, + 505, + 228 + ], + "spans": [ + { + "bbox": [ + 105, + 212, + 505, + 228 + ], + "score": 1.0, + "content": "A. Sivakumar, K. Shaw, and D. Pathak. Robotic telekinesis: Learning a robotic hand imitator by", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 115, + 225, + 264, + 238 + ], + "spans": [ + { + "bbox": [ + 115, + 225, + 264, + 238 + ], + "score": 1.0, + "content": "watching humans on youtube, 2022.", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 245, + 506, + 259 + ], + "spans": [ + { + "bbox": [ + 105, + 245, + 506, + 259 + ], + "score": 1.0, + "content": "C. Finn, I. Goodfellow, and S. Levine. 
Unsupervised learning for physical interaction through video", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 114, + 257, + 464, + 271 + ], + "spans": [ + { + "bbox": [ + 114, + 257, + 464, + 271 + ], + "score": 1.0, + "content": "prediction. In Advances in neural information processing systems, pages 64–72, 2016.", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 276, + 506, + 292 + ], + "spans": [ + { + "bbox": [ + 104, + 276, + 506, + 292 + ], + "score": 1.0, + "content": "C. Finn and S. Levine. Deep visual foresight for planning robot motion. In Robotics and Automation", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 114, + 289, + 441, + 304 + ], + "spans": [ + { + "bbox": [ + 114, + 289, + 441, + 304 + ], + "score": 1.0, + "content": "(ICRA), 2017 IEEE International Conference on, pages 2786–2793. IEEE, 2017.", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 308, + 506, + 325 + ], + "spans": [ + { + "bbox": [ + 105, + 308, + 506, + 325 + ], + "score": 1.0, + "content": "Y. Yang, T. Zhang, E. Coumans, J. Tan, and B. Boots. Fast and efficient locomotion via learned gait", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 115, + 322, + 425, + 335 + ], + "spans": [ + { + "bbox": [ + 115, + 322, + 425, + 335 + ], + "score": 1.0, + "content": "transitions. In Conference on Robot Learning, pages 773–783. PMLR, 2022.", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 342, + 506, + 356 + ], + "spans": [ + { + "bbox": [ + 105, + 342, + 506, + 356 + ], + "score": 1.0, + "content": "S. Ha, P. Xu, Z. Tan, S. Levine, and J. Tan. Learning to walk in the real world with minimal human", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 115, + 353, + 308, + 369 + ], + "spans": [ + { + "bbox": [ + 115, + 353, + 308, + 369 + ], + "score": 1.0, + "content": "effort. 
arXiv preprint arXiv:2002.08550, 2020.", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 106, + 375, + 505, + 387 + ], + "spans": [ + { + "bbox": [ + 106, + 375, + 505, + 387 + ], + "score": 1.0, + "content": "M. Zhang, S. Vikram, L. Smith, P. Abbeel, M. Johnson, and S. Levine. Solar: deep structured", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 115, + 387, + 506, + 400 + ], + "spans": [ + { + "bbox": [ + 115, + 387, + 506, + 400 + ], + "score": 1.0, + "content": "representations for model-based reinforcement learning. In International Conference on Machine", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 114, + 398, + 182, + 411 + ], + "spans": [ + { + "bbox": [ + 114, + 398, + 182, + 411 + ], + "score": 1.0, + "content": "Learning, 2019.", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 418, + 506, + 432 + ], + "spans": [ + { + "bbox": [ + 105, + 418, + 506, + 432 + ], + "score": 1.0, + "content": "A. Nagabandi, K. Konoglie, S. Levine, and V. Kumar. Deep dynamics models for learning dexterous", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 115, + 430, + 199, + 444 + ], + "spans": [ + { + "bbox": [ + 115, + 430, + 199, + 444 + ], + "score": 1.0, + "content": "manipulation, 2019.", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 450, + 505, + 464 + ], + "spans": [ + { + "bbox": [ + 105, + 450, + 505, + 464 + ], + "score": 1.0, + "content": "G. I. Parisi, R. Kemker, J. L. Part, C. Kanan, and S. Wermter. Continual lifelong learning with neural", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 115, + 461, + 419, + 475 + ], + "spans": [ + { + "bbox": [ + 115, + 461, + 419, + 475 + ], + "score": 1.0, + "content": "networks: A review. Neural Networks, 113:54–71, 2019. 
ISSN 0893-6080.", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 482, + 506, + 496 + ], + "spans": [ + { + "bbox": [ + 105, + 482, + 506, + 496 + ], + "score": 1.0, + "content": "T. Miki, J. Lee, J. Hwangbo, L. Wellhausen, V. Koltun, and M. Hutter. Learning robust perceptive", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 114, + 493, + 506, + 508 + ], + "spans": [ + { + "bbox": [ + 114, + 493, + 506, + 508 + ], + "score": 1.0, + "content": "locomotion for quadrupedal robots in the wild. Science Robotics, 7(62), jan 2022. doi:10.1126/", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 115, + 506, + 201, + 519 + ], + "spans": [ + { + "bbox": [ + 115, + 506, + 201, + 519 + ], + "score": 1.0, + "content": "scirobotics.abk2822.", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 103, + 524, + 506, + 542 + ], + "spans": [ + { + "bbox": [ + 103, + 524, + 506, + 542 + ], + "score": 1.0, + "content": "L. Smith, J. C. Kew, X. B. Peng, S. Ha, J. Tan, and S. Levine. Legged robots that keep on learning:", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 115, + 538, + 342, + 552 + ], + "spans": [ + { + "bbox": [ + 115, + 538, + 342, + 552 + ], + "score": 1.0, + "content": "Fine-tuning locomotion policies in the real world, 2021.", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 557, + 506, + 573 + ], + "spans": [ + { + "bbox": [ + 105, + 557, + 506, + 573 + ], + "score": 1.0, + "content": "T.-Y. Yang, T. Zhang, L. Luu, S. Ha, J. Tan, and W. Yu. Safe reinforcement learning for legged", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 115, + 570, + 387, + 584 + ], + "spans": [ + { + "bbox": [ + 115, + 570, + 387, + 584 + ], + "score": 1.0, + "content": "locomotion, 2022. 
URL https://arxiv.org/abs/2203.02638.", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 103, + 589, + 506, + 606 + ], + "spans": [ + { + "bbox": [ + 103, + 589, + 506, + 606 + ], + "score": 1.0, + "content": "S. Ha, P. Xu, Z. Tan, S. Levine, and J. Tan. Learning to walk in the real world with minimal human", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 115, + 603, + 363, + 616 + ], + "spans": [ + { + "bbox": [ + 115, + 603, + 363, + 616 + ], + "score": 1.0, + "content": "effort, 2020. URL https://arxiv.org/abs/2002.08550.", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 623, + 506, + 636 + ], + "spans": [ + { + "bbox": [ + 105, + 623, + 506, + 636 + ], + "score": 1.0, + "content": "L. Smith, I. Kostrikov, and S. Levine. A walk in the park: Learning to walk in 20 minutes with", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 115, + 635, + 478, + 649 + ], + "spans": [ + { + "bbox": [ + 115, + 635, + 478, + 649 + ], + "score": 1.0, + "content": "model-free reinforcement learning, 2022. URL https://arxiv.org/abs/2208.07860.", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 655, + 506, + 669 + ], + "spans": [ + { + "bbox": [ + 105, + 655, + 506, + 669 + ], + "score": 1.0, + "content": "S. Levine, P. Pastor, A. Krizhevsky, J. Ibarz, and D. Quillen. Learning hand-eye coordination for", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 114, + 666, + 507, + 682 + ], + "spans": [ + { + "bbox": [ + 114, + 666, + 507, + 682 + ], + "score": 1.0, + "content": "robotic grasping with deep learning and large-scale data collection. 
The International Journal of", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 114, + 678, + 294, + 692 + ], + "spans": [ + { + "bbox": [ + 114, + 678, + 294, + 692 + ], + "score": 1.0, + "content": "Robotics Research, 37(4-5):421–436, 2018.", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 699, + 506, + 713 + ], + "spans": [ + { + "bbox": [ + 105, + 699, + 506, + 713 + ], + "score": 1.0, + "content": "L. Pinto and A. Gupta. Supersizing self-supervision: Learning to grasp from 50k tries and 700 robot", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 114, + 711, + 169, + 724 + ], + "spans": [ + { + "bbox": [ + 114, + 711, + 169, + 724 + ], + "score": 1.0, + "content": "hours, 2015.", + "type": "text" + } + ], + "index": 41 + } + ], + "index": 20.5 + } + ], + "page_idx": 10, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 300, + 741, + 311, + 751 + ], + "lines": [ + { + "bbox": [ + 299, + 740, + 312, + 754 + ], + "spans": [ + { + "bbox": [ + 299, + 740, + 312, + 754 + ], + "score": 1.0, + "content": "11", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "list", + "bbox": [ + 104, + 72, + 507, + 726 + ], + "lines": [], + "index": 20.5, + "bbox_fs": [ + 103, + 72, + 508, + 724 + ], + "lines_deleted": true + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 103, + 68, + 507, + 513 + ], + "lines": [ + { + "bbox": [ + 105, + 73, + 505, + 87 + ], + "spans": [ + { + "bbox": [ + 105, + 73, + 505, + 87 + ], + "score": 1.0, + "content": "H. Ha and S. Song. Flingbot: The unreasonable effectiveness of dynamic manipulation for cloth", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 115, + 84, + 315, + 98 + ], + "spans": [ + { + "bbox": [ + 115, + 84, + 315, + 98 + ], + "score": 1.0, + "content": "unfolding. 
Conference on Robot Learning, 2021.", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 103, + 505, + 119 + ], + "spans": [ + { + "bbox": [ + 104, + 103, + 505, + 119 + ], + "score": 1.0, + "content": "S. James and A. J. Davison. Q-attention: Enabling efficient learning for vision-based robotic", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 115, + 116, + 200, + 129 + ], + "spans": [ + { + "bbox": [ + 115, + 116, + 200, + 129 + ], + "score": 1.0, + "content": "manipulation, 2021.", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 134, + 505, + 150 + ], + "spans": [ + { + "bbox": [ + 104, + 134, + 505, + 150 + ], + "score": 1.0, + "content": "E. Tzeng, C. Devin, J. Hoffman, C. Finn, P. Abbeel, S. Levine, K. Saenko, and T. Darrell. Adapting", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 115, + 147, + 402, + 162 + ], + "spans": [ + { + "bbox": [ + 115, + 147, + 402, + 162 + ], + "score": 1.0, + "content": "deep visuomotor representations with weak pairwise constraints, 2015.", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 166, + 506, + 181 + ], + "spans": [ + { + "bbox": [ + 104, + 166, + 506, + 181 + ], + "score": 1.0, + "content": "I. Akkaya, M. Andrychowicz, M. Chociej, M. Litwin, B. McGrew, A. Petron, A. Paino, M. Plappert,", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 115, + 178, + 507, + 192 + ], + "spans": [ + { + "bbox": [ + 115, + 178, + 507, + 192 + ], + "score": 1.0, + "content": "G. Powell, R. Ribas, et al. Solving rubik’s cube with a robot hand. 
arXiv preprint arXiv:1910.07113,", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 115, + 190, + 142, + 203 + ], + "spans": [ + { + "bbox": [ + 115, + 190, + 142, + 203 + ], + "score": 1.0, + "content": "2019.", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 208, + 505, + 223 + ], + "spans": [ + { + "bbox": [ + 105, + 208, + 505, + 223 + ], + "score": 1.0, + "content": "M. P. Deisenroth, G. Neumann, J. Peters, et al. A survey on policy search for robotics. Foundations", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 115, + 221, + 298, + 235 + ], + "spans": [ + { + "bbox": [ + 115, + 221, + 298, + 235 + ], + "score": 1.0, + "content": "and Trends in Robotics, 2(1–2):1–142, 2013.", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 241, + 506, + 255 + ], + "spans": [ + { + "bbox": [ + 105, + 241, + 506, + 255 + ], + "score": 1.0, + "content": "K. Chua, R. Calandra, R. McAllister, and S. Levine. Deep reinforcement learning in a handful of", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 114, + 252, + 507, + 268 + ], + "spans": [ + { + "bbox": [ + 114, + 252, + 507, + 268 + ], + "score": 1.0, + "content": "trials using probabilistic dynamics models. In Advances in Neural Information Processing Systems,", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 114, + 264, + 218, + 278 + ], + "spans": [ + { + "bbox": [ + 114, + 264, + 218, + 278 + ], + "score": 1.0, + "content": "pages 4754–4765, 2018.", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 282, + 506, + 299 + ], + "spans": [ + { + "bbox": [ + 104, + 282, + 506, + 299 + ], + "score": 1.0, + "content": "A. Nagabandi, G. Yang, T. Asmar, R. Pandya, G. Kahn, S. Levine, and R. S. Fearing. 
Learning", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 114, + 295, + 487, + 310 + ], + "spans": [ + { + "bbox": [ + 114, + 295, + 487, + 310 + ], + "score": 1.0, + "content": "image-conditioned dynamics models for control of under-actuated legged millirobots, 2017.", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 314, + 505, + 329 + ], + "spans": [ + { + "bbox": [ + 104, + 314, + 505, + 329 + ], + "score": 1.0, + "content": "P. Becker-Ehmck, M. Karl, J. Peters, and P. van der Smagt. Learning to fly via deep model-based", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 114, + 327, + 376, + 340 + ], + "spans": [ + { + "bbox": [ + 114, + 327, + 376, + 340 + ], + "score": 1.0, + "content": "reinforcement learning. arXiv preprint arXiv:2003.08876, 2020.", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 345, + 505, + 360 + ], + "spans": [ + { + "bbox": [ + 104, + 345, + 505, + 360 + ], + "score": 1.0, + "content": "F. Deng, I. Jang, and S. Ahn. Dreamerpro: Reconstruction-free model-based reinforcement learning", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 115, + 358, + 416, + 372 + ], + "spans": [ + { + "bbox": [ + 115, + 358, + 416, + 372 + ], + "score": 1.0, + "content": "with prototypical representations. arXiv preprint arXiv:2110.14565, 2021.", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 377, + 505, + 392 + ], + "spans": [ + { + "bbox": [ + 105, + 377, + 505, + 392 + ], + "score": 1.0, + "content": "M. Okada and T. Taniguchi. Dreaming: Model-based reinforcement learning by latent imagination", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 114, + 389, + 506, + 403 + ], + "spans": [ + { + "bbox": [ + 114, + 389, + 506, + 403 + ], + "score": 1.0, + "content": "without reconstruction. 
In 2021 IEEE International Conference on Robotics and Automation", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 115, + 402, + 275, + 414 + ], + "spans": [ + { + "bbox": [ + 115, + 402, + 275, + 414 + ], + "score": 1.0, + "content": "(ICRA), pages 4209–4215. IEEE, 2021.", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 420, + 505, + 435 + ], + "spans": [ + { + "bbox": [ + 105, + 420, + 505, + 435 + ], + "score": 1.0, + "content": "H. Bharadhwaj, M. Babaeizadeh, D. Erhan, and S. Levine. Information prioritization through", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 115, + 433, + 439, + 446 + ], + "spans": [ + { + "bbox": [ + 115, + 433, + 439, + 446 + ], + "score": 1.0, + "content": "empowerment in visual model-based rl. arXiv preprint arXiv:2204.08585, 2022.", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 452, + 505, + 467 + ], + "spans": [ + { + "bbox": [ + 105, + 452, + 505, + 467 + ], + "score": 1.0, + "content": "K. Paster, L. E. McKinney, S. A. McIlraith, and J. Ba. Blast: Latent dynamics models from", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 115, + 464, + 355, + 478 + ], + "spans": [ + { + "bbox": [ + 115, + 464, + 355, + 478 + ], + "score": 1.0, + "content": "bootstrapping. In Deep RL Workshop NeurIPS 2021, 2021.", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 482, + 506, + 499 + ], + "spans": [ + { + "bbox": [ + 105, + 482, + 506, + 499 + ], + "score": 1.0, + "content": "K. Hsu, M. J. Kim, R. Rafailov, J. Wu, and C. Finn. Vision-based manipulators need to also see from", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 115, + 495, + 387, + 509 + ], + "spans": [ + { + "bbox": [ + 115, + 495, + 387, + 509 + ], + "score": 1.0, + "content": "their hands, 2022. 
URL https://arxiv.org/abs/2203.12677.", + "type": "text" + } + ], + "index": 28 + } + ], + "index": 14 + } + ], + "page_idx": 11, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 300, + 741, + 311, + 750 + ], + "lines": [ + { + "bbox": [ + 299, + 740, + 312, + 754 + ], + "spans": [ + { + "bbox": [ + 299, + 740, + 312, + 754 + ], + "score": 1.0, + "content": "12", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "list", + "bbox": [ + 103, + 68, + 507, + 513 + ], + "lines": [], + "index": 14, + "bbox_fs": [ + 104, + 73, + 507, + 509 + ], + "lines_deleted": true + } + ] + }, + { + "preproc_blocks": [ + { + "type": "title", + "bbox": [ + 107, + 72, + 186, + 85 + ], + "lines": [ + { + "bbox": [ + 105, + 70, + 188, + 88 + ], + "spans": [ + { + "bbox": [ + 105, + 70, + 188, + 88 + ], + "score": 1.0, + "content": "A Adaptation", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 107, + 93, + 505, + 152 + ], + "lines": [ + { + "bbox": [ + 105, + 93, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 93, + 505, + 106 + ], + "score": 1.0, + "content": "Real world robot learning faces practical challenges such as changing environmental conditions", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 105, + 505, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 105, + 505, + 118 + ], + "score": 1.0, + "content": "and time varying dynamics. We found that Dreamer is able to adapt to the current environmental", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 117, + 505, + 129 + ], + "spans": [ + { + "bbox": [ + 105, + 117, + 505, + 129 + ], + "score": 1.0, + "content": "conditions with no change to the learning algorithm. 
This shows promise for using Dreamer in", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 128, + 505, + 142 + ], + "spans": [ + { + "bbox": [ + 105, + 128, + 505, + 142 + ], + "score": 1.0, + "content": "continual learning settings (Parisi et al., 2019). Adaptation of the quadruped to external perturbations", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 141, + 264, + 153 + ], + "spans": [ + { + "bbox": [ + 105, + 141, + 264, + 153 + ], + "score": 1.0, + "content": "is reported in Section 3.1 and Figure 8.", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 3 + }, + { + "type": "text", + "bbox": [ + 106, + 156, + 506, + 250 + ], + "lines": [ + { + "bbox": [ + 105, + 156, + 505, + 169 + ], + "spans": [ + { + "bbox": [ + 105, + 156, + 505, + 169 + ], + "score": 1.0, + "content": "The XArm, situated near large windows, is able to adapt and maintain performance under the presence", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 169, + 505, + 180 + ], + "spans": [ + { + "bbox": [ + 106, + 169, + 505, + 180 + ], + "score": 1.0, + "content": "of changing lighting conditions. The XArm experiments were conducted after sundown to keep the", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 180, + 506, + 193 + ], + "spans": [ + { + "bbox": [ + 105, + 180, + 506, + 193 + ], + "score": 1.0, + "content": "lighting conditions constant throughout training. Figure A.1 shows the learning curve of the XArm.", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 192, + 505, + 204 + ], + "spans": [ + { + "bbox": [ + 106, + 192, + 505, + 204 + ], + "score": 1.0, + "content": "As expected, the performance of the XArm drops during sunrise. 
However, the XArm is able to", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 203, + 506, + 217 + ], + "spans": [ + { + "bbox": [ + 105, + 203, + 506, + 217 + ], + "score": 1.0, + "content": "adapt to the change in lighting conditions in about 5 hours time and recover the original performance,", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 214, + 505, + 227 + ], + "spans": [ + { + "bbox": [ + 105, + 214, + 505, + 227 + ], + "score": 1.0, + "content": "which is faster than it would be to train from scratch. A careful inspection of the image observations", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 226, + 505, + 239 + ], + "spans": [ + { + "bbox": [ + 105, + 226, + 505, + 239 + ], + "score": 1.0, + "content": "at these times, as shown in Figure A.1, reveals that the robot received observations with strong light", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 238, + 446, + 252 + ], + "spans": [ + { + "bbox": [ + 105, + 238, + 446, + 252 + ], + "score": 1.0, + "content": "rays covering the scene which greatly differs from the original training observations.", + "type": "text" + } + ], + "index": 13 + } + ], + "index": 9.5 + }, + { + "type": "image", + "bbox": [ + 106, + 259, + 498, + 349 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 106, + 259, + 498, + 349 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 106, + 259, + 498, + 349 + ], + "spans": [ + { + "bbox": [ + 106, + 259, + 498, + 349 + ], + "score": 0.962, + "type": "image", + "image_path": "db6cf431ae9355646aa06c810c30e311d8db38009707b4dea4bad788085ac2bb.jpg" + } + ] + } + ], + "index": 15, + "virtual_lines": [ + { + "bbox": [ + 106, + 259, + 498, + 289.0 + ], + "spans": [], + "index": 14 + }, + { + "bbox": [ + 106, + 289.0, + 498, + 319.0 + ], + "spans": [], + "index": 15 + }, + { + "bbox": [ + 106, + 319.0, + 498, + 349.0 + ], + "spans": [], + "index": 16 + } + ] + }, + { + "type": 
"image_caption", + "bbox": [ + 106, + 355, + 506, + 412 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 355, + 505, + 369 + ], + "spans": [ + { + "bbox": [ + 105, + 355, + 505, + 369 + ], + "score": 1.0, + "content": "Figure A.1: The left two images are raw observations consumed by Dreamer. The leftmost image is", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 367, + 506, + 380 + ], + "spans": [ + { + "bbox": [ + 105, + 367, + 506, + 380 + ], + "score": 1.0, + "content": "an image observation as seen by the XArm at night, when it was trained. The next image shows an", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 378, + 507, + 392 + ], + "spans": [ + { + "bbox": [ + 105, + 378, + 507, + 392 + ], + "score": 1.0, + "content": "observation during sunrise. Despite the vast difference in pixel space, the XArm is able to recover,", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 389, + 505, + 403 + ], + "spans": [ + { + "bbox": [ + 105, + 389, + 505, + 403 + ], + "score": 1.0, + "content": "and then surpass, the original performance in approximately 5 hours. 
Even after 24 hours when the", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 400, + 438, + 414 + ], + "spans": [ + { + "bbox": [ + 106, + 400, + 438, + 414 + ], + "score": 1.0, + "content": "lighting shifts to night time conditions, the XArm is able to maintain performance.", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 19 + } + ], + "index": 17.0 + }, + { + "type": "title", + "bbox": [ + 107, + 432, + 190, + 446 + ], + "lines": [ + { + "bbox": [ + 104, + 430, + 192, + 449 + ], + "spans": [ + { + "bbox": [ + 104, + 430, + 192, + 449 + ], + "score": 1.0, + "content": "B Imagination", + "type": "text" + } + ], + "index": 22 + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 110, + 458, + 501, + 650 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 110, + 458, + 501, + 650 + ], + "group_id": 1, + "lines": [ + { + "bbox": [ + 110, + 458, + 501, + 650 + ], + "spans": [ + { + "bbox": [ + 110, + 458, + 501, + 650 + ], + "score": 0.98, + "type": "image", + "image_path": "24fe88bf92baa43778d9defa3450750bc0d2c910fa9c12c5902630d7c2316e1e.jpg" + } + ] + } + ], + "index": 24, + "virtual_lines": [ + { + "bbox": [ + 110, + 458, + 501, + 522.0 + ], + "spans": [], + "index": 23 + }, + { + "bbox": [ + 110, + 522.0, + 501, + 586.0 + ], + "spans": [], + "index": 24 + }, + { + "bbox": [ + 110, + 586.0, + 501, + 650.0 + ], + "spans": [], + "index": 25 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 656, + 506, + 713 + ], + "group_id": 1, + "lines": [ + { + "bbox": [ + 105, + 656, + 505, + 669 + ], + "spans": [ + { + "bbox": [ + 105, + 656, + 505, + 669 + ], + "score": 1.0, + "content": "Figure B.1: To introspect the policy, we can roll out trajectories in the latent space of Dreamer, then", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 104, + 664, + 506, + 683 + ], + "spans": [ + { + "bbox": [ + 104, + 664, + 506, + 683 + ], + "score": 1.0, + "content": "decode the images to visualize the 
intent of the actor network. Each row is an imagined trajectory,", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 106, + 678, + 505, + 692 + ], + "spans": [ + { + "bbox": [ + 106, + 678, + 505, + 692 + ], + "score": 1.0, + "content": "showing every 2nd frame. Top: Latent rollouts on the UR5 environment. Multiple objects introduce", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 690, + 506, + 703 + ], + "spans": [ + { + "bbox": [ + 105, + 690, + 506, + 703 + ], + "score": 1.0, + "content": "more visual complexity that the network has to model. Note the second trajectory, which shows a", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 106, + 701, + 479, + 713 + ], + "spans": [ + { + "bbox": [ + 106, + 701, + 479, + 713 + ], + "score": 1.0, + "content": "static orange ball becoming a green ball. Bottom: Latent rollouts on the XArm environment.", + "type": "text" + } + ], + "index": 30 + } + ], + "index": 28 + } + ], + "index": 26.0 + } + ], + "page_idx": 12, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 300, + 741, + 311, + 750 + ], + "lines": [ + { + "bbox": [ + 299, + 740, + 312, + 754 + ], + "spans": [ + { + "bbox": [ + 299, + 740, + 312, + 754 + ], + "score": 1.0, + "content": "13", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "title", + "bbox": [ + 107, + 72, + 186, + 85 + ], + "lines": [ + { + "bbox": [ + 105, + 70, + 188, + 88 + ], + "spans": [ + { + "bbox": [ + 105, + 70, + 188, + 88 + ], + "score": 1.0, + "content": "A Adaptation", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 107, + 93, + 505, + 152 + ], + "lines": [ + { + "bbox": [ + 105, + 93, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 93, + 505, + 106 + ], + "score": 1.0, + "content": "Real world robot learning faces practical challenges such as changing environmental conditions", + "type": "text" + } 
+ ], + "index": 1 + }, + { + "bbox": [ + 105, + 105, + 505, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 105, + 505, + 118 + ], + "score": 1.0, + "content": "and time varying dynamics. We found that Dreamer is able to adapt to the current environmental", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 117, + 505, + 129 + ], + "spans": [ + { + "bbox": [ + 105, + 117, + 505, + 129 + ], + "score": 1.0, + "content": "conditions with no change to the learning algorithm. This shows promise for using Dreamer in", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 128, + 505, + 142 + ], + "spans": [ + { + "bbox": [ + 105, + 128, + 505, + 142 + ], + "score": 1.0, + "content": "continual learning settings (Parisi et al., 2019). Adaptation of the quadruped to external perturbations", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 141, + 264, + 153 + ], + "spans": [ + { + "bbox": [ + 105, + 141, + 264, + 153 + ], + "score": 1.0, + "content": "is reported in Section 3.1 and Figure 8.", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 3, + "bbox_fs": [ + 105, + 93, + 505, + 153 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 156, + 506, + 250 + ], + "lines": [ + { + "bbox": [ + 105, + 156, + 505, + 169 + ], + "spans": [ + { + "bbox": [ + 105, + 156, + 505, + 169 + ], + "score": 1.0, + "content": "The XArm, situated near large windows, is able to adapt and maintain performance under the presence", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 169, + 505, + 180 + ], + "spans": [ + { + "bbox": [ + 106, + 169, + 505, + 180 + ], + "score": 1.0, + "content": "of changing lighting conditions. The XArm experiments were conducted after sundown to keep the", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 180, + 506, + 193 + ], + "spans": [ + { + "bbox": [ + 105, + 180, + 506, + 193 + ], + "score": 1.0, + "content": "lighting conditions constant throughout training. 
Figure A.1 shows the learning curve of the XArm.", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 192, + 505, + 204 + ], + "spans": [ + { + "bbox": [ + 106, + 192, + 505, + 204 + ], + "score": 1.0, + "content": "As expected, the performance of the XArm drops during sunrise. However, the XArm is able to", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 203, + 506, + 217 + ], + "spans": [ + { + "bbox": [ + 105, + 203, + 506, + 217 + ], + "score": 1.0, + "content": "adapt to the change in lighting conditions in about 5 hours time and recover the original performance,", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 214, + 505, + 227 + ], + "spans": [ + { + "bbox": [ + 105, + 214, + 505, + 227 + ], + "score": 1.0, + "content": "which is faster than it would be to train from scratch. A careful inspection of the image observations", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 226, + 505, + 239 + ], + "spans": [ + { + "bbox": [ + 105, + 226, + 505, + 239 + ], + "score": 1.0, + "content": "at these times, as shown in Figure A.1, reveals that the robot received observations with strong light", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 238, + 446, + 252 + ], + "spans": [ + { + "bbox": [ + 105, + 238, + 446, + 252 + ], + "score": 1.0, + "content": "rays covering the scene which greatly differs from the original training observations.", + "type": "text" + } + ], + "index": 13 + } + ], + "index": 9.5, + "bbox_fs": [ + 105, + 156, + 506, + 252 + ] + }, + { + "type": "image", + "bbox": [ + 106, + 259, + 498, + 349 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 106, + 259, + 498, + 349 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 106, + 259, + 498, + 349 + ], + "spans": [ + { + "bbox": [ + 106, + 259, + 498, + 349 + ], + "score": 0.962, + "type": "image", + "image_path": "db6cf431ae9355646aa06c810c30e311d8db38009707b4dea4bad788085ac2bb.jpg" 
+ } + ] + } + ], + "index": 15, + "virtual_lines": [ + { + "bbox": [ + 106, + 259, + 498, + 289.0 + ], + "spans": [], + "index": 14 + }, + { + "bbox": [ + 106, + 289.0, + 498, + 319.0 + ], + "spans": [], + "index": 15 + }, + { + "bbox": [ + 106, + 319.0, + 498, + 349.0 + ], + "spans": [], + "index": 16 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 355, + 506, + 412 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 355, + 505, + 369 + ], + "spans": [ + { + "bbox": [ + 105, + 355, + 505, + 369 + ], + "score": 1.0, + "content": "Figure A.1: The left two images are raw observations consumed by Dreamer. The leftmost image is", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 367, + 506, + 380 + ], + "spans": [ + { + "bbox": [ + 105, + 367, + 506, + 380 + ], + "score": 1.0, + "content": "an image observation as seen by the XArm at night, when it was trained. The next image shows an", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 378, + 507, + 392 + ], + "spans": [ + { + "bbox": [ + 105, + 378, + 507, + 392 + ], + "score": 1.0, + "content": "observation during sunrise. Despite the vast difference in pixel space, the XArm is able to recover,", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 389, + 505, + 403 + ], + "spans": [ + { + "bbox": [ + 105, + 389, + 505, + 403 + ], + "score": 1.0, + "content": "and then surpass, the original performance in approximately 5 hours. 
Even after 24 hours when the", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 400, + 438, + 414 + ], + "spans": [ + { + "bbox": [ + 106, + 400, + 438, + 414 + ], + "score": 1.0, + "content": "lighting shifts to night time conditions, the XArm is able to maintain performance.", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 19 + } + ], + "index": 17.0 + }, + { + "type": "title", + "bbox": [ + 107, + 432, + 190, + 446 + ], + "lines": [ + { + "bbox": [ + 104, + 430, + 192, + 449 + ], + "spans": [ + { + "bbox": [ + 104, + 430, + 192, + 449 + ], + "score": 1.0, + "content": "B Imagination", + "type": "text" + } + ], + "index": 22 + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 110, + 458, + 501, + 650 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 110, + 458, + 501, + 650 + ], + "group_id": 1, + "lines": [ + { + "bbox": [ + 110, + 458, + 501, + 650 + ], + "spans": [ + { + "bbox": [ + 110, + 458, + 501, + 650 + ], + "score": 0.98, + "type": "image", + "image_path": "24fe88bf92baa43778d9defa3450750bc0d2c910fa9c12c5902630d7c2316e1e.jpg" + } + ] + } + ], + "index": 24, + "virtual_lines": [ + { + "bbox": [ + 110, + 458, + 501, + 522.0 + ], + "spans": [], + "index": 23 + }, + { + "bbox": [ + 110, + 522.0, + 501, + 586.0 + ], + "spans": [], + "index": 24 + }, + { + "bbox": [ + 110, + 586.0, + 501, + 650.0 + ], + "spans": [], + "index": 25 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 656, + 506, + 713 + ], + "group_id": 1, + "lines": [ + { + "bbox": [ + 105, + 656, + 505, + 669 + ], + "spans": [ + { + "bbox": [ + 105, + 656, + 505, + 669 + ], + "score": 1.0, + "content": "Figure B.1: To introspect the policy, we can roll out trajectories in the latent space of Dreamer, then", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 104, + 664, + 506, + 683 + ], + "spans": [ + { + "bbox": [ + 104, + 664, + 506, + 683 + ], + "score": 1.0, + "content": "decode the images to visualize the 
intent of the actor network. Each row is an imagined trajectory,", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 106, + 678, + 505, + 692 + ], + "spans": [ + { + "bbox": [ + 106, + 678, + 505, + 692 + ], + "score": 1.0, + "content": "showing every 2nd frame. Top: Latent rollouts on the UR5 environment. Multiple objects introduce", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 690, + 506, + 703 + ], + "spans": [ + { + "bbox": [ + 105, + 690, + 506, + 703 + ], + "score": 1.0, + "content": "more visual complexity that the network has to model. Note the second trajectory, which shows a", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 106, + 701, + 479, + 713 + ], + "spans": [ + { + "bbox": [ + 106, + 701, + 479, + 713 + ], + "score": 1.0, + "content": "static orange ball becoming a green ball. Bottom: Latent rollouts on the XArm environment.", + "type": "text" + } + ], + "index": 30 + } + ], + "index": 28 + } + ], + "index": 26.0 + } + ] + }, + { + "preproc_blocks": [ + { + "type": "title", + "bbox": [ + 107, + 71, + 245, + 85 + ], + "lines": [ + { + "bbox": [ + 106, + 70, + 247, + 87 + ], + "spans": [ + { + "bbox": [ + 106, + 70, + 247, + 87 + ], + "score": 1.0, + "content": "C Detailed Related Work", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 106, + 93, + 505, + 315 + ], + "lines": [ + { + "bbox": [ + 105, + 92, + 505, + 107 + ], + "spans": [ + { + "bbox": [ + 105, + 92, + 505, + 107 + ], + "score": 1.0, + "content": "RL for locomotion A common approach is to train RL agents from large amounts of simulated data", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 105, + 506, + 117 + ], + "spans": [ + { + "bbox": [ + 105, + 105, + 506, + 117 + ], + "score": 1.0, + "content": "under domain and dynamics randomization (Peng et al., 2018; Lee et al., 2020; Rudin et al., 2021;", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 
106, + 117, + 506, + 129 + ], + "spans": [ + { + "bbox": [ + 106, + 117, + 506, + 129 + ], + "score": 1.0, + "content": "Siekmann et al., 2021; Escontrela et al., 2022; Miki et al., 2022; Kumar et al., 2021; Rusu et al., 2016;", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 129, + 506, + 141 + ], + "spans": [ + { + "bbox": [ + 106, + 129, + 506, + 141 + ], + "score": 1.0, + "content": "Bohez et al., 2022), then freezing the learned policy and deploying it to the real world. Smith et al.", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 139, + 506, + 154 + ], + "spans": [ + { + "bbox": [ + 105, + 139, + 506, + 154 + ], + "score": 1.0, + "content": "(2021) explored pre-training policies in simulation and fine-tuning them with real world data. Yang", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 152, + 505, + 164 + ], + "spans": [ + { + "bbox": [ + 105, + 152, + 505, + 164 + ], + "score": 1.0, + "content": "et al. (2019) investigate learning a dynamics model using a multi-step loss and using model predictive", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 164, + 505, + 176 + ], + "spans": [ + { + "bbox": [ + 106, + 164, + 505, + 176 + ], + "score": 1.0, + "content": "control to accomplish a specified task. Yang et al. (2022) train locomotion policies in the real world", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 176, + 505, + 187 + ], + "spans": [ + { + "bbox": [ + 106, + 176, + 505, + 187 + ], + "score": 1.0, + "content": "but require a recovery controller trained in simulation to avoid unsafe states. In contrast, we use no", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 187, + 505, + 200 + ], + "spans": [ + { + "bbox": [ + 106, + 187, + 505, + 200 + ], + "score": 1.0, + "content": "simulators or reset policies and directly train on the physical robot. 
While prior work in locomotion", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 199, + 505, + 211 + ], + "spans": [ + { + "bbox": [ + 106, + 199, + 505, + 211 + ], + "score": 1.0, + "content": "has successfully learned walking behaviors in the real world, these works generally required several", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 211, + 505, + 223 + ], + "spans": [ + { + "bbox": [ + 106, + 211, + 505, + 223 + ], + "score": 1.0, + "content": "domain-specific assumptions or pretraining with simulators. Ha et al. (2020) achieved successful", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 223, + 505, + 234 + ], + "spans": [ + { + "bbox": [ + 106, + 223, + 505, + 234 + ], + "score": 1.0, + "content": "walking on the Minitaur robot in 90 minutes. However, the authors manually programmed a reset", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 234, + 506, + 246 + ], + "spans": [ + { + "bbox": [ + 106, + 234, + 506, + 246 + ], + "score": 1.0, + "content": "policy that was used when the robot fell on its back, while in our work the robot must learn to flip over", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 245, + 505, + 258 + ], + "spans": [ + { + "bbox": [ + 106, + 245, + 505, + 258 + ], + "score": 1.0, + "content": "and stand up. Additionally, the Minitaur robot is simpler than the A1 as it has 8 actuators compared", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 257, + 505, + 269 + ], + "spans": [ + { + "bbox": [ + 105, + 257, + 505, + 269 + ], + "score": 1.0, + "content": "to 12 on the A1. In recent work, Smith et al. (2022) utilize a high update-to-data ratio (UTD) RL", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 269, + 505, + 282 + ], + "spans": [ + { + "bbox": [ + 106, + 269, + 505, + 282 + ], + "score": 1.0, + "content": "algorithm to learn walking from 20 minutes of robot training data. 
However, their work assumes", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 280, + 505, + 293 + ], + "spans": [ + { + "bbox": [ + 106, + 280, + 505, + 293 + ], + "score": 1.0, + "content": "the availability of a reset policy and therefore comprises of a different learning problem compared", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 106, + 293, + 505, + 305 + ], + "spans": [ + { + "bbox": [ + 106, + 293, + 505, + 305 + ], + "score": 1.0, + "content": "to the problem we tackle of learning to flip over and walk from scratch. Additionally, we show our", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 106, + 304, + 439, + 316 + ], + "spans": [ + { + "bbox": [ + 106, + 304, + 439, + 316 + ], + "score": 1.0, + "content": "approach generalizes to environments with image observations and sparse rewards.", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 10 + }, + { + "type": "text", + "bbox": [ + 106, + 320, + 505, + 506 + ], + "lines": [ + { + "bbox": [ + 106, + 320, + 505, + 333 + ], + "spans": [ + { + "bbox": [ + 106, + 320, + 505, + 333 + ], + "score": 1.0, + "content": "RL for manipulation Learning promises to enable robot manipulators to solve contact rich tasks", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 331, + 505, + 344 + ], + "spans": [ + { + "bbox": [ + 105, + 331, + 505, + 344 + ], + "score": 1.0, + "content": "in open real world environments. 
One class of methods attempts to scale up experience collection", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 343, + 506, + 356 + ], + "spans": [ + { + "bbox": [ + 105, + 343, + 506, + 356 + ], + "score": 1.0, + "content": "through a fleet of robots (Kalashnikov et al., 2018; 2021; Ebert et al., 2021; Dasari et al., 2019;", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 354, + 505, + 368 + ], + "spans": [ + { + "bbox": [ + 105, + 354, + 505, + 368 + ], + "score": 1.0, + "content": "Levine et al., 2018). In contrast, we only leverage one robot, but parallelize an agent’s experience", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 367, + 505, + 380 + ], + "spans": [ + { + "bbox": [ + 106, + 367, + 505, + 380 + ], + "score": 1.0, + "content": "by using the learned world model. Another common approach is to leverage expert demonstrations", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 378, + 506, + 391 + ], + "spans": [ + { + "bbox": [ + 105, + 378, + 506, + 391 + ], + "score": 1.0, + "content": "or other task priors (Pinto and Gupta, 2015; Ha and Song, 2021; Xie et al., 2019; Schoettler et al.,", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 106, + 390, + 505, + 402 + ], + "spans": [ + { + "bbox": [ + 106, + 390, + 505, + 402 + ], + "score": 1.0, + "content": "2019; Sivakumar et al., 2022). James and Davison (2021); James et al. (2021) leverages a few", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 401, + 506, + 415 + ], + "spans": [ + { + "bbox": [ + 105, + 401, + 506, + 415 + ], + "score": 1.0, + "content": "demonstrations to increase the sample-efficiency of Q learning by focusing the learner on important", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 414, + 506, + 426 + ], + "spans": [ + { + "bbox": [ + 105, + 414, + 506, + 426 + ], + "score": 1.0, + "content": "aspects of the scene. 
Other approaches, as in locomotion, first utilize a simulator, then transfer to", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 425, + 506, + 437 + ], + "spans": [ + { + "bbox": [ + 106, + 425, + 506, + 437 + ], + "score": 1.0, + "content": "the real world (Tzeng et al., 2015; Akkaya et al., 2019; OpenAI et al., 2018; Irpan et al., 2020). Our", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 106, + 437, + 506, + 449 + ], + "spans": [ + { + "bbox": [ + 106, + 437, + 506, + 449 + ], + "score": 1.0, + "content": "work focuses on single-robot environments where the agent must learn through a small amount of", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 448, + 505, + 461 + ], + "spans": [ + { + "bbox": [ + 105, + 448, + 505, + 461 + ], + "score": 1.0, + "content": "interaction with the world. Meanwhile, the Google Arm Farm line of work by Levine et al. leverages", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 106, + 460, + 505, + 471 + ], + "spans": [ + { + "bbox": [ + 106, + 461, + 126, + 471 + ], + "score": 1.0, + "content": "over", + "type": "text" + }, + { + "bbox": [ + 126, + 460, + 147, + 471 + ], + "score": 0.49, + "content": "5 8 0 \\mathrm { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 147, + 461, + 505, + 471 + ], + "score": 1.0, + "content": "grasp attempts gathered by 7 robots and collected over 4 months. We believe that a method", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 106, + 472, + 505, + 484 + ], + "spans": [ + { + "bbox": [ + 106, + 472, + 505, + 484 + ], + "score": 1.0, + "content": "such as Dreamer could benefit greatly from this scale of training data, however it is unlikely that", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 106, + 484, + 504, + 495 + ], + "spans": [ + { + "bbox": [ + 106, + 484, + 504, + 495 + ], + "score": 1.0, + "content": "works such as MT-OPT/QT-OPT Kalashnikov et al. 
(2018; 2021) would work well in the low data", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 106, + 496, + 231, + 507 + ], + "spans": [ + { + "bbox": [ + 106, + 496, + 231, + 507 + ], + "score": 1.0, + "content": "regime that Dreamer excels in.", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 27.5 + }, + { + "type": "text", + "bbox": [ + 106, + 511, + 506, + 663 + ], + "lines": [ + { + "bbox": [ + 106, + 510, + 505, + 523 + ], + "spans": [ + { + "bbox": [ + 106, + 510, + 505, + 523 + ], + "score": 1.0, + "content": "Model-based RL Due to its higher sample-efficiency over model-free methods, model-based RL", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 523, + 505, + 534 + ], + "spans": [ + { + "bbox": [ + 105, + 523, + 505, + 534 + ], + "score": 1.0, + "content": "is a promising approach to learning on real world robots (Deisenroth et al., 2013). A model based", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 534, + 506, + 547 + ], + "spans": [ + { + "bbox": [ + 105, + 534, + 506, + 547 + ], + "score": 1.0, + "content": "method first learns a dynamics model, which can then be used to plan actions (Nagabandi et al., 2019;", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 106, + 546, + 505, + 558 + ], + "spans": [ + { + "bbox": [ + 106, + 546, + 505, + 558 + ], + "score": 1.0, + "content": "Hafner et al., 2018; Chua et al., 2018; Nagabandi et al., 2017; Becker-Ehmck et al., 2020), or be used", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 558, + 505, + 570 + ], + "spans": [ + { + "bbox": [ + 105, + 558, + 505, + 570 + ], + "score": 1.0, + "content": "as a simulator to learn a policy network as in Dreamer (Hafner et al., 2019; 2020). 
One approach to", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 569, + 506, + 582 + ], + "spans": [ + { + "bbox": [ + 105, + 569, + 506, + 582 + ], + "score": 1.0, + "content": "tackle the high visual complexity of the world is to learn an action conditioned video prediction model", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 106, + 582, + 505, + 593 + ], + "spans": [ + { + "bbox": [ + 106, + 582, + 505, + 593 + ], + "score": 1.0, + "content": "(Finn and Levine, 2017; Ebert et al., 2018; Finn et al., 2016). One downside of this approach is the", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 592, + 506, + 605 + ], + "spans": [ + { + "bbox": [ + 105, + 592, + 506, + 605 + ], + "score": 1.0, + "content": "need to directly predict high dimensional observations, which can be computationally inefficient and", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 605, + 506, + 617 + ], + "spans": [ + { + "bbox": [ + 105, + 605, + 506, + 617 + ], + "score": 1.0, + "content": "easily drift. Dreamer learns a dynamics model in a latent space, allowing more efficient rollouts and", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 106, + 617, + 505, + 628 + ], + "spans": [ + { + "bbox": [ + 106, + 617, + 505, + 628 + ], + "score": 1.0, + "content": "avoids relying on high quality visual reconstructions for the policy. 
Another line of work proposes", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 628, + 505, + 640 + ], + "spans": [ + { + "bbox": [ + 105, + 628, + 505, + 640 + ], + "score": 1.0, + "content": "to learn latent dynamics models without having to reconstruct inputs (Deng et al., 2021; Okada and", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 106, + 640, + 505, + 651 + ], + "spans": [ + { + "bbox": [ + 106, + 640, + 505, + 651 + ], + "score": 1.0, + "content": "Taniguchi, 2021; Bharadhwaj et al., 2022; Paster et al., 2021), which we see as a promising approach", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 106, + 651, + 353, + 663 + ], + "spans": [ + { + "bbox": [ + 106, + 651, + 353, + 663 + ], + "score": 1.0, + "content": "for supporting moving view points in cluttered environments.", + "type": "text" + } + ], + "index": 48 + } + ], + "index": 42 + } + ], + "page_idx": 13, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 300, + 741, + 311, + 750 + ], + "lines": [ + { + "bbox": [ + 299, + 740, + 313, + 754 + ], + "spans": [ + { + "bbox": [ + 299, + 740, + 313, + 754 + ], + "score": 1.0, + "content": "14", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "title", + "bbox": [ + 107, + 71, + 245, + 85 + ], + "lines": [ + { + "bbox": [ + 106, + 70, + 247, + 87 + ], + "spans": [ + { + "bbox": [ + 106, + 70, + 247, + 87 + ], + "score": 1.0, + "content": "C Detailed Related Work", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 106, + 93, + 505, + 315 + ], + "lines": [ + { + "bbox": [ + 105, + 92, + 505, + 107 + ], + "spans": [ + { + "bbox": [ + 105, + 92, + 505, + 107 + ], + "score": 1.0, + "content": "RL for locomotion A common approach is to train RL agents from large amounts of simulated data", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 105, + 506, + 117 + ], + 
"spans": [ + { + "bbox": [ + 105, + 105, + 506, + 117 + ], + "score": 1.0, + "content": "under domain and dynamics randomization (Peng et al., 2018; Lee et al., 2020; Rudin et al., 2021;", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 117, + 506, + 129 + ], + "spans": [ + { + "bbox": [ + 106, + 117, + 506, + 129 + ], + "score": 1.0, + "content": "Siekmann et al., 2021; Escontrela et al., 2022; Miki et al., 2022; Kumar et al., 2021; Rusu et al., 2016;", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 129, + 506, + 141 + ], + "spans": [ + { + "bbox": [ + 106, + 129, + 506, + 141 + ], + "score": 1.0, + "content": "Bohez et al., 2022), then freezing the learned policy and deploying it to the real world. Smith et al.", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 139, + 506, + 154 + ], + "spans": [ + { + "bbox": [ + 105, + 139, + 506, + 154 + ], + "score": 1.0, + "content": "(2021) explored pre-training policies in simulation and fine-tuning them with real world data. Yang", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 152, + 505, + 164 + ], + "spans": [ + { + "bbox": [ + 105, + 152, + 505, + 164 + ], + "score": 1.0, + "content": "et al. (2019) investigate learning a dynamics model using a multi-step loss and using model predictive", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 164, + 505, + 176 + ], + "spans": [ + { + "bbox": [ + 106, + 164, + 505, + 176 + ], + "score": 1.0, + "content": "control to accomplish a specified task. Yang et al. (2022) train locomotion policies in the real world", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 176, + 505, + 187 + ], + "spans": [ + { + "bbox": [ + 106, + 176, + 505, + 187 + ], + "score": 1.0, + "content": "but require a recovery controller trained in simulation to avoid unsafe states. 
In contrast, we use no", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 187, + 505, + 200 + ], + "spans": [ + { + "bbox": [ + 106, + 187, + 505, + 200 + ], + "score": 1.0, + "content": "simulators or reset policies and directly train on the physical robot. While prior work in locomotion", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 199, + 505, + 211 + ], + "spans": [ + { + "bbox": [ + 106, + 199, + 505, + 211 + ], + "score": 1.0, + "content": "has successfully learned walking behaviors in the real world, these works generally required several", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 211, + 505, + 223 + ], + "spans": [ + { + "bbox": [ + 106, + 211, + 505, + 223 + ], + "score": 1.0, + "content": "domain-specific assumptions or pretraining with simulators. Ha et al. (2020) achieved successful", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 223, + 505, + 234 + ], + "spans": [ + { + "bbox": [ + 106, + 223, + 505, + 234 + ], + "score": 1.0, + "content": "walking on the Minitaur robot in 90 minutes. However, the authors manually programmed a reset", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 234, + 506, + 246 + ], + "spans": [ + { + "bbox": [ + 106, + 234, + 506, + 246 + ], + "score": 1.0, + "content": "policy that was used when the robot fell on its back, while in our work the robot must learn to flip over", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 245, + 505, + 258 + ], + "spans": [ + { + "bbox": [ + 106, + 245, + 505, + 258 + ], + "score": 1.0, + "content": "and stand up. Additionally, the Minitaur robot is simpler than the A1 as it has 8 actuators compared", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 257, + 505, + 269 + ], + "spans": [ + { + "bbox": [ + 105, + 257, + 505, + 269 + ], + "score": 1.0, + "content": "to 12 on the A1. In recent work, Smith et al. 
(2022) utilize a high update-to-data ratio (UTD) RL", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 269, + 505, + 282 + ], + "spans": [ + { + "bbox": [ + 106, + 269, + 505, + 282 + ], + "score": 1.0, + "content": "algorithm to learn walking from 20 minutes of robot training data. However, their work assumes", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 280, + 505, + 293 + ], + "spans": [ + { + "bbox": [ + 106, + 280, + 505, + 293 + ], + "score": 1.0, + "content": "the availability of a reset policy and therefore comprises of a different learning problem compared", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 106, + 293, + 505, + 305 + ], + "spans": [ + { + "bbox": [ + 106, + 293, + 505, + 305 + ], + "score": 1.0, + "content": "to the problem we tackle of learning to flip over and walk from scratch. Additionally, we show our", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 106, + 304, + 439, + 316 + ], + "spans": [ + { + "bbox": [ + 106, + 304, + 439, + 316 + ], + "score": 1.0, + "content": "approach generalizes to environments with image observations and sparse rewards.", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 10, + "bbox_fs": [ + 105, + 92, + 506, + 316 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 320, + 505, + 506 + ], + "lines": [ + { + "bbox": [ + 106, + 320, + 505, + 333 + ], + "spans": [ + { + "bbox": [ + 106, + 320, + 505, + 333 + ], + "score": 1.0, + "content": "RL for manipulation Learning promises to enable robot manipulators to solve contact rich tasks", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 331, + 505, + 344 + ], + "spans": [ + { + "bbox": [ + 105, + 331, + 505, + 344 + ], + "score": 1.0, + "content": "in open real world environments. 
One class of methods attempts to scale up experience collection", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 343, + 506, + 356 + ], + "spans": [ + { + "bbox": [ + 105, + 343, + 506, + 356 + ], + "score": 1.0, + "content": "through a fleet of robots (Kalashnikov et al., 2018; 2021; Ebert et al., 2021; Dasari et al., 2019;", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 354, + 505, + 368 + ], + "spans": [ + { + "bbox": [ + 105, + 354, + 505, + 368 + ], + "score": 1.0, + "content": "Levine et al., 2018). In contrast, we only leverage one robot, but parallelize an agent’s experience", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 367, + 505, + 380 + ], + "spans": [ + { + "bbox": [ + 106, + 367, + 505, + 380 + ], + "score": 1.0, + "content": "by using the learned world model. Another common approach is to leverage expert demonstrations", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 378, + 506, + 391 + ], + "spans": [ + { + "bbox": [ + 105, + 378, + 506, + 391 + ], + "score": 1.0, + "content": "or other task priors (Pinto and Gupta, 2015; Ha and Song, 2021; Xie et al., 2019; Schoettler et al.,", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 106, + 390, + 505, + 402 + ], + "spans": [ + { + "bbox": [ + 106, + 390, + 505, + 402 + ], + "score": 1.0, + "content": "2019; Sivakumar et al., 2022). James and Davison (2021); James et al. (2021) leverages a few", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 401, + 506, + 415 + ], + "spans": [ + { + "bbox": [ + 105, + 401, + 506, + 415 + ], + "score": 1.0, + "content": "demonstrations to increase the sample-efficiency of Q learning by focusing the learner on important", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 414, + 506, + 426 + ], + "spans": [ + { + "bbox": [ + 105, + 414, + 506, + 426 + ], + "score": 1.0, + "content": "aspects of the scene. 
Other approaches, as in locomotion, first utilize a simulator, then transfer to", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 425, + 506, + 437 + ], + "spans": [ + { + "bbox": [ + 106, + 425, + 506, + 437 + ], + "score": 1.0, + "content": "the real world (Tzeng et al., 2015; Akkaya et al., 2019; OpenAI et al., 2018; Irpan et al., 2020). Our", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 106, + 437, + 506, + 449 + ], + "spans": [ + { + "bbox": [ + 106, + 437, + 506, + 449 + ], + "score": 1.0, + "content": "work focuses on single-robot environments where the agent must learn through a small amount of", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 448, + 505, + 461 + ], + "spans": [ + { + "bbox": [ + 105, + 448, + 505, + 461 + ], + "score": 1.0, + "content": "interaction with the world. Meanwhile, the Google Arm Farm line of work by Levine et al. leverages", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 106, + 460, + 505, + 471 + ], + "spans": [ + { + "bbox": [ + 106, + 461, + 126, + 471 + ], + "score": 1.0, + "content": "over", + "type": "text" + }, + { + "bbox": [ + 126, + 460, + 147, + 471 + ], + "score": 0.49, + "content": "5 8 0 \\mathrm { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 147, + 461, + 505, + 471 + ], + "score": 1.0, + "content": "grasp attempts gathered by 7 robots and collected over 4 months. We believe that a method", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 106, + 472, + 505, + 484 + ], + "spans": [ + { + "bbox": [ + 106, + 472, + 505, + 484 + ], + "score": 1.0, + "content": "such as Dreamer could benefit greatly from this scale of training data, however it is unlikely that", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 106, + 484, + 504, + 495 + ], + "spans": [ + { + "bbox": [ + 106, + 484, + 504, + 495 + ], + "score": 1.0, + "content": "works such as MT-OPT/QT-OPT Kalashnikov et al. 
(2018; 2021) would work well in the low data", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 106, + 496, + 231, + 507 + ], + "spans": [ + { + "bbox": [ + 106, + 496, + 231, + 507 + ], + "score": 1.0, + "content": "regime that Dreamer excels in.", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 27.5, + "bbox_fs": [ + 105, + 320, + 506, + 507 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 511, + 506, + 663 + ], + "lines": [ + { + "bbox": [ + 106, + 510, + 505, + 523 + ], + "spans": [ + { + "bbox": [ + 106, + 510, + 505, + 523 + ], + "score": 1.0, + "content": "Model-based RL Due to its higher sample-efficiency over model-free methods, model-based RL", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 523, + 505, + 534 + ], + "spans": [ + { + "bbox": [ + 105, + 523, + 505, + 534 + ], + "score": 1.0, + "content": "is a promising approach to learning on real world robots (Deisenroth et al., 2013). A model based", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 534, + 506, + 547 + ], + "spans": [ + { + "bbox": [ + 105, + 534, + 506, + 547 + ], + "score": 1.0, + "content": "method first learns a dynamics model, which can then be used to plan actions (Nagabandi et al., 2019;", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 106, + 546, + 505, + 558 + ], + "spans": [ + { + "bbox": [ + 106, + 546, + 505, + 558 + ], + "score": 1.0, + "content": "Hafner et al., 2018; Chua et al., 2018; Nagabandi et al., 2017; Becker-Ehmck et al., 2020), or be used", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 558, + 505, + 570 + ], + "spans": [ + { + "bbox": [ + 105, + 558, + 505, + 570 + ], + "score": 1.0, + "content": "as a simulator to learn a policy network as in Dreamer (Hafner et al., 2019; 2020). 
One approach to", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 569, + 506, + 582 + ], + "spans": [ + { + "bbox": [ + 105, + 569, + 506, + 582 + ], + "score": 1.0, + "content": "tackle the high visual complexity of the world is to learn an action conditioned video prediction model", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 106, + 582, + 505, + 593 + ], + "spans": [ + { + "bbox": [ + 106, + 582, + 505, + 593 + ], + "score": 1.0, + "content": "(Finn and Levine, 2017; Ebert et al., 2018; Finn et al., 2016). One downside of this approach is the", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 592, + 506, + 605 + ], + "spans": [ + { + "bbox": [ + 105, + 592, + 506, + 605 + ], + "score": 1.0, + "content": "need to directly predict high dimensional observations, which can be computationally inefficient and", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 605, + 506, + 617 + ], + "spans": [ + { + "bbox": [ + 105, + 605, + 506, + 617 + ], + "score": 1.0, + "content": "easily drift. Dreamer learns a dynamics model in a latent space, allowing more efficient rollouts and", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 106, + 617, + 505, + 628 + ], + "spans": [ + { + "bbox": [ + 106, + 617, + 505, + 628 + ], + "score": 1.0, + "content": "avoids relying on high quality visual reconstructions for the policy. 
Another line of work proposes", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 628, + 505, + 640 + ], + "spans": [ + { + "bbox": [ + 105, + 628, + 505, + 640 + ], + "score": 1.0, + "content": "to learn latent dynamics models without having to reconstruct inputs (Deng et al., 2021; Okada and", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 106, + 640, + 505, + 651 + ], + "spans": [ + { + "bbox": [ + 106, + 640, + 505, + 651 + ], + "score": 1.0, + "content": "Taniguchi, 2021; Bharadhwaj et al., 2022; Paster et al., 2021), which we see as a promising approach", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 106, + 651, + 353, + 663 + ], + "spans": [ + { + "bbox": [ + 106, + 651, + 353, + 663 + ], + "score": 1.0, + "content": "for supporting moving view points in cluttered environments.", + "type": "text" + } + ], + "index": 48 + } + ], + "index": 42, + "bbox_fs": [ + 105, + 510, + 506, + 663 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "title", + "bbox": [ + 106, + 71, + 220, + 86 + ], + "lines": [ + { + "bbox": [ + 104, + 69, + 221, + 90 + ], + "spans": [ + { + "bbox": [ + 104, + 69, + 221, + 90 + ], + "score": 1.0, + "content": "D Hyperparameters", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 124, + 102, + 483, + 399 + ], + "blocks": [ + { + "type": "table_body", + "bbox": [ + 124, + 102, + 483, + 399 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 124, + 102, + 483, + 399 + ], + "spans": [ + { + "bbox": [ + 124, + 102, + 483, + 399 + ], + "score": 0.985, + "html": "
NameSymbolValue
General
Replay capacity (FIFO)Start learningBatch sizeBatch lengthMLP sizeActivationBT10610432324Γ— 512LayerNorm+ELU
World Model
RSSM sizeNumber of latentsClasses per latentKL balancing51232320.8
Actor Critic
Imagination horizonDiscountReturn lambdaTarget update intervalH?150.950.95100
All Optimizers
Gradient clippingLearning rateAdam epsilonE10010-410-6
", + "type": "table", + "image_path": "476b14497f73d983953dbe7a34cff454d210243e28b1aa97dafb54ae6a92e42f.jpg" + } + ] + } + ], + "index": 2, + "virtual_lines": [ + { + "bbox": [ + 124, + 102, + 483, + 201.0 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 124, + 201.0, + 483, + 300.0 + ], + "spans": [], + "index": 2 + }, + { + "bbox": [ + 124, + 300.0, + 483, + 399.0 + ], + "spans": [], + "index": 3 + } + ] + } + ], + "index": 2 + }, + { + "type": "title", + "bbox": [ + 106, + 418, + 311, + 433 + ], + "lines": [ + { + "bbox": [ + 105, + 418, + 311, + 433 + ], + "spans": [ + { + "bbox": [ + 105, + 418, + 311, + 433 + ], + "score": 1.0, + "content": "E Environment and Hardware Details", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 4 + }, + { + "type": "text", + "bbox": [ + 108, + 440, + 504, + 464 + ], + "lines": [ + { + "bbox": [ + 106, + 439, + 506, + 453 + ], + "spans": [ + { + "bbox": [ + 106, + 439, + 506, + 453 + ], + "score": 1.0, + "content": "For every robot setup that involved vision (UR5, XArm, Sphero), we used a RealSense D435 camera", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 452, + 330, + 465 + ], + "spans": [ + { + "bbox": [ + 106, + 452, + 330, + 465 + ], + "score": 1.0, + "content": "positioned to offer a fixed 3rd person view of the scene.", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 5.5 + }, + { + "type": "text", + "bbox": [ + 106, + 468, + 505, + 562 + ], + "lines": [ + { + "bbox": [ + 105, + 466, + 505, + 482 + ], + "spans": [ + { + "bbox": [ + 105, + 466, + 505, + 482 + ], + "score": 1.0, + "content": "A1 We used the A1 quadrupedal robot by Unitree. 
The RL policy outputs actions at a frequency", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 480, + 505, + 493 + ], + "spans": [ + { + "bbox": [ + 106, + 480, + 505, + 493 + ], + "score": 1.0, + "content": "that is too high for the PD controller to track, which we overcome by lowpass filtering the action", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 491, + 506, + 505 + ], + "spans": [ + { + "bbox": [ + 105, + 491, + 506, + 505 + ], + "score": 1.0, + "content": "sequence. The joint range allows the legs to self-collide with the body, which can be damaging to", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 503, + 506, + 516 + ], + "spans": [ + { + "bbox": [ + 105, + 503, + 506, + 516 + ], + "score": 1.0, + "content": "the motors and increase battery consumption. We limited the joint range to decrease self-collisions.", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 514, + 505, + 527 + ], + "spans": [ + { + "bbox": [ + 106, + 514, + 505, + 527 + ], + "score": 1.0, + "content": "Finally, the EKF velocity estimator relies on foot-ground contact events to prevent significant drift in", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 527, + 505, + 539 + ], + "spans": [ + { + "bbox": [ + 106, + 527, + 505, + 539 + ], + "score": 1.0, + "content": "the estimates, so we employ a curriculum reward function that does not reward the robot for forward", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 538, + 506, + 551 + ], + "spans": [ + { + "bbox": [ + 106, + 538, + 506, + 551 + ], + "score": 1.0, + "content": "velocity until the robot is upright with extended legs. 
We also designed a shell which we 3D printed", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 550, + 451, + 563 + ], + "spans": [ + { + "bbox": [ + 105, + 550, + 451, + 563 + ], + "score": 1.0, + "content": "in order to better protect the cables and hardware and provide a smoother rolling over.", + "type": "text" + } + ], + "index": 14 + } + ], + "index": 10.5 + }, + { + "type": "text", + "bbox": [ + 106, + 565, + 505, + 671 + ], + "lines": [ + { + "bbox": [ + 106, + 565, + 505, + 578 + ], + "spans": [ + { + "bbox": [ + 106, + 565, + 505, + 578 + ], + "score": 1.0, + "content": "XArm & UR5 We utilized slanted bins to prevent objects from leaving the work area during the", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 578, + 506, + 590 + ], + "spans": [ + { + "bbox": [ + 106, + 578, + 506, + 590 + ], + "score": 1.0, + "content": "long-running pick and place experiments on the UR5, which is common practice Levine et al. (2018);", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 588, + 506, + 602 + ], + "spans": [ + { + "bbox": [ + 105, + 588, + 506, + 602 + ], + "score": 1.0, + "content": "Kalashnikov et al. (2018). We also added a partition behind the setup to keep the background constant.", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 600, + 505, + 615 + ], + "spans": [ + { + "bbox": [ + 105, + 600, + 505, + 615 + ], + "score": 1.0, + "content": "It would be interesting to study how a gripper-mounted camera would impact policy performance", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 106, + 613, + 505, + 625 + ], + "spans": [ + { + "bbox": [ + 106, + 613, + 505, + 625 + ], + "score": 1.0, + "content": "Hsu et al. (2022), however we report strong results without this design choice. 
For the XArm we", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 624, + 505, + 637 + ], + "spans": [ + { + "bbox": [ + 105, + 624, + 505, + 637 + ], + "score": 1.0, + "content": "use the uFactory xArm Gripper. For the UR5, we use the Robotiq 2F-85 parallel jaw gripper. The", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 635, + 505, + 649 + ], + "spans": [ + { + "bbox": [ + 105, + 635, + 505, + 649 + ], + "score": 1.0, + "content": "bin locations are predetermined and provided as part of the environment to prevent the robot from", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 106, + 648, + 505, + 660 + ], + "spans": [ + { + "bbox": [ + 106, + 648, + 316, + 660 + ], + "score": 1.0, + "content": "colliding with the bin. In addition, movement in the", + "type": "text" + }, + { + "bbox": [ + 317, + 648, + 325, + 658 + ], + "score": 0.31, + "content": "\\textsf { Z }", + "type": "inline_equation" + }, + { + "bbox": [ + 325, + 648, + 505, + 660 + ], + "score": 1.0, + "content": "axis is only enabled while holding an object", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 106, + 660, + 354, + 672 + ], + "spans": [ + { + "bbox": [ + 106, + 660, + 354, + 672 + ], + "score": 1.0, + "content": "and the gripper automatically opens once above the other bin.", + "type": "text" + } + ], + "index": 23 + } + ], + "index": 19 + }, + { + "type": "text", + "bbox": [ + 107, + 675, + 504, + 722 + ], + "lines": [ + { + "bbox": [ + 106, + 675, + 505, + 688 + ], + "spans": [ + { + "bbox": [ + 106, + 676, + 285, + 688 + ], + "score": 1.0, + "content": "Sphero We used a rectangular enclosure of", + "type": "text" + }, + { + "bbox": [ + 285, + 675, + 335, + 686 + ], + "score": 0.91, + "content": "0 . 8 \\times 0 . 
8 \\mathrm { { m ^ { 2 } } }", + "type": "inline_equation" + }, + { + "bbox": [ + 336, + 676, + 505, + 688 + ], + "score": 1.0, + "content": "to keep the sphero robot within the camera", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 687, + 506, + 700 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 506, + 700 + ], + "score": 1.0, + "content": "view. We used a simple OpenCV script to estimate the L2 distance between the Sphero and the", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 699, + 506, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 699, + 506, + 712 + ], + "score": 1.0, + "content": "goal position to provide a dense reward for policy optimization. This positional information was not", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 710, + 408, + 725 + ], + "spans": [ + { + "bbox": [ + 105, + 710, + 408, + 725 + ], + "score": 1.0, + "content": "provided to the agent, which it had to learn from the raw top-down images.", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 25.5 + } + ], + "page_idx": 14, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 300, + 741, + 311, + 750 + ], + "lines": [ + { + "bbox": [ + 299, + 740, + 312, + 754 + ], + "spans": [ + { + "bbox": [ + 299, + 740, + 312, + 754 + ], + "score": 1.0, + "content": "15", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "title", + "bbox": [ + 106, + 71, + 220, + 86 + ], + "lines": [ + { + "bbox": [ + 104, + 69, + 221, + 90 + ], + "spans": [ + { + "bbox": [ + 104, + 69, + 221, + 90 + ], + "score": 1.0, + "content": "D Hyperparameters", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 124, + 102, + 483, + 399 + ], + "blocks": [ + { + "type": "table_body", + "bbox": [ + 124, + 102, + 483, + 399 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 124, + 102, + 483, + 399 + ], + "spans": [ + { 
+ "bbox": [ + 124, + 102, + 483, + 399 + ], + "score": 0.985, + "html": "
NameSymbolValue
General
Replay capacity (FIFO)Start learningBatch sizeBatch lengthMLP sizeActivationBT10610432324Γ— 512LayerNorm+ELU
World Model
RSSM sizeNumber of latentsClasses per latentKL balancing51232320.8
Actor Critic
Imagination horizonDiscountReturn lambdaTarget update intervalH?150.950.95100
All Optimizers
Gradient clippingLearning rateAdam epsilonE10010-410-6
", + "type": "table", + "image_path": "476b14497f73d983953dbe7a34cff454d210243e28b1aa97dafb54ae6a92e42f.jpg" + } + ] + } + ], + "index": 2, + "virtual_lines": [ + { + "bbox": [ + 124, + 102, + 483, + 201.0 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 124, + 201.0, + 483, + 300.0 + ], + "spans": [], + "index": 2 + }, + { + "bbox": [ + 124, + 300.0, + 483, + 399.0 + ], + "spans": [], + "index": 3 + } + ] + } + ], + "index": 2 + }, + { + "type": "title", + "bbox": [ + 106, + 418, + 311, + 433 + ], + "lines": [ + { + "bbox": [ + 105, + 418, + 311, + 433 + ], + "spans": [ + { + "bbox": [ + 105, + 418, + 311, + 433 + ], + "score": 1.0, + "content": "E Environment and Hardware Details", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 4 + }, + { + "type": "text", + "bbox": [ + 108, + 440, + 504, + 464 + ], + "lines": [ + { + "bbox": [ + 106, + 439, + 506, + 453 + ], + "spans": [ + { + "bbox": [ + 106, + 439, + 506, + 453 + ], + "score": 1.0, + "content": "For every robot setup that involved vision (UR5, XArm, Sphero), we used a RealSense D435 camera", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 452, + 330, + 465 + ], + "spans": [ + { + "bbox": [ + 106, + 452, + 330, + 465 + ], + "score": 1.0, + "content": "positioned to offer a fixed 3rd person view of the scene.", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 5.5, + "bbox_fs": [ + 106, + 439, + 506, + 465 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 468, + 505, + 562 + ], + "lines": [ + { + "bbox": [ + 105, + 466, + 505, + 482 + ], + "spans": [ + { + "bbox": [ + 105, + 466, + 505, + 482 + ], + "score": 1.0, + "content": "A1 We used the A1 quadrupedal robot by Unitree. 
The RL policy outputs actions at a frequency", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 480, + 505, + 493 + ], + "spans": [ + { + "bbox": [ + 106, + 480, + 505, + 493 + ], + "score": 1.0, + "content": "that is too high for the PD controller to track, which we overcome by lowpass filtering the action", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 491, + 506, + 505 + ], + "spans": [ + { + "bbox": [ + 105, + 491, + 506, + 505 + ], + "score": 1.0, + "content": "sequence. The joint range allows the legs to self-collide with the body, which can be damaging to", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 503, + 506, + 516 + ], + "spans": [ + { + "bbox": [ + 105, + 503, + 506, + 516 + ], + "score": 1.0, + "content": "the motors and increase battery consumption. We limited the joint range to decrease self-collisions.", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 514, + 505, + 527 + ], + "spans": [ + { + "bbox": [ + 106, + 514, + 505, + 527 + ], + "score": 1.0, + "content": "Finally, the EKF velocity estimator relies on foot-ground contact events to prevent significant drift in", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 527, + 505, + 539 + ], + "spans": [ + { + "bbox": [ + 106, + 527, + 505, + 539 + ], + "score": 1.0, + "content": "the estimates, so we employ a curriculum reward function that does not reward the robot for forward", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 538, + 506, + 551 + ], + "spans": [ + { + "bbox": [ + 106, + 538, + 506, + 551 + ], + "score": 1.0, + "content": "velocity until the robot is upright with extended legs. 
We also designed a shell which we 3D printed", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 550, + 451, + 563 + ], + "spans": [ + { + "bbox": [ + 105, + 550, + 451, + 563 + ], + "score": 1.0, + "content": "in order to better protect the cables and hardware and provide a smoother rolling over.", + "type": "text" + } + ], + "index": 14 + } + ], + "index": 10.5, + "bbox_fs": [ + 105, + 466, + 506, + 563 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 565, + 505, + 671 + ], + "lines": [ + { + "bbox": [ + 106, + 565, + 505, + 578 + ], + "spans": [ + { + "bbox": [ + 106, + 565, + 505, + 578 + ], + "score": 1.0, + "content": "XArm & UR5 We utilized slanted bins to prevent objects from leaving the work area during the", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 578, + 506, + 590 + ], + "spans": [ + { + "bbox": [ + 106, + 578, + 506, + 590 + ], + "score": 1.0, + "content": "long-running pick and place experiments on the UR5, which is common practice Levine et al. (2018);", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 588, + 506, + 602 + ], + "spans": [ + { + "bbox": [ + 105, + 588, + 506, + 602 + ], + "score": 1.0, + "content": "Kalashnikov et al. (2018). We also added a partition behind the setup to keep the background constant.", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 600, + 505, + 615 + ], + "spans": [ + { + "bbox": [ + 105, + 600, + 505, + 615 + ], + "score": 1.0, + "content": "It would be interesting to study how a gripper-mounted camera would impact policy performance", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 106, + 613, + 505, + 625 + ], + "spans": [ + { + "bbox": [ + 106, + 613, + 505, + 625 + ], + "score": 1.0, + "content": "Hsu et al. (2022), however we report strong results without this design choice. 
For the XArm we", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 624, + 505, + 637 + ], + "spans": [ + { + "bbox": [ + 105, + 624, + 505, + 637 + ], + "score": 1.0, + "content": "use the uFactory xArm Gripper. For the UR5, we use the Robotiq 2F-85 parallel jaw gripper. The", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 635, + 505, + 649 + ], + "spans": [ + { + "bbox": [ + 105, + 635, + 505, + 649 + ], + "score": 1.0, + "content": "bin locations are predetermined and provided as part of the environment to prevent the robot from", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 106, + 648, + 505, + 660 + ], + "spans": [ + { + "bbox": [ + 106, + 648, + 316, + 660 + ], + "score": 1.0, + "content": "colliding with the bin. In addition, movement in the", + "type": "text" + }, + { + "bbox": [ + 317, + 648, + 325, + 658 + ], + "score": 0.31, + "content": "\\textsf { Z }", + "type": "inline_equation" + }, + { + "bbox": [ + 325, + 648, + 505, + 660 + ], + "score": 1.0, + "content": "axis is only enabled while holding an object", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 106, + 660, + 354, + 672 + ], + "spans": [ + { + "bbox": [ + 106, + 660, + 354, + 672 + ], + "score": 1.0, + "content": "and the gripper automatically opens once above the other bin.", + "type": "text" + } + ], + "index": 23 + } + ], + "index": 19, + "bbox_fs": [ + 105, + 565, + 506, + 672 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 675, + 504, + 722 + ], + "lines": [ + { + "bbox": [ + 106, + 675, + 505, + 688 + ], + "spans": [ + { + "bbox": [ + 106, + 676, + 285, + 688 + ], + "score": 1.0, + "content": "Sphero We used a rectangular enclosure of", + "type": "text" + }, + { + "bbox": [ + 285, + 675, + 335, + 686 + ], + "score": 0.91, + "content": "0 . 8 \\times 0 . 
8 \\mathrm { { m ^ { 2 } } }", + "type": "inline_equation" + }, + { + "bbox": [ + 336, + 676, + 505, + 688 + ], + "score": 1.0, + "content": "to keep the sphero robot within the camera", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 687, + 506, + 700 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 506, + 700 + ], + "score": 1.0, + "content": "view. We used a simple OpenCV script to estimate the L2 distance between the Sphero and the", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 699, + 506, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 699, + 506, + 712 + ], + "score": 1.0, + "content": "goal position to provide a dense reward for policy optimization. This positional information was not", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 710, + 408, + 725 + ], + "spans": [ + { + "bbox": [ + 105, + 710, + 408, + 725 + ], + "score": 1.0, + "content": "provided to the agent, which it had to learn from the raw top-down images.", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 25.5, + "bbox_fs": [ + 105, + 675, + 506, + 725 + ] + } + ] + } + ], + "_backend": "pipeline", + "_version_name": "2.2.2" +} \ No newline at end of file diff --git a/parse/dev/3RBY8fKjHeu/3RBY8fKjHeu_model.json b/parse/dev/3RBY8fKjHeu/3RBY8fKjHeu_model.json new file mode 100644 index 0000000000000000000000000000000000000000..15f598330bbb887c8763a59285f94b7d0d1e2e4d --- /dev/null +++ b/parse/dev/3RBY8fKjHeu/3RBY8fKjHeu_model.json @@ -0,0 +1,14791 @@ +[ + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 397, + 645, + 1303, + 645, + 1303, + 1452, + 397, + 1452 + ], + "score": 0.983 + }, + { + "category_id": 3, + "poly": [ + 298, + 1507, + 1402, + 1507, + 1402, + 1798, + 298, + 1798 + ], + "score": 0.973 + }, + { + "category_id": 0, + "poly": [ + 519, + 222, + 1179, + 222, + 1179, + 337, + 519, + 337 + ], + "score": 0.955 + }, + { + "category_id": 4, + "poly": [ + 296, + 1815, + 1404, + 1815, + 1404, + 
2003, + 296, + 2003 + ], + "score": 0.892 + }, + { + "category_id": 1, + "poly": [ + 654, + 448, + 1047, + 448, + 1047, + 482, + 654, + 482 + ], + "score": 0.871 + }, + { + "category_id": 1, + "poly": [ + 694, + 390, + 1223, + 390, + 1223, + 422, + 694, + 422 + ], + "score": 0.792 + }, + { + "category_id": 1, + "poly": [ + 296, + 2036, + 1048, + 2036, + 1048, + 2066, + 296, + 2066 + ], + "score": 0.675 + }, + { + "category_id": 1, + "poly": [ + 740, + 552, + 959, + 552, + 959, + 582, + 740, + 582 + ], + "score": 0.596 + }, + { + "category_id": 1, + "poly": [ + 662, + 506, + 1038, + 506, + 1038, + 538, + 662, + 538 + ], + "score": 0.574 + }, + { + "category_id": 0, + "poly": [ + 483, + 390, + 634, + 390, + 634, + 424, + 483, + 424 + ], + "score": 0.499 + }, + { + "category_id": 1, + "poly": [ + 483, + 390, + 634, + 390, + 634, + 424, + 483, + 424 + ], + "score": 0.408 + }, + { + "category_id": 2, + "poly": [ + 296, + 2036, + 1048, + 2036, + 1048, + 2066, + 296, + 2066 + ], + "score": 0.309 + }, + { + "category_id": 1, + "poly": [ + 296, + 1815, + 1404, + 1815, + 1404, + 2003, + 296, + 2003 + ], + "score": 0.144 + }, + { + "category_id": 15, + "poly": [ + 1184.0, + 1509.0, + 1326.0, + 1509.0, + 1326.0, + 1545.0, + 1184.0, + 1545.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1246.0, + 1545.0, + 1302.0, + 1545.0, + 1302.0, + 1561.0, + 1246.0, + 1561.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1246.0, + 1645.0, + 1266.0, + 1645.0, + 1266.0, + 1670.0, + 1246.0, + 1670.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1760.0, + 1399.0, + 1760.0, + 1399.0, + 1805.0, + 293.0, + 1805.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 512.0, + 223.0, + 1186.0, + 223.0, + 1186.0, + 277.0, + 512.0, + 277.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 588.0, + 278.0, + 1112.0, + 278.0, + 1112.0, + 344.0, + 
588.0, + 344.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1814.0, + 1405.0, + 1814.0, + 1405.0, + 1850.0, + 294.0, + 1850.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1847.0, + 1405.0, + 1847.0, + 1405.0, + 1879.0, + 296.0, + 1879.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1873.0, + 1405.0, + 1873.0, + 1405.0, + 1915.0, + 291.0, + 1915.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1909.0, + 1406.0, + 1909.0, + 1406.0, + 1945.0, + 295.0, + 1945.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1940.0, + 1406.0, + 1940.0, + 1406.0, + 1976.0, + 294.0, + 1976.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1971.0, + 1308.0, + 1971.0, + 1308.0, + 2008.0, + 293.0, + 2008.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 479.0, + 387.0, + 639.0, + 387.0, + 639.0, + 428.0, + 479.0, + 428.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 2035.0, + 1051.0, + 2035.0, + 1051.0, + 2069.0, + 296.0, + 2069.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 396.0, + 643.0, + 1304.0, + 643.0, + 1304.0, + 676.0, + 396.0, + 676.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 394.0, + 675.0, + 1304.0, + 675.0, + 1304.0, + 712.0, + 394.0, + 712.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 395.0, + 708.0, + 1303.0, + 708.0, + 1303.0, + 741.0, + 395.0, + 741.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 394.0, + 738.0, + 1304.0, + 738.0, + 1304.0, + 776.0, + 394.0, + 776.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 772.0, + 1304.0, + 772.0, + 1304.0, + 806.0, + 393.0, + 806.0 + ], + "score": 1.0, 
+ "text": "" + }, + { + "category_id": 15, + "poly": [ + 394.0, + 804.0, + 1304.0, + 804.0, + 1304.0, + 840.0, + 394.0, + 840.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 394.0, + 837.0, + 1304.0, + 837.0, + 1304.0, + 872.0, + 394.0, + 872.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 394.0, + 868.0, + 1305.0, + 868.0, + 1305.0, + 907.0, + 394.0, + 907.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 902.0, + 1304.0, + 902.0, + 1304.0, + 935.0, + 393.0, + 935.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 395.0, + 936.0, + 1305.0, + 936.0, + 1305.0, + 967.0, + 395.0, + 967.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 394.0, + 969.0, + 1305.0, + 969.0, + 1305.0, + 998.0, + 394.0, + 998.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 999.0, + 1304.0, + 999.0, + 1304.0, + 1034.0, + 393.0, + 1034.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 394.0, + 1034.0, + 1304.0, + 1034.0, + 1304.0, + 1063.0, + 394.0, + 1063.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 394.0, + 1065.0, + 1306.0, + 1065.0, + 1306.0, + 1098.0, + 394.0, + 1098.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 1095.0, + 1305.0, + 1095.0, + 1305.0, + 1131.0, + 393.0, + 1131.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 1129.0, + 1305.0, + 1129.0, + 1305.0, + 1162.0, + 393.0, + 1162.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 396.0, + 1164.0, + 1304.0, + 1164.0, + 1304.0, + 1193.0, + 396.0, + 1193.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 394.0, + 1194.0, + 1304.0, + 1194.0, + 1304.0, + 1227.0, + 394.0, + 1227.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 394.0, + 1228.0, + 1306.0, + 1228.0, + 1306.0, + 1259.0, + 394.0, + 1259.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 392.0, + 1255.0, + 1308.0, + 1255.0, + 1308.0, + 1296.0, + 392.0, + 1296.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 392.0, + 1290.0, + 1306.0, + 1290.0, + 1306.0, + 1327.0, + 392.0, + 1327.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 1325.0, + 1303.0, + 1325.0, + 1303.0, + 1357.0, + 393.0, + 1357.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 1354.0, + 1305.0, + 1354.0, + 1305.0, + 1391.0, + 393.0, + 1391.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 1385.0, + 1304.0, + 1385.0, + 1304.0, + 1425.0, + 393.0, + 1425.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 1419.0, + 1204.0, + 1419.0, + 1204.0, + 1457.0, + 393.0, + 1457.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 648.0, + 442.0, + 832.0, + 442.0, + 832.0, + 489.0, + 648.0, + 489.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 878.0, + 446.0, + 1050.0, + 446.0, + 1050.0, + 484.0, + 878.0, + 484.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 691.0, + 387.0, + 968.0, + 387.0, + 968.0, + 430.0, + 691.0, + 430.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1016.0, + 388.0, + 1227.0, + 388.0, + 1227.0, + 427.0, + 1016.0, + 427.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 2035.0, + 1051.0, + 2035.0, + 1051.0, + 2069.0, + 296.0, + 2069.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 734.0, + 547.0, + 965.0, + 547.0, + 965.0, + 588.0, + 734.0, + 588.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 
656.0, + 500.0, + 1042.0, + 500.0, + 1042.0, + 546.0, + 656.0, + 546.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 479.0, + 387.0, + 639.0, + 387.0, + 639.0, + 428.0, + 479.0, + 428.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1814.0, + 1405.0, + 1814.0, + 1405.0, + 1850.0, + 294.0, + 1850.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1847.0, + 1405.0, + 1847.0, + 1405.0, + 1879.0, + 296.0, + 1879.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1873.0, + 1405.0, + 1873.0, + 1405.0, + 1915.0, + 291.0, + 1915.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1909.0, + 1406.0, + 1909.0, + 1406.0, + 1945.0, + 295.0, + 1945.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1940.0, + 1406.0, + 1940.0, + 1406.0, + 1976.0, + 294.0, + 1976.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1971.0, + 1308.0, + 1971.0, + 1308.0, + 2008.0, + 293.0, + 2008.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 0, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 299, + 280, + 937, + 280, + 937, + 700, + 299, + 700 + ], + "score": 0.982 + }, + { + "category_id": 1, + "poly": [ + 297, + 1210, + 1405, + 1210, + 1405, + 1503, + 297, + 1503 + ], + "score": 0.979 + }, + { + "category_id": 3, + "poly": [ + 957, + 286, + 1406, + 286, + 1406, + 648, + 957, + 648 + ], + "score": 0.973 + }, + { + "category_id": 4, + "poly": [ + 957, + 675, + 1404, + 675, + 1404, + 1081, + 957, + 1081 + ], + "score": 0.971 + }, + { + "category_id": 1, + "poly": [ + 298, + 730, + 936, + 730, + 936, + 1182, + 298, + 1182 + ], + "score": 0.952 + }, + { + "category_id": 0, + "poly": [ + 299, + 200, + 530, + 200, + 530, + 236, + 299, + 236 + ], + "score": 0.904 + }, 
+ { + "category_id": 2, + "poly": [ + 841, + 2062, + 858, + 2062, + 858, + 2085, + 841, + 2085 + ], + "score": 0.723 + }, + { + "category_id": 1, + "poly": [ + 295, + 1538, + 1408, + 1538, + 1408, + 1994, + 295, + 1994 + ], + "score": 0.628 + }, + { + "category_id": 2, + "poly": [ + 841, + 2061, + 859, + 2061, + 859, + 2085, + 841, + 2085 + ], + "score": 0.102 + }, + { + "category_id": 15, + "poly": [ + 1018.0, + 288.0, + 1150.0, + 288.0, + 1150.0, + 317.0, + 1018.0, + 317.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1204.0, + 285.0, + 1370.0, + 285.0, + 1370.0, + 319.0, + 1204.0, + 319.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1013.0, + 617.0, + 1156.0, + 617.0, + 1156.0, + 653.0, + 1013.0, + 653.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1208.0, + 618.0, + 1363.0, + 618.0, + 1363.0, + 651.0, + 1208.0, + 651.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 955.0, + 673.0, + 1404.0, + 673.0, + 1404.0, + 709.0, + 955.0, + 709.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 954.0, + 706.0, + 1406.0, + 706.0, + 1406.0, + 740.0, + 954.0, + 740.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 955.0, + 737.0, + 1407.0, + 737.0, + 1407.0, + 769.0, + 955.0, + 769.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 954.0, + 768.0, + 1406.0, + 768.0, + 1406.0, + 802.0, + 954.0, + 802.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 954.0, + 800.0, + 1406.0, + 800.0, + 1406.0, + 831.0, + 954.0, + 831.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 953.0, + 831.0, + 1405.0, + 831.0, + 1405.0, + 864.0, + 953.0, + 864.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 953.0, + 860.0, + 1406.0, + 860.0, + 1406.0, + 897.0, + 953.0, + 897.0 + ], + "score": 1.0, + 
"text": "" + }, + { + "category_id": 15, + "poly": [ + 955.0, + 893.0, + 1406.0, + 893.0, + 1406.0, + 926.0, + 955.0, + 926.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 955.0, + 924.0, + 1408.0, + 924.0, + 1408.0, + 958.0, + 955.0, + 958.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 953.0, + 956.0, + 1405.0, + 956.0, + 1405.0, + 989.0, + 953.0, + 989.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 954.0, + 986.0, + 1406.0, + 986.0, + 1406.0, + 1019.0, + 954.0, + 1019.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 953.0, + 1018.0, + 1410.0, + 1018.0, + 1410.0, + 1049.0, + 953.0, + 1049.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 954.0, + 1047.0, + 1125.0, + 1047.0, + 1125.0, + 1086.0, + 954.0, + 1086.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 196.0, + 536.0, + 196.0, + 536.0, + 243.0, + 292.0, + 243.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 839.0, + 2059.0, + 862.0, + 2059.0, + 862.0, + 2094.0, + 839.0, + 2094.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 838.0, + 2058.0, + 862.0, + 2058.0, + 862.0, + 2093.0, + 838.0, + 2093.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 281.0, + 936.0, + 281.0, + 936.0, + 312.0, + 296.0, + 312.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 312.0, + 938.0, + 312.0, + 938.0, + 345.0, + 294.0, + 345.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 347.0, + 937.0, + 347.0, + 937.0, + 378.0, + 295.0, + 378.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 378.0, + 937.0, + 378.0, + 937.0, + 410.0, + 295.0, + 410.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 
297.0, + 413.0, + 934.0, + 413.0, + 934.0, + 441.0, + 297.0, + 441.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 443.0, + 937.0, + 443.0, + 937.0, + 474.0, + 294.0, + 474.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 476.0, + 935.0, + 476.0, + 935.0, + 507.0, + 296.0, + 507.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 508.0, + 936.0, + 508.0, + 936.0, + 540.0, + 295.0, + 540.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 539.0, + 939.0, + 539.0, + 939.0, + 573.0, + 295.0, + 573.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 571.0, + 939.0, + 571.0, + 939.0, + 606.0, + 295.0, + 606.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 605.0, + 937.0, + 605.0, + 937.0, + 636.0, + 297.0, + 636.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 638.0, + 937.0, + 638.0, + 937.0, + 668.0, + 296.0, + 668.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 671.0, + 625.0, + 671.0, + 625.0, + 701.0, + 296.0, + 701.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1209.0, + 1405.0, + 1209.0, + 1405.0, + 1248.0, + 294.0, + 1248.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1241.0, + 1408.0, + 1241.0, + 1408.0, + 1280.0, + 294.0, + 1280.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1279.0, + 1405.0, + 1279.0, + 1405.0, + 1310.0, + 296.0, + 1310.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1309.0, + 1405.0, + 1309.0, + 1405.0, + 1343.0, + 295.0, + 1343.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1342.0, + 1406.0, + 1342.0, + 1406.0, + 1376.0, + 
295.0, + 1376.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1375.0, + 1403.0, + 1375.0, + 1403.0, + 1406.0, + 295.0, + 1406.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1407.0, + 1403.0, + 1407.0, + 1403.0, + 1438.0, + 295.0, + 1438.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1439.0, + 1406.0, + 1439.0, + 1406.0, + 1473.0, + 295.0, + 1473.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1471.0, + 1109.0, + 1471.0, + 1109.0, + 1505.0, + 295.0, + 1505.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 724.0, + 941.0, + 724.0, + 941.0, + 766.0, + 293.0, + 766.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 762.0, + 936.0, + 762.0, + 936.0, + 796.0, + 296.0, + 796.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 795.0, + 939.0, + 795.0, + 939.0, + 827.0, + 295.0, + 827.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 826.0, + 939.0, + 826.0, + 939.0, + 858.0, + 295.0, + 858.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 858.0, + 940.0, + 858.0, + 940.0, + 891.0, + 294.0, + 891.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 890.0, + 937.0, + 890.0, + 937.0, + 924.0, + 295.0, + 924.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 925.0, + 937.0, + 925.0, + 937.0, + 956.0, + 295.0, + 956.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 955.0, + 937.0, + 955.0, + 937.0, + 988.0, + 294.0, + 988.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 989.0, + 937.0, + 989.0, + 937.0, + 1021.0, + 295.0, + 1021.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 296.0, + 1022.0, + 936.0, + 1022.0, + 936.0, + 1053.0, + 296.0, + 1053.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1052.0, + 936.0, + 1052.0, + 936.0, + 1086.0, + 295.0, + 1086.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1087.0, + 935.0, + 1087.0, + 935.0, + 1118.0, + 296.0, + 1118.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1118.0, + 939.0, + 1118.0, + 939.0, + 1150.0, + 296.0, + 1150.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1152.0, + 692.0, + 1152.0, + 692.0, + 1183.0, + 297.0, + 1183.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1539.0, + 1402.0, + 1539.0, + 1402.0, + 1574.0, + 297.0, + 1574.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1569.0, + 1408.0, + 1569.0, + 1408.0, + 1607.0, + 321.0, + 1607.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1603.0, + 1170.0, + 1603.0, + 1170.0, + 1638.0, + 322.0, + 1638.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1648.0, + 1407.0, + 1648.0, + 1407.0, + 1683.0, + 297.0, + 1683.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1679.0, + 1405.0, + 1679.0, + 1405.0, + 1714.0, + 322.0, + 1714.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1714.0, + 1271.0, + 1714.0, + 1271.0, + 1745.0, + 322.0, + 1745.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1755.0, + 1406.0, + 1755.0, + 1406.0, + 1791.0, + 296.0, + 1791.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1790.0, + 1404.0, + 1790.0, + 1404.0, + 1824.0, + 322.0, + 1824.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + 
"poly": [ + 320.0, + 1817.0, + 1405.0, + 1817.0, + 1405.0, + 1858.0, + 320.0, + 1858.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1854.0, + 966.0, + 1854.0, + 966.0, + 1888.0, + 321.0, + 1888.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 298.0, + 1898.0, + 1405.0, + 1898.0, + 1405.0, + 1932.0, + 298.0, + 1932.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1930.0, + 1406.0, + 1930.0, + 1406.0, + 1965.0, + 321.0, + 1965.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 1965.0, + 1007.0, + 1965.0, + 1007.0, + 1996.0, + 323.0, + 1996.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 1, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 297, + 1048, + 1405, + 1048, + 1405, + 1309, + 297, + 1309 + ], + "score": 0.982 + }, + { + "category_id": 1, + "poly": [ + 296, + 1654, + 1406, + 1654, + 1406, + 1980, + 296, + 1980 + ], + "score": 0.981 + }, + { + "category_id": 1, + "poly": [ + 297, + 1320, + 1405, + 1320, + 1405, + 1549, + 297, + 1549 + ], + "score": 0.979 + }, + { + "category_id": 3, + "poly": [ + 291, + 147, + 1407, + 147, + 1407, + 602, + 291, + 602 + ], + "score": 0.972 + }, + { + "category_id": 0, + "poly": [ + 298, + 987, + 492, + 987, + 492, + 1027, + 298, + 1027 + ], + "score": 0.91 + }, + { + "category_id": 1, + "poly": [ + 299, + 841, + 1407, + 841, + 1407, + 964, + 299, + 964 + ], + "score": 0.84 + }, + { + "category_id": 9, + "poly": [ + 1366, + 1588, + 1400, + 1588, + 1400, + 1617, + 1366, + 1617 + ], + "score": 0.82 + }, + { + "category_id": 4, + "poly": [ + 295, + 619, + 1407, + 619, + 1407, + 839, + 295, + 839 + ], + "score": 0.763 + }, + { + "category_id": 8, + "poly": [ + 322, + 1562, + 1330, + 1562, + 1330, + 1642, + 322, + 1642 + ], + "score": 0.693 + }, + { + "category_id": 2, + "poly": [ + 841, + 2061, + 859, + 
2061, + 859, + 2085, + 841, + 2085 + ], + "score": 0.677 + }, + { + "category_id": 2, + "poly": [ + 841, + 2061, + 859, + 2061, + 859, + 2085, + 841, + 2085 + ], + "score": 0.292 + }, + { + "category_id": 1, + "poly": [ + 322, + 1562, + 1330, + 1562, + 1330, + 1642, + 322, + 1642 + ], + "score": 0.246 + }, + { + "category_id": 13, + "poly": [ + 297, + 1786, + 326, + 1786, + 326, + 1816, + 297, + 1816 + ], + "score": 0.86, + "latex": "h _ { t }" + }, + { + "category_id": 13, + "poly": [ + 804, + 1725, + 832, + 1725, + 832, + 1751, + 804, + 1751 + ], + "score": 0.84, + "latex": "x _ { t }" + }, + { + "category_id": 13, + "poly": [ + 1316, + 1725, + 1341, + 1725, + 1341, + 1751, + 1316, + 1751 + ], + "score": 0.84, + "latex": "z _ { t }" + }, + { + "category_id": 14, + "poly": [ + 561, + 1560, + 1341, + 1560, + 1341, + 1645, + 561, + 1645 + ], + "score": 0.74, + "latex": "{ \\begin{array} { r l r l } & { \\operatorname { e n c } _ { \\theta } { \\big ( } s _ { t } \\ { \\big | } \\ s _ { t - 1 } , a _ { t - 1 } , x _ { t } { \\big ) } } & & { { \\mathrm { D e c o d e r ~ N e t w o r k : } } \\quad \\operatorname* { d e c } _ { \\theta } { \\big ( } s _ { t } { \\big ) } \\approx x _ { t } } \\\\ & { \\operatorname { d y n } _ { \\theta } { \\big ( } s _ { t } \\ { \\big | } \\ s _ { t - 1 } , a _ { t - 1 } { \\big ) } } & & { { \\mathrm { R e w a r d ~ N e t w o r k : } } \\quad \\operatorname { r e w } _ { \\theta } { \\big ( } s _ { t + 1 } { \\big ) } \\approx r _ { t } } \\end{array} }" + }, + { + "category_id": 13, + "poly": [ + 581, + 1606, + 826, + 1606, + 826, + 1642, + 581, + 1642 + ], + "score": 0.69, + "latex": "\\mathrm { d y n } _ { \\theta } \\big ( s _ { t } \\ \\vert \\ s _ { t - 1 } , a _ { t - 1 } \\big )" + }, + { + "category_id": 13, + "poly": [ + 580, + 1563, + 862, + 1563, + 862, + 1600, + 580, + 1600 + ], + "score": 0.41, + "latex": "\\mathrm { e n c } _ { \\theta } \\big ( s _ { t } \\ \\big | \\ s _ { t - 1 } , a _ { t - 1 } , x _ { t } \\big 
)" + }, + { + "category_id": 15, + "poly": [ + 1366.0, + 160.0, + 1377.0, + 160.0, + 1377.0, + 173.0, + 1366.0, + 173.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1137.0, + 169.0, + 1150.0, + 169.0, + 1150.0, + 179.0, + 1137.0, + 179.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 369.0, + 246.0, + 393.0, + 246.0, + 393.0, + 267.0, + 369.0, + 267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 360.0, + 309.0, + 408.0, + 309.0, + 408.0, + 347.0, + 360.0, + 347.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 517.0, + 300.0, + 602.0, + 300.0, + 602.0, + 351.0, + 517.0, + 351.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 948.0, + 324.0, + 958.0, + 324.0, + 958.0, + 336.0, + 948.0, + 336.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 396.0, + 361.0, + 396.0, + 361.0, + 418.0, + 318.0, + 418.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 406.0, + 389.0, + 454.0, + 389.0, + 454.0, + 420.0, + 406.0, + 420.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 510.0, + 396.0, + 553.0, + 396.0, + 553.0, + 418.0, + 510.0, + 418.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 596.0, + 389.0, + 645.0, + 389.0, + 645.0, + 420.0, + 596.0, + 420.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 700.0, + 396.0, + 743.0, + 396.0, + 743.0, + 418.0, + 700.0, + 418.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 786.0, + 389.0, + 835.0, + 389.0, + 835.0, + 420.0, + 786.0, + 420.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 974.0, + 397.0, + 1017.0, + 397.0, + 1017.0, + 419.0, + 974.0, + 419.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 313.0, + 476.0, + 331.0, + 
476.0, + 331.0, + 492.0, + 313.0, + 492.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 401.0, + 475.0, + 423.0, + 475.0, + 423.0, + 493.0, + 401.0, + 493.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 501.0, + 475.0, + 525.0, + 475.0, + 525.0, + 493.0, + 501.0, + 493.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 592.0, + 476.0, + 610.0, + 476.0, + 610.0, + 492.0, + 592.0, + 492.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 693.0, + 476.0, + 711.0, + 476.0, + 711.0, + 492.0, + 693.0, + 492.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 783.0, + 476.0, + 801.0, + 476.0, + 801.0, + 492.0, + 783.0, + 492.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 969.0, + 479.0, + 986.0, + 479.0, + 986.0, + 493.0, + 969.0, + 493.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 317.0, + 492.0, + 327.0, + 492.0, + 327.0, + 506.0, + 317.0, + 506.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 407.0, + 493.0, + 417.0, + 493.0, + 417.0, + 506.0, + 407.0, + 506.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 504.0, + 490.0, + 520.0, + 490.0, + 520.0, + 507.0, + 504.0, + 507.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 596.0, + 493.0, + 607.0, + 493.0, + 607.0, + 506.0, + 596.0, + 506.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 697.0, + 493.0, + 708.0, + 493.0, + 708.0, + 506.0, + 697.0, + 506.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 784.0, + 490.0, + 798.0, + 490.0, + 798.0, + 507.0, + 784.0, + 507.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 501.0, + 507.0, + 521.0, + 507.0, + 521.0, + 521.0, + 501.0, + 521.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 593.0, + 510.0, + 608.0, + 510.0, + 608.0, + 519.0, + 593.0, + 519.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 784.0, + 510.0, + 798.0, + 510.0, + 798.0, + 519.0, + 784.0, + 519.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 327.0, + 531.0, + 359.0, + 531.0, + 359.0, + 563.0, + 327.0, + 563.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 417.0, + 527.0, + 449.0, + 527.0, + 449.0, + 562.0, + 417.0, + 562.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 517.0, + 531.0, + 549.0, + 531.0, + 549.0, + 563.0, + 517.0, + 563.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 605.0, + 526.0, + 641.0, + 526.0, + 641.0, + 564.0, + 605.0, + 564.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 707.0, + 532.0, + 739.0, + 532.0, + 739.0, + 564.0, + 707.0, + 564.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 796.0, + 526.0, + 831.0, + 526.0, + 831.0, + 564.0, + 796.0, + 564.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 985.0, + 533.0, + 1012.0, + 533.0, + 1012.0, + 560.0, + 985.0, + 560.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 435.0, + 565.0, + 714.0, + 565.0, + 714.0, + 609.0, + 435.0, + 609.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1052.0, + 568.0, + 1286.0, + 568.0, + 1286.0, + 608.0, + 1052.0, + 608.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1029.75, + 158.0, + 1052.75, + 158.0, + 1052.75, + 181.0, + 1029.75, + 181.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1176.75, + 161.5, + 1193.75, + 161.5, + 1193.75, + 177.5, + 1176.75, + 177.5 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1220.25, + 158.0, + 1241.25, + 
158.0, + 1241.25, + 181.5, + 1220.25, + 181.5 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 609.0, + 172.0, + 625.0, + 172.0, + 625.0, + 190.5, + 609.0, + 190.5 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1325.0, + 163.0, + 1332.0, + 163.0, + 1332.0, + 176.0, + 1325.0, + 176.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 406.0, + 167.5, + 442.0, + 167.5, + 442.0, + 194.0, + 406.0, + 194.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 564.0, + 247.0, + 582.0, + 247.0, + 582.0, + 271.0, + 564.0, + 271.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 759.0, + 252.5, + 769.0, + 252.5, + 769.0, + 266.0, + 759.0, + 266.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 991.0, + 250.5, + 999.0, + 250.5, + 999.0, + 265.0, + 991.0, + 265.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1180.0, + 250.0, + 1188.0, + 250.0, + 1188.0, + 266.0, + 1180.0, + 266.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1368.25, + 250.0, + 1377.25, + 250.0, + 1377.25, + 267.0, + 1368.25, + 267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 335.25, + 322.5, + 349.25, + 322.5, + 349.25, + 341.5, + 335.25, + 341.5 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 719.75, + 324.0, + 728.75, + 324.0, + 728.75, + 343.0, + 719.75, + 343.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1137.0, + 323.0, + 1151.0, + 323.0, + 1151.0, + 342.0, + 1137.0, + 342.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 289.0, + 982.0, + 496.0, + 982.0, + 496.0, + 1035.0, + 289.0, + 1035.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 618.0, + 1408.0, + 618.0, + 1408.0, + 656.0, + 294.0, + 656.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 649.0, + 1409.0, + 649.0, + 1409.0, + 687.0, + 294.0, + 687.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 681.0, + 1405.0, + 681.0, + 1405.0, + 717.0, + 296.0, + 717.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 711.0, + 1405.0, + 711.0, + 1405.0, + 749.0, + 294.0, + 749.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 743.0, + 1407.0, + 743.0, + 1407.0, + 780.0, + 293.0, + 780.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 778.0, + 1405.0, + 778.0, + 1405.0, + 809.0, + 296.0, + 809.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 806.0, + 1409.0, + 806.0, + 1409.0, + 843.0, + 294.0, + 843.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 838.0, + 2058.0, + 862.0, + 2058.0, + 862.0, + 2091.0, + 838.0, + 2091.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 838.0, + 2058.0, + 862.0, + 2058.0, + 862.0, + 2091.0, + 838.0, + 2091.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1047.0, + 1408.0, + 1047.0, + 1408.0, + 1087.0, + 294.0, + 1087.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1083.0, + 1407.0, + 1083.0, + 1407.0, + 1118.0, + 295.0, + 1118.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1115.0, + 1405.0, + 1115.0, + 1405.0, + 1150.0, + 295.0, + 1150.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1147.0, + 1403.0, + 1147.0, + 1403.0, + 1182.0, + 295.0, + 1182.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1180.0, + 1405.0, + 1180.0, + 1405.0, + 1215.0, + 295.0, + 1215.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 294.0, + 1211.0, + 1403.0, + 1211.0, + 1403.0, + 1246.0, + 294.0, + 1246.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1246.0, + 1403.0, + 1246.0, + 1403.0, + 1278.0, + 296.0, + 1278.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1278.0, + 1103.0, + 1278.0, + 1103.0, + 1311.0, + 294.0, + 1311.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1654.0, + 1406.0, + 1654.0, + 1406.0, + 1691.0, + 294.0, + 1691.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1685.0, + 1409.0, + 1685.0, + 1409.0, + 1724.0, + 291.0, + 1724.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1719.0, + 803.0, + 1719.0, + 803.0, + 1755.0, + 293.0, + 1755.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 833.0, + 1719.0, + 1315.0, + 1719.0, + 1315.0, + 1755.0, + 833.0, + 1755.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1342.0, + 1719.0, + 1404.0, + 1719.0, + 1404.0, + 1755.0, + 1342.0, + 1755.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1752.0, + 1404.0, + 1752.0, + 1404.0, + 1788.0, + 295.0, + 1788.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 327.0, + 1784.0, + 1404.0, + 1784.0, + 1404.0, + 1821.0, + 327.0, + 1821.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1818.0, + 1406.0, + 1818.0, + 1406.0, + 1852.0, + 296.0, + 1852.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1845.0, + 1406.0, + 1845.0, + 1406.0, + 1887.0, + 294.0, + 1887.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1885.0, + 1403.0, + 1885.0, + 1403.0, + 1915.0, + 296.0, + 1915.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 295.0, + 1915.0, + 1407.0, + 1915.0, + 1407.0, + 1949.0, + 295.0, + 1949.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1946.0, + 546.0, + 1946.0, + 546.0, + 1980.0, + 294.0, + 1980.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1319.0, + 1405.0, + 1319.0, + 1405.0, + 1356.0, + 296.0, + 1356.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1354.0, + 1405.0, + 1354.0, + 1405.0, + 1391.0, + 294.0, + 1391.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1387.0, + 1405.0, + 1387.0, + 1405.0, + 1422.0, + 294.0, + 1422.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1419.0, + 1407.0, + 1419.0, + 1407.0, + 1455.0, + 294.0, + 1455.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1452.0, + 1403.0, + 1452.0, + 1403.0, + 1485.0, + 296.0, + 1485.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1486.0, + 1403.0, + 1486.0, + 1403.0, + 1515.0, + 295.0, + 1515.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1518.0, + 1357.0, + 1518.0, + 1357.0, + 1550.0, + 296.0, + 1550.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 836.0, + 1407.0, + 836.0, + 1407.0, + 872.0, + 294.0, + 872.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 870.0, + 1409.0, + 870.0, + 1409.0, + 903.0, + 294.0, + 903.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 901.0, + 1405.0, + 901.0, + 1405.0, + 934.0, + 296.0, + 934.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 934.0, + 500.0, + 934.0, + 500.0, + 962.0, + 293.0, + 962.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ 
+ 326.0, + 1559.0, + 560.0, + 1559.0, + 560.0, + 1603.0, + 326.0, + 1603.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 326.0, + 1600.0, + 560.0, + 1600.0, + 560.0, + 1646.0, + 326.0, + 1646.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 2, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 297, + 693, + 1405, + 693, + 1405, + 955, + 297, + 955 + ], + "score": 0.98 + }, + { + "category_id": 1, + "poly": [ + 297, + 416, + 1404, + 416, + 1404, + 613, + 297, + 613 + ], + "score": 0.98 + }, + { + "category_id": 1, + "poly": [ + 298, + 1026, + 1404, + 1026, + 1404, + 1189, + 298, + 1189 + ], + "score": 0.978 + }, + { + "category_id": 1, + "poly": [ + 297, + 1314, + 1406, + 1314, + 1406, + 1544, + 297, + 1544 + ], + "score": 0.978 + }, + { + "category_id": 1, + "poly": [ + 297, + 1715, + 1406, + 1715, + 1406, + 2008, + 297, + 2008 + ], + "score": 0.978 + }, + { + "category_id": 1, + "poly": [ + 297, + 202, + 1405, + 202, + 1405, + 399, + 297, + 399 + ], + "score": 0.976 + }, + { + "category_id": 8, + "poly": [ + 521, + 624, + 1176, + 624, + 1176, + 681, + 521, + 681 + ], + "score": 0.95 + }, + { + "category_id": 8, + "poly": [ + 460, + 971, + 1238, + 971, + 1238, + 1018, + 460, + 1018 + ], + "score": 0.93 + }, + { + "category_id": 0, + "poly": [ + 298, + 1244, + 532, + 1244, + 532, + 1282, + 298, + 1282 + ], + "score": 0.919 + }, + { + "category_id": 9, + "poly": [ + 1366, + 978, + 1400, + 978, + 1400, + 1008, + 1366, + 1008 + ], + "score": 0.88 + }, + { + "category_id": 9, + "poly": [ + 1366, + 637, + 1400, + 637, + 1400, + 667, + 1366, + 667 + ], + "score": 0.875 + }, + { + "category_id": 1, + "poly": [ + 296, + 1569, + 1408, + 1569, + 1408, + 1689, + 296, + 1689 + ], + "score": 0.857 + }, + { + "category_id": 2, + "poly": [ + 841, + 2061, + 858, + 2061, + 858, + 2084, + 841, + 2084 + ], + "score": 0.763 + }, + { + "category_id": 13, + "poly": [ + 525, + 
365, + 586, + 365, + 586, + 399, + 525, + 399 + ], + "score": 0.93, + "latex": "v ( s _ { t } )" + }, + { + "category_id": 14, + "poly": [ + 522, + 622, + 1177, + 622, + 1177, + 682, + 522, + 682 + ], + "score": 0.93, + "latex": "V _ { t } ^ { \\lambda } \\doteq r _ { t } + \\gamma \\Big ( ( 1 - \\lambda ) v ( s _ { t + 1 } ) + \\lambda V _ { t + 1 } ^ { \\lambda } \\Big ) , \\quad V _ { H } ^ { \\lambda } \\doteq v ( s _ { H } ) ." + }, + { + "category_id": 13, + "poly": [ + 1308, + 332, + 1403, + 332, + 1403, + 366, + 1308, + 366 + ], + "score": 0.93, + "latex": "\\pi ( a _ { t } | s _ { t } )" + }, + { + "category_id": 13, + "poly": [ + 1216, + 516, + 1309, + 516, + 1309, + 544, + 1216, + 544 + ], + "score": 0.9, + "latex": "H = 1 6" + }, + { + "category_id": 14, + "poly": [ + 462, + 970, + 1235, + 970, + 1235, + 1017, + 462, + 1017 + ], + "score": 0.9, + "latex": "\\begin{array} { r } { \\mathcal { L } ( \\pi ) \\doteq - \\operatorname { E } \\bigl [ \\sum _ { t = 1 } ^ { H } \\ln \\pi ( a _ { t } \\mid s _ { t } ) \\mathrm { s g } ( V _ { t } ^ { \\lambda } - v ( s _ { t } ) ) + \\eta \\mathrm { H } \\bigl [ \\pi ( a _ { t } \\mid s _ { t } ) \\bigr ] \\bigr ] } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 354, + 454, + 380, + 454, + 380, + 482, + 354, + 482 + ], + "score": 0.86, + "latex": "s _ { t }" + }, + { + "category_id": 13, + "poly": [ + 1137, + 424, + 1165, + 424, + 1165, + 449, + 1137, + 449 + ], + "score": 0.85, + "latex": "a _ { t }" + }, + { + "category_id": 13, + "poly": [ + 835, + 696, + 854, + 696, + 854, + 722, + 835, + 722 + ], + "score": 0.83, + "latex": "\\lambda" + }, + { + "category_id": 13, + "poly": [ + 747, + 582, + 766, + 582, + 766, + 608, + 747, + 608 + ], + "score": 0.82, + "latex": "\\lambda" + }, + { + "category_id": 13, + "poly": [ + 298, + 1062, + 316, + 1062, + 316, + 1088, + 298, + 1088 + ], + "score": 0.74, + "latex": "\\lambda" + }, + { + "category_id": 15, + "poly": [ + 288.0, + 1238.0, + 538.0, + 1238.0, + 
538.0, + 1292.0, + 288.0, + 1292.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 839.0, + 2059.0, + 862.0, + 2059.0, + 862.0, + 2091.0, + 839.0, + 2091.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 692.0, + 834.0, + 692.0, + 834.0, + 728.0, + 294.0, + 728.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 855.0, + 692.0, + 1405.0, + 692.0, + 1405.0, + 728.0, + 855.0, + 728.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 725.0, + 1405.0, + 725.0, + 1405.0, + 762.0, + 294.0, + 762.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 757.0, + 1407.0, + 757.0, + 1407.0, + 795.0, + 294.0, + 795.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 789.0, + 1405.0, + 789.0, + 1405.0, + 827.0, + 294.0, + 827.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 824.0, + 1405.0, + 824.0, + 1405.0, + 859.0, + 296.0, + 859.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 855.0, + 1406.0, + 855.0, + 1406.0, + 892.0, + 292.0, + 892.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 887.0, + 1406.0, + 887.0, + 1406.0, + 925.0, + 294.0, + 925.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 918.0, + 1367.0, + 918.0, + 1367.0, + 960.0, + 296.0, + 960.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 418.0, + 1136.0, + 418.0, + 1136.0, + 451.0, + 296.0, + 451.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1166.0, + 418.0, + 1404.0, + 418.0, + 1404.0, + 451.0, + 1166.0, + 451.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 451.0, + 353.0, + 451.0, + 353.0, + 484.0, + 296.0, + 484.0 + ], + "score": 1.0, + 
"text": "" + }, + { + "category_id": 15, + "poly": [ + 381.0, + 451.0, + 1405.0, + 451.0, + 1405.0, + 484.0, + 381.0, + 484.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 484.0, + 1405.0, + 484.0, + 1405.0, + 517.0, + 296.0, + 517.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 514.0, + 1215.0, + 514.0, + 1215.0, + 550.0, + 295.0, + 550.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1310.0, + 514.0, + 1405.0, + 514.0, + 1405.0, + 550.0, + 1310.0, + 550.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 546.0, + 1406.0, + 546.0, + 1406.0, + 585.0, + 294.0, + 585.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 579.0, + 746.0, + 579.0, + 746.0, + 615.0, + 294.0, + 615.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 767.0, + 579.0, + 1272.0, + 579.0, + 1272.0, + 615.0, + 767.0, + 615.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1025.0, + 1404.0, + 1025.0, + 1404.0, + 1061.0, + 293.0, + 1061.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1058.0, + 297.0, + 1058.0, + 297.0, + 1095.0, + 293.0, + 1095.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 317.0, + 1058.0, + 1405.0, + 1058.0, + 1405.0, + 1095.0, + 317.0, + 1095.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1092.0, + 1405.0, + 1092.0, + 1405.0, + 1125.0, + 294.0, + 1125.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1122.0, + 1404.0, + 1122.0, + 1404.0, + 1158.0, + 293.0, + 1158.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1157.0, + 475.0, + 1157.0, + 475.0, + 1191.0, + 293.0, + 1191.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + 
"poly": [ + 294.0, + 1314.0, + 1406.0, + 1314.0, + 1406.0, + 1352.0, + 294.0, + 1352.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1348.0, + 1404.0, + 1348.0, + 1404.0, + 1384.0, + 294.0, + 1384.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1377.0, + 1407.0, + 1377.0, + 1407.0, + 1421.0, + 292.0, + 1421.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1412.0, + 1404.0, + 1412.0, + 1404.0, + 1451.0, + 294.0, + 1451.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1444.0, + 1406.0, + 1444.0, + 1406.0, + 1482.0, + 294.0, + 1482.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1481.0, + 1407.0, + 1481.0, + 1407.0, + 1513.0, + 295.0, + 1513.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1512.0, + 1015.0, + 1512.0, + 1015.0, + 1548.0, + 296.0, + 1548.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1715.0, + 1404.0, + 1715.0, + 1404.0, + 1750.0, + 296.0, + 1750.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1749.0, + 1406.0, + 1749.0, + 1406.0, + 1783.0, + 295.0, + 1783.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1783.0, + 1404.0, + 1783.0, + 1404.0, + 1814.0, + 295.0, + 1814.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1811.0, + 1406.0, + 1811.0, + 1406.0, + 1850.0, + 292.0, + 1850.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1846.0, + 1407.0, + 1846.0, + 1407.0, + 1880.0, + 295.0, + 1880.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1877.0, + 1404.0, + 1877.0, + 1404.0, + 1912.0, + 295.0, + 1912.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ 
+ 295.0, + 1911.0, + 1407.0, + 1911.0, + 1407.0, + 1945.0, + 295.0, + 1945.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1940.0, + 1407.0, + 1940.0, + 1407.0, + 1981.0, + 292.0, + 1981.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1974.0, + 525.0, + 1974.0, + 525.0, + 2008.0, + 294.0, + 2008.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 204.0, + 1405.0, + 204.0, + 1405.0, + 237.0, + 296.0, + 237.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 236.0, + 1405.0, + 236.0, + 1405.0, + 269.0, + 295.0, + 269.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 269.0, + 1405.0, + 269.0, + 1405.0, + 302.0, + 295.0, + 302.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 299.0, + 1406.0, + 299.0, + 1406.0, + 336.0, + 294.0, + 336.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 330.0, + 1307.0, + 330.0, + 1307.0, + 369.0, + 292.0, + 369.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 360.0, + 524.0, + 360.0, + 524.0, + 403.0, + 294.0, + 403.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 587.0, + 360.0, + 598.0, + 360.0, + 598.0, + 403.0, + 587.0, + 403.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1567.0, + 1248.0, + 1567.0, + 1248.0, + 1604.0, + 291.0, + 1604.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1611.0, + 1370.0, + 1611.0, + 1370.0, + 1648.0, + 294.0, + 1648.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1654.0, + 1406.0, + 1654.0, + 1406.0, + 1692.0, + 294.0, + 1692.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 3, + "width": 1700, + "height": 2200 + } + }, + 
{ + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 297, + 747, + 1406, + 747, + 1406, + 1200, + 297, + 1200 + ], + "score": 0.983 + }, + { + "category_id": 1, + "poly": [ + 298, + 1275, + 935, + 1275, + 935, + 1693, + 298, + 1693 + ], + "score": 0.978 + }, + { + "category_id": 1, + "poly": [ + 299, + 1878, + 1404, + 1878, + 1404, + 2007, + 299, + 2007 + ], + "score": 0.974 + }, + { + "category_id": 3, + "poly": [ + 958, + 1280, + 1401, + 1280, + 1401, + 1531, + 958, + 1531 + ], + "score": 0.971 + }, + { + "category_id": 1, + "poly": [ + 298, + 1696, + 1404, + 1696, + 1404, + 1857, + 298, + 1857 + ], + "score": 0.971 + }, + { + "category_id": 3, + "poly": [ + 297, + 159, + 1394, + 159, + 1394, + 398, + 297, + 398 + ], + "score": 0.964 + }, + { + "category_id": 4, + "poly": [ + 956, + 1547, + 1403, + 1547, + 1403, + 1673, + 956, + 1673 + ], + "score": 0.959 + }, + { + "category_id": 4, + "poly": [ + 296, + 436, + 1405, + 436, + 1405, + 719, + 296, + 719 + ], + "score": 0.955 + }, + { + "category_id": 0, + "poly": [ + 299, + 1231, + 650, + 1231, + 650, + 1265, + 299, + 1265 + ], + "score": 0.916 + }, + { + "category_id": 2, + "poly": [ + 841, + 2061, + 858, + 2061, + 858, + 2085, + 841, + 2085 + ], + "score": 0.725 + }, + { + "category_id": 13, + "poly": [ + 298, + 1973, + 340, + 1973, + 340, + 2003, + 298, + 2003 + ], + "score": 0.89, + "latex": "\\boldsymbol { s } _ { v } \\boldsymbol { x }" + }, + { + "category_id": 13, + "poly": [ + 409, + 1909, + 443, + 1909, + 443, + 1939, + 409, + 1939 + ], + "score": 0.87, + "latex": "\\hat { z } ^ { T }" + }, + { + "category_id": 13, + "poly": [ + 594, + 1974, + 623, + 1974, + 623, + 2003, + 594, + 2003 + ], + "score": 0.87, + "latex": "s _ { v }" + }, + { + "category_id": 13, + "poly": [ + 699, + 1664, + 769, + 1664, + 769, + 1693, + 699, + 1693 + ], + "score": 0.7, + "latex": "2 0 \\mathrm { H z }" + }, + { + "category_id": 15, + "poly": [ + 1044.75, + 1410.0, + 1095.75, + 1410.0, + 1095.75, + 1431.5, + 1044.75, + 
1431.5 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1164.0, + 149.0, + 1402.0, + 149.0, + 1402.0, + 185.0, + 1164.0, + 185.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1147.0, + 188.0, + 1182.0, + 188.0, + 1182.0, + 218.0, + 1147.0, + 218.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1217.0, + 188.0, + 1286.0, + 188.0, + 1286.0, + 211.0, + 1217.0, + 211.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1119.0, + 196.0, + 1151.0, + 196.0, + 1151.0, + 327.0, + 1119.0, + 327.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1216.0, + 207.0, + 1254.0, + 207.0, + 1254.0, + 231.0, + 1216.0, + 231.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1161.0, + 232.0, + 1180.0, + 232.0, + 1180.0, + 257.0, + 1161.0, + 257.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1223.0, + 228.0, + 1282.0, + 228.0, + 1282.0, + 251.0, + 1223.0, + 251.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1163.0, + 317.0, + 1178.0, + 317.0, + 1178.0, + 338.0, + 1163.0, + 338.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1190.0, + 347.0, + 1208.0, + 347.0, + 1208.0, + 370.0, + 1190.0, + 370.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1223.0, + 341.0, + 1364.0, + 341.0, + 1364.0, + 375.0, + 1223.0, + 375.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1190.0, + 373.0, + 1379.0, + 373.0, + 1379.0, + 402.0, + 1190.0, + 402.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 955.0, + 1545.0, + 1407.0, + 1545.0, + 1407.0, + 1580.0, + 955.0, + 1580.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 955.0, + 1578.0, + 1405.0, + 1578.0, + 1405.0, + 1612.0, + 955.0, + 1612.0 + ], + "score": 1.0, + 
"text": "" + }, + { + "category_id": 15, + "poly": [ + 954.0, + 1609.0, + 1405.0, + 1609.0, + 1405.0, + 1642.0, + 954.0, + 1642.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 954.0, + 1640.0, + 1405.0, + 1640.0, + 1405.0, + 1673.0, + 954.0, + 1673.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 437.0, + 1407.0, + 437.0, + 1407.0, + 471.0, + 295.0, + 471.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 469.0, + 1407.0, + 469.0, + 1407.0, + 502.0, + 295.0, + 502.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 500.0, + 1405.0, + 500.0, + 1405.0, + 532.0, + 293.0, + 532.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 527.0, + 1406.0, + 527.0, + 1406.0, + 567.0, + 291.0, + 567.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 563.0, + 1405.0, + 563.0, + 1405.0, + 597.0, + 295.0, + 597.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 594.0, + 1406.0, + 594.0, + 1406.0, + 628.0, + 294.0, + 628.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 624.0, + 1406.0, + 624.0, + 1406.0, + 659.0, + 294.0, + 659.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 656.0, + 1405.0, + 656.0, + 1405.0, + 690.0, + 294.0, + 690.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 688.0, + 1374.0, + 688.0, + 1374.0, + 721.0, + 293.0, + 721.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1226.0, + 653.0, + 1226.0, + 653.0, + 1273.0, + 293.0, + 1273.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 839.0, + 2058.0, + 861.0, + 2058.0, + 861.0, + 2093.0, + 839.0, + 2093.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": 
[ + 294.0, + 745.0, + 1404.0, + 745.0, + 1404.0, + 782.0, + 294.0, + 782.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 781.0, + 1404.0, + 781.0, + 1404.0, + 813.0, + 294.0, + 813.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 814.0, + 1403.0, + 814.0, + 1403.0, + 845.0, + 296.0, + 845.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 844.0, + 1407.0, + 844.0, + 1407.0, + 882.0, + 294.0, + 882.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 877.0, + 1406.0, + 877.0, + 1406.0, + 911.0, + 292.0, + 911.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 909.0, + 1404.0, + 909.0, + 1404.0, + 944.0, + 294.0, + 944.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 943.0, + 1404.0, + 943.0, + 1404.0, + 977.0, + 294.0, + 977.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 975.0, + 1404.0, + 975.0, + 1404.0, + 1011.0, + 294.0, + 1011.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1008.0, + 1406.0, + 1008.0, + 1406.0, + 1043.0, + 295.0, + 1043.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1036.0, + 1408.0, + 1036.0, + 1408.0, + 1076.0, + 294.0, + 1076.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1071.0, + 1406.0, + 1071.0, + 1406.0, + 1107.0, + 294.0, + 1107.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1104.0, + 1408.0, + 1104.0, + 1408.0, + 1139.0, + 295.0, + 1139.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1135.0, + 1406.0, + 1135.0, + 1406.0, + 1172.0, + 295.0, + 1172.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1167.0, + 602.0, + 1167.0, 
+ 602.0, + 1207.0, + 294.0, + 1207.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1273.0, + 937.0, + 1273.0, + 937.0, + 1307.0, + 296.0, + 1307.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1309.0, + 936.0, + 1309.0, + 936.0, + 1337.0, + 296.0, + 1337.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1340.0, + 937.0, + 1340.0, + 937.0, + 1371.0, + 296.0, + 1371.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1371.0, + 938.0, + 1371.0, + 938.0, + 1408.0, + 294.0, + 1408.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1401.0, + 937.0, + 1401.0, + 937.0, + 1441.0, + 294.0, + 1441.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1435.0, + 936.0, + 1435.0, + 936.0, + 1469.0, + 295.0, + 1469.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1471.0, + 937.0, + 1471.0, + 937.0, + 1501.0, + 295.0, + 1501.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1502.0, + 937.0, + 1502.0, + 937.0, + 1534.0, + 296.0, + 1534.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1533.0, + 936.0, + 1533.0, + 936.0, + 1566.0, + 295.0, + 1566.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1564.0, + 938.0, + 1564.0, + 938.0, + 1601.0, + 294.0, + 1601.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1600.0, + 940.0, + 1600.0, + 940.0, + 1632.0, + 297.0, + 1632.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1631.0, + 936.0, + 1631.0, + 936.0, + 1663.0, + 295.0, + 1663.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1664.0, + 698.0, + 1664.0, + 698.0, + 1695.0, + 296.0, + 1695.0 
+ ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 770.0, + 1664.0, + 936.0, + 1664.0, + 936.0, + 1695.0, + 770.0, + 1695.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1876.0, + 1405.0, + 1876.0, + 1405.0, + 1914.0, + 293.0, + 1914.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1907.0, + 408.0, + 1907.0, + 408.0, + 1947.0, + 293.0, + 1947.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 444.0, + 1907.0, + 1410.0, + 1907.0, + 1410.0, + 1947.0, + 444.0, + 1947.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1942.0, + 1403.0, + 1942.0, + 1403.0, + 1978.0, + 294.0, + 1978.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1969.0, + 297.0, + 1969.0, + 297.0, + 2014.0, + 293.0, + 2014.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 341.0, + 1969.0, + 593.0, + 1969.0, + 593.0, + 2014.0, + 341.0, + 2014.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 624.0, + 1969.0, + 1405.0, + 1969.0, + 1405.0, + 2014.0, + 624.0, + 2014.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1696.0, + 1405.0, + 1696.0, + 1405.0, + 1730.0, + 296.0, + 1730.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1726.0, + 1406.0, + 1726.0, + 1406.0, + 1766.0, + 293.0, + 1766.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1763.0, + 1402.0, + 1763.0, + 1402.0, + 1797.0, + 296.0, + 1797.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1790.0, + 1404.0, + 1790.0, + 1404.0, + 1830.0, + 293.0, + 1830.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1827.0, + 881.0, + 1827.0, + 881.0, + 1861.0, + 293.0, + 1861.0 + ], + "score": 1.0, 
+ "text": "" + } + ], + "page_info": { + "page_no": 4, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 297, + 1591, + 1405, + 1591, + 1405, + 1885, + 297, + 1885 + ], + "score": 0.983 + }, + { + "category_id": 1, + "poly": [ + 297, + 1193, + 1406, + 1193, + 1406, + 1579, + 297, + 1579 + ], + "score": 0.983 + }, + { + "category_id": 1, + "poly": [ + 296, + 873, + 1406, + 873, + 1406, + 1134, + 296, + 1134 + ], + "score": 0.983 + }, + { + "category_id": 4, + "poly": [ + 296, + 435, + 1406, + 435, + 1406, + 657, + 296, + 657 + ], + "score": 0.961 + }, + { + "category_id": 8, + "poly": [ + 335, + 760, + 1326, + 760, + 1326, + 862, + 335, + 862 + ], + "score": 0.959 + }, + { + "category_id": 3, + "poly": [ + 297, + 163, + 1387, + 163, + 1387, + 396, + 297, + 396 + ], + "score": 0.958 + }, + { + "category_id": 1, + "poly": [ + 299, + 683, + 1403, + 683, + 1403, + 748, + 299, + 748 + ], + "score": 0.952 + }, + { + "category_id": 1, + "poly": [ + 297, + 1944, + 1398, + 1944, + 1398, + 2008, + 297, + 2008 + ], + "score": 0.95 + }, + { + "category_id": 0, + "poly": [ + 299, + 1907, + 702, + 1907, + 702, + 1940, + 299, + 1940 + ], + "score": 0.918 + }, + { + "category_id": 9, + "poly": [ + 1366, + 797, + 1400, + 797, + 1400, + 826, + 1366, + 826 + ], + "score": 0.867 + }, + { + "category_id": 2, + "poly": [ + 840, + 2062, + 859, + 2062, + 859, + 2085, + 840, + 2085 + ], + "score": 0.791 + }, + { + "category_id": 0, + "poly": [ + 296, + 1156, + 844, + 1156, + 844, + 1189, + 296, + 1189 + ], + "score": 0.726 + }, + { + "category_id": 0, + "poly": [ + 297, + 1156, + 844, + 1156, + 844, + 1189, + 297, + 1189 + ], + "score": 0.182 + }, + { + "category_id": 13, + "poly": [ + 682, + 1419, + 736, + 1419, + 736, + 1449, + 682, + 1449 + ], + "score": 0.87, + "latex": "+ 1 0" + }, + { + "category_id": 13, + "poly": [ + 607, + 1387, + 646, + 1387, + 646, + 1415, + 607, + 1415 + ], + "score": 0.85, + "latex": "+ 1" + }, + { + 
"category_id": 14, + "poly": [ + 339, + 818, + 1323, + 818, + 1323, + 863, + 339, + 863 + ], + "score": 0.85, + "latex": "\\begin{array} { r l } { r ^ { \\mathrm { k n e e } } \\doteq 1 - \\frac 1 4 \\parallel q ^ { \\mathrm { k n e e } } - 1 . 0 \\parallel _ { 1 } } & { { } r ^ { \\mathrm { v e l o c i t y } } \\doteq 5 \\big ( \\operatorname* { m a x } ( 0 , ^ { \\mathcal { B } } v _ { x } ) / \\parallel ^ { \\mathcal { B } } v \\parallel _ { 2 } \\cdot \\mathrm { c l i p } ( ^ { \\mathcal { B } } v _ { x } / 0 . 3 , - 1 , 1 ) + 1 \\big ) } \\end{array}" + }, + { + "category_id": 14, + "poly": [ + 342, + 761, + 1326, + 761, + 1326, + 805, + 342, + 805 + ], + "score": 0.82, + "latex": "\\begin{array} { r l } { r ^ { \\mathrm { u p r } } \\doteq ( \\hat { z } ^ { T } [ 0 , 0 , 1 ] - 1 ) / 2 } & { { } r ^ { \\mathrm { h i p } } \\doteq 1 - \\frac 1 4 \\| q ^ { \\mathrm { h i p } } + 0 . 2 \\| _ { 1 } \\quad r ^ { \\mathrm { s h o u l d e r } } \\doteq 1 - \\frac 1 4 \\| q ^ { \\mathrm { s h o u l d e r } } + 0 . 2 \\| _ { 1 } } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 951, + 1975, + 1031, + 1975, + 1031, + 2005, + 951, + 2005 + ], + "score": 0.6, + "latex": "0 . 
5 \\ : \\mathrm { H z }" + }, + { + "category_id": 13, + "poly": [ + 326, + 1451, + 386, + 1451, + 386, + 1480, + 326, + 1480 + ], + "score": 0.54, + "latex": "2 \\ \\mathrm { H z }" + }, + { + "category_id": 13, + "poly": [ + 1225, + 1387, + 1264, + 1387, + 1264, + 1415, + 1225, + 1415 + ], + "score": 0.48, + "latex": "- 1" + }, + { + "category_id": 13, + "poly": [ + 1320, + 1451, + 1344, + 1451, + 1344, + 1479, + 1320, + 1479 + ], + "score": 0.31, + "latex": "\\textsf { Z }" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 437.0, + 1404.0, + 437.0, + 1404.0, + 472.0, + 294.0, + 472.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 465.0, + 1407.0, + 465.0, + 1407.0, + 506.0, + 293.0, + 506.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 499.0, + 1406.0, + 499.0, + 1406.0, + 535.0, + 294.0, + 535.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 531.0, + 1406.0, + 531.0, + 1406.0, + 566.0, + 295.0, + 566.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 562.0, + 1408.0, + 562.0, + 1408.0, + 598.0, + 295.0, + 598.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 594.0, + 1406.0, + 594.0, + 1406.0, + 628.0, + 295.0, + 628.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 625.0, + 1015.0, + 625.0, + 1015.0, + 660.0, + 294.0, + 660.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1169.0, + 155.0, + 1386.0, + 155.0, + 1386.0, + 179.0, + 1169.0, + 179.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1118.0, + 175.0, + 1149.0, + 175.0, + 1149.0, + 348.0, + 1118.0, + 348.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1151.0, + 178.0, + 1166.0, + 178.0, + 1166.0, + 196.0, + 1151.0, + 196.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 1209.0, + 200.0, + 1277.0, + 200.0, + 1277.0, + 226.0, + 1209.0, + 226.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1151.0, + 213.0, + 1166.0, + 213.0, + 1166.0, + 233.0, + 1151.0, + 233.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1197.0, + 223.0, + 1255.0, + 223.0, + 1255.0, + 249.0, + 1197.0, + 249.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1197.0, + 242.0, + 1254.0, + 242.0, + 1254.0, + 264.0, + 1197.0, + 264.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1195.0, + 257.0, + 1229.0, + 257.0, + 1229.0, + 281.0, + 1195.0, + 281.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1204.0, + 278.0, + 1255.0, + 278.0, + 1255.0, + 295.0, + 1204.0, + 295.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1173.0, + 348.0, + 1192.0, + 348.0, + 1192.0, + 371.0, + 1173.0, + 371.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1218.0, + 347.0, + 1239.0, + 347.0, + 1239.0, + 371.0, + 1218.0, + 371.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1257.0, + 347.0, + 1292.0, + 347.0, + 1292.0, + 372.0, + 1257.0, + 372.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1297.0, + 342.0, + 1384.0, + 342.0, + 1384.0, + 376.0, + 1297.0, + 376.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1183.0, + 372.0, + 1375.0, + 372.0, + 1375.0, + 400.0, + 1183.0, + 400.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1150.0, + 322.5, + 1165.0, + 322.5, + 1165.0, + 334.5, + 1150.0, + 334.5 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1905.0, + 704.0, + 1905.0, + 704.0, + 1944.0, + 294.0, + 1944.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 
840.0, + 2060.0, + 862.0, + 2060.0, + 862.0, + 2091.0, + 840.0, + 2091.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1154.0, + 844.0, + 1154.0, + 844.0, + 1196.0, + 293.0, + 1196.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1154.0, + 845.0, + 1154.0, + 845.0, + 1196.0, + 292.0, + 1196.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1590.0, + 1406.0, + 1590.0, + 1406.0, + 1630.0, + 294.0, + 1630.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1627.0, + 1406.0, + 1627.0, + 1406.0, + 1662.0, + 295.0, + 1662.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1658.0, + 1405.0, + 1658.0, + 1405.0, + 1694.0, + 294.0, + 1694.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1692.0, + 1405.0, + 1692.0, + 1405.0, + 1727.0, + 295.0, + 1727.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1722.0, + 1406.0, + 1722.0, + 1406.0, + 1760.0, + 294.0, + 1760.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1750.0, + 1405.0, + 1750.0, + 1405.0, + 1795.0, + 291.0, + 1795.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1787.0, + 1403.0, + 1787.0, + 1403.0, + 1824.0, + 294.0, + 1824.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1818.0, + 1405.0, + 1818.0, + 1405.0, + 1856.0, + 294.0, + 1856.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1854.0, + 1071.0, + 1854.0, + 1071.0, + 1888.0, + 295.0, + 1888.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1191.0, + 1404.0, + 1191.0, + 1404.0, + 1225.0, + 296.0, + 1225.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1224.0, 
+ 1403.0, + 1224.0, + 1403.0, + 1256.0, + 295.0, + 1256.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1254.0, + 1404.0, + 1254.0, + 1404.0, + 1291.0, + 294.0, + 1291.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1289.0, + 1406.0, + 1289.0, + 1406.0, + 1324.0, + 295.0, + 1324.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1320.0, + 1406.0, + 1320.0, + 1406.0, + 1357.0, + 292.0, + 1357.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1353.0, + 1407.0, + 1353.0, + 1407.0, + 1392.0, + 292.0, + 1392.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1386.0, + 606.0, + 1386.0, + 606.0, + 1421.0, + 292.0, + 1421.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 647.0, + 1386.0, + 1224.0, + 1386.0, + 1224.0, + 1421.0, + 647.0, + 1421.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1265.0, + 1386.0, + 1407.0, + 1386.0, + 1407.0, + 1421.0, + 1265.0, + 1421.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1416.0, + 681.0, + 1416.0, + 681.0, + 1456.0, + 294.0, + 1456.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 737.0, + 1416.0, + 1406.0, + 1416.0, + 1406.0, + 1456.0, + 737.0, + 1456.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1449.0, + 325.0, + 1449.0, + 325.0, + 1486.0, + 292.0, + 1486.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 387.0, + 1449.0, + 1319.0, + 1449.0, + 1319.0, + 1486.0, + 387.0, + 1486.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1345.0, + 1449.0, + 1406.0, + 1449.0, + 1406.0, + 1486.0, + 1345.0, + 1486.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1483.0, + 1406.0, + 
1483.0, + 1406.0, + 1519.0, + 295.0, + 1519.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1516.0, + 1406.0, + 1516.0, + 1406.0, + 1552.0, + 295.0, + 1552.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1547.0, + 1409.0, + 1547.0, + 1409.0, + 1583.0, + 292.0, + 1583.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 872.0, + 1406.0, + 872.0, + 1406.0, + 910.0, + 294.0, + 910.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 908.0, + 1404.0, + 908.0, + 1404.0, + 940.0, + 295.0, + 940.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 941.0, + 1404.0, + 941.0, + 1404.0, + 972.0, + 295.0, + 972.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 970.0, + 1406.0, + 970.0, + 1406.0, + 1007.0, + 291.0, + 1007.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1005.0, + 1406.0, + 1005.0, + 1406.0, + 1040.0, + 294.0, + 1040.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1031.0, + 1407.0, + 1031.0, + 1407.0, + 1075.0, + 291.0, + 1075.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1069.0, + 1404.0, + 1069.0, + 1404.0, + 1104.0, + 295.0, + 1104.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1100.0, + 1205.0, + 1100.0, + 1205.0, + 1138.0, + 293.0, + 1138.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 682.0, + 1405.0, + 682.0, + 1405.0, + 718.0, + 295.0, + 718.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 716.0, + 1297.0, + 716.0, + 1297.0, + 750.0, + 294.0, + 750.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1940.0, + 1404.0, + 1940.0, + 1404.0, + 1977.0, + 
293.0, + 1977.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1975.0, + 950.0, + 1975.0, + 950.0, + 2011.0, + 293.0, + 2011.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1032.0, + 1975.0, + 1404.0, + 1975.0, + 1404.0, + 2011.0, + 1032.0, + 2011.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 5, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 298, + 1293, + 1405, + 1293, + 1405, + 1583, + 298, + 1583 + ], + "score": 0.983 + }, + { + "category_id": 1, + "poly": [ + 297, + 877, + 1405, + 877, + 1405, + 1234, + 297, + 1234 + ], + "score": 0.982 + }, + { + "category_id": 1, + "poly": [ + 298, + 1599, + 1404, + 1599, + 1404, + 1794, + 298, + 1794 + ], + "score": 0.98 + }, + { + "category_id": 1, + "poly": [ + 299, + 1878, + 1403, + 1878, + 1403, + 2007, + 299, + 2007 + ], + "score": 0.978 + }, + { + "category_id": 1, + "poly": [ + 299, + 702, + 1404, + 702, + 1404, + 863, + 299, + 863 + ], + "score": 0.977 + }, + { + "category_id": 3, + "poly": [ + 298, + 149, + 1390, + 149, + 1390, + 382, + 298, + 382 + ], + "score": 0.957 + }, + { + "category_id": 4, + "poly": [ + 297, + 422, + 1405, + 422, + 1405, + 676, + 297, + 676 + ], + "score": 0.95 + }, + { + "category_id": 0, + "poly": [ + 299, + 1826, + 549, + 1826, + 549, + 1863, + 299, + 1863 + ], + "score": 0.92 + }, + { + "category_id": 0, + "poly": [ + 299, + 1258, + 584, + 1258, + 584, + 1291, + 299, + 1291 + ], + "score": 0.918 + }, + { + "category_id": 2, + "poly": [ + 842, + 2061, + 858, + 2061, + 858, + 2084, + 842, + 2084 + ], + "score": 0.712 + }, + { + "category_id": 2, + "poly": [ + 841, + 2061, + 859, + 2061, + 859, + 2084, + 841, + 2084 + ], + "score": 0.097 + }, + { + "category_id": 13, + "poly": [ + 1342, + 1358, + 1399, + 1358, + 1399, + 1388, + 1342, + 1388 + ], + "score": 0.56, + "latex": "2 \\ : \\mathrm { H z }" + }, + { + "category_id": 15, + 
"poly": [ + 1162.0, + 141.0, + 1394.0, + 141.0, + 1394.0, + 169.0, + 1162.0, + 169.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1120.0, + 160.0, + 1152.0, + 160.0, + 1152.0, + 335.0, + 1120.0, + 335.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1151.0, + 165.0, + 1166.0, + 165.0, + 1166.0, + 182.0, + 1151.0, + 182.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1219.0, + 181.0, + 1288.0, + 181.0, + 1288.0, + 210.0, + 1219.0, + 210.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1152.0, + 204.0, + 1164.0, + 204.0, + 1164.0, + 216.0, + 1152.0, + 216.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1199.0, + 213.0, + 1259.0, + 213.0, + 1259.0, + 234.0, + 1199.0, + 234.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1153.0, + 242.0, + 1164.0, + 242.0, + 1164.0, + 252.0, + 1153.0, + 252.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1199.0, + 230.0, + 1257.0, + 230.0, + 1257.0, + 251.0, + 1199.0, + 251.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1199.0, + 246.0, + 1260.0, + 246.0, + 1260.0, + 267.0, + 1199.0, + 267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1152.0, + 277.0, + 1164.0, + 277.0, + 1164.0, + 289.0, + 1152.0, + 289.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1205.0, + 264.0, + 1269.0, + 264.0, + 1269.0, + 285.0, + 1205.0, + 285.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1230.0, + 302.0, + 1250.0, + 302.0, + 1250.0, + 315.0, + 1230.0, + 315.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1151.0, + 308.0, + 1166.0, + 308.0, + 1166.0, + 327.0, + 1151.0, + 327.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1173.0, + 337.0, + 
1189.0, + 337.0, + 1189.0, + 355.0, + 1173.0, + 355.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1208.0, + 336.0, + 1229.0, + 336.0, + 1229.0, + 358.0, + 1208.0, + 358.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1239.0, + 333.0, + 1385.0, + 333.0, + 1385.0, + 362.0, + 1239.0, + 362.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1182.0, + 359.0, + 1374.0, + 359.0, + 1374.0, + 388.0, + 1182.0, + 388.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 422.0, + 1406.0, + 422.0, + 1406.0, + 459.0, + 295.0, + 459.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 457.0, + 1405.0, + 457.0, + 1405.0, + 491.0, + 295.0, + 491.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 485.0, + 1405.0, + 485.0, + 1405.0, + 526.0, + 292.0, + 526.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 518.0, + 1405.0, + 518.0, + 1405.0, + 553.0, + 295.0, + 553.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 547.0, + 1406.0, + 547.0, + 1406.0, + 587.0, + 292.0, + 587.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 580.0, + 1403.0, + 580.0, + 1403.0, + 614.0, + 295.0, + 614.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 612.0, + 1408.0, + 612.0, + 1408.0, + 649.0, + 294.0, + 649.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 644.0, + 1296.0, + 644.0, + 1296.0, + 678.0, + 295.0, + 678.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1823.0, + 551.0, + 1823.0, + 551.0, + 1867.0, + 293.0, + 1867.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1255.0, + 586.0, + 1255.0, + 586.0, + 1297.0, + 293.0, + 
1297.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 840.0, + 2059.0, + 860.0, + 2059.0, + 860.0, + 2092.0, + 840.0, + 2092.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 840.0, + 2059.0, + 860.0, + 2059.0, + 860.0, + 2092.0, + 840.0, + 2092.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1292.0, + 1407.0, + 1292.0, + 1407.0, + 1328.0, + 295.0, + 1328.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1326.0, + 1405.0, + 1326.0, + 1405.0, + 1360.0, + 296.0, + 1360.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1359.0, + 1341.0, + 1359.0, + 1341.0, + 1393.0, + 295.0, + 1393.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1400.0, + 1359.0, + 1407.0, + 1359.0, + 1407.0, + 1393.0, + 1400.0, + 1393.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1390.0, + 1405.0, + 1390.0, + 1405.0, + 1425.0, + 295.0, + 1425.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1422.0, + 1403.0, + 1422.0, + 1403.0, + 1456.0, + 296.0, + 1456.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1456.0, + 1405.0, + 1456.0, + 1405.0, + 1490.0, + 295.0, + 1490.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1488.0, + 1405.0, + 1488.0, + 1405.0, + 1522.0, + 293.0, + 1522.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1522.0, + 1403.0, + 1522.0, + 1403.0, + 1556.0, + 293.0, + 1556.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1556.0, + 629.0, + 1556.0, + 629.0, + 1584.0, + 293.0, + 1584.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 876.0, + 1406.0, + 876.0, + 1406.0, + 914.0, + 295.0, + 914.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 913.0, + 1406.0, + 913.0, + 1406.0, + 945.0, + 296.0, + 945.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 941.0, + 1407.0, + 941.0, + 1407.0, + 979.0, + 294.0, + 979.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 974.0, + 1406.0, + 974.0, + 1406.0, + 1013.0, + 292.0, + 1013.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1006.0, + 1405.0, + 1006.0, + 1405.0, + 1045.0, + 292.0, + 1045.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1039.0, + 1406.0, + 1039.0, + 1406.0, + 1076.0, + 295.0, + 1076.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1071.0, + 1405.0, + 1071.0, + 1405.0, + 1110.0, + 292.0, + 1110.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1104.0, + 1405.0, + 1104.0, + 1405.0, + 1141.0, + 292.0, + 1141.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1140.0, + 1402.0, + 1140.0, + 1402.0, + 1172.0, + 295.0, + 1172.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1166.0, + 1410.0, + 1166.0, + 1410.0, + 1210.0, + 292.0, + 1210.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1202.0, + 573.0, + 1202.0, + 573.0, + 1239.0, + 295.0, + 1239.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1596.0, + 1406.0, + 1596.0, + 1406.0, + 1635.0, + 293.0, + 1635.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1632.0, + 1404.0, + 1632.0, + 1404.0, + 1665.0, + 296.0, + 1665.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1661.0, + 1409.0, + 1661.0, + 1409.0, + 1700.0, + 292.0, + 1700.0 + ], + "score": 1.0, + 
"text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1698.0, + 1405.0, + 1698.0, + 1405.0, + 1731.0, + 294.0, + 1731.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1728.0, + 1405.0, + 1728.0, + 1405.0, + 1764.0, + 293.0, + 1764.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1760.0, + 1166.0, + 1760.0, + 1166.0, + 1798.0, + 295.0, + 1798.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1875.0, + 1405.0, + 1875.0, + 1405.0, + 1913.0, + 294.0, + 1913.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1911.0, + 1405.0, + 1911.0, + 1405.0, + 1941.0, + 297.0, + 1941.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1941.0, + 1407.0, + 1941.0, + 1407.0, + 1977.0, + 293.0, + 1977.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1975.0, + 1404.0, + 1975.0, + 1404.0, + 2008.0, + 295.0, + 2008.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 701.0, + 1406.0, + 701.0, + 1406.0, + 739.0, + 294.0, + 739.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 732.0, + 1405.0, + 732.0, + 1405.0, + 772.0, + 293.0, + 772.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 766.0, + 1405.0, + 766.0, + 1405.0, + 804.0, + 295.0, + 804.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 795.0, + 1405.0, + 795.0, + 1405.0, + 841.0, + 292.0, + 841.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 834.0, + 450.0, + 834.0, + 450.0, + 865.0, + 297.0, + 865.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 6, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 298, + 879, + 1405, + 879, + 
1405, + 1267, + 298, + 1267 + ], + "score": 0.983 + }, + { + "category_id": 1, + "poly": [ + 297, + 1397, + 1404, + 1397, + 1404, + 1657, + 297, + 1657 + ], + "score": 0.98 + }, + { + "category_id": 1, + "poly": [ + 298, + 624, + 1404, + 624, + 1404, + 851, + 298, + 851 + ], + "score": 0.98 + }, + { + "category_id": 1, + "poly": [ + 298, + 1687, + 1403, + 1687, + 1403, + 1848, + 298, + 1848 + ], + "score": 0.979 + }, + { + "category_id": 1, + "poly": [ + 300, + 1878, + 1402, + 1878, + 1402, + 2007, + 300, + 2007 + ], + "score": 0.978 + }, + { + "category_id": 4, + "poly": [ + 297, + 436, + 1404, + 436, + 1404, + 594, + 297, + 594 + ], + "score": 0.959 + }, + { + "category_id": 3, + "poly": [ + 297, + 162, + 1385, + 162, + 1385, + 397, + 297, + 397 + ], + "score": 0.945 + }, + { + "category_id": 0, + "poly": [ + 298, + 1317, + 499, + 1317, + 499, + 1355, + 298, + 1355 + ], + "score": 0.905 + }, + { + "category_id": 2, + "poly": [ + 841, + 2062, + 858, + 2062, + 858, + 2084, + 841, + 2084 + ], + "score": 0.796 + }, + { + "category_id": 15, + "poly": [ + 294.0, + 436.0, + 1404.0, + 436.0, + 1404.0, + 472.0, + 294.0, + 472.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 468.0, + 1405.0, + 468.0, + 1405.0, + 503.0, + 293.0, + 503.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 499.0, + 1404.0, + 499.0, + 1404.0, + 535.0, + 292.0, + 535.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 528.0, + 1406.0, + 528.0, + 1406.0, + 567.0, + 294.0, + 567.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 561.0, + 1081.0, + 561.0, + 1081.0, + 596.0, + 295.0, + 596.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1191.0, + 154.0, + 1384.0, + 154.0, + 1384.0, + 181.0, + 1191.0, + 181.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1116.0, + 176.0, + 1152.0, + 176.0, 
+ 1152.0, + 349.0, + 1116.0, + 349.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1148.0, + 181.0, + 1189.0, + 181.0, + 1189.0, + 209.0, + 1148.0, + 209.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1310.0, + 188.0, + 1374.0, + 188.0, + 1374.0, + 209.0, + 1310.0, + 209.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1310.0, + 204.0, + 1358.0, + 204.0, + 1358.0, + 226.0, + 1310.0, + 226.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1319.0, + 218.0, + 1377.0, + 218.0, + 1377.0, + 243.0, + 1319.0, + 243.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1165.0, + 249.0, + 1185.0, + 249.0, + 1185.0, + 274.0, + 1165.0, + 274.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1148.0, + 282.0, + 1189.0, + 282.0, + 1189.0, + 312.0, + 1148.0, + 312.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1149.0, + 316.0, + 1185.0, + 316.0, + 1185.0, + 341.0, + 1149.0, + 341.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1189.0, + 348.0, + 1210.0, + 348.0, + 1210.0, + 371.0, + 1189.0, + 371.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1238.0, + 345.0, + 1262.0, + 345.0, + 1262.0, + 374.0, + 1238.0, + 374.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1290.0, + 345.0, + 1314.0, + 345.0, + 1314.0, + 374.0, + 1290.0, + 374.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1343.0, + 348.0, + 1363.0, + 348.0, + 1363.0, + 371.0, + 1343.0, + 371.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1191.0, + 372.0, + 1382.0, + 372.0, + 1382.0, + 401.0, + 1191.0, + 401.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1149.0, + 211.5, + 1184.0, + 211.5, + 1184.0, + 246.5, + 1149.0, + 246.5 
+ ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1152.0, + 251.5, + 1169.0, + 251.5, + 1169.0, + 272.5, + 1152.0, + 272.5 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 1313.0, + 505.0, + 1313.0, + 505.0, + 1362.0, + 290.0, + 1362.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 839.0, + 2059.0, + 860.0, + 2059.0, + 860.0, + 2089.0, + 839.0, + 2089.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 879.0, + 1407.0, + 879.0, + 1407.0, + 914.0, + 295.0, + 914.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 909.0, + 1405.0, + 909.0, + 1405.0, + 946.0, + 295.0, + 946.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 942.0, + 1405.0, + 942.0, + 1405.0, + 978.0, + 295.0, + 978.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 976.0, + 1405.0, + 976.0, + 1405.0, + 1012.0, + 293.0, + 1012.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1007.0, + 1403.0, + 1007.0, + 1403.0, + 1043.0, + 295.0, + 1043.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1039.0, + 1405.0, + 1039.0, + 1405.0, + 1078.0, + 293.0, + 1078.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1073.0, + 1405.0, + 1073.0, + 1405.0, + 1109.0, + 293.0, + 1109.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1106.0, + 1405.0, + 1106.0, + 1405.0, + 1142.0, + 295.0, + 1142.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1138.0, + 1405.0, + 1138.0, + 1405.0, + 1175.0, + 292.0, + 1175.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1168.0, + 1406.0, + 1168.0, + 1406.0, + 1205.0, + 292.0, + 1205.0 + ], + "score": 1.0, + "text": 
"" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1203.0, + 1408.0, + 1203.0, + 1408.0, + 1239.0, + 293.0, + 1239.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1233.0, + 971.0, + 1233.0, + 971.0, + 1274.0, + 293.0, + 1274.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1398.0, + 1407.0, + 1398.0, + 1407.0, + 1433.0, + 295.0, + 1433.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1433.0, + 1404.0, + 1433.0, + 1404.0, + 1465.0, + 296.0, + 1465.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1463.0, + 1406.0, + 1463.0, + 1406.0, + 1498.0, + 295.0, + 1498.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1497.0, + 1405.0, + 1497.0, + 1405.0, + 1532.0, + 295.0, + 1532.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1524.0, + 1404.0, + 1524.0, + 1404.0, + 1567.0, + 294.0, + 1567.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1559.0, + 1404.0, + 1559.0, + 1404.0, + 1596.0, + 292.0, + 1596.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1596.0, + 1402.0, + 1596.0, + 1402.0, + 1627.0, + 295.0, + 1627.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1627.0, + 672.0, + 1627.0, + 672.0, + 1658.0, + 296.0, + 1658.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 625.0, + 1404.0, + 625.0, + 1404.0, + 657.0, + 296.0, + 657.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 656.0, + 1406.0, + 656.0, + 1406.0, + 692.0, + 294.0, + 692.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 685.0, + 1405.0, + 685.0, + 1405.0, + 727.0, + 292.0, + 727.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 295.0, + 721.0, + 1404.0, + 721.0, + 1404.0, + 755.0, + 295.0, + 755.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 754.0, + 1406.0, + 754.0, + 1406.0, + 790.0, + 294.0, + 790.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 786.0, + 1406.0, + 786.0, + 1406.0, + 822.0, + 294.0, + 822.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 821.0, + 1188.0, + 821.0, + 1188.0, + 852.0, + 296.0, + 852.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1685.0, + 1405.0, + 1685.0, + 1405.0, + 1724.0, + 293.0, + 1724.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1720.0, + 1405.0, + 1720.0, + 1405.0, + 1754.0, + 294.0, + 1754.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1750.0, + 1407.0, + 1750.0, + 1407.0, + 1791.0, + 292.0, + 1791.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1781.0, + 1405.0, + 1781.0, + 1405.0, + 1825.0, + 292.0, + 1825.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1816.0, + 1035.0, + 1816.0, + 1035.0, + 1850.0, + 296.0, + 1850.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1876.0, + 1404.0, + 1876.0, + 1404.0, + 1915.0, + 294.0, + 1915.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1908.0, + 1406.0, + 1908.0, + 1406.0, + 1948.0, + 294.0, + 1948.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1941.0, + 1404.0, + 1941.0, + 1404.0, + 1979.0, + 294.0, + 1979.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1975.0, + 1384.0, + 1975.0, + 1384.0, + 2012.0, + 293.0, + 2012.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + 
"page_no": 7, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 292, + 207, + 1411, + 207, + 1411, + 2021, + 292, + 2021 + ], + "score": 0.933 + }, + { + "category_id": 0, + "poly": [ + 299, + 201, + 456, + 201, + 456, + 236, + 299, + 236 + ], + "score": 0.889 + }, + { + "category_id": 2, + "poly": [ + 839, + 2060, + 859, + 2060, + 859, + 2085, + 839, + 2085 + ], + "score": 0.774 + }, + { + "category_id": 15, + "poly": [ + 295.0, + 197.0, + 460.0, + 197.0, + 460.0, + 242.0, + 295.0, + 242.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 839.0, + 2060.0, + 860.0, + 2060.0, + 860.0, + 2090.0, + 839.0, + 2090.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 198.0, + 463.0, + 198.0, + 463.0, + 241.0, + 295.0, + 241.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 265.0, + 1407.0, + 265.0, + 1407.0, + 309.0, + 292.0, + 309.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 301.0, + 924.0, + 301.0, + 924.0, + 338.0, + 322.0, + 338.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 358.0, + 1405.0, + 358.0, + 1405.0, + 396.0, + 294.0, + 396.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 314.0, + 389.0, + 711.0, + 389.0, + 711.0, + 428.0, + 314.0, + 428.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 444.0, + 1407.0, + 444.0, + 1407.0, + 490.0, + 290.0, + 490.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 480.0, + 1100.0, + 480.0, + 1100.0, + 524.0, + 318.0, + 524.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 538.0, + 1403.0, + 538.0, + 1403.0, + 580.0, + 292.0, + 580.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 574.0, + 1403.0, + 574.0, 
+ 1403.0, + 612.0, + 318.0, + 612.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 632.0, + 1407.0, + 632.0, + 1407.0, + 669.0, + 294.0, + 669.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 663.0, + 1407.0, + 663.0, + 1407.0, + 701.0, + 320.0, + 701.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 695.0, + 485.0, + 695.0, + 485.0, + 733.0, + 322.0, + 733.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 753.0, + 1405.0, + 753.0, + 1405.0, + 791.0, + 294.0, + 791.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 783.0, + 1409.0, + 783.0, + 1409.0, + 829.0, + 318.0, + 829.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 817.0, + 559.0, + 817.0, + 559.0, + 855.0, + 320.0, + 855.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 873.0, + 1409.0, + 873.0, + 1409.0, + 919.0, + 290.0, + 919.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 909.0, + 1178.0, + 909.0, + 1178.0, + 950.0, + 318.0, + 950.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 962.0, + 1409.0, + 962.0, + 1409.0, + 1010.0, + 290.0, + 1010.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 995.0, + 399.0, + 995.0, + 399.0, + 1043.0, + 320.0, + 1043.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1060.0, + 1403.0, + 1060.0, + 1403.0, + 1098.0, + 294.0, + 1098.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 1094.0, + 1070.0, + 1094.0, + 1070.0, + 1132.0, + 318.0, + 1132.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1150.0, + 1295.0, + 1150.0, + 1295.0, + 1188.0, + 292.0, + 1188.0 + ], + "score": 
1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1210.0, + 1407.0, + 1210.0, + 1407.0, + 1248.0, + 294.0, + 1248.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1242.0, + 900.0, + 1242.0, + 900.0, + 1279.0, + 322.0, + 1279.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 1295.0, + 1407.0, + 1295.0, + 1407.0, + 1343.0, + 290.0, + 1343.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 316.0, + 1331.0, + 1242.0, + 1331.0, + 1242.0, + 1371.0, + 316.0, + 1371.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1389.0, + 1405.0, + 1389.0, + 1405.0, + 1427.0, + 294.0, + 1427.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 1423.0, + 400.0, + 1423.0, + 400.0, + 1463.0, + 318.0, + 1463.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1477.0, + 1411.0, + 1477.0, + 1411.0, + 1523.0, + 292.0, + 1523.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 314.0, + 1505.0, + 1409.0, + 1505.0, + 1409.0, + 1561.0, + 314.0, + 1561.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1547.0, + 932.0, + 1547.0, + 932.0, + 1585.0, + 320.0, + 1585.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 1600.0, + 1407.0, + 1600.0, + 1407.0, + 1646.0, + 290.0, + 1646.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 1636.0, + 1248.0, + 1636.0, + 1248.0, + 1680.0, + 318.0, + 1680.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 1692.0, + 1407.0, + 1692.0, + 1407.0, + 1738.0, + 290.0, + 1738.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 316.0, + 1726.0, + 1311.0, + 1726.0, + 1311.0, + 1770.0, + 316.0, + 1770.0 + ], + "score": 1.0, + "text": 
"" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 1784.0, + 1411.0, + 1784.0, + 1411.0, + 1828.0, + 290.0, + 1828.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 1820.0, + 1407.0, + 1820.0, + 1407.0, + 1858.0, + 318.0, + 1858.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1845.0, + 413.0, + 1845.0, + 413.0, + 1892.0, + 320.0, + 1892.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1910.0, + 1409.0, + 1910.0, + 1409.0, + 1947.0, + 294.0, + 1947.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 1941.0, + 1409.0, + 1941.0, + 1409.0, + 1985.0, + 318.0, + 1985.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1975.0, + 1040.0, + 1975.0, + 1040.0, + 2013.0, + 322.0, + 2013.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 8, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 2, + "poly": [ + 836, + 2061, + 866, + 2061, + 866, + 2087, + 836, + 2087 + ], + "score": 0.843 + }, + { + "category_id": 1, + "poly": [ + 287, + 124, + 1411, + 124, + 1411, + 2025, + 287, + 2025 + ], + "score": 0.69 + }, + { + "category_id": 15, + "poly": [ + 832.0, + 2057.0, + 870.0, + 2057.0, + 870.0, + 2095.0, + 832.0, + 2095.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 205.0, + 1408.0, + 205.0, + 1408.0, + 239.0, + 294.0, + 239.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 232.0, + 1406.0, + 232.0, + 1406.0, + 272.0, + 320.0, + 272.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 268.0, + 862.0, + 268.0, + 862.0, + 305.0, + 318.0, + 305.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 322.0, + 1408.0, + 322.0, + 1408.0, + 362.0, + 292.0, + 362.0 + ], + "score": 1.0, + 
"text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 355.0, + 913.0, + 355.0, + 913.0, + 395.0, + 320.0, + 395.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 410.0, + 1408.0, + 410.0, + 1408.0, + 449.0, + 292.0, + 449.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 443.0, + 1227.0, + 443.0, + 1227.0, + 483.0, + 320.0, + 483.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 288.0, + 491.0, + 1408.0, + 491.0, + 1408.0, + 541.0, + 288.0, + 541.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 530.0, + 777.0, + 530.0, + 777.0, + 570.0, + 320.0, + 570.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 585.0, + 1408.0, + 585.0, + 1408.0, + 624.0, + 292.0, + 624.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 618.0, + 1404.0, + 618.0, + 1404.0, + 651.0, + 322.0, + 651.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 651.0, + 1059.0, + 651.0, + 1059.0, + 691.0, + 318.0, + 691.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 288.0, + 699.0, + 1408.0, + 699.0, + 1408.0, + 747.0, + 288.0, + 747.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 737.0, + 724.0, + 737.0, + 724.0, + 776.0, + 320.0, + 776.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 793.0, + 1408.0, + 793.0, + 1408.0, + 833.0, + 292.0, + 833.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 822.0, + 1410.0, + 822.0, + 1410.0, + 866.0, + 318.0, + 866.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 858.0, + 994.0, + 858.0, + 994.0, + 897.0, + 320.0, + 897.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 910.0, 
+ 1408.0, + 910.0, + 1408.0, + 954.0, + 292.0, + 954.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 945.0, + 698.0, + 945.0, + 698.0, + 983.0, + 320.0, + 983.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 997.0, + 1408.0, + 997.0, + 1408.0, + 1039.0, + 290.0, + 1039.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1037.0, + 617.0, + 1037.0, + 617.0, + 1070.0, + 322.0, + 1070.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1091.0, + 1406.0, + 1091.0, + 1406.0, + 1125.0, + 297.0, + 1125.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1120.0, + 1404.0, + 1120.0, + 1404.0, + 1160.0, + 320.0, + 1160.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1154.0, + 1019.0, + 1154.0, + 1019.0, + 1193.0, + 322.0, + 1193.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1206.0, + 1406.0, + 1206.0, + 1406.0, + 1245.0, + 294.0, + 1245.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 1241.0, + 1408.0, + 1241.0, + 1408.0, + 1281.0, + 318.0, + 1281.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1277.0, + 783.0, + 1277.0, + 783.0, + 1310.0, + 322.0, + 1310.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1329.0, + 1378.0, + 1329.0, + 1378.0, + 1362.0, + 294.0, + 1362.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1383.0, + 1406.0, + 1383.0, + 1406.0, + 1423.0, + 292.0, + 1423.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1421.0, + 664.0, + 1421.0, + 664.0, + 1454.0, + 320.0, + 1454.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1475.0, + 1404.0, + 1475.0, + 
1404.0, + 1508.0, + 294.0, + 1508.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 316.0, + 1504.0, + 1144.0, + 1504.0, + 1144.0, + 1541.0, + 316.0, + 1541.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1558.0, + 1408.0, + 1558.0, + 1408.0, + 1598.0, + 294.0, + 1598.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1591.0, + 1408.0, + 1591.0, + 1408.0, + 1631.0, + 320.0, + 1631.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1627.0, + 554.0, + 1627.0, + 554.0, + 1660.0, + 322.0, + 1660.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1679.0, + 1410.0, + 1679.0, + 1410.0, + 1719.0, + 292.0, + 1719.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1714.0, + 881.0, + 1714.0, + 881.0, + 1748.0, + 320.0, + 1748.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1769.0, + 1406.0, + 1769.0, + 1406.0, + 1802.0, + 294.0, + 1802.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1800.0, + 1338.0, + 1800.0, + 1338.0, + 1839.0, + 320.0, + 1839.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1854.0, + 1410.0, + 1854.0, + 1410.0, + 1894.0, + 292.0, + 1894.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1887.0, + 1306.0, + 1887.0, + 1306.0, + 1927.0, + 320.0, + 1927.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 288.0, + 1940.0, + 1410.0, + 1940.0, + 1410.0, + 1983.0, + 288.0, + 1983.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1975.0, + 1195.0, + 1975.0, + 1195.0, + 2015.0, + 320.0, + 2015.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 9, + "width": 1700, + "height": 2200 + } + }, + { + 
"layout_dets": [ + { + "category_id": 2, + "poly": [ + 835, + 2061, + 864, + 2061, + 864, + 2087, + 835, + 2087 + ], + "score": 0.834 + }, + { + "category_id": 1, + "poly": [ + 290, + 202, + 1410, + 202, + 1410, + 2019, + 290, + 2019 + ], + "score": 0.726 + }, + { + "category_id": 15, + "poly": [ + 831.0, + 2058.0, + 869.0, + 2058.0, + 869.0, + 2097.0, + 831.0, + 2097.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 202.0, + 1408.0, + 202.0, + 1408.0, + 240.0, + 294.0, + 240.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 236.0, + 1241.0, + 236.0, + 1241.0, + 274.0, + 322.0, + 274.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 292.0, + 1406.0, + 292.0, + 1406.0, + 330.0, + 292.0, + 330.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 328.0, + 906.0, + 328.0, + 906.0, + 360.0, + 322.0, + 360.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 288.0, + 378.0, + 1410.0, + 378.0, + 1410.0, + 424.0, + 288.0, + 424.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 440.0, + 1408.0, + 440.0, + 1408.0, + 477.0, + 292.0, + 477.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 472.0, + 1410.0, + 472.0, + 1410.0, + 509.0, + 318.0, + 509.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 316.0, + 499.0, + 1412.0, + 499.0, + 1412.0, + 545.0, + 316.0, + 545.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 537.0, + 1243.0, + 537.0, + 1243.0, + 575.0, + 322.0, + 575.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 589.0, + 1404.0, + 589.0, + 1404.0, + 635.0, + 292.0, + 635.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 625.0, + 734.0, + 625.0, + 734.0, + 663.0, + 320.0, 
+ 663.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 683.0, + 1406.0, + 683.0, + 1406.0, + 721.0, + 292.0, + 721.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 715.0, + 1289.0, + 715.0, + 1289.0, + 755.0, + 318.0, + 755.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 767.0, + 1408.0, + 767.0, + 1408.0, + 813.0, + 290.0, + 813.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 803.0, + 1227.0, + 803.0, + 1227.0, + 847.0, + 318.0, + 847.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 857.0, + 1408.0, + 857.0, + 1408.0, + 903.0, + 292.0, + 903.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 895.0, + 1181.0, + 895.0, + 1181.0, + 933.0, + 322.0, + 933.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 951.0, + 1406.0, + 951.0, + 1406.0, + 989.0, + 294.0, + 989.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 981.0, + 856.0, + 981.0, + 856.0, + 1025.0, + 320.0, + 1025.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1043.0, + 1404.0, + 1043.0, + 1404.0, + 1075.0, + 296.0, + 1075.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1075.0, + 1406.0, + 1075.0, + 1406.0, + 1112.0, + 320.0, + 1112.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 1107.0, + 508.0, + 1107.0, + 508.0, + 1142.0, + 318.0, + 1142.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1162.0, + 1406.0, + 1162.0, + 1406.0, + 1200.0, + 292.0, + 1200.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1196.0, + 555.0, + 1196.0, + 555.0, + 1234.0, + 322.0, + 1234.0 + ], + "score": 1.0, + "text": "" + }, 
+ { + "category_id": 15, + "poly": [ + 292.0, + 1252.0, + 1404.0, + 1252.0, + 1404.0, + 1290.0, + 292.0, + 1290.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1282.0, + 1165.0, + 1282.0, + 1165.0, + 1320.0, + 320.0, + 1320.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1340.0, + 1406.0, + 1340.0, + 1406.0, + 1378.0, + 294.0, + 1378.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 1370.0, + 1408.0, + 1370.0, + 1408.0, + 1412.0, + 318.0, + 1412.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1406.0, + 559.0, + 1406.0, + 559.0, + 1444.0, + 320.0, + 1444.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 288.0, + 1456.0, + 1408.0, + 1456.0, + 1408.0, + 1506.0, + 288.0, + 1506.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1496.0, + 952.0, + 1496.0, + 952.0, + 1534.0, + 322.0, + 1534.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1548.0, + 1408.0, + 1548.0, + 1408.0, + 1594.0, + 292.0, + 1594.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1586.0, + 1077.0, + 1586.0, + 1077.0, + 1624.0, + 320.0, + 1624.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 288.0, + 1638.0, + 1408.0, + 1638.0, + 1408.0, + 1684.0, + 288.0, + 1684.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1676.0, + 1010.0, + 1676.0, + 1010.0, + 1712.0, + 320.0, + 1712.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1732.0, + 1406.0, + 1732.0, + 1406.0, + 1769.0, + 292.0, + 1769.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1765.0, + 1330.0, + 1765.0, + 1330.0, + 1803.0, + 320.0, + 1803.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 294.0, + 1821.0, + 1408.0, + 1821.0, + 1408.0, + 1859.0, + 294.0, + 1859.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 1851.0, + 1410.0, + 1851.0, + 1410.0, + 1895.0, + 318.0, + 1895.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 1885.0, + 818.0, + 1885.0, + 818.0, + 1923.0, + 318.0, + 1923.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1943.0, + 1408.0, + 1943.0, + 1408.0, + 1981.0, + 292.0, + 1981.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 1975.0, + 471.0, + 1975.0, + 471.0, + 2013.0, + 318.0, + 2013.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 10, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 288, + 189, + 1409, + 189, + 1409, + 1426, + 288, + 1426 + ], + "score": 0.911 + }, + { + "category_id": 2, + "poly": [ + 836, + 2061, + 866, + 2061, + 866, + 2086, + 836, + 2086 + ], + "score": 0.831 + }, + { + "category_id": 15, + "poly": [ + 832.0, + 2058.0, + 869.0, + 2058.0, + 869.0, + 2097.0, + 832.0, + 2097.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 203.0, + 1405.0, + 203.0, + 1405.0, + 242.0, + 293.0, + 242.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 235.0, + 877.0, + 235.0, + 877.0, + 273.0, + 321.0, + 273.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 287.0, + 1405.0, + 287.0, + 1405.0, + 331.0, + 290.0, + 331.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 323.0, + 556.0, + 323.0, + 556.0, + 360.0, + 320.0, + 360.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 374.0, + 1405.0, + 374.0, + 1405.0, + 418.0, + 290.0, + 418.0 + ], + "score": 1.0, + "text": "" + }, + { 
+ "category_id": 15, + "poly": [ + 321.0, + 411.0, + 1117.0, + 411.0, + 1117.0, + 450.0, + 321.0, + 450.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 462.0, + 1407.0, + 462.0, + 1407.0, + 504.0, + 290.0, + 504.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 497.0, + 1410.0, + 497.0, + 1410.0, + 536.0, + 320.0, + 536.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 528.0, + 396.0, + 528.0, + 396.0, + 565.0, + 321.0, + 565.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 580.0, + 1405.0, + 580.0, + 1405.0, + 622.0, + 292.0, + 622.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 615.0, + 830.0, + 615.0, + 830.0, + 653.0, + 320.0, + 653.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 670.0, + 1407.0, + 670.0, + 1407.0, + 709.0, + 293.0, + 709.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 317.0, + 700.0, + 1410.0, + 700.0, + 1410.0, + 745.0, + 317.0, + 745.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 735.0, + 606.0, + 735.0, + 606.0, + 774.0, + 318.0, + 774.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 785.0, + 1407.0, + 785.0, + 1407.0, + 831.0, + 290.0, + 831.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 317.0, + 821.0, + 1353.0, + 821.0, + 1353.0, + 862.0, + 317.0, + 862.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 874.0, + 1405.0, + 874.0, + 1405.0, + 916.0, + 290.0, + 916.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 319.0, + 909.0, + 1045.0, + 909.0, + 1045.0, + 947.0, + 319.0, + 947.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 961.0, + 1405.0, + 
961.0, + 1405.0, + 1002.0, + 290.0, + 1002.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 996.0, + 1158.0, + 996.0, + 1158.0, + 1036.0, + 320.0, + 1036.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1048.0, + 1405.0, + 1048.0, + 1405.0, + 1089.0, + 292.0, + 1089.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 319.0, + 1082.0, + 1407.0, + 1082.0, + 1407.0, + 1122.0, + 319.0, + 1122.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1117.0, + 766.0, + 1117.0, + 766.0, + 1151.0, + 320.0, + 1151.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1168.0, + 1405.0, + 1168.0, + 1405.0, + 1210.0, + 292.0, + 1210.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1203.0, + 1222.0, + 1203.0, + 1222.0, + 1241.0, + 320.0, + 1241.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1256.0, + 1405.0, + 1256.0, + 1405.0, + 1298.0, + 292.0, + 1298.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1289.0, + 987.0, + 1289.0, + 987.0, + 1328.0, + 320.0, + 1328.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1339.0, + 1407.0, + 1339.0, + 1407.0, + 1387.0, + 292.0, + 1387.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1376.0, + 1076.0, + 1376.0, + 1076.0, + 1415.0, + 321.0, + 1415.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 11, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 296, + 435, + 1406, + 435, + 1406, + 696, + 296, + 696 + ], + "score": 0.982 + }, + { + "category_id": 3, + "poly": [ + 307, + 1273, + 1394, + 1273, + 1394, + 1808, + 307, + 1808 + ], + "score": 0.98 + }, + { + "category_id": 1, + "poly": [ + 298, + 
260, + 1404, + 260, + 1404, + 423, + 298, + 423 + ], + "score": 0.978 + }, + { + "category_id": 4, + "poly": [ + 295, + 988, + 1406, + 988, + 1406, + 1147, + 295, + 1147 + ], + "score": 0.965 + }, + { + "category_id": 3, + "poly": [ + 297, + 722, + 1385, + 722, + 1385, + 971, + 297, + 971 + ], + "score": 0.962 + }, + { + "category_id": 4, + "poly": [ + 297, + 1823, + 1406, + 1823, + 1406, + 1981, + 297, + 1981 + ], + "score": 0.95 + }, + { + "category_id": 0, + "poly": [ + 298, + 1202, + 530, + 1202, + 530, + 1240, + 298, + 1240 + ], + "score": 0.912 + }, + { + "category_id": 0, + "poly": [ + 299, + 200, + 518, + 200, + 518, + 237, + 299, + 237 + ], + "score": 0.908 + }, + { + "category_id": 2, + "poly": [ + 836, + 2061, + 864, + 2061, + 864, + 2085, + 836, + 2085 + ], + "score": 0.841 + }, + { + "category_id": 15, + "poly": [ + 324.0, + 1403.0, + 707.0, + 1403.0, + 707.0, + 1529.0, + 324.0, + 1529.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 870.0, + 1438.0, + 930.0, + 1438.0, + 930.0, + 1459.0, + 870.0, + 1459.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1019.0, + 1441.0, + 1061.0, + 1441.0, + 1061.0, + 1454.0, + 1019.0, + 1454.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1165.0, + 1444.0, + 1192.0, + 1444.0, + 1192.0, + 1452.0, + 1165.0, + 1452.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 581.0, + 1464.0, + 591.0, + 1464.0, + 591.0, + 1478.0, + 581.0, + 1478.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 876.0, + 1455.0, + 970.0, + 1455.0, + 970.0, + 1492.0, + 876.0, + 1492.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 331.0, + 1568.0, + 566.0, + 1568.0, + 566.0, + 1661.0, + 331.0, + 1661.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 653.0, + 1590.0, + 677.0, + 1590.0, + 677.0, + 1609.0, + 653.0, + 1609.0 + ], + "score": 1.0, 
+ "text": "" + }, + { + "category_id": 15, + "poly": [ + 740.0, + 1573.0, + 860.0, + 1573.0, + 860.0, + 1642.0, + 740.0, + 1642.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 862.0, + 1581.0, + 986.0, + 1581.0, + 986.0, + 1639.0, + 862.0, + 1639.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1127.0, + 1568.0, + 1372.0, + 1568.0, + 1372.0, + 1660.0, + 1127.0, + 1660.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 497.0, + 1598.0, + 525.0, + 1598.0, + 525.0, + 1606.0, + 497.0, + 1606.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 545.0, + 1604.0, + 562.0, + 1604.0, + 562.0, + 1619.0, + 545.0, + 1619.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 779.0, + 1603.0, + 794.0, + 1603.0, + 794.0, + 1611.0, + 779.0, + 1611.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 919.0, + 1603.0, + 935.0, + 1603.0, + 935.0, + 1611.0, + 919.0, + 1611.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 334.0, + 1702.0, + 454.0, + 1702.0, + 454.0, + 1789.0, + 334.0, + 1789.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 593.0, + 1731.0, + 703.0, + 1731.0, + 703.0, + 1759.0, + 593.0, + 1759.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 813.0, + 1678.0, + 1390.0, + 1678.0, + 1390.0, + 1806.0, + 813.0, + 1806.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 597.0, + 1432.0, + 678.0, + 1432.0, + 678.0, + 1460.5, + 597.0, + 1460.5 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 992.0, + 1578.0, + 1127.0, + 1578.0, + 1127.0, + 1646.0, + 992.0, + 1646.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 987.0, + 1404.0, + 987.0, + 1404.0, + 1026.0, + 294.0, + 1026.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 294.0, + 1020.0, + 1406.0, + 1020.0, + 1406.0, + 1058.0, + 294.0, + 1058.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1050.0, + 1409.0, + 1050.0, + 1409.0, + 1089.0, + 293.0, + 1089.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1082.0, + 1404.0, + 1082.0, + 1404.0, + 1120.0, + 294.0, + 1120.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1113.0, + 1218.0, + 1113.0, + 1218.0, + 1151.0, + 295.0, + 1151.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1031.0, + 714.0, + 1238.0, + 714.0, + 1238.0, + 748.0, + 1031.0, + 748.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 810.0, + 739.0, + 846.0, + 739.0, + 846.0, + 920.0, + 810.0, + 920.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 920.0, + 749.0, + 1016.0, + 749.0, + 1016.0, + 784.0, + 920.0, + 784.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 840.0, + 762.0, + 888.0, + 762.0, + 888.0, + 809.0, + 840.0, + 809.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 926.0, + 777.0, + 1015.0, + 777.0, + 1015.0, + 805.0, + 926.0, + 805.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 930.0, + 805.0, + 1011.0, + 805.0, + 1011.0, + 829.0, + 930.0, + 829.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 925.0, + 830.0, + 1012.0, + 830.0, + 1012.0, + 854.0, + 925.0, + 854.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 892.0, + 925.0, + 908.0, + 925.0, + 908.0, + 942.0, + 892.0, + 942.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1033.0, + 918.0, + 1058.0, + 918.0, + 1058.0, + 948.0, + 1033.0, + 948.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1171.0, + 
917.0, + 1212.0, + 917.0, + 1212.0, + 951.0, + 1171.0, + 951.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1319.0, + 916.0, + 1358.0, + 916.0, + 1358.0, + 950.0, + 1319.0, + 950.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1098.0, + 943.0, + 1174.0, + 943.0, + 1174.0, + 980.0, + 1098.0, + 980.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 832.0, + 803.0, + 896.0, + 803.0, + 896.0, + 837.5, + 832.0, + 837.5 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 841.75, + 849.0, + 888.75, + 849.0, + 888.75, + 878.0, + 841.75, + 878.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1823.0, + 1404.0, + 1823.0, + 1404.0, + 1860.0, + 294.0, + 1860.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1847.0, + 1408.0, + 1847.0, + 1408.0, + 1898.0, + 291.0, + 1898.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1886.0, + 1404.0, + 1886.0, + 1404.0, + 1923.0, + 295.0, + 1923.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1917.0, + 1408.0, + 1917.0, + 1408.0, + 1954.0, + 294.0, + 1954.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1949.0, + 1333.0, + 1949.0, + 1333.0, + 1983.0, + 296.0, + 1983.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 1197.0, + 536.0, + 1197.0, + 536.0, + 1249.0, + 290.0, + 1249.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 197.0, + 523.0, + 197.0, + 523.0, + 245.0, + 293.0, + 245.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 832.0, + 2058.0, + 869.0, + 2058.0, + 869.0, + 2096.0, + 832.0, + 2096.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 434.0, + 1404.0, + 434.0, + 1404.0, 
+ 472.0, + 293.0, + 472.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 470.0, + 1403.0, + 470.0, + 1403.0, + 502.0, + 295.0, + 502.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 502.0, + 1408.0, + 502.0, + 1408.0, + 537.0, + 294.0, + 537.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 535.0, + 1404.0, + 535.0, + 1404.0, + 567.0, + 295.0, + 567.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 565.0, + 1408.0, + 565.0, + 1408.0, + 603.0, + 293.0, + 603.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 597.0, + 1404.0, + 597.0, + 1404.0, + 633.0, + 293.0, + 633.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 629.0, + 1404.0, + 629.0, + 1404.0, + 666.0, + 293.0, + 666.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 663.0, + 1239.0, + 663.0, + 1239.0, + 701.0, + 293.0, + 701.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 260.0, + 1404.0, + 260.0, + 1404.0, + 296.0, + 293.0, + 296.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 293.0, + 1405.0, + 293.0, + 1405.0, + 329.0, + 293.0, + 329.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 327.0, + 1405.0, + 327.0, + 1405.0, + 360.0, + 294.0, + 360.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 358.0, + 1405.0, + 358.0, + 1405.0, + 396.0, + 292.0, + 396.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 392.0, + 734.0, + 392.0, + 734.0, + 425.0, + 293.0, + 425.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 12, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 297, + 
891, + 1405, + 891, + 1405, + 1408, + 297, + 1408 + ], + "score": 0.983 + }, + { + "category_id": 1, + "poly": [ + 297, + 259, + 1405, + 259, + 1405, + 877, + 297, + 877 + ], + "score": 0.982 + }, + { + "category_id": 1, + "poly": [ + 297, + 1421, + 1406, + 1421, + 1406, + 1843, + 297, + 1843 + ], + "score": 0.982 + }, + { + "category_id": 0, + "poly": [ + 299, + 198, + 682, + 198, + 682, + 237, + 299, + 237 + ], + "score": 0.91 + }, + { + "category_id": 2, + "poly": [ + 836, + 2061, + 864, + 2061, + 864, + 2085, + 836, + 2085 + ], + "score": 0.853 + }, + { + "category_id": 13, + "poly": [ + 351, + 1280, + 410, + 1280, + 410, + 1309, + 351, + 1309 + ], + "score": 0.49, + "latex": "5 8 0 \\mathrm { k }" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 195.0, + 687.0, + 195.0, + 687.0, + 242.0, + 295.0, + 242.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 832.0, + 2058.0, + 870.0, + 2058.0, + 870.0, + 2096.0, + 832.0, + 2096.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 890.0, + 1405.0, + 890.0, + 1405.0, + 926.0, + 295.0, + 926.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 922.0, + 1405.0, + 922.0, + 1405.0, + 957.0, + 292.0, + 957.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 953.0, + 1406.0, + 953.0, + 1406.0, + 990.0, + 294.0, + 990.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 985.0, + 1405.0, + 985.0, + 1405.0, + 1024.0, + 294.0, + 1024.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1020.0, + 1405.0, + 1020.0, + 1405.0, + 1056.0, + 295.0, + 1056.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1052.0, + 1406.0, + 1052.0, + 1406.0, + 1087.0, + 292.0, + 1087.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1085.0, + 1403.0, + 1085.0, + 
1403.0, + 1117.0, + 296.0, + 1117.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1115.0, + 1406.0, + 1115.0, + 1406.0, + 1153.0, + 294.0, + 1153.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1150.0, + 1406.0, + 1150.0, + 1406.0, + 1184.0, + 292.0, + 1184.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1183.0, + 1406.0, + 1183.0, + 1406.0, + 1215.0, + 296.0, + 1215.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1216.0, + 1406.0, + 1216.0, + 1406.0, + 1248.0, + 296.0, + 1248.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1245.0, + 1405.0, + 1245.0, + 1405.0, + 1283.0, + 294.0, + 1283.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1281.0, + 350.0, + 1281.0, + 350.0, + 1311.0, + 295.0, + 1311.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 411.0, + 1281.0, + 1403.0, + 1281.0, + 1403.0, + 1311.0, + 411.0, + 1311.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1313.0, + 1403.0, + 1313.0, + 1403.0, + 1345.0, + 296.0, + 1345.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1345.0, + 1402.0, + 1345.0, + 1402.0, + 1377.0, + 296.0, + 1377.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1378.0, + 642.0, + 1378.0, + 642.0, + 1410.0, + 296.0, + 1410.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 258.0, + 1405.0, + 258.0, + 1405.0, + 298.0, + 294.0, + 298.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 294.0, + 1407.0, + 294.0, + 1407.0, + 327.0, + 294.0, + 327.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 327.0, + 1406.0, + 327.0, + 1406.0, + 359.0, + 
296.0, + 359.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 359.0, + 1407.0, + 359.0, + 1407.0, + 393.0, + 295.0, + 393.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 387.0, + 1406.0, + 387.0, + 1406.0, + 430.0, + 292.0, + 430.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 423.0, + 1405.0, + 423.0, + 1405.0, + 458.0, + 294.0, + 458.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 458.0, + 1403.0, + 458.0, + 1403.0, + 489.0, + 296.0, + 489.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 491.0, + 1403.0, + 491.0, + 1403.0, + 522.0, + 296.0, + 522.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 521.0, + 1403.0, + 521.0, + 1403.0, + 556.0, + 295.0, + 556.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 553.0, + 1405.0, + 553.0, + 1405.0, + 588.0, + 295.0, + 588.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 587.0, + 1405.0, + 587.0, + 1405.0, + 622.0, + 295.0, + 622.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 620.0, + 1405.0, + 620.0, + 1405.0, + 652.0, + 296.0, + 652.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 650.0, + 1406.0, + 650.0, + 1406.0, + 685.0, + 295.0, + 685.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 683.0, + 1405.0, + 683.0, + 1405.0, + 718.0, + 295.0, + 718.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 715.0, + 1405.0, + 715.0, + 1405.0, + 749.0, + 292.0, + 749.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 749.0, + 1405.0, + 749.0, + 1405.0, + 784.0, + 295.0, + 784.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 295.0, + 780.0, + 1405.0, + 780.0, + 1405.0, + 815.0, + 295.0, + 815.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 814.0, + 1405.0, + 814.0, + 1405.0, + 848.0, + 295.0, + 848.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 847.0, + 1222.0, + 847.0, + 1222.0, + 878.0, + 295.0, + 878.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1419.0, + 1404.0, + 1419.0, + 1404.0, + 1454.0, + 295.0, + 1454.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1455.0, + 1404.0, + 1455.0, + 1404.0, + 1485.0, + 294.0, + 1485.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1485.0, + 1406.0, + 1485.0, + 1406.0, + 1520.0, + 294.0, + 1520.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1518.0, + 1404.0, + 1518.0, + 1404.0, + 1550.0, + 295.0, + 1550.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1550.0, + 1404.0, + 1550.0, + 1404.0, + 1586.0, + 294.0, + 1586.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1581.0, + 1406.0, + 1581.0, + 1406.0, + 1617.0, + 294.0, + 1617.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1617.0, + 1404.0, + 1617.0, + 1404.0, + 1648.0, + 296.0, + 1648.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1647.0, + 1406.0, + 1647.0, + 1406.0, + 1682.0, + 294.0, + 1682.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1681.0, + 1406.0, + 1681.0, + 1406.0, + 1715.0, + 294.0, + 1715.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1714.0, + 1403.0, + 1714.0, + 1403.0, + 1745.0, + 295.0, + 1745.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + 
"poly": [ + 294.0, + 1745.0, + 1404.0, + 1745.0, + 1404.0, + 1780.0, + 294.0, + 1780.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1778.0, + 1403.0, + 1778.0, + 1403.0, + 1811.0, + 295.0, + 1811.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1809.0, + 982.0, + 1809.0, + 982.0, + 1844.0, + 295.0, + 1844.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 13, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 5, + "poly": [ + 346, + 285, + 1344, + 285, + 1344, + 1110, + 346, + 1110 + ], + "score": 0.985, + "html": "
NameSymbolValue
General
Replay capacity (FIFO)Start learningBatch sizeBatch lengthMLP sizeActivationBT10610432324Γ— 512LayerNorm+ELU
World Model
RSSM sizeNumber of latentsClasses per latentKL balancing51232320.8
Actor Critic
Imagination horizonDiscountReturn lambdaTarget update intervalH?150.950.95100
All Optimizers
Gradient clippingLearning rateAdam epsilonE10010-410-6
" + }, + { + "category_id": 1, + "poly": [ + 297, + 1572, + 1405, + 1572, + 1405, + 1865, + 297, + 1865 + ], + "score": 0.982 + }, + { + "category_id": 1, + "poly": [ + 297, + 1301, + 1405, + 1301, + 1405, + 1562, + 297, + 1562 + ], + "score": 0.981 + }, + { + "category_id": 1, + "poly": [ + 299, + 1877, + 1402, + 1877, + 1402, + 2007, + 299, + 2007 + ], + "score": 0.976 + }, + { + "category_id": 1, + "poly": [ + 300, + 1223, + 1402, + 1223, + 1402, + 1289, + 300, + 1289 + ], + "score": 0.956 + }, + { + "category_id": 0, + "poly": [ + 296, + 1163, + 866, + 1163, + 866, + 1203, + 296, + 1203 + ], + "score": 0.943 + }, + { + "category_id": 0, + "poly": [ + 296, + 199, + 612, + 199, + 612, + 239, + 296, + 239 + ], + "score": 0.871 + }, + { + "category_id": 2, + "poly": [ + 836, + 2061, + 864, + 2061, + 864, + 2085, + 836, + 2085 + ], + "score": 0.821 + }, + { + "category_id": 2, + "poly": [ + 836, + 2061, + 864, + 2061, + 864, + 2086, + 836, + 2086 + ], + "score": 0.098 + }, + { + "category_id": 13, + "poly": [ + 794, + 1876, + 933, + 1876, + 933, + 1908, + 794, + 1908 + ], + "score": 0.91, + "latex": "0 . 8 \\times 0 . 
8 \\mathrm { { m ^ { 2 } } }" + }, + { + "category_id": 13, + "poly": [ + 837, + 862, + 855, + 862, + 855, + 881, + 837, + 881 + ], + "score": 0.82, + "latex": "\\gamma" + }, + { + "category_id": 13, + "poly": [ + 837, + 885, + 855, + 885, + 855, + 906, + 837, + 906 + ], + "score": 0.78, + "latex": "\\lambda" + }, + { + "category_id": 13, + "poly": [ + 834, + 456, + 858, + 456, + 858, + 477, + 834, + 477 + ], + "score": 0.76, + "latex": "B" + }, + { + "category_id": 13, + "poly": [ + 835, + 487, + 857, + 487, + 857, + 508, + 835, + 508 + ], + "score": 0.73, + "latex": "T" + }, + { + "category_id": 13, + "poly": [ + 832, + 825, + 859, + 825, + 859, + 845, + 832, + 845 + ], + "score": 0.72, + "latex": "H" + }, + { + "category_id": 13, + "poly": [ + 881, + 1802, + 904, + 1802, + 904, + 1828, + 881, + 1828 + ], + "score": 0.31, + "latex": "\\textsf { Z }" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1163.0, + 866.0, + 1163.0, + 866.0, + 1205.0, + 293.0, + 1205.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 194.0, + 615.0, + 194.0, + 615.0, + 251.0, + 291.0, + 251.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 832.0, + 2058.0, + 869.0, + 2058.0, + 869.0, + 2097.0, + 832.0, + 2097.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 832.0, + 2058.0, + 870.0, + 2058.0, + 870.0, + 2097.0, + 832.0, + 2097.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1571.0, + 1405.0, + 1571.0, + 1405.0, + 1608.0, + 295.0, + 1608.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1607.0, + 1407.0, + 1607.0, + 1407.0, + 1641.0, + 295.0, + 1641.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1636.0, + 1408.0, + 1636.0, + 1408.0, + 1674.0, + 294.0, + 1674.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1669.0, + 1405.0, + 
1669.0, + 1405.0, + 1709.0, + 294.0, + 1709.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1704.0, + 1405.0, + 1704.0, + 1405.0, + 1738.0, + 295.0, + 1738.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1736.0, + 1405.0, + 1736.0, + 1405.0, + 1771.0, + 294.0, + 1771.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1766.0, + 1403.0, + 1766.0, + 1403.0, + 1803.0, + 294.0, + 1803.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1801.0, + 880.0, + 1801.0, + 880.0, + 1835.0, + 295.0, + 1835.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 905.0, + 1801.0, + 1405.0, + 1801.0, + 1405.0, + 1835.0, + 905.0, + 1835.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1834.0, + 986.0, + 1834.0, + 986.0, + 1868.0, + 296.0, + 1868.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1297.0, + 1405.0, + 1297.0, + 1405.0, + 1341.0, + 292.0, + 1341.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1335.0, + 1405.0, + 1335.0, + 1405.0, + 1370.0, + 295.0, + 1370.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1364.0, + 1406.0, + 1364.0, + 1406.0, + 1405.0, + 292.0, + 1405.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1398.0, + 1408.0, + 1398.0, + 1408.0, + 1435.0, + 294.0, + 1435.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1430.0, + 1403.0, + 1430.0, + 1403.0, + 1465.0, + 295.0, + 1465.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1464.0, + 1405.0, + 1464.0, + 1405.0, + 1499.0, + 295.0, + 1499.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1497.0, + 1406.0, + 1497.0, + 
1406.0, + 1532.0, + 295.0, + 1532.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1529.0, + 1255.0, + 1529.0, + 1255.0, + 1566.0, + 292.0, + 1566.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1879.0, + 793.0, + 1879.0, + 793.0, + 1912.0, + 295.0, + 1912.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 934.0, + 1879.0, + 1404.0, + 1879.0, + 1404.0, + 1912.0, + 934.0, + 1912.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1910.0, + 1406.0, + 1910.0, + 1406.0, + 1947.0, + 294.0, + 1947.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1944.0, + 1407.0, + 1944.0, + 1407.0, + 1979.0, + 294.0, + 1979.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1974.0, + 1134.0, + 1974.0, + 1134.0, + 2014.0, + 293.0, + 2014.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1222.0, + 1406.0, + 1222.0, + 1406.0, + 1260.0, + 295.0, + 1260.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1256.0, + 919.0, + 1256.0, + 919.0, + 1292.0, + 296.0, + 1292.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 14, + "width": 1700, + "height": 2200 + } + } +] \ No newline at end of file diff --git a/parse/dev/a0SRWViFYW/a0SRWViFYW.md b/parse/dev/a0SRWViFYW/a0SRWViFYW.md new file mode 100644 index 0000000000000000000000000000000000000000..8f212d0bd4046c83906c071a4a6e566f0487e0f8 --- /dev/null +++ b/parse/dev/a0SRWViFYW/a0SRWViFYW.md @@ -0,0 +1,1539 @@ +# STOCHASTIC PROJECTIVE SPLITTING:SOLVING SADDLE-POINT PROBLEMS WITH MULTIPLEREGULARIZERS + +Anonymous authors Paper under double-blind review + +# ABSTRACT + +We present a new, stochastic variant of the projective splitting (PS) family of algorithms for monotone inclusion problems. 
It can solve min-max and noncooperative game formulations arising in applications such as robust ML without the convergence issues associated with gradient descent-ascent, the current de facto standard approach in ML applications. Our proposal is the first version of PS able to use stochastic gradient oracles. It can solve min-max games while handling multiple constraints and nonsmooth regularizers via projection and proximal operators. Unlike other stochastic splitting methods that can solve such problems, our method does not rely on a product-space reformulation of the original problem. We prove almost-sure convergence of the iterates to the solution and a convergence rate for the expected residual. By working with monotone inclusions rather than variational inequalities, our analysis avoids the drawbacks of measuring convergence through the restricted gap function. We close with numerical experiments on a distributionally robust sparse logistic regression problem. + +# 1 INTRODUCTION + +The most prominent application of optimization in ML is empirical risk minimization. However, inspired by the success of GANs (Goodfellow et al., 2014), ML practitioners have developed more complicated min-max and adversarial optimization formulations (Yu et al., 2021; Kuhn et al., 2019; Shafieezadeh-Abadeh et al., 2015; Sinha et al., 2018; Lin et al., 2020; Namkoong & Duchi, 2016; Huang et al., 2017; Wadsworth et al., 2018; Zhang et al., 2018; Edwards & Storkey, 2015; Celis & Keswani, 2019). Solving these multi-player games leads to issues not seen when minimizing a single-player loss function. The competitive nature of a game leads to rotational dynamics that can cause intuitive gradient-based methods to fail to converge (Gidel et al., 2019; Daskalakis et al., 2018; Hsieh et al., 2020). + +A mathematical framework underlying both convex optimization and saddle-point problems is the monotone inclusion problem; see Ryu & Boyd (2016) for an introduction. 
Methods developed for monotone inclusions will converge for convex-concave games, as they are explicitly designed to handle such problems’ governing dynamics. In recent years, monotone inclusion methods and theory have started to receive attention in the ML community (Diakonikolas, 2020; Liu et al., 2021; Ryu et al., 2020; Pathak & Wainwright, 2020), with a focus on monotone variational inequalities, which form a special case of monotone inclusions (Antonakopoulos et al., 2019; Gidel et al., 2019; Daskalakis et al., 2018; Hsieh et al., 2020; Mertikopoulos et al., 2019). + +The most prevalent methods for solving min-max games in ML are variants of gradient descent-ascent (GDA). This method alternates between a gradient-descent step for the minimizing player and a gradient-ascent step for the maximizing player. Unfortunately, GDA requires additional assumptions to converge on convex-concave games, and it even fails for some simple 2D bilinear games (Gidel et al., 2019, Prop. 1). While there have been several approaches to modify either GDA (Chavdarova et al., 2021; Grnarova et al., 2021; Balduzzi et al., 2018) or the underlying game objective (Mescheder et al., 2018; Nagarajan & Kolter, 2017; Mescheder et al., 2017) to ensure convergence, this paper instead develops a method for solving monotone inclusions that can naturally handle game dynamics. + +Our approach builds upon the recently proposed projective splitting (PS) method with forward steps (Johnstone & Eckstein, 2020b). PS is designed specifically for solving monotone inclusions, thus does not fall prey to the convergence issues that plague GDA, at least for convex-concave games. PS is within the general class of projective splitting methods invented by Eckstein & Svaiter (2008) and developed further in Eckstein & Svaiter (2009); Alotaibi et al. (2014); Combettes & Eckstein (2018); Eckstein (2017); Johnstone & Eckstein (2019; 2021; 2020a). 
These methods work by creating a separating hyperplane between the current iterate and the solution and then moving closer to the solution by projecting the current iterate onto this hyperplane (see Section 3 for an overview). Other than being able to natively handle game dynamics, the primary advantage of PS is that it fully splits problems involving an arbitrary number of regularizers and constraints. β€œFull splitting” means that the method can handle multiple regularizers and constraints through their respective individual proximal and projection operators, along with the smooth terms via gradients. What makes this useful is that many of the regularizers used in ML have proximal operators that are relatively easy to compute; see for example Parikh & Boyd (2013). + +Despite these advantages, the preexisting PS framework has a significant drawback: it requires deterministic gradient oracles. This feature makes it impractical for application to large datasets for which stochastic oracles may be the only feasible option. + +Contributions The primary contribution of this work is a new projective splitting algorithm that allows for a stochastic gradient oracle. We call the method stochastic projective splitting (SPS). Our method β€œfully splits” the monotone inclusion problem + +$$ +\begin{array} { r } { \mathrm { F i n d } z \in \mathbb { R } ^ { d } \mathrm { ~ s . t . ~ } 0 \in \sum _ { i = 1 } ^ { n } A _ { i } ( z ) + B ( z ) , } \end{array} +$$ + +where $B$ is monotone and $L$ -Lipschitz and each $A _ { i }$ is maximal monotone and typically set valued, usually arising from a constraint or a nonsmooth regularizer in the underlying optimization problem or game; see for example Ryu & Boyd (2016) for definitions. For some example ML applications of (1), see Section 2 and Appendix A. Here, an algorithm that β€œfully splits” (1) means one whose computational steps each involve only the individual operators $A _ { 1 } , \ldots , A _ { n } , B$ . 
Ours is the first method that can accomplish full splitting without a product-space reformulation that recasts (1) as a two-operator problem on a higher-dimensional space, a tactic whose disadvantages are discussed in Appendix F.7. Our method interrogates the Lipschitz operator $B$ through a stochastic oracle. Previous methods splitting (1) have either required a deterministic oracle for $B$ , or have made far more restrictive assumptions on the noise or the operators (BriceΓ±o-Arias & Combettes, 2011; Combettes & Pesquet, 2012; Malitsky & Tam, 2020; Bot et al., 2019; Van Dung & Vu, 2021) than we will require below. However, the stochastic methods of Alacaoglu et al. (2021) and BΓΆhm et al. (2020), when combined with a product-space reformulation, can solve (1) when all the $A _ { i }$ are subdifferentials of convex functions; see Section 6. + +When moving away from a deterministic gradient oracle in projective splitting, a key difficulty is that the generated hyperplanes do not guarantee separation between the solution and the current point. We solve this issue by relaxing the projection: we only update each iterate in the direction of the noisy projection and scale its movement by a decreasing stepsize that allows for control of the stochastic error. Using the framework of stochastic quasi-FejΓ©r monotonicity (Combettes & Pesquet, 2015), we prove almost-sure convergence of the final iterate and do not require averaging of the iterates (Theorem 1, Section 5). We also provide a non-asymptotic convergence rate for the approximation residual (Theorem 2, Section 5). + +A special case of SPS is the recently-developed Double Stepsize Extragradient Method (DSEG) (Hsieh et al., 2020). When $n = 0$ and therefore only $B$ is present in (1), DSEG and SPS coincide. Thus, our method extends DSEG to allow for regularizers and constraints. Our analysis also provides a new interpretation for DSEG as a special case of projective splitting. 
Our nonasymptotic convergence rate for SPS also applies to DSEG under no additional assumptions. By contrast, the original convergence rate analysis for DSEG requires either strong monotonicity or an error bound. + +We close with numerical experiments on a distributionally robust sparse logistic regression problem. This is a nonsmooth convex-concave min-max problem which can be converted to (1) with $n = 2$ set-valued operators. On this problem class, SPS compares well to the possible alternative splitting methods. + +Non-monotone problems The work of Hsieh et al. (2020) included a local convergence analysis for DSEG applied to locally monotone problems. For min-max problems, if the objective is locally convex-concave at a solution and DSEG is initialized in close proximity, then for small enough stepsizes it converges to the solution with high probability. It is possible to extend this result to SPS, along with our convergence rate analysis. This result is beyond the scope of this work, but Appendix J provides a proof sketch. + +# 2 BACKGROUND ON MONOTONE INCLUSIONS + +Since they are so important to SPS, this section provides some background material regarding monotone inclusions, along with their connections to convex optimization, games, and ML. Appendix G discusses their connections to variational inequalities. For a more thorough treatment, we refer to Bauschke & Combettes (2017). See Appendix A for a longer discussion of the applications of monotone inclusions to ML along with several examples. + +Fundamentals Let $f : \mathbb { R } ^ { d } \to \mathbb { R } \cup \{ \infty \}$ be closed, convex, and proper (CCP). Recall that its subdifferential $\partial f$ is given by $\partial f ( x ) \doteq \left\{ g : f ( y ) \geq f ( x ) + g ^ { \top } ( y - x ) \ \forall y \in \mathbb { R } ^ { d } \right\}$. 
The map $\partial f$ has the property + +$$ +u \in \partial f ( x ) , v \in \partial f ( y ) \implies ( u - v ) ^ { \top } ( x - y ) \geq 0 , +$$ + +and any point-to-set map having this property is called a monotone operator. A monotone operator $T$ is called maximal if no additional points can be included in the image $T ( x )$ of any $\boldsymbol { x } ^ { \mathrm { ~ \scriptsize ~ \in ~ } \mathbb { R } ^ { d } }$ without violating the above property (Bauschke & Combettes, 2017, Def. 20.20). Subgradient maps of CCP functions are maximal (Bauschke & Combettes, 2017, Thm. 20.25). A minimizer of $f$ is any $x ^ { * }$ such that $0 \in \partial f ( x ^ { * } )$ . This is perhaps the simplest example of a monotone inclusion, the problem of finding $x$ such that $0 \in T ( x )$ , where $T$ is a monotone operator. If $f$ is smooth, then $\bar { \partial } f ( x ) = \{ \nabla f ( x ) \}$ for all $x$ , and the monotone inclusion $0 \in \partial f ( x )$ is equivalent to the first-order optimality condition $0 = \nabla f ( x )$ . + +Under certain regularity conditions (Bauschke & Combettes, 2017, Cor. 16.5), minimizing a sum of CCP functions $f _ { 1 } , \ldots , f _ { n }$ is equivalent to solving the monotone inclusion formed from the sum of their subdifferentials: + +$$ +x ^ { * } \in \underset { x \in \mathbb { R } ^ { d } } { \arg \operatorname* { m i n } } \sum _ { i = 1 } ^ { n } f _ { i } ( x ) \iff 0 \in \sum _ { i = 1 } ^ { n } \partial f _ { i } ( x ^ { * } ) . +$$ + +As throughout this paper for all set addition operations, the summation on the right-hand side of (2) is the Minkowski sum $\textstyle \sum _ { i = 1 } ^ { n } S _ { i } = \{ \sum _ { i = 1 } ^ { n } s _ { i } \ | ^ { \cdot } s _ { i } \in S _ { i } \forall i \in { 1 . . n } \}$ . 
For a convex set $X$ , a constraint $x \in C$ for some convex set $C$ may be imposed by setting one of the $f _ { i }$ to be the indicator function $\iota _ { C }$ , defined by $\iota _ { C } ( x ) = 0$ for $x \in C$ and $\iota _ { C } \bar { ( } x ) = \dot { + } \infty$ for $x \not \in C$ . Indicator functions of closed convex sets are CCP (Bauschke & Combettes, 2017, Ex. 1.25), and the subgradient map of $\iota _ { C }$ is also referred to as the normal cone map $N _ { C }$ of $C$ (Bauschke & Combettes, 2017, Def. 6.37). Multiple constraints may be imposed by including multiple indicator functions in (2). + +ML applications The form (2) can be used to model ML problems with multiple constraints and/or nonsmooth regularizers, including sparse and overlapping group lasso (Jacob et al., 2009), sparse and low-rank matrix estimation problems (Richard et al., 2012), and rare feature selection (Yan & Bien, 2020); see Pedregosa & Gidel (2018) for an overview. + +Games Consider a two-player noncooperative game in which each player tries to selfishly minimize its own loss, with each loss depending on the actions of both players. Typically, the goal is to find a Nash equilibrium, in which neither player can improve its loss by changing strategy: + +$$ +x ^ { * } \in \arg \operatorname* { m i n } _ { x \in \Theta } F ( x , y ^ { * } ) \quad { \mathrm { a n d } } \quad y ^ { * } \in \arg \operatorname* { m i n } _ { y \in \Omega } G ( x ^ { * } , y ) . 
+$$ + +Assuming that the admissible strategy sets $\Theta \subseteq \mathbb { R } ^ { d _ { x } }$ and $\Omega \subseteq \mathbb { R } ^ { d _ { y } }$ are closed and convex and that $F$ and $G$ are differentiable, then writing the first-order necessary conditions for each optimization problem in (3) yields + +$$ +0 \in \left[ \begin{array} { l } { \nabla _ { x } F ( x ^ { * } , y ^ { * } ) } \\ { \nabla _ { y } G ( x ^ { * } , y ^ { * } ) } \end{array} \right] + \big ( N _ { \Theta } ( x ^ { * } ) \times N _ { \Omega } ( y ^ { * } ) \big ) . +$$ + +If $G = - F$ , then (3) is a min-max game. If $F$ is also convex in $x$ and concave in $y$ , then $B : ( x , y ) \mapsto$ $( \nabla _ { x } F ( x , y ) , - \nabla _ { y } F ( x , y ) ) ^ { \top }$ is monotone1 on $\mathbb { R } ^ { d _ { x } + d _ { y } }$ (Rockafellar, 1970). In many applications, $B$ is also Lipschitz continuous. In this situation, (4) is a monotone inclusion involving two operators $B$ and $N _ { \Theta \times \Omega }$ , with $B$ being Lipschitz. Using the simultaneous version of GDA on (3) is equivalent to applying the forward-backward method (FB) (Bauschke & Combettes, 2017, Thm. 26.14) to (4). However, convergence of FB requires that the operator $B$ be cocoercive (Bauschke & Combettes, 2017, Def. 4.10), and not merely Lipschitz (Bauschke & Combettes, 2017, Thm. 26.14). Thus, simultaneous GDA fails to converge for (3) without additional assumptions; see Gidel et al. (2019, Prop. 1) for a simple counterexample. + +Regularizers and further constraints may be imposed by adding more operators to (4). 
For example, if one wished to apply a (nonsmooth) convex regularizer $r : \mathbb { R } ^ { d _ { x } } \to \mathbb { R } \cup \{ + \infty \}$ to the $x$ variables and a similar regularizer $d : \mathbb { R } ^ { d _ { y } } \to \mathbb { R } \cup \{ + \infty \}$ to the $y$ variables, one would add the operator $A _ { 2 } : ( x , y ) \mapsto \partial r ( x ) \times \partial d ( y )$ to the right-hand side of (4). + +ML applications of games Distributionally robust supervised learning (DRSL) is an emerging framework for improving the stability and reliability of ML models in the face of distributional shifts (Tu et al., 2021; Kuhn et al., 2019; Shafieezadeh-Abadeh et al., 2015; Sinha et al., 2018; Lin et al., 2020; Namkoong & Duchi, 2016). Common approaches to DRSL formulate the problem as a min-max game between a learner selecting the model parameters and an adversary selecting a worst-case distribution subject to some ambiguity set around the observed empirical distribution. This min-max problem is often further reduced to either a finite-dimensional saddlepoint problem or a convex optimization problem. + +DRSL is a source of games with multiple constraints/regularizers. One such formulation, based on Yu et al. (2021), is discussed in the experiments below. The work in Namkoong & Duchi (2016) uses an ambiguity set based on $f$ -divergences, while Sinha et al. (2018) introduce a Lagrangian relaxation of the Wasserstein ball. When applied to models utilizing multiple regularizers (Jacob et al., 2009; Richard et al., 2012; Yan & Bien, 2020), both of these approaches lead to min-max problems with multiple regularizers. 
+

Other applications of games in ML, although typically nonconvex, include generative adversarial networks (GANs) (Goodfellow et al., 2014; Arjovsky et al., 2017; Loizou et al., 2020; 2021; Mishchenko et al., 2020), fair classification (Wadsworth et al., 2018; Zhang et al., 2018; Edwards & Storkey, 2015; Celis & Keswani, 2019), and adversarial privacy (Huang et al., 2017).

Resolvents, proximal operators, and projections A fundamental computational primitive for solving monotone inclusions is the resolvent. The resolvent of a monotone operator $A$ is defined to be $J _ { A } \doteq ( I + A ) ^ { - 1 }$ , where $I$ is the identity operator and the inverse of any operator $T$ is simply $T ^ { - 1 } : x \mapsto \{ y : T y \ni x \}$ . If $A$ is maximal monotone, then for any $\rho > 0$ , $J _ { \rho A }$ is single valued, nonexpansive, and has domain equal to $\mathbb { R } ^ { d }$ (Bauschke & Combettes, 2017, Thm. 21.1 and Prop. 23.8). Resolvents generalize proximal operators of convex functions: the proximal operator of a CCP function $f$ is

$$
\operatorname { p r o x } _ { \rho f } ( t ) \doteq \underset { x \in \mathbb { R } ^ { d } } { \arg \operatorname* { m i n } } \left\{ \rho f ( x ) + ( 1 / 2 ) \| x - t \| ^ { 2 } \right\} .
$$

It is easily proved that $\operatorname { p r o x } _ { \rho f } = J _ { \rho \partial f }$ . Like proximal operators, resolvents generalize projection onto convex sets: if $f = \iota _ { \mathcal { C } }$ , then $J _ { \rho N _ { \mathcal { C } } } = \operatorname { p r o x } _ { \rho f } = \operatorname { p r o j } _ { \mathcal { C } }$ for any $\rho > 0$ . In many ML applications, proximal operators, and hence resolvents, are relatively straightforward to compute. For examples, see Parikh & Boyd (2013, Sec. 6). 
+ +Operator splitting methods Operator splitting methods attempt to solve monotone inclusions such as (1) by a sequence of operations that each involve only one of the operators $A _ { 1 } , \ldots , A _ { n } , B$ . Such methods are often presented in the context of convex optimization problems like (2), but typically apply more generally to monotone inclusions such as (1). In the specific context of (1), each iteration of such a method ideally handles each $A _ { i }$ via its resolvent and the Lipschitz operator $B$ by explicit (not stochastic) evaluation. This is a feasible approach if the original problem can be decomposed in such a way that the resolvents of each $A _ { i }$ are relatively inexpensive to compute, and full evaluations of $B$ are possible. Although not discussed here, more general formulations in which matrices couple the arguments of the operators can broaden the applicability of operator splitting methods. + +# 3 THE PROJECTIVE SPLITTING FRAMEWORK + +Before introducing our proposed method, we give a brief introduction to the projective splitting class of methods. + +The extended solution set Projective splitting is a primal-dual framework and operates in an extended space of primal and dual variables. Rather than directly finding a solution to (1), we find a point in the extended solution set (or Kuhn-Tucker set) + +$$ +\begin{array} { r } { \mathcal { S } \doteq \left\{ ( z , w _ { 1 } , \ldots , w _ { n + 1 } ) \ \middle | \ w _ { i } \in A _ { i } ( z ) \forall i \in 1 . . n , w _ { n + 1 } = B ( z ) , \sum _ { i = 1 } ^ { n + 1 } w _ { i } = 0 \right\} . } \end{array} +$$ + +Given $p ^ { * } = ( z ^ { * } , w _ { 1 } ^ { * } \ldots , w _ { n + 1 } ^ { * } ) \in \mathcal { S }$ , it is straightforward to see that $z ^ { * }$ solves (1). 
Conversely, given a solution $z ^ { * }$ to (1), there must exist $w _ { 1 } ^ { * } , \ldots , w _ { n + 1 } ^ { * }$ such that $( z ^ { \ast } , w _ { 1 } ^ { \ast } , \dots , w _ { n + 1 } ^ { \ast } ) \in \mathcal { S }$ . Suppose $p ^ { * } = ( z ^ { * } , w _ { 1 } ^ { * } , \ldots , w _ { n + 1 } ^ { * } ) \in \mathcal { S }$ . Since $z ^ { * }$ solves (1), $z ^ { * }$ is typically referred to as a primal solution. The vectors $w _ { 1 } ^ { * } , \ldots , w _ { n + 1 } ^ { * }$ solve a dual inclusion not described here, and are therefore called a dual solution. It can be shown that $\mathcal { S }$ is closed and convex; see for example Johnstone & Eckstein (2020b). We will assume throughout that a solution to (1) exists, therefore the set $\mathcal { S }$ is nonempty.

Separator-projection framework Projective splitting methods are instances of the general separator-projection algorithmic framework for locating a member of a closed convex set $\mathcal { S }$ within a linear space $\mathcal { P }$ . Each iteration $k$ of algorithms drawn from this framework operates by finding a set $H _ { k }$ that separates the current iterate $p ^ { k } \in \mathcal { P }$ from $\mathcal { S }$ , meaning that $\mathcal { S }$ is entirely in the set and $p ^ { k }$ typically is not. One then attempts to β€œmove closer” to $\mathcal { S }$ by projecting $p ^ { k }$ onto $H _ { k }$ . In the particular case of projective splitting applied to the problem (1) using (5), we select the space $\mathcal { P }$ to be

$$
\begin{array} { r } { \mathcal { P } \doteq \left\{ ( z , w _ { 1 } , \ldots , w _ { n + 1 } ) \in \mathbb { R } ^ { ( n + 2 ) d } \ \Big | \ \sum _ { i = 1 } ^ { n + 1 } w _ { i } = 0 \right\} , } \end{array}
$$

and each separating set $H _ { k }$ to be the half space $\{ p \in { \mathcal { P } } \mid \varphi _ { k } ( p ) \leq 0 \}$ generated by an affine function $\varphi _ { k } : \mathcal { P } \to \mathbb { R }$ . 
The general intention is to construct $\varphi _ { k }$ such that $\varphi _ { k } ( p ^ { k } ) > 0$ , but $\varphi _ { k } ( p ^ { * } ) \leq 0$ for all $p ^ { * } \in { \mathcal { S } }$ . The construction employed for $\varphi _ { k }$ in the case of (1) and (5) is of the form

$$
\begin{array} { r } { \varphi _ { k } ( z , w _ { 1 } , \ldots , w _ { n + 1 } ) \doteq \sum _ { i = 1 } ^ { n + 1 } \langle z - x _ { i } ^ { k } , y _ { i } ^ { k } - w _ { i } \rangle } \end{array}
$$

for some points $( x _ { i } ^ { k } , y _ { i } ^ { k } ) \in \mathbb { R } ^ { 2 d }$ , $i \in { 1 . . ( n + 1 ) }$ , that must be carefully chosen (see below). Any function of the form (7) can be shown to be affine when restricted to $\mathcal { P }$ . As mentioned above, the standard separator-projection algorithm obtains its next iterate $p ^ { k + 1 }$ by projecting $p ^ { k }$ onto $H _ { k }$ . This calculation involves the usual projection step for a half space, namely

$$
p ^ { k + 1 } = p ^ { k } - \alpha _ { k } \nabla \varphi _ { k } , \quad \mathrm { ~ w h e r e ~ } \quad \alpha _ { k } = \varphi _ { k } ( p ^ { k } ) / \| \nabla \varphi _ { k } \| ^ { 2 } ,
$$

and the gradient $\nabla \varphi _ { k }$ is computed relative to $\mathcal { P }$ , thus resulting in $p ^ { k + 1 } \in { \mathcal { P } }$ , i.e. $\nabla \varphi _ { k } = \left( \sum _ { i = 1 } ^ { n + 1 } y _ { i } ^ { k } , x _ { 1 } ^ { k } - { \bar { x } } ^ { k } , \dots , x _ { n + 1 } ^ { k } - { \bar { x } } ^ { k } \right)$ , where $\begin{array} { r } { \bar { x } ^ { k } = \frac { 1 } { n + 1 } \sum _ { i = 1 } ^ { n + 1 } x _ { i } ^ { k } } \end{array}$ .

# 4 PROPOSED METHOD

The proposed method is given in Algorithm 1 and called Stochastic Projective Splitting (SPS). Unlike prior versions of projective splitting, SPS does not employ the stepsize $\alpha _ { k }$ of (8) that places the next iterate exactly on the hyperplane given by $\varphi _ { k } ( p ) = 0$ . 
Instead, it simply moves in the direction $- \nabla \varphi _ { k }$ with a pre-defined stepsize $\{ \alpha _ { k } \}$ . This fundamental change is required to deal with the stochastic noise on lines 6 and 8. This noise could lead to the usual choice of $\alpha _ { k }$ defined in (8) being unstable and difficult to analyze. In order to guarantee convergence, the parameters $\alpha _ { k }$ and $\rho _ { k }$ must be chosen to satisfy certain conditions given below. Note that the gradient is calculated with respect to the subspace $\mathcal { P }$ defined in (6); since the algorithm is initialized within $\mathcal { P }$ , it remains in $\mathcal { P }$ , within which the updates on lines 9-10 are equivalent to $p ^ { k + 1 } = p ^ { k } - \alpha _ { k } \nabla \varphi _ { k }$ , where $p ^ { k } = ( z ^ { k } , w _ { 1 } ^ { k } , \dots , w _ { n + 1 } ^ { k } )$ .

Note that SPS does not explicitly evaluate $\varphi _ { k }$ , which is only used in the analysis, but it does keep track of $( x _ { i } ^ { k } , y _ { i } ^ { k } )$ for $i \in { 1 . . ( n + 1 ) }$ . The algorithm’s memory requirements scale linearly with the number of nonsmooth operators $n$ in the inclusion (1), with the simplest implementation storing $( 3 n + 5 ) d$ working-vector elements. This requirement can be reduced to $( n + 7 ) d$ through a technique discussed in Appendix H. In most applications, $n$ will be small, for example 2 or 3.

Updating $( x _ { i } ^ { k } , y _ { i } ^ { k } )$ The variables $( x _ { i } ^ { k } , y _ { i } ^ { k } )$ are updated on lines 3-8 of Algorithm 1, in which $e ^ { k }$ and $\epsilon ^ { k }$ are $\mathbb { R } ^ { d }$ -valued random variables defined on a probability space $( \Omega , { \mathcal { F } } , P )$ . For $B$ we use a new, noisy version of the two-forward-step procedure from Johnstone & Eckstein (2020b). For each $A _ { i }$ , $i \in 1 . . n$ , we use the same resolvent step used in previous projective splitting papers, originating with (Eckstein & Svaiter, 2008). In the case $\epsilon ^ { k } = e ^ { k } = 0$ , the selection of the $( x _ { i } ^ { k } , y _ { i } ^ { k } )$ is identical to that proposed by Johnstone & Eckstein (2020b), resulting in the hyperplane $\{ p : { \varphi } _ { k } ( p ) = 0 \}$ strictly separating $p ^ { k }$ from $\mathcal { S }$ .

SPS achieves full splitting of (1): each $A _ { i }$ is processed separately using a resolvent and the Lipschitz term $B$ is processed via a stochastic gradient oracle. When the $A _ { i }$ arise from regularizers or constraints, as discussed in Section 2, their resolvents can be readily computed so long as their respective proximal/projection operators have a convenient form.

Noise assumptions Let $\mathcal { F } _ { k } \doteq \sigma ( p ^ { 1 } , \ldots , p ^ { k } )$ and $\mathcal { E } _ { k } \doteq \sigma ( \epsilon ^ { k } )$ . The stochastic estimators for the gradients, $r ^ { k }$ and $y _ { n + 1 } ^ { k }$ , are assumed to be unbiased, that is, the noise terms have mean 0 conditioned on the past:

$$
\mathbb { E } [ \epsilon ^ { k } | \mathcal { F } _ { k } ] = 0 , \quad \mathbb { E } [ e ^ { k } | \mathcal { F } _ { k } ] = 0 \quad a . s .
$$

We impose the following mild assumptions on the variance of the noise:

$$
\begin{array} { r l } & { \mathbb { E } \left[ \| \epsilon ^ { k } \| ^ { 2 } | \mathcal { F } _ { k } \right] \leq N _ { 1 } + N _ { 2 } \| B ( z ^ { k } ) \| ^ { 2 } \quad a . s . } \\ & { \mathbb { E } \left[ \| e ^ { k } \| ^ { 2 } | \mathcal { F } _ { k } , \mathcal { E } _ { k } \right] \leq N _ { 3 } + N _ { 4 } \| B ( x _ { n + 1 } ^ { k } ) \| ^ { 2 } \quad a . s . , } \end{array}
$$

where $0 \le N _ { 1 } , N _ { 2 } , N _ { 3 } , N _ { 4 } < \infty$ . We do not require $e ^ { k }$ and $\epsilon ^ { k }$ to be independent of one another. 
+ +Stepsize choices The stepsizes $\rho _ { k }$ and $\alpha _ { k }$ are assumed to be deterministic. A constant stepsize choice which attains a non-asymptotic convergence rate will be considered in the next section (Theorem 2). The stepsize conditions we will impose to guarantee almost-sure convergence (Theorem 1) are + +$$ +\begin{array} { r } { \sum _ { k = 1 } ^ { \infty } \alpha _ { k } \rho _ { k } = \infty , \quad \sum _ { k = 1 } ^ { \infty } \alpha _ { k } ^ { 2 } < \infty , \quad \sum _ { k = 1 } ^ { \infty } \alpha _ { k } \rho _ { k } ^ { 2 } < \infty , \mathrm { a n d } \rho _ { k } \leq \overline { \rho } < 1 / L . } \end{array} +$$ + +For example, in the case $L = 1$ , a particular choice which satisfies these constraints is + +$$ +\alpha _ { k } = k ^ { - 0 . 5 - p } \mathrm { f o r } 0 < p < 0 . 5 , \mathrm { a n d } \rho _ { k } = k ^ { - 0 . 5 + t } \mathrm { f o r } p \leq t < 0 . 5 p + 0 . 2 5 . +$$ + +For simplicity, the stepsizes $\tau$ used for the resolvent updates in lines 3-5 are fixed, but they could be allowed to vary with both $i$ and $k$ so long as they have finite positive lower and upper bounds. + +# Algorithm 1: Stochastic Projective Splitting (SPS) + +# 5 MAIN THEORETICAL RESULTS + +Theorem 1. Suppose $A _ { 1 } , \ldots , A _ { n }$ are maximal monotone, $B$ is $L$ -Lipschitz and monotone, and a solution to (1) exists. For Algorithm $I$ , suppose (9)-(12) hold. Then with probability one it holds that $z ^ { k } \to z ^ { * }$ , where $z ^ { * }$ solves (1). Further, with probability one, $x _ { i } ^ { k } \to z ^ { * }$ for $i = 1 , \ldots , n$ . + +Proof sketch Theorem 1 is proved in Appendix C, but we provide a brief sketch here. The proof begins by deriving a simple recursion inspired by the analysis of SGD (Robbins & Monro, 1951). 
Since $p ^ { k + 1 } = p ^ { k } - \alpha _ { k } \nabla \bar { \varphi } _ { k }$ , a step of projective splitting can be viewed as GD applied to the affine hyperplane generator function $\varphi _ { k }$ . Thus, for any $p ^ { * } \in \mathcal { P }$ , + +$$ +\begin{array} { r l } & { \| p ^ { k + 1 } - p ^ { * } \| ^ { 2 } = \| p ^ { k } - p ^ { * } \| ^ { 2 } - 2 \alpha _ { k } \langle \nabla \varphi _ { k } , p ^ { k } - p ^ { * } \rangle + \alpha _ { k } ^ { 2 } \| \nabla \varphi _ { k } \| ^ { 2 } } \\ & { \qquad = \| p ^ { k } - p ^ { * } \| ^ { 2 } - 2 \alpha _ { k } ( \varphi _ { k } ( p ^ { k } ) - \varphi _ { k } ( p ^ { * } ) ) + \alpha _ { k } ^ { 2 } \| \nabla \varphi _ { k } \| ^ { 2 } } \end{array} +$$ + +where in the second equation we have used that $\varphi _ { k } ( p )$ is affine on $\mathcal { P }$ . The basic strategy is to show that, for any $p ^ { * } \in { \mathcal { S } }$ , + +$$ +\begin{array} { r } { \mathbb { E } [ \| \nabla \varphi _ { k } \| ^ { 2 } | \mathcal { F } _ { k } ] \le C _ { 1 } \| p ^ { k } - p ^ { * } \| ^ { 2 } + C _ { 2 } \quad a . s . } \end{array} +$$ + +for some $C _ { 1 } , C _ { 2 } > 0$ . This condition allows one to establish stochastic quasi-FejΓ©r monotonicity (SQFM) (Combettes & Pesquet, 2015, Proposition 2.3) of the iterates to $s$ . One consequence of SQFM is that with probability one there exists a subsequence $v _ { k }$ such that $\varphi _ { v _ { k } } ( p ^ { v _ { k } } ) - \varphi _ { v _ { k } } ( p ^ { * } )$ converges to 0. Furthermore, roughly speaking, we show that $\varphi _ { k } ( p ^ { k } ) - \varphi _ { k } ( p ^ { * } )$ provides an upper bound on the following β€œapproximation residual" for SPS: + +$$ +\begin{array} { r } { G _ { k } \doteq \sum _ { i = 1 } ^ { n } \| y _ { i } ^ { k } - w _ { i } ^ { k } \| ^ { 2 } + \sum _ { i = 1 } ^ { n } \| z ^ { k } - x _ { i } ^ { k } \| ^ { 2 } + \| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \| ^ { 2 } . 
} \end{array}
$$

The residual $G _ { k }$ provides an approximation error for SPS, as formalized in the following lemma:

Lemma 1. For SPS, $p ^ { k } = ( z ^ { k } , w _ { 1 } ^ { k } , \ldots , w _ { n + 1 } ^ { k } ) \in \mathcal { S }$ if and only if $G _ { k } = 0$ .

Since $y _ { i } ^ { k } \ \in \ A _ { i } ( x _ { i } ^ { k } )$ for $i \in 1 . . n$ , having $G _ { k } ~ = ~ 0$ implies that $z ^ { k } = x _ { i } ^ { k }$ , $w _ { i } ^ { k } \ = \ y _ { i } ^ { k }$ , and thus $w _ { i } ^ { k } \in A _ { i } ( z ^ { k } )$ for $i \in 1 . . n$ . Since $w _ { n + 1 } ^ { k } = B ( z ^ { k } )$ and $\textstyle \sum _ { i = 1 } ^ { n + 1 } w _ { i } ^ { k } = 0$ , it follows that $z ^ { k }$ solves (1). The reverse direction is proved in Appendix D.

The quantity $G _ { k }$ generalizes the role played by the norm of the gradient in algorithms for smooth optimization. In particular, in the special case where $n = 0$ and $B ( z ) = \nabla f ( z )$ for some smooth convex function $f$ , one has $G _ { k } = \| \nabla f ( z ^ { k } ) \| ^ { 2 }$ .

Combining the properties of $G _ { k }$ with other results following from SQFM (such as boundedness) will allow us to derive almost-sure convergence of the iterates to a solution of (1).

Convergence rate We can also establish non-asymptotic convergence rates for the approximation residual $G _ { k }$ :

Theorem 2. Fix the total iterations $K \geq 1$ of Algorithm 1 and set

$$
\forall k = 1 , \ldots , K : \rho _ { k } = \rho \doteq \operatorname* { m i n } \left\{ K ^ { - 1 / 4 } , 1 / ( 2 L ) \right\} \quad \ a n d \quad \alpha _ { k } = C _ { f } \rho ^ { 2 }
$$

for some $C _ { f } > 0$ . Suppose (9)-(11) hold. Then

$$
\begin{array} { r } { ( 1 / K ) { \sum } _ { j = 1 } ^ { K } \mathbb { E } [ G _ { j } ] = \mathcal { O } ( K ^ { - 1 / 4 } ) } \end{array}
$$

where the constants are given (along with the proof) in Appendix $E$ . 
+ +Theorem 2 implies that if we pick an iterate $J$ uniformly at random from $1 . . K$ , then the expected value of $G _ { J }$ is $\mathcal { O } ( K ^ { - 1 / 4 } )$ . As far as we know, this is the first convergence rate for a stochastic fullsplitting method solving (1) in the general discontinuous (i.e. set-valued) monotone inclusion case, and it is not clear whether it can be improved, either by a better analysis or a better method. Faster rates are certainly possible for deterministic methods under various continuity assumptions; Tseng’s method obtains $\bar { \mathcal { O } } ( K ^ { - 1 } )$ rate (Monteiro $\&$ Svaiter, 2010) and the accelerated Halpern iteration under Lipschitz continuity obtains $\mathcal { O } ( K ^ { - 2 } )$ rate (Diakonikolas, 2020). While our rate may seem slow, it is worth remembering that (1) features $n$ discontinuous operators $A _ { i }$ , so we expect rates at least as slow as nonsmooth convex optimization, but perhaps worse because (1) is far more general than convex optimization. For a different error metric, the restricted gap function, in the special case of variational inequalities, faster rates have been established in Juditsky et al. (2011) and BΓΆhm et al. (2020). However, it is unclear how to relate the restricted gap function to $G _ { k }$ , so these rates may not be directly comparable to Theorem 2. + +# 6 RELATED WORK + +Arguably the three most popular classes of operator splitting algorithms are forward-backward splitting (FB) (Combettes & Pesquet, 2011), Douglas-Rachford splitting (DR) (Lions & Mercier, 1979), and Tseng’s method (Tseng, 2000). The extragradient method (EG) is similar to Tseng’s method, but has more projection steps per iteration and only applies to variational inequalities (Korpelevich, 1977; Nemirovski, 2004; Li et al., 2021). The popular Alternating Direction Method of Multipliers (ADMM), in its standard form, is a dual application of DR (Gabay, 1983). 
The three-operator splitting method (Davis & Yin, 2017) can only be applied to (1) if $B$ is cocoercive rather than merely Lipchitz, and thus its usefulness is mostly limited to optimization applications and not games. FB, DR, and Tseng’s method apply to monotone inclusions involving two operators, with varying assumptions on one of the operators. It is possible to derive splitting methods for the more complicated inclusion (1), involving more than two operators, by applying an appropriate 2-operator splitting method such as Tseng’s method to a product-space reformulation (PSR) (BriceΓ±o-Arias & Combettes, 2011; Combettes & Pesquet, 2012) (for more on PSR, see Appendix F). The recently developed forward-reflected-backward (FRB) method (Malitsky & Tam, 2020) can be used in the same way. However, there are several disadvantages to using a PSR, as discussed in Appendix F.7. + +By using a PSR, the stochastic methods of Alacaoglu et al. (2021) and BΓΆhm et al. (2020) can be applied to (1) in the case that each $A _ { i }$ is a subdifferential. Both of these methods are analyzed in terms of the restricted gap function. This merit function has a drawback compared with our approximation residual in that it requires one to find a bound for the iterates. However, Alacaoglu et al. (2021) and BΓΆhm et al. (2020) do not provide such a bound, meaning that their convergence rate results are somewhat incomplete. We discuss this issue in Appendix G. + +Theoretical convergence of the method of BΓΆhm et al. (2020) requires the use of averaging, since the final iterate does not converge for certain problems (Hsieh et al., 2020). Empirically, averaging tends to be slow and to destroy regularizer-induced structural properties such as sparsity or low matrix rank, so its utility is largely theoretical and it is usually avoided in practice. Furthermore, averaging loses even its theoretical benefits for nonconvex problems, so its use in such cases is rarer still. 
Another drawback of the analysis of BΓΆhm et al. (2020) is that, unlike in SPS, the resolvent (proximal) stepsizes also need to vanish. + +The method of Alacaoglu et al. (2021) applies variance reduction techniques to FRB. It only applies to finite-sum problems and requires the periodic computation of a full batch gradient, making it somewhat less flexible and scalable than our method. On the other hand, it has an accelerated ergodic rate for the restricted gap function in the variational inequality setting. We compare the empirical performance of SPS with Alacaoglu et al. (2021), BΓΆhm et al. (2020), and several deterministic methods using PSR in the numerical experiments described in Section 7. + +Additional related work is discussed in Appendix B. + +# 7 EXPERIMENTS + +We now present some numerical results on distributionally robust supervised learning (DRSL) problems. We follow the approach of Yu et al. (2021), which introduced a min-max formulation of Wasserstein DRSL. While other approaches reduce the problem to convex optimization, Yu et al. (2021) reduce it to a finite-dimensional min-max problem amenable to the use of stochastic methods on large datasets. However, unlike our proposed SPS method, the variance-reduced extragradient method that Yu et al. (2021) propose cannot handle multiple nonsmooth regularizers or constraints on the model parameters. Consequently, we consider distributionally robust sparse logistic regression (DRSLR), a problem class equivalent to that considered in Yu et al. (2021), but with an added $\ell _ { 1 }$ regularizer, a standard tool to induce sparsity. See the Appendix I for the full problem definition. + +We compared our SPS method to several methods for solving DRSLR for a collection of real datasets from the LIBSVM repository (Chang & Lin, 2011). We implemented SPS with $\alpha _ { k } = C _ { d } k ^ { - 0 . 5 1 }$ and $\rho _ { k } = C _ { d } k ^ { - 0 . 2 5 }$ and called it SPS-decay. 
We also implemented SPS with the fixed stepsize given in (15) and called it SPS-fixed. We compared the method to deterministic projective splitting (Johnstone & Eckstein, 2020b) and the following methods based on PSR: Tseng’s method (Tseng, 2000; Combettes & Pesquet, 2012), the forward-reflected-backward (FRB) method (Malitsky & Tam, 2020), the stochastic Tseng (S-Tseng) method of BΓΆhm et al. (2020), and the variance-reduced stochastic FRB method (Alacaoglu et al., 2021), abbreviated FRB-VR. The S-Tseng and FRB-VR algorithms appear to be the only stochastic splitting methods other than SPS applicable to the tested problem class.

![](images/5a3352bf1be8622af7e8437ef0309b7c8e82b213e948eae1d016add4ab16fe33.jpg)
Figure 1: Approximation residual versus running time for three LIBSVM benchmark datasets, with the markers at 10-iteration intervals. Left: epsilon, middle: SUSY, right: real-sim. For the stochastic algorithms (SPS, S-Tseng, and FRB-VR), we plot the median results over 10 trials, with unit standard deviation horizontal error bars for the running time and the vertical error bars displaying the min-to-max range of the approximation residual. The code is provided in the supplementary material.

Figure 1 shows results for three LIBSVM standard datasets: epsilon ( $m = 4 \cdot 1 0 ^ { 5 }$ , $d = 2 0 0 0$ ), SUSY (Baldi et al., 2014; Dua & Graff, 2017) ( $m = 2 \cdot 1 0 ^ { 6 }$ , $d = 1 8$ ), and real-sim ( $m = 7 2 { , } 3 0 9$ , $d = 2 0 { , } 9 5 8$ ).

To measure the progress of the algorithms, we used the β€œapproximation residual” $R _ { k }$ defined in Appendix F. As with $G _ { k }$ , having $R _ { k } = 0$ implies that $z ^ { k }$ solves (1). We use $R _ { k }$ instead of $G _ { k }$ because it is also possible to compute essentially the same measure of convergence from the iterates of the other tested algorithms, establishing a fair comparison. 
Appendix F provides the details of the derivation of the residual measure for each algorithm, explores the relationship between $R _ { k }$ and $G _ { k }$ , and provides additional implementation details. + +Figure 1 plots the approximation residual versus running time for all seven algorithms under consideration. The computations were performed using Python 3.8.3 and numpy on a 2019 MacBook Pro with a 2.4GHz 8-core Intel I9 processor and 32GB of RAM . Being a stochastic method, SPS-decay seems to outperform the deterministic methods at obtaining a medium-accuracy solution quickly. It also seems to outperform the stochastic PSR-based methods S-Tseng and FRB-VR. + +# 8 CONCLUSIONS AND FUTURE WORK + +We have developed and analyzed a stochastic splitting method that can handle min-max problems with multiple regularizers and constraints. Going forward, this development should make it possible to incorporate regularizers and constraints into adversarial formulations trained from large datasets. + +Recent versions of deterministic projective splitting (Combettes & Eckstein, 2018; Johnstone & Eckstein, 2020b) allow for asynchronous and incremental operation, meaning that not all operators need to be activated at every iteration, with some calculations proceeding with stale inputs. Such characteristics make projective splitting well-suited to distributed implementations. Many of our SPS results may be extended to allow for these variations, but we leave those extensions to future work. + +# REFERENCES + +Ahmet Alacaoglu, Yura Malitsky, and Volkan Cevher. Forward-reflected-backward method with variance reduction. Computational Optimization and Applications, 2021. Available online. + +Abdullah Alotaibi, Patrick L Combettes, and Naseer Shahzad. Solving coupled composite monotone inclusions by successive FejΓ©r approximations of their Kuhn-Tucker set. SIAM Journal on Optimization, 24(4):2076–2095, 2014. + +Kimon Antonakopoulos, Veronica Belmega, and Panayotis Mertikopoulos. 
An adaptive mirrorprox method for variational inequalities with singular operators. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'AlchΓ©-Buc, E. Fox, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 32. Curran Associates, 2019. + +Martin Arjovsky, Soumith Chintala, and LΓ©on Bottou. Wasserstein generative adversarial networks. In Doina Precup and Yee Whye Teh (eds.), Proceedings of the 34th International Conference on Machine Learning, volume 70 of Proceedings of Machine Learning Research, pp. 214–223, 06–11 Aug 2017. + +Pierre Baldi, Peter Sadowski, and Daniel Whiteson. Searching for exotic particles in high-energy physics with deep learning. Nature communications, 5(1):1–9, 2014. + +David Balduzzi, Sebastien Racaniere, James Martens, Jakob Foerster, Karl Tuyls, and Thore Graepel. The mechanics of $n$ -player differentiable games. In Jennifer Dy and Andreas Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, volume 80 of Proceedings of Machine Learning Research, pp. 354–363. PMLR, 10–15 Jul 2018. + +Heinz H Bauschke and Patrick L Combettes. Convex analysis and monotone operator theory in Hilbert spaces. Springer, 2nd edition, 2017. + +Axel BΓΆhm, Michael Sedlmayer, ErnΓΆ Robert Csetnek, and Radu Ioan BoΒΈt. Two steps at a time β€” taking GAN training in stride with Tseng’s method. arXiv preprint arXiv:2006.09033, 2020. + +Radu Ioan Bot, Panayotis Mertikopoulos, Mathias Staudigl, and Phan Tu Vuong. Forward-backwardforward methods with variance reduction for stochastic variational inequalities. arXiv preprint arXiv:1902.03355, 2019. + +Luis M BriceΓ±o-Arias and Patrick L Combettes. A monotone+skew splitting model for composite monotone inclusions in duality. SIAM Journal on Optimization, 21(4):1230–1250, 2011. + +Luis M BriceΓ±o-Arias and Patrick L Combettes. Monotone operator methods for Nash equilibria in non-potential games. 
In Computational and Analytical Mathematics, volume 50 of Springer Proceedings in Mathematics and Statistics, pp. 143–159. Springer, 2013. + +L Elisa Celis and Vijay Keswani. Improved adversarial learning for fair classification. arXiv preprint arXiv:1901.10443, 2019. + +Chih-Chung Chang and Chih-Jen Lin. LIBSVM: A library for support vector machines. ACM Transactions on Intelligent Systems and Technology, 2:27:1–27:27, 2011. Software available at http://www.csie.ntu.edu.tw/\~cjlin/libsvm. + +Tatjana Chavdarova, Matteo Pagliardini, Sebastian U Stich, FranΓ§ois Fleuret, and Martin Jaggi. Taming GANs with lookahead-minmax. In International Conference on Learning Representations, 2021. URL https://openreview.net/forum?id $=$ ZW0yXJyNmoG. + +Patrick L. Combettes and Jonathan Eckstein. Asynchronous block-iterative primal-dual decomposition methods for monotone inclusions. Mathematical Programming, 168(1-2):645–672, 2018. + +Patrick L Combettes and Jean-Christophe Pesquet. Proximal splitting methods in signal processing. In H.H. Bauschke, R.S.S. Burachik, P.L. Combettes, V. Elser, D.R. Luke, and H. Wolkowicz (eds.), Fixed-Point Algorithms for Inverse Problems in Science and Engineering, pp. 185–212. Springer, 2011. + +Patrick L Combettes and Jean-Christophe Pesquet. Primal-dual splitting algorithm for solving inclusions with mixtures of composite, Lipschitzian, and parallel-sum type monotone operators. Set-Valued and variational analysis, 20(2):307–330, 2012. + +Patrick L Combettes and Jean-Christophe Pesquet. Stochastic quasi-FejΓ©r block-coordinate fixed point iterations with random sweeping. SIAM Journal on Optimization, 25(2):1221–1248, 2015. + +Constantinos Daskalakis, Andrew Ilyas, Vasilis Syrgkanis, and Haoyang Zeng. Training GANs with optimism. In International Conference on Learning Representations, 2018. URL https: //openreview.net/forum?id ${ . } =$ SJJySbbAZ. + +Damek Davis and Wotao Yin. A three-operator splitting scheme and its optimization applications. 
Set-Valued and Variational Analysis, 25(4):829–858, 2017. + +Jelena Diakonikolas. Halpern iteration for near-optimal and parameter-free monotone inclusion and strong solutions to variational inequalities. In Conference on Learning Theory, pp. 1428–1451. PMLR, 2020. + +Dheeru Dua and Casey Graff. UCI machine learning repository, 2017. URL http://archive. ics.uci.edu/ml. + +Jonathan Eckstein. A simplified form of block-iterative operator splitting and an asynchronous algorithm resembling the multi-block alternating direction method of multipliers. Journal of Optimization Theory and Applications, 173(1):155–182, 2017. + +Jonathan Eckstein and Benar Fux Svaiter. A family of projective splitting methods for the sum of two maximal monotone operators. Mathematical Programming, 111(1):173–199, 2008. + +Jonathan Eckstein and Benar Fux Svaiter. General projective splitting methods for sums of maximal monotone operators. SIAM Journal on Control and Optimization, 48(2):787–811, 2009. + +Harrison Edwards and Amos Storkey. Censoring representations with an adversary. arXiv preprint arXiv:1511.05897, 2015. + +Daniel Gabay. Applications of the method of multipliers to variational inequalities. In M. Fortin and R. Glowinski (eds.), Augmented Lagrangian Methods: Applications to the Solution of Boundary Value Problems, chapter IX, pp. 299–340. North-Holland, Amsterdam, 1983. + +Gauthier Gidel, Hugo Berard, GaΓ«tan Vignoud, Pascal Vincent, and Simon Lacoste-Julien. A variational inequality perspective on generative adversarial networks. In International Conference on Learning Representations, 2019. URL https://openreview.net/forum?id $=$ r1laEnA5Ym. + +Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In Z. Ghahramani, M. Welling, C. Cortes, N. Lawrence, and K. Q. Weinberger (eds.), Advances in Neural Information Processing Systems, volume 27. Curran Associates, 2014. 
+ +Paulina Grnarova, Yannic Kilcher, Kfir Y Levy, Aurelien Lucchi, and Thomas Hofmann. Generative minimization networks: Training GANs without competition. arXiv preprint arXiv:2103.12685, 2021. + +Patrick T Harker and Jong-Shi Pang. Finite-dimensional variational inequality and nonlinear complementarity problems: a survey of theory, algorithms and applications. Mathematical programming, 48(1):161–220, 1990. + +Yu-Guan Hsieh, Franck Iutzeler, JΓ©rΓ΄me Malick, and Panayotis Mertikopoulos. On the convergence of single-call stochastic extra-gradient methods. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'AlchΓ©-Buc, E. Fox, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 32. Curran Associates, 2019. + +Yu-Guan Hsieh, Franck Iutzeler, JΓ©rΓ΄me Malick, and Panayotis Mertikopoulos. Explore aggressively, update conservatively: Stochastic extragradient methods with variable stepsize scaling. In H. Larochelle, M. Ranzato, R. Hadsell, M. F. Balcan, and H. Lin (eds.), Advances in Neural Information Processing Systems, volume 33, pp. 16223–16234. Curran Associates, 2020. + +Chong Huang, Peter Kairouz, Xiao Chen, Lalitha Sankar, and Ram Rajagopal. Context-aware generative adversarial privacy. Entropy, 19(12):656, 2017. + +Laurent Jacob, Guillaume Obozinski, and Jean-Philippe Vert. Group lasso with overlaps and graph lasso. In LΓ©on Bottou and Michael Littman (eds.), Proceedings of the 26th International Conference on Machine Learning, pp. 433–440, Montreal, June 2009. Omnipress. + +Patrick R Johnstone and Jonathan Eckstein. Convergence rates for projective splitting. SIAM Journal on Optimization, 29(3):1931–1957, 2019. + +Patrick R Johnstone and Jonathan Eckstein. Projective splitting with forward steps only requires continuity. Optimization Letters, 14(1):229–247, 2020a. + +Patrick R Johnstone and Jonathan Eckstein. Projective splitting with forward steps. Mathematical Programming, 2020b. Published online, to appear in print. 
+ +Patrick R Johnstone and Jonathan Eckstein. Single-forward-step projective splitting: exploiting cocoercivity. Computational Optimization and Applications, 78(1):125–166, 2021. + +Anatoli Juditsky, Arkadi Nemirovski, and Claire Tauvel. Solving variational inequalities with stochastic mirror-prox algorithm. Stochastic Systems, 1(1):17–58, 2011. + +GM Korpelevich. Extragradient method for finding saddle points and other problems. Matekon, 13 (4):35–49, 1977. + +Daniel Kuhn, Peyman Mohajerin Esfahani, Viet Anh Nguyen, and Soroosh Shafieezadeh-Abadeh. Wasserstein distributionally robust optimization: Theory and applications in machine learning. In Serguei Netessine (ed.), Operations Research & Management Science in the Age of Analytics, Tutorials in Operations Research, pp. 130–166. INFORMS, 2019. + +Chris Junchi Li, Yaodong Yu, Nicolas Loizou, Gauthier Gidel, Yi Ma, Nicolas Le Roux, and Michael I Jordan. On the convergence of stochastic extragradient for bilinear games with restarted iteration averaging. arXiv preprint arXiv:2107.00464, 2021. + +Tianyi Lin, Chi Jin, and Michael Jordan. On gradient descent ascent for nonconvex-concave minimax problems. In Hal DaumΓ© III and Aarti Singh (eds.), Proceedings of the 37th International Conference on Machine Learning, volume 119 of Proceedings of Machine Learning Research, pp. 6083–6093. PMLR, 2020. + +Pierre-Louis Lions and Bertrand Mercier. Splitting algorithms for the sum of two nonlinear operators. SIAM Journal on Numerical Analysis, 16(6):964–979, 1979. + +Mingrui Liu, Hassan Rafique, Qihang Lin, and Tianbao Yang. First-order convergence theory for weakly-convex-weakly-concave min-max problems. Journal of Machine Learning Research, 22 (169):1–34, 2021. + +Nicolas Loizou, Hugo Berard, Alexia Jolicoeur-Martineau, Pascal Vincent, Simon Lacoste-Julien, and Ioannis Mitliagkas. Stochastic hamiltonian gradient methods for smooth games. In International Conference on Machine Learning, pp. 6370–6381. PMLR, 2020. 
+ +Nicolas Loizou, Hugo Berard, Gauthier Gidel, Ioannis Mitliagkas, and Simon Lacoste-Julien. Stochastic gradient descent-ascent and consensus optimization for smooth games: Convergence analysis under expected co-coercivity. arXiv preprint arXiv:2107.00052, 2021. + +Yura Malitsky and Matthew K Tam. A forward-backward splitting method for monotone inclusions without cocoercivity. SIAM Journal on Optimization, 30(2):1451–1472, 2020. + +Panayotis Mertikopoulos, Bruno Lecouat, Houssam Zenati, Chuan-Sheng Foo, Vijay Chandrasekhar, and Georgios Piliouras. Optimistic mirror descent in saddle-point problems: Going the extra(-gradient) mile. In International Conference on Learning Representations, 2019. URL https://openreview.net/pdf?id=Bkg8jjC9KQ. + +Lars Mescheder, Sebastian Nowozin, and Andreas Geiger. The numerics of GANs. In I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 30. Curran Associates, 2017. + +Lars Mescheder, Andreas Geiger, and Sebastian Nowozin. Which training methods for GANs do actually converge? In Jennifer Dy and Andreas Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, volume 80 of Proceedings of Machine Learning Research, pp. 3481–3490. PMLR, 2018. + +Konstantin Mishchenko, Dmitry Kovalev, Egor Shulgin, Peter RichtΓ‘rik, and Yura Malitsky. Revisiting stochastic extragradient. In International Conference on Artificial Intelligence and Statistics, pp. 4573–4582. PMLR, 2020. + +Aryan Mokhtari, Asuman E Ozdaglar, and Sarath Pattathil. Convergence rate of $O(1/k)$ for optimistic gradient and extragradient methods in smooth convex-concave saddle point problems. SIAM Journal on Optimization, 30(4):3230–3251, 2020. + +Renato DC Monteiro and Benar Fux Svaiter. On the complexity of the hybrid proximal extragradient method for the iterates and the ergodic mean. 
SIAM Journal on Optimization, 20(6):2755–2787, 2010. + +Vaishnavh Nagarajan and J. Zico Kolter. Gradient descent GAN optimization is locally stable. In I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 30. Curran Associates, 2017. + +Hongseok Namkoong and John C Duchi. Stochastic gradient methods for distributionally robust optimization with $f$ -divergences. In D. Lee, M. Sugiyama, U. Luxburg, I. Guyon, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 29. Curran Associates, 2016. + +Arkadi Nemirovski. Prox-method with rate of convergence $\mathrm { O } ( 1 / t )$ for variational inequalities with Lipschitz continuous monotone operators and smooth convex-concave saddle point problems. SIAM Journal on Optimization, 15(1):229–251, 2004. + +Yurii Nesterov. Dual extrapolation and its applications to solving variational inequalities and related problems. Mathematical Programming, 109(2):319–344, 2007. + +Neal Parikh and Stephen Boyd. Proximal algorithms. Foundations and Trends in Optimization, 1(3): 123–231, 2013. + +Reese Pathak and Martin J Wainwright. Fedsplit: an algorithmic framework for fast federated optimization. In H. Larochelle, M. Ranzato, R. Hadsell, M. F. Balcan, and H. Lin (eds.), Advances in Neural Information Processing Systems, volume 33, pp. 7057–7066. Curran Associates, Inc., 2020. URL https://proceedings.neurips.cc/paper/2020/file/ 4ebd440d99504722d80de606ea8507da-Paper.pdf. + +Fabian Pedregosa and Gauthier Gidel. Adaptive three-operator splitting. In Jennifer Dy and Andreas Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, volume 80 of Proceedings of Machine Learning Research, pp. 4085–4094. PMLR, 10–15 Jul 2018. + +Fabian Pedregosa, Kilian Fatras, and Mattia Casotto. Proximal splitting meets variance reduction. 
In Kamalika Chaudhuri and Masashi Sugiyama (eds.), Proceedings of the Twenty-Second International Conference on Artificial Intelligence and Statistics, volume 89 of Proceedings of Machine Learning Research, pp. 1–10. PMLR, 16–18 Apr 2019. + +Emile Richard, Pierre-Andre Savalle, and Nicolas Vayatis. Estimation of simultaneously sparse and low rank matrices. In John Langford and Joelle Pineau (eds.), Proceedings of the 29th International Conference on Machine Learning, pp. 1351–1358. Omnipress, 2012. + +Herbert Robbins and Sutton Monro. A stochastic approximation method. The Annals of Mathematical Statistics, pp. 400–407, 1951. + +R Tyrrell Rockafellar. Monotone operators associated with saddle-functions and minimax problems. Nonlinear functional analysis, 18(part 1):397–407, 1970. + +Ernest K Ryu and Stephen Boyd. Primer on monotone operator methods. Appl. Comput. Math, 15(1): 3–43, 2016. + +Ernest K. Ryu, Kun Yuan, and Wotao Yin. ODE analysis of stochastic gradient methods with optimism and anchoring for minimax problems, 2020. + +Gesualdo Scutari, Francisco Facchinei, Jong-Shi Pang, and Daniel P Palomar. Real and complex monotone communication games. IEEE Transactions on Information Theory, 60(7):4197–4231, 2014. + +Soroosh Shafieezadeh-Abadeh, Peyman Mohajerin Esfahani, and Daniel Kuhn. Distributionally robust logistic regression. In Corinna Cortes, Neil D. Lawrence, Daniel D. Lee, Masashi Sugiyama, and Roman Garnett (eds.), Advances in Neural Information Processing Systems, volume 28, pp. 1576–1584. Curran Associates, 2015. + +Aman Sinha, Hongseok Namkoong, and John Duchi. Certifying some distributional robustness with principled adversarial training. In International Conference on Learning Representations, 2018. URL https://openreview.net/forum?id=Hk6kPgZA-. + +Paul Tseng. A modified forward-backward splitting method for maximal monotone mappings. SIAM Journal on Control and Optimization, 38(2):431–446, 2000. + +Nguyen Van Dung and Bang Cong Vu. 
Convergence analysis of the stochastic reflected forwardbackward splitting algorithm. arXiv preprint arXiv:2102.08906, 2021. + +Christina Wadsworth, Francesca Vera, and Chris Piech. Achieving fairness through adversarial learning: an application to recidivism prediction. arXiv preprint arXiv:1807.00199, 2018. + +Xiaohan Yan and Jacob Bien. Rare feature selection in high dimensions. Journal of the American Statistical Association, 2020. Published online, to appear in print. + +Yaodong Yu, Tianyi Lin, Eric Mazumdar, and Michael I Jordan. Fast distributionally robust learning with variance reduced min-max optimization. arXiv preprint arXiv:2104.13326, 2021. + +Alp Yurtsever, Bang Cong Vu, and Volkan Cevher. Stochastic three-composite convex minimization. In D. Lee, M. Sugiyama, U. Luxburg, I. Guyon, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 29. Curran Associates, 2016. + +Brian Hu Zhang, Blake Lemoine, and Margaret Mitchell. Mitigating unwanted biases with adversarial learning. In Proceedings of the 2018 AAAI/ACM Conference on AI, Ethics, and Society, pp. 335– 340, 2018. + +# A ML APPLICATIONS OF THE MONOTONE INCLUSION (1) + +There are two main classes of applications of (1) in ML: optimization problems and saddle-point games. + +Optimization Problems In this case the monotone inclusion arises from finding the zero of a sum of subgradients of convex functions, as discussed in Section 2. It is typical in ML to solve the empirical risk minimization problem + +$$ +\operatorname* { m i n } _ { x \in \mathbb { R } ^ { d } } \frac { 1 } { m } \sum _ { j = 1 } ^ { m } f _ { j } ( x ) + \sum _ { i = 1 } ^ { n } r _ { i } ( x ) +$$ + +over a size- $m$ dataset. Usually, the gradient of the loss function $f _ { j }$ for each datapoint $j$ is Lipschitz continuous. The terms $r _ { i }$ may be regularizers used to reduce overfitting or encourage structural properties such as sparsity or low matrix rank. 
They also may represent constraints on the parameters such as nonnegativity or being in the probability simplex. Crucially, these regularizers are rarely differentiable. The first-order necessary condition for the solution of (16) is + +$$ +0 \in \nabla f ( x ^ { * } ) + \sum _ { i = 1 } ^ { n } \partial r _ { i } ( x ^ { * } ) , +$$ + +where $f ( x ) \doteq \frac { 1 } { m } \sum _ { j = 1 } ^ { m } f _ { j } ( x )$ , thus $\nabla f ( x ) \doteq \frac { 1 } { m } \sum _ { j = 1 } ^ { m } \nabla f _ { j } ( x )$ . The inclusion (17) is a special case of (1), and our method may use the standard stochastic oracle for $\nabla f ( x )$ , namely + +$$ +\frac { 1 } { | \mathbf { B } | } \sum _ { j \in \mathbf { B } } \nabla f _ { j } ( z ) +$$ + +which subsamples a randomly selected minibatch of datapoints $\mathbf { B } \subseteq \{ 1 , \dots , m \}$ . + +Games Consider the following nonsmooth Nash equilibrium problem + +$$ +x ^ { * } \in \operatorname* { a r g \, m i n } _ { x \in \mathbb { R } ^ { d _ { x } } } F ( x , y ^ { * } ) + \sum _ { i = 1 } ^ { n _ { 1 } } r _ { i } ( x ) \quad \text { and } \quad y ^ { * } \in \operatorname* { a r g \, m i n } _ { y \in \mathbb { R } ^ { d _ { y } } } G ( x ^ { * } , y ) + \sum _ { i = 1 } ^ { n _ { 2 } } d _ { i } ( y ) . +$$ + +The terms $\sum _ { i = 1 } ^ { n _ { 1 } } r _ { i } ( x )$ and $\sum _ { i = 1 } ^ { n _ { 2 } } d _ { i } ( y )$ once again represent regularizers and constraints on each player's strategy. Note that zero-sum (saddle-point) problems correspond to having $F ( x , y ) = - G ( x , y )$ . 
Under appropriate convexity conditions and constraint qualifications, the solutions of (18) correspond to the solutions of the following monotone inclusion in the form of (1): + +$$ +0 \in \left[ \begin{array} { l } { \nabla _ { x } F ( x ^ { * } , y ^ { * } ) } \\ { \nabla _ { y } G ( x ^ { * } , y ^ { * } ) } \end{array} \right] + \sum _ { i = 1 } ^ { \operatorname* { m a x } \{ n _ { 1 } , n _ { 2 } \} } \left( \partial r _ { i } ( x ^ { * } ) \times \partial d _ { i } ( y ^ { * } ) \right) +$$ + +where for $i > \operatorname* { m i n } \{ n _ { 1 } , n _ { 2 } \}$ we include β€œdummy functions”, either $r _ { i } ( x ) = 0$ when $n _ { 1 } < n _ { 2 }$ or $d _ { i } ( y ) = 0$ when $n _ { 1 } > n _ { 2 }$ . If the functions $F$ and $G$ arise as averages in the same way as $f$ in (16), then our method may again use a stochastic oracle for them. + +Distributionally-Robust ML One example application of (19) is distributionally-robust ML, as demonstrated in the numerical experiment in Section 7. The full problem statement is given in Appendix I. + +Lagrangian Duality Another application of (19) is constrained optimization via Lagrangian duality. Consider + +$$ +\operatorname* { m i n } _ { x \in \mathbb { R } ^ { d } } \left\{ f ( x ) + \sum _ { i = 1 } ^ { n } r _ { i } ( x ) \right\} \quad { \mathrm { s . t . } } \quad h _ { j } ( x ) \leq 0 \quad j = 1 , \ldots , p . +$$ + +As in (16), $f$ is a loss function and the $r _ { i }$ may represent regularizers and (β€œsimple”) constraints; in addition, there are $p$ functional constraints on the model parameters $x$ . Introducing Lagrange multipliers $\gamma \in \mathbb { R } ^ { p }$ , the problem can be written as + +$$ +\operatorname* { m i n } _ { x \in \mathbb { R } ^ { d } } \operatorname* { m a x } _ { \gamma \in \mathbb { R } _ { + } ^ { p } } \left\{ f ( x ) + \sum _ { i = 1 } ^ { n } r _ { i } ( x ) + \sum _ { j = 1 } ^ { p } \gamma _ { j } h _ { j } ( x ) \right\} . 
+$$ + +Under appropriate convexity conditions and constraint-qualifications, this reduces to the following inclusion in the form of (1): + +$$ +0 \in \left[ \begin{array} { c } { \nabla f ( x ) + \sum _ { j = 1 } ^ { p } \gamma _ { j } \nabla h _ { j } ( x ) } \\ { - h ( x ) } \end{array} \right] + \sum _ { i = 1 } ^ { n } \left( \partial r _ { i } ( x ^ { * } ) \times \{ 0 \} \right) +$$ + +where $h ( \boldsymbol { x } ) = [ h _ { 1 } ( \boldsymbol { x } ) , h _ { 2 } ( \boldsymbol { x } ) , \ldots , h _ { p } ( \boldsymbol { x } ) ] ^ { \top }$ . For certain choices of $h$ , such as linear or quadratic functions, the first term above is monotone and (locally) Lipschitz continuous (Alacaoglu et al., 2021). + +Bilinear Games with Many Constraints Finally, consider the bilinear saddlepoint problem subject to multiple constraints: + +$$ +\begin{array} { l l l } { \underset { x \in \mathbb { R } ^ { d } } { \operatorname* { m i n } } \underset { y \in \mathbb { R } ^ { d } } { \operatorname* { m a x } } x ^ { \top } D y } & { \mathrm { s . t . } } & { x \in \mathcal { C } _ { j } ^ { 1 } } & { j = 1 , \dots , n _ { 1 } , } \\ & { } & { y \in \mathcal { C } _ { j } ^ { 2 } } & { j = 1 , \dots , n _ { 2 } . } \end{array} +$$ + +Under some regularity conditions, this problem reduces to the inclusion + +$$ +0 \in \left[ \begin{array} { c } { D y ^ { * } } \\ { - D ^ { \top } x ^ { * } } \end{array} \right] + \sum _ { j = 1 } ^ { \operatorname* { m a x } \{ n _ { 1 } , n _ { 2 } \} } \big ( N _ { { \mathcal C } _ { j } ^ { 1 } } ( x ^ { * } ) \times N _ { { \mathcal C } _ { j } ^ { 2 } } ( y ^ { * } ) \big ) , +$$ + +where we introduce additional β€œdummy” sets $\mathcal { C } _ { j } ^ { 1 } = \mathbb { R } ^ { d }$ or $\mathcal { C } _ { j } ^ { 2 } = \mathbb { R } ^ { d }$ when $n _ { 1 } \neq n _ { 2 }$ . The first term is linear and skew symmetric, and therefore can easily be shown to be Lipschitz continuous and monotone. 
If all the constraint sets are closed and convex, then the rest of the terms are maximal monotone, so the problem is of the form (1), meaning that projective splitting may be applied, possibly using a stochastic oracle for the first term. + +# B ADDITIONAL RELATED WORK + +The preprint by Bot et al. (2019) develops a stochastic version of Tseng’s method under the requirement that the noise variance converges to 0. In ML, this could be achieved with the use of perpetually increasing batch sizes, a strategy that is impractical in many scenarios. The stochastic version of FRB proposed by Van Dung & Vu (2021) has more practical noise requirements, but has stronger assumptions on the problem which are rarely satisfied in ML applications: either uniform/strong monotonicity or a bounded domain. The papers by Yurtsever et al. (2016) and Pedregosa et al. (2019) consider stochastic variants of three-operator splitting, but require $B$ in (1) to be cocoercive, essentially restricting them to optimization problems. + +There are several alternatives to the (stochastic) extragradient method that reduce the number of gradient evaluations per iteration from two to one (Hsieh et al., 2019; Malitsky & Tam, 2020; Gidel et al., 2019). However, these methods have more stringent stepsize limits, making it unclear a priori whether they will outperform two-step methods. + +DSEG is a stochastic version of EG (Hsieh et al., 2020). The primary innovation of DSEG is using different stepsizes for the extrapolation and update steps, thereby resolving some of the convergence issues affecting stochastic EG. As noted earlier, DSEG is the special case of our SPS method in which $n = 0$ , that is, no regularizers/constraints are present in the underlying game. The analysis in (Hsieh et al., 2020) also did not consider the fixed stepsize choice given in Theorem 2. 
 + +In the context of GANs, several methods have been developed based on a variational inequality/monotone inclusion approach (Gidel et al., 2019; Daskalakis et al., 2018; Hsieh et al., 2019; 2020; BΓΆhm et al., 2020). Many of these papers point out that variational inequalities provide a principled framework for studying the GAN training problem and correcting some of the flaws in the standard method GDA. + +# C PROOF OF THEOREM 1 + +# C.1 STOCHASTIC QUASI-FEJER MONOTONICITY + +The key to the analysis is showing that the algorithm satisfies Stochastic Quasi-FejΓ©r Monotonicity (Combettes & Pesquet, 2015). + +Lemma 2 ((Combettes & Pesquet, 2015), Proposition 2.3). Suppose $p ^ { k }$ is a sequence of $\mathbb { R } ^ { d }$ -valued random variables defined on a probability space $( \Omega , { \mathcal { F } } , P )$ . Let $\mathcal { F } _ { k } \doteq \sigma ( p ^ { 1 } , \cdot \cdot \cdot , p ^ { k } )$ . Let $F$ be a closed subset of $\mathbb { R } ^ { d }$ such that, for every $p \in F$ , there exist nonnegative random variables $\chi ^ { k } ( p ) \geq 0$ , $\eta ^ { k } ( p ) \geq 0$ , and $\nu ^ { k } ( p ) \geq 0$ with $\sum _ { k = 1 } ^ { \infty } \chi ^ { k } ( p ) < \infty$ and $\sum _ { k = 1 } ^ { \infty } \eta ^ { k } ( p ) < \infty$ almost surely, such that + +$$ +\begin{array} { r l } { ( \forall k \in \mathbb { N } ) } & { \mathbb { E } [ \| p ^ { k + 1 } - p \| ^ { 2 } | \mathcal { F } _ { k } ] \leq ( 1 + \chi ^ { k } ( p ) ) \| p ^ { k } - p \| ^ { 2 } - \nu ^ { k } ( p ) + \eta ^ { k } ( p ) . } \end{array} +$$ + +Then the following hold: + +1. $( \forall p \in F ) : \ \sum _ { k = 1 } ^ { \infty } \nu ^ { k } ( p ) < \infty$ a.s. + +2. $p ^ { k }$ is bounded a.s. + +3. There exists $\tilde { \Omega }$ such that $P [ \tilde { \Omega } ] = 1$ and $\left\{ \| p ^ { k } ( \omega ) - p \| \right\}$ converges for every $\omega \in \tilde { \Omega }$ and $p \in F$ . 
 + +# C.2 IMPORTANT RECURSION FOR SPS + +The following lemma summarizes the key recursion satisfied by Algorithm 1, to which we will apply Lemma 2. Recall that $L$ is the Lipschitz constant of $B$ . + +Lemma 3. For Algorithm 1, suppose (9)–(11) hold and + +$$ +\rho _ { k } \leq \overline { { \rho } } < 1 / L . +$$ + +Let + +$$ +T _ { k } \doteq \frac { \tau } { \overline { { \rho } } } \sum _ { i = 1 } ^ { n } \| y _ { i } ^ { k } - w _ { i } ^ { k } \| ^ { 2 } + \frac { 1 } { \overline { { \rho } } \tau } \sum _ { i = 1 } ^ { n } \| z ^ { k } - x _ { i } ^ { k } \| ^ { 2 } + 2 \big ( 1 - \overline { { \rho } } L \big ) \| B \big ( z ^ { k } \big ) - w _ { n + 1 } ^ { k } \| ^ { 2 } +$$ + +then for all $p ^ { * } \in { \mathcal { S } }$ , with probability one + +$\begin{array} { r } { \mathbb { E } [ \| p ^ { k + 1 } - p ^ { * } \| ^ { 2 } | \mathcal { F } _ { k } ] \le \big ( 1 + C _ { 1 } \alpha _ { k } ^ { 2 } + C _ { 3 } \alpha _ { k } \rho _ { k } ^ { 2 } \big ) \| p ^ { k } - p ^ { * } \| ^ { 2 } - \alpha _ { k } \rho _ { k } T _ { k } + C _ { 2 } \alpha _ { k } ^ { 2 } + C _ { 4 } \alpha _ { k } \rho _ { k } ^ { 2 } } \end{array}$ (21) where $C _ { 1 } , \ldots , C _ { 4 }$ are nonnegative constants defined in (33), (34), (48), and (49) below, respectively. + +Note that $T _ { k }$ is a scaled version of the approximation residual $G _ { k }$ defined in (14). + +We proceed to first prove Lemma 3 and then exploit the implications of Lemma 2. Referring to (10) and (11), let $N \doteq \operatorname* { m a x } _ { j \in 1 \ldots 4 } N _ { j }$ . To simplify the constants, we will use $N$ in place of $N _ { j }$ for the noise variance bounds given in (10)-(11). + +# C.3 UPPER BOUNDING THE GRADIENT + +Throughout the analysis, we fix some $p ^ { * } = ( z ^ { * } , w _ { 1 } ^ { * } , \ldots , w _ { n + 1 } ^ { * } ) \in \mathcal { S }$ . All statements are with probability one (almost surely), but for brevity we will omit this unless it needs to be emphasized. 
+ +In this section, we derive appropriate upper bounds for $\| \nabla \varphi _ { k } \| ^ { 2 }$ to use in (13). We begin with $\nabla _ { z } \varphi _ { k }$ + +$$ +\begin{array} { r l r } & { } & { \displaystyle \| \nabla _ { z } \varphi _ { k } \| ^ { 2 } = \Big \| \displaystyle \sum _ { i = 1 } ^ { n + 1 } y _ { i } ^ { k } \Big \| ^ { 2 } \leq 2 \| y _ { n + 1 } ^ { k } \| ^ { 2 } + 2 \Big \| \displaystyle \sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \Big \| ^ { 2 } = 2 \Big \| B ( x _ { n + 1 } ^ { k } ) + e ^ { k } \Big \| ^ { 2 } + 2 \Big \| \displaystyle \sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \Big \| ^ { 2 } } \\ & { } & { \leq 4 \| B ( x _ { n + 1 } ^ { k } ) \| ^ { 2 } + 2 \Big \| \displaystyle \sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \Big \| ^ { 2 } + 4 \| e ^ { k } \| ^ { 2 } . } \end{array} +$$ + +Now next take expectations with respect to $\mathcal { F } _ { k }$ and $\mathcal { E } _ { k }$ , and use the bound on the variance of the noise in (11), obtaining + +$$ +\begin{array} { r l r } { { \mathbb { E } [ \| \nabla _ { z } \varphi _ { k } \| ^ { 2 } | \mathcal { F } _ { k } , \mathcal { E } _ { k } ] \leq \mathbb { E } [ 4 \| B ( x _ { n + 1 } ^ { k } ) \| ^ { 2 } + 2 \Big \| \displaystyle \sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \Big \| ^ { 2 } + 4 \| e ^ { k } \| ^ { 2 } \ \Big | \ \mathcal { F } _ { k } , \mathcal { E } _ { k } ] } } \\ & { } & { \leq 4 ( N + 1 ) \| B ( x _ { n + 1 } ^ { k } ) \| ^ { 2 } + 2 \Big \| \displaystyle \sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \Big \| ^ { 2 } + 4 N , ~ } \end{array} +$$ + +where we have used that $y _ { i } ^ { k }$ is $\mathcal { F } _ { k }$ -measurable for $i \in 1 . . n$ . 
Thus, taking expectations over $\mathcal { E } _ { k }$ conditioned on $\mathcal { F } _ { k }$ yields + +$$ +\mathbb { E } \left[ \| \nabla _ { z } \varphi _ { k } \| ^ { 2 } | \mathcal { F } _ { k } \right] \leq 4 ( N + 1 ) \mathbb { E } [ \| B ( x _ { n + 1 } ^ { k } ) \| ^ { 2 } | \mathcal { F } _ { k } ] + 2 \Big \| \sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \Big \| ^ { 2 } + 4 N . +$$ + +We will now bound the two terms on the right side of (22). + +# C.3.1 FIRST TERM IN (22) + +First, note that + +$$ +\begin{array} { r l } & { \| B ( z ^ { k } ) \| ^ { 2 } = \| B ( z ^ { k } ) - B ( z ^ { * } ) + B ( z ^ { * } ) \| ^ { 2 } } \\ & { \qquad \leq 2 \| B ( z ^ { k } ) - B ( z ^ { * } ) \| ^ { 2 } + 2 \| B ( z ^ { * } ) \| ^ { 2 } } \\ & { \qquad \leq 2 L ^ { 2 } \| z ^ { k } - z ^ { * } \| ^ { 2 } + 2 \| B ( z ^ { * } ) \| ^ { 2 } } \\ & { \qquad \leq 2 L ^ { 2 } \| p ^ { k } - p ^ { * } \| ^ { 2 } + 2 \| B ( z ^ { * } ) \| ^ { 2 } . } \end{array} +$$ + +Now, returning to the first term on the right of (22), we have + +$$ +\begin{array} { r l } & { \| B ( x _ { n + 1 } ^ { k } ) \| ^ { 2 } = \| B ( z ^ { k } ) + B ( x _ { n + 1 } ^ { k } ) - B ( z ^ { k } ) \| ^ { 2 } } \\ & { \qquad \leq 2 \| B ( z ^ { k } ) \| ^ { 2 } + 2 \| B ( x _ { n + 1 } ^ { k } ) - B ( z ^ { k } ) \| ^ { 2 } } \\ & { \qquad \leq 2 \| B ( z ^ { k } ) \| ^ { 2 } + 2 L ^ { 2 } \| x _ { n + 1 } ^ { k } - z ^ { k } \| ^ { 2 } } \\ & { \qquad \leq 4 L ^ { 2 } \| p ^ { k } - p ^ { * } \| ^ { 2 } + 4 \| B ( z ^ { * } ) \| ^ { 2 } + 2 L ^ { 2 } \| x _ { n + 1 } ^ { k } - z ^ { k } \| ^ { 2 } } \end{array} +$$ + +where we have used (23) to obtain (24). 
+ +For the third term in (24), we have from the calculation on line 7 of the algorithm that + +$$ +\begin{array} { r } { x _ { n + 1 } ^ { k } - z ^ { k } = - \rho _ { k } ( r ^ { k } - w _ { n + 1 } ^ { k } ) = - \rho _ { k } ( B ( z ^ { k } ) + \epsilon ^ { k } - w _ { n + 1 } ^ { k } ) , } \end{array} +$$ + +and therefore + +$$ +\begin{array} { r l } & { \| x _ { n + 1 } ^ { k } - z ^ { k } \| ^ { 2 } = \rho _ { k } ^ { 2 } \| B ( z ^ { k } ) + \epsilon ^ { k } - w _ { n + 1 } ^ { k } \| ^ { 2 } } \\ & { \qquad \leq \overline { { \rho } } ^ { 2 } \| B ( z ^ { k } ) + \epsilon ^ { k } - w _ { n + 1 } ^ { k } \| ^ { 2 } } \\ & { \qquad \leq 3 \overline { { \rho } } ^ { 2 } ( \| B ( z ^ { k } ) \| ^ { 2 } + \| \epsilon ^ { k } \| ^ { 2 } + \| w _ { n + 1 } ^ { k } \| ^ { 2 } ) . } \end{array} +$$ + +We next take expectations conditioned on $\mathcal { F } _ { k }$ and use the noise variance bound (10) to obtain + +$$ +\begin{array} { r l } & { \mathbb { E } \big [ \| x _ { n + 1 } ^ { k } - z ^ { k } \| ^ { 2 } | \mathcal { F } _ { k } \big ] \leq \mathbb { E } \big [ 3 \overline { { \rho } } ^ { 2 } \big ( \| B ( z ^ { k } ) \| ^ { 2 } + \| \epsilon ^ { k } \| ^ { 2 } + \| w _ { n + 1 } ^ { k } \| ^ { 2 } \big ) | \mathcal { F } _ { k } \big ] } \\ & { \qquad \leq 3 \overline { { \rho } } ^ { 2 } \big ( ( N + 1 ) \| B ( z ^ { k } ) \| ^ { 2 } + \| w _ { n + 1 } ^ { k } \| ^ { 2 } + N \big ) . 
} \end{array} +$$ + +Therefore + +$$ +\begin{array} { r l } & { \mathbb { E } [ \| x _ { n + 1 } ^ { k } - z ^ { k } \| ^ { 2 } | \mathcal { F } _ { k } ] \leq 6 \overline { { \rho } } ^ { 2 } \big ( ( N + 1 ) \| B ( z ^ { k } ) \| ^ { 2 } + \| w _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { * } \| ^ { 2 } + \| w _ { n + 1 } ^ { * } \| ^ { 2 } \big ) + 3 \overline { { \rho } } ^ { 2 } N } \\ & { \qquad \leq 6 \overline { { \rho } } ^ { 2 } \big ( 2 ( N + 1 ) L ^ { 2 } \| p ^ { k } - p ^ { * } \| ^ { 2 } + 2 ( N + 1 ) \| B ( z ^ { * } ) \| ^ { 2 } + \| w _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { * } \| ^ { 2 } + \| B ( z ^ { * } ) \| ^ { 2 } \big ) + 3 \overline { { \rho } } ^ { 2 } N } \\ & { \qquad \leq 6 \overline { { \rho } } ^ { 2 } \big ( 2 ( N + 1 ) L ^ { 2 } \| p ^ { k } - p ^ { * } \| ^ { 2 } + \| w _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { * } \| ^ { 2 } \big ) + 1 8 \overline { { \rho } } ^ { 2 } ( N + 1 ) \| B ( z ^ { * } ) \| ^ { 2 } + 3 \overline { { \rho } } ^ { 2 } N } \\ & { \qquad \leq 1 8 \overline { { \rho } } ^ { 2 } ( N + 1 ) \big ( ( L ^ { 2 } + 1 ) \| p ^ { k } - p ^ { * } \| ^ { 2 } + \| B ( z ^ { * } ) \| ^ { 2 } \big ) + 3 \overline { { \rho } } ^ { 2 } N } \end{array} +$$ + +where the second inequality uses (23) and $w _ { n + 1 } ^ { * } = B ( z ^ { * } )$ . Combining (24) and (25), we arrive at + +$$ +\begin{array} { r l } & { \mathbb { E } \left[ \left. B ( x _ { n + 1 } ^ { k } ) \right. 
^ { 2 } \middle | \mathcal { F } _ { k } \right] \leq 4 L ^ { 2 } \left[ 1 + 9 \overline { { \rho } } ^ { 2 } ( L ^ { 2 } + 1 ) ( N + 1 ) \right] \Vert p ^ { k } - p ^ { * } \Vert ^ { 2 } } \\ & { \qquad + 4 \big ( 1 + 9 \overline { { \rho } } ^ { 2 } L ^ { 2 } ( N + 1 ) \big ) \Vert B ( z ^ { * } ) \Vert ^ { 2 } + 6 \overline { { \rho } } ^ { 2 } L ^ { 2 } N . } \end{array} +$$ + +# C.3.2 SECOND TERM IN (22) + +For $i \in 1 . . n$ , line 5 of the algorithm may be rearranged into $y _ { i } ^ { k } = \tau ^ { - 1 } ( z ^ { k } - x _ { i } ^ { k } ) + w _ { i } ^ { k }$ , so + +$$ +\begin{array} { r l } & { \Big \| \sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \Big \| ^ { 2 } = \Big \| \sum _ { i = 1 } ^ { n } ( \tau ^ { - 1 } ( z ^ { k } - x _ { i } ^ { k } ) + w _ { i } ^ { k } ) \Big \| ^ { 2 } } \\ & { \qquad \leq 2 \Big \| \tau ^ { - 1 } \sum _ { i = 1 } ^ { n } ( z ^ { k } - x _ { i } ^ { k } ) \Big \| ^ { 2 } + 2 \Big \| \sum _ { i = 1 } ^ { n } w _ { i } ^ { k } \Big \| ^ { 2 } } \\ & { \qquad \leq 2 n \tau ^ { - 2 } \sum _ { i = 1 } ^ { n } \| z ^ { k } - x _ { i } ^ { k } \| ^ { 2 } + 2 \Big \| \sum _ { i = 1 } ^ { n } w _ { i } ^ { k } \Big \| ^ { 2 } } \\ & { \qquad \leq 4 n ^ { 2 } \tau ^ { - 2 } \| z ^ { k } - z ^ { * } \| ^ { 2 } + 4 n \tau ^ { - 2 } \sum _ { i = 1 } ^ { n } \| z ^ { * } - x _ { i } ^ { k } \| ^ { 2 } + 4 n \sum _ { i = 1 } ^ { n } \| w _ { i } ^ { k } - w _ { i } ^ { * } \| ^ { 2 } + 4 \Big \| \sum _ { i = 1 } ^ { n } w _ { i } ^ { * } \Big \| ^ { 2 } } \\ & { \qquad \leq 4 n ^ { 2 } ( \tau ^ { - 2 } + 1 ) \| p ^ { k } - p ^ { * } \| ^ { 2 } + 4 n \tau ^ { - 2 } \sum _ { i = 1 } ^ { n } \| z ^ { * } - x _ { i } ^ { k } \| ^ { 2 } + 4 \Big \| \sum _ { i = 1 } ^ { n } w _ { i } ^ { * } \Big \| ^ { 2 } . 
} \end{array} +$$ + +By the definition of the solution set $\mathcal { S }$ in (5), $w _ { i } ^ { * } \in A _ { i } ( z ^ { * } )$ , so $z ^ { * } + \tau w _ { i } ^ { * } \in ( I + \tau A _ { i } ) ( z ^ { * } )$ , and since the resolvent is single-valued (Bauschke & Combettes, 2017, Cor. 23.9) we therefore obtain + +$$ +z ^ { * } = ( I + \tau A _ { i } ) ^ { - 1 } ( I + \tau A _ { i } ) ( z ^ { * } ) = J _ { \tau A _ { i } } ( z ^ { * } + \tau w _ { i } ^ { * } ) . +$$ + +From lines 3 and 4 of the algorithm, we also have $x _ { i } ^ { k } = J _ { \tau A _ { i } } ( z ^ { k } + \tau w _ { i } ^ { k } )$ for $i \in 1 . . n$ . Thus, using the nonexpansiveness of the resolvent (Bauschke & Combettes, 2017, Def. 4.1 and Cor. 23.9), we have + +$$ +\begin{array} { r l } { \displaystyle \sum _ { i = 1 } ^ { n } \| z ^ { * } - x _ { i } ^ { k } \| ^ { 2 } = \displaystyle \sum _ { i = 1 } ^ { n } \left\| J _ { \tau A _ { i } } ( z ^ { k } + \tau w _ { i } ^ { k } ) - J _ { \tau A _ { i } } ( z ^ { * } + \tau w _ { i } ^ { * } ) \right\| ^ { 2 } } & { } \\ { \leq \displaystyle \sum _ { i = 1 } ^ { n } \| z ^ { k } + \tau w _ { i } ^ { k } - z ^ { * } - \tau w _ { i } ^ { * } \| ^ { 2 } } & { } \\ { = \displaystyle \sum _ { i = 1 } ^ { n } \| z ^ { k } - z ^ { * } + \tau ( w _ { i } ^ { k } - w _ { i } ^ { * } ) \| ^ { 2 } } & { } \\ { \leq 2 n \| z ^ { k } - z ^ { * } \| ^ { 2 } + 2 \tau ^ { 2 } \displaystyle \sum _ { i = 1 } ^ { n } \| w _ { i } ^ { k } - w _ { i } ^ { * } \| ^ { 2 } } & { } \\ { \leq 2 ( n + \tau ^ { 2 } ) \| p ^ { k } - p ^ { * } \| ^ { 2 } . } & { } \end{array} +$$ + +Combining (27) and (28) yields + +$$ +\Big \| \sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \Big \| ^ { 2 } \leq 1 2 n ^ { 2 } \tau ^ { - 2 } ( n + \tau ^ { 2 } ) \| p ^ { k } - p ^ { * } \| ^ { 2 } + 4 \Big \| \sum _ { i = 1 } ^ { n } w _ { i } ^ { * } \Big \| ^ { 2 } . 
+$$ + +Combining (26) and (29) with (22) yields + +$$ +\begin{array} { r l } & { \mathbb { E } \left[ \| \nabla _ { z } \varphi _ { k } \| ^ { 2 } | \mathcal { F } _ { k } \right] \le 2 4 \left[ ( 1 + 9 \overline { { \rho } } ^ { 2 } ) ( L ^ { 2 } + 1 ) ^ { 2 } ( N + 1 ) ^ { 2 } + n ^ { 2 } \tau ^ { - 2 } ( n + \tau ^ { 2 } ) \right] \| p ^ { k } - p ^ { * } \| ^ { 2 } } \\ & { \qquad + 1 6 ( N + 1 ) \big ( 1 + 9 \overline { { \rho } } ^ { 2 } L ^ { 2 } ( N + 1 ) \big ) \| B ( z ^ { * } ) \| ^ { 2 } + 8 \bigg \| \displaystyle \sum _ { i = 1 } ^ { n } w _ { i } ^ { * } \bigg \| ^ { 2 } } \\ & { \qquad + 2 4 \overline { { \rho } } ^ { 2 } L ^ { 2 } ( N + 1 ) N + 4 N . } \end{array} +$$ + +# C.3.3 DUAL GRADIENT NORM + +Considering that $\nabla \varphi _ { k }$ is taken with respect to the subspace $\mathcal { P }$ , the gradients with respect to the dual variables are β€” see for example Eckstein & Svaiter (2009) β€” for each $i \in { 1 . . ( n + 1 ) }$ , + +$$ +\begin{array} { l } { \displaystyle \| \nabla _ { w _ { i } } \varphi _ { k } \| ^ { 2 } = \left\| x _ { i } ^ { k } - \frac { 1 } { n + 1 } \sum _ { j = 1 } ^ { n + 1 } x _ { j } ^ { k } \right\| ^ { 2 } = \left\| \frac { 1 } { n + 1 } \sum _ { j = 1 } ^ { n + 1 } ( x _ { i } ^ { k } - x _ { j } ^ { k } ) \right\| ^ { 2 } } \\ { \displaystyle \leq \sum _ { j = 1 } ^ { n + 1 } \| x _ { i } ^ { k } - x _ { j } ^ { k } \| ^ { 2 } } \\ { \displaystyle \leq 2 \sum _ { j = 1 } ^ { n + 1 } \big ( \| x _ { i } ^ { k } - z ^ { k } \| ^ { 2 } + \| z ^ { k } - x _ { j } ^ { k } \| ^ { 2 } \big ) } \end{array} +$$ + +Summing this inequality for $i \in { 1 . . 
( n + 1 ) }$ and collecting terms yields

$$
\sum _ { i = 1 } ^ { n + 1 } \| \nabla _ { w _ { i } } \varphi _ { k } \| ^ { 2 } \leq 4 ( n + 1 ) \sum _ { i = 1 } ^ { n + 1 } \| x _ { i } ^ { k } - z ^ { k } \| ^ { 2 } ,
+$$

so taking expectations conditioned on $\mathcal { F } _ { k }$ produces

$$
\begin{array}{rl}
\displaystyle \sum_{i=1}^{n+1} \mathbb{E} \big[ \| \nabla_{w_i} \varphi_k \|^2 \,\big|\, \mathcal{F}_k \big] & \leq 4 ( n + 1 ) \displaystyle \sum_{i=1}^{n+1} \mathbb{E} \big[ \| x_i^k - z^k \|^2 \,\big|\, \mathcal{F}_k \big] \\
& = 4 ( n + 1 ) \, \mathbb{E} \big[ \| x_{n+1}^k - z^k \|^2 \,\big|\, \mathcal{F}_k \big] + 4 ( n + 1 ) \displaystyle \sum_{i=1}^{n} \| x_i^k - z^k \|^2 \\
& \leq 4 ( n + 1 ) \, \mathbb{E} \big[ \| x_{n+1}^k - z^k \|^2 \,\big|\, \mathcal{F}_k \big] + 8 ( n + 1 ) \displaystyle \sum_{i=1}^{n} \| z^* - x_i^k \|^2 + 8 ( n + 1 )^2 \| z^k - z^* \|^2 \\
& \leq 8 ( n + 1 ) \big( 2 \tau^2 + 6 ( n + 1 ) + 1 + 9 \overline{\rho}^2 ( L^2 + 1 ) ( N + 1 ) \big) \| p^k - p^* \|^2 \\
& \qquad + 1 6 \, \overline{\rho}^2 ( n + 1 ) \big( 2 \| B ( z^* ) \|^2 + N \big) ,
\end{array}
+$$

where the final inequality employs (25) and (28).

All told, using (30) and (31) and simplifying the constants, one obtains

$$
\begin{array}{rl}
\mathbb{E} [ \| \nabla \varphi_k \|^2 \,|\, \mathcal{F}_k ] & = \mathbb{E} [ \| \nabla_z \varphi_k \|^2 \,|\, \mathcal{F}_k ] + \displaystyle \sum_{i=1}^{n+1} \mathbb{E} [ \| \nabla_{w_i} \varphi_k \|^2 \,|\, \mathcal{F}_k ] \\
& \leq C_1 \| p^k - p^* \|^2 + C_2 ,
\end{array}
+$$

where

$$
\begin{array}{c}
C_1 = 2 4 ( 1 + 1 0 \overline{\rho}^2 ) ( n + 1 ) ( L^2 + 1 )^2 ( N + 1 )^2 \\
\qquad + 8 ( n + 1 ) \left( 2 \tau^2 + 6 ( n + 1 ) + 1 + 3 ( n + 1 )^2 \tau^{-2} \right)
\end{array}
+$$

and

$$
\begin{array}{l}
C_2 = 1 6 ( N + 1 ) \left[ 1 + 4 \overline{\rho}^2 ( n + 1 ) + 9 \overline{\rho}^2 L^2 ( N + 1 ) \right] \| B ( z^* ) \|^2 + 8 \Big\| \displaystyle \sum_{i=1}^{n} w_i^* \Big\|^2 \\
\qquad + 1 2 \overline{\rho}^2 N \big( 2 L^2 ( N + 1 ) + n + 1 \big) + 4 N .
\end{array}
+$$

# C.4 LOWER BOUND FOR $\varphi _ { k }$ -GAP

Recalling (13), that is,

$$
\| p ^ { k + 1 } - p ^ { * } \| ^ { 2 } = \| p ^ { k } - p ^ { * } \| ^ { 2 } - 2 \alpha _ { k } ( \varphi _ { k } ( p ^ { k } ) - \varphi _ { k } ( p ^ { * } ) ) + \alpha _ { k } ^ { 2 } \| \nabla \varphi _ { k } \| ^ { 2 } .
+$$ + +We may use the gradient bound from (32) to obtain + +$$ +\begin{array} { r } { \mathbb { E } [ \| p ^ { k + 1 } - p ^ { * } \| ^ { 2 } | \mathcal { F } _ { k } ] \le ( 1 + C _ { 1 } \alpha _ { k } ^ { 2 } ) \| p ^ { k } - p ^ { * } \| ^ { 2 } - 2 \alpha _ { k } \mathbb { E } [ \varphi _ { k } ( p ^ { k } ) - \varphi _ { k } ( p ^ { * } ) | \mathcal { F } _ { k } ] + C _ { 2 } \alpha _ { k } ^ { 2 } . } \end{array} +$$ + +We now focus on finding a lower bound for the term $\mathbb { E } [ \varphi _ { k } ( p ^ { k } ) - \varphi _ { k } ( p ^ { * } ) | \mathcal { F } _ { k } ]$ , which we call the β€œ $\varphi _ { k }$ -gap”. Recall that for $p = ( z , w _ { 1 } , \ldots , w _ { n + 1 } )$ , + +$$ +\varphi _ { k } ( p ) = \sum _ { i = 1 } ^ { n + 1 } \langle z - x _ { i } ^ { k } , y _ { i } ^ { k } - w _ { i } \rangle . +$$ + +For each $i \in { 1 . . ( n + 1 ) }$ , define $\varphi _ { i , k } ( p ) \doteq \langle z - x _ { i } ^ { k } , y _ { i } ^ { k } - w _ { i } \rangle$ . We will call $\mathbb { E } [ \varphi _ { i , k } ( p ^ { k } ) - \varphi _ { i , k } ( p ^ { * } ) \vert \mathcal { F } _ { k } ]$ the β€œ $\varphi _ { i , k }$ -gap”. Note that $\begin{array} { r } { \varphi _ { k } ( p ) = \sum _ { i = 1 } ^ { n + 1 } \varphi _ { i , k } ( p ) } \end{array}$ . + +C.5 LOWER BOUND FOR $\varphi _ { i , k }$ -GAP OVER $i \in 1 . . n$ + +For $i \in 1 . . n$ , we have from line 5 of the algorithm that + +$$ +z ^ { k } - x _ { i } ^ { k } = \tau ( y _ { i } ^ { k } - w _ { i } ^ { k } ) . +$$ + +Since $\varphi _ { i , k } ( p ^ { k } ) = \langle z ^ { k } - x _ { i } ^ { k } , y _ { i } ^ { k } - w _ { i } ^ { k } \rangle$ , one may conclude that for $i \in 1 . . n$ + +$$ +\varphi _ { i , k } ( p ^ { k } ) = \frac { \tau } { 2 } \| y _ { i } ^ { k } - w _ { i } ^ { k } \| ^ { 2 } + \frac { 1 } { 2 \tau } \| z ^ { k } - x _ { i } ^ { k } \| ^ { 2 } . +$$ + +On the other hand, for $p ^ { * } \in { \mathcal { S } }$ and $i \in 1 . . 
n$ , one also has

$$
- \varphi _ { i , k } ( p ^ { * } ) = \langle z ^ { * } - x _ { i } ^ { k } , \, w _ { i } ^ { * } - y _ { i } ^ { k } \rangle \geq 0
+$$

by the monotonicity of $A _ { i }$ . Therefore, for $i \in 1 . . n$ , it holds that

$$
\varphi _ { i , k } ( p ^ { k } ) - \varphi _ { i , k } ( p ^ { * } ) \geq \frac { \tau } { 2 } \| y _ { i } ^ { k } - w _ { i } ^ { k } \| ^ { 2 } + \frac { 1 } { 2 \tau } \| z ^ { k } - x _ { i } ^ { k } \| ^ { 2 } ,
+$$

and taking expectations conditioned on $\mathcal { F } _ { k }$ leads to

$$
\mathbb { E } [ \varphi _ { i , k } ( p ^ { k } ) - \varphi _ { i , k } ( p ^ { * } ) | \mathcal { F } _ { k } ] \ge \frac { \tau } { 2 } \| y _ { i } ^ { k } - w _ { i } ^ { k } \| ^ { 2 } + \frac { 1 } { 2 \tau } \| z ^ { k } - x _ { i } ^ { k } \| ^ { 2 }
+$$

where we have used that $x _ { i } ^ { k }$ and $y _ { i } ^ { k }$ are both $\mathcal { F } _ { k }$ -measurable for $i \in 1 . . n$ .

# C.6 LOWER BOUND FOR $\varphi _ { n + 1 , k }$ -GAP

From lines 6-7 of the algorithm, we have

$$
z ^ { k } - x _ { n + 1 } ^ { k } = \rho _ { k } ( B ( z ^ { k } ) - w _ { n + 1 } ^ { k } + \epsilon ^ { k } ) .
+$$

Therefore,

$$
\begin{array}{rl}
\varphi_{n+1,k} ( p^k ) & = \langle z^k - x_{n+1}^k , \, y_{n+1}^k - w_{n+1}^k \rangle \\
& \overset{(a)}{=} \rho_k \langle B ( z^k ) - w_{n+1}^k + \epsilon^k , \, B ( x_{n+1}^k ) + e^k - w_{n+1}^k \rangle \\
& = \rho_k \| B ( z^k ) - w_{n+1}^k \|^2 + \rho_k \langle \epsilon^k , B ( z^k ) - w_{n+1}^k \rangle + \langle z^k - x_{n+1}^k , e^k \rangle \\
& \qquad + \rho_k \langle B ( z^k ) - w_{n+1}^k + \epsilon^k , \, B ( x_{n+1}^k ) - B ( z^k ) \rangle \\
& \geq \rho_k ( 1 - \rho_k L ) \| B ( z^k ) - w_{n+1}^k \|^2 + \rho_k ( 1 - 2 \rho_k L ) \langle \epsilon^k , B ( z^k ) - w_{n+1}^k \rangle \\
& \qquad + \langle z^k - x_{n+1}^k , e^k \rangle - \rho_k^2 L \| \epsilon^k \|^2 ,
\end{array}
+$$

where equality (a) uses line 8 of the algorithm and the inequality employs the Cauchy–Schwarz inequality followed by Lipschitz continuity of $B$ .

On the other hand,

$$
\begin{array}{rl}
- \varphi_{n+1,k} ( p^* ) & = \langle z^* - x_{n+1}^k , \, w_{n+1}^* - y_{n+1}^k \rangle \\
& = \langle z^* - x_{n+1}^k , \, B ( z^* ) - B ( x_{n+1}^k ) \rangle + \langle x_{n+1}^k - z^* , e^k \rangle \\
& \geq \langle x_{n+1}^k - z^* , e^k \rangle ,
\end{array}
+$$

where the second equality uses line 8 of the algorithm and the inequality follows from the monotonicity of $B$ .

Combining (39) and (40) yields

$$
\begin{array}{rl}
& { \varphi_{n+1,k} ( p^k ) - \varphi_{n+1,k} ( p^* ) \geq \rho_k ( 1 - \rho_k L ) \| B ( z^k ) - w_{n+1}^k \|^2 + \rho_k ( 1 - 2 \rho_k L ) \langle \epsilon^k , B ( z^k ) - w_{n+1}^k \rangle } \\
& { \qquad + \langle z^k - x_{n+1}^k , e^k \rangle + \langle x_{n+1}^k - z^* , e^k \rangle - \rho_k^2 L \| \epsilon^k \|^2 } \\
& { \qquad = \rho_k ( 1 - \rho_k L ) \| B ( z^k ) - w_{n+1}^k \|^2 - \rho_k^2 L \| \epsilon^k \|^2 } \\
& { \qquad + \rho_k ( 1 - 2 \rho_k L ) \langle \epsilon^k , B ( z^k ) - w_{n+1}^k \rangle + \langle z^k - z^* , e^k \rangle .
\qquad ( 4 1 ) } \end{array}
+$$

Now, if we take expectations conditioned on $\mathcal { F } _ { k }$ and use (9), we obtain

$$
\mathbb { E } \big [ \langle z ^ { k } - z ^ { * } , e ^ { k } \rangle \bigm | \mathcal F _ { k } \big ] = \langle z ^ { k } - z ^ { * } , \mathbb { E } [ e ^ { k } | \mathcal F _ { k } ] \rangle = 0 .
+$$

Similarly, (9) also yields

$$
\mathbb { E } \big [ \langle \epsilon ^ { k } , B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \rangle \big | \mathcal { F } _ { k } \big ] = \langle \mathbb { E } [ \epsilon ^ { k } | \mathcal { F } _ { k } ] , B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \rangle = 0 .
+$$

Thus, using (42) and (43) and taking expectations of (41) yields

$$
\begin{array}{rl}
\mathbb{E} [ \varphi_{n+1,k} ( p^k ) - \varphi_{n+1,k} ( p^* ) \mid \mathcal{F}_k ] & \ge \rho_k ( 1 - \rho_k L ) \| B ( z^k ) - w_{n+1}^k \|^2 - \rho_k^2 L \, \mathbb{E} [ \| \epsilon^k \|^2 | \mathcal{F}_k ] \\
& \ge \rho_k ( 1 - \overline{\rho} L ) \| B ( z^k ) - w_{n+1}^k \|^2 - \rho_k^2 N L ( 1 + \| B ( z^k ) \|^2 ) ,
\end{array}
+$$

where in the second inequality we used (12) and the noise variance bound (10). Recall from (12) that $1 - \overline { { \rho } } L > 0$ .

Next, we remark that

$$
\begin{array}{rl} & { \| B ( z ^ { k } ) \| ^ { 2 } = \| B ( z ^ { k } ) - B ( z ^ { * } ) + B ( z ^ { * } ) \| ^ { 2 } } \\ & { \qquad \leq 2 L ^ { 2 } \| z ^ { k } - z ^ { * } \| ^ { 2 } + 2 \| B ( z ^ { * } ) \| ^ { 2 } \leq 2 L ^ { 2 } \| p ^ { k } - p ^ { * } \| ^ { 2 } + 2 \| B ( z ^ { * } ) \| ^ { 2 } .
} \end{array}
+$$

Substituting this inequality into (44) yields

$$
\begin{array}{rl}
\mathbb{E} [ \varphi_{n+1,k} ( p^k ) - \varphi_{n+1,k} ( p^* ) | \mathcal{F}_k ] & \geq \rho_k ( 1 - \overline{\rho} L ) \| B ( z^k ) - w_{n+1}^k \|^2 \\
& \qquad - 2 \rho_k^2 N L^3 \| p^k - p^* \|^2 - \rho_k^2 N L ( 1 + 2 \| B ( z^* ) \|^2 ) .
\end{array}
+$$

Finalizing the lower bound on the $\varphi _ { k }$ -gap Summing (37) over $i \in 1 . . n$ and using (45) yields

$$
\begin{array}{rl}
\mathbb{E} [ \varphi_k ( p^k ) - \varphi_k ( p^* ) | \mathcal{F}_k ] & = \displaystyle \sum_{i=1}^{n+1} \mathbb{E} [ \varphi_{i,k} ( p^k ) - \varphi_{i,k} ( p^* ) | \mathcal{F}_k ] \\
& \geq \displaystyle \frac{\tau}{2} \sum_{i=1}^{n} \| y_i^k - w_i^k \|^2 + \frac{1}{2\tau} \displaystyle \sum_{i=1}^{n} \| z^k - x_i^k \|^2 \\
& \qquad + \rho_k ( 1 - \overline{\rho} L ) \| B ( z^k ) - w_{n+1}^k \|^2 - 2 \rho_k^2 N L^3 \| p^k - p^* \|^2 \\
& \qquad - \rho_k^2 N L ( 1 + 2 \| B ( z^* ) \|^2 ) .
\end{array}
+$$

# C.7 ESTABLISHING STOCHASTIC QUASI-FEJER MONOTONICITY

Returning to (35),

$\begin{array}{r} { \mathbb { E } [ \| p ^ { k + 1 } - p ^ { * } \| ^ { 2 } | \mathcal { F } _ { k } ] \le ( 1 + C _ { 1 } \alpha _ { k } ^ { 2 } ) \| p ^ { k } - p ^ { * } \| ^ { 2 } - 2 \alpha _ { k } \mathbb { E } [ \varphi _ { k } ( p ^ { k } ) - \varphi _ { k } ( p ^ { * } ) | \mathcal { F } _ { k } ] + C _ { 2 } \alpha _ { k } ^ { 2 } , } \end{array}$ we may now substitute (46) for the expectation on the right-hand side.
First, define

$$
T _ { k } \doteq \frac { \tau } { \overline { { \rho } } } \sum _ { i = 1 } ^ { n } \| y _ { i } ^ { k } - w _ { i } ^ { k } \| ^ { 2 } + \frac { 1 } { \overline { { \rho } } \tau } \sum _ { i = 1 } ^ { n } \| z ^ { k } - x _ { i } ^ { k } \| ^ { 2 } + 2 ( 1 - \overline { { \rho } } L ) \| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \| ^ { 2 } ,
+$$

after which we may use (46) in (35) to yield

$$
\mathbb { E } [ \| p ^ { k + 1 } - p ^ { * } \| ^ { 2 } | \mathcal { F } _ { k } ] \le \big ( 1 + C _ { 1 } \alpha _ { k } ^ { 2 } + C _ { 3 } \alpha _ { k } \rho _ { k } ^ { 2 } \big ) \| p ^ { k } - p ^ { * } \| ^ { 2 } - \alpha _ { k } \rho _ { k } T _ { k } + C _ { 2 } \alpha _ { k } ^ { 2 } + C _ { 4 } \alpha _ { k } \rho _ { k } ^ { 2 } ,
+$$

where $C _ { 1 }$ and $C _ { 2 }$ are defined as before in (33) and (34) and

$$
\begin{array} { l } { C _ { 3 } = 4 N L ^ { 3 } } \\ { C _ { 4 } = 2 N L ( 1 + 2 \| B ( z ^ { \ast } ) \| ^ { 2 } ) . } \end{array}
+$$

This completes the proof of Lemma 3.

# C.8 A CONVERGENCE LEMMA

Before establishing almost-sure convergence, we need the following lemma to derive convergence of the iterates from convergence of $T _ { k }$ defined above. Note that a more elaborate result would be needed in an infinite-dimensional setting.

Lemma 4.
For deterministic sequences $z ^ { k } \in \mathbb { R } ^ { ( n + 1 ) d }$ , $\{ ( w _ { i } ^ { k } ) _ { i = 1 } ^ { n + 1 } \} \in \mathcal { P }$ , and $\{ ( x _ { i } ^ { k } , y _ { i } ^ { k } ) _ { i = 1 } ^ { n + 1 } \} \in \mathbb { R } ^ { 2 ( n + 1 ) d }$ , suppose that $y _ { i } ^ { k } \in A _ { i } ( x _ { i } ^ { k } )$ for $i \in 1 . . n$ , $\textstyle \sum _ { i = 1 } ^ { n + 1 } w _ { i } ^ { k } = 0$ ,

$$
\xi _ { 1 } \sum _ { i = 1 } ^ { n } \| y _ { i } ^ { k } - w _ { i } ^ { k } \| ^ { 2 } + \xi _ { 2 } \sum _ { i = 1 } ^ { n } \| z ^ { k } - x _ { i } ^ { k } \| ^ { 2 } + \xi _ { 3 } \| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \| ^ { 2 } \to 0
+$$

for scalars $\xi _ { 1 } , \xi _ { 2 } , \xi _ { 3 } > 0$ , and $p ^ { k } \doteq ( z ^ { k } , w _ { 1 } ^ { k } , \ldots , w _ { n + 1 } ^ { k } ) \to \hat { p } \doteq ( \hat { z } , \hat { w } _ { 1 } , \ldots , \hat { w } _ { n + 1 } )$ . Then $\hat { p } \in \mathcal S$ .

Proof. Fix any $i \in \{ 1 , \ldots , n \}$ . Since $\| y _ { i } ^ { k } - w _ { i } ^ { k } \| \to 0$ by (50) and $w _ { i } ^ { k } \to \hat { w } _ { i }$ , we also have $y _ { i } ^ { k } \to \hat { w } _ { i }$ . Similarly, (50) also implies that $\lVert z ^ { k } - x _ { i } ^ { k } \rVert \to 0$ , so from $z ^ { k } \to \hat { z }$ we also have $x _ { i } ^ { k } \to \hat { z }$ . Since $y _ { i } ^ { k } \in A _ { i } ( x _ { i } ^ { k } )$ and $( x _ { i } ^ { k } , y _ { i } ^ { k } ) \to ( \hat { z } , \hat { w } _ { i } )$ , (Bauschke & Combettes, 2017, Prop. 20.37) implies $\hat { w } _ { i } \in A _ { i } ( \hat { z } )$ . Since $i$ was arbitrary, the preceding conclusions hold for $i \in 1 . . n$ .

Now, (50) also implies that $\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \| \to 0$ . Therefore, since $w _ { n + 1 } ^ { k } \to \hat { w } _ { n + 1 }$ , we also have $B ( z ^ { k } ) \to \hat { w } _ { n + 1 }$ .
Much as before, since $( z ^ { k } , B ( z ^ { k } ) ) \to ( \hat { z } , \hat { w } _ { n + 1 } )$ , we may apply (Bauschke & Combettes, 2017, Prop. 20.37) to conclude that $\hat { w } _ { n + 1 } = B ( \hat { z } )$ .

Since the linear subspace $\mathcal { P }$ defined in (6) must be closed, the limit $\left( \hat { z } , \hat { w } _ { 1 } , \dots , \hat { w } _ { n + 1 } \right)$ of $\{ ( z ^ { k } , w _ { 1 } ^ { k } , \ldots , w _ { n + 1 } ^ { k } ) \} \subset \mathcal { P }$ must be in $\mathcal { P }$ , hence $\textstyle \sum _ { i = 1 } ^ { n + 1 } { \hat { w } } _ { i } = 0$ .

In summary, $\hat { p } = ( \hat { z } , \hat { w } _ { 1 } , \dots , \hat { w } _ { n + 1 } )$ satisfies the conditions defining membership in $\mathcal { S }$ : $\hat { w } _ { i } \in A _ { i } ( \hat { z } )$ for $i \in 1 . . n$ , $\hat { w } _ { n + 1 } = B ( \hat { z } )$ , and $\textstyle \sum _ { i = 1 } ^ { n + 1 } { \hat { w } } _ { i } = 0$ . Therefore $\hat { p } \in \mathcal S$ .

# C.9 FINISHING THE PROOF OF THEOREM 1

Given $\textstyle \sum _ { k } \alpha _ { k } ^ { 2 } < \infty$ , and $\sum \alpha _ { k } \rho _ { k } ^ { 2 } < \infty$ , (47) satisfies the conditions of Stochastic Quasi-Fejer Monotonicity as given in Lemma 2. By applying Lemma 2, we conclude that there exist $\Omega _ { 1 } , \Omega _ { 2 } , \Omega _ { 3 }$ such that $P [ \Omega _ { i } ] = 1$ for $i = 1 , 2 , 3$ and

1. for all $v \in \Omega _ { 1 }$

$$
\sum _ { k = 1 } ^ { \infty } \alpha _ { k } \rho _ { k } T _ { k } ( v ) < \infty ,
+$$

2. for all $v \in \Omega _ { 2 }$ , and $p ^ { * } \in { \mathcal { S } }$ , $\| p ^ { k } ( v ) - p ^ { * } \|$ converges to a finite nonnegative random variable,

3. for all $v \in \Omega _ { 3 } , p ^ { k } ( v )$ remains bounded.

Since $\textstyle \sum _ { k = 1 } ^ { \infty } \alpha _ { k } \rho _ { k } = \infty$ , (51) implies that for all $v \in \Omega _ { 1 }$ there exists a subsequence $q _ { k } ( v )$ such that

$$
T _ { q _ { k } ( v ) } \to 0 .
+$$

Let $\Omega ^ { \prime } = \Omega _ { 1 } \cap \Omega _ { 2 } \cap \Omega _ { 3 }$ and note that $P [ \Omega ^ { \prime } ] = 1$ . Choose $v \in \Omega ^ { \prime }$ . Since $p ^ { k } ( v )$ remains bounded, so does $p ^ { q _ { k } ( v ) } ( v )$ for $q _ { k } ( v )$ defined above in (52). Thus there exists a subsequence $r _ { k } ( v ) \subseteq q _ { k } ( v )$ and $\hat { p } ( v ) \in \mathbb { R } ^ { ( n + 2 ) d }$ such that $p ^ { r _ { k } ( v ) } ( v ) \to \hat { p } ( v )$ . But since $T _ { q _ { k } ( v ) } \to 0$ , it also follows that $T _ { r _ { k } ( v ) } \to 0$ , that is,

$$
\begin{array}{rl}
& \displaystyle \frac{\tau}{\overline{\rho}} \sum_{i=1}^{n} \| y_i^{r_k(v)} ( v ) - w_i^{r_k(v)} ( v ) \|^2 + \frac{1}{\overline{\rho} \tau} \sum_{i=1}^{n} \| z^{r_k(v)} ( v ) - x_i^{r_k(v)} ( v ) \|^2 \\
& \qquad + 2 ( 1 - \overline{\rho} L ) \| B ( z^{r_k(v)} ( v ) ) - w_{n+1}^{r_k(v)} ( v ) \|^2 \to 0 .
\end{array}
+$$

We then have from Lemma 4 that $\hat { p } ( v ) \in \mathcal S$ .

Since $p ^ { r _ { k } ( v ) } ( v ) \to \hat { p } ( v )$ , it follows that $\lVert p ^ { r _ { k } ( v ) } ( v ) - \hat { p } ( v ) \rVert \to 0$ . But since $\hat { p } ( v ) \in \mathcal S$ , $\| p ^ { k } ( v ) - \hat { p } ( v ) \|$ converges by point 2 above. Thus

$$
\operatorname* { l i m } _ { k \to \infty } \| p ^ { k } ( v ) - \hat { p } ( v ) \| = \operatorname* { l i m } _ { k \to \infty } \| p ^ { r _ { k } ( v ) } ( v ) - \hat { p } ( v ) \| = 0 .
+$$

Therefore $p ^ { k } ( v ) \to \hat { p } ( v ) \in \mathcal { S }$ . Thus there exists $\hat { p } \in \mathcal S$ such that $p ^ { k } \to \hat { p }$ a.s., which completes the proof of Theorem 1.

# C.10 TWO ADDITIONAL RESULTS

In this section, we prove two additional useful results about SPS. First, that $x _ { i } ^ { k } \to \hat { z }$ (a.s.) for $i = 1 , \ldots , n$ .
Second, that $G _ { k } \to 0$ (a.s.).

Note that

$$
x _ { i } ^ { k } = J _ { \tau A _ { i } } ( z ^ { k } + \tau w _ { i } ^ { k } )
+$$

and since $z ^ { k }$ and $w _ { i } ^ { k }$ converge a.s., so does $x _ { i } ^ { k }$ . Consider the subsequence $q _ { k } ( v )$ such that (52) holds. Then

$$
z ^ { q _ { k } ( v ) } - x _ { i } ^ { q _ { k } ( v ) } \to 0
+$$

thus

$$
x _ { i } ^ { q _ { k } ( v ) } \to \hat { z } .
+$$

Since $x _ { i } ^ { k }$ converges to some limit (a.s.), that limit must be $\hat { z }$ .

Recall that

$$
G _ { k } \doteq \sum _ { i = 1 } ^ { n } \| y _ { i } ^ { k } - w _ { i } ^ { k } \| ^ { 2 } + \sum _ { i = 1 } ^ { n } \| z ^ { k } - x _ { i } ^ { k } \| ^ { 2 } + \| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \| ^ { 2 } .
+$$

We have shown that $z ^ { k }$ and $x _ { i } ^ { k }$ share the same limit for $i = 1 , \ldots , n$ (a.s.). Therefore $z ^ { k } - x _ { i } ^ { k } \to 0$ (a.s.). Since

$$
y _ { i } ^ { k } - w _ { i } ^ { k } = \tau ^ { - 1 } ( z ^ { k } - x _ { i } ^ { k } ) ,
+$$

it follows that $y _ { i } ^ { k } - w _ { i } ^ { k } \to 0$ (a.s.) for $i = 1 , \ldots , n$ . Therefore

$$
G _ { k } \to \| B ( \hat { z } ) - \hat { w } _ { n + 1 } \| ^ { 2 } .
+$$

But since $( \hat { z } , \hat { w } _ { 1 } , \dots , \hat { w } _ { n + 1 } ) \in \mathcal S$ , $\hat { w } _ { n + 1 } = B ( \hat { z } )$ , implying that $G _ { k } \to 0$ (a.s.).

# D PROOF OF LEMMA 1

If $G _ { k } = 0$ , then

$$
\forall i = 1 , \ldots , n : \quad y _ { i } ^ { k } = w _ { i } ^ { k } \mathrm { ~ a n d ~ } z ^ { k } = x _ { i } ^ { k } .
+$$

Since $y _ { i } ^ { k } \in A _ { i } ( x _ { i } ^ { k } )$ for $i = 1 , \ldots , n$ , (53) implies that

$$
\forall i \in 1 . . n : \quad w _ { i } ^ { k } \in A _ { i } ( z ^ { k } ) .
+$$

Furthermore $G _ { k } = 0$ also implies that $w _ { n + 1 } ^ { k } = B ( z ^ { k } )$ .
Finally, since $\textstyle \sum _ { i = 1 } ^ { n + 1 } w _ { i } ^ { k } = 0$ , we have that

$$
( z ^ { k } , w _ { 1 } ^ { k } , \ldots , w _ { n + 1 } ^ { k } ) \in { \mathcal { S } } .
+$$

Conversely, suppose $( z ^ { k } , w _ { 1 } ^ { k } , \ldots , w _ { n + 1 } ^ { k } ) \in { \mathcal { S } }$ . The definition of $\mathcal { S }$ implies that $B ( z ^ { k } ) = w _ { n + 1 } ^ { k }$ and furthermore that $w _ { i } ^ { k } \in A _ { i } ( z ^ { k } )$ for $i \in 1 . . n$ . For any $i \in 1 . . n$ , considering line 3 of Algorithm 1, we may write $t _ { i } ^ { k } = z ^ { k } + \tau w _ { i } ^ { k } \in ( I + \tau A _ { i } ) ( z ^ { k } )$ , implying $z ^ { k } \in ( I + \tau A _ { i } ) ^ { - 1 } ( t _ { i } ^ { k } )$ . But since the resolvent $J _ { \tau A _ { i } } = ( I + \tau A _ { i } ) ^ { - 1 }$ is single-valued (Bauschke & Combettes, 2017, Prop. 23.8), we must have $z ^ { k } = ( I + \tau A _ { i } ) ^ { - 1 } ( t _ { i } ^ { k } )$ . Thus, by line 4, we have $x _ { i } ^ { k } = z ^ { k }$ . We may also derive from line 5 that

$$
y _ { i } ^ { k } = \tau ^ { - 1 } ( t _ { i } ^ { k } - x _ { i } ^ { k } ) = \tau ^ { - 1 } ( z ^ { k } + \tau w _ { i } ^ { k } - z ^ { k } ) = w _ { i } ^ { k } .
+$$

Thus, since $x _ { i } ^ { k } = z ^ { k }$ and $y _ { i } ^ { k } = w _ { i } ^ { k }$ for $i = 1 , \ldots , n$ and $w _ { n + 1 } ^ { k } = B ( z ^ { k } )$ , we have that $G _ { k } = 0$ .

# E PROOF OF THEOREM 2

In addition to the proof, we provide a more detailed statement of the theorem:

Theorem 3. Fix the total iterations $K \geq 1$ of Algorithm 1 and set

$$
\begin{array} { l l } { { \forall k = 1 , \dots , K : } } & { { \qquad \rho _ { k } = \rho \doteq \operatorname* { m i n } \left\{ K ^ { - 1 / 4 } , \frac { 1 } { 2 L } \right\} } } \\ { { \forall k = 1 , \dots , K : } } & { { \qquad \alpha _ { k } = \alpha \doteq C _ { f } \rho ^ { 2 } } } \end{array}
+$$

for some $C _ { f } > 0$ . Suppose (9)-(11) hold.
Then for any $p ^ { * } \in { \mathcal { S } }$ ,

$$
\begin{array}{l}
\displaystyle \frac{1}{K} \sum_{j=1}^{K} \mathbb{E} [ G_j ] \leq \frac{8 L^3 \exp \left( C_f ( C_1 + C_3 ) \right)}{C_f \operatorname*{min} \{ \tau , \tau^{-1} \} K} \left( \| p^1 - p^* \|^2 + \frac{C_f C_2 + C_4}{C_f C_1 + C_3} \right) \quad \textit{for } K < ( 2 L )^4 \\
\displaystyle \frac{1}{K} \sum_{j=1}^{K} \mathbb{E} [ G_j ] \leq \frac{\exp \left( C_f ( C_1 + C_3 ) \right)}{C_f \operatorname*{min} \{ \tau , \tau^{-1} \} K^{1/4}} \left( \| p^1 - p^* \|^2 + \frac{C_f C_2 + C_4}{C_f C_1 + C_3} \right) \quad \textit{for } K \geq ( 2 L )^4 .
\end{array}
+$$

where $G _ { k }$ is the approximation residual defined in (14), and $C _ { 1 } , C _ { 2 } , C _ { 3 } , C _ { 4 }$ are the nonnegative constants defined in (33), (34), (48), and (49), respectively. Therefore,

$$
\frac { 1 } { K } \sum _ { j = 1 } ^ { K } \mathbb { E } [ G _ { j } ] = \mathcal { O } ( K ^ { - 1 / 4 } ) .
+$$

Proof. Fix $\alpha _ { k } = \alpha$ and $\rho _ { k } = \rho$ , where $\alpha$ and $\rho$ are the respective right-hand sides of (55)-(56). Lemma 3 implies that (21) holds so long as (9)-(11) hold and the stepsize $\rho$ satisfies $\rho < L ^ { - 1 }$ . Since

$$
\rho = \operatorname* { m i n } \left\{ K ^ { - 1 / 4 } , \frac { 1 } { 2 L } \right\} \leq \frac { 1 } { 2 L } ,
+$$

we conclude that (21) applies.
+ +Rewriting (21) with $\alpha _ { k } = \alpha$ and $\rho _ { k } = \rho$ , we have + +$$ +\begin{array} { r } { \mathbb { E } [ \| p ^ { k + 1 } - p ^ { * } \| ^ { 2 } | \mathcal { F } _ { k } ] \le ( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } ) \| p ^ { k } - p ^ { * } \| ^ { 2 } - \alpha \rho T _ { k } + C _ { 2 } \alpha ^ { 2 } + C _ { 4 } \alpha \rho ^ { 2 } . } \end{array} +$$ + +Therefore, taking expectations over $\mathcal { F } _ { k }$ , we have + +$$ +\begin{array} { r } { \mathbb { E } \| p ^ { k + 1 } - p ^ { * } \| ^ { 2 } \leq ( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } ) \mathbb { E } \| p ^ { k } - p ^ { * } \| ^ { 2 } - \alpha \rho \mathbb { E } T _ { k } + C _ { 2 } \alpha ^ { 2 } + C _ { 4 } \alpha \rho ^ { 2 } . } \end{array} +$$ + +Recall that + +$$ +T _ { k } \doteq \frac { \tau } { \rho } \sum _ { i = 1 } ^ { n } \| y _ { i } ^ { k } - w _ { i } ^ { k } \| ^ { 2 } + \frac { 1 } { \rho \tau } \sum _ { i = 1 } ^ { n } \| z ^ { k } - x _ { i } ^ { k } \| ^ { 2 } + 2 ( 1 - \overline { { \rho } } L ) \| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \| ^ { 2 } , +$$ + +where for the first two terms we have simply set $\rho = \overline { { \rho } }$ because the stepsize is constant. However, for the final term, we will still use an upper bound, $\overline { { \rho } }$ , on $\rho$ . In the current setting, we know that $\rho \leq ( 1 / 2 ) L ^ { - 1 }$ and therefore we may set $\overline { { \rho } } = ( 1 / 2 ) L ^ { - 1 }$ . Thus $1 - \overline { { \rho } } L = 1 / 2$ , leading to + +$$ +\rho \mathbb { E } T _ { k } = \tau \sum _ { i = 1 } ^ { n } \mathbb { E } \| y _ { i } ^ { k } - w _ { i } ^ { k } \| ^ { 2 } + \tau ^ { - 1 } \sum _ { i = 1 } ^ { n } \mathbb { E } \| z ^ { k } - x _ { i } ^ { k } \| ^ { 2 } + \rho \mathbb { E } \| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \| ^ { 2 } . 
+$$ + +Let + +$$ +U _ { k } \doteq \mathbb { E } \| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \| ^ { 2 } \qquad W _ { k } \doteq \tau \sum _ { i = 1 } ^ { n } \mathbb { E } \| y _ { i } ^ { k } - w _ { i } ^ { k } \| ^ { 2 } + \tau ^ { - 1 } \sum _ { i = 1 } ^ { n } \mathbb { E } \| z ^ { k } - x _ { i } ^ { k } \| ^ { 2 } , +$$ + +so that + +$$ +\rho \mathbb { E } T _ { k } = \rho U _ { k } + W _ { k } , +$$ + +and also let + +$$ +V _ { k } \doteq \mathbb { E } \| p ^ { k } - p ^ { * } \| ^ { 2 } . +$$ + +Using these definitions in (59) we write + +$$ +\begin{array} { r } { V _ { k + 1 } \leq \big ( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } \big ) V _ { k } - \alpha \rho U _ { k } - \alpha W _ { k } + C _ { 2 } \alpha ^ { 2 } + C _ { 4 } \alpha \rho ^ { 2 } . } \end{array} +$$ + +Therefore, + +$$ +\begin{array} { c } { { V _ { k + 1 } + \alpha \rho U _ { k } + \alpha W _ { k } \leq ( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } ) V _ { k } + C _ { 2 } \alpha ^ { 2 } + C _ { 4 } \alpha \rho ^ { 2 } } } \\ { { \Longleftrightarrow V _ { k + 1 } + \alpha \rho \displaystyle \sum _ { j = 1 } ^ { k } U _ { j } + \alpha \displaystyle \sum _ { j = 1 } ^ { k } W _ { j } \leq ( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } ) V _ { k } + \alpha \rho \displaystyle \sum _ { j = 1 } ^ { k - 1 } U _ { j } + \alpha \displaystyle \sum _ { j = 1 } ^ { k - 1 } W _ { j } } } \\ { { \qquad + C _ { 2 } \alpha ^ { 2 } + C _ { 4 } \alpha \rho ^ { 2 } } } \\ { { \leq ( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } ) \left[ V _ { k } + \alpha \rho \displaystyle \sum _ { j = 1 } ^ { k - 1 } U _ { j } + \alpha \displaystyle \sum _ { j = 1 } ^ { k - 1 } W _ { j } \right] } } \\ { { \qquad + C _ { 2 } \alpha ^ { 2 } + C _ { 4 } \alpha \rho ^ { 2 } , } } \end{array} +$$ + +where we have used that $U _ { k } , W _ { k } \ge 0$ . 
Letting + +$$ +R _ { k } = V _ { k } + \alpha \rho \sum _ { j = 1 } ^ { k - 1 } U _ { j } + \alpha \sum _ { j = 1 } ^ { k - 1 } W _ { j } , +$$ + +we then have + +$$ +R _ { k + 1 } \leq { \left( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } \right) } R _ { k } + C _ { 2 } \alpha ^ { 2 } + C _ { 4 } \alpha \rho ^ { 2 } , +$$ + +which implies + +$$ +R _ { k + 1 } \leq ( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } ) ^ { k } R _ { 1 } + ( C _ { 2 } \alpha ^ { 2 } + C _ { 4 } \alpha \rho ^ { 2 } ) \sum _ { j = 1 } ^ { k } ( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } ) ^ { k - j } . +$$ + +Now, + +$$ +\begin{array} { r l r } { { \sum _ { j = 1 } ^ { k } ( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } ) ^ { k - j } = \sum _ { j = 0 } ^ { k - 1 } ( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } ) ^ { j } } } \\ & { } & { = \frac { ( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } ) ^ { k } - 1 } { ( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } ) - 1 } } \\ & { } & { = \frac { ( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } ) ^ { k } - 1 } { C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } } } \\ & { } & { \leq \frac { ( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } ) ^ { k } } { C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } } . } \end{array} +$$ + +Therefore, + +$$ +R _ { k + 1 } \leq ( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } ) ^ { k } \left( R _ { 1 } + { \frac { C _ { 2 } \alpha ^ { 2 } + C _ { 4 } \alpha \rho ^ { 2 } } { C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } } } \right) . +$$ + +Fix the number of iterations $K \geq 1$ . Now + +$$ +\rho = \operatorname* { m i n } \left\{ K ^ { - 1 / 4 } , \frac { 1 } { 2 L } \right\} \leq \frac { 1 } { K ^ { 1 / 4 } } \leq 1 . 
+$$ + +Therefore, + +$$ +\begin{array} { l } { \displaystyle \alpha \rho \sum _ { j = 1 } ^ { K } ( U _ { j } + W _ { j } ) \leq \alpha \rho \sum _ { j = 1 } ^ { K } U _ { j } + \alpha \sum _ { j = 1 } ^ { K } W _ { j } } \\ { \leq R _ { K + 1 } } \\ { \leq ( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } ) ^ { K } \left( R _ { 1 } + \frac { C _ { 2 } \alpha ^ { 2 } + C _ { 4 } \alpha \rho ^ { 2 } } { C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } } \right) . } \end{array} +$$ + +Dividing through by $\alpha \rho K$ , we obtain + +$$ +\frac { 1 } { K } \sum _ { j = 1 } ^ { K } ( U _ { j } + W _ { j } ) \leq \frac { ( 1 + C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } ) ^ { K } } { \alpha \rho K } \left( R _ { 1 } + \frac { C _ { 2 } \alpha ^ { 2 } + C _ { 4 } \alpha \rho ^ { 2 } } { C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } } \right) , +$$ + +and since $\alpha = C _ { f } \rho ^ { 2 }$ , we also have + +$$ +\frac { C _ { 2 } \alpha ^ { 2 } + C _ { 4 } \alpha \rho ^ { 2 } } { C _ { 1 } \alpha ^ { 2 } + C _ { 3 } \alpha \rho ^ { 2 } } = \frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } . +$$ + +Furthermore, + +$$ +\rho \leq K ^ { - \frac { 1 } { 4 } } \implies \alpha \leq C _ { f } K ^ { - \frac { 1 } { 2 } } . 
+$$ + +Substituting these into (60) yields + +$$ +\begin{array} { r } { \displaystyle \frac { 1 } { K } \sum _ { j = 1 } ^ { K } ( U _ { j } + W _ { j } ) \leq \frac { \left( 1 + \frac { C _ { f } ( C _ { f } C _ { 1 } + C _ { 3 } ) } { K } \right) ^ { K } } { \alpha \rho K } \left( R _ { 1 } + \frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } \right) } \\ { \leq \frac { \exp ( C _ { f } ( C _ { f } C _ { 1 } + C _ { 3 } ) ) } { \alpha \rho K } \left( R _ { 1 } + \frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } \right) , } \end{array} +$$ + +where we have used that for any $t \ge 0 , 1 + t / K \le e ^ { t / K }$ , so therefore $( 1 + t / K ) ^ { K } \leq e ^ { t }$ . + +The worst-case rates in terms of $K$ occur when $\rho = K ^ { - 1 / 4 }$ and $\alpha = C _ { f } K ^ { - 1 / 2 }$ . This is the case when $K \geq ( 2 L ) ^ { 4 }$ . Substituting these into the denominator yields, for $K \geq ( 2 L ) ^ { 4 }$ , that + +$$ +\frac { 1 } { K } \sum _ { j = 1 } ^ { K } ( U _ { j } + W _ { j } ) \leq \frac { \exp ( C _ { f } ( C _ { f } C _ { 1 } + C _ { 3 } ) ) } { C _ { f } K ^ { 1 / 4 } } \left( R _ { 1 } + \frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } \right) . +$$ + +Thus, since $G _ { k } \leq \operatorname* { m a x } \{ \tau , \tau ^ { - 1 } \} \left( U _ { k } + W _ { k } \right)$ , we obtain + +$$ +\frac { 1 } { K } \sum _ { j = 1 } ^ { K } \mathbb { E } [ G _ { j } ] \leq \frac { \operatorname* { m a x } \{ \tau , \tau ^ { - 1 } \} \exp { ( C _ { f } ( C _ { f } C _ { 1 } + C _ { 3 } ) ) } } { C _ { f } K ^ { 1 / 4 } } \left( \| p ^ { 1 } - p ^ { * } \| ^ { 2 } + \frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } \right) , +$$ + +which is (58). + +When $K < ( 2 L ) ^ { 4 }$ , (57) can similarly be obtained by substituting $\rho = ( 2 L ) ^ { - 1 }$ and $\alpha = C _ { f } ( 2 L ) ^ { - 2 }$ into (61). 
β–‘

# F APPROXIMATION RESIDUALS

In this section we derive the approximation residual used to assess the performance of the algorithms in the numerical experiments. This residual relies on the following product-space reformulation of (1).

F.1 PRODUCT-SPACE REFORMULATION AND RESIDUAL PRINCIPLE

Recall (1), the monotone inclusion we are solving:

$$
{ \mathrm { F i n d ~ } } z \in \mathbb { R } ^ { d } : 0 \in \sum _ { i = 1 } ^ { n } A _ { i } ( z ) + B ( z ) .
$$

In this section we demonstrate a β€œproduct-space” reformulation of (1) which allows us to rewrite it in a standard form involving just two operators, one maximal monotone and the other monotone and Lipschitz. This approach was pioneered in (BriceΓ±o-Arias $\&$ Combettes, 2011; Combettes & Pesquet, 2012). Along with allowing for a simple definition of an approximation residual as a measure of approximation error in solving (1), it allows one to apply operator splitting methods originally formulated for two operators to problems such as (1) for any finite $n$ .

Observe that solving (1) is equivalent to

$$
\begin{array} { l l } { \mathrm { F i n d } \left( w _ { 1 } , \ldots , w _ { n } , z \right) \in \mathbb { R } ^ { \left( n + 1 \right) d } : } & { w _ { i } \in A _ { i } ( z ) , \quad i \in { 1 . . n } } \\ & { \quad \displaystyle 0 \in \sum _ { i = 1 } ^ { n } w _ { i } + B ( z ) . } \end{array}
$$

This formulation resembles that of the extended solution set $s$ used in projective splitting, as given in (5), except that it combines the final two conditions in the definition of $s$ , and thus does not need the final dual variable $w _ { n + 1 }$ . From the definition of the inverse of an operator, the above formulation is equivalent to

$$
\begin{array} { r l } { \mathrm { F i n d ~ } ( w _ { 1 } , \dots , w _ { n } , z ) \in \mathbb { R } ^ { ( n + 1 ) d } : } & { 0 \in A _ { i } ^ { - 1 } ( w _ { i } ) - z , \quad i \in 1 . . 
n } \\ & { 0 \in \displaystyle \sum _ { i = 1 } ^ { n } w _ { i } + B ( z ) . } \end{array}
$$

These conditions are in turn equivalent to finding $( w _ { 1 } , \ldots , w _ { n } , z ) \in \mathbb { R } ^ { ( n + 1 ) d }$ such that

$$
0 \in \mathcal { A } ( w _ { 1 } , \ldots , w _ { n } , z ) + \mathcal { B } ( w _ { 1 } , \ldots , w _ { n } , z ) ,
$$

where $\mathcal { A }$ is the set-valued map

$$
\mathcal { A } ( w _ { 1 } , \dots , w _ { n } , z ) \mapsto A _ { 1 } ^ { - 1 } ( w _ { 1 } ) \times A _ { 2 } ^ { - 1 } ( w _ { 2 } ) \times \dots \times A _ { n } ^ { - 1 } ( w _ { n } ) \times \{ 0 \}
$$

and $\mathcal { B }$ is the single-valued operator

$$
\mathcal { B } ( w _ { 1 } , \dots , w _ { n } , z ) \mapsto \left[ \begin{array} { c c c c } { 0 } & { \cdots } & { 0 } & { - I } \\ { \vdots } & { \ddots } & { \vdots } & { \vdots } \\ { 0 } & { \cdots } & { 0 } & { - I } \\ { I } & { \cdots } & { I } & { 0 } \end{array} \right] \left[ \begin{array} { c } { w _ { 1 } } \\ { \vdots } \\ { w _ { n } } \\ { z } \end{array} \right] + \left[ \begin{array} { c } { 0 } \\ { \vdots } \\ { 0 } \\ { B ( z ) } \end{array} \right] .
$$

It is easily established that $\mathcal { B }$ is maximal monotone and Lipschitz continuous, while $\mathcal { A }$ is maximal monotone. Letting $\mathcal { T } \doteq \mathcal { A } + \mathcal { B }$ , it follows from (Bauschke & Combettes, 2017, Proposition 20.23) that $\mathcal { T }$ is maximal monotone. Thus, we have reformulated (1) as the monotone inclusion $0 \in \mathcal { T } ( q )$ for $q$ in the product space $\mathbb { R } ^ { ( n + 1 ) d }$ . A vector $z \in \mathbb { R } ^ { d }$ solves (1) if and only if there exists $( w _ { 1 } , \dots , w _ { n } ) \in \mathbb { R } ^ { n d }$ such that $0 \in \mathcal { T } ( q )$ , where $q = ( w _ { 1 } , \dots , w _ { n } , z )$ . 
+

For any pair $( q , v )$ such that $v \in \mathcal { T } ( q )$ , $\| v \| ^ { 2 }$ represents an approximation residual for $q$ in the sense that $v = 0$ implies $q$ is a solution to (62). One may take $\| v \| ^ { 2 }$ as a measure of the error of $q$ as an approximate solution to (62), and it can only be 0 if $q$ is a solution. Given two approximate solutions $q _ { 1 }$ and $q _ { 2 }$ with certificates $v _ { 1 } \in \mathcal { T } ( q _ { 1 } )$ and $v _ { 2 } \in \mathcal { T } ( q _ { 2 } )$ , we will treat $q _ { 1 }$ as a β€œbetter” approximate solution than $q _ { 2 }$ if $\| v _ { 1 } \| ^ { 2 } < \| v _ { 2 } \| ^ { 2 }$ . Doing so is somewhat analogous to the practice, common in optimization, of using the gradient $\| \nabla f ( x ) \| ^ { 2 }$ as a measure of quality of an approximate minimizer of some differentiable function $f$ . However, note that since $\mathcal { T } ( q _ { 1 } )$ is a set, there may exist elements of $\mathcal { T } ( q _ { 1 } )$ with smaller norm than $v _ { 1 }$ . Thus any given certificate $v _ { 1 }$ only corresponds to an upper bound on $\mathrm { d i s t } ^ { 2 } ( 0 , \mathcal { T } ( q _ { 1 } ) )$ .

# F.2 APPROXIMATION RESIDUAL FOR PROJECTIVE SPLITTING

In SPS (Algorithm 1), for $i \in 1 . . n$ , the pairs $( x _ { i } ^ { k } , y _ { i } ^ { k } )$ are chosen so that $y _ { i } ^ { k } \in A _ { i } ( x _ { i } ^ { k } )$ . This can be seen from the definition of the resolvent. Thus $x _ { i } ^ { k } \in A _ { i } ^ { - 1 } ( y _ { i } ^ { k } )$ . Observe that

$$
\begin{array} { r } { v ^ { k } \doteq \left[ \begin{array} { c } { x _ { 1 } ^ { k } - z ^ { k } } \\ { \vdots } \\ { x _ { n } ^ { k } - z ^ { k } } \\ { B ( z ^ { k } ) + \sum _ { i = 1 } ^ { n } y _ { i } ^ { k } } \end{array} \right] \in \mathcal { T } ( y _ { 1 } ^ { k } , \dotsc , y _ { n } ^ { k } , z ^ { k } ) . 
} \end{array}
$$

The approximation residual for SPS is thus

$$
R _ { k } \doteq \| v ^ { k } \| ^ { 2 } = \sum _ { i = 1 } ^ { n } \| z ^ { k } - x _ { i } ^ { k } \| ^ { 2 } + \left\| B ( z ^ { k } ) + \sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \right\| ^ { 2 }
$$

which is an approximation residual for $( y _ { 1 } ^ { k } , \dots , y _ { n } ^ { k } , z ^ { k } )$ in the sense defined above. We may relate $R _ { k }$ to the approximation residual $G _ { k }$ for SPS from Section 5 as follows:

$$
\begin{array} { r l } & { R _ { k } = \displaystyle \sum _ { i = 1 } ^ { n } \| z ^ { k } - x _ { i } ^ { k } \| ^ { 2 } + \left\| B ( z ^ { k } ) + \displaystyle \sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \right\| ^ { 2 } } \\ & { \quad = \displaystyle \sum _ { i = 1 } ^ { n } \| z ^ { k } - x _ { i } ^ { k } \| ^ { 2 } + \left\| B ( z ^ { k } ) + \displaystyle \sum _ { i = 1 } ^ { n } y _ { i } ^ { k } - \displaystyle \sum _ { i = 1 } ^ { n + 1 } w _ { i } ^ { k } \right\| ^ { 2 } } \\ & { \quad \leq \displaystyle \sum _ { i = 1 } ^ { n } \| z ^ { k } - x _ { i } ^ { k } \| ^ { 2 } + 2 \| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \| ^ { 2 } + 2 \left\| \displaystyle \sum _ { i = 1 } ^ { n } ( y _ { i } ^ { k } - w _ { i } ^ { k } ) \right\| ^ { 2 } } \\ & { \quad \leq \displaystyle \sum _ { i = 1 } ^ { n } \| z ^ { k } - x _ { i } ^ { k } \| ^ { 2 } + 2 \| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \| ^ { 2 } + 2 n \displaystyle \sum _ { i = 1 } ^ { n } \| y _ { i } ^ { k } - w _ { i } ^ { k } \| ^ { 2 } } \\ & { \quad \leq 2 n \, G _ { k } , } \end{array}
$$

where in the second equality we have used the fact that $\sum _ { i = 1 } ^ { n + 1 } w _ { i } ^ { k } = 0$ . 
Thus, $R _ { k }$ has the same convergence rate as $G _ { k }$ given in Theorem 2.

Note that while the certificate given in (65) focuses on the primal iterate $z ^ { k }$ , it may be changed to focus on any $x _ { i } ^ { k }$ for $i = 1 , \ldots , n$ , by using

$$
\boldsymbol { v } _ { i } ^ { k } \doteq \left[ \begin{array} { c } { x _ { 1 } ^ { k } - x _ { i } ^ { k } } \\ { \vdots } \\ { x _ { n } ^ { k } - x _ { i } ^ { k } } \\ { B ( x _ { i } ^ { k } ) + \sum _ { j = 1 } ^ { n } y _ { j } ^ { k } } \end{array} \right] \in \mathcal { T } ( y _ { 1 } ^ { k } , \ldots , y _ { n } ^ { k } , x _ { i } ^ { k } ) .
$$

The approximation residual $\| v _ { i } ^ { k } \| ^ { 2 }$ may also be shown to have the same rate as $G _ { k }$ by following similar derivations to those above for $R _ { k }$ .

# F.3 TSENG’S METHOD

Tseng’s method (Tseng, 2000) can be applied to (62), resulting in the following recursion with iterates $q ^ { k } , \bar { q } ^ { k } \in \mathbb { R } ^ { ( n + 1 ) d }$ :

$$
\begin{array} { c } { \bar { q } ^ { k } = J _ { \alpha \mathcal { A } } ( q ^ { k } - \alpha \mathcal { B } ( q ^ { k } ) ) } \\ { q ^ { k + 1 } = \bar { q } ^ { k } + \alpha \big ( \mathcal { B } ( q ^ { k } ) - \mathcal { B } ( \bar { q } ^ { k } ) \big ) , } \end{array}
$$

where $\mathcal { A }$ and $\mathcal { B }$ are defined in (63) and (64). The resolvent of $\mathcal { A }$ may be readily computed from the resolvents of the $A _ { i }$ using Moreau’s identity (Bauschke & Combettes, 2017, Proposition 23.20).

Analogous to SPS, Tseng’s method has an approximation residual, which in this case is an element of $\mathcal { T } ( \bar { q } ^ { k } )$ . In particular, using the general properties of resolvent operators as applied to $J _ { \alpha \mathcal { A } }$ , we have

$$
\frac { 1 } { \alpha } ( q ^ { k } - \bar { q } ^ { k } ) - \mathcal { B } ( q ^ { k } ) \in \mathcal { A } ( \bar { q } ^ { k } ) . 
+$$ + +Also, rearranging (68) produces + +$$ +\frac { 1 } { \alpha } ( \bar { q } ^ { k } - q ^ { k + 1 } ) + \mathcal { B } ( q ^ { k } ) = \mathcal { B } ( \bar { q } ^ { k } ) . +$$ + +Adding these two relations produces + +$$ +\frac { 1 } { \alpha } ( q ^ { k } - q ^ { k + 1 } ) \in \mathcal { A } ( \bar { q } ^ { k } ) + \mathcal { B } ( \bar { q } ^ { k } ) = \mathcal { T } ( \bar { q } ^ { k } ) +$$ + +Therefore, + +$$ +R _ { k } ^ { \mathrm { { T s e n g } } } \doteq \frac { 1 } { \alpha ^ { 2 } } \| q ^ { k } - q ^ { k + 1 } \| ^ { 2 } +$$ + +represents a measure of the approximation error for Tseng’s method equivalent to $R _ { k }$ defined in (66) for SPS. + +# F.4 FRB + +The forward-reflected-backward method (FRB) (Malitsky & Tam, 2020) is another method that may be applied to the splitting $\mathcal { T } = \mathcal { A } + \mathcal { B }$ for $\mathcal { A }$ and $\mathcal { B }$ as defined in (63) and (64). Doing so yields recursion + +$$ +q ^ { k + 1 } = J _ { \alpha \mathcal { A } } \Big ( q ^ { k } - \alpha \big ( 2 \mathcal { B } ( q ^ { k } ) - \mathcal { B } ( q ^ { k - 1 } ) \big ) \Big ) . +$$ + +Following similar arguments to those for Tseng’s method, it can be shown that + +$$ +v _ { \mathrm { F R B } } ^ { k } \doteq \frac { 1 } { \alpha } \left( q ^ { k - 1 } - q ^ { k } \right) + \mathcal { B } ( q ^ { k } ) + \mathcal { B } ( q ^ { k - 2 } ) - 2 \mathcal { B } ( q ^ { k - 1 } ) \in \mathcal { T } ( q ^ { k } ) . +$$ + +Thus, FRB admits the following approximation residual equivalent to $R _ { k }$ for SPS: + +$$ +R _ { k } ^ { \mathrm { F R B } } \doteq \| v _ { \mathrm { F R B } } ^ { k } \| ^ { 2 } . +$$ + +Finally, we remark that the stepsizes used in both the Tseng and FRB methods can be chosen via a linesearch procedure that we do not detail here. 
+

# F.5 STOCHASTIC TSENG METHOD

The stochastic version of Tseng’s method of (BΓΆhm et al., 2020) (S-Tseng) may be applied to the inclusion $0 \in \mathcal { A } ( q ) + \mathcal { B } ( q )$ , since the operator $\mathcal { A }$ may be written as a subdifferential. However, unlike the deterministic Tseng method, it does not produce a valid residual. Note also that S-Tseng outputs an ergodic sequence $q _ { \mathrm { e r g } } ^ { k }$ . To construct a residual for the ergodic sequence, we compute a deterministic step of Tseng’s method according to (67)-(68), starting at $q _ { \mathrm { e r g } } ^ { k }$ . That is, letting

$$
\begin{array} { r l } & { \bar { q } ^ { k } = J _ { \alpha \mathcal { A } } ( q _ { \mathrm { e r g } } ^ { k } - \alpha \mathcal { B } ( q _ { \mathrm { e r g } } ^ { k } ) ) } \\ & { q ^ { k + 1 } = \bar { q } ^ { k } + \alpha ( \mathcal { B } ( q _ { \mathrm { e r g } } ^ { k } ) - \mathcal { B } ( \bar { q } ^ { k } ) ) , } \end{array}
$$

we can then compute essentially the same residual as in Section F.3,

$$
R _ { k } ^ { \mathrm { { S - T s e n g } } } \doteq \frac { 1 } { \alpha ^ { 2 } } \| q _ { \mathrm { { e r g } } } ^ { k } - q ^ { k + 1 } \| ^ { 2 } .
$$

To construct the stochastic oracle for S-Tseng, we assumed $B ( z ) = \frac { 1 } { m } \sum _ { i = 1 } ^ { m } B _ { i } ( z )$ . 
Then we used

$$
\tilde { \mathcal { B } } ( w _ { 1 } , \dots , w _ { n } , z ) \mapsto \left[ \begin{array} { c c c c } { 0 } & { \cdots } & { 0 } & { - I } \\ { \vdots } & { \ddots } & { \vdots } & { \vdots } \\ { 0 } & { \cdots } & { 0 } & { - I } \\ { I } & { \cdots } & { I } & { 0 } \end{array} \right] \left[ \begin{array} { c } { w _ { 1 } } \\ { \vdots } \\ { w _ { n } } \\ { z } \end{array} \right] + \left[ \begin{array} { c } { 0 } \\ { \vdots } \\ { 0 } \\ { \frac { 1 } { \vert \mathbf { B } \vert } \sum _ { j \in \mathbf { B } } B _ { j } ( z ) } \end{array} \right] .
$$

for some minibatch $\mathbf { B } \subseteq \{ 1 , \dots , m \}$ .

# F.6 VARIANCE-REDUCED FRB

The FRB-VR method of Alacaoglu et al. (2021) can also be applied to $0 \in \mathcal { A } ( q ) + \mathcal { B } ( q )$ , using the same stochastic oracle $\tilde { \mathcal { B } }$ defined in (69). If we let the iterates of FRB-VR be $( q ^ { k } , p ^ { k } )$ , then line 4 of Algorithm 1 of Alacaoglu et al. (2021) can be written as

$$
\begin{array} { c } { \hat { q } ^ { k } = q ^ { k } - \tau ( \mathcal { B } ( p ^ { k } ) + \tilde { \mathcal { B } } ( q ^ { k } ) - \tilde { \mathcal { B } } ( p ^ { k } ) ) } \\ { q ^ { k + 1 } = J _ { \tau \mathcal { A } } ( \hat { q } ^ { k } ) . } \end{array}
$$

Once again, the method does not directly produce a residual, but one can be developed from the algorithm definition as follows: (71) yields $\tau ^ { - 1 } ( \hat { q } ^ { k } - q ^ { k + 1 } ) \in \mathcal { A } ( q ^ { k + 1 } )$ and hence

$$
\tau ^ { - 1 } ( \hat { q } ^ { k } - q ^ { k + 1 } ) + \mathcal { B } ( q ^ { k + 1 } ) \in ( \mathcal { A } + \mathcal { B } ) ( q ^ { k + 1 } ) .
$$

Therefore we use the residual

$$
R _ { k } ^ { \mathrm { F R B - V R } } = \lVert \tau ^ { - 1 } ( \hat { q } ^ { k } - q ^ { k + 1 } ) + \mathcal { B } ( q ^ { k + 1 } ) \rVert ^ { 2 } .
$$

In the numerical experiments, we plot $R _ { k }$ for SPS, $R _ { k } ^ { \mathrm { T s e n g } }$ for Tseng’s method, $R _ { k } ^ { \mathrm { F R B } }$ for FRB, $R _ { k } ^ { \mathrm { S - T s e n g } }$ for S-Tseng, and $R _ { k } ^ { \mathrm { F R B - V R } }$ for FRB-VR.

# F.7 BENEFITS AND DRAWBACKS OF THE PRODUCT SPACE REFORMULATION

The main benefit of the product space reformulation (PSR) is that it allows one to use familiar 2-operator splitting schemes for solving $0 \in \mathcal { A } ( q ) + \mathcal { B } ( q )$ to solve the more complicated inclusion (1). However, one drawback of this approach is that the operator $\mathcal { B }$ , defined in (64), combines a skew-symmetric consensus matrix with the Lipschitz operator $B$ . Treating $\mathcal { B }$ as a single operator necessitates using a single stepsize for both of its constituent operators, but the $B$ component will generally have a much larger Lipschitz constant than the skew part, necessitating a smaller stepsize than is ideal for the skew operator. This difficulty can be countered by using different stepsizes for the primal and dual components, but that strategy introduces additional tuning parameters. In other works, methods based on PSR have exhibited slower convergence than deterministic projective splitting methods (Johnstone & Eckstein, 2021; 2020b). However, in our experiments in Section 7, the performance is comparable.

# G VARIATIONAL INEQUALITIES

For a mapping $B : \mathbb { R } ^ { d } \to \mathbb { R } ^ { d }$ and a closed and convex set $\mathcal { C }$ , the variational inequality problem (Harker & Pang, 1990) is to find $z ^ { \ast } \in \mathcal { C }$ such that

$$
B ( z ^ { * } ) ^ { \top } ( z - z ^ { * } ) \geq 0 , \forall z \in { \mathcal { C } } . 
+$$ + +Consider the normal cone mapping discussed in Section 2 and defined as + +$$ +N _ { { \mathcal { C } } } ( x ) \doteq \{ g : g ^ { \top } ( y - x ) \le 0 \ \forall y \in { \mathcal { C } } \} +$$ + +It is easily seen that (72) is equivalent to finding $z ^ { * }$ such that $- B ( z ^ { * } ) \in N _ { \mathcal { C } } ( z ^ { * } )$ . Hence, if $B$ is monotone, (72) is equivalent to the monotone inclusion + +$$ +0 \in B ( z ^ { * } ) + N _ { \cal { C } } ( z ^ { * } ) . +$$ + +Thus, monotone variational inequalities are a special case of monotone inclusions with two operators, one of which is single-valued and the other is the normal cone map of the constraint set $\mathcal { C }$ . As a consequence, methods for monotone inclusions can be used to solve monotone variational inequality problems. The reverse, however, may not be true. For example, the analysis of the extragradient method (Korpelevich, 1977) relies on the second operator $N _ { \mathcal { C } }$ in (73) being a normal cone, as opposed to a more general monotone operator. We are not aware of any direct extension of the extragradient method’s analysis allowing a more general resolvent to be used in place of the projection map corresponding to $N _ { \mathcal { C } }$ . + +The Restricted Gap Function There is a disadvantage to pursuing convergence rates based on variational inequalities (as in BΓΆhm et al. (2020) and Alacaoglu et al. (2021)) rather than monotone inclusions. Convergence rate analyses for variational inequalities focus on the gap function: + +$$ +G _ { { \mathcal C } } ( z ) \doteq \operatorname* { s u p } _ { z ^ { \prime } \in { \mathcal C } } B ( z ^ { \prime } ) ^ { \top } ( z - z ^ { \prime } ) . +$$ + +It can be shown that $G _ { \mathcal { C } } ( z ) \geq 0$ and $G _ { \mathcal { C } } ( z ) = 0$ if and only if $z$ solves (72). 
However, (74) is meaningless for most problems, since unless $\mathcal { C }$ is compact, $G _ { \mathcal { C } } ( z )$ is typically equal to $+ \infty$ for any nonsolution (Diakonikolas, 2020). Thus researchers instead focus on the restricted gap function (Nesterov, 2007)

$$
G _ { { \mathcal C } _ { 2 } } ( z ) \doteq \operatorname* { s u p } _ { z ^ { \prime } \in { \mathcal C } _ { 2 } } B ( z ^ { \prime } ) ^ { \top } ( z - z ^ { \prime } ) ,
$$

where $\mathcal { C } _ { 2 }$ is an arbitrary compact set. However, now the results are only meaningful over the set $\mathcal { C } _ { 2 }$ . Thus, $\mathcal { C } _ { 2 }$ must be chosen large enough so that the iterates of the algorithm remain in the interior of $\mathcal { C } _ { 2 }$ (BΓΆhm et al., 2020). Further, the convergence rate bound depends on the diameter of $\mathcal { C } _ { 2 }$ . For some algorithms (Mokhtari et al., 2020) a valid set is provided which bounds the iterates. However, BΓΆhm et al. (2020) and Alacaoglu et al. (2021) do not provide one, although in principle it could be done so long as the ergodic sequence can be bounded almost-surely. Thus, the convergence rates depending on (75) in BΓΆhm et al. (2020) and Alacaoglu et al. (2021) are somewhat incomplete in that they depend on unknown constants.

In contrast, rates based on the approximation residual in the monotone inclusion setting, including ours given in (57)–(58), completely avoid this pitfall. There is no need to select a compact set containing the algorithm’s iterates and the constants in our rates are all explicit or depend on standard quantities such as the initial distance to a solution.

# H MEMORY-SAVING TECHNIQUE FOR SPS

The variables $t _ { i } ^ { k } , x _ { i } ^ { k }$ , and $y _ { i } ^ { k }$ on lines 3-5 of SPS are stored in variables $t$ , $x$ , and $y$ . 
Another two variables $\bar { x }$ and $\bar { y }$ keep track of $\textstyle \sum _ { i = 1 } ^ { n } x _ { i } ^ { k }$ and $\textstyle \sum _ { i = 1 } ^ { n } y _ { i } ^ { k }$ . The dual variables are stored as $w _ { i }$ for $i \in 1 . . n$ and the primal variable as $z$ . Once $x = x _ { i } ^ { k }$ is computed, the $i ^ { \mathrm { t h } }$ dual variable $w _ { i }$ can be partially updated as $w _ { i } \leftarrow w _ { i } - \alpha _ { k } x$ . Once all the operators have been processed, the update for each dual variable may be completed via $w _ { i } \leftarrow w _ { i } + \alpha _ { k } ( n + 1 ) ^ { - 1 } \bar { x }$ . Also, the primal update is computed as $z \leftarrow z - \alpha _ { k } \bar { y }$ . During the calculation loop for the $x _ { i } ^ { k } , y _ { i } ^ { k }$ , the terms in the approximation residual $R _ { k }$ may also be accumulated one by one. The total number of vector elements that must be stored is $( n + 7 ) d$ .

# I ADDITIONAL INFORMATION ABOUT THE NUMERICAL EXPERIMENTS

We solve the following convex-concave min-max problem:

$$
\begin{array} { r l } { \underset { \lambda \in \mathbb { R } , \beta \in \mathbb { R } ^ { d } } { \operatorname* { m i n } } \quad \underset { \gamma \in \mathbb { R } ^ { m } } { \operatorname* { m a x } } } & { \left\{ \lambda ( \delta - \kappa ) + \displaystyle \frac { 1 } { m } \sum _ { i = 1 } ^ { m } \Psi ( \langle \hat { x } _ { i } , \beta \rangle ) + \displaystyle \frac { 1 } { m } \sum _ { i = 1 } ^ { m } \gamma _ { i } ( \hat { y } _ { i } \langle \hat { x } _ { i } , \beta \rangle - \lambda \kappa ) + c \| \beta \| _ { 1 } \right\} } \\ { \mathrm { s . t . } \quad } & { \| \beta \| _ { 2 } \leq \lambda / ( L _ { \Psi } + 1 ) \qquad \| \gamma \| _ { \infty } \leq 1 . } \end{array}
$$

This model is identical to that of (Yu et al., 2021, Thm. 4.3) except for the addition of the $\ell _ { 1 }$ regularization term $c \| \beta \| _ { 1 }$ , where $c \geq 0$ is a given constant. 
The goal is to learn the model weights $\beta$ from a training dataset of $m$ feature vectors ${ \hat { x } } _ { i }$ and corresponding labels $\hat { y } _ { i }$ . Rather than computing the expected loss over the training set, the formulation uses, for each $\beta$ , the worst possible distribution within a Wasserstein-metric ball around the empirical distribution of the $\{ ( \hat { x } _ { i } , \hat { y } _ { i } ) \}$ , with the parameter $\delta \geq 0$ giving the diameter of the ball and the parameter $\kappa \geq 0$ specifying the relative weighting of features and labels. The variables $\gamma$ and $\lambda$ parameterize the selection of this worst-case distribution in response to the model weights $\beta$ . Finally, $\Psi$ is the logistic loss kernel $t \mapsto \log ( e ^ { t } + e ^ { - t } )$ and $L _ { \Psi } = 1$ is the corresponding Lipschitz constant. In all the experiments, we set $\delta = \kappa = 1$ and $c = 1 0 ^ { - 3 }$ . + +We now show how we converted this problem to the form (1) for our experiments. Let $z$ be a shorthand for $( \lambda , \beta , \gamma )$ , and define + +$$ +\mathcal { L } ( z ) \doteq \lambda ( \delta - \kappa ) + \frac { 1 } { m } \sum _ { i = 1 } ^ { m } \Psi ( \langle { \hat { x } _ { i } } , \beta \rangle ) + \frac { 1 } { m } \sum _ { i = 1 } ^ { m } \gamma _ { i } ( \hat { y } _ { i } \langle { \hat { x } _ { i } } , \beta \rangle - \lambda \kappa ) . 
+$$ + +The first-order necessary and sufficient conditions for the convex-concave saddlepoint problem in (76) are + +$$ +0 \in B ( z ) + A _ { 1 } ( z ) + A _ { 2 } ( z ) +$$ + +where the vector field $B ( z )$ is defined as + +$$ +\boldsymbol { B } ( z ) \doteq \left[ \begin{array} { l } { \nabla _ { \boldsymbol { \lambda } , \beta } \mathcal { L } ( z ) } \\ { - \nabla _ { \boldsymbol { \gamma } } \mathcal { L } ( z ) } \end{array} \right] , +$$ + +with + +$$ +\begin{array} { r } { \nabla _ { \lambda , \beta } \mathcal { L } ( z ) = \left[ \begin{array} { c } { \delta - \kappa ( 1 + \frac { 1 } { m } \sum _ { i = 1 } ^ { m } \gamma _ { i } ) } \\ { \frac { 1 } { m } \sum _ { i = 1 } ^ { m } \Psi ^ { \prime } ( \langle \hat { x } _ { i } , \beta \rangle ) \hat { x } _ { i } + \frac { 1 } { m } \sum _ { i = 1 } ^ { m } \gamma _ { i } \hat { y } _ { i } \hat { x } _ { i } } \end{array} \right] } \end{array} +$$ + +and + +$$ +\nabla _ { \boldsymbol { \gamma } } \mathcal { L } ( z ) = \left[ \begin{array} { c } { \frac { 1 } { m } ( \hat { y } _ { 1 } \langle \hat { x } _ { 1 } , \beta \rangle - \lambda \kappa ) } \\ { \vdots } \\ { \frac { 1 } { m } ( \hat { y } _ { m } \langle \hat { x } _ { m } , \beta \rangle - \lambda \kappa ) } \end{array} \right] . +$$ + +It is readily confirmed that $B$ defined in this manner is Lipschitz. The monotonicity of $B$ follows from its being the generalized gradient of a convex-concave saddle function (Rockafellar, 1970). 
+ +For the set-valued operators, $A _ { 1 } ( z )$ corresponds to the constraints and $A _ { 2 } ( z )$ to the nonsmooth $\ell _ { 1 }$ regularizer, and are defined as + +$$ +A _ { 1 } ( z ) \doteq N _ { \mathcal { C } _ { 1 } } ( \lambda , \beta ) \times N _ { \mathcal { C } _ { 2 } } ( \gamma ) , +$$ + +where + +$$ +\begin{array} { r } { \mathcal { C } _ { 1 } \doteq \bigl \{ ( \lambda , \beta ) : \| \beta \| _ { 2 } \le \lambda / ( L _ { \Psi } + 1 ) \bigr \} \quad \mathrm { ~ a n d ~ } \quad \mathcal { C } _ { 2 } \doteq \{ \gamma : \| \gamma \| _ { \infty } \le 1 \} , } \end{array} +$$ + +and + +$$ +A _ { 2 } ( z ) \doteq \{ \mathbf { 0 } _ { 1 \times 1 } \} \times c \partial \| \beta \| _ { 1 } \times \{ \mathbf { 0 } _ { m \times 1 } \} . +$$ + +Here, the notation ${ \bf 0 } _ { p \times 1 }$ denotes the $p$ -dimensional vector of all zeros. $\mathcal { C } _ { 1 }$ is a scaled version of the second-order cone, well known to be a closed convex set, while $\mathcal { C } _ { 2 }$ is the unit ball of the $\ell _ { \infty }$ norm, also closed and convex. Since $A _ { 1 }$ is a normal cone map of a closed convex set and $A _ { 2 }$ is the subgradient map of a closed proper convex function (the scaled 1-norm), both of these operators are maximal monotone and problem (77) is a special case of (1) for $n = 2$ . 
+ +Stochastic oracle implementation The operator $B : \mathbb { R } ^ { m + d + 1 } \mapsto \mathbb { R } ^ { m + d + 1 }$ , defined in (78), can be written as + +$$ +B ( z ) = \frac { 1 } { m } \sum _ { i = 1 } ^ { m } B _ { i } ( z ) +$$ + +where + +$$ +B _ { i } ( z ) \doteq \left[ \begin{array} { c } { \delta - \kappa ( 1 + \gamma _ { i } ) } \\ { \Psi ^ { \prime } ( \langle \hat { x } _ { i } , \beta \rangle ) \hat { x } _ { i } + \gamma _ { i } \hat { y } _ { i } \hat { x } _ { i } } \\ { \mathbf { 0 } _ { ( i - 1 ) \times 1 } } \\ { - ( \hat { y } _ { i } \langle \hat { x } _ { i } , \beta \rangle - \lambda \kappa ) } \\ { \mathbf { 0 } _ { ( m - i ) \times 1 } } \end{array} \right] . +$$ + +In our SPS experiments, the stochastic oracle for $B$ is simply $\begin{array} { r } { \tilde { B } ( z ) = \frac { 1 } { | \mathbf { B } | } \sum _ { i \in \mathbf { B } } B _ { i } ( z ) } \end{array}$ for some minibatch $\mathbf { B } \subseteq \{ 1 , \dots , m \}$ . We used a batchsize of 100. + +Resolvent computations The resolvent of $A _ { 1 }$ is readily constructed from the projection maps of the simple sets $\mathcal { C } _ { 1 }$ and $\mathcal { C } _ { 2 }$ , while the resolvent $A _ { 2 }$ involves the proximal operator of the $\ell _ { 1 }$ norm. Specifically, + +$$ +J _ { \rho A _ { 1 } } ( z ) = \left[ \begin{array} { c } { \mathrm { p r o j } _ { \mathcal { C } _ { 1 } } ( \lambda , \beta ) } \\ { \mathrm { p r o j } _ { \mathcal { C } _ { 2 } } ( \gamma ) } \end{array} \right] \quad \mathrm { a n d } \quad J _ { \rho A _ { 2 } } ( z ) = \left[ \begin{array} { c } { \mathbf { 0 } _ { 1 \times 1 } } \\ { \mathrm { p r o x } _ { \rho c \| \cdot \| _ { 1 } } ( \beta ) } \\ { \mathbf { 0 } _ { m \times 1 } } \end{array} \right] . +$$ + +The constraint $\mathcal { C } _ { 1 }$ is a scaled second-order cone and $\mathcal { C } _ { 2 }$ is the $\ell _ { \infty }$ ball, both of which have closed-form projections. 
The proximal operator of the $\ell _ { 1 }$ norm is the well-known soft-thresholding operator (Parikh & Boyd, 2013, Section 6.5.2). Therefore all resolvents in the formulation may be computed quickly and accurately. 
+
+SPS stepsize choices For the stepsize in SPS, we ordinarily require $\rho _ { k } \le \overline { { \rho } } < 1 / L$ for the global Lipschitz constant $L$ of $B$ . However, since the global Lipschitz constant may be pessimistic, better performance can often be achieved by experimenting with larger stepsizes. If divergence is observed, then the stepsize can be decreased. This type of strategy is common for SGD and similar stochastic methods. Thus, for SPS-decay we set $\alpha _ { k } = C _ { d } k ^ { - 0 . 5 1 }$ and $\rho _ { k } = C _ { d } k ^ { - 0 . 2 5 }$ , and performed a grid search to select the best $C _ { d }$ from $\{ 0 . 1 , 0 . 5 , 1 , 5 , 1 0 \}$ , arriving at $C _ { d } = 1$ for epsilon and SUSY, and $C _ { d } = 0 . 5$ for real-sim. For SPS-fixed we used $\rho = K ^ { - 1 / 4 }$ and $\alpha = C _ { f } \rho ^ { 2 }$ , and performed a grid search to select $C _ { f }$ over $\{ 0 . 1 , 0 . 5 , 1 , 5 , 1 0 \}$ , arriving at $C _ { f } = 1$ for epsilon and real-sim, and $C _ { f } = 5$ for SUSY. The total number of iterations for SPS-fixed was chosen as follows: For the epsilon dataset, we used $K = 5 0 0 0$ , for SUSY we used $K = 2 0 0$ , and for real-sim we used $K = 1 0 0 0$ . 
+
+![](images/c94c000fa07b86ebc660cc1004df85b2b9b8c4f0b3338fc20eb74e1d9a379c4e.jpg) 
+Figure 2: Approximation residual versus epoch for three LIBSVM benchmark datasets. Left: epsilon, middle: SUSY, right: real-sim. 
+
+Parameter choices for the other algorithms All methods are initialized at the same random point. For Tseng’s method, we used the backtracking linesearch variant with an initial stepsize of 1, $\theta = 0 . 8$ , and a stepsize reduction factor of 0.7. 
For FRB, we used the backtracking linesearch variant with the same settings as for Tseng’s method. For deterministic PS, we used a fixed stepsize of $0 . 9 / L$ . For the stochastic Tseng’s method of BΓΆhm et al. (2020), the stepsize $\alpha _ { k }$ must satisfy: $\textstyle \sum _ { k = 1 } ^ { \infty } { \dot { \alpha } } _ { k } = \infty$ and P∞k=1 $\textstyle \sum _ { k = 1 } ^ { \infty } \alpha _ { k } ^ { 2 } < \infty$ . So we set $\alpha _ { k } = C k ^ { - d }$ and perform a grid search over $\{ C , d \}$ k=1 in the range $[ 1 0 ^ { - 4 } , 1 0 ] \times [ 0 . 5 1 , 1 ]$ , checking $5 \times 5$ values to find the best setting for each of the three problems. The selected values are in Table 1. + +Table 1: Parameter Values for S-Tseng + +
epsilonSUSYreal-sim
C0.560.560.77
d0.60.60.55
+ +The work of BΓΆhm et al. (2020) also introduced $\mathrm { F B F p }$ , a stochastic version of Tseng’s method that reuses a previously-computed gradient and therefore only needs one additional gradient calculation per iteration. In our experiments, the performance of the two methods was about the same, so we only report the performance of stoch. Tseng’s method. + +For variance-reduced FRB, the main parameter is the probability $p$ . We hand-tuned $p$ ,arriving at $p = 0 . 0 1$ for all problems. We set the stepsize to its maximum allowed value of + +$$ +\tau = { \frac { 1 - \sqrt { 1 - p } } { 2 L } } . +$$ + +Plots versus Epoch Figure 2 plots the performance of each method versus epoch (i.e. data pass). This shows an even more dramatic benefit for the stochastic methods than the plots versus time, since at each iteration the stochastic methods only need to process small amounts of data, whereas deterministic methods must process all of it. We believe these benefits do not fully manifest themselves in the plots versus time due to overheads in each iteration of the stochastic methods, multithreading providing a boost for the deterministic methods, memory access patterns, and other practical considerations. + +Fraction of Nonzero Entries versus Running time Figure 3 plots the fraction of nonzero entries in the iterates of each method versus running time. For each method, we used output of proxckΒ·k1. We observe that our methods produce sparse intermediate iterates for two of the three problems. This is one of the benefits of proximal splitting algorithms in general, including our method. For the other problem, SUSY, no method produces sparse iterates, suggesting that $c$ should be increased if sparse solutions are desired. + +![](images/7829a04c81fd4e0356905a55be1d4c62aec1a4bb1bef565b9369bc3a3ec4170b.jpg) +Figure 3: Fraction of nonzero entries versus running time for the three datasets. Left: epsilon, middle: SUSY, right: real-sim. 
+
+# J LOCAL CONVERGENCE ON NON-MONOTONE PROBLEMS
+
+The work by Hsieh et al. (2020) provides a local convergence analysis for DSEG applied to locally monotone problems. Recall that DSEG is equivalent to the special case of SPS for which $n = 0$ . While extending this result to the more general setting of SPS is beyond the scope of this manuscript, we next provide a preliminary sketch of how the analysis of Hsieh et al. (2020) might be generalized to our setting. We leave a formal proof to future work.
+
+Sketch of assumptions and main result The first assumption needed is the existence of an isolated solution $p ^ { * } = ( z ^ { * } , w _ { 1 } ^ { * } , \ldots , w _ { n + 1 } ^ { * } ) \in \mathcal { S }$ . We then require that there exists a ball $\mathbb { B } _ { r } \big ( z ^ { * } \big )$ , centered at $z ^ { * }$ , throughout which the operator $B$ is “well-behaved”, meaning that it satisfies monotonicity and Lipschitz continuity. In addition, we need each $A _ { i }$ , for $i \in 1 . . n$ , to be maximal monotone within this ball. Outside of the ball, the operators do not need to be monotone or Lipschitz.
+
+Following (Hsieh et al., 2020, Assumption $2 ^ { \prime }$ ), the noise variance assumptions are slightly stronger than in the monotone case. In particular, we require that $\mathbb { E } [ \| \epsilon ^ { k } \| ^ { q } | \mathcal { F } _ { k } ] \le N ^ { q }$ and $\mathbb { E } [ \| e ^ { k } \| ^ { q } | \mathcal { F } _ { k } ] \le N ^ { q }$ for some $q > 2$ . As before, the noise must be zero-mean. Finally, the stepsize requirements are also slightly stronger than (12), having the added assumption that $\textstyle \sum _ { k = 1 } ^ { \infty } \rho _ { k } ^ { q } < \infty$ .
+
+With these assumptions, the goal is to show that, so long as the initial point $p ^ { 1 }$ is sufficiently close to $p ^ { * }$ , then with high probability $p ^ { k }$ converges to $p ^ { * }$ . 
+
+Proof strategy The initial strategy is to develop the following recursion, satisfied by SPS, that does not (yet) utilize local monotonicity or Lipschitz continuity:
+
+$$
+\begin{array} { r l } & { \| p ^ { k + 1 } - p ^ { * } \| ^ { 2 } \leq ( 1 + c _ { 1 } \alpha _ { k } ^ { 2 } ) \| p ^ { k } - p ^ { * } \| ^ { 2 } - c _ { 2 } \alpha _ { k } \rho _ { k } ( T _ { k } ^ { \prime } + l _ { k } + r _ { k } ) - c _ { 3 } \alpha _ { k } ( r _ { k } ^ { \prime } + q _ { k } ) } \\ & { \qquad + c _ { 1 } \alpha _ { k } ^ { 2 } \big ( \| e ^ { k } \| ^ { 2 } + \| \epsilon ^ { k } \| ^ { 2 } + c _ { 4 } \big ) + c _ { 5 } \alpha _ { k } q _ { k } ^ { \prime } } \end{array}
+$$
+
+for appropriate constants $c _ { 1 } \ldots c _ { 5 } \geq 0$ . In this inequality, we use
+
+$$
+\begin{array} { l } { \displaystyle T _ { k } ^ { \prime } \doteq \frac { \tau } { \overline { { \rho } } } \displaystyle \sum _ { i = 1 } ^ { n } \| y _ { i } ^ { k } - w _ { i } ^ { k } \| ^ { 2 } + \frac { 1 } { \overline { { \rho } } \tau } \displaystyle \sum _ { i = 1 } ^ { n } \| z ^ { k } - x _ { i } ^ { k } \| ^ { 2 } , } \\ { \displaystyle l _ { k } \doteq \displaystyle \sum _ { i = 1 } ^ { n } \langle z ^ { * } - x _ { i } ^ { k } , w _ { i } ^ { * } - y _ { i } ^ { k } \rangle + \big \langle z ^ { * } - x _ { n + 1 } ^ { k } , w _ { n + 1 } ^ { * } - B ( x _ { n + 1 } ^ { k } ) \big \rangle , } \\ { \displaystyle r _ { k } \doteq \big \langle \epsilon ^ { k } , B ( \tilde { x } ^ { k } ) - w _ { n + 1 } ^ { k } \big \rangle , } \\ { \displaystyle r _ { k } ^ { \prime } \doteq \big \langle z ^ { k } - z ^ { * } , e ^ { k } \big \rangle , } \\ { \displaystyle q _ { k } \doteq \big ( \rho _ { k } ^ { - 1 } - d / 2 \big ) \| \tilde { x } ^ { k } - z ^ { k } \| ^ { 2 } - \| \tilde { x } ^ { k } - z ^ { k } \| \| B ( \tilde { x } ^ { k } ) - B ( z ^ { k } ) \| , } \\ { \displaystyle q _ { k } ^ { \prime } \doteq \rho _ { k } \| \epsilon ^ { k } \| \| B x _ { n + 1 } ^ { k } - B \tilde { x } ^ { k } \| + \frac { 1 } { 2 d } \| B \tilde { x } ^ { k } - B x _ { n + 1 } ^ { k } \| ^ { 2 } , } \end{array}
+$$
+
+where
+
+$$
+\tilde { x } ^ { k } \doteq z ^ { k } - \rho _ { k } \bigl ( B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \bigr ) \qquad d \doteq \frac { 1 - \overline { { \rho } } L } { 1 + \overline { { \rho } } / 2 } ,
+$$
+
+with $L$ being the local Lipschitz constant of $B$ on $\mathbb { B } _ { r } \big ( z ^ { * } \big )$ . The iterate $\tilde { x } ^ { k }$ is the analog of the iterate $\tilde { X } _ { t + 1 / 2 }$ used in Hsieh et al. (2020).
+
+The recursion (79) is derived by once again starting from (13) and following the arguments leading to (35), but this time not taking conditional expectations. In particular, the upper bounds on $\| \nabla _ { z } \varphi _ { k } \| ^ { 2 }$ and $\| \nabla _ { w _ { i } } \varphi _ { k } \| ^ { 2 }$ contribute the terms $c _ { 1 } \alpha _ { k } ^ { 2 } ( \| e ^ { k } \| ^ { 2 } + \| \epsilon ^ { k } \| ^ { 2 } + c _ { 4 } )$ and $c _ { 1 } \alpha _ { k } ^ { 2 } \| p ^ { k } - p ^ { * } \| ^ { 2 }$ . For $i \in 1 . . n$ , the “$\varphi _ { i , k }$-gap” term, $\varphi _ { i , k } ( p ^ { k } ) - \varphi _ { i , k } ( p ^ { * } )$ , is dealt with in a similar manner to Section C.5, but this time not using monotonicity as in (36). This contributes $T _ { k } ^ { \prime }$ and the first term in $l _ { k }$ . Finally, as we sketch below, the “$\varphi _ { n + 1 , k }$-gap” term contributes $r _ { k } , r _ { k } ^ { \prime } , q _ { k } , q _ { k } ^ { \prime }$ , and the last term in $l _ { k }$ . 
+
+For the “$\varphi _ { n + 1 , k }$-gap”, that is, $\varphi _ { n + 1 , k } ( p ^ { k } ) - \varphi _ { n + 1 , k } ( p ^ { * } )$ , we have to depart from the analysis in Section C.6 and use an alternative argument involving $\tilde { x } ^ { k }$ . We now provide some details of this argument: in the following, we use $B z$ as shorthand for $B ( z )$ for any vector $z \in \mathbb { R } ^ { d }$ . We begin the analysis with
+
+$$
+\begin{array} { r l } & { \varphi _ { n + 1 , k } ( p ^ { k } ) = \langle z ^ { k } - x _ { n + 1 } ^ { k } , y _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \rangle } \\ & { \qquad = \langle z ^ { k } - x _ { n + 1 } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \rangle + \underbrace { \langle z ^ { k } - x _ { n + 1 } ^ { k } , e ^ { k } \rangle } _ { \mathrm { p a r t } \mathrm { o f } r _ { k } ^ { \prime } } . } \end{array}
+$$
+
+The final term will combine with the term $\langle x _ { n + 1 } ^ { k } - z ^ { * } , e ^ { k } \rangle$ coming from
+
+$$
+\begin{array} { r l } & { - \varphi _ { n + 1 , k } ( p ^ { * } ) = \langle z ^ { * } - x _ { n + 1 } ^ { k } , w _ { n + 1 } ^ { * } - y _ { n + 1 } ^ { k } \rangle } \\ & { \qquad = \langle z ^ { * } - x _ { n + 1 } ^ { k } , w _ { n + 1 } ^ { * } - B x _ { n + 1 } ^ { k } \rangle + \langle x _ { n + 1 } ^ { k } - z ^ { * } , e ^ { k } \rangle } \end{array}
+$$
+
+to yield $r _ { k } ^ { \prime }$ above. Equation (82) also yields the second term in $l _ { k }$ . 
Using that $\tilde { x } ^ { k } - x _ { n + 1 } ^ { k } = \rho _ { k } \epsilon ^ { k }$ , we rewrite the first term in (81) as
+
+$$
+\begin{array} { r l } & { \bigl \langle z ^ { k } - x _ { n + 1 } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \bigr \rangle = \bigl \langle z ^ { k } - \tilde { x } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \bigr \rangle + \bigl \langle \tilde { x } ^ { k } - x _ { n + 1 } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \bigr \rangle } \\ & { \qquad = \bigl \langle z ^ { k } - \tilde { x } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \bigr \rangle + \rho _ { k } \bigl \langle \epsilon ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \bigr \rangle } \\ & { \qquad = \bigl \langle z ^ { k } - \tilde { x } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \bigr \rangle + \rho _ { k } \bigl \langle \epsilon ^ { k } , B x _ { n + 1 } ^ { k } - B \tilde { x } ^ { k } \bigr \rangle \qquad ( 8 3 ) } \\ & { \qquad + \rho _ { k } \underbrace { \bigl \langle \epsilon ^ { k } , B \tilde { x } ^ { k } - w _ { n + 1 } ^ { k } \bigr \rangle } _ { r _ { k } } . } \end{array}
+$$
+
+Next, the terms in (83) admit the lower bound
+
+$$
+\begin{array} { r l } & { \langle z ^ { k } - { \tilde { x } } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \rangle + \rho _ { k } \langle \epsilon ^ { k } , B x _ { n + 1 } ^ { k } - B { \tilde { x } } ^ { k } \rangle } \\ & { \qquad \geq \langle z ^ { k } - { \tilde { x } } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \rangle - \underbrace { \rho _ { k } \| \epsilon ^ { k } \| \| B x _ { n + 1 } ^ { k } - B { \tilde { x } } ^ { k } \| } _ { \mathrm { p a r t ~ o f ~ } q _ { k } ^ { \prime } } . } \end{array}
+$$
+
+Considering the first term on the right-hand side of this bound, we also have
+
+$$
+\begin{array} { r l } { { \langle z ^ { k } - \tilde { x } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \rangle = \langle z ^ { k } - \tilde { x } ^ { k } , B \tilde { x } ^ { k } - w _ { n + 1 } ^ { k } \rangle + \langle z ^ { k } - \tilde { x } ^ { k } , B x _ { n + 1 } ^ { k } - B \tilde { x } ^ { k } \rangle } } \\ & { \geq \langle z ^ { k } - \tilde { x } ^ { k } , B \tilde { x } ^ { k } - w _ { n + 1 } ^ { k } \rangle - \displaystyle \frac { d } { 2 } \| z ^ { k } - \tilde { x } ^ { k } \| ^ { 2 } - \displaystyle \frac { 1 } { 2 d } \| B \tilde { x } ^ { k } - B x _ { n + 1 } ^ { k } \| ^ { 2 } } \end{array}
+$$
+
+for any $d > 0$ , using Young’s inequality. Finally, for the first two terms of the right-hand side of the above relation, we may write
+
+$$
+\begin{array} { r l } { { \langle z ^ { k } - \tilde { x } ^ { k } , B \tilde { x } ^ { k } - w _ { n + 1 } ^ { k } \rangle - \frac { d } { 2 } \| z ^ { k } - \tilde { x } ^ { k } \| ^ { 2 } } } \\ & { = \langle z ^ { k } - \tilde { x } ^ { k } , B z ^ { k } - w _ { n + 1 } ^ { k } \rangle + \langle z ^ { k } - \tilde { x } ^ { k } , B \tilde { x } ^ { k } - B z ^ { k } \rangle - \frac { d } { 2 } \| z ^ { k } - \tilde { x } ^ { k } \| ^ { 2 } } \\ & { \quad \quad \geq \underbrace { ( \rho _ { k } ^ { - 1 } - d / 2 ) \| z ^ { k } - \tilde { x } ^ { k } \| ^ { 2 } - \| z ^ { k } - \tilde { x } ^ { k } \| \| B \tilde { x } ^ { k } - B z ^ { k } \| } _ { q _ { k } } , } \end{array}
+$$
+
+where in the final inequality we use the Cauchy–Schwarz inequality and substitute $B z ^ { k } - w _ { n + 1 } ^ { k } =$ $\rho _ { k } ^ { - 1 } ( z ^ { k } - \tilde { x } ^ { k } )$ , from the definition of $\tilde { x } ^ { k }$ in (80). We have now accounted for all the terms appearing in (79).
+
+The recursion (79) is analogous to equation (F.7) on page 24 of Hsieh et al. 
(2020) and provides the starting point for the local convergence analysis. The next step would be to derive an analog of Theorem F.1. of Hsieh et al. (2020) using (79). The following translation to the notation of Theorem F.1. could be used (note that Hsieh et al. (2020) uses $t$ for iteration counter): + +$$ +\begin{array} { r l } & { D _ { k } = \| p ^ { k } - p ^ { * } \| ^ { 2 } , } \\ & { \zeta _ { k } = c _ { 2 } \alpha _ { k } \rho _ { k } ( T _ { k } ^ { \prime } + l _ { k } ) + c _ { 3 } \alpha _ { k } q _ { k } , } \\ & { \xi _ { k } = - c _ { 2 } \alpha _ { k } \rho _ { k } r _ { k } - c _ { 3 } \alpha _ { k } r _ { k } ^ { \prime } , } \\ & { \chi _ { k } = c _ { 1 } \alpha _ { k } ^ { 2 } \big ( \| e ^ { k } \| ^ { 2 } + \| \epsilon ^ { k } \| ^ { 2 } + \| p ^ { k } - p ^ { * } \| ^ { 2 } + c _ { 4 } \big ) + c _ { 5 } \alpha _ { k } q _ { k } ^ { \prime } , } \end{array} +$$ + +and the event $E _ { \infty } ^ { \rho }$ is translated to + +$$ +\begin{array} { r } { E _ { \infty } ^ { \rho } = \left\{ x _ { n + 1 } ^ { k } \in \mathbb { B } _ { r } ( z ^ { * } ) , \tilde { x } ^ { k } \in \mathbb { B } _ { \rho r } ( z ^ { * } ) , p ^ { k } \in \mathbb { B } _ { \rho r } ( p ^ { * } ) \mathrm { ~ f o r ~ a l l ~ } k = 1 , 2 , \ldots \right\} . } \end{array} +$$ + +An analog of Theorem 2 of Hsieh et al. (2020) could then be developed based on this result. 
\ No newline at end of file diff --git a/parse/dev/a0SRWViFYW/a0SRWViFYW_content_list.json b/parse/dev/a0SRWViFYW/a0SRWViFYW_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..7696f0ccc6be7b4ea657f5997d7bbba36068cb34 --- /dev/null +++ b/parse/dev/a0SRWViFYW/a0SRWViFYW_content_list.json @@ -0,0 +1,6996 @@ +[ + { + "type": "text", + "text": "STOCHASTIC PROJECTIVE SPLITTING:SOLVING SADDLE-POINT PROBLEMS WITH MULTIPLEREGULARIZERS", + "text_level": 1, + "bbox": [ + 176, + 98, + 823, + 171 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Anonymous authors Paper under double-blind review ", + "bbox": [ + 183, + 195, + 398, + 223 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT ", + "text_level": 1, + "bbox": [ + 454, + 261, + 544, + 275 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We present a new, stochastic variant of the projective splitting (PS) family of algorithms for monotone inclusion problems. It can solve min-max and noncooperative game formulations arising in applications such as robust ML without the convergence issues associated with gradient descent-ascent, the current de facto standard approach in ML applications. Our proposal is the first version of PS able to use stochastic gradient oracles. It can solve min-max games while handling multiple constraints and nonsmooth regularizers via projection and proximal operators. Unlike other stochastic splitting methods that can solve such problems, our method does not rely on a product-space reformulation of the original problem. We prove almost-sure convergence of the iterates to the solution and a convergence rate for the expected residual. By working with monotone inclusions rather than variational inequalities, our analysis avoids the drawbacks of measuring convergence through the restricted gap function. We close with numerical experiments on a distributionally robust sparse logistic regression problem. 
", + "bbox": [ + 233, + 295, + 766, + 489 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION ", + "text_level": 1, + "bbox": [ + 176, + 526, + 336, + 542 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The most prominent application of optimization in ML is empirical risk minimization. However, inspired by the success of GANs (Goodfellow et al., 2014). , ML practitioners have developed more complicated min-max and adversarial optimization formulations (Yu et al., 2021; Kuhn et al., 2019; Shafieezadeh-Abadeh et al., 2015; Sinha et al., 2018; Lin et al., 2020; Namkoong & Duchi, 2016; Huang et al., 2017; Wadsworth et al., 2018; Zhang et al., 2018; Edwards & Storkey, 2015; Celis & Keswani, 2019). Solving these multi-player games leads to issues not seen when minimizing a single-player loss function. The competitive nature of a game leads to rotational dynamics that can cause intuitive gradient-based methods to fail to converge (Gidel et al., 2019; Daskalakis et al., 2018; Hsieh et al., 2020). ", + "bbox": [ + 174, + 561, + 825, + 686 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "A mathematical framework underlying both convex optimization and saddle-point problems is the monotone inclusion problem; see Ryu & Boyd (2016) for an introduction. Methods developed for monotone inclusions will converge for convex-concave, games as they are explicitly designed to handle such problems’ governing dynamics. In recent years, monotone inclusion methods and theory have started to receive attention in the ML community (Diakonikolas, 2020; Liu et al., 2021; Ryu et al., 2020; Pathak & Wainwright, 2020), with a focus on monotone variational inequalities, which form a special case of monotone inclusions (Antonakopoulos et al., 2019; Gidel et al., 2019; Daskalakis et al., 2018; Hsieh et al., 2020; Mertikopoulos et al., 2019). 
", + "bbox": [ + 174, + 694, + 825, + 805 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The most prevalent methods for solving min-max games in ML are variants of gradient descent-ascent (GDA). This method alternates between a gradient-descent step for the minimizing player and a gradient-ascent step for the maximizing player. Unfortunately, GDA requires additional assumptions to converge on convex-concave games, and it even fails for some simple 2D bilinear games (Gidel et al., 2019, Prop. 1). While there have been several approaches to modify either GDA (Chavdarova et al., 2021; Grnarova et al., 2021; Balduzzi et al., 2018) or the underlying game objective (Mescheder et al., 2018; Nagarajan & Kolter, 2017; Mescheder et al., 2017) to ensure convergence, this paper instead develops a method for solving monotone inclusions that can naturally handle game dynamics. ", + "bbox": [ + 174, + 811, + 825, + 924 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Our approach builds upon the recently proposed projective splitting (PS) method with forward steps (Johnstone & Eckstein, 2020b). PS is designed specifically for solving monotone inclusions, thus does not fall prey to the convergence issues that plague GDA, at least for convex-concave games. PS is within the general class of projective splitting methods invented by Eckstein & Svaiter (2008) and developed further in Eckstein & Svaiter (2009); Alotaibi et al. (2014); Combettes & Eckstein (2018); Eckstein (2017); Johnstone & Eckstein (2019; 2021; 2020a). These methods work by creating a separating hyperplane between the current iterate and the solution and then moving closer to the solution by projecting the current iterate onto this hyperplane (see Section 3 for an overview). Other than being able to natively handle game dynamics, the primary advantage of PS is that it fully splits problems involving an arbitrary number of regularizers and constraints. 
β€œFull splitting” means that the method can handle multiple regularizers and constraints through their respective individual proximal and projection operators, along with the smooth terms via gradients. What makes this useful is that many of the regularizers used in ML have proximal operators that are relatively easy to compute; see for example Parikh & Boyd (2013). ", + "bbox": [ + 174, + 103, + 825, + 299 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Despite these advantages, the preexisting PS framework has a significant drawback: it requires deterministic gradient oracles. This feature makes it impractical for application to large datasets for which stochastic oracles may be the only feasible option. ", + "bbox": [ + 174, + 304, + 825, + 347 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Contributions The primary contribution of this work is a new projective splitting algorithm that allows for a stochastic gradient oracle. We call the method stochastic projective splitting (SPS). Our method β€œfully splits” the monotone inclusion problem ", + "bbox": [ + 174, + 369, + 825, + 412 + ], + "page_idx": 1 + }, + { + "type": "equation", + "img_path": "images/c1bd1c0f8dc091a9dfb57cd09b7ddaa5f6e265d93146a4b9d745949e39b5eee6.jpg", + "text": "$$\n\\begin{array} { r } { \\mathrm { F i n d } z \\in \\mathbb { R } ^ { d } \\mathrm { ~ s . t . ~ } 0 \\in \\sum _ { i = 1 } ^ { n } A _ { i } ( z ) + B ( z ) , } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 352, + 425, + 643, + 445 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where $B$ is monotone and $L$ -Lipschitz and each $A _ { i }$ is maximal monotone and typically set valued, usually arising from a constraint or a nonsmooth regularizer in the underlying optimization problem or game; see for example Ryu & Boyd (2016) for definitions. For some example ML applications of (1), see Section 2 and Appendix A. 
Here, an algorithm that β€œfully splits” (1) means one whose computational steps each involve only the individual operators $A _ { 1 } , \\ldots , A _ { n } , B$ . Ours is the first method that can accomplish full splitting without a product-space reformulation that recasts (1) as a two-operator problem on a higher-dimensional space, a tactic whose disadvantages are discussed in Appendix F.7. Our method interrogates the Lipschitz operator $B$ through a stochastic oracle. Previous methods splitting (1) have either required a deterministic oracle for $B$ , or have made far more restrictive assumptions on the noise or the operators (BriceΓ±o-Arias & Combettes, 2011; Combettes & Pesquet, 2012; Malitsky & Tam, 2020; Bot et al., 2019; Van Dung & Vu, 2021) than we will require below. However, the stochastic methods of Alacaoglu et al. (2021) and BΓΆhm et al. (2020), when combined with a product-space reformulation, can solve (1) when all the $A _ { i }$ are subdifferentials of convex functions; see Section 6. ", + "bbox": [ + 174, + 457, + 825, + 651 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "When moving away from a deterministic gradient oracle in projective splitting, a key difficulty is that the generated hyperplanes do not guarantee separation between the solution and the current point. We solve this issue by relaxing the projection: we only update each iterate in the direction of the noisy projection and scale its movement by a decreasing stepsize that allows for control of the stochastic error. Using the framework of stochastic quasi-FejΓ©r monotonicity (Combettes & Pesquet, 2015), we prove almost-sure convergence of the final iterate and do not require averaging of the iterates (Theorem 1, Section 5). We also provide a non-asymptotic convergence rate for the approximation residual (Theorem 2, Section 5). 
", + "bbox": [ + 174, + 659, + 825, + 770 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "A special case of SPS is the recently-developed Double Stepsize Extragradient Method (DSEG) (Hsieh et al., 2020). When $n = 0$ and therefore only $B$ is present in (1), DSEG and SPS coincide. Thus, our method extends DSEG to allow for regularizers and constraints. Our analysis also provides a new interpretation for DSEG as a special case of projective splitting. Our nonasymptotic convergence rate for SPS also applies to DSEG under no additional assumptions. By contrast, the original convergence rate analysis for DSEG requires either strong monotonicity or an error bound. ", + "bbox": [ + 174, + 776, + 825, + 861 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We close with numerical experiments on a distributionally robust sparse logistic regression problem. This is a nonsmooth convex-concave min-max problem which can be converted to (1) with $n = 2$ set-valued operators. On this problems class, SPS compares well to the possible alternative splitting methods. ", + "bbox": [ + 174, + 867, + 825, + 924 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Non-monotone problems The work of Hsieh et al. (2020) included a local convergence analysis for DSEG applied to locally monotone problems. For min-max problems, if the objective is locally convex-concave at a solution and DSEG is initialized in close proximity, then for small enough stepsizes it converges to the solution with high probability. It is possible to extend this result to SPS, along with our convergence rate analysis. This result is beyond the scope of this work, but Appendix J provides a proof sketch. 
", + "bbox": [ + 174, + 103, + 825, + 188 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 BACKGROUND ON MONOTONE INCLUSIONS ", + "text_level": 1, + "bbox": [ + 174, + 207, + 570, + 223 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Since they are so important to SPS, this section provides some background material regarding monotone inclusions, along with their connections to convex optimization, games, and ML. Appendix G discusses their connections to variational inequalities. For a more thorough treatment, we refer to Bauschke & Combettes (2017). See Appendix A for a longer discussion of the applications of monotone inclusions to ML along with several examples. ", + "bbox": [ + 174, + 236, + 825, + 308 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Fundamentals Let $f : \\mathbb { R } ^ { d } \\mathbb { R } \\cup \\{ \\infty \\}$ be closed, convex, and proper (CCP). Recall that its subdifferential $\\partial f$ is given by $\\partial f ( x ) \\ { \\overset { \\cdot } { = } } \\ \\left\\{ g : f ( y ) \\geq f ( x ) + g ^ { \\top } { \\big ( } { \\bar { y - x } } { \\big ) } \\right\\}$ . The map $\\partial f$ has the property ", + "bbox": [ + 173, + 320, + 825, + 364 + ], + "page_idx": 2 + }, + { + "type": "equation", + "img_path": "images/e5d9121f2c1a799e9c53e9c729f315d541087dc8bc4ecf0c54171c706b49f450.jpg", + "text": "$$\nu \\in \\partial f ( x ) , v \\in \\partial f ( y ) \\implies ( u - v ) ^ { \\top } ( x - y ) \\geq 0 ,\n$$", + "text_format": "latex", + "bbox": [ + 328, + 366, + 666, + 383 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "and any point-to-set map having this property is called a monotone operator. A monotone operator $T$ is called maximal if no additional points can be included in the image $T ( x )$ of any $\\boldsymbol { x } ^ { \\mathrm { ~ \\scriptsize ~ \\in ~ } \\mathbb { R } ^ { d } }$ without violating the above property (Bauschke & Combettes, 2017, Def. 20.20). 
Subgradient maps of CCP functions are maximal (Bauschke & Combettes, 2017, Thm. 20.25). A minimizer of $f$ is any $x ^ { * }$ such that $0 \\in \\partial f ( x ^ { * } )$ . This is perhaps the simplest example of a monotone inclusion, the problem of finding $x$ such that $0 \\in T ( x )$ , where $T$ is a monotone operator. If $f$ is smooth, then $\\bar { \\partial } f ( x ) = \\{ \\nabla f ( x ) \\}$ for all $x$ , and the monotone inclusion $0 \\in \\partial f ( x )$ is equivalent to the first-order optimality condition $0 = \\nabla f ( x )$ . ", + "bbox": [ + 173, + 385, + 825, + 497 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Under certain regularity conditions (Bauschke & Combettes, 2017, Cor. 16.5), minimizing a sum of CCP functions $f _ { 1 } , \\ldots , f _ { n }$ is equivalent to solving the monotone inclusion formed from the sum of their subdifferentials: ", + "bbox": [ + 174, + 502, + 823, + 545 + ], + "page_idx": 2 + }, + { + "type": "equation", + "img_path": "images/14ae43db097d468948ccb4029a45248686ecdb8144b85eee0324894fd7a0d72e.jpg", + "text": "$$\nx ^ { * } \\in \\underset { x \\in \\mathbb { R } ^ { d } } { \\arg \\operatorname* { m i n } } \\sum _ { i = 1 } ^ { n } f _ { i } ( x ) \\iff 0 \\in \\sum _ { i = 1 } ^ { n } \\partial f _ { i } ( x ^ { * } ) .\n$$", + "text_format": "latex", + "bbox": [ + 338, + 544, + 658, + 585 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "As throughout this paper for all set addition operations, the summation on the right-hand side of (2) is the Minkowski sum $\\textstyle \\sum _ { i = 1 } ^ { n } S _ { i } = \\{ \\sum _ { i = 1 } ^ { n } s _ { i } \\ | ^ { \\cdot } s _ { i } \\in S _ { i } \\forall i \\in { 1 . . n } \\}$ . 
For a convex set $X$ , a constraint $x \\in C$ for some convex set $C$ may be imposed by setting one of the $f _ { i }$ to be the indicator function $\\iota _ { C }$ , defined by $\\iota _ { C } ( x ) = 0$ for $x \\in C$ and $\\iota _ { C } \\bar { ( } x ) = \\dot { + } \\infty$ for $x \\not \\in C$ . Indicator functions of closed convex sets are CCP (Bauschke & Combettes, 2017, Ex. 1.25), and the subgradient map of $\\iota _ { C }$ is also referred to as the normal cone map $N _ { C }$ of $C$ (Bauschke & Combettes, 2017, Def. 6.37). Multiple constraints may be imposed by including multiple indicator functions in (2). ", + "bbox": [ + 173, + 587, + 825, + 685 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "ML applications The form (2) can be used to model ML problems with multiple constraints and/or nonsmooth regularizers, including sparse and overlapping group lasso (Jacob et al., 2009), sparse and low-rank matrix estimation problems (Richard et al., 2012), and rare feature selection (Yan & Bien, 2020); see Pedregosa & Gidel (2018) for an overview. ", + "bbox": [ + 173, + 698, + 825, + 755 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Games Consider a two-player noncooperative game in which each player tries to selfishly minimize its own loss, with each loss depending on the actions of both players. 
Typically, the goal is to find a Nash equilibrium, in which neither player can improve its loss by changing strategy: ", + "bbox": [ + 173, + 768, + 825, + 811 + ], + "page_idx": 2 + }, + { + "type": "equation", + "img_path": "images/066c4967ce6cb2c152fa3e359cdecbd6b06a5ad13d851c8b82eaa76e4bf72cfe.jpg", + "text": "$$\nx ^ { * } \\in \\arg \\operatorname* { m i n } _ { x \\in \\Theta } F ( x , y ^ { * } ) \\quad { \\mathrm { a n d } } \\quad y ^ { * } \\in \\arg \\operatorname* { m i n } _ { y \\in \\Omega } G ( x ^ { * } , y ) .\n$$", + "text_format": "latex", + "bbox": [ + 313, + 813, + 681, + 839 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Assuming that the admissible strategy sets $\\Theta \\subseteq \\mathbb { R } ^ { d _ { x } }$ and $\\Omega \\subseteq \\mathbb { R } ^ { d _ { y } }$ are closed and convex and that $F$ and $G$ are differentiable, then writing the first-order necessary conditions for each optimization problem in (3) yields ", + "bbox": [ + 174, + 843, + 825, + 886 + ], + "page_idx": 2 + }, + { + "type": "equation", + "img_path": "images/51b2c40c258ee95ad171fde6e9646d6c7b8aec1da1c21537eb6ca8f86d1adcd1.jpg", + "text": "$$\n0 \\in \\left[ \\begin{array} { l } { \\nabla _ { x } F ( x ^ { * } , y ^ { * } ) } \\\\ { \\nabla _ { y } G ( x ^ { * } , y ^ { * } ) } \\end{array} \\right] + \\big ( N _ { \\Theta } ( x ^ { * } ) \\times N _ { \\Omega } ( y ^ { * } ) \\big ) .\n$$", + "text_format": "latex", + "bbox": [ + 339, + 887, + 658, + 922 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "If $G = - F$ , then (3) is a min-max game. If $F$ is also convex in $x$ and concave in $y$ , then $B : ( x , y ) \\mapsto$ $( \\nabla _ { x } F ( x , y ) , - \\nabla _ { y } F ( x , y ) ) ^ { \\top }$ is monotone1 on $\\mathbb { R } ^ { d _ { x } + d _ { y } }$ (Rockafellar, 1970). In many applications, $B$ is also Lipschitz continuous. 
In this situation, (4) is a monotone inclusion involving two operators $B$ and $N _ { \\Theta \\times \\Omega }$ , with $B$ being Lipschitz. Using the simultaneous version of GDA on (3) is equivalent to applying the forward-backward method (FB) (Bauschke & Combettes, 2017, Thm. 26.14) to (4). However, convergence of FB requires that the operator $B$ be cocoercive (Bauschke & Combettes, 2017, Def. 4.10), and not merely Lipschitz (Bauschke & Combettes, 2017, Thm. 26.14). Thus, simultaneous GDA fails to converge for (3) without additional assumptions; see Gidel et al. (2019, Prop. 1) for a simple counterexample. ", + "bbox": [ + 173, + 103, + 825, + 229 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Regularizers and further constraints may be imposed by adding more operators to (4). For example, if one wished to apply a (nonsmooth) convex regularizer $r : \\bar { \\mathbb { R } } ^ { d _ { x } } \\bar { \\mathbb { R } } \\cup \\{ + \\infty \\}$ to the $x$ variables and a similar regularizer $d : \\mathbb { R } ^ { d _ { y } } \\mathbb { R } \\cup \\{ + \\infty \\}$ to the $y$ variables, one would add the operator $A _ { 2 } : ( x , y ) \\mapsto \\bar { \\partial r } ( x ) \\times \\partial d ( y )$ to the right-hand side of (4). ", + "bbox": [ + 174, + 236, + 825, + 292 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "ML applications of games Distributionally robust supervised learning (DRSL) is an emerging framework for improving the stability and reliability of ML models in the face of distributional shifts $\\mathrm { T u }$ et al., 2021; Kuhn et al., 2019; Shafieezadeh-Abadeh et al., 2015; Sinha et al., 2018; Lin et al., 2020; Namkoong & Duchi, 2016). Common approaches to DRSL formulate the problem as a min-max game between a learner selecting the model parameters and an adversary selecting a worst-case distribution subject to some ambiguity set around the observed empirical distribution. 
This min-max problem is often further reduced to either a finite-dimensional saddlepoint problem or a convex optimization problem. ", + "bbox": [ + 174, + 308, + 825, + 420 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "DRSL is a source of games with multiple constraints/regularizers. One such formulation, based on Yu et al. (2021), is discussed in the experiments below. The work in Namkoong & Duchi (2016) uses an ambiguity set based on $f$ -divergences, while Sinha et al. (2018) introduce a Lagrangian relaxation of the Wasserstein ball. When applied to models utilizing multiple regularizers (Jacob et al., 2009; Richard et al., 2012; Yan & Bien, 2020), both of these approaches lead to min-max problems with multiple regularizers. ", + "bbox": [ + 173, + 426, + 825, + 511 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Other applications of games in ML, although typically nonconvex, include generative adversarial networks (GANs) (Goodfellow et al., 2014; Arjovsky et al., 2017; Loizou et al., 2020; 2021; Mishchenko et al., 2020), fair classification (Wadsworth et al., 2018; Zhang et al., 2018; Edwards & Storkey, 2015; Celis & Keswani, 2019), and adversarial privacy (Huang et al., 2017). ", + "bbox": [ + 173, + 516, + 825, + 573 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Resolvents, proximal operators, and projections A fundamental computational primitive for solving monotone inclusions is the resolvent. The resolvent of a monotone operator $A$ is defined to be $J _ { A } \\overset { \\cdot } { = } ( I + A ) ^ { - 1 }$ , where $I$ is the identity operator and the inverse of any operator $T$ is simply $T ^ { - 1 } : x \\mapsto \\{ y : T y \\ni x \\}$ . If $A$ is maximal monotone, then for any $\\rho > 0$ , $J _ { \\rho A }$ is single valued, nonexpansive, and has domain equal to $\\mathbb { R } ^ { d }$ (Bauschke & Combettes, 2017, Thm. 21.1 and Prop. 23.8). 
Resolvents generalize proximal operators of convex functions: the proximal operator of a CCP function $f$ is ", + "bbox": [ + 173, + 588, + 825, + 689 + ], + "page_idx": 3 + }, + { + "type": "equation", + "img_path": "images/0975033848d3dbfd934ac0caaff44d0d137a8ac48e7b915b93976889346d26b3.jpg", + "text": "$$\n\\operatorname { p r o x } _ { \\rho f } ( t ) \\doteq \\underset { x \\in \\mathbb { R } ^ { d } } { \\arg \\operatorname* { m i n } } \\left\\{ \\rho f ( x ) + ( 1 / 2 ) \\| x - t \\| ^ { 2 } \\right\\} .\n$$", + "text_format": "latex", + "bbox": [ + 333, + 694, + 663, + 722 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "It is easily proved that $\\mathrm { p r o x } _ { \\rho f } = \\underset { - } { J } _ { \\rho \\partial f }$ . Like proximal operators, resolvents generalize projection onto convex sets: if $f = \\iota _ { \\mathcal { C } }$ , then $J _ { \\rho N _ { C } } = \\mathrm { p r o x } _ { \\rho f } = \\mathrm { p r o j } _ { \\mathcal { C } }$ for any $\\rho > 0$ . In many ML applications, proximal operators, and hence resolvents, are relatively straightforward to compute. For examples, see Parikh & Boyd (2013, Sec. 6). ", + "bbox": [ + 174, + 729, + 826, + 786 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Operator splitting methods Operator splitting methods attempt to solve monotone inclusions such as (1) by a sequence of operations that each involve only one of the operators $A _ { 1 } , \\ldots , A _ { n } , B$ . Such methods are often presented in the context of convex optimization problems like (2), but typically apply more generally to monotone inclusions such as (1). In the specific context of (1), each iteration of such a method ideally handles each $A _ { i }$ via its resolvent and the Lipschitz operator $B$ by explicit (not stochastic) evaluation. 
This is a feasible approach if the original problem can be decomposed in such a way that the resolvents of each $A _ { i }$ are relatively inexpensive to compute, and full evaluations of $B$ are possible. Although not discussed here, more general formulations in which matrices couple the arguments of the operators can broaden the applicability of operator splitting methods. ", + "bbox": [ + 173, + 801, + 825, + 887 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "", + "bbox": [ + 173, + 103, + 825, + 147 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3 THE PROJECTIVE SPLITTING FRAMEWORK ", + "text_level": 1, + "bbox": [ + 173, + 165, + 563, + 183 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Before introducing our proposed method, we give a brief introduction to the projective splitting class of methods. ", + "bbox": [ + 173, + 195, + 825, + 226 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The extended solution set Projective splitting is a primal-dual framework and operates in an extended space of primal and dual variables. Rather than directly finding a solution to (1), we find a point in the extended solution set (or Kuhn-Tucker set) ", + "bbox": [ + 173, + 239, + 825, + 282 + ], + "page_idx": 4 + }, + { + "type": "equation", + "img_path": "images/79069065cd35ebc2f3c96ad607dc0153687512f38137f08b268f350ae363cd2c.jpg", + "text": "$$\n\\begin{array} { r } { \\mathcal { S } \\doteq \\left\\{ ( z , w _ { 1 } , \\ldots , w _ { n + 1 } ) \\ \\middle | \\ w _ { i } \\in A _ { i } ( z ) \\forall i \\in 1 . . n , w _ { n + 1 } = B ( z ) , \\sum _ { i = 1 } ^ { n + 1 } w _ { i } = 0 \\right\\} . } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 227, + 287, + 769, + 315 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Given $p ^ { * } = ( z ^ { * } , w _ { 1 } ^ { * } \\ldots , w _ { n + 1 } ^ { * } ) \\in \\mathcal { S }$ , it is straightforward to see that $z ^ { * }$ solves (1). 
Conversely, given a solution $z ^ { * }$ to (1), there must exist $w _ { 1 } ^ { * } , \\ldots , w _ { n + 1 } ^ { * }$ such that $( z ^ { \\ast } , w _ { 1 } ^ { \\ast } , \\dots , w _ { n + 1 } ^ { \\ast } ) \\in \\mathcal { S }$ . Suppose $p ^ { * } = ( z ^ { * } , w _ { 1 } ^ { * } \\ldots , w _ { n + 1 } ^ { * } ) \\in \\mathcal { S }$ . Since $z ^ { * }$ solves (1), $z ^ { * }$ is typically referred to as a primal solution. The vectors $w _ { 1 } ^ { * } , \\ldots , w _ { n + 1 } ^ { * }$ solve a dual inclusion not described here, and are therefore called a dual solution. It can be shown that $s$ is closed and convex; see for example Johnstone $\\&$ Eckstein (2020b). We will assume throughout that a solution to (1) exists, therefore the set $s$ is nonempty. ", + "bbox": [ + 173, + 318, + 826, + 404 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Separator-projection framework Projective splitting methods are instances of the general separator-projection algorithmic framework for locating a member of a closed convex set $s$ within a linear space $\\mathcal { P }$ . Each iteration $k$ of algorithms drawn from this framework operates by finding a set $H _ { k }$ that separates the current iterate $p ^ { k } \\in \\mathcal { P }$ from $s$ , meaning that $s$ is entirely in the set and $p ^ { k }$ typically is not. One then attempts to β€œmove closer\" to $s$ by projecting the $p ^ { k }$ onto $H _ { k }$ . 
In the particular case of projective splitting applied to the problem (1) using (5), we select the space $\\mathcal { P }$ to be ", + "bbox": [ + 173, + 416, + 825, + 502 + ], + "page_idx": 4 + }, + { + "type": "equation", + "img_path": "images/a1095377e5ae298c91988ac15955b56f07116ecc2f868c284cdf4c1a9a693f68.jpg", + "text": "$$\n\\begin{array} { r } { \\mathcal { P } \\doteq \\left\\{ ( z , w _ { 1 } , \\ldots , w _ { n + 1 } ) \\in \\mathbb { R } ^ { ( n + 2 ) d } \\ \\Big | \\ \\sum _ { i = 1 } ^ { n + 1 } w _ { i } = 0 \\right\\} , } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 313, + 506, + 681, + 534 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "and each separating set $H _ { k }$ to be the half space $\\{ p \\in { \\mathcal { P } } \\mid \\varphi _ { k } ( p ) \\leq 0 \\}$ generated by an affine function $\\varphi _ { k } : \\mathscr { P } \\mathbb { R }$ . The general intention is to construct $\\varphi _ { k }$ such that $\\varphi _ { k } \\tilde { ( p ^ { k } ) } > 0$ , but $\\varphi _ { k } ( p ^ { * } ) \\leq 0$ for all $p ^ { * } \\in { \\mathcal { S } }$ . The construction employed for $\\varphi _ { k }$ in the case of (1) and (5) is of the form ", + "bbox": [ + 173, + 537, + 825, + 580 + ], + "page_idx": 4 + }, + { + "type": "equation", + "img_path": "images/d98303aa83501011e691713f8aca89e9bf4bd1b0fcade3c1504fc37c2ae25db5.jpg", + "text": "$$\n\\begin{array} { r } { \\varphi _ { k } ( z , w _ { 1 } , \\ldots , w _ { n + 1 } ) \\doteq \\sum _ { i = 1 } ^ { n + 1 } \\langle z - x _ { i } ^ { k } , y _ { i } ^ { k } - w _ { i } \\rangle } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 334, + 585, + 661, + 606 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "for some points $( x _ { i } ^ { k } , y _ { i } ^ { k } ) \\in \\mathbb { R } ^ { 2 d }$ , $i \\in { 1 . . ( n + 1 ) }$ , that must be carefully chosen (see below). 
Any function of the form (7) can be shown to be affine when restricted to $\\mathcal { P }$ . As mentioned above, the standard separator-projection algorithm obtains its next iterate $p ^ { k + 1 }$ by projecting $p ^ { k }$ onto $H _ { k }$ . This calculation involves the usual projection step for a half space, namely ", + "bbox": [ + 173, + 611, + 825, + 667 + ], + "page_idx": 4 + }, + { + "type": "equation", + "img_path": "images/f451a3c0c9822fe7b123aa3e52a07a6e8507a51f6d3a70528a872e52a6055a3d.jpg", + "text": "$$\np ^ { k + 1 } = p ^ { k } - \\alpha _ { k } \\nabla \\varphi _ { k } , \\quad \\mathrm { ~ w h e r e ~ } \\quad \\alpha _ { k } = \\varphi _ { k } ( p ^ { k } ) / \\| \\nabla \\varphi _ { k } \\| ^ { 2 } ,\n$$", + "text_format": "latex", + "bbox": [ + 303, + 672, + 692, + 691 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "and the gradient $\\nabla \\varphi _ { k }$ is computed relative to $\\mathcal { P }$ , thus resulting in $p ^ { k + 1 } \\ \\in \\ { \\mathcal { P } }$ , i.e. $\\nabla \\varphi _ { k } \\ =$ \n$\\left( \\sum _ { i = 1 } ^ { n + 1 } y _ { i } ^ { k } , x _ { 1 } ^ { k } - { \\bar { x } } ^ { k } , \\dots , x _ { n + 1 } - { \\bar { x } } ^ { k } \\right)$ where $\\begin{array} { r } { \\bar { x } ^ { k } = \\frac { 1 } { n + 1 } \\sum _ { i = 1 } ^ { n + 1 } x _ { i } ^ { k } } \\end{array}$ . ", + "bbox": [ + 174, + 698, + 825, + 736 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4 PROPOSED METHOD ", + "text_level": 1, + "bbox": [ + 176, + 752, + 377, + 770 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The proposed method is given in Algorithm 1 and called Stochastic Projective Splitting (SPS). Unlike prior versions of projective splitting, SPS does not employ the stepsize $\\alpha _ { k }$ of (8) that places the next iterate exactly on the hyperplane given by $\\varphi _ { k } ( p ) = 0$ . 
Instead, it simply moves in the direction $- \\nabla \\varphi _ { k }$ with a pre-defined stepsize $\\{ \\alpha _ { k } \\}$ . This fundamental change is required to deal with the stochastic noise on lines 6 and 8. This noise could lead to the usual choice of $\\alpha _ { k }$ defined in (8) being unstable and difficult to analyze. In order to guarantee convergence, the parameters $\\alpha _ { k }$ and $\\rho _ { k }$ must be chosen to satisfy certain conditions given below. Note that the gradient is calculated with respect to the subspace $\\mathcal { P }$ defined in (6); since the algorithm is initialized within $\\mathcal { P }$ , it remains in $\\mathcal { P }$ , within which $\\varphi _ { k }$ the updates on lines 9-10 are equivalent to . $\\boldsymbol { p } ^ { k + 1 } = \\boldsymbol { p } ^ { k } - \\alpha _ { k } \\nabla \\varphi _ { k }$ , where $\\mathbf { \\chi } ^ { \\dot { k } } = ( z ^ { k } , w _ { 1 } ^ { k } , \\dots , w _ { n + 1 } ^ { k } )$ ", + "bbox": [ + 173, + 784, + 825, + 926 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Note that SPS does not explicitly evaluate $\\varphi _ { k }$ , which is only used in the analysis, but it does keep track of $( x _ { i } ^ { k } , y _ { i } ^ { k } )$ for $i \\in { 1 . . ( n + 1 ) }$ . The algorithm’s memory requirements scale linearly with the number of nonsmooth operators $n$ in the inclusion (1), with the simplest implementation storing $( 3 n + 5 ) d$ working-vector elements. This requirement can be reduced to $( n + 7 ) d$ through a technique discussed in Appendix H. In most applications, $n$ will be small, for example 2 or 3. 
", + "bbox": [ + 173, + 103, + 825, + 174 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Updating $( x _ { i } ^ { k } , y _ { i } ^ { k } )$ The variables $( x _ { i } ^ { k } , y _ { i } ^ { k } )$ are updated on lines 3-8 of Algorithm 1, in which $e ^ { k }$ and $\\epsilon ^ { k }$ are $\\mathbb { R } ^ { d }$ -valued random variables defined on a probability space $( \\Omega , { \\mathcal { F } } , P )$ . For $B$ we use a new, noisy version of the two-forward-step procedure from Johnstone & Eckstein (2020b). For each $A _ { i }$ , $i \\in 1 . . n$ , we use the same resolvent step used in previous projective splitting papers, originating with (Eckstein & Svaiter, 2008). In the case $\\epsilon ^ { k } = e ^ { k } = 0$ , the selection of the $( \\bar { x _ { i } ^ { k } } , y _ { i } ^ { k } )$ is identical to that proposed by Johnstone & Eckstein (2020b), resulting in the hyperplane $\\{ p : { \\varphi } _ { k } ( p ) = 0 \\}$ strictly separating $p ^ { k }$ from $s$ . ", + "bbox": [ + 174, + 186, + 825, + 287 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "SPS achieves full splitting of (1): each $A _ { i }$ is processed separately using a resolvent and the Lipschitz term $B$ is processed via a stochastic gradient oracle. When the $A _ { i }$ arise from regularizers or constraints, as discussed in Section 2, their resolvents can be readily computed so long as their respective proximal/projection operators have a convenient form. ", + "bbox": [ + 174, + 294, + 825, + 351 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Noise assumptions Let $\\mathcal { F } _ { k } \\doteq \\sigma ( p ^ { 1 } , \\ldots , p ^ { k } )$ and $\\mathcal { E } _ { k } \\doteq \\sigma ( \\epsilon ^ { k } )$ . 
The stochastic estimators for the gradients, $r ^ { k }$ and $y _ { n + 1 } ^ { k }$ , are assumed to be unbiased, that is, the noise terms have mean 0 conditioned on the past: ", + "bbox": [ + 174, + 363, + 825, + 407 + ], + "page_idx": 5 + }, + { + "type": "equation", + "img_path": "images/3081ebd6cddf1d97a7d70c51bac354bb263f320314ed46ca2c98672af94e55e6.jpg", + "text": "$$\n\\mathbb { E } [ \\epsilon ^ { k } | \\mathcal { F } _ { k } ] = 0 , \\quad \\mathbb { E } [ e ^ { k } | \\mathcal { F } _ { k } ] = 0 \\quad a . s .\n$$", + "text_format": "latex", + "bbox": [ + 374, + 411, + 622, + 431 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We impose the following mild assumptions on the variance of the noise: ", + "bbox": [ + 173, + 435, + 647, + 449 + ], + "page_idx": 5 + }, + { + "type": "equation", + "img_path": "images/cdc25a2da250f527e64eb0fe1341d28f6d7f3c22cb3672361763f89c38ed5842.jpg", + "text": "$$\n\\begin{array} { r l } & { \\mathbb { E } \\left[ \\| \\epsilon ^ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } \\right] \\leq N _ { 1 } + N _ { 2 } \\| B ( z ^ { k } ) \\| ^ { 2 } \\quad a . s . } \\\\ & { \\mathbb { E } \\left[ \\| e ^ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } , \\mathcal { E } _ { k } \\right] \\leq N _ { 3 } + N _ { 4 } \\| B ( x _ { n + 1 } ^ { k } ) \\| ^ { 2 } \\quad a . s . , } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 331, + 453, + 663, + 496 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $0 \\le N _ { 1 } , N _ { 2 } , N _ { 3 } , N _ { 4 } < \\infty$ . We do not require $e ^ { k }$ and $\\epsilon ^ { k }$ to be independent of one another. ", + "bbox": [ + 169, + 498, + 800, + 515 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Stepsize choices The stepsizes $\\rho _ { k }$ and $\\alpha _ { k }$ are assumed to be deterministic. A constant stepsize choice which attains a non-asymptotic convergence rate will be considered in the next section (Theorem 2). 
The stepsize conditions we will impose to guarantee almost-sure convergence (Theorem 1) are ", + "bbox": [ + 173, + 529, + 823, + 585 + ], + "page_idx": 5 + }, + { + "type": "equation", + "img_path": "images/4c73912963288c7d940343cfda19b4261b37d3e3d2936871d49cc104e58e93db.jpg", + "text": "$$\n\\begin{array} { r } { \\sum _ { k = 1 } ^ { \\infty } \\alpha _ { k } \\rho _ { k } = \\infty , \\quad \\sum _ { k = 1 } ^ { \\infty } \\alpha _ { k } ^ { 2 } < \\infty , \\quad \\sum _ { k = 1 } ^ { \\infty } \\alpha _ { k } \\rho _ { k } ^ { 2 } < \\infty , \\mathrm { a n d } \\rho _ { k } \\leq \\overline { \\rho } < 1 / L . } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 228, + 589, + 766, + 608 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For example, in the case $L = 1$ , a particular choice which satisfies these constraints is ", + "bbox": [ + 174, + 612, + 738, + 626 + ], + "page_idx": 5 + }, + { + "type": "equation", + "img_path": "images/2a93bfcc389ad2be8fe6c2716602414c175dacdc0527cd1f6bf434b8d6a3b4ca.jpg", + "text": "$$\n\\alpha _ { k } = k ^ { - 0 . 5 - p } \\mathrm { f o r } 0 < p < 0 . 5 , \\mathrm { a n d } \\rho _ { k } = k ^ { - 0 . 5 + t } \\mathrm { f o r } p \\leq t < 0 . 5 p + 0 . 2 5 .\n$$", + "text_format": "latex", + "bbox": [ + 232, + 628, + 766, + 647 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For simplicity, the stepsizes $\\tau$ used for the resolvent updates in lines 3-5 are fixed, but they could be allowed to vary with both $i$ and $k$ so long as they have finite positive lower and upper bounds. 
", + "bbox": [ + 171, + 651, + 823, + 681 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Algorithm 1: Stochastic Projective Splitting (SPS) ", + "text_level": 1, + "bbox": [ + 173, + 707, + 509, + 723 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5 MAIN THEORETICAL RESULTS ", + "text_level": 1, + "bbox": [ + 174, + 102, + 460, + 118 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Theorem 1. Suppose $A _ { 1 } , \\ldots , A _ { n }$ are maximal monotone, $B$ is $L$ -Lipschitz and monotone, and a solution to (1) exists. For Algorithm $I$ , suppose (9)-(12) hold. Then with probability one it holds that $z ^ { k } \\to z ^ { * }$ , where $z ^ { * }$ solves (1). Further, with probability one, $x _ { i } ^ { k } \\to z ^ { * }$ for $i = 1 , \\ldots , n$ . ", + "bbox": [ + 173, + 132, + 825, + 175 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Proof sketch Theorem 1 is proved in Appendix C, but we provide a brief sketch here. The proof begins by deriving a simple recursion inspired by the analysis of SGD (Robbins & Monro, 1951). Since $p ^ { k + 1 } = p ^ { k } - \\alpha _ { k } \\nabla \\bar { \\varphi } _ { k }$ , a step of projective splitting can be viewed as GD applied to the affine hyperplane generator function $\\varphi _ { k }$ . 
Thus, for any $p ^ { * } \\in \\mathcal { P }$ , ", + "bbox": [ + 174, + 188, + 826, + 244 + ], + "page_idx": 6 + }, + { + "type": "equation", + "img_path": "images/e9ddae0d18b9ce62d16ecc0ec72b85c806b24ad288b9ad8f501dfc8a5fc344aa.jpg", + "text": "$$\n\\begin{array} { r l } & { \\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } = \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - 2 \\alpha _ { k } \\langle \\nabla \\varphi _ { k } , p ^ { k } - p ^ { * } \\rangle + \\alpha _ { k } ^ { 2 } \\| \\nabla \\varphi _ { k } \\| ^ { 2 } } \\\\ & { \\qquad = \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - 2 \\alpha _ { k } ( \\varphi _ { k } ( p ^ { k } ) - \\varphi _ { k } ( p ^ { * } ) ) + \\alpha _ { k } ^ { 2 } \\| \\nabla \\varphi _ { k } \\| ^ { 2 } } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 266, + 246, + 718, + 286 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where in the second equation we have used that $\\varphi _ { k } ( p )$ is affine on $\\mathcal { P }$ . The basic strategy is to show that, for any $p ^ { * } \\in { \\mathcal { S } }$ , ", + "bbox": [ + 173, + 286, + 825, + 315 + ], + "page_idx": 6 + }, + { + "type": "equation", + "img_path": "images/f4535f8ce716c78230c228f54c23983fe999b54cec031462a4223df15a3eec87.jpg", + "text": "$$\n\\begin{array} { r } { \\mathbb { E } [ \\| \\nabla \\varphi _ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } ] \\le C _ { 1 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + C _ { 2 } \\quad a . s . } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 348, + 316, + 650, + 335 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "for some $C _ { 1 } , C _ { 2 } > 0$ . This condition allows one to establish stochastic quasi-FejΓ©r monotonicity (SQFM) (Combettes & Pesquet, 2015, Proposition 2.3) of the iterates to $s$ . One consequence of SQFM is that with probability one there exists a subsequence $v _ { k }$ such that $\\varphi _ { v _ { k } } ( p ^ { v _ { k } } ) - \\varphi _ { v _ { k } } ( p ^ { * } )$ converges to 0. 
Furthermore, roughly speaking, we show that $\\varphi _ { k } ( p ^ { k } ) - \\varphi _ { k } ( p ^ { * } )$ provides an upper bound on the following β€œapproximation residual\" for SPS: ", + "bbox": [ + 173, + 337, + 825, + 407 + ], + "page_idx": 6 + }, + { + "type": "equation", + "img_path": "images/555c16bd22323c3486a9de0ea0a7f9817356db176ef7628602fe218a3e87fd0e.jpg", + "text": "$$\n\\begin{array} { r } { G _ { k } \\doteq \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } . } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 271, + 407, + 725, + 426 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "provides an approximation error for SPS, as formalized in the following lemma: ", + "bbox": [ + 186, + 428, + 722, + 441 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Lemma 1. For SPS, $p ^ { k } = ( z ^ { k } , w _ { 1 } ^ { k } , \\ldots , w _ { n + 1 } ^ { k } ) \\in \\mathcal { S }$ if and only if $G _ { k } = 0$ ", + "bbox": [ + 174, + 443, + 658, + 460 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Since $y _ { i } ^ { k } \\ \\in \\ A _ { i } ( x _ { i } ^ { k } )$ for $i \\in 1 . . n$ , having $G _ { k } ~ = ~ 0$ implies that $z ^ { k } = x _ { i } ^ { k }$ , $w _ { i } ^ { k } \\ = \\ y _ { i } ^ { k }$ , and thus $w _ { i } ^ { k } \\in A _ { i } ( z ^ { k } )$ for $i \\in 1 . . n$ . Since $w _ { n + 1 } ^ { k } = B ( z ^ { k } )$ and $\\textstyle \\sum _ { i = 1 } ^ { n + 1 } w _ { i } ^ { k } = 0$ , it follows that $z ^ { k }$ solves (1). The reverse direction is proved in Appendix $\\mathrm { D }$ . ", + "bbox": [ + 174, + 462, + 821, + 507 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The quantity $G _ { k }$ generalizes the role played by the norm of the gradient in algorithms for smooth optimization. 
In particular, in the special case where $n = 0$ and $\\bar { B } ( z ) = \\nabla f ( z )$ for some smooth convex function $f$ , one has $G _ { k } = \\| \\bar { \\nabla } f ( z ^ { k } ) \\| ^ { 2 }$ . ", + "bbox": [ + 174, + 513, + 825, + 558 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Combining the properties of $G _ { k }$ with other results following from SQFM (such as boundedness) will allow us to derive almost-sure convergence of the iterates to a solution of (1). ", + "bbox": [ + 174, + 563, + 823, + 592 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Convergence rate We can also establish non-asymptotic convergence rates for the approximation residual $G _ { k }$ : ", + "bbox": [ + 174, + 604, + 823, + 633 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Theorem 2. Fix the total iterations $K \\geq 1$ of Algorithm 1 and set ", + "bbox": [ + 174, + 636, + 611, + 651 + ], + "page_idx": 6 + }, + { + "type": "equation", + "img_path": "images/68ff0df76206fb41826b63420b4d14be9fa2c1fc4096f719e7f57855a358d996.jpg", + "text": "$$\n\\forall k = 1 , \\ldots , K : \\rho _ { k } = \\rho \\doteq \\operatorname* { m i n } \\left\\{ K ^ { - 1 / 4 } , 1 / 2 L \\right\\} \\quad \\ a n d \\quad \\alpha _ { k } = C _ { f } \\rho ^ { 2 }\n$$", + "text_format": "latex", + "bbox": [ + 263, + 651, + 735, + 679 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "for some $C _ { f } > 0$ . Suppose (9)-(11) hold. 
Then ", + "bbox": [ + 173, + 679, + 482, + 694 + ], + "page_idx": 6 + }, + { + "type": "equation", + "img_path": "images/122c1b59b2769b397d0f512f754d8c65538b5ab3a8389fe57dcecc2cdee4c54a.jpg", + "text": "$$\n\\begin{array} { r } { ( 1 / K ) { \\sum } _ { j = 1 } ^ { K } \\mathbb { E } [ G _ { j } ] = \\mathcal { O } ( K ^ { - 1 / 4 } ) } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 387, + 695, + 611, + 717 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where the constants are given (along with the proof) in Appendix $E$ . ", + "bbox": [ + 173, + 718, + 616, + 733 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Theorem 2 implies that if we pick an iterate $J$ uniformly at random from $1 . . K$ , then the expected value of $G _ { J }$ is $\\mathcal { O } ( K ^ { - 1 / 4 } )$ . As far as we know, this is the first convergence rate for a stochastic fullsplitting method solving (1) in the general discontinuous (i.e. set-valued) monotone inclusion case, and it is not clear whether it can be improved, either by a better analysis or a better method. Faster rates are certainly possible for deterministic methods under various continuity assumptions; Tseng’s method obtains $\\bar { \\mathcal { O } } ( K ^ { - 1 } )$ rate (Monteiro $\\&$ Svaiter, 2010) and the accelerated Halpern iteration under Lipschitz continuity obtains $\\mathcal { O } ( K ^ { - 2 } )$ rate (Diakonikolas, 2020). While our rate may seem slow, it is worth remembering that (1) features $n$ discontinuous operators $A _ { i }$ , so we expect rates at least as slow as nonsmooth convex optimization, but perhaps worse because (1) is far more general than convex optimization. For a different error metric, the restricted gap function, in the special case of variational inequalities, faster rates have been established in Juditsky et al. (2011) and BΓΆhm et al. (2020). 
However, it is unclear how to relate the restricted gap function to $G _ { k }$ , so these rates may not be directly comparable to Theorem 2. ", + "bbox": [ + 173, + 741, + 826, + 924 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "6 RELATED WORK ", + "text_level": 1, + "bbox": [ + 176, + 102, + 343, + 117 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Arguably the three most popular classes of operator splitting algorithms are forward-backward splitting (FB) (Combettes & Pesquet, 2011), Douglas-Rachford splitting (DR) (Lions & Mercier, 1979), and Tseng’s method (Tseng, 2000). The extragradient method (EG) is similar to Tseng’s method, but has more projection steps per iteration and only applies to variational inequalities (Korpelevich, 1977; Nemirovski, 2004; Li et al., 2021). The popular Alternating Direction Method of Multipliers (ADMM), in its standard form, is a dual application of DR (Gabay, 1983). The three-operator splitting method (Davis & Yin, 2017) can only be applied to (1) if $B$ is cocoercive rather than merely Lipchitz, and thus its usefulness is mostly limited to optimization applications and not games. FB, DR, and Tseng’s method apply to monotone inclusions involving two operators, with varying assumptions on one of the operators. It is possible to derive splitting methods for the more complicated inclusion (1), involving more than two operators, by applying an appropriate 2-operator splitting method such as Tseng’s method to a product-space reformulation (PSR) (BriceΓ±o-Arias & Combettes, 2011; Combettes & Pesquet, 2012) (for more on PSR, see Appendix F). The recently developed forward-reflected-backward (FRB) method (Malitsky & Tam, 2020) can be used in the same way. However, there are several disadvantages to using a PSR, as discussed in Appendix F.7. ", + "bbox": [ + 174, + 136, + 825, + 343 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "By using a PSR, the stochastic methods of Alacaoglu et al. 
(2021) and BΓΆhm et al. (2020) can be applied to (1) in the case that each $A _ { i }$ is a subdifferential. Both of these methods are analyzed in terms of the restricted gap function. This merit function has a drawback compared with our approximation residual in that it requires one to find a bound for the iterates. However, Alacaoglu et al. (2021) and BΓΆhm et al. (2020) do not provide such a bound, meaning that their convergence rate results are somewhat incomplete. We discuss this issue in Appendix G. ", + "bbox": [ + 174, + 349, + 825, + 434 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Theoretical convergence of the method of BΓΆhm et al. (2020) requires the use of averaging, since the final iterate does not converge for certain problems (Hsieh et al., 2020). Empirically, averaging tends to be slow and to destroy regularizer-induced structural properties such as sparsity or low matrix rank, so its utility is largely theoretical and it is usually avoided in practice. Furthermore, averaging loses even its theoretical benefits for nonconvex problems, so its use in such cases is rarer still. Another drawback of the analysis of BΓΆhm et al. (2020) is that, unlike in SPS, the resolvent (proximal) stepsizes also need to vanish. ", + "bbox": [ + 174, + 441, + 825, + 539 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The method of Alacaoglu et al. (2021) applies variance reduction techniques to FRB. It only applies to finite-sum problems and requires the periodic computation of a full batch gradient, making it somewhat less flexible and scalable than our method. On the other hand, it has an accelerated ergodic rate for the restricted gap function in the variational inequality setting. We compare the empirical performance of SPS with Alacaoglu et al. (2021), BΓΆhm et al. (2020), and several deterministic methods using PSR in the numerical experiments described in Section 7. 
", + "bbox": [ + 174, + 545, + 825, + 628 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Additional related work is discussed in Appendix B. ", + "bbox": [ + 176, + 636, + 516, + 650 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "7 EXPERIMENTS ", + "text_level": 1, + "bbox": [ + 176, + 674, + 326, + 690 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We now present some numerical results on distributionally robust supervised learning (DRSL) problems. We follow the approach of Yu et al. (2021), which introduced a min-max formulation of Wasserstein DRSL. While other approaches reduce the problem to convex optimization, Yu et al. (2021) reduce it to a finite-dimensional min-max problem amenable to the use of stochastic methods on large datasets. However, unlike our proposed SPS method, the variance-reduced extragradient method that Yu et al. (2021) propose cannot handle multiple nonsmooth regularizers or constraints on the model parameters. Consequently, we consider distributionally robust sparse logistic regression (DRSLR), a problem class equivalent to that considered in Yu et al. (2021), but with an added $\\ell _ { 1 }$ regularizer, a standard tool to induce sparsity. See the Appendix I for the full problem definition. ", + "bbox": [ + 174, + 708, + 825, + 833 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We compared our SPS method to several methods for solving DRSLR for a collection of real datasets from the LIBSVM repository (Chang & Lin, 2011). We implemented SPS with $\\alpha _ { k } = C _ { d } k ^ { - 0 . 5 1 }$ and $\\rho _ { k } = C _ { d } k ^ { - 0 . 2 5 }$ and called it SPS-decay. We also implement SPS with the fixed stepsize given in (15) and called it SPS-fixed. 
We compared the method to deterministic projective splitting (Johnstone & Eckstein, 2020b) and the following methods based on PSR: Tseng’s method (Tseng, 2000; Combettes & Pesquet, 2012), the forward-reflected-backward (FRB) method (Malitsky & Tam, 2020), the stochastic Tseng (S-Tseng) method of BΓΆhm et al. (2020), and the variance-reduced stochastic FRB method (Alacaoglu et al., 2021), abbreviated FRB-VR. The S-Tseng and FRB-VR algorithms appear to be the only stochastic splitting methods other than SPS applicable to the tested problem class. ", + "bbox": [ + 174, + 840, + 825, + 922 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/5a3352bf1be8622af7e8437ef0309b7c8e82b213e948eae1d016add4ab16fe33.jpg", + "image_caption": [ + "Figure 1: Approximation residual versus running time for three LIBSVM benchmark datasets, with the markers at 10-iteration intervals. Left: epsilon, middle: SUSY, right: real-sim. For the stochastic algorithms (SPS, S-Tseng, and FRB-VR), we plot the median results over 10 trials, with unit standard deviation horizontal error bars for the running time and the vertical error bars displaying the min-to-max range of the approximation residual. The code is provided in the supplementary material. " + ], + "image_footnote": [], + "bbox": [ + 178, + 103, + 818, + 223 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "", + "bbox": [ + 176, + 352, + 821, + 393 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Figure 1 show results for three LIBSVM standard datasets: epsilon2 $m = 4 \\cdot 1 0 ^ { 5 }$ , $d = 2 0 0 0 \\mathrm { \\Omega }$ ), SUSY (Baldi et al., 2014; Dua & Graff, 2017) $m = 2 \\cdot 1 0 ^ { 6 }$ , $d = 1 8$ ), and real-sim3 ( $m = 7 2 { , } 3 0 9$ , $d = 2 0 { , } 9 5 8 _ { , }$ ). ", + "bbox": [ + 173, + 400, + 826, + 443 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "To measure the progress of the algorithms, we used the β€œapproximation residual” $R _ { k }$ defined in Appendix F. 
As with $G _ { k }$ , having $R _ { k } = 0$ implies that $z ^ { k }$ solves (1). We use $R _ { k }$ instead of $G _ { k }$ because it is also possible to compute essentially the same measure of convergence from the iterates of the other tested algorithms, establishing a fair comparison. Appendix F provides the details of the derivation of the residual measure for each algorithm, explores the relationship between $R _ { k }$ and $G _ { k }$ , and provides additional implementation details. ", + "bbox": [ + 173, + 450, + 825, + 534 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Figure 1 plots the approximation residual versus running time for all seven algorithms under consideration. The computations were performed using Python 3.8.3 and numpy on a 2019 MacBook Pro with a 2.4GHz 8-core Intel I9 processor and 32GB of RAM . Being a stochastic method, SPS-decay seems to outperform the deterministic methods at obtaining a medium-accuracy solution quickly. It also seems to outperform the stochastic PSR-based methods S-Tseng and FRB-VR. ", + "bbox": [ + 174, + 541, + 825, + 609 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "8 CONCLUSIONS AND FUTURE WORK ", + "text_level": 1, + "bbox": [ + 174, + 637, + 504, + 654 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We have developed and analyzed a stochastic splitting method that can handle min-max problems with multiple regularizers and constraints. Going forward, this development should make it possible to incorporate regularizers and constraints into adversarial formulations trained from large datasets. ", + "bbox": [ + 174, + 672, + 825, + 714 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Recent versions of deterministic projective splitting (Combettes & Eckstein, 2018; Johnstone & Eckstein, 2020b) allow for asynchronous and incremental operation, meaning that not all operators need to be activated at every iteration, with some calculations proceeding with stale inputs. 
Such characteristics make projective splitting well-suited to distributed implementations. Many of our SPS results may be extended to allow for these variations, but we leave those extensions to future work. ", + "bbox": [ + 174, + 722, + 825, + 791 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "REFERENCES ", + "text_level": 1, + "bbox": [ + 176, + 819, + 285, + 834 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Ahmet Alacaoglu, Yura Malitsky, and Volkan Cevher. Forward-reflected-backward method with variance reduction. Computational Optimization and Applications, 2021. Available online. ", + "bbox": [ + 174, + 847, + 825, + 875 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Abdullah Alotaibi, Patrick L Combettes, and Naseer Shahzad. Solving coupled composite monotone inclusions by successive FejΓ©r approximations of their Kuhn-Tucker set. SIAM Journal on Optimization, 24(4):2076–2095, 2014. ", + "bbox": [ + 176, + 103, + 825, + 146 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Kimon Antonakopoulos, Veronica Belmega, and Panayotis Mertikopoulos. An adaptive mirrorprox method for variational inequalities with singular operators. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'AlchΓ©-Buc, E. Fox, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 32. Curran Associates, 2019. ", + "bbox": [ + 174, + 155, + 826, + 210 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Martin Arjovsky, Soumith Chintala, and LΓ©on Bottou. Wasserstein generative adversarial networks. In Doina Precup and Yee Whye Teh (eds.), Proceedings of the 34th International Conference on Machine Learning, volume 70 of Proceedings of Machine Learning Research, pp. 214–223, 06–11 Aug 2017. ", + "bbox": [ + 173, + 219, + 825, + 276 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Pierre Baldi, Peter Sadowski, and Daniel Whiteson. Searching for exotic particles in high-energy physics with deep learning. 
Nature communications, 5(1):1–9, 2014. ", + "bbox": [ + 173, + 284, + 825, + 314 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "David Balduzzi, Sebastien Racaniere, James Martens, Jakob Foerster, Karl Tuyls, and Thore Graepel. The mechanics of $n$ -player differentiable games. In Jennifer Dy and Andreas Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, volume 80 of Proceedings of Machine Learning Research, pp. 354–363. PMLR, 10–15 Jul 2018. ", + "bbox": [ + 173, + 321, + 826, + 378 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Heinz H Bauschke and Patrick L Combettes. Convex analysis and monotone operator theory in Hilbert spaces. Springer, 2nd edition, 2017. ", + "bbox": [ + 174, + 386, + 823, + 416 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Axel BΓΆhm, Michael Sedlmayer, ErnΓΆ Robert Csetnek, and Radu Ioan BoΒΈt. Two steps at a time β€” taking GAN training in stride with Tseng’s method. arXiv preprint arXiv:2006.09033, 2020. ", + "bbox": [ + 173, + 424, + 826, + 454 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Radu Ioan Bot, Panayotis Mertikopoulos, Mathias Staudigl, and Phan Tu Vuong. Forward-backwardforward methods with variance reduction for stochastic variational inequalities. arXiv preprint arXiv:1902.03355, 2019. ", + "bbox": [ + 173, + 462, + 826, + 505 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Luis M BriceΓ±o-Arias and Patrick L Combettes. A monotone+skew splitting model for composite monotone inclusions in duality. SIAM Journal on Optimization, 21(4):1230–1250, 2011. ", + "bbox": [ + 171, + 512, + 823, + 542 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Luis M BriceΓ±o-Arias and Patrick L Combettes. Monotone operator methods for Nash equilibria in non-potential games. In Computational and Analytical Mathematics, volume 50 of Springer Proceedings in Mathematics and Statistics, pp. 143–159. Springer, 2013. 
", + "bbox": [ + 173, + 550, + 821, + 594 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "L Elisa Celis and Vijay Keswani. Improved adversarial learning for fair classification. arXiv preprint arXiv:1901.10443, 2019. ", + "bbox": [ + 169, + 602, + 825, + 631 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Chih-Chung Chang and Chih-Jen Lin. LIBSVM: A library for support vector machines. ACM Transactions on Intelligent Systems and Technology, 2:27:1–27:27, 2011. Software available at http://www.csie.ntu.edu.tw/\\~cjlin/libsvm. ", + "bbox": [ + 176, + 638, + 823, + 681 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Tatjana Chavdarova, Matteo Pagliardini, Sebastian U Stich, FranΓ§ois Fleuret, and Martin Jaggi. Taming GANs with lookahead-minmax. In International Conference on Learning Representations, 2021. URL https://openreview.net/forum?id $=$ ZW0yXJyNmoG. ", + "bbox": [ + 174, + 689, + 823, + 733 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Patrick L. Combettes and Jonathan Eckstein. Asynchronous block-iterative primal-dual decomposition methods for monotone inclusions. Mathematical Programming, 168(1-2):645–672, 2018. ", + "bbox": [ + 169, + 741, + 823, + 770 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Patrick L Combettes and Jean-Christophe Pesquet. Proximal splitting methods in signal processing. In H.H. Bauschke, R.S.S. Burachik, P.L. Combettes, V. Elser, D.R. Luke, and H. Wolkowicz (eds.), Fixed-Point Algorithms for Inverse Problems in Science and Engineering, pp. 185–212. Springer, 2011. ", + "bbox": [ + 173, + 779, + 826, + 835 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Patrick L Combettes and Jean-Christophe Pesquet. Primal-dual splitting algorithm for solving inclusions with mixtures of composite, Lipschitzian, and parallel-sum type monotone operators. Set-Valued and variational analysis, 20(2):307–330, 2012. 
", + "bbox": [ + 173, + 843, + 823, + 887 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Patrick L Combettes and Jean-Christophe Pesquet. Stochastic quasi-FejΓ©r block-coordinate fixed point iterations with random sweeping. SIAM Journal on Optimization, 25(2):1221–1248, 2015. ", + "bbox": [ + 173, + 895, + 820, + 924 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Constantinos Daskalakis, Andrew Ilyas, Vasilis Syrgkanis, and Haoyang Zeng. Training GANs with optimism. In International Conference on Learning Representations, 2018. URL https: //openreview.net/forum?id ${ . } =$ SJJySbbAZ. ", + "bbox": [ + 176, + 103, + 825, + 146 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Damek Davis and Wotao Yin. A three-operator splitting scheme and its optimization applications. Set-Valued and Variational Analysis, 25(4):829–858, 2017. ", + "bbox": [ + 174, + 156, + 823, + 186 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Jelena Diakonikolas. Halpern iteration for near-optimal and parameter-free monotone inclusion and strong solutions to variational inequalities. In Conference on Learning Theory, pp. 1428–1451. PMLR, 2020. ", + "bbox": [ + 173, + 196, + 826, + 239 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Dheeru Dua and Casey Graff. UCI machine learning repository, 2017. URL http://archive. ics.uci.edu/ml. ", + "bbox": [ + 171, + 250, + 825, + 280 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Jonathan Eckstein. A simplified form of block-iterative operator splitting and an asynchronous algorithm resembling the multi-block alternating direction method of multipliers. Journal of Optimization Theory and Applications, 173(1):155–182, 2017. ", + "bbox": [ + 173, + 290, + 825, + 334 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Jonathan Eckstein and Benar Fux Svaiter. A family of projective splitting methods for the sum of two maximal monotone operators. 
Mathematical Programming, 111(1):173–199, 2008. ", + "bbox": [ + 173, + 343, + 825, + 373 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Jonathan Eckstein and Benar Fux Svaiter. General projective splitting methods for sums of maximal monotone operators. SIAM Journal on Control and Optimization, 48(2):787–811, 2009. ", + "bbox": [ + 173, + 383, + 823, + 412 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Harrison Edwards and Amos Storkey. Censoring representations with an adversary. arXiv preprint arXiv:1511.05897, 2015. ", + "bbox": [ + 173, + 422, + 825, + 453 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Daniel Gabay. Applications of the method of multipliers to variational inequalities. In M. Fortin and R. Glowinski (eds.), Augmented Lagrangian Methods: Applications to the Solution of Boundary Value Problems, chapter IX, pp. 299–340. North-Holland, Amsterdam, 1983. ", + "bbox": [ + 174, + 463, + 825, + 507 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Gauthier Gidel, Hugo Berard, GaΓ«tan Vignoud, Pascal Vincent, and Simon Lacoste-Julien. A variational inequality perspective on generative adversarial networks. In International Conference on Learning Representations, 2019. URL https://openreview.net/forum?id $=$ r1laEnA5Ym. ", + "bbox": [ + 173, + 517, + 826, + 574 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In Z. Ghahramani, M. Welling, C. Cortes, N. Lawrence, and K. Q. Weinberger (eds.), Advances in Neural Information Processing Systems, volume 27. Curran Associates, 2014. ", + "bbox": [ + 174, + 584, + 826, + 641 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Paulina Grnarova, Yannic Kilcher, Kfir Y Levy, Aurelien Lucchi, and Thomas Hofmann. Generative minimization networks: Training GANs without competition. 
arXiv preprint arXiv:2103.12685, 2021. ", + "bbox": [ + 174, + 651, + 825, + 695 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Patrick T Harker and Jong-Shi Pang. Finite-dimensional variational inequality and nonlinear complementarity problems: a survey of theory, algorithms and applications. Mathematical programming, 48(1):161–220, 1990. ", + "bbox": [ + 174, + 707, + 826, + 748 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Yu-Guan Hsieh, Franck Iutzeler, JΓ©rΓ΄me Malick, and Panayotis Mertikopoulos. On the convergence of single-call stochastic extra-gradient methods. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'AlchΓ©-Buc, E. Fox, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 32. Curran Associates, 2019. ", + "bbox": [ + 174, + 760, + 826, + 816 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Yu-Guan Hsieh, Franck Iutzeler, JΓ©rΓ΄me Malick, and Panayotis Mertikopoulos. Explore aggressively, update conservatively: Stochastic extragradient methods with variable stepsize scaling. In H. Larochelle, M. Ranzato, R. Hadsell, M. F. Balcan, and H. Lin (eds.), Advances in Neural Information Processing Systems, volume 33, pp. 16223–16234. Curran Associates, 2020. ", + "bbox": [ + 174, + 827, + 825, + 885 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Chong Huang, Peter Kairouz, Xiao Chen, Lalitha Sankar, and Ram Rajagopal. Context-aware generative adversarial privacy. Entropy, 19(12):656, 2017. ", + "bbox": [ + 174, + 895, + 823, + 924 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Laurent Jacob, Guillaume Obozinski, and Jean-Philippe Vert. Group lasso with overlaps and graph lasso. In LΓ©on Bottou and Michael Littman (eds.), Proceedings of the 26th International Conference on Machine Learning, pp. 433–440, Montreal, June 2009. Omnipress. 
", + "bbox": [ + 174, + 103, + 823, + 146 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Patrick R Johnstone and Jonathan Eckstein. Convergence rates for projective splitting. SIAM Journal on Optimization, 29(3):1931–1957, 2019. ", + "bbox": [ + 171, + 156, + 823, + 185 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Patrick R Johnstone and Jonathan Eckstein. Projective splitting with forward steps only requires continuity. Optimization Letters, 14(1):229–247, 2020a. ", + "bbox": [ + 173, + 195, + 823, + 224 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Patrick R Johnstone and Jonathan Eckstein. Projective splitting with forward steps. Mathematical Programming, 2020b. Published online, to appear in print. ", + "bbox": [ + 174, + 234, + 823, + 263 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Patrick R Johnstone and Jonathan Eckstein. Single-forward-step projective splitting: exploiting cocoercivity. Computational Optimization and Applications, 78(1):125–166, 2021. ", + "bbox": [ + 173, + 273, + 823, + 303 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Anatoli Juditsky, Arkadi Nemirovski, and Claire Tauvel. Solving variational inequalities with stochastic mirror-prox algorithm. Stochastic Systems, 1(1):17–58, 2011. ", + "bbox": [ + 173, + 313, + 825, + 342 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "GM Korpelevich. Extragradient method for finding saddle points and other problems. Matekon, 13 (4):35–49, 1977. ", + "bbox": [ + 171, + 352, + 823, + 381 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Daniel Kuhn, Peyman Mohajerin Esfahani, Viet Anh Nguyen, and Soroosh Shafieezadeh-Abadeh. Wasserstein distributionally robust optimization: Theory and applications in machine learning. In Serguei Netessine (ed.), Operations Research & Management Science in the Age of Analytics, Tutorials in Operations Research, pp. 130–166. INFORMS, 2019. 
", + "bbox": [ + 174, + 390, + 826, + 448 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Chris Junchi Li, Yaodong Yu, Nicolas Loizou, Gauthier Gidel, Yi Ma, Nicolas Le Roux, and Michael I Jordan. On the convergence of stochastic extragradient for bilinear games with restarted iteration averaging. arXiv preprint arXiv:2107.00464, 2021. ", + "bbox": [ + 174, + 458, + 825, + 501 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Tianyi Lin, Chi Jin, and Michael Jordan. On gradient descent ascent for nonconvex-concave minimax problems. In Hal DaumΓ© III and Aarti Singh (eds.), Proceedings of the 37th International Conference on Machine Learning, volume 119 of Proceedings of Machine Learning Research, pp. 6083–6093. PMLR, 2020. ", + "bbox": [ + 174, + 510, + 826, + 566 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Pierre-Louis Lions and Bertrand Mercier. Splitting algorithms for the sum of two nonlinear operators. SIAM Journal on Numerical Analysis, 16(6):964–979, 1979. ", + "bbox": [ + 168, + 577, + 825, + 607 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Mingrui Liu, Hassan Rafique, Qihang Lin, and Tianbao Yang. First-order convergence theory for weakly-convex-weakly-concave min-max problems. Journal of Machine Learning Research, 22 (169):1–34, 2021. ", + "bbox": [ + 173, + 616, + 825, + 660 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Nicolas Loizou, Hugo Berard, Alexia Jolicoeur-Martineau, Pascal Vincent, Simon Lacoste-Julien, and Ioannis Mitliagkas. Stochastic hamiltonian gradient methods for smooth games. In International Conference on Machine Learning, pp. 6370–6381. PMLR, 2020. ", + "bbox": [ + 174, + 669, + 826, + 712 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Nicolas Loizou, Hugo Berard, Gauthier Gidel, Ioannis Mitliagkas, and Simon Lacoste-Julien. Stochastic gradient descent-ascent and consensus optimization for smooth games: Convergence analysis under expected co-coercivity. 
arXiv preprint arXiv:2107.00052, 2021. ", + "bbox": [ + 174, + 722, + 826, + 765 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Yura Malitsky and Matthew K Tam. A forward-backward splitting method for monotone inclusions without cocoercivity. SIAM Journal on Optimization, 30(2):1451–1472, 2020. ", + "bbox": [ + 173, + 775, + 823, + 804 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Panayotis Mertikopoulos, Bruno Lecouat, Houssam Zenati, Chuan-Sheng Foo, Vijay Chandrasekhar, and Georgios Piliouras. Optimistic mirror descent in saddle-point problems: Going the extra(- gradient) mile. In International Conference on Learning Representations, 2019. URL https: //openreview.net/pdf?id=Bkg8jjC9KQ. ", + "bbox": [ + 174, + 814, + 826, + 872 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Lars Mescheder, Sebastian Nowozin, and Andreas Geiger. The numerics of GANs. In I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 30. Curran Associates, 2017. ", + "bbox": [ + 174, + 882, + 825, + 924 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Lars Mescheder, Andreas Geiger, and Sebastian Nowozin. Which training methods for GANs do actually converge? In Jennifer Dy and Andreas Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, volume 80 of Proceedings of Machine Learning Research, pp. 3481–3490. PMLR, 2018. ", + "bbox": [ + 174, + 103, + 826, + 160 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Konstantin Mishchenko, Dmitry Kovalev, Egor Shulgin, Peter RichtΓ‘rik, and Yura Malitsky. Revisiting stochastic extragradient. In International Conference on Artificial Intelligence and Statistics, pp. 4573–4582. PMLR, 2020. ", + "bbox": [ + 174, + 170, + 826, + 213 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Aryan Mokhtari, Asuman E Ozdaglar, and Sarath Pattathil. 
Convergence rate of $\\mathbf { o } ( 1 / \\mathrm { k } )$ for optimistic gradient and extragradient methods in smooth convex-concave saddle point problems. SIAM Journal on Optimization, 30(4):3230–3251, 2020. ", + "bbox": [ + 173, + 223, + 826, + 265 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Renato DC Monteiro and Benar Fux Svaiter. On the complexity of the hybrid proximal extragradient method for the iterates and the ergodic mean. SIAM Journal on Optimization, 20(6):2755–2787, 2010. ", + "bbox": [ + 173, + 275, + 825, + 318 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Vaishnavh Nagarajan and J. Zico Kolter. Gradient descent GAN optimization is locally stable. In I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 30. Curran Associates, 2017. ", + "bbox": [ + 179, + 328, + 825, + 372 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Hongseok Namkoong and John C Duchi. Stochastic gradient methods for distributionally robust optimization with $f$ -divergences. In D. Lee, M. Sugiyama, U. Luxburg, I. Guyon, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 29. Curran Associates, 2016. ", + "bbox": [ + 174, + 381, + 826, + 424 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Arkadi Nemirovski. Prox-method with rate of convergence $\\mathrm { O } ( 1 / t )$ for variational inequalities with Lipschitz continuous monotone operators and smooth convex-concave saddle point problems. SIAM Journal on Optimization, 15(1):229–251, 2004. ", + "bbox": [ + 174, + 434, + 826, + 477 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Yurii Nesterov. Dual extrapolation and its applications to solving variational inequalities and related problems. Mathematical Programming, 109(2):319–344, 2007. 
", + "bbox": [ + 174, + 487, + 823, + 516 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Neal Parikh and Stephen Boyd. Proximal algorithms. Foundations and Trends in Optimization, 1(3): 123–231, 2013. ", + "bbox": [ + 173, + 525, + 823, + 554 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Reese Pathak and Martin J Wainwright. Fedsplit: an algorithmic framework for fast federated optimization. In H. Larochelle, M. Ranzato, R. Hadsell, M. F. Balcan, and H. Lin (eds.), Advances in Neural Information Processing Systems, volume 33, pp. 7057–7066. Curran Associates, Inc., 2020. URL https://proceedings.neurips.cc/paper/2020/file/ 4ebd440d99504722d80de606ea8507da-Paper.pdf. ", + "bbox": [ + 174, + 564, + 826, + 636 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Fabian Pedregosa and Gauthier Gidel. Adaptive three-operator splitting. In Jennifer Dy and Andreas Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, volume 80 of Proceedings of Machine Learning Research, pp. 4085–4094. PMLR, 10–15 Jul 2018. ", + "bbox": [ + 173, + 645, + 825, + 688 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Fabian Pedregosa, Kilian Fatras, and Mattia Casotto. Proximal splitting meets variance reduction. In Kamalika Chaudhuri and Masashi Sugiyama (eds.), Proceedings of the Twenty-Second International Conference on Artificial Intelligence and Statistics, volume 89 of Proceedings of Machine Learning Research, pp. 1–10. PMLR, 16–18 Apr 2019. ", + "bbox": [ + 173, + 696, + 825, + 755 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Emile Richard, Pierre-Andre Savalle, and Nicolas Vayatis. Estimation of simultaneously sparse and low rank matrices. In John Langford and Joelle Pineau (eds.), Proceedings of the 29th International Conference on Machine Learning, pp. 1351–1358. Omnipress, 2012. 
", + "bbox": [ + 174, + 763, + 823, + 808 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Herbert Robbins and Sutton Monro. A stochastic approximation method. The annals of mathematical statistics, pp. 400–407, 1951. ", + "bbox": [ + 169, + 818, + 825, + 847 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "R Tyrrell Rockafellar. Monotone operators associated with saddle-functions and minimax problems. Nonlinear functional analysis, 18(part 1):397–407, 1970. ", + "bbox": [ + 171, + 856, + 823, + 886 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Ernest K Ryu and Stephen Boyd. Primer on monotone operator methods. Appl. Comput. Math, 15(1): 3–43, 2016. ", + "bbox": [ + 171, + 895, + 823, + 924 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Ernest K. Ryu, Kun Yuan, and Wotao Yin. Ode analysis of stochastic gradient methods with optimism and anchoring for minimax problems, 2020. ", + "bbox": [ + 171, + 103, + 825, + 132 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Gesualdo Scutari, Francisco Facchinei, Jong-Shi Pang, and Daniel P Palomar. Real and complex monotone communication games. IEEE Transactions on Information Theory, 60(7):4197–4231, 2014. ", + "bbox": [ + 174, + 141, + 823, + 184 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Soroosh Shafieezadeh-Abadeh, Peyman Mohajerin Esfahani, and Daniel Kuhn. Distributionally robust logistic regression. In Corinna Cortes, Neil D. Lawrence, Daniel D. Lee, Masashi Sugiyama, and Roman Garnett (eds.), Advances in Neural Information Processing Systems, volume 28, pp. 1576–1584. Curran Associates, 2015. ", + "bbox": [ + 174, + 194, + 826, + 251 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Aman Sinha, Hongseok Namkoong, and John Duchi. Certifying some distributional robustness with principled adversarial training. In International Conference on Learning Representations, 2018. URL https://openreview.net/forum?id $=$ Hk6kPgZA-. 
", + "bbox": [ + 173, + 260, + 826, + 304 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Paul Tseng. A modified forward-backward splitting method for maximal monotone mappings. SIAM Journal on Control and Optimization, 38(2):431–446, 2000. ", + "bbox": [ + 174, + 313, + 825, + 342 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Nguyen Van Dung and Bang Cong Vu. Convergence analysis of the stochastic reflected forwardbackward splitting algorithm. arXiv preprint arXiv:2102.08906, 2021. ", + "bbox": [ + 173, + 352, + 825, + 381 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Christina Wadsworth, Francesca Vera, and Chris Piech. Achieving fairness through adversarial learning: an application to recidivism prediction. arXiv preprint arXiv:1807.00199, 2018. ", + "bbox": [ + 174, + 390, + 823, + 420 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Xiaohan Yan and Jacob Bien. Rare feature selection in high dimensions. Journal of the American Statistical Association, 2020. Published online, to appear in print. ", + "bbox": [ + 173, + 429, + 823, + 458 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Yaodong Yu, Tianyi Lin, Eric Mazumdar, and Michael I Jordan. Fast distributionally robust learning with variance reduced min-max optimization. arXiv preprint arXiv:2104.13326, 2021. ", + "bbox": [ + 171, + 467, + 825, + 497 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Alp Yurtsever, Bang Cong Vu, and Volkan Cevher. Stochastic three-composite convex minimization. In D. Lee, M. Sugiyama, U. Luxburg, I. Guyon, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 29. Curran Associates, 2016. ", + "bbox": [ + 174, + 506, + 825, + 549 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Brian Hu Zhang, Blake Lemoine, and Margaret Mitchell. Mitigating unwanted biases with adversarial learning. In Proceedings of the 2018 AAAI/ACM Conference on AI, Ethics, and Society, pp. 
335– 340, 2018. ", + "bbox": [ + 174, + 559, + 826, + 602 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A ML APPLICATIONS OF THE MONOTONE INCLUSION (1) ", + "text_level": 1, + "bbox": [ + 176, + 628, + 666, + 645 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "There are two main classes of applications of (1) in ML: optimization problems and saddle-point games. ", + "bbox": [ + 174, + 660, + 825, + 689 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Optimization Problems In this case the monotone inclusion arises from finding the zero of a sum of subgradients of convex functions, as discussed in Section 2. It is typical in ML to solve the empirical risk minimization problem ", + "bbox": [ + 173, + 704, + 825, + 747 + ], + "page_idx": 13 + }, + { + "type": "equation", + "img_path": "images/ffa360eabdcc6b4c5c52b0b2d269d8c4934e3297e827c1444fa980cafc1ff442.jpg", + "text": "$$\n\\operatorname* { m i n } _ { x \\in \\mathbb { R } ^ { d } } \\frac { 1 } { m } \\sum _ { j = 1 } ^ { m } f _ { j } ( x ) + \\sum _ { i = 1 } ^ { n } r _ { i } ( x )\n$$", + "text_format": "latex", + "bbox": [ + 397, + 753, + 601, + 796 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "over a size- $m$ dataset. Usually, the gradient of the loss function $f _ { j }$ for each datapoint $j$ is Lipschitz continuous. The terms $r _ { i }$ may be regularizers used to reduce overfitting or encourage structural properties such as sparsity or low matrix rank. They also may represent constraints on the parameters such as nonnegativity or the being in the probability simplex. Crucially, these regularizers are rarely differentiable. 
The first-order necessary condition for the solution of (16) is ", + "bbox": [ + 173, + 803, + 825, + 875 + ], + "page_idx": 13 + }, + { + "type": "equation", + "img_path": "images/d195774013d745d0c2effdff7e030c1cff3ad83794cc046f14aa129a6de7e84c.jpg", + "text": "$$\n0 \\in \\nabla f ( x ^ { * } ) + \\sum _ { i = 1 } ^ { n } \\partial r _ { i } ( x ^ { * } ) ,\n$$", + "text_format": "latex", + "bbox": [ + 405, + 881, + 591, + 921 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "where $\\begin{array} { r } { f ( x ) \\doteq \\frac { 1 } { m } \\sum _ { j = 1 } ^ { m } f _ { j } ( x ) } \\end{array}$ , thus $\\begin{array} { r } { \\nabla f ( x ) \\doteq \\frac { 1 } { m } \\sum _ { j = 1 } ^ { m } \\nabla f _ { j } ( x ) } \\end{array}$ . The inclusion (17) is a special case of (1), and our method may use the standard stochastic oracle for $\\nabla f ( x )$ , namely ", + "bbox": [ + 169, + 102, + 825, + 136 + ], + "page_idx": 14 + }, + { + "type": "equation", + "img_path": "images/1abda774af20927066c1bb8d4cd95642bf6bbabf6bfd459bd0cbabc54a594cbd.jpg", + "text": "$$\n\\frac { 1 } { | \\mathbf { B } | } \\sum _ { j \\in \\mathbf { B } } \\nabla f _ { j } ( z )\n$$", + "text_format": "latex", + "bbox": [ + 442, + 140, + 553, + 180 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "which subsamples a randomly selected minibatch of datapoints $\\mathbf { B } \\in \\{ 1 , \\dots , m \\}$ . 
", + "bbox": [ + 171, + 185, + 704, + 202 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Games Consider the following nonsmooth Nash equilibrium problem ", + "bbox": [ + 171, + 215, + 645, + 232 + ], + "page_idx": 14 + }, + { + "type": "equation", + "img_path": "images/3835fb4d271dfca3981f89d019dadd6c40e1099ade9cd99e3cb6abe535e31b94.jpg", + "text": "$$\nx ^ { * } \\in \\underset { x \\in \\mathbb { R } ^ { d _ { x } } } { \\arg \\operatorname* { m i n } } F ( x , y ^ { * } ) + \\underset { i = 1 } { \\overset { n _ { 1 } } { \\sum } } r _ { i } ( x ) \\quad \\mathrm { a n d } \\quad y ^ { * } \\in \\underset { y \\in \\mathbb { R } ^ { d _ { y } } } { \\arg \\operatorname* { m i n } } G ( x ^ { * } , y ) + \\underset { i = 1 } { \\overset { n _ { 2 } } { \\sum } } d _ { i } ( y ) .\n$$", + "text_format": "latex", + "bbox": [ + 233, + 236, + 764, + 279 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The terms player’s st $\\scriptstyle \\sum _ { i = 1 } ^ { n _ { 1 } } r _ { i } ( x )$ and e tha $\\textstyle \\sum _ { i = 1 } ^ { n _ { 2 } } d _ { i } ( y )$ once again represent regularizers and constrai (saddle-point) problems correspond to having $F ( x , y ) =$ $- G ( x , y )$ . 
Under appropriate convexity conditions and constraint qualifications, the solutions of (18) correspond to the solutions of the following monotone inclusion in the form of (1): ", + "bbox": [ + 173, + 285, + 823, + 342 + ], + "page_idx": 14 + }, + { + "type": "equation", + "img_path": "images/644d2f5d529de231d7c117704777c5bd94a9573875ac622a3b507fc340e62377.jpg", + "text": "$$\n0 \\in \\left[ \\begin{array} { l } { \\nabla _ { x } F ( x ^ { * } , y ^ { * } ) } \\\\ { \\nabla _ { y } G ( x ^ { * } , y ^ { * } ) } \\end{array} \\right] + \\sum _ { i = 1 } ^ { \\operatorname* { m a x } \\{ n _ { 1 } , n _ { 2 } \\} } \\left( \\partial r _ { i } ( x ^ { * } ) \\times \\partial d _ { i } ( y ^ { * } ) \\right)\n$$", + "text_format": "latex", + "bbox": [ + 305, + 348, + 691, + 393 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "where for $i > \\operatorname* { m i n } \\{ n _ { 1 } , n _ { 2 } \\}$ we include β€œdummy functions\", either $r _ { i } ( x ) = 0$ when $n _ { 1 } < n _ { 2 }$ or $d _ { i } ( y ) = 0$ when $n _ { 1 } < n _ { 2 }$ . If the functions $F$ and $G$ arise as averages in the same we as $f$ in (16), then our method may again use a stochastic oracle for them. ", + "bbox": [ + 173, + 398, + 826, + 443 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Distributionally-Robust ML One example application of (19) is distributionally-robust ML, as demonstrated in the numerical experiment in Section 7. The full problem statement is given in Appendix I. ", + "bbox": [ + 173, + 455, + 826, + 500 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Lagrangian Duality Another application of (19) is constrained optimization via Lagrangian duality. 
Consider ", + "bbox": [ + 171, + 513, + 825, + 542 + ], + "page_idx": 14 + }, + { + "type": "equation", + "img_path": "images/d905604cf0d9c0fd12213290386cea68bb91f729ffa0dcacd0ca4687d3b34ba3.jpg", + "text": "$$\n\\operatorname* { m i n } _ { x \\in \\mathbb { R } ^ { d } } \\left\\{ f ( x ) + \\sum _ { i = 1 } ^ { n } r _ { i } ( x ) \\right\\} \\quad { \\mathrm { s . t . } } \\quad h _ { j } ( x ) \\leq 0 \\quad j = 1 , \\ldots , p .\n$$", + "text_format": "latex", + "bbox": [ + 297, + 546, + 700, + 589 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "As in (16), $f$ is a loss function and the $r _ { i }$ may represent regularizers and (β€œsimple”) constraints; in addition, there are $p$ functional constraints on the model parameters $x$ . Introducing Lagrange multipliers $\\gamma \\in \\mathbb { R } ^ { p }$ , the problem can be written as ", + "bbox": [ + 174, + 594, + 826, + 637 + ], + "page_idx": 14 + }, + { + "type": "equation", + "img_path": "images/6f1d0d2520d76b3f705f5e0e4853e9aef959d5ce1f0ffce0278502590125761a.jpg", + "text": "$$\n\\operatorname* { m i n } _ { x \\in \\mathbb { R } ^ { d } } \\operatorname* { m a x } _ { \\gamma \\in \\mathbb { R } _ { + } ^ { p } } \\left\\{ f ( x ) + \\sum _ { i = 1 } ^ { n } r _ { i } ( x ) + \\sum _ { j = 1 } ^ { p } \\gamma _ { j } h _ { j } ( x ) \\right\\} .\n$$", + "text_format": "latex", + "bbox": [ + 336, + 642, + 660, + 693 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Under appropriate convexity conditions and constraint-qualifications, this reduces to the following inclusion in the form of (1): ", + "bbox": [ + 173, + 699, + 823, + 727 + ], + "page_idx": 14 + }, + { + "type": "equation", + "img_path": "images/b24a37447f0a36eb774b118eba4af8fa43b5567c971ac8b148aed8ad4587ab58.jpg", + "text": "$$\n0 \\in \\left[ \\begin{array} { c } { \\nabla f ( x ) + \\sum _ { j = 1 } ^ { p } \\gamma _ { j } \\nabla h _ { j } ( x ) } \\\\ { - h ( x ) } \\end{array} \\right] + \\sum _ { i = 1 } ^ { n } 
\\left( \\partial r _ { i } ( x ^ { * } ) \\times \\{ 0 \\} \\right)\n$$", + "text_format": "latex", + "bbox": [ + 297, + 732, + 697, + 775 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "where $h ( \\boldsymbol { x } ) = [ h _ { 1 } ( \\boldsymbol { x } ) , h _ { 2 } ( \\boldsymbol { x } ) , \\ldots , h _ { p } ( \\boldsymbol { x } ) ] ^ { \\top }$ . For certain choices of $h$ , such as linear or quadratic functions, the first term above is monotone and (locally) Lipschitz continuous (Alacaoglu et al., 2021). ", + "bbox": [ + 173, + 780, + 826, + 824 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Bilinear Games with Many Constraints Finally, consider the bilinear saddlepoint problem subject to multiple constraints: ", + "bbox": [ + 173, + 838, + 823, + 867 + ], + "page_idx": 14 + }, + { + "type": "equation", + "img_path": "images/a7f8278313a1b7a9eeda762e5abb2c69250ff81b90c6b828cfcb59e258f2e904.jpg", + "text": "$$\n\\begin{array} { l l l } { \\underset { x \\in \\mathbb { R } ^ { d } } { \\operatorname* { m i n } } \\underset { y \\in \\mathbb { R } ^ { d } } { \\operatorname* { m a x } } x ^ { \\top } D y } & { \\mathrm { s . t . } } & { x \\in \\mathcal { C } _ { j } ^ { 1 } } & { j = 1 , \\dots , n _ { 1 } , } \\\\ & { } & { y \\in \\mathcal { C } _ { j } ^ { 2 } } & { j = 1 , \\dots , n _ { 2 } . 
} \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 334, + 872, + 661, + 924 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Under some regularity conditions, this problem reduces to the inclusion ", + "bbox": [ + 173, + 103, + 643, + 118 + ], + "page_idx": 15 + }, + { + "type": "equation", + "img_path": "images/20fd1ca98bfe912a0917aa73b18d66131356e9479819ab4a7df1087f717f3a64.jpg", + "text": "$$\n0 \\in \\left[ \\begin{array} { c } { D y ^ { * } } \\\\ { - D ^ { \\top } x ^ { * } } \\end{array} \\right] + \\sum _ { j = 1 } ^ { \\operatorname* { m a x } \\{ n _ { 1 } , n _ { 2 } \\} } \\big ( N _ { { \\mathcal C } _ { j } ^ { 1 } } ( x ^ { * } ) \\times N _ { { \\mathcal C } _ { j } ^ { 2 } } ( y ^ { * } ) \\big ) ,\n$$", + "text_format": "latex", + "bbox": [ + 313, + 123, + 679, + 170 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "where we introduce additional β€œdummy” sets $\\mathcal { C } _ { j } ^ { 1 } = \\mathbb { R } ^ { d }$ or $\\mathcal { C } _ { j } ^ { 2 } = \\mathbb { R } ^ { d }$ when $n _ { 1 } \\neq n _ { 2 }$ . The first term is linear and skew symmetric, and therefore can easily be shown to be Lipschitz continuous and monotone. If all the constraint sets are closed and convex, then the rest of the terms are maximal monotone, then the problem is of the form (1), meaning that projective splitting may be applied, possibly using a stochastic oracle for the first term. ", + "bbox": [ + 173, + 174, + 826, + 246 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "B ADDITIONAL RELATED WORK ", + "text_level": 1, + "bbox": [ + 176, + 265, + 460, + 281 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The preprint by Bot et al. (2019) develops a stochastic version of Tseng’s method under the requirement that the noise variance converges to 0. In ML, this could be achieved with the use of perpetually increasing batch sizes, a strategy that is impractical in many scenarios. 
The stochastic version of FRB proposed by Van Dung & Vu (2021) has more practical noise requirements, but has stronger assumptions on the problem which are rarely satisfied in ML applications: either uniform/strong monotonicity or a bounded domain. The papers by Yurtsever et al. (2016) and Pedregosa et al. (2019) consider stochastic variants of three-operator splitting, but require $B$ in (1) to be cocoercive, essentially restricting them to optimization problems. ", + "bbox": [ + 173, + 296, + 826, + 409 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "There are several alternatives to the (stochastic) extragradient method that reduce the number of gradient evaluations per iteration from two to one (Hsieh et al., 2019; Malitsky & Tam, 2020; Gidel et al., 2019). However, these methods have more stringent stepsize limits, making it unclear a priori whether they will outperform two-step methods. ", + "bbox": [ + 174, + 415, + 825, + 472 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "DSEG is a stochastic version of EG (Hsieh et al., 2020). The primary innovation of DSEG is using different stepsizes for the extrapolation and update steps, thereby resolving some of the convergence issues affecting stochastic EG. As noted earlier, DSEG is the special case of our SPS method in which $n = 0$ , that is, no regularizers/constraints are present in the underlying game. The analysis in (Hsieh et al., 2020) also did not consider the fixed stepsize choice given in Theorem 2. ", + "bbox": [ + 174, + 477, + 825, + 547 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "In the context of GANs, several methods have been developed based on a variational inequality/monotone inclusion approach (Gidel et al., 2019; Daskalakis et al., 2018; Hsieh et al., 2019; 2020; BΓΆhm et al., 2020). 
Many of these papers point out that variational inequalities provide a principled framework for studying the GAN training problem and correcting some of the flaws in the standard method GDA. ", + "bbox": [ + 174, + 554, + 825, + 625 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "C PROOF OF THEOREM 1 ", + "text_level": 1, + "bbox": [ + 176, + 643, + 397, + 660 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "C.1 STOCHASTIC QUASI-FEJER MONOTONICITY ", + "text_level": 1, + "bbox": [ + 174, + 675, + 522, + 690 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The key to the analysis is showing that the algorithm satisfies Stochastic Quasi-Fejer Monotonicity (Combettes & Pesquet, 2015). ", + "bbox": [ + 173, + 702, + 823, + 731 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Lemma 2 ((Combettes & Pesquet, 2015), Proposition 2.3). Suppose $p ^ { k }$ is a sequence of $\\mathbb { R } ^ { d }$ -valued random variables defined on a probability space $( \\Omega , { \\mathcal { F } } , P )$ . Let $\\mathcal { F } _ { k } \\overset { \\cdot } { = } \\sigma ( p ^ { 1 } , \\cdot \\cdot \\cdot , p ^ { k } )$ . 
Let $F$ be a closed subset of $\mathbb { R } ^ { d }$ such that, for every $p \in F$ , there exist sequences $\chi ^ { k } ( p ) \geq 0$ , $\eta ^ { k } ( p ) \geq 0$ , and $\nu ^ { k } ( p ) \geq 0$ with $\sum _ { k = 1 } ^ { \infty } \chi ^ { k } ( p ) < \infty$ and $\sum _ { k = 1 } ^ { \infty } \eta ^ { k } ( p ) < \infty$ , such that
", + "bbox": [ + 207, + 893, + 825, + 924 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "C.2 IMPORTANT RECURSION FOR SPS ", + "text_level": 1, + "bbox": [ + 176, + 103, + 452, + 118 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The following lemma summarizes the key recursion satisfied by Algorithm 1, to which we will apply Lemma 2. Recall that $L$ is the Lipschitz constant of $B$ . ", + "bbox": [ + 173, + 128, + 823, + 159 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Lemma 3. For Algorithm $I$ , suppose (9)–(11) hold and ", + "bbox": [ + 173, + 160, + 544, + 176 + ], + "page_idx": 16 + }, + { + "type": "equation", + "img_path": "images/c862cbc29560226484ac3f9ae67fa80a7012a73774759885a9a7b90983eb3ecf.jpg", + "text": "$$\n\\rho _ { k } \\leq \\overline { { \\rho } } < 1 / L .\n$$", + "text_format": "latex", + "bbox": [ + 446, + 179, + 550, + 195 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Let ", + "bbox": [ + 173, + 199, + 199, + 213 + ], + "page_idx": 16 + }, + { + "type": "equation", + "img_path": "images/5d23dc14d367047bfe73ff60a08031457aa8aeee336050d8f37988eb80795f5f.jpg", + "text": "$$\nT _ { k } \\doteq \\frac { \\tau } { \\overline { { \\rho } } } \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\frac { 1 } { \\overline { { \\rho } } \\tau } \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + 2 \\big ( 1 - \\overline { { \\rho } } L \\big ) \\| B \\big ( z ^ { k } \\big ) - w _ { n + 1 } ^ { k } \\| ^ { 2 }\n$$", + "text_format": "latex", + "bbox": [ + 236, + 213, + 759, + 256 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "then for all $p ^ { * } \\in { \\mathcal { S } }$ , with probability one ", + "bbox": [ + 173, + 257, + 439, + 272 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "$\\begin{array} { r } { \\mathbb { E } [ \\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } | \\mathcal { F } _ { k } ] \\le \\big ( 1 + C _ { 1 } 
\\alpha _ { k } ^ { 2 } + C _ { 3 } \\alpha _ { k } \\rho _ { k } ^ { 2 } \\big ) \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - \\alpha _ { k } \\rho _ { k } T _ { k } + C _ { 2 } \\alpha _ { k } ^ { 2 } + C _ { 4 } \\alpha _ { k } \\rho _ { k } ^ { 2 } } \\end{array}$ (21) where $C _ { 1 } , \\ldots , C _ { 4 }$ are nonegative constants defined in (33), (34), (48), and (49) below, respectively. ", + "bbox": [ + 171, + 275, + 821, + 313 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Note that $T _ { k }$ is a scaled version of the approximation residual $G _ { k }$ defined in (14). ", + "bbox": [ + 174, + 320, + 707, + 337 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We proceed to first prove Lemma 3 and then exploit the implications of Lemma 2. Referring to (10) and (11), let $N \\doteq \\mathrm { m a x } _ { j \\in 1 \\ldots 4 } N _ { j }$ . To simplify the constants, we will use $N$ in place of $N _ { j }$ for the noise variance bounds given in (10)-(11). ", + "bbox": [ + 173, + 343, + 826, + 386 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "C.3 UPPER BOUNDING THE GRADIENT ", + "text_level": 1, + "bbox": [ + 176, + 401, + 459, + 416 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Throughout the analysis, we fix some $p ^ { * } = ( z ^ { * } , w _ { 1 } ^ { * } \\ldots , w _ { n + 1 } ^ { * } ) \\in \\mathcal { S }$ . All statements are with probability one (almost surely), but for brevity we will omit this unless it needs to be emphasized. ", + "bbox": [ + 171, + 426, + 825, + 457 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "In this section, we derive appropriate upper bounds for $\\| \\nabla \\varphi _ { k } \\| ^ { 2 }$ to use in (13). 
We begin with $\\nabla _ { z } \\varphi _ { k }$ ", + "bbox": [ + 173, + 462, + 823, + 478 + ], + "page_idx": 16 + }, + { + "type": "equation", + "img_path": "images/2fda7ecb6ddff8f05e5d2ad354a335aa0e69097667148b35a03b6b9c5479dce8.jpg", + "text": "$$\n\\begin{array} { r l r } & { } & { \\displaystyle \\| \\nabla _ { z } \\varphi _ { k } \\| ^ { 2 } = \\Big \\| \\displaystyle \\sum _ { i = 1 } ^ { n + 1 } y _ { i } ^ { k } \\Big \\| ^ { 2 } \\leq 2 \\| y _ { n + 1 } ^ { k } \\| ^ { 2 } + 2 \\Big \\| \\displaystyle \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\Big \\| ^ { 2 } = 2 \\Big \\| B ( x _ { n + 1 } ^ { k } ) + e ^ { k } \\Big \\| ^ { 2 } + 2 \\Big \\| \\displaystyle \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\Big \\| ^ { 2 } } \\\\ & { } & { \\leq 4 \\| B ( x _ { n + 1 } ^ { k } ) \\| ^ { 2 } + 2 \\Big \\| \\displaystyle \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\Big \\| ^ { 2 } + 4 \\| e ^ { k } \\| ^ { 2 } . } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 183, + 482, + 816, + 566 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Now next take expectations with respect to $\\mathcal { F } _ { k }$ and $\\mathcal { E } _ { k }$ , and use the bound on the variance of the noise in (11), obtaining ", + "bbox": [ + 171, + 568, + 823, + 598 + ], + "page_idx": 16 + }, + { + "type": "equation", + "img_path": "images/b8f7bd7c86a72deb4ba7e0b5cb74ab712446b5c629076a14ff36135012f0ce42.jpg", + "text": "$$\n\\begin{array} { r l r } { { \\mathbb { E } [ \\| \\nabla _ { z } \\varphi _ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } , \\mathcal { E } _ { k } ] \\leq \\mathbb { E } [ 4 \\| B ( x _ { n + 1 } ^ { k } ) \\| ^ { 2 } + 2 \\Big \\| \\displaystyle \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\Big \\| ^ { 2 } + 4 \\| e ^ { k } \\| ^ { 2 } \\ \\Big | \\ \\mathcal { F } _ { k } , \\mathcal { E } _ { k } ] } } \\\\ & { } & { \\leq 4 ( N + 1 ) \\| B ( x _ { n + 1 } ^ { k } ) \\| ^ { 2 } + 2 \\Big \\| \\displaystyle \\sum _ { i = 1 } ^ { n } y _ { 
i } ^ { k } \\Big \\| ^ { 2 } + 4 N , ~ } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 240, + 599, + 756, + 685 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "where we have used that $y _ { i } ^ { k }$ is $\\mathcal { F } _ { k }$ -measurable for $i \\in 1 . . n$ . Thus, taking expectations over $\\mathcal { E } _ { k }$ conditioned on $\\mathcal { F } _ { k }$ yields ", + "bbox": [ + 173, + 688, + 825, + 718 + ], + "page_idx": 16 + }, + { + "type": "equation", + "img_path": "images/c3c8f9a6dab58330aeb949921964b22e45fc17b711843fde9cdea4fe9d522b4f.jpg", + "text": "$$\n\\mathbb { E } \\left[ \\| \\nabla _ { z } \\varphi _ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } \\right] \\leq 4 ( N + 1 ) \\mathbb { E } [ \\| B ( x _ { n + 1 } ^ { k } ) \\| ^ { 2 } | \\mathcal { F } _ { k } ] + 2 \\Big \\| \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\Big \\| ^ { 2 } + 4 N .\n$$", + "text_format": "latex", + "bbox": [ + 258, + 719, + 738, + 761 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We will now bound the two terms on the right side of (22). 
", + "bbox": [ + 173, + 771, + 558, + 786 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "C.3.1 FIRST TERM IN (22) ", + "text_level": 1, + "bbox": [ + 174, + 800, + 370, + 814 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "First, note that ", + "bbox": [ + 173, + 824, + 272, + 838 + ], + "page_idx": 16 + }, + { + "type": "equation", + "img_path": "images/0690b55127f958d10263ac0a027e14bf6cefdfc368630bbe58f7258713d59c6f.jpg", + "text": "$$\n\\begin{array} { r l } & { \\| B ( z ^ { k } ) \\| ^ { 2 } = \\| B ( z ^ { k } ) - B ( z ^ { * } ) + B ( z ^ { * } ) ) \\| ^ { 2 } } \\\\ & { \\qquad \\leq 2 \\| B ( z ^ { k } ) - B ( z ^ { * } ) \\| ^ { 2 } + 2 \\| B ( z ^ { * } ) \\| ^ { 2 } } \\\\ & { \\qquad \\leq 2 L ^ { 2 } \\| z ^ { k } - z ^ { * } \\| ^ { 2 } + 2 \\| B ( z ^ { * } ) \\| ^ { 2 } } \\\\ & { \\qquad \\leq 2 L ^ { 2 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + 2 \\| B ( z ^ { * } ) \\| ^ { 2 } . } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 338, + 839, + 658, + 925 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Now, returning to the first term on the right of (22), we have ", + "bbox": [ + 173, + 103, + 570, + 119 + ], + "page_idx": 17 + }, + { + "type": "equation", + "img_path": "images/fc96ea33abc768a53fc35ed40b9ee3ab445a61dd4460fc11acc56273b242ed2f.jpg", + "text": "$$\n\\begin{array} { r l } & { \\| B ( x _ { n + 1 } ^ { k } ) \\| ^ { 2 } = \\| B ( z ^ { k } ) + B ( x _ { n + 1 } ^ { k } ) - B ( z ^ { k } ) \\| ^ { 2 } } \\\\ & { \\qquad \\leq 2 \\| B ( z ^ { k } ) \\| ^ { 2 } + 2 \\| B ( x _ { n + 1 } ^ { k } ) - B ( z ^ { k } ) \\| ^ { 2 } } \\\\ & { \\qquad \\leq 2 \\| B ( z ^ { k } ) \\| ^ { 2 } + 2 L ^ { 2 } \\| x _ { n + 1 } ^ { k } - z ^ { k } \\| ^ { 2 } } \\\\ & { \\qquad \\leq 4 L ^ { 2 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + 4 \\| B ( z ^ { * } ) \\| ^ { 2 } + 2 L ^ { 2 } \\| x _ { n + 1 } ^ { k } - z ^ { k } \\| ^ { 2 } } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 274, + 122, + 
722, + 207 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "where we have used (23) to obtain (24). ", + "bbox": [ + 173, + 209, + 434, + 224 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "For the third term in (24), we have from the calculation on line 7 of the algorithm that ", + "bbox": [ + 174, + 231, + 736, + 247 + ], + "page_idx": 17 + }, + { + "type": "equation", + "img_path": "images/a98ab18044da605ed31a3ee780bc35f157cae0ca014dcdff3dff4e5cc89dec91.jpg", + "text": "$$\n\\begin{array} { r } { x _ { n + 1 } ^ { k } - z ^ { k } = - \\rho _ { k } ( r ^ { k } - w _ { n + 1 } ^ { k } ) = - \\rho _ { k } ( B ( z ^ { k } ) + \\epsilon ^ { k } - w _ { n + 1 } ^ { k } ) , } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 292, + 251, + 702, + 272 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "and therefore ", + "bbox": [ + 173, + 276, + 264, + 290 + ], + "page_idx": 17 + }, + { + "type": "equation", + "img_path": "images/db772aab9bf8c0b6e5652ce3a542f16efe9df2e178c894dcf9bec4cee67e4be2.jpg", + "text": "$$\n\\begin{array} { r l } & { \\| x _ { n + 1 } ^ { k } - z ^ { k } \\| ^ { 2 } = \\rho _ { k } ^ { 2 } \\| B ( z ^ { k } ) + \\epsilon ^ { k } - w _ { n + 1 } ^ { k } \\| ^ { 2 } } \\\\ & { \\qquad \\leq \\overline { { \\rho } } ^ { 2 } \\| B ( z ^ { k } ) + \\epsilon ^ { k } - w _ { n + 1 } ^ { k } \\| ^ { 2 } } \\\\ & { \\qquad \\leq 3 \\overline { { \\rho } } ^ { 2 } ( \\| B ( z ^ { k } ) \\| ^ { 2 } + \\| \\epsilon ^ { k } \\| ^ { 2 } + \\| w _ { n + 1 } ^ { k } \\| ^ { 2 } ) . 
} \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 315, + 294, + 681, + 358 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We next take expectations conditioned on $\\mathcal { F } _ { k }$ and use the noise variance bound (10) to obtain ", + "bbox": [ + 169, + 359, + 782, + 376 + ], + "page_idx": 17 + }, + { + "type": "equation", + "img_path": "images/08aec390a4dbde35b7c1034a251928e02c538c6fc181a88f80a1e19b12af99bb.jpg", + "text": "$$\n\\begin{array} { r l } & { \\mathbb { E } \\big [ \\| x _ { n + 1 } ^ { k } - z ^ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } \\big ] \\leq \\mathbb { E } \\big [ 3 \\overline { { \\rho } } ^ { 2 } \\big ( \\| B ( z ^ { k } ) \\| ^ { 2 } + \\| \\epsilon ^ { k } \\| ^ { 2 } + \\| w _ { n + 1 } ^ { k } \\| ^ { 2 } \\big ) | \\mathcal { F } _ { k } \\big ] } \\\\ & { \\qquad \\leq 3 \\overline { { \\rho } } ^ { 2 } \\big ( ( N + 1 ) \\| B ( z ^ { k } ) \\| ^ { 2 } + \\| w _ { n + 1 } ^ { k } \\| ^ { 2 } + N \\big ) . } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 261, + 378, + 735, + 422 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Therefore ", + "bbox": [ + 173, + 426, + 241, + 440 + ], + "page_idx": 17 + }, + { + "type": "equation", + "img_path": "images/42789262e30d48cfedbbc5971687ffc48b513ff47a65ac22b89674c97713ffdd.jpg", + "text": "$$\n\\begin{array} { r l } & { \\mathbb { E } [ \\| x _ { n + 1 } ^ { k } - z ^ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } ] \\leq 6 \\bar { \\rho } ^ { 2 } \\big ( ( N + 1 ) \\| B ( z ^ { k } ) \\| ^ { 2 } + \\| w _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { * } \\| ^ { 2 } + \\| w _ { n + 1 } ^ { * } \\| ^ { 2 } \\big ) + 3 \\bar { \\rho } ^ { 2 } N } \\\\ & { \\qquad = 6 \\bar { \\rho } ^ { 2 } \\Big ( 2 ( N + 1 ) L ^ { 2 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + 2 ( N + 1 ) \\| B ( z ^ { * } ) \\| ^ { 2 } } \\\\ & { \\qquad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad 
\\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad } \\\\ & { \\qquad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad } \\\\ & { \\leq 6 \\bar { \\rho } ^ { 2 } \\big ( 2 ( N + 1 ) L ^ { 2 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + \\| w _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { * } \\| ^ { 2 } \\big ) } \\\\ & { \\qquad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad + 1 8 \\bar { \\rho } ^ { 2 } ( N + 1 ) \\| B ( z ^ { * } ) \\| ^ { 2 } + 3 \\bar { \\rho } ^ { 2 } N } \\\\ & { \\leq 1 8 \\bar { \\rho } ^ { 2 } ( N + 1 ) \\big ( ( L ^ { 2 } + 1 ) \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + \\| B ( z ^ { * } ) \\| ^ { 2 } \\big ) + 3 \\bar { \\rho } ^ { 2 } N } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 192, + 443, + 805, + 583 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "where in the equality uses (23) and $w _ { n + 1 } ^ { * } = B ( z ^ { * } )$ . Combining (24) and (25), we arrive at ", + "bbox": [ + 173, + 587, + 767, + 603 + ], + "page_idx": 17 + }, + { + "type": "equation", + "img_path": "images/c2b2419a8d9dba549c85dc9c915c00e304e96e2478c850da1c7b214ac7dae5d9.jpg", + "text": "$$\n\\begin{array} { r l } & { \\mathbb { E } \\left[ \\left. B ( x _ { n + 1 } ^ { k } ) \\right. ^ { 2 } \\middle | \\mathcal { F } _ { k } \\right] \\leq 4 L ^ { 2 } \\left[ 1 + 9 \\overline { { \\rho } } ^ { 2 } ( L ^ { 2 } + 1 ) ( N + 1 ) \\right] \\Vert p ^ { k } - p ^ { * } \\Vert ^ { 2 } } \\\\ & { \\qquad + 4 \\big ( 1 + 9 \\overline { { \\rho } } ^ { 2 } L ^ { 2 } ( N + 1 ) \\big ) \\Vert B ( z ^ { * } ) \\Vert ^ { 2 } + 6 \\overline { { \\rho } } ^ { 2 } L ^ { 2 } N . 
} \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 256, + 608, + 741, + 657 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "C.3.2 SECOND TERM IN (22) ", + "text_level": 1, + "bbox": [ + 174, + 667, + 387, + 683 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "For $i \\in 1 . . n$ , line 5 of the algorithm may be rearranged into $y _ { i } ^ { k } = \\tau ^ { - 1 } ( z ^ { k } - x _ { i } ^ { k } ) + w _ { i } ^ { k }$ , so ", + "bbox": [ + 171, + 690, + 764, + 708 + ], + "page_idx": 17 + }, + { + "type": "equation", + "img_path": "images/7d3cf101ab3ad11ddd41c7f652d6ff2a57f5deec296959da5119f272170259c8.jpg", + "text": "$$\n\\begin{array} { r l r } { { \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\bigg \\| ^ { 2 } = \\bigg \\| \\sum _ { i = 1 } ^ { n } ( \\tau ^ { - 1 } ( z ^ { k } - x _ { i } ^ { k } ) + w _ { i } ^ { k } ) \\bigg \\| ^ { 2 } } } \\\\ & { } & { \\leq 2 \\bigg \\| \\tau ^ { - 1 } \\sum _ { i = 1 } ^ { n } ( z ^ { k } - x _ { i } ^ { k } ) \\bigg \\| ^ { 2 } + 2 \\bigg \\| \\sum _ { i = 1 } ^ { n } w _ { i } ^ { k } \\bigg \\| ^ { 2 } } \\\\ & { } & { \\leq 2 \\pi \\tau ^ { - 2 } \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + 2 \\bigg \\| \\sum _ { i = 1 } ^ { n } w _ { i } ^ { k } \\bigg \\| ^ { 2 } } \\\\ & { } & { \\leq 4 \\pi ^ { 2 } \\tau ^ { - 2 } \\| z ^ { k } - z ^ { * } \\| ^ { 2 } + 4 \\pi \\tau ^ { - 2 } \\sum _ { i = 1 } ^ { n } \\| z ^ { * } - x _ { i } ^ { k } \\| ^ { 2 } + 4 m \\sum _ { i = 1 } ^ { n } \\| w _ { i } ^ { k } - w _ { i } ^ { * } \\| ^ { 2 } + 4 \\bigg \\| \\sum _ { i = 1 } ^ { n } w _ { i } ^ { * } \\bigg \\| ^ { 2 } } \\\\ & { } & { \\leq 4 \\pi ^ { 2 } ( \\tau ^ { - 2 } + 1 ) \\| y ^ { k } - p ^ { * } \\| ^ { 2 } + 4 \\pi \\tau ^ { - 2 } \\sum _ { i = 1 } ^ { n } \\| z ^ { * } - x _ { i } ^ { k } \\| ^ { 2 } + 4 \\bigg \\| \\sum _ { i = 1 } ^ { n } w _ { i } ^ { * } \\bigg \\| ^ { 2 } . 
} \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 181, + 713, + 823, + 926 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "By the definition of the solution set $s$ in (5), $w _ { i } ^ { * } \\in A _ { i } ( z ^ { * } )$ , so $z ^ { * } + \\tau w _ { i } ^ { * } \\in ( I + \\tau A _ { i } ) ( z ^ { * } )$ , and since the resolvent is single-valued (Bauschke & Combettes, 2017, Cor. 23.9) we therefore obtain ", + "bbox": [ + 169, + 102, + 823, + 133 + ], + "page_idx": 18 + }, + { + "type": "equation", + "img_path": "images/db5125d1ddb028192cd3a9d7be6669dbce1f8e31e7578b0f223e98ded57a63a1.jpg", + "text": "$$\nz ^ { * } = ( I + \\tau A _ { i } ) ^ { - 1 } ( I + \\tau A _ { i } ) ( z ^ { * } ) = J _ { \\tau A _ { i } } ( z ^ { * } + \\tau w _ { i } ^ { * } ) .\n$$", + "text_format": "latex", + "bbox": [ + 320, + 141, + 678, + 160 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "From lines 3 and 4 of the algorithm, we also have $x _ { i } ^ { k } = J _ { \\tau A _ { i } } ( z ^ { k } + \\tau w _ { i } ^ { k } )$ for $i \\in 1 . . n$ . Thus, using the nonexpansiveness of the resolvent (Bauschke & Combettes, 2017, Def. 4.1 and Cor. 
23.9), we have ", + "bbox": [ + 173, + 170, + 826, + 213 + ], + "page_idx": 18 + }, + { + "type": "equation", + "img_path": "images/eb396dbda3e27676a945368d3c89fc99a8dd9c9083043cef34f280e2656cddf7.jpg", + "text": "$$\n\\begin{array} { r l } { \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { * } - x _ { i } ^ { k } \\| ^ { 2 } = \\displaystyle \\sum _ { i = 1 } ^ { n } \\left\\| J _ { T , 4 _ { i } } ( z ^ { k } + \\tau w _ { i } ^ { k } ) - J _ { \\tau , 4 _ { i } } ( z ^ { * } + \\tau w _ { i } ^ { * } ) \\right\\| ^ { 2 } } & { } \\\\ { \\leq \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { k } + \\tau w _ { i } ^ { k } - z ^ { * } - \\tau w _ { i } ^ { * } \\| ^ { 2 } } & { } \\\\ { = \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - z ^ { * } + \\tau ( w _ { i } ^ { k } - w _ { i } ^ { * } ) \\| ^ { 2 } } & { } \\\\ { \\leq 2 n \\| z ^ { k } - z ^ { * } \\| ^ { 2 } + 2 \\tau ^ { 2 } \\displaystyle \\sum _ { i = 1 } ^ { n } \\| w _ { i } ^ { k } - w _ { i } ^ { * } \\| ^ { 2 } } & { } \\\\ { \\leq 2 ( n + \\tau ^ { 2 } ) \\| y ^ { k } - p ^ { * } \\| ^ { 2 } . 
} & { } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 287, + 222, + 707, + 411 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Combining (27) and (28) yields ", + "bbox": [ + 173, + 417, + 383, + 434 + ], + "page_idx": 18 + }, + { + "type": "equation", + "img_path": "images/e19c099610947f7954ffd8e965fac5644ea8f19726ee21ea405eeb72782628fe.jpg", + "text": "$$\n\\Big \\| \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\Big \\| ^ { 2 } \\leq 1 2 n ^ { 2 } \\tau ^ { - 2 } ( n + \\tau ^ { 2 } ) \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + 4 \\Big \\| \\sum _ { i = 1 } ^ { n } w _ { i } ^ { * } \\Big \\| ^ { 2 } .\n$$", + "text_format": "latex", + "bbox": [ + 299, + 443, + 697, + 484 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Combining (26) and (29) with (22) yields ", + "bbox": [ + 173, + 501, + 446, + 516 + ], + "page_idx": 18 + }, + { + "type": "equation", + "img_path": "images/11c9e257fcbf2170a2f61304fa0a6f971bb95d5038cc34d808dd6c744cbae9ef.jpg", + "text": "$$\n\\begin{array} { r l } & { \\mathbb { E } \\left[ \\| \\nabla _ { z } \\varphi _ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } \\right] \\le 2 4 \\left[ ( 1 + 9 \\overline { { \\rho } } ^ { 2 } ) ( L ^ { 2 } + 1 ) ^ { 2 } ( N + 1 ) ^ { 2 } + n ^ { 2 } \\tau ^ { - 2 } ( n + \\tau ^ { 2 } ) \\right] \\| p ^ { k } - p ^ { * } \\| ^ { 2 } } \\\\ & { \\qquad + 1 6 ( N + 1 ) \\big ( 1 + 9 \\overline { { \\rho } } ^ { 2 } L ^ { 2 } ( N + 1 ) \\big ) \\| B ( z ^ { * } ) \\| ^ { 2 } + 8 \\bigg \\| \\displaystyle \\sum _ { i = 1 } ^ { n } w _ { i } ^ { * } \\bigg \\| ^ { 2 } } \\\\ & { \\qquad + 2 4 \\overline { { \\rho } } ^ { 2 } L ^ { 2 } ( N + 1 ) N + 4 N . 
} \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 212, + 523, + 782, + 609 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "C.3.3 DUAL GRADIENT NORM ", + "text_level": 1, + "bbox": [ + 174, + 625, + 401, + 638 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Considering that $\\nabla \\varphi _ { k }$ is taken with respect to the subspace $\\mathcal { P }$ , the gradients with respect to the dual variables are β€” see for example Eckstein & Svaiter (2009) β€” for each $i \\in { 1 . . ( n + 1 ) }$ , ", + "bbox": [ + 174, + 650, + 825, + 680 + ], + "page_idx": 18 + }, + { + "type": "equation", + "img_path": "images/1a1fa31bba74c41927b4f6301aa28599463ff4b0bb6758dac99a886bfae20aea.jpg", + "text": "$$\n\\begin{array} { l } { \\displaystyle \\| \\nabla _ { w _ { i } } \\varphi _ { k } \\| ^ { 2 } = \\left\\| x _ { i } ^ { k } - \\frac { 1 } { n + 1 } \\sum _ { j = 1 } ^ { n + 1 } x _ { j } ^ { k } \\right\\| ^ { 2 } = \\left\\| \\frac { 1 } { n + 1 } \\sum _ { j = 1 } ^ { n + 1 } ( x _ { i } ^ { k } - x _ { j } ^ { k } ) \\right\\| ^ { 2 } } \\\\ { \\displaystyle \\leq \\sum _ { j = 1 } ^ { n + 1 } \\| x _ { i } ^ { k } - x _ { j } ^ { k } \\| ^ { 2 } } \\\\ { \\displaystyle \\leq 2 \\sum _ { j = 1 } ^ { n + 1 } \\big ( \\| x _ { i } ^ { k } - z ^ { k } \\| ^ { 2 } + \\| z ^ { k } - x _ { j } ^ { k } \\| ^ { 2 } \\big ) } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 250, + 707, + 750, + 844 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Summing this inequality for $i \\in { 1 . . 
( n + 1 ) }$ and collecting terms yields ", + "bbox": [ + 173, + 853, + 637, + 869 + ], + "page_idx": 18 + }, + { + "type": "equation", + "img_path": "images/ed986999f76c7947bd3840d828f3236344aa681c73273becba85527a8cd9d4ec.jpg", + "text": "$$\n\\sum _ { i = 1 } ^ { n + 1 } \\| \\nabla _ { w _ { i } } \\varphi _ { k } \\| ^ { 2 } \\leq 4 ( n + 1 ) \\sum _ { i = 1 } ^ { n + 1 } \\| x _ { i } ^ { k } - z ^ { k } \\| ^ { 2 } ,\n$$", + "text_format": "latex", + "bbox": [ + 349, + 878, + 645, + 922 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "so taking expectations conditioned on $\\mathcal { F } _ { k }$ produces ", + "bbox": [ + 173, + 103, + 509, + 119 + ], + "page_idx": 19 + }, + { + "type": "equation", + "img_path": "images/b0b75238fbd7c69b9219538cf3bdf1f40557f202386711687b1ac8c8f85fb99b.jpg", + "text": "$$\n\\begin{array} { r l } { \\displaystyle \\sum _ { i = 1 } ^ { n + 1 } \\mathbb { E } \\| \\nabla _ { x _ { i } } \\varphi _ { i } \\| ^ { 2 } | \\mathcal { F } _ { k } | \\leq 4 ( n + 1 ) \\displaystyle \\sum _ { i = 1 } ^ { n + 1 } \\mathbb { E } \\| | x _ { i } ^ { k } - z ^ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } | } \\\\ & { \\leq 4 ( n + 1 ) \\mathbb { E } \\| | x _ { i } ^ { k } - z ^ { k } | ^ { 2 } | \\mathcal { F } _ { k } | + 4 ( n + 1 ) \\displaystyle \\sum _ { i = 1 } ^ { n } \\mathbb { E } \\| | x _ { i } ^ { k } - z ^ { k } | ^ { 2 } | \\mathcal { F } _ { k } | } \\\\ & { \\leq 4 ( n + 1 ) \\mathbb { E } \\| | x _ { i } ^ { k } - z ^ { k } | ^ { 2 } | \\mathcal { F } _ { k } | } \\\\ & { \\qquad + s ( n + 1 ) \\displaystyle \\sum _ { i = 1 } ^ { n } \\mathbb { E } \\| | x _ { i } ^ { k } - z ^ { k } | ^ { 2 } | \\mathcal { F } _ { k } | + 8 ( n + 1 ) ^ { 2 } \\| z ^ { k } - z ^ { k } | ^ { 2 } } \\\\ & { \\qquad + s ( n + 1 ) \\displaystyle \\sum _ { i = 1 } ^ { n } \\mathbb { E } \\| | x _ { i } ^ { k } - z ^ { k } | ^ { 2 } | \\mathcal { F } _ { k } | } \\\\ & { \\leq 4 ( n + 1 ) \\displaystyle \\sum _ { i = 1 } ^ { n } 
\\mathbb { E } \\| | x _ { i } ^ { k } - z ^ { k } | ^ { 2 } | \\mathcal { F } _ { k } | } \\\\ & { \\qquad + s ( n + 1 ) \\displaystyle \\sum _ { i = 1 } ^ { n } \\mathbb { E } \\| | x _ { i } ^ { k } - z ^ { k } | ^ { 2 } | \\mathcal { F } _ { k } | + 8 ( n + 1 ) ^ { 2 } \\| n ^ { k } - p ^ { k } | ^ { 2 } } \\\\ & { \\leq 8 ( n + 1 ) \\displaystyle \\sum _ { i = 1 } ^ { n } 2 \\tau ^ { 2 } + 1 + 9 s ^ { 2 } ( 2 ^ { k } + 1 ) | | b ^ { k } - z ^ { k } | ^ { 2 } | } \\\\ & \\leq 8 ( n + 1 ) \\displaystyle \\sum _ { i = 1 } ^ { n } 2 \\tau ^ { 2 } + 1 + 9 s ^ { 2 } ( 2 ^ { k } + 1 ) | | b ^ { k } - z \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 192, + 127, + 808, + 381 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "where the final inequality employs (25) and (28). ", + "bbox": [ + 173, + 385, + 493, + 400 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "All told, using (30) and (31) and simplifying the constants, one obtains ", + "bbox": [ + 173, + 406, + 638, + 422 + ], + "page_idx": 19 + }, + { + "type": "equation", + "img_path": "images/0217ef0b4f5b5b4f5eef035ad32b486e027814bdc21defea076f02e797f48ca9.jpg", + "text": "$$\n\\begin{array} { r l r } { { \\mathbb { E } [ \\| \\nabla \\varphi _ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } ] = \\mathbb { E } [ \\| \\nabla _ { z } \\varphi _ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } ] + \\sum _ { i = 1 } ^ { n + 1 } \\mathbb { E } [ \\| \\nabla _ { w _ { i } } \\varphi _ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } ] } } \\\\ & { } & { \\leq C _ { 1 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + C _ { 2 } , } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 294, + 429, + 702, + 496 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "where ", + "bbox": [ + 173, + 502, + 217, + 516 + ], + "page_idx": 19 + }, + { + "type": "equation", + "img_path": "images/48759a6394bd537ecf90c80d0c636bfbcce7e85be2075712e959bd63159ec8c3.jpg", + "text": "$$\n\\begin{array} { c } { { C _ { 1 
} = 2 4 ( 1 + 1 0 \\overline { { { \\rho } } } ^ { 2 } ) ( n + 1 ) ( L ^ { 2 } + 1 ) ^ { 2 } ( N + 1 ) ^ { 2 } } } \\\\ { { { } } } \\\\ { { + 8 ( n + 1 ) \\left( 2 \\tau ^ { 2 } + 6 ( n + 1 ) + 1 + 3 ( n + 1 ) ^ { 2 } \\tau ^ { - 2 } \\right) } } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 300, + 522, + 696, + 566 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "and ", + "bbox": [ + 173, + 571, + 202, + 585 + ], + "page_idx": 19 + }, + { + "type": "equation", + "img_path": "images/c8f22239efd08b606b7b055333c56a780f6c8c5afa476bac5207185e4d1330e5.jpg", + "text": "$$\n\\begin{array} { l } { { C _ { 2 } = 1 6 ( N + 1 ) \\left[ 1 + 4 { \\overline { { \\rho } } } ^ { 2 } ( n + 1 ) + 9 { \\overline { { \\rho } } } ^ { 2 } L ^ { 2 } ( N + 1 ) \\right] \\| B ( z ^ { * } ) \\| ^ { 2 } + 8 \\| \\displaystyle \\sum _ { i = 1 } ^ { n } w _ { i } ^ { * } \\| ^ { 2 } } } \\\\ { { \\nonumber } } \\\\ { { \\qquad + 1 2 { \\overline { { \\rho } } } ^ { 2 } N ( 2 L ^ { 2 } ( N + 1 ) + n + 1 ) + 4 N . 
} } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 236, + 593, + 758, + 655 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "C.4 LOWER BOUND FOR $\\varphi _ { k }$ -GAP ", + "text_level": 1, + "bbox": [ + 174, + 670, + 416, + 685 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Recalling (13), that is, ", + "bbox": [ + 173, + 696, + 321, + 712 + ], + "page_idx": 19 + }, + { + "type": "equation", + "img_path": "images/6b7313da379a20d7cf6014a35a92634d0fa03bbc9844da339a1bc13222e0177d.jpg", + "text": "$$\n\\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } = \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - 2 \\alpha _ { k } ( \\varphi _ { k } ( p ^ { k } ) - \\varphi _ { k } ( p ^ { * } ) ) + \\alpha _ { k } ^ { 2 } \\| \\nabla \\varphi _ { k } \\| ^ { 2 } .\n$$", + "text_format": "latex", + "bbox": [ + 266, + 719, + 730, + 739 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "We may use the gradient bound from (32) to obtain ", + "bbox": [ + 173, + 747, + 511, + 763 + ], + "page_idx": 19 + }, + { + "type": "equation", + "img_path": "images/b400dde624bf2a28d00a84d70f846140dc3787e37736406bc30c3e9606e98a6e.jpg", + "text": "$$\n\\begin{array} { r } { \\mathbb { E } [ \\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } | \\mathcal { F } _ { k } ] \\le ( 1 + C _ { 1 } \\alpha _ { k } ^ { 2 } ) \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - 2 \\alpha _ { k } \\mathbb { E } [ \\varphi _ { k } ( p ^ { k } ) - \\varphi _ { k } ( p ^ { * } ) | \\mathcal { F } _ { k } ] + C _ { 2 } \\alpha _ { k } ^ { 2 } . } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 187, + 771, + 782, + 791 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "We now focus on finding a lower bound for the term $\\mathbb { E } [ \\varphi _ { k } ( p ^ { k } ) - \\varphi _ { k } ( p ^ { * } ) | \\mathcal { F } _ { k } ]$ , which we call the β€œ $\\varphi _ { k }$ -gap”. 
Recall that for $p = ( z , w _ { 1 } , \\ldots , w _ { n + 1 } )$ , ", + "bbox": [ + 173, + 800, + 826, + 832 + ], + "page_idx": 19 + }, + { + "type": "equation", + "img_path": "images/db273c99f30b728b8227982916fa285f9ecb52884901e46f6cd7adef34ca3695.jpg", + "text": "$$\n\\varphi _ { k } ( p ) = \\sum _ { i = 1 } ^ { n + 1 } \\langle z - x _ { i } ^ { k } , y _ { i } ^ { k } - w _ { i } \\rangle .\n$$", + "text_format": "latex", + "bbox": [ + 392, + 839, + 604, + 882 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "For each $i \\in { 1 . . ( n + 1 ) }$ , define $\\varphi _ { i , k } ( p ) \\doteq \\langle z - x _ { i } ^ { k } , y _ { i } ^ { k } - w _ { i } \\rangle$ . We will call $\\mathbb { E } [ \\varphi _ { i , k } ( p ^ { k } ) - \\varphi _ { i , k } ( p ^ { * } ) \\vert \\mathcal { F } _ { k } ]$ the β€œ $\\varphi _ { i , k }$ -gap”. Note that $\\begin{array} { r } { \\varphi _ { k } ( p ) = \\sum _ { i = 1 } ^ { n + 1 } \\varphi _ { i , k } ( p ) } \\end{array}$ . ", + "bbox": [ + 173, + 890, + 823, + 928 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "C.5 LOWER BOUND FOR $\\varphi _ { i , k }$ -GAP OVER $i \\in 1 . . n$ ", + "bbox": [ + 173, + 103, + 524, + 119 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "For $i \\in 1 . . n$ , we have from line 5 of the algorithm that ", + "bbox": [ + 173, + 128, + 532, + 145 + ], + "page_idx": 20 + }, + { + "type": "equation", + "img_path": "images/1278d9537922579f771741d1ea2fd252984be67bc9aea61233395d5f3286e109.jpg", + "text": "$$\nz ^ { k } - x _ { i } ^ { k } = \\tau ( y _ { i } ^ { k } - w _ { i } ^ { k } ) .\n$$", + "text_format": "latex", + "bbox": [ + 416, + 148, + 578, + 169 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Since $\\varphi _ { i , k } ( p ^ { k } ) = \\langle z ^ { k } - x _ { i } ^ { k } , y _ { i } ^ { k } - w _ { i } ^ { k } \\rangle$ , one may conclude that for $i \\in 1 . . 
n$ ", + "bbox": [ + 173, + 174, + 663, + 193 + ], + "page_idx": 20 + }, + { + "type": "equation", + "img_path": "images/390d901b63754f4eb1e58031fd91b54dda5d5d016bc60926b2990cb15211b85a.jpg", + "text": "$$\n\\varphi _ { i , k } ( p ^ { k } ) = \\frac { \\tau } { 2 } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\frac { 1 } { 2 \\tau } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } .\n$$", + "text_format": "latex", + "bbox": [ + 349, + 196, + 647, + 228 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "On the other hand, for $p ^ { * } \\in { \\mathcal { S } }$ and $i \\in 1 . . n$ , one also has ", + "bbox": [ + 173, + 238, + 545, + 253 + ], + "page_idx": 20 + }, + { + "type": "equation", + "img_path": "images/ffc9f525b71929e041e7477f14a95f253f613a27c9b72aa743a9a40cfb2a51c1.jpg", + "text": "$$\n- \\varphi _ { i , k } \\mathopen { } \\mathclose \\bgroup \\left( p ^ { * } \\aftergroup \\egroup \\right) = \\mathopen { } \\mathclose \\bgroup \\left. z ^ { * } - x _ { i } ^ { k } , w _ { i } ^ { * } - y _ { i } ^ { k } \\aftergroup \\egroup \\right. \\geq 0\n$$", + "text_format": "latex", + "bbox": [ + 370, + 257, + 625, + 276 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "by the monotonicity of $A _ { i }$ . Therefore, for $i \\in 1 . . 
n$ , it holds that ", + "bbox": [ + 173, + 281, + 589, + 297 + ], + "page_idx": 20 + }, + { + "type": "equation", + "img_path": "images/b9de66e401adf4c675eedf82b6a4c6374ccb9dab43acfa9b7e362589f65b25d7.jpg", + "text": "$$\n\\varphi _ { i , k } ( p ^ { k } ) - \\varphi _ { i , k } ( p ^ { * } ) \\geq \\frac { \\tau } { 2 } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\frac { 1 } { 2 \\tau } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } ,\n$$", + "text_format": "latex", + "bbox": [ + 312, + 301, + 683, + 332 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "and taking expectations conditioned on $\\mathcal { F } _ { k }$ leads to ", + "bbox": [ + 174, + 335, + 511, + 352 + ], + "page_idx": 20 + }, + { + "type": "equation", + "img_path": "images/796efc8de94808fd6e81122cb4be704764f41c0dcad51832b197b125404c4e9c.jpg", + "text": "$$\n\\mathbb { E } [ \\varphi _ { i , k } ( p ^ { k } ) - \\varphi _ { i , k } ( p ^ { * } ) | \\mathcal { F } _ { k } ] \\ge \\frac { \\tau } { 2 } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\frac { 1 } { 2 \\tau } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 }\n$$", + "text_format": "latex", + "bbox": [ + 289, + 356, + 707, + 386 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "where we have used that $x _ { i } ^ { k }$ and $y _ { i } ^ { k }$ are both $\\mathcal { F } _ { k }$ -measurable for $i \\in 1 . . n$ . 
", + "bbox": [ + 174, + 392, + 650, + 409 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "C.6 LOWER BOUND FOR $\\varphi _ { n + 1 , k }$ -GAP ", + "text_level": 1, + "bbox": [ + 176, + 424, + 444, + 439 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "From lines 6-7 of the algorithm, we have ", + "bbox": [ + 174, + 449, + 444, + 464 + ], + "page_idx": 20 + }, + { + "type": "equation", + "img_path": "images/d3b94454758d38466af0ec4ae7c279789cc1fc2223355ccdc58d5dbe8236a818.jpg", + "text": "$$\nz ^ { k } - x _ { n + 1 } ^ { k } = \\rho _ { k } ( B ( z ^ { k } ) - w _ { n + 1 } ^ { k } + \\epsilon ^ { k } ) .\n$$", + "text_format": "latex", + "bbox": [ + 367, + 469, + 630, + 489 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Therefore, ", + "bbox": [ + 173, + 494, + 243, + 508 + ], + "page_idx": 20 + }, + { + "type": "equation", + "img_path": "images/898e6ca4426be5f91cea15a620315a8abeff7916dfc36e2260dbbda9b3210241.jpg", + "text": "$$\n\\begin{array} { r l } { \\hat { \\sigma } _ { \\beta 1 , 1 } \\hat { x } _ { \\beta ^ { \\prime } 1 , 1 } ^ { ( f ) } = \\langle \\boldsymbol { \\xi } ^ { 2 } \\boldsymbol { \\cdot } \\boldsymbol { x } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } - \\boldsymbol { u } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } \\rangle } & { \\mathrm { ~ C ~ e ~ } } \\\\ & { = \\langle \\boldsymbol { \\xi } ^ { 2 } \\boldsymbol { \\cdot } \\boldsymbol { x } _ { \\alpha + 1 , \\beta } ^ { ( f ) } \\boldsymbol { y } _ { \\beta ^ { \\prime } 1 } ^ { ( f ) } - \\boldsymbol { u } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } \\rangle + \\langle \\boldsymbol { \\xi } ^ { 2 } - \\boldsymbol { x } _ { \\alpha + 1 , \\beta } ^ { ( f ) } \\boldsymbol { y } _ { \\beta 1 } ^ { ( f ) } - \\boldsymbol { B } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } \\rangle } \\\\ & { - \\langle \\boldsymbol { \\xi } ^ { 2 } \\boldsymbol { \\cdot } \\boldsymbol { u } _ { \\alpha + 1 , \\beta } ^ { ( f ) } 
\\boldsymbol { x } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } \\rangle + \\langle \\boldsymbol { \\xi } ^ { 2 } \\boldsymbol { \\cdot } \\boldsymbol { u } _ { \\alpha + 1 , \\beta } ^ { ( f ) } \\boldsymbol { x } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } \\rangle + \\langle \\boldsymbol { \\xi } ^ { 2 } \\boldsymbol { \\cdot } \\boldsymbol { u } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } \\boldsymbol { y } _ { \\beta 1 } ^ { ( f ) } - \\boldsymbol { u } _ { \\alpha - 1 , \\beta ^ { \\prime } } ^ { ( f ) } \\rangle } \\\\ & - \\langle \\boldsymbol { u } _ { \\alpha } ^ { \\beta } \\boldsymbol { y } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } \\rangle + \\langle \\boldsymbol { \\xi } ^ { 4 } \\boldsymbol { x } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } \\rangle + \\langle \\boldsymbol { u } _ { \\alpha } ^ { \\beta } \\boldsymbol { u } _ { \\alpha + 1 , \\beta ^ { \\prime } } ^ { ( f ) } \\boldsymbol { y } _ { \\beta ^ { \\prime } 1 } ^ { ( f ) } \\rangle + \\langle \\boldsymbol { u } _ { \\alpha } ^ { \\beta } \\boldsymbol { u } _ \\alpha + 1 , \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 186, + 513, + 808, + 804 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "where equality (a) uses line 8 of the algorithm and the inequality employs the Cauchy-Schwartz inequality followed by Lipschitz continuity of $B$ . 
", + "bbox": [ + 173, + 806, + 821, + 837 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "On the other hand, ", + "bbox": [ + 174, + 843, + 297, + 857 + ], + "page_idx": 20 + }, + { + "type": "equation", + "img_path": "images/6a4e5417f7938f29831e57985545a264816ab553e42ce83944e5870df2859abe.jpg", + "text": "$$\n\\begin{array} { r l } & { - \\varphi _ { n + 1 , k } ( p ^ { * } ) = \\langle z ^ { * } - x _ { n + 1 } ^ { k } , w _ { n + 1 } ^ { * } - y _ { n + 1 } ^ { k } \\rangle } \\\\ & { \\qquad = \\langle z ^ { * } - x _ { n + 1 } ^ { k } , B ( z ^ { * } ) - B ( x _ { i } ^ { k } ) \\rangle + \\langle x _ { n + 1 } ^ { k } - z ^ { * } , e ^ { k } \\rangle } \\\\ & { \\qquad \\geq \\langle x _ { n + 1 } ^ { k } - z ^ { * } , e ^ { k } \\rangle , } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 279, + 861, + 717, + 924 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "where the second equality uses line 8 of the algorithm and the inequality follows from the monotonicity of $B$ . 
", + "bbox": [ + 171, + 103, + 823, + 132 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Combining (39) and (40) yields ", + "bbox": [ + 173, + 138, + 383, + 154 + ], + "page_idx": 21 + }, + { + "type": "equation", + "img_path": "images/2cef8eedbcd0916382969d399b818e62dc0ebe3405e9ef9217ea55ea2991d83d.jpg", + "text": "$$\n\\begin{array} { r l } & { \\circ _ { n + 1 , k } ( p ^ { k } ) - \\varphi _ { n + 1 , k } ( p ^ { * } ) \\geq \\rho _ { k } ( 1 - \\rho _ { k } L ) \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } + \\rho _ { k } ( 1 - 2 \\rho _ { k } L ) \\langle \\epsilon ^ { k } , B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\rangle } \\\\ & { \\qquad + \\langle z ^ { k } - x _ { n + 1 } ^ { k } , e ^ { k } \\rangle + \\langle x _ { n + 1 } ^ { k } - z ^ { * } , e ^ { k } \\rangle - \\rho _ { k } ^ { 2 } L \\| \\epsilon ^ { k } \\| ^ { 2 } } \\\\ & { \\qquad = \\rho _ { k } ( 1 - \\rho _ { k } L ) \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } - \\rho _ { k } ^ { 2 } L \\| \\epsilon ^ { k } \\| ^ { 2 } } \\\\ & { \\qquad + \\rho _ { k } ( 1 - 2 \\rho _ { k } L ) \\langle \\epsilon ^ { k } , B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\rangle + \\langle z ^ { k } - z ^ { * } , e ^ { k } \\rangle . 
\\qquad ( 4 1 ) } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 181, + 154, + 826, + 238 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Now, if we take expectations conditioned on $\\mathcal { F } _ { k }$ and use (9), we obtain ", + "bbox": [ + 181, + 243, + 638, + 258 + ], + "page_idx": 21 + }, + { + "type": "equation", + "img_path": "images/a109b0dd57cd47be02010d6f26dcc8ed46492611dc1ba10ccc11675e82d12b9d.jpg", + "text": "$$\n{ \\mathbb E } \\big [ \\langle z ^ { k } - z ^ { * } , e ^ { k } \\rangle \\bigm | \\mathcal F _ { k } \\big ] = \\langle z ^ { k } - z ^ { * } , { \\mathbb E } [ e ^ { k } | \\mathcal F _ { k } ] \\rangle = 0 .\n$$", + "text_format": "latex", + "bbox": [ + 325, + 260, + 669, + 280 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Similarly, (9) also yields ", + "bbox": [ + 174, + 280, + 338, + 295 + ], + "page_idx": 21 + }, + { + "type": "equation", + "img_path": "images/a524547846e253ac644a2637a2aa66698e5bd1ed9aa153aa81a39c224fa4c1e5.jpg", + "text": "$$\n\\begin{array} { r } { \\mathbb { E } \\big [ \\langle \\epsilon ^ { k } , B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\rangle \\big | \\mathcal { F } _ { k } \\big ] = \\langle \\mathbb { E } [ \\epsilon ^ { k } | \\mathcal { F } _ { k } ] , B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\rangle = 0 . 
} \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 279, + 296, + 715, + 316 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "hus, using (42) and (43) and taking expectations of (41) yields ", + "bbox": [ + 184, + 316, + 588, + 332 + ], + "page_idx": 21 + }, + { + "type": "equation", + "img_path": "images/fee85ee1b4f440fe08548cae6b0960b74f1d34127c39931c49651b80ddc21aad.jpg", + "text": "$$\n\\begin{array} { r l } & { \\mathbb { E } [ \\varphi _ { n + 1 , k } ( p ^ { k } ) - \\varphi _ { n + 1 , k } ( p ^ { * } ) \\mid \\mathcal { F } _ { k } ] \\ge \\rho _ { k } ( 1 - \\rho _ { k } L ) \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } - \\rho _ { k } ^ { 2 } L \\mathbb { E } [ \\| \\epsilon ^ { k } \\| ^ { 2 } \\vert \\mathcal { F } _ { k } ] } \\\\ & { \\qquad \\ge \\rho _ { k } ( 1 - \\bar { \\rho } L ) \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } - \\rho _ { k } ^ { 2 } N L ( 1 + \\| B ( z ^ { k } ) \\| ^ { 2 } ) , } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 189, + 330, + 807, + 376 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "where in the second inequality we used (12) and the noise variance bound (10). Recall from (12) that $1 - \\overline { { \\rho } } L > 0$ . ", + "bbox": [ + 173, + 388, + 825, + 416 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Next, we remark that ", + "bbox": [ + 173, + 424, + 315, + 438 + ], + "page_idx": 21 + }, + { + "type": "equation", + "img_path": "images/908750830336ba4611314739dfeef10958b281d08ec14b4328ed49b40e2d4aa7.jpg", + "text": "$$\n\\begin{array} { r l } & { \\| B ( z ^ { k } ) \\| ^ { 2 } = \\| B ( z ^ { k } ) - B ( z ^ { * } ) + B ( z ^ { * } ) \\| ^ { 2 } } \\\\ & { \\qquad \\leq 2 L ^ { 2 } \\| z ^ { k } - z ^ { * } \\| ^ { 2 } + 2 \\| B ( z ^ { * } ) \\| ^ { 2 } \\leq 2 L ^ { 2 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + 2 \\| B ( z ^ { * } ) \\| ^ { 2 } . 
} \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 243, + 438, + 754, + 479 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Substituting this inequality into (44) yields ", + "bbox": [ + 174, + 479, + 455, + 494 + ], + "page_idx": 21 + }, + { + "type": "equation", + "img_path": "images/c185e6b0a8b9cd53f00cbcc89d0f4e975fa5fb7ce030633f594ce590b87d0e11.jpg", + "text": "$$\n\\begin{array} { r l } & { \\mathbb { E } [ \\varphi _ { n + 1 , k } ( p ^ { k } ) - \\varphi _ { n + 1 , k } ( p ^ { * } ) | \\mathcal { F } _ { k } ] \\geq \\rho _ { k } ( 1 - \\overline { { \\rho } } L ) \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } } \\\\ & { \\qquad - 2 \\rho _ { k } ^ { 2 } N L ^ { 3 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - \\rho _ { k } ^ { 2 } N L ( 1 + 2 \\| B ( z ^ { * } ) \\| ^ { 2 } ) . } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 191, + 493, + 779, + 537 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Finalizing the lower bound on the $\\varphi _ { k }$ -gap Summing (37) over $i \\in 1 . . 
n$ and using (45) yields ", + "bbox": [ + 169, + 542, + 803, + 559 + ], + "page_idx": 21 + }, + { + "type": "equation", + "img_path": "images/9b68b90678ff9366588df480aa47f8d6e3c70a5dbfcde6d18ba165fc27734677.jpg", + "text": "$$\n\\begin{array} { r l r } { { \\mathbb { E } [ \\varphi _ { k } ( p ^ { k } ) - \\varphi _ { k } ( p ^ { * } ) | \\mathcal { F } _ { k } ] = \\sum _ { i = 1 } ^ { n + 1 } \\mathbb { E } [ \\varphi _ { i , k } ( p ^ { k } ) - \\varphi _ { i , k } ( p ^ { * } ) | \\mathcal { F } _ { k } ] } } \\\\ & { } & { \\geq \\frac { 7 } { 2 } \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\frac { 1 } { 2 \\tau } \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } } \\\\ & { } & { + \\rho _ { k } ( 1 - \\overline { { \\rho } } L ) \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } - 2 \\rho _ { k } ^ { 2 } N L ^ { 3 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } } \\\\ & { } & { - \\rho _ { k } ^ { 2 } N L ( 1 + 2 \\| B ( z ^ { * } ) \\| ^ { 2 } ) . } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 205, + 559, + 790, + 688 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "C.7 ESTABLISHING STOCHASTIC QUASI-FEJER MONOTONICITY ", + "bbox": [ + 174, + 696, + 632, + 713 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Returning to (35), ", + "bbox": [ + 173, + 724, + 294, + 738 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "$\\begin{array} { r } { \\mathbb { E } [ \\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } | \\mathcal { F } _ { k } ] \\le ( 1 + C _ { 1 } \\alpha _ { k } ^ { 2 } ) \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - 2 \\alpha _ { k } \\mathbb { E } [ \\varphi _ { k } ( p ^ { k } ) - \\varphi _ { k } ( p ^ { * } ) | \\mathcal { F } _ { k } ] + C _ { 2 } \\alpha _ { k } ^ { 2 } , } \\end{array}$ we may now substitute (46) for the expectation on the right-hand side. 
First, define ", + "bbox": [ + 178, + 741, + 797, + 775 + ], + "page_idx": 21 + }, + { + "type": "equation", + "img_path": "images/11aa682640a3403534593a006f13059473eb882f9691f5197ad2881d9c045666.jpg", + "text": "$$\nT _ { k } \\doteq \\frac { \\tau } { \\overline { { \\rho } } } \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\frac { 1 } { \\overline { { \\rho } } \\tau } \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + 2 ( 1 - \\overline { { \\rho } } L ) \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } ,\n$$", + "text_format": "latex", + "bbox": [ + 235, + 776, + 761, + 818 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "after which we may use (46) in (35) to yield ", + "bbox": [ + 174, + 819, + 464, + 833 + ], + "page_idx": 21 + }, + { + "type": "equation", + "img_path": "images/51f6e4ea6827b0fab220fa09d144f61f5dc0d4e9dd17ff0cfa53eada7217961b.jpg", + "text": "$$\n\\begin{array} { r } { \\mathbb { E } [ \\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } | \\mathcal { F } _ { k } ] \\le \\big ( 1 + C _ { 1 } \\alpha _ { k } ^ { 2 } + C _ { 3 } \\alpha _ { k } \\rho _ { k } ^ { 2 } \\big ) \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - \\alpha _ { k } \\rho _ { k } T _ { k } + C _ { 2 } \\alpha _ { k } ^ { 2 } + C _ { 4 } \\alpha _ { k } \\rho _ { k } ^ { 2 } } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 186, + 833, + 784, + 853 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "here $C _ { 1 }$ and $C _ { 2 }$ are defined as before in (33) and (34) and ", + "bbox": [ + 189, + 854, + 563, + 869 + ], + "page_idx": 21 + }, + { + "type": "equation", + "img_path": "images/8c1a47741607a1b552df795163176d43cc5e21fb70ac1f4a33110458fdcfdf26.jpg", + "text": "$$\n\\begin{array} { l } { C _ { 3 } = 4 N L ^ { 3 } } \\\\ { C _ { 4 } = 2 N L ( 1 + 2 \\| B ( z ^ { \\ast } ) \\| ^ { 2 } ) . 
} \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 400, + 869, + 598, + 909 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "This completes the proof of Lemma 3. ", + "bbox": [ + 174, + 909, + 426, + 924 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "C.8 A CONVERGENCE LEMMA ", + "text_level": 1, + "bbox": [ + 176, + 103, + 401, + 118 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Before establishing almost-sure convergence, we need the following lemma to derive convergence of the iterates from convergence of $T _ { k }$ defined above. Note that a more elaborate result would be needed in an infinite-dimensional setting. ", + "bbox": [ + 174, + 130, + 823, + 172 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Lemma 4. For deterministic sequences $z ^ { k } \\in \\mathbb { R } ^ { ( n + 1 ) d }$ , $\\{ ( w _ { i } ^ { k } ) _ { i = 1 } ^ { n + 1 } \\} \\ \\in \\ { \\mathcal { P } }$ , and $\\{ ( x _ { i } ^ { k } , y _ { i } ^ { k } ) _ { i = 1 } ^ { n + 1 } \\} \\in$ $\\mathbb { R } ^ { 2 ( n + 1 ) d }$ , suppose that $y _ { i } ^ { k } \\in A _ { i } ( x _ { i } ^ { k } )$ for i ∈ 1..n, $\\textstyle \\sum _ { i = 1 } ^ { n + 1 } w _ { i } ^ { k } = 0$ i=, ", + "bbox": [ + 171, + 176, + 821, + 212 + ], + "page_idx": 22 + }, + { + "type": "equation", + "img_path": "images/e15e1a65faa6eed30bf8fd1c364850ce6885980aea45d59c103767f4fadedcc1.jpg", + "text": "$$\n\\xi _ { 1 } \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\xi _ { 2 } \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + \\xi _ { 3 } \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } \\to 0\n$$", + "text_format": "latex", + "bbox": [ + 267, + 217, + 728, + 260 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "for scalars $\\xi _ { 1 } , \\xi _ { 2 } , \\xi _ { 3 } > 0$ , and $p ^ { k } \\doteq ( z ^ { k } , w _ { 1 } ^ { k } , \\ldots , w _ { n + 1 } ^ { k } ) \\to \\hat { p } 
\\doteq ( \\hat { z } , \\hat { w } _ { 1 } , \\ldots , \\hat { w } _ { n + 1 } )$ . Then $\\hat { p } \\in \\mathcal S$ ", + "bbox": [ + 169, + 267, + 805, + 286 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Proof. Fix any $i \\in \\{ 1 , \\ldots , n \\}$ . Since $\\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| \\to 0$ by (50) and $w _ { i } ^ { k } \\hat { w } _ { i }$ , we also have $y _ { i } ^ { k } \\hat { w } _ { i }$ . Similarly, (50) also implies that $\\lVert z ^ { k } - x _ { i } ^ { k } \\rVert \\to 0$ , so from $z ^ { k } \\hat { z }$ we also have $x _ { i } ^ { k } \\hat { z }$ Since $y _ { i } ^ { k } \\in A _ { i } ( x _ { i } ^ { k } )$ and $( x _ { i } ^ { k } , y _ { i } ^ { k } ) ( \\hat { z } , \\hat { w } _ { i } )$ , (Bauschke & Combettes, 2017, Prop. 20.37) implies $\\hat { w } _ { i } \\in A _ { i } ( \\hat { z } )$ . Since $i$ was arbitrary, the preceding conclusions hold for $i \\in 1 . . n$ . ", + "bbox": [ + 173, + 301, + 825, + 364 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Now, (50) also implies that $\\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| \\to 0$ . Therefore, since $w _ { n + 1 } ^ { k } \\to \\hat { w } _ { n + 1 }$ , we also have $B ( z ^ { k } ) \\to \\hat { w } _ { n + 1 }$ . Much as before, since $( z ^ { k } , B ( z ^ { k } ) ) ( \\hat { z } , \\hat { w } _ { n + 1 } )$ , we may apply (Bauschke & Combettes, 2017, Prop. 20.37) to conclude that that $\\hat { w } _ { n + 1 } = B ( \\hat { z } )$ . ", + "bbox": [ + 173, + 369, + 826, + 415 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Since the linear subspace $\\mathcal { P }$ defined in (6) must be closed, the limit $\\left( \\hat { z } , \\hat { w } _ { 1 } , \\dots , \\hat { w } _ { n + 1 } \\right)$ of $\\{ ( z ^ { k } , w _ { 1 } ^ { k } , \\ldots , w _ { n + 1 } ^ { k } ) \\} \\subset \\mathcal { P }$ must be in $\\mathcal { P }$ , hence $\\textstyle \\sum _ { i = 1 } ^ { n + 1 } { \\hat { w } } _ { i } = 0$ . 
", + "bbox": [ + 176, + 421, + 823, + 454 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "nt . $\\hat { p } = ( \\hat { z } , \\hat { w } _ { 1 } , \\dots , \\hat { w } _ { n + 1 } )$ satisfies tions defi $\\hat { w } _ { i } \\in A _ { i } ( \\hat { z } )$ for ship $i \\in 1 . . n$ , $\\hat { w } _ { n + 1 } = B ( \\hat { z } )$ , and $\\textstyle \\sum _ { i = 1 } ^ { n + 1 } { \\hat { w } } _ { i } = 0$ $s$ $\\hat { p } \\in \\mathcal S$ ", + "bbox": [ + 173, + 458, + 826, + 492 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "C.9 FINISHING THE PROOF OF THEOREM 1 ", + "text_level": 1, + "bbox": [ + 176, + 507, + 483, + 523 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Given $\\textstyle \\sum _ { k } \\alpha _ { k } ^ { 2 } < \\infty$ , and $\\sum \\alpha _ { k } \\rho _ { k } ^ { 2 } < \\infty$ , (47) satisfies the conditions of Stochastic Quasi-Fejer Monotonicity as given in Lemma 2. By applying Lemma 2, we conclude that there exist $\\Omega _ { 1 } , \\Omega _ { 2 } , \\Omega _ { 3 }$ such that $P [ \\Omega _ { i } ] = 1$ for $i = { 1 , 2 , 3 }$ and ", + "bbox": [ + 173, + 534, + 826, + 578 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "1. for all $v \\in \\Omega _ { 1 }$ ", + "bbox": [ + 214, + 589, + 325, + 604 + ], + "page_idx": 22 + }, + { + "type": "equation", + "img_path": "images/32b73f4fcaa1a2e08db69eefb664ad651527a0b201d7a4b8dac9d3b578f8d85d.jpg", + "text": "$$\n\\sum _ { k = 1 } ^ { \\infty } \\alpha _ { k } \\rho _ { k } T _ { k } ( v ) < \\infty ,\n$$", + "text_format": "latex", + "bbox": [ + 452, + 611, + 601, + 652 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "2. for all $v \\in \\Omega _ { 2 }$ , and $p ^ { * } \\in { \\mathcal { S } }$ , $\\| p ^ { k } ( v ) - p ^ { * } \\|$ converges to a finite nonnegative random-variable, ", + "bbox": [ + 212, + 662, + 825, + 680 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "3. 
for all $v \\in \\Omega _ { 3 } , p ^ { k } ( v )$ remains bounded. ", + "bbox": [ + 212, + 684, + 491, + 700 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Since $\\textstyle \\sum _ { k = 1 } ^ { \\infty } \\alpha _ { k } \\rho _ { k } = \\infty$ , (51) implies that for all $v \\in \\Omega _ { 1 }$ there exists a subsequence $q _ { k } ( v )$ such that ", + "bbox": [ + 171, + 710, + 823, + 727 + ], + "page_idx": 22 + }, + { + "type": "equation", + "img_path": "images/90c3ec9255f814d47099fbff42eb94da7804c7fca8841b75d2215bfbda6f3802.jpg", + "text": "$$\nT _ { q _ { k } ( v ) } \\to 0 .\n$$", + "text_format": "latex", + "bbox": [ + 457, + 734, + 540, + 752 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Let $\\Omega ^ { \\prime } = \\Omega _ { 1 } \\cap \\Omega _ { 2 } \\cap \\Omega _ { 3 }$ and note that $P [ \\Omega ^ { \\prime } ] = 1$ . Choose $v \\in \\Omega ^ { \\prime }$ . Since $p ^ { k } ( v )$ remains bounded, so does $p ^ { q _ { k } ( v ) } ( v )$ for $q _ { k } ( v )$ defined above in (52). Thus there exists a subsequence $r _ { k } ( v ) \\subseteq q _ { k } ( v )$ and $\\hat { p } ( v ) \\in \\mathbb { R } ^ { ( n + 2 ) d }$ such that $p ^ { r _ { k } ( v ) } ( v ) \\hat { p } ( v )$ . 
But since $T _ { q _ { k } ( v ) } \\to 0$ , it also follows that $T _ { r _ { k } ( v ) } \\to 0$ , that is, ", + "bbox": [ + 173, + 767, + 826, + 829 + ], + "page_idx": 22 + }, + { + "type": "equation", + "img_path": "images/7d02e07e324d8a9aeee14ca2095c1217ccd896ed9199f904d123a88f63dde927.jpg", + "text": "$$\n\\begin{array} { r l r } { { \\frac { \\tau } { \\rho } \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { r _ { k } ( v ) } ( v ) - w _ { i } ^ { r _ { k } ( v ) } ( v ) \\| ^ { 2 } + \\frac { 1 } { \\rho \\tau } \\sum _ { i = 1 } ^ { n } \\| z ^ { r _ { k } ( v ) } ( v ) - x _ { i } ^ { r _ { k } ( v ) } ( v ) \\| ^ { 2 } } } \\\\ & { } & { \\qquad + 2 ( 1 - \\overline { { \\rho } } L ) \\| B ( z ^ { r _ { k } ( v ) } ( v ) ) - w _ { n + 1 } ^ { r _ { k } ( v ) } ( v ) \\| ^ { 2 } \\to 0 . } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 187, + 838, + 808, + 904 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "We then have from Lemma 4 that $\\hat { p } ( v ) \\in S$ . ", + "bbox": [ + 174, + 909, + 462, + 924 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Since $p ^ { r _ { k } ( v ) } ( v ) \\hat { p } ( v )$ , it follows that $\\lVert p ^ { r _ { k } ( v ) } ( v ) - \\hat { p } ( v ) \\rVert \\to 0$ . But since $\\hat { p } ( v ) \\in S , \\| p ^ { k } ( v ) - \\hat { p } ( v ) \\|$ converges by point 2 above. Thus ", + "bbox": [ + 173, + 102, + 823, + 132 + ], + "page_idx": 23 + }, + { + "type": "equation", + "img_path": "images/748c81bde216d95dc0a3a4f346d3ea01b0ea6d25d54f69de6fd2eba2aeace320.jpg", + "text": "$$\n\\operatorname* { l i m } _ { k \\to \\infty } \\| p ^ { k } ( v ) - \\hat { p } ( v ) \\| = \\operatorname* { l i m } _ { k \\to \\infty } \\| p ^ { r _ { k } ( v ) } ( v ) - \\hat { p } ( v ) \\| = 0 .\n$$", + "text_format": "latex", + "bbox": [ + 316, + 138, + 678, + 165 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Therefore $p ^ { k } ( v ) \\hat { p } ( v ) \\in \\mathcal { S }$ . 
Thus there exists $\\hat { p } \\in \\mathcal S$ such that $p ^ { k } \\hat { p }$ a.s., which completes the proof of Theorem 1. ", + "bbox": [ + 173, + 172, + 825, + 203 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "C.10 TWO ADDITIONAL RESULTS ", + "text_level": 1, + "bbox": [ + 176, + 219, + 423, + 234 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "In this section, we prove two additional useful results about SPS. First, that $x _ { i } ^ { k } \\hat { z }$ (a.s.) for $i = 1 , \\ldots , n$ . Second, that $G _ { k } \\to 0$ (a.s.). ", + "bbox": [ + 173, + 246, + 825, + 275 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Note that ", + "bbox": [ + 173, + 281, + 238, + 296 + ], + "page_idx": 23 + }, + { + "type": "equation", + "img_path": "images/0440627561d8ce777d9ee6f78e45ef19515f026b3cf865cdb8a6950b263bea96.jpg", + "text": "$$\nx _ { i } ^ { k } = J _ { \\tau A _ { i } } ( z ^ { k } + \\tau w _ { i } ^ { k } )\n$$", + "text_format": "latex", + "bbox": [ + 421, + 294, + 575, + 313 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "and since $z ^ { k }$ and $w _ { i } ^ { k }$ convergence a.s., so does $x _ { i } ^ { k }$ . Consider the subsequence $q _ { k } ( v )$ such that (52) holds. 
Then ", + "bbox": [ + 169, + 316, + 825, + 343 + ], + "page_idx": 23 + }, + { + "type": "equation", + "img_path": "images/d33e75965917693fec5a75456d8bbfda621de1046323b8908959411188fc4760.jpg", + "text": "$$\nz ^ { q _ { k } ( v ) } - x _ { i } ^ { q _ { k } ( v ) } 0\n$$", + "text_format": "latex", + "bbox": [ + 429, + 340, + 568, + 362 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "thus ", + "bbox": [ + 173, + 366, + 205, + 380 + ], + "page_idx": 23 + }, + { + "type": "equation", + "img_path": "images/2b2c5a46db20413fa548a746f333c068bdb7c878bb77383191d5cd983dfaa4c5.jpg", + "text": "$$\nx _ { i } ^ { q _ { k } ( v ) } \\hat { z } .\n$$", + "text_format": "latex", + "bbox": [ + 457, + 376, + 539, + 397 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Since $x _ { i } ^ { k }$ converges to some limit (a.s.), that limit must be $\\hat { z }$ . ", + "bbox": [ + 173, + 401, + 568, + 417 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Recall that ", + "bbox": [ + 173, + 424, + 248, + 438 + ], + "page_idx": 23 + }, + { + "type": "equation", + "img_path": "images/151c16ae8074e9e0055aa01a49c3f69a548055954baf36b93febd019dfff97d5.jpg", + "text": "$$\n\\begin{array} { r } { G _ { k } \\doteq \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } . } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 271, + 444, + 723, + 465 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "We have shown that $z ^ { k }$ and $x _ { i } ^ { k }$ share the same limit for $i = 1 , \\ldots , n$ (a.s.). Therefore $z ^ { k } - x _ { i } ^ { k } \\to 0$ (a.s.). 
Since ", + "bbox": [ + 171, + 472, + 825, + 500 + ], + "page_idx": 23 + }, + { + "type": "equation", + "img_path": "images/4a835aa7c8343f26bfecb7e5e0d0dede023435d09bb032d4620b11b37dc014fc.jpg", + "text": "$$\ny _ { i } ^ { k } - w _ { i } ^ { k } = \\tau ^ { - 1 } ( z ^ { k } - x _ { i } ^ { k } ) ,\n$$", + "text_format": "latex", + "bbox": [ + 408, + 498, + 586, + 518 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "it follows that $y _ { i } ^ { k } - w _ { i } ^ { k } \\to 0$ (a.s.) for $i = 1 , \\ldots , n$ . Therefore ", + "bbox": [ + 171, + 520, + 581, + 537 + ], + "page_idx": 23 + }, + { + "type": "equation", + "img_path": "images/40310841d9035ee07b18142bb91b309417da6f135b21758f3617359ab73da9c5.jpg", + "text": "$$\nG _ { k } \\to \\| B ( \\hat { z } ) - \\hat { w } _ { n + 1 } \\| ^ { 2 } .\n$$", + "text_format": "latex", + "bbox": [ + 413, + 545, + 583, + 563 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "But since $( z , \\hat { w } _ { 1 } , \\dots , \\hat { w } _ { n + 1 } ) \\in S$ , $\\hat { w } _ { n + 1 } = B ( \\hat { z } )$ implying that $G _ { k } \\to 0$ (a.s.). 
", + "bbox": [ + 173, + 569, + 684, + 587 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "D PROOF OF LEMMA 1 ", + "text_level": 1, + "bbox": [ + 174, + 606, + 379, + 622 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "If $G _ { k } = 0$ , then ", + "bbox": [ + 173, + 637, + 279, + 654 + ], + "page_idx": 23 + }, + { + "type": "equation", + "img_path": "images/b2792ea2f4f3698192fa19e4d41bb503dd15d5640a490afd3ecb253be78cefa8.jpg", + "text": "$$\n\\forall i = 1 , \\ldots , n : \\quad y _ { i } ^ { k } = w _ { i } ^ { k } \\mathrm { ~ a n d ~ } z ^ { k } = x _ { i } ^ { k } .\n$$", + "text_format": "latex", + "bbox": [ + 359, + 659, + 637, + 678 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Since $y _ { i } ^ { k } \\in A _ { i } ( x _ { i } ^ { k } )$ for $i = 1 , \\ldots , n$ , (53) implies that that ", + "bbox": [ + 173, + 685, + 557, + 703 + ], + "page_idx": 23 + }, + { + "type": "equation", + "img_path": "images/49e56291aaa31ae64d937bbc229390022d58efc3bf9fce2fc3a74829efd0bc8b.jpg", + "text": "$$\n\\forall i \\in 1 . . n : \\quad w _ { i } ^ { k } \\in A _ { i } ( z ^ { k } ) .\n$$", + "text_format": "latex", + "bbox": [ + 403, + 709, + 593, + 729 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Furthermore $G _ { k } = 0$ also implies that $w _ { n + 1 } ^ { k } = B ( z ^ { k } )$ . Finally, since $\\textstyle \\sum _ { i = 1 } ^ { n + 1 } w _ { i } ^ { k } = 0$ , we have that ", + "bbox": [ + 176, + 737, + 813, + 756 + ], + "page_idx": 23 + }, + { + "type": "equation", + "img_path": "images/0df7136549ee64cdfc42bacb3918667d0334a6a45cdbf16f6dbb05c58de79d6d.jpg", + "text": "$$\n( z ^ { k } , w _ { 1 } ^ { k } , \\ldots , w _ { n + 1 } ^ { k } ) \\in { \\mathcal { S } } .\n$$", + "text_format": "latex", + "bbox": [ + 413, + 762, + 583, + 784 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Conversely, suppose $( z ^ { k } , w _ { 1 } ^ { k } , \\ldots , w _ { n + 1 } ^ { k } ) \\in { \\mathcal { S } }$ . 
The definition of $s$ implies that $B ( z ^ { k } ) = w _ { n + 1 } ^ { k }$ and furthermore that $w _ { i } ^ { k } \\in A _ { i } ( z ^ { k } )$ for $i \\in 1 . . n$ . For any $i \\in 1 . . n$ , considering line 3 of Algorithm 1, we may write $t _ { i } ^ { k } = z ^ { k } + \\tau w _ { i . } ^ { k } \\in ( I + \\tau A _ { i } ) ( z ^ { k } )$ , implying $z ^ { k } \\in ( I + \\tau A _ { i } ) ^ { - 1 } ( t _ { i } ^ { k } )$ . But since the resolvent $J _ { \\tau A _ { i } } = ( I + \\tau A _ { i } ) ^ { - 1 }$ is single-valued (Bauschke & Combettes, 2017, Prop. 23.8), we must have $z ^ { k } = ( I + \\tau A _ { i } ) ^ { - 1 } ( t _ { i } ^ { k } )$ . Thus, by line 4, we have $x _ { i } ^ { k } = z ^ { k }$ . We may also derive from line 5 that ", + "bbox": [ + 173, + 796, + 826, + 873 + ], + "page_idx": 23 + }, + { + "type": "equation", + "img_path": "images/2917366b2617db8f1010409ffffdd31cc441d40e0b08c7a4f40f7f9002464c34.jpg", + "text": "$$\ny _ { i } ^ { k } = \\tau ^ { - 1 } ( t _ { i } ^ { k } - x _ { i } ^ { k } ) = \\tau ^ { - 1 } ( z ^ { k } + \\tau w _ { i } ^ { k } - z ^ { k } ) = w _ { i } ^ { k } .\n$$", + "text_format": "latex", + "bbox": [ + 328, + 881, + 668, + 900 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Thus, since $x _ { i } ^ { k } = z ^ { k }$ and $y _ { i } ^ { k } = w _ { i } ^ { k }$ for $i = 1 , \\ldots , n$ and $w _ { n + 1 } ^ { k } = B ( z ^ { k } )$ , we have that $G _ { k } = 0$ ", + "bbox": [ + 171, + 907, + 787, + 926 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "E PROOF OF THEOREM 2 ", + "text_level": 1, + "bbox": [ + 174, + 101, + 398, + 118 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "In addition to the proof, we provide a more detailed statement of the theorem: ", + "bbox": [ + 173, + 132, + 681, + 148 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Theorem 3. 
Fix the total iterations $K \\geq 1$ of Algorithm 1 and set ", + "bbox": [ + 173, + 151, + 606, + 167 + ], + "page_idx": 24 + }, + { + "type": "equation", + "img_path": "images/936cde96fcc58bc50ba91cae51c795cbfb5482cabf41e73a300f6a1d380110b9.jpg", + "text": "$$\n\\begin{array} { l l } { { \\forall k = 1 , \\dots , K : } } & { { \\qquad \\rho _ { k } = \\rho \\doteq \\operatorname* { m i n } \\left\\{ K ^ { - 1 / 4 } , \\frac { 1 } { 2 L } \\right\\} } } \\\\ { { \\forall k = 1 , \\dots , K : } } & { { \\qquad \\alpha _ { k } = \\alpha \\doteq C _ { f } \\rho ^ { 2 } } } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 282, + 171, + 712, + 227 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "for some $C _ { f } > 0$ . Suppose (9)-(11) hold. Then for any $p ^ { * } \\in { \\mathcal { S } }$ , ", + "bbox": [ + 171, + 229, + 586, + 246 + ], + "page_idx": 24 + }, + { + "type": "equation", + "img_path": "images/c9fa1d04d7da23ebdc1b4bef62f8dc703baf5af7613ef6df359b99fda6b816f6.jpg", + "text": "$$\n\\begin{array} { l } { \\displaystyle \\frac { 1 } { K } \\sum _ { j = 1 } ^ { K } \\mathbb { E } [ G _ { j } ] \\leq \\frac { 8 L ^ { 3 } \\exp \\left( C _ { f } ( C _ { 1 } + C _ { 3 } ) \\right) } { C _ { f } \\operatorname* { m i n } \\{ \\tau , \\tau ^ { - 1 } \\} K } \\left( \\| p ^ { 1 } - p ^ { * } \\| ^ { 2 } + \\frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } \\right) \\mathrm { ~ } f o r ~ K < ( 2 L ) ^ { 4 } } \\\\ { \\displaystyle \\frac { 1 } { K } \\sum _ { j = 1 } ^ { K } \\mathbb { E } [ G _ { j } ] \\leq \\frac { \\exp \\left( C _ { f } ( C _ { 1 } + C _ { 3 } ) \\right) } { C _ { f } \\operatorname* { m i n } \\{ \\tau , \\tau ^ { - 1 } \\} K ^ { 1 / 4 } } \\left( \\| p ^ { 1 } - p ^ { * } \\| ^ { 2 } + \\frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } \\right) \\mathrm { ~ } f o r ~ K \\geq ( 2 L ) ^ { 4 } . 
} \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 186, + 252, + 787, + 343 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "where $G _ { k }$ is the approximation residual defined in (14), and $C _ { 1 } , C _ { 2 } , C _ { 3 } , C _ { 4 }$ are the nonegative constants defined in (33), (34), (48), and (49), respectively. Therefore, ", + "bbox": [ + 173, + 345, + 825, + 376 + ], + "page_idx": 24 + }, + { + "type": "equation", + "img_path": "images/e986e6e1960ab0f3a833a9e77a76ef9d2f01388c506cb00bb90f663f549d2789.jpg", + "text": "$$\n\\frac { 1 } { K } \\sum _ { j = 1 } ^ { K } \\mathbb { E } [ G _ { j } ] = \\mathcal { O } ( K ^ { - 1 / 4 } ) .\n$$", + "text_format": "latex", + "bbox": [ + 403, + 381, + 593, + 425 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Proof. Fix $\\alpha _ { k } = \\alpha$ and $\\rho _ { k } = \\rho$ , where $\\alpha$ and $\\rho$ are the respective right-hand sides of (55)-(56). Lemma 3 implies that (21) so long as (9)-(11) hold and the stepsize $\\rho$ satisfies $\\rho < L ^ { - 1 }$ . Since ", + "bbox": [ + 173, + 439, + 826, + 468 + ], + "page_idx": 24 + }, + { + "type": "equation", + "img_path": "images/8bcf7b82a6af0b050ba47f614a56f1e1812efd3f74d3a27177cc9bd0f241b69b.jpg", + "text": "$$\n\\rho = \\operatorname* { m i n } \\left\\{ K ^ { - 1 / 4 } , \\frac { 1 } { 2 L } \\right\\} \\leq \\frac { 1 } { 2 L } ,\n$$", + "text_format": "latex", + "bbox": [ + 392, + 472, + 604, + 507 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "we conclude that (21) applies. 
", + "bbox": [ + 174, + 512, + 370, + 526 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Rewriting (21) with $\\alpha _ { k } = \\alpha$ and $\\rho _ { k } = \\rho$ , we have ", + "bbox": [ + 173, + 532, + 501, + 549 + ], + "page_idx": 24 + }, + { + "type": "equation", + "img_path": "images/f3ceb59174b248b72d8d2ac6ae444e5be98b9ef6963dd7f8cfaf30b57ea1d74c.jpg", + "text": "$$\n\\begin{array} { r } { \\mathbb { E } [ \\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } | \\mathcal { F } _ { k } ] \\le ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - \\alpha \\rho T _ { k } + C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } . } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 214, + 553, + 782, + 571 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Therefore, taking expectations over $\\mathcal { F } _ { k }$ , we have ", + "bbox": [ + 174, + 577, + 491, + 592 + ], + "page_idx": 24 + }, + { + "type": "equation", + "img_path": "images/be002811de5c0ba4afb39e57c2ef241e8e0e6a2e321c863bf375349c9c4c0b6f.jpg", + "text": "$$\n\\begin{array} { r } { \\mathbb { E } \\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } \\leq ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) \\mathbb { E } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - \\alpha \\rho \\mathbb { E } T _ { k } + C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } . 
} \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 209, + 597, + 763, + 616 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Recall that ", + "bbox": [ + 173, + 621, + 248, + 636 + ], + "page_idx": 24 + }, + { + "type": "equation", + "img_path": "images/c0c041abdc366d835ab20547eb97d52e9bd343c6e14371ccf0b8062a0abe9419.jpg", + "text": "$$\nT _ { k } \\doteq \\frac { \\tau } { \\rho } \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\frac { 1 } { \\rho \\tau } \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + 2 ( 1 - \\overline { { \\rho } } L ) \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } ,\n$$", + "text_format": "latex", + "bbox": [ + 233, + 638, + 761, + 680 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "where for the first two terms we have simply set $\\rho = \\overline { { \\rho } }$ because the stepsize is constant. However, for the final term, we will still use an upper bound, $\\overline { { \\rho } }$ , on $\\rho$ . In the current setting, we know that $\\rho \\leq ( 1 / 2 ) L ^ { - 1 }$ and therefore we may set $\\overline { { \\rho } } = ( 1 / 2 ) L ^ { - 1 }$ . 
Thus $1 - \\overline { { \\rho } } L = 1 / 2$ , leading to ", + "bbox": [ + 174, + 684, + 823, + 728 + ], + "page_idx": 24 + }, + { + "type": "equation", + "img_path": "images/7d17604a57c4da09806d10a8726be4a9262bf28ad5f53bcf71543543be7d2831.jpg", + "text": "$$\n\\rho \\mathbb { E } T _ { k } = \\tau \\sum _ { i = 1 } ^ { n } \\mathbb { E } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\tau ^ { - 1 } \\sum _ { i = 1 } ^ { n } \\mathbb { E } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + \\rho \\mathbb { E } \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } .\n$$", + "text_format": "latex", + "bbox": [ + 236, + 732, + 759, + 773 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Let ", + "bbox": [ + 173, + 779, + 199, + 792 + ], + "page_idx": 24 + }, + { + "type": "equation", + "img_path": "images/3542b23bd6ba76ee1a5024fd546bbc9ce81c8724379783e8cc3e1024c5f22f77.jpg", + "text": "$$\nU _ { k } \\doteq \\mathbb { E } \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } \\qquad W _ { k } \\doteq \\tau \\sum _ { i = 1 } ^ { n } \\mathbb { E } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\tau ^ { - 1 } \\sum _ { i = 1 } ^ { n } \\mathbb { E } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } ,\n$$", + "text_format": "latex", + "bbox": [ + 215, + 795, + 781, + 837 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "so that ", + "bbox": [ + 173, + 842, + 220, + 854 + ], + "page_idx": 24 + }, + { + "type": "equation", + "img_path": "images/2da8171408a871e6f550dde6e27402a69dbce0d53f091c1466a274077a810e82.jpg", + "text": "$$\n\\rho \\mathbb { E } T _ { k } = \\rho U _ { k } + W _ { k } ,\n$$", + "text_format": "latex", + "bbox": [ + 431, + 864, + 565, + 877 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "and also let ", + "bbox": [ + 173, + 883, + 251, + 898 + ], + "page_idx": 24 + }, + { + "type": "equation", + "img_path": "images/ca3de38cff2c10b5a84076d4198e8173a2cbe40638114fda907a44f2be6dac48.jpg", + "text": "$$\nV _ 
{ k } \\doteq \\mathbb { E } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } .\n$$", + "text_format": "latex", + "bbox": [ + 433, + 905, + 563, + 921 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Using these definitions in (59) we write ", + "bbox": [ + 174, + 103, + 434, + 118 + ], + "page_idx": 25 + }, + { + "type": "equation", + "img_path": "images/9afa166c3d95c3b6ee88ee851af061eaf8dd58fe08791b319530bb21e55da5f0.jpg", + "text": "$$\n\\begin{array} { r } { V _ { k + 1 } \\leq \\big ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } \\big ) V _ { k } - \\alpha \\rho U _ { k } - \\alpha W _ { k } + C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } . } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 269, + 122, + 725, + 141 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Therefore, ", + "bbox": [ + 173, + 146, + 245, + 160 + ], + "page_idx": 25 + }, + { + "type": "equation", + "img_path": "images/3679110dfca62716db36bd7e924798be4200825212368be40ad2d6882e90ea31.jpg", + "text": "$$\n\\begin{array} { c } { { V _ { k + 1 } + \\alpha \\rho U _ { k } + \\alpha W _ { k } \\leq ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) V _ { k } + C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } } } \\\\ { { \\Longleftrightarrow V _ { k + 1 } + \\alpha \\rho \\displaystyle \\sum _ { j = 1 } ^ { k } U _ { j } + \\alpha \\displaystyle \\sum _ { j = 1 } ^ { k } W _ { j } \\leq ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) V _ { k } + \\alpha \\rho \\displaystyle \\sum _ { j = 1 } ^ { k - 1 } U _ { j } + \\alpha \\displaystyle \\sum _ { j = 1 } ^ { k - 1 } W _ { j } } } \\\\ { { \\qquad + C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } } } \\\\ { { \\leq ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) \\left[ V _ { k } + \\alpha \\rho \\displaystyle \\sum _ { j = 1 } ^ { k - 1 } U _ { j } + \\alpha \\displaystyle \\sum _ { j = 1 } ^ { k - 1 } W _ { j } 
\\right] } } \\\\ { { \\qquad + C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } , } } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 192, + 164, + 810, + 320 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "where we have used that $U _ { k } , W _ { k } \\ge 0$ . Letting ", + "bbox": [ + 174, + 324, + 477, + 339 + ], + "page_idx": 25 + }, + { + "type": "equation", + "img_path": "images/cbce57afca0ba498d20517c0a2d49c2de2dab0ea31e87287717f3c7d21c6a7d6.jpg", + "text": "$$\nR _ { k } = V _ { k } + \\alpha \\rho \\sum _ { j = 1 } ^ { k - 1 } U _ { j } + \\alpha \\sum _ { j = 1 } ^ { k - 1 } W _ { j } ,\n$$", + "text_format": "latex", + "bbox": [ + 380, + 343, + 616, + 388 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "we then have ", + "bbox": [ + 173, + 395, + 261, + 409 + ], + "page_idx": 25 + }, + { + "type": "equation", + "img_path": "images/6cf8ab205320c887d3eb63c8c846fb87a8084875f11e81261b3ded0ee69822c3.jpg", + "text": "$$\nR _ { k + 1 } \\leq { \\left( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } \\right) } R _ { k } + C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } ,\n$$", + "text_format": "latex", + "bbox": [ + 323, + 412, + 673, + 431 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "which implies ", + "bbox": [ + 173, + 436, + 267, + 450 + ], + "page_idx": 25 + }, + { + "type": "equation", + "img_path": "images/54c85a90e8c290da50d2cd556d249e83a42680e60b3f29761524aa14ba3d1fd3.jpg", + "text": "$$\nR _ { k + 1 } \\leq ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { k } R _ { 1 } + ( C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } ) \\sum _ { j = 1 } ^ { k } ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { k - j } .\n$$", + "text_format": "latex", + "bbox": [ + 217, + 457, + 781, + 501 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Now, ", + "bbox": [ + 173, + 505, + 210, + 518 + ], + "page_idx": 25 + }, 
+ { + "type": "equation", + "img_path": "images/dd6f672dcc12cab7f51f57b8003c9e9869f8dde6472f70b41820ac2b9f412335.jpg", + "text": "$$\n\\begin{array} { r l r } { { \\sum _ { j = 1 } ^ { k } ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { k - j } = \\sum _ { j = 0 } ^ { k - 1 } ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { j } } } \\\\ & { } & { = \\frac { ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { k } - 1 } { ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) - 1 } } \\\\ & { } & { = \\frac { ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { k } - 1 } { C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } } } \\\\ & { } & { \\leq \\frac { ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { k } } { C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } } . } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 299, + 525, + 696, + 679 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Therefore, ", + "bbox": [ + 173, + 679, + 245, + 694 + ], + "page_idx": 25 + }, + { + "type": "equation", + "img_path": "images/dde5937c8b7f4dbfdccf262dd488ec99feb4ba7b68b4c155e3e24b70e83c2458.jpg", + "text": "$$\nR _ { k + 1 } \\leq ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { k } \\left( R _ { 1 } + { \\frac { C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } } { C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } } } \\right) .\n$$", + "text_format": "latex", + "bbox": [ + 302, + 700, + 694, + 733 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Fix the number of iterations $K \\geq 1$ . 
Now ", + "bbox": [ + 174, + 746, + 449, + 761 + ], + "page_idx": 25 + }, + { + "type": "equation", + "img_path": "images/f8ecdc1cdf63419fb980b006038e2f23f4047ebd1c48a84fbf075fac7cafd0db.jpg", + "text": "$$\n\\rho = \\operatorname* { m i n } \\left\\{ K ^ { - 1 / 4 } , \\frac { 1 } { 2 L } \\right\\} \\leq \\frac { 1 } { K ^ { 1 / 4 } } \\leq 1 .\n$$", + "text_format": "latex", + "bbox": [ + 369, + 765, + 627, + 799 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Therefore, ", + "bbox": [ + 173, + 804, + 245, + 818 + ], + "page_idx": 25 + }, + { + "type": "equation", + "img_path": "images/b9411a474832c7af5eee5bbf4f39ba58ca4a29c2f493647cf7e9b8e0f82cd857.jpg", + "text": "$$\n\\begin{array} { l } { \\displaystyle \\alpha \\rho \\sum _ { j = 1 } ^ { K } ( U _ { j } + W _ { j } ) \\leq \\alpha \\rho \\sum _ { j = 1 } ^ { K } U _ { j } + \\alpha \\sum _ { j = 1 } ^ { K } W _ { j } } \\\\ { \\leq R _ { K + 1 } } \\\\ { \\leq ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { K } \\left( R _ { 1 } + \\frac { C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } } { C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } } \\right) . 
} \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 259, + 821, + 738, + 924 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Dividing through by $\\alpha \\rho K$ , we obtain ", + "bbox": [ + 174, + 103, + 419, + 118 + ], + "page_idx": 26 + }, + { + "type": "equation", + "img_path": "images/709fbff6f7f9fc78233cab42c32bb1b3525562556338f382da851e69e5f67ba0.jpg", + "text": "$$\n\\frac { 1 } { K } \\sum _ { j = 1 } ^ { K } ( U _ { j } + W _ { j } ) \\leq \\frac { ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { K } } { \\alpha \\rho K } \\left( R _ { 1 } + \\frac { C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } } { C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } } \\right) ,\n$$", + "text_format": "latex", + "bbox": [ + 258, + 123, + 738, + 167 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "and since $\\alpha = C _ { f } \\rho ^ { 2 }$ , we also have ", + "bbox": [ + 174, + 183, + 401, + 198 + ], + "page_idx": 26 + }, + { + "type": "equation", + "img_path": "images/2a924310d03b5ee557f28fb34d78a936893e8f7cf3ff9134c8d76d1600906ee3.jpg", + "text": "$$\n\\frac { C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } } { C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } } = \\frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } .\n$$", + "text_format": "latex", + "bbox": [ + 390, + 204, + 607, + 239 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Furthermore, ", + "bbox": [ + 173, + 244, + 261, + 258 + ], + "page_idx": 26 + }, + { + "type": "equation", + "img_path": "images/b69e4375e97113c3ae9c1241a18fc51e9b50dc039c28a0839addd7cb241b9721.jpg", + "text": "$$\n\\rho \\leq K ^ { - \\frac { 1 } { 4 } } \\implies \\alpha \\leq C _ { f } K ^ { - \\frac { 1 } { 2 } } .\n$$", + "text_format": "latex", + "bbox": [ + 395, + 263, + 601, + 285 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Substituting these into (60) yields ", + "bbox": [ + 
174, + 290, + 397, + 305 + ], + "page_idx": 26 + }, + { + "type": "equation", + "img_path": "images/0e2124502d00a7615dcadf05fc1ed22e68a79cacf3e5071ac1736f2399dff60d.jpg", + "text": "$$\n\\begin{array} { r } { \\displaystyle \\frac { 1 } { K } \\sum _ { j = 1 } ^ { K } ( U _ { j } + W _ { j } ) \\leq \\frac { \\left( 1 + \\frac { C _ { f } ( C _ { f } C _ { 1 } + C _ { 3 } ) } { K } \\right) ^ { K } } { \\alpha \\rho K } \\left( R _ { 1 } + \\frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } \\right) } \\\\ { \\leq \\frac { \\exp ( C _ { f } ( C _ { f } C _ { 1 } + C _ { 3 } ) ) } { \\alpha \\rho K } \\left( R _ { 1 } + \\frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } \\right) , } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 274, + 309, + 723, + 400 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "where we have used that for any $t \\ge 0 , 1 + t / K \\le e ^ { t / K }$ , so therefore $( 1 + t / K ) ^ { K } \\leq e ^ { t }$ . ", + "bbox": [ + 171, + 405, + 756, + 421 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "The worst-case rates in terms of $K$ occur when $\\rho = K ^ { - 1 / 4 }$ and $\\alpha = C _ { f } K ^ { - 1 / 2 }$ . This is the case when $K \\geq ( 2 L ) ^ { 4 }$ . 
Substituting these into the denominator yields, for $K \\geq ( 2 L ) ^ { 4 }$ , that ", + "bbox": [ + 173, + 428, + 825, + 459 + ], + "page_idx": 26 + }, + { + "type": "equation", + "img_path": "images/a8c50d5cddeb344ec50671b2a66248b1a9062849bd17f1164cc995fe7923f6f9.jpg", + "text": "$$\n\\frac { 1 } { K } \\sum _ { j = 1 } ^ { K } ( U _ { j } + W _ { j } ) \\leq \\frac { \\exp ( C _ { f } ( C _ { f } C _ { 1 } + C _ { 3 } ) ) } { C _ { f } K ^ { 1 / 4 } } \\left( R _ { 1 } + \\frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } \\right) .\n$$", + "text_format": "latex", + "bbox": [ + 272, + 464, + 723, + 508 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Thus, since $G _ { k } \\leq \\operatorname* { m a x } \\{ \\tau , \\tau ^ { - 1 } \\} \\left( U _ { k } + W _ { k } \\right)$ , we obtain ", + "bbox": [ + 173, + 515, + 539, + 532 + ], + "page_idx": 26 + }, + { + "type": "equation", + "img_path": "images/97ff30d691a0554097db475829ee6914e88295e18e02f95e3f6207493be4aa19.jpg", + "text": "$$\n\\frac { 1 } { K } \\sum _ { j = 1 } ^ { K } \\mathbb { E } [ G _ { j } ] \\leq \\frac { \\operatorname* { m a x } \\{ \\tau , \\tau ^ { - 1 } \\} \\exp { ( C _ { f } ( C _ { f } C _ { 1 } + C _ { 3 } ) ) } } { C _ { f } K ^ { 1 / 4 } } \\left( \\| p ^ { 1 } - p ^ { * } \\| ^ { 2 } + \\frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } \\right) ,\n$$", + "text_format": "latex", + "bbox": [ + 214, + 537, + 782, + 582 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "which is (58). ", + "bbox": [ + 173, + 587, + 266, + 602 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "When $K < ( 2 L ) ^ { 4 }$ , (57) can similarly be obtained by substituting $\\rho = ( 2 L ) ^ { - 1 }$ and $\\alpha = C _ { f } ( 2 L ) ^ { - 2 }$ into (61). 
β–‘ ", + "bbox": [ + 174, + 608, + 823, + 637 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "F APPROXIMATION RESIDUALS ", + "text_level": 1, + "bbox": [ + 176, + 657, + 449, + 674 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "In this section we derive the approximation residual used to assess the performance of the algorithms in the numerical experiments. This residual relies on the following product-space reformulation of (1). ", + "bbox": [ + 173, + 688, + 825, + 732 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "F.1 PRODUCT-SPACE REFORMULATION AND RESIDUAL PRINCIPLE ", + "bbox": [ + 174, + 747, + 650, + 762 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Recall (1), the monotone inclusion we are solving: ", + "bbox": [ + 173, + 773, + 506, + 787 + ], + "page_idx": 26 + }, + { + "type": "equation", + "img_path": "images/3cda2b34e62a90a27633b8f271f0dd9396d59e4d546b2003413a53f23c6f3e9b.jpg", + "text": "$$\n{ \\mathrm { F i n d ~ } } z \\in \\mathbb { R } ^ { d } : 0 \\in \\sum _ { i = 1 } ^ { n } A _ { i } ( z ) + B ( z ) .\n$$", + "text_format": "latex", + "bbox": [ + 372, + 794, + 624, + 835 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "In this section we demonstrate a β€œproduct-space\" reformulation of (1) which allows us to rewrite it in a standard form involving just two operators, one maximal monotone and the other monotone and Lipschitz. This approach was pioneered in (BriceΓ±o-Arias $\\&$ Combettes, 2011; Combettes & Pesquet, 2012). Along with allowing for a simple definition of an approximation residual as a measure of approximation error in solving (1), it allows one to apply operator splitting methods originally formulated for two operators to problems such as (1) for any finite $n$ . 
", + "bbox": [ + 173, + 839, + 826, + 924 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Observe that solving (1) is equivalent to ", + "bbox": [ + 174, + 103, + 437, + 118 + ], + "page_idx": 27 + }, + { + "type": "equation", + "img_path": "images/7a56b1051c11c41e1ca53600e95cbd8143c44db6433bee45e39fe04bc10e8844.jpg", + "text": "$$\n\\begin{array} { l l } { \\mathrm { F i n d } \\left( w _ { 1 } , \\ldots , w _ { n } , z \\right) \\in \\mathbb { R } ^ { \\left( n + 1 \\right) d } : } & { w _ { i } \\in A _ { i } ( z ) , \\quad i \\in { 1 . . n } } \\\\ & { \\quad \\displaystyle 0 \\in \\sum _ { i = 1 } ^ { n } w _ { i } + B ( z ) . } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 297, + 122, + 697, + 188 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "This formulation resembles that of the extended solution set $s$ used in projective spitting, as given in (5), except that it combines the final two conditions in the definition of $s$ , and thus does not need the final dual variable $w _ { n + 1 }$ . From the definition of the inverse of an operator, the above formulation is equivalent to ", + "bbox": [ + 178, + 189, + 818, + 247 + ], + "page_idx": 27 + }, + { + "type": "equation", + "img_path": "images/a5e366b1daec78e36a75c0d0685354e977bf371f6228267db8c5657d622d206b.jpg", + "text": "$$\n\\begin{array} { r l } { \\mathrm { F i n d ~ } ( w _ { 1 } , \\dots , w _ { n } , z ) \\in \\mathbb { R } ^ { ( n + 1 ) d } : } & { 0 \\in A _ { i } ^ { - 1 } ( w _ { i } ) - z , \\quad i \\in 1 . . n } \\\\ & { 0 \\in \\displaystyle \\sum _ { i = 1 } ^ { n } w _ { i } + B ( z ) . 
} \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 279, + 251, + 718, + 315 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "These conditions are in turn equivalent to finding $( w _ { 1 } , \\ldots , w _ { n } , z ) \\in \\mathbb { R } ^ { ( n + 1 ) d }$ such that ", + "bbox": [ + 169, + 320, + 740, + 337 + ], + "page_idx": 27 + }, + { + "type": "equation", + "img_path": "images/3f2baa06ae4751b44507f1aec5ab9cad1cab25df653740d7a66eff10132dcc1d.jpg", + "text": "$$\n0 \\in \\mathcal { A } ( w _ { 1 } , \\ldots , w _ { n } , z ) + \\mathcal { B } ( w _ { 1 } , \\ldots , w _ { n } , z ) ,\n$$", + "text_format": "latex", + "bbox": [ + 349, + 342, + 645, + 358 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "where $\\mathcal { A }$ is the set-valued map ", + "bbox": [ + 173, + 364, + 379, + 378 + ], + "page_idx": 27 + }, + { + "type": "equation", + "img_path": "images/977d8a427bdf60035473ef5b916a73acdd9ad16bff24eb09b967888aa0743027.jpg", + "text": "$$\n\\mathcal { A } ( w _ { 1 } , \\dots , w _ { n } , z ) \\mapsto A _ { 1 } ^ { - 1 } ( w _ { 1 } ) \\times A _ { 2 } ^ { - 1 } ( w _ { 2 } ) \\times \\dots \\times A _ { n } ^ { - 1 } ( w _ { n } ) \\times \\{ 0 \\}\n$$", + "text_format": "latex", + "bbox": [ + 269, + 383, + 728, + 402 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "and $\\mathcal { B }$ is the single-valued operator ", + "bbox": [ + 173, + 407, + 410, + 421 + ], + "page_idx": 27 + }, + { + "type": "equation", + "img_path": "images/0ff8bced27f5d7b5ccb6d4d33cc7dda36dfb975caf3ecd6390b3e0d9fd11b075.jpg", + "text": "$$\n\\mathcal { B } ( w _ { 1 } , \\dots , w _ { n } , z ) \\mapsto \\left[ \\begin{array} { c c c c } { 0 } & { \\cdots } & { 0 } & { - I } \\\\ { \\vdots } & { \\ddots } & { \\vdots } & { \\vdots } \\\\ { 0 } & { \\cdots } & { 0 } & { - I } \\\\ { I } & { \\cdots } & { I } & { 0 } \\end{array} \\right] \\left[ \\begin{array} { c } { w _ { 1 } } \\\\ { \\vdots } \\\\ { w _ { n } } \\\\ { z } \\end{array} 
\\right] + \\left[ \\begin{array} { c } { 0 } \\\\ { \\vdots } \\\\ { 0 } \\\\ { B ( z ) } \\end{array} \\right] .\n$$", + "text_format": "latex", + "bbox": [ + 269, + 428, + 725, + 496 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "It is easily established that $\\mathcal { B }$ is maximal monotone and Lipschitz continuous, while $\\mathcal { A }$ is maximal monotone. Letting $\\mathcal { T } \\doteq \\mathcal { A } + \\mathcal { B }$ , it follows from (Bauschke & Combettes, 2017, Proposition 20.23) that $\\mathcal { T }$ is maximal monotone. Thus, we have reformulated (1) as the monotone inclusion $0 \\in \\mathcal { T } ( q )$ for $q$ in the product space $\\mathbb { R } ^ { ( n + 1 ) \\bar { d } }$ . A vector $z \\in \\mathbb { R } ^ { d }$ solves (1) if and only if there exists $( w _ { 1 } , \\dots , w _ { n } ) \\in \\mathbb { R } ^ { n d }$ such that $\\bar { 0 } \\in \\mathcal { T } ( q )$ , where $q = ( w _ { 1 } , \\dots , w _ { n } , z )$ . ", + "bbox": [ + 174, + 501, + 823, + 571 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "For any pair $( q , v )$ such that $v \\in \\mathcal { T } ( q )$ , $\\| v \\| ^ { 2 }$ represents an approximation residual for $q$ in the sense that $v = 0$ implies $q$ is a solution to (62). One may take $\\| \\bar { v } \\| ^ { 2 }$ as a measure of the error of $q$ as an approximate solution to (62), and it can only be 0 if $q$ is a solution. Given two approximate solutions $q _ { 1 }$ and $q _ { 2 }$ with certificates $v _ { 1 } \\in T ( q _ { 1 } )$ and $v _ { 2 } \\in \\mathcal { T } ( q _ { 2 } )$ , we will treat $q _ { 1 }$ as a β€œbetter” approximate solution than $q _ { 2 }$ if $\\| v _ { 1 } \\| ^ { 2 } < \\| v _ { 2 } \\| ^ { 2 }$ . Doing so is somewhat analogous to the practice, common in optimization, of using the gradient $\\| \\nabla f ( x ) \\| ^ { 2 }$ as a measure of quality of an approximate minimizer of some differentiable function $f$ . 
However, note that since $\\mathcal { T } ( q _ { 1 } )$ is a set, there may exist elements of $\\mathcal { T } ( q _ { 1 } )$ with smaller norm than $v _ { 1 }$ . Thus any given certificate $v _ { 1 }$ only corresponds to an upper bound on $\\mathrm { d i s t } ^ { 2 } ( 0 , \\mathcal { T } ( q _ { 1 } ) )$ . ", + "bbox": [ + 173, + 577, + 825, + 707 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "F.2 APPROXIMATION RESIDUAL FOR PROJECTIVE SPLITTING ", + "text_level": 1, + "bbox": [ + 174, + 720, + 611, + 737 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "In SPS (Algorithm 1), for $i \\in 1 . . n$ , the pairs $( x _ { i } ^ { k } , y _ { i } ^ { k } )$ are chosen so that $y _ { i } ^ { k } \\in A _ { i } ( x _ { i } ^ { k } )$ . This can be seen from the definition of the resolvent. Thus $\\hat { x _ { i } ^ { k } } \\in A _ { i } ^ { - 1 } ( y _ { i } ^ { k } )$ . Observe that ", + "bbox": [ + 174, + 746, + 825, + 779 + ], + "page_idx": 27 + }, + { + "type": "equation", + "img_path": "images/34c2b8bc590a85d01c47ca4535edfab24349cbca1ec1369ce3179fc21f731ae6.jpg", + "text": "$$\n\\begin{array} { r } { v ^ { k } \\doteq \\left[ \\begin{array} { c } { x _ { 1 } ^ { k } - z ^ { k } } \\\\ { \\vdots } \\\\ { x _ { n } ^ { k } - z ^ { k } } \\\\ { B ( z ^ { k } ) + \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } } \\end{array} \\right] \\in \\mathcal { T } ( y _ { 1 } ^ { k } , \\dotsc , y _ { n } ^ { k } , z ^ { k } ) . 
} \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 323, + 782, + 673, + 858 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "The approximation residual for SPS is thus ", + "bbox": [ + 174, + 861, + 457, + 876 + ], + "page_idx": 27 + }, + { + "type": "equation", + "img_path": "images/f7278fc1183759e6f4ed367df5b00f786a09c9df101097ac6b06ec1e247e4d9a.jpg", + "text": "$$\nR _ { k } \\dot { = } \\| v ^ { k } \\| ^ { 2 } = \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + \\left\\| B ( z ^ { k } ) + \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\right\\| ^ { 2 }\n$$", + "text_format": "latex", + "bbox": [ + 318, + 881, + 679, + 922 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "which is an approximation residual for $( y _ { 1 } ^ { k } , \\dots , y _ { n } ^ { k } , z ^ { k } )$ in the sense defined above. We may relate $R _ { k }$ to the approximation residual $G _ { k }$ for SPS from Section 5 as follows: ", + "bbox": [ + 171, + 102, + 823, + 132 + ], + "page_idx": 28 + }, + { + "type": "equation", + "img_path": "images/8c330fe1de9e966ef91dbdae30060c1e446bcc0e0d0a4af3dfce1d7733b22253.jpg", + "text": "$$\n\\begin{array} { r l } & { H _ { k } = \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + \\left\\| B ( z ^ { k } ) + \\displaystyle \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\right\\| ^ { 2 } } \\\\ & { \\quad = \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + \\left\\| B ( z ^ { k } ) + \\displaystyle \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } - \\displaystyle \\sum _ { i = 1 } ^ { n + 1 } w _ { i } ^ { k } \\right\\| ^ { 2 } } \\\\ & { \\quad \\leq \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + 2 \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } + 2 \\left\\| \\displaystyle \\sum _ { i = 1 } ^ { n } ( y _ { i } ^ { k } - w _ { i } ^ { k } ) \\right\\| ^ { 2 } } \\\\ & { \\quad 
\\leq \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + 2 \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } + 2 n \\displaystyle \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } } \\\\ & { \\quad < \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + 2 \\| B ( z ^ { k } ) + \\displaystyle \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } } \\\\ & { \\quad < \\rho _ { n } \\alpha , } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 274, + 136, + 725, + 327 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "where in the second equality we have used the fact that $\\begin{array} { r } { \\sum _ { i = 1 } ^ { n + 1 } w _ { i } ^ { k } = 0 } \\end{array}$ . Thus, $R _ { k }$ has the same convergence rate as $G _ { k }$ given in Theorem 2. ", + "bbox": [ + 174, + 338, + 825, + 369 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Note that while the certificate given in (65) focuses on the primal iterate $z ^ { k }$ , it may be changed to focus on any $x _ { i } ^ { k }$ for $i = 1 , \\ldots , n$ , by using ", + "bbox": [ + 174, + 375, + 823, + 405 + ], + "page_idx": 28 + }, + { + "type": "equation", + "img_path": "images/8204a56b968d2a373a697b9c3bb802847ba995a3a004d55c90e1cbc1f39ce696.jpg", + "text": "$$\n\\boldsymbol { v } _ { i } ^ { k } \\doteq \\left[ \\begin{array} { c } { x _ { 1 } ^ { k } - x _ { i } ^ { k } } \\\\ { \\vdots } \\\\ { x _ { n } ^ { k } - x _ { i } ^ { k } } \\\\ { B ( x _ { i } ^ { k } ) + \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } } \\end{array} \\right] \\in \\mathscr { T } ( y _ { 1 } ^ { k } , \\ldots , y _ { n } ^ { k } , x _ { i } ^ { k } ) .\n$$", + "text_format": "latex", + "bbox": [ + 323, + 407, + 674, + 482 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "The approximation residual $\\| v _ { i } ^ { k } \\| ^ { 2 }$ may also be shown to have the same rate as 
$G _ { k }$ by following similar derivations to those above for $R _ { k }$ . ", + "bbox": [ + 174, + 486, + 825, + 515 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "F.3 TSENG’S METHOD ", + "text_level": 1, + "bbox": [ + 173, + 531, + 343, + 546 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Tseng’s method (Tseng, 2000) can be applied to (62), resulting in the following recursion with iterates $q ^ { k } , \\bar { q } ^ { \\bar { k } } \\in \\mathbb { R } ^ { ( n + 1 ) d }$ : ", + "bbox": [ + 174, + 556, + 823, + 587 + ], + "page_idx": 28 + }, + { + "type": "equation", + "img_path": "images/89d314a14597b152cec2b1449bae122026196d7ab0fe3ab9a5d6fe622328c894.jpg", + "text": "$$\n\\begin{array} { c } { \\bar { q } ^ { k } = J _ { \\alpha \\mathcal { A } } ( q ^ { k } - \\alpha \\mathcal { B } ( q ^ { k } ) ) } \\\\ { q ^ { k + 1 } = \\bar { q } ^ { k } + \\alpha \\big ( \\mathcal { B } ( q ^ { k } ) - \\mathcal { B } ( \\bar { q } ^ { k } ) \\big ) , } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 382, + 588, + 614, + 637 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "where $\\mathcal { A }$ and $\\mathcal { B }$ are defined in (63) and (64). The resolvent of $\\mathcal { A }$ may be readily computed from the resolvents of the $A _ { i }$ using Moreau’s identity (Bauschke & Combettes, 2017, Proposition 23.20). ", + "bbox": [ + 171, + 640, + 825, + 670 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Analogous to SPS, Tseng’s method has an approximation residual, which in this case is an element of $\\mathcal { T } ( \\bar { q } ^ { k } )$ . 
In particular, using the general properties of resolvent operators as applied to $J _ { \\alpha \\mathcal { A } }$ , we have ", + "bbox": [ + 171, + 675, + 823, + 705 + ], + "page_idx": 28 + }, + { + "type": "equation", + "img_path": "images/3ad651c74ae9cc276826b94d05a927814ef5af68d350de03d3fdebef58f09adf.jpg", + "text": "$$\n\\frac { 1 } { \\alpha } ( q ^ { k } - \\bar { q } ^ { k } ) - \\mathcal { B } ( q ^ { k } ) \\in \\mathcal { A } ( \\bar { q } ^ { k } ) .\n$$", + "text_format": "latex", + "bbox": [ + 390, + 708, + 606, + 738 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Also, rearranging (68) produces ", + "bbox": [ + 174, + 742, + 385, + 757 + ], + "page_idx": 28 + }, + { + "type": "equation", + "img_path": "images/43c918c62fec061b1e0fbc610629a312f5c06c11e5b4ee40a38fe9612a78331d.jpg", + "text": "$$\n\\frac { 1 } { \\alpha } ( \\bar { q } ^ { k } - q ^ { k + 1 } ) + \\mathcal { B } ( q ^ { k } ) = \\mathcal { B } ( \\bar { q } ^ { k } ) .\n$$", + "text_format": "latex", + "bbox": [ + 382, + 761, + 616, + 790 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Adding these two relations produces ", + "bbox": [ + 174, + 792, + 415, + 808 + ], + "page_idx": 28 + }, + { + "type": "equation", + "img_path": "images/83171b6910d48e4bfa177ccd43caea81d1d347085c2a164be95154adc4fb3099.jpg", + "text": "$$\n\\frac { 1 } { \\alpha } ( q ^ { k } - q ^ { k + 1 } ) \\in \\mathcal { A } ( \\bar { q } ^ { k } ) + \\mathcal { B } ( \\bar { q } ^ { k } ) = \\mathcal { T } ( \\bar { q } ^ { k } )\n$$", + "text_format": "latex", + "bbox": [ + 351, + 811, + 647, + 842 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Therefore, ", + "bbox": [ + 173, + 844, + 243, + 859 + ], + "page_idx": 28 + }, + { + "type": "equation", + "img_path": "images/22729217f5cf585083c76e9bf3744e87097890be4fd87e4b64c60b0296861963.jpg", + "text": "$$\nR _ { k } ^ { \\mathrm { { T s e n g } } } \\doteq \\frac { 1 } { \\alpha ^ { 2 } } \\| q ^ { k } - q ^ { k + 1 } \\| ^ { 2 
}\n$$", + "text_format": "latex", + "bbox": [ + 408, + 862, + 588, + 892 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "represents a measure of the approximation error for Tseng’s method equivalent to $R _ { k }$ defined in (66) for SPS. ", + "bbox": [ + 173, + 895, + 825, + 922 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "F.4 FRB ", + "text_level": 1, + "bbox": [ + 173, + 103, + 248, + 117 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "The forward-reflected-backward method (FRB) (Malitsky & Tam, 2020) is another method that may be applied to the splitting $\\mathcal { T } = \\mathcal { A } + \\mathcal { B }$ for $\\mathcal { A }$ and $\\mathcal { B }$ as defined in (63) and (64). Doing so yields recursion ", + "bbox": [ + 174, + 128, + 825, + 171 + ], + "page_idx": 29 + }, + { + "type": "equation", + "img_path": "images/655dd6f05410a89facdc5d6f19324b42be393018991f46d1f42a5e2ff9d18e83.jpg", + "text": "$$\nq ^ { k + 1 } = J _ { \\alpha \\mathcal { A } } \\Big ( q ^ { k } - \\alpha \\big ( 2 \\mathcal { B } ( q ^ { k } ) - \\mathcal { B } ( q ^ { k - 1 } ) \\big ) \\Big ) .\n$$", + "text_format": "latex", + "bbox": [ + 346, + 171, + 650, + 199 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Following similar arguments to those for Tseng’s method, it can be shown that ", + "bbox": [ + 174, + 200, + 687, + 215 + ], + "page_idx": 29 + }, + { + "type": "equation", + "img_path": "images/6b07573d670ff5416c2619a9e8110309fa6f1a414ccd82adbc8eba4d5e7c0478.jpg", + "text": "$$\nv _ { \\mathrm { F R B } } ^ { k } \\doteq \\frac { 1 } { \\alpha } \\left( q ^ { k - 1 } - q ^ { k } \\right) + \\mathcal { B } ( q ^ { k } ) + \\mathcal { B } ( q ^ { k - 2 } ) - 2 \\mathcal { B } ( q ^ { k - 1 } ) \\in \\mathcal { T } ( q ^ { k } ) .\n$$", + "text_format": "latex", + "bbox": [ + 267, + 217, + 728, + 247 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Thus, FRB admits the following approximation residual equivalent to $R _ 
{ k }$ for SPS: ", + "bbox": [ + 174, + 248, + 712, + 263 + ], + "page_idx": 29 + }, + { + "type": "equation", + "img_path": "images/0c5fbce05b820ef6ec87537c6abb8acba48a214be84135a5d060c536a9cb9685.jpg", + "text": "$$\nR _ { k } ^ { \\mathrm { F R B } } \\doteq \\| v _ { \\mathrm { F R B } } ^ { k } \\| ^ { 2 } .\n$$", + "text_format": "latex", + "bbox": [ + 439, + 266, + 557, + 285 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Finally, we remark that the stepsizes used in both the Tseng and FRB methods can be chosen via a linesearch procedure that we do not detail here. ", + "bbox": [ + 174, + 295, + 823, + 324 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "F.5 STOCHASTIC TSENG METHOD ", + "text_level": 1, + "bbox": [ + 176, + 339, + 424, + 354 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "The stochastic version of Tseng’s method of (BΓΆhm et al., 2020) (S-Tseng) may be applied to the inclusion $0 \\in \\mathcal { A } ( q ) + \\mathcal { B } ( q )$ , since the operator $\\mathcal { A }$ may be written as a subdifferential. However, unlike the deterministic Tseng method, it does not produce a valid residual. Note also that S-Tseng outputs an ergodic sequence $\\mathbf { \\bar { \\boldsymbol { q } } } _ { \\mathrm { e r g } } ^ { k }$ . To construct a residual for the ergodic sequence, we compute a deterministic step of Tseng’s method according to (67)-(68), starting at $q _ { \\mathrm { e r g } } ^ { k }$ . 
That is, letting ", + "bbox": [ + 173, + 364, + 826, + 440 + ], + "page_idx": 29 + }, + { + "type": "equation", + "img_path": "images/2344c4c5c1c54a1f6fe3ed0f7c91678f883d06deedd00452876444567f27a69a.jpg", + "text": "$$\n\\begin{array} { r l } & { \\bar { q } ^ { k } = J _ { \\alpha \\mathcal { A } } ( q _ { \\mathrm { e r g } } ^ { k } - \\mathcal { B } ( q _ { \\mathrm { e r g } } ^ { k } ) ) } \\\\ & { q ^ { k + 1 } = \\bar { q } ^ { k } + \\alpha ( \\mathcal { B } ( q _ { \\mathrm { e r g } } ^ { k } ) - \\mathcal { B } ( \\bar { q } ^ { k } ) ) , } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 382, + 443, + 617, + 486 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "we can then compute essentially the same residual as in Section F.3, ", + "bbox": [ + 171, + 487, + 619, + 501 + ], + "page_idx": 29 + }, + { + "type": "equation", + "img_path": "images/e620c909214f65b396b3a0722afcf1e8bd8c008ab42e6d865ac16215300f24cb.jpg", + "text": "$$\nR _ { k } ^ { \\mathrm { { S - T s e n g } } } \\doteq \\frac { 1 } { \\alpha ^ { 2 } } \\| q _ { \\mathrm { { e r g } } } ^ { k } - q ^ { k + 1 } \\| ^ { 2 } .\n$$", + "text_format": "latex", + "bbox": [ + 398, + 503, + 598, + 534 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "To construct the stochastic oracle for S-Tseng, we assumed $\\begin{array} { r } { B ( z ) = \\frac { 1 } { m } \\sum _ { i = 1 } ^ { m } B _ { i } ( z ) } \\end{array}$ . 
Then we used ", + "bbox": [ + 173, + 542, + 821, + 561 + ], + "page_idx": 29 + }, + { + "type": "equation", + "img_path": "images/249f4f681bd045146bf6bfa6273c06c2cbb4fcfed7052a0b32bc60ffaba59552.jpg", + "text": "$$\n\\tilde { \\mathcal { B } } ( w _ { 1 } , \\dots , w _ { n } , z ) \\mapsto \\left[ \\begin{array} { c c c c } { 0 } & { \\cdots } & { 0 } & { - I } \\\\ { \\vdots } & { \\ddots } & { \\vdots } & { \\vdots } \\\\ { 0 } & { \\cdots } & { 0 } & { - I } \\\\ { I } & { \\cdots } & { I } & { 0 } \\end{array} \\right] \\left[ \\begin{array} { c } { w _ { 1 } } \\\\ { \\vdots } \\\\ { w _ { n } } \\\\ { z } \\end{array} \\right] + \\left[ \\begin{array} { c } { 0 } \\\\ { \\vdots } \\\\ { 0 } \\\\ { \\frac { 1 } { \\vert \\mathbf { B } \\vert } \\sum _ { j \\in \\mathbf { B } } B _ { j } ( z ) } \\end{array} \\right] .\n$$", + "text_format": "latex", + "bbox": [ + 232, + 564, + 764, + 636 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "for some minibatch $\\mathbf { B } \\in \\{ 1 , \\dots , m \\}$ . ", + "bbox": [ + 174, + 638, + 419, + 654 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "F.6 VARIANCE-REDUCED FRB ", + "text_level": 1, + "bbox": [ + 174, + 669, + 401, + 684 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "The FRB-VR method of Alacaoglu et al. (2021) can also be applied to $0 \\in \\mathcal { A } ( q ) + \\mathcal { B } ( q )$ , using the same stochastic oracle $\\tilde { \\mathcal { B } }$ defined in (69). if we let the iterates of FRB-VR be $( q ^ { k } , p ^ { k } )$ , then line 4 of Algorithm 1 of Alacaoglu et al. 
(2021) can be written as ", + "bbox": [ + 174, + 695, + 825, + 739 + ], + "page_idx": 29 + }, + { + "type": "equation", + "img_path": "images/2aca7939639a60ed396ae7fe36c40b6c6475db65d7a4e41f99e21d78db27d23a.jpg", + "text": "$$\n\\begin{array} { c } { \\hat { q } ^ { k } = q ^ { k } - \\tau ( \\mathcal { B } ( p ^ { k } ) + \\tilde { \\mathcal { B } } ( q ^ { k } ) - \\tilde { \\mathcal { B } } ( p ^ { k } ) ) } \\\\ { q ^ { k + 1 } = J _ { \\tau \\mathcal { A } } ( \\hat { q } ^ { k } ) . } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 354, + 741, + 643, + 785 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Once again, the method does not directly produce a residual, but one can be developed from the algorithm definition as follows: (71) yields $\\dot { \\tau } ^ { - 1 } ( \\hat { q } ^ { k } - q ^ { k + 1 } ) \\in \\mathcal { A } ( q ^ { k + 1 } )$ and hence ", + "bbox": [ + 173, + 792, + 823, + 821 + ], + "page_idx": 29 + }, + { + "type": "equation", + "img_path": "images/246fff05c45ed5e58bd7f2cb080f07240417c731e17fc74bab71fc73ebb16ee9.jpg", + "text": "$$\n\\tau ^ { - 1 } ( \\hat { q } ^ { k } - q ^ { k + 1 } ) + \\mathcal { B } ( q ^ { k + 1 } ) \\in ( \\mathcal { A } + \\mathcal { B } ) ( q ^ { k + 1 } ) .\n$$", + "text_format": "latex", + "bbox": [ + 334, + 824, + 656, + 843 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Therefore we use the residual ", + "bbox": [ + 174, + 845, + 369, + 859 + ], + "page_idx": 29 + }, + { + "type": "equation", + "img_path": "images/1244d3a1edda7049a11dcc5a30602f2df84095e8473a81632532e57fbd0af7c5.jpg", + "text": "$$\nR _ { k } ^ { \\mathrm { F R B - V R } } = \\lVert \\tau ^ { - 1 } ( \\hat { q } ^ { k } - q ^ { k + 1 } ) + \\mathcal { B } ( q ^ { k + 1 } ) \\rVert ^ { 2 } .\n$$", + "text_format": "latex", + "bbox": [ + 349, + 861, + 645, + 881 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "plots for FR $R _ { k }$ for SPS, VR. 
$R _ { k } ^ { \\mathrm { T s e n g } }$ for Tseng’s method, $R _ { k } ^ { \\mathrm { F R B } }$ for FRB, $R _ { k } ^ { \\mathrm { S - T s e n g } }$ for S-Tseng, and $R _ { k } ^ { \\mathrm { F R B - V R } }$ ", + "bbox": [ + 173, + 890, + 825, + 924 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "F.7 BENEFITS AND DRAWBACKS OF THE PRODUCT SPACE REFORMULATION ", + "text_level": 1, + "bbox": [ + 174, + 103, + 714, + 118 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "The main benefit of the product space reformulation (PSR) is that it allows one to use familiar 2-operator splitting schemes for solving $0 \\in \\mathcal { A } ( q ) + \\mathcal { B } ( q )$ to solve the more complicated recursion (1). However, one drawback of this approach is that the operator $\\mathcal { B }$ , defined in (64), combines a skew-symmetric consensus matrix with the Lipschitz operator $B$ . Treating $\\mathcal { B }$ as a single operator necessitates using a single stepsize for both of its constituent operators, but the $B$ component will generally have a much larger Lipschitz constant than the skew part, necessitating a smaller stepsize than is ideal for the skew operator. This difficulty can be countered by using different stepsizes for the primal and dual components, but that strategy introduces additional tuning parameters. In other works, methods based on PSR have exhibited slower convergence than deterministic projective splitting methods (Johnstone & Eckstein, 2021; 2020b). However, in our experiments in Section 7, the performance is comparable. 
", + "bbox": [ + 173, + 128, + 825, + 282 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "G VARIATIONAL INEQUALITIES ", + "text_level": 1, + "bbox": [ + 176, + 301, + 450, + 318 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "For a mapping $B : \\mathbb { R } ^ { d } \\mathbb { R } ^ { d }$ and a closed and convex set $\\mathcal { C }$ , the variational inequality problem (Harker & Pang, 1990) is to find $z ^ { \\ast } \\in \\mathcal { C }$ such that ", + "bbox": [ + 176, + 332, + 821, + 361 + ], + "page_idx": 30 + }, + { + "type": "equation", + "img_path": "images/7363e137bd9d3e57b042782850801a76b88095c5d51c497025360deb38801bea.jpg", + "text": "$$\nB ( z ^ { * } ) ^ { \\top } ( z - z ^ { * } ) \\geq 0 , \\forall z \\in { \\mathcal { C } } .\n$$", + "text_format": "latex", + "bbox": [ + 398, + 362, + 599, + 381 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Consider the normal cone mapping discussed in Section 2 and defined as ", + "bbox": [ + 173, + 382, + 651, + 396 + ], + "page_idx": 30 + }, + { + "type": "equation", + "img_path": "images/a3ba3311c6ed414d58176f577330a1ad43554d1ea0c2294617992ad703954d54.jpg", + "text": "$$\nN _ { { \\mathcal { C } } } ( x ) \\doteq \\{ g : g ^ { \\top } ( y - x ) \\le 0 \\ \\forall y \\in { \\mathcal { C } } \\}\n$$", + "text_format": "latex", + "bbox": [ + 369, + 397, + 629, + 416 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "It is easily seen that (72) is equivalent to finding $z ^ { * }$ such that $- B ( z ^ { * } ) \\in N _ { \\mathcal { C } } ( z ^ { * } )$ . 
Hence, if $B$ is monotone, (72) is equivalent to the monotone inclusion ", + "bbox": [ + 169, + 417, + 823, + 445 + ], + "page_idx": 30 + }, + { + "type": "equation", + "img_path": "images/3c03e1f04f0304a1368e3e3b7053ce68dfc5a0e546ad31d95dd9c2efb339f212.jpg", + "text": "$$\n0 \\in B ( z ^ { * } ) + N _ { \\cal { C } } ( z ^ { * } ) .\n$$", + "text_format": "latex", + "bbox": [ + 424, + 446, + 573, + 464 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Thus, monotone variational inequalities are a special case of monotone inclusions with two operators, one of which is single-valued and the other is the normal cone map of the constraint set $\\mathcal { C }$ . As a consequence, methods for monotone inclusions can be used to solve monotone variational inequality problems. The reverse, however, may not be true. For example, the analysis of the extragradient method (Korpelevich, 1977) relies on the second operator $N _ { \\mathcal { C } }$ in (73) being a normal cone, as opposed to a more general monotone operator. We are not aware of any direct extension of the extragradient method’s analysis allowing a more general resolvent to be used in place of the projection map corresponding to $N _ { \\mathcal { C } }$ . ", + "bbox": [ + 173, + 465, + 825, + 578 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "The Restricted Gap Function There is a disadvantage to pursuing convergence rates based on variational inequalities (as in BΓΆhm et al. (2020) and Alacaoglu et al. (2021)) rather than monotone inclusions. 
Convergence rate analyses for variational inequalities focus on the gap function: ", + "bbox": [ + 173, + 592, + 823, + 633 + ], + "page_idx": 30 + }, + { + "type": "equation", + "img_path": "images/45c8199cfc83f22b1c6ed9f1ade0c07c7b8cfbba9cfc235961eb41d81d8b2734.jpg", + "text": "$$\nG _ { { \mathcal C } } ( z ) \doteq \operatorname* { s u p } _ { z ^ { \prime } \in { \mathcal C } } B ( z ^ { \prime } ) ^ { \top } ( z - z ^ { \prime } ) .\n$$", + "text_format": "latex", + "bbox": [ + 395, + 635, + 602, + 662 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "It can be shown that $G _ { \mathcal { C } } ( z ) \geq 0$ and $G _ { \mathcal { C } } ( z ) = 0$ if and only if $z$ solves (72). However, (74) is meaningless for most problems, since unless $\mathcal { C }$ is compact, $G _ { \mathcal { C } } ( z )$ is typically equal to $+ \infty$ for any nonsolution (Diakonikolas, 2020). Thus researchers instead focus on the restricted gap function (Nesterov, 2007) ", + "bbox": [ + 174, + 664, + 825, + 719 + ], + "page_idx": 30 + }, + { + "type": "equation", + "img_path": "images/b3c7ac50ac762d88a20ea79a90e45f2f3a4a225fd3302e905011d704dce56117.jpg", + "text": "$$\nG _ { { \mathcal C } _ { 2 } } ( z ) \doteq \operatorname* { s u p } _ { z ^ { \prime } \in { \mathcal C } _ { 2 } } B ( z ^ { \prime } ) ^ { \top } ( z - z ^ { \prime } ) .\n$$", + "text_format": "latex", + "bbox": [ + 390, + 719, + 607, + 748 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "where $\mathcal { C } _ { 2 }$ is an arbitrary compact set. However, now the results are only meaningful over the set $\mathcal { C } _ { 2 }$ . Thus, $\mathcal { C } _ { 2 }$ must be chosen large enough so that the iterates of the algorithm remain in the interior of $\mathcal { C } _ { 2 }$ (Böhm et al., 2020). Further, the convergence rate bound depends on the diameter of $\mathcal { C } _ { 2 }$ . 
For some algorithms (Mokhtari et al., 2020) a valid set is provided which bounds the iterates. However Böhm et al. (2020) and Alacaoglu et al. (2021) do not provide one, although in principle it could be done so long as the ergodic sequence can be bounded almost-surely. Thus, the convergence rates depending on (75) in Böhm et al. (2020) and Alacaoglu et al. (2021) are somewhat incomplete in that they depend on unknown constants. ", + "bbox": [ + 173, + 750, + 825, + 861 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "In contrast, rates based on the approximation residual in the monotone inclusion setting, including ours given in (57)–(58), completely avoid this pitfall. There is no need to select a compact set containing the algorithm’s iterates and the constants in our rates are all explicit or depend on standard quantities such as the initial distance to a solution. ", + "bbox": [ + 174, + 867, + 823, + 924 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "H MEMORY-SAVING TECHNIQUE FOR SPS", + "text_level": 1, + "bbox": [ + 174, + 101, + 544, + 118 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "The variables $t _ { i } ^ { k } , x _ { i } ^ { k }$ , and $y _ { i } ^ { k }$ on lines 3-5 of SPS are stored in variables $x$ and $y$ . Another two variables $\bar { x }$ and $\bar { y }$ keep track of $\textstyle \sum _ { i = 1 } ^ { n } x _ { i } ^ { k }$ and $\textstyle \sum _ { i = 1 } ^ { n } y _ { i } ^ { k }$ . The dual variables are stored as $w _ { i }$ for $i \in 1 . . n$ and the primal variable as $z$ . Once $x = x _ { i } ^ { k }$ is computed, the $i ^ { \mathrm { { t h } } }$ dual variable $w _ { i }$ can be partially updated as $w _ { i } \leftarrow w _ { i } - \alpha _ { k } x$ . Once all the operators have been processed, the update for each dual variable may be completed via $w _ { i } \leftarrow w _ { i } + \alpha _ { k } ( n + 1 ) ^ { - 1 } \bar { x }$ . 
Also, the primal update is computed as $z \leftarrow z - \alpha _ { k } \bar { y }$ . During the calculation loop for the $x _ { i } ^ { k } , y _ { i } ^ { k }$ , the terms in approximation residual $R _ { k }$ may also be accumulated one by one. The total number of vector elements that must be stored is $( n + 7 ) d$ . ", + "bbox": [ + 173, + 132, + 825, + 250 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "I ADDITIONAL INFORMATION ABOUT THE NUMERICAL EXPERIMENTS ", + "text_level": 1, + "bbox": [ + 173, + 267, + 774, + 285 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "We solve the following convex-concave min-max problem: ", + "bbox": [ + 173, + 299, + 560, + 315 + ], + "page_idx": 31 + }, + { + "type": "equation", + "img_path": "images/18cb44d386da099e52ffe5a9435b5ba27f93d0ab884a89cf537d101f2f54b20f.jpg", + "text": "$$\n\begin{array} { r l } { \underset { \beta \in \mathbb { R } ^ { d } } { \operatorname* { m i n } } \quad \underset { \gamma \in \mathbb { R } ^ { m } } { \operatorname* { m a x } } } & { \left\{ \lambda ( \delta - \kappa ) + \displaystyle \frac { 1 } { m } \sum _ { i = 1 } ^ { m } \Psi ( \langle \hat { x } _ { i } , \beta \rangle ) + \displaystyle \frac { 1 } { m } \sum _ { i = 1 } ^ { m } \gamma _ { i } ( \hat { y } _ { i } \langle \hat { x } _ { i } , \beta \rangle - \lambda \kappa ) + c \| \beta \| _ { 1 } \right\} } \\ { \mathrm { s . t . } \quad } & { \| \beta \| _ { 2 } \leq \lambda / ( L _ { \Psi } + 1 ) \qquad \| \gamma \| _ { \infty } \leq 1 . } \end{array}\n$$", + "text_format": "latex", + "bbox": [ + 197, + 320, + 769, + 387 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "This model is identical to that of (Yu et al., 2021, Thm. 4.3) except for the addition of the $\ell _ { 1 }$ regularization term $c \| \beta \| _ { 1 }$ , where $c \geq 0$ is a given constant. 
The goal is to learn the model weights $\\beta$ from a training dataset of $m$ feature vectors ${ \\hat { x } } _ { i }$ and corresponding labels $\\hat { y } _ { i }$ . Rather than computing the expected loss over the training set, the formulation uses, for each $\\beta$ , the worst possible distribution within a Wasserstein-metric ball around the empirical distribution of the $\\{ ( \\hat { x } _ { i } , \\hat { y } _ { i } ) \\}$ , with the parameter $\\delta \\geq 0$ giving the diameter of the ball and the parameter $\\kappa \\geq 0$ specifying the relative weighting of features and labels. The variables $\\gamma$ and $\\lambda$ parameterize the selection of this worst-case distribution in response to the model weights $\\beta$ . Finally, $\\Psi$ is the logistic loss kernel $t \\mapsto \\log ( e ^ { t } + e ^ { - t } )$ and $L _ { \\Psi } = 1$ is the corresponding Lipschitz constant. In all the experiments, we set $\\delta = \\kappa = 1$ and $c = 1 0 ^ { - 3 }$ . ", + "bbox": [ + 173, + 392, + 826, + 520 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "We now show how we converted this problem to the form (1) for our experiments. 
Let $z$ be a shorthand for $( \\lambda , \\beta , \\gamma )$ , and define ", + "bbox": [ + 174, + 525, + 823, + 555 + ], + "page_idx": 31 + }, + { + "type": "equation", + "img_path": "images/aa3614c6b6306691620346b018a88bd125aca6d20f519ffa82859a5be919d1e7.jpg", + "text": "$$\n\\mathcal { L } ( z ) \\doteq \\lambda ( \\delta - \\kappa ) + \\frac { 1 } { m } \\sum _ { i = 1 } ^ { m } \\Psi ( \\langle { \\hat { x } _ { i } } , \\beta \\rangle ) + \\frac { 1 } { m } \\sum _ { i = 1 } ^ { m } \\gamma _ { i } ( \\hat { y } _ { i } \\langle { \\hat { x } _ { i } } , \\beta \\rangle - \\lambda \\kappa ) .\n$$", + "text_format": "latex", + "bbox": [ + 274, + 560, + 722, + 602 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "The first-order necessary and sufficient conditions for the convex-concave saddlepoint problem in (76) are ", + "bbox": [ + 176, + 607, + 818, + 636 + ], + "page_idx": 31 + }, + { + "type": "equation", + "img_path": "images/ccd93350e661533a4eb2fd3e28c8ef4fe823d80b6afb68632ffba1d7a532e259.jpg", + "text": "$$\n0 \\in B ( z ) + A _ { 1 } ( z ) + A _ { 2 } ( z )\n$$", + "text_format": "latex", + "bbox": [ + 405, + 642, + 593, + 660 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "where the vector field $B ( z )$ is defined as ", + "bbox": [ + 174, + 666, + 441, + 681 + ], + "page_idx": 31 + }, + { + "type": "equation", + "img_path": "images/9cca8f05961d129210e381b2e73d7cdad0e42dd8c0f644cad0af15ae26b2145a.jpg", + "text": "$$\n\\boldsymbol { B } ( z ) \\doteq \\left[ \\begin{array} { l } { \\nabla _ { \\boldsymbol { \\lambda } , \\beta } \\mathcal { L } ( z ) } \\\\ { - \\nabla _ { \\boldsymbol { \\gamma } } \\mathcal { L } ( z ) } \\end{array} \\right] ,\n$$", + "text_format": "latex", + "bbox": [ + 413, + 688, + 581, + 731 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "with ", + "bbox": [ + 173, + 737, + 205, + 751 + ], + "page_idx": 31 + }, + { + "type": "equation", + "img_path": 
"images/85f1bf2cee31fddc6e9593a9a10edca9b6f09cf13548e4dd23fd4c2f73466f27.jpg", + "text": "$$\n\\begin{array} { r } { \\nabla _ { \\lambda , \\beta } \\mathcal { L } ( z ) = \\left[ \\begin{array} { c } { \\delta - \\kappa ( 1 + \\frac { 1 } { m } \\sum _ { i = 1 } ^ { m } \\gamma _ { i } ) } \\\\ { \\frac { 1 } { m } \\sum _ { i = 1 } ^ { m } \\Psi ^ { \\prime } ( \\langle \\hat { x } _ { i } , \\beta \\rangle ) \\hat { x } _ { i } + \\frac { 1 } { m } \\sum _ { i = 1 } ^ { m } \\gamma _ { i } \\hat { y } _ { i } \\hat { x } _ { i } } \\end{array} \\right] } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 299, + 755, + 696, + 799 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "and ", + "bbox": [ + 173, + 804, + 200, + 818 + ], + "page_idx": 31 + }, + { + "type": "equation", + "img_path": "images/914e4a4b20d9d95dbd3bbb4bc5dbaf285ce50d0c8096409b0c932be2678ec031.jpg", + "text": "$$\n\\nabla _ { \\boldsymbol { \\gamma } } \\mathcal { L } ( z ) = \\left[ \\begin{array} { c } { \\frac { 1 } { m } ( \\hat { y } _ { 1 } \\langle \\hat { x } _ { 1 } , \\beta \\rangle - \\lambda \\kappa ) } \\\\ { \\vdots } \\\\ { \\frac { 1 } { m } ( \\hat { y } _ { m } \\langle \\hat { x } _ { m } , \\beta \\rangle - \\lambda \\kappa ) } \\end{array} \\right] .\n$$", + "text_format": "latex", + "bbox": [ + 367, + 821, + 630, + 890 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "It is readily confirmed that $B$ defined in this manner is Lipschitz. The monotonicity of $B$ follows from its being the generalized gradient of a convex-concave saddle function (Rockafellar, 1970). 
", + "bbox": [ + 173, + 895, + 826, + 924 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "For the set-valued operators, $A _ { 1 } ( z )$ corresponds to the constraints and $A _ { 2 } ( z )$ to the nonsmooth $\\ell _ { 1 }$ regularizer, and are defined as ", + "bbox": [ + 173, + 103, + 823, + 132 + ], + "page_idx": 32 + }, + { + "type": "equation", + "img_path": "images/a9322f8f88dfd4faa2d5690a44116c0abe283d3f1db5b52383fa6be335f04da9.jpg", + "text": "$$\nA _ { 1 } ( z ) \\doteq N _ { \\mathcal { C } _ { 1 } } ( \\lambda , \\beta ) \\times N _ { \\mathcal { C } _ { 2 } } ( \\gamma ) ,\n$$", + "text_format": "latex", + "bbox": [ + 395, + 138, + 599, + 156 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "where ", + "bbox": [ + 173, + 162, + 215, + 176 + ], + "page_idx": 32 + }, + { + "type": "equation", + "img_path": "images/a3f401bfc4e2d70d79c685ea6e10fe54076e2df77299cec2dd1b2cff859d932c.jpg", + "text": "$$\n\\begin{array} { r } { \\mathcal { C } _ { 1 } \\doteq \\bigl \\{ ( \\lambda , \\beta ) : \\| \\beta \\| _ { 2 } \\le \\lambda / ( L _ { \\Psi } + 1 ) \\bigr \\} \\quad \\mathrm { ~ a n d ~ } \\quad \\mathcal { C } _ { 2 } \\doteq \\{ \\gamma : \\| \\gamma \\| _ { \\infty } \\le 1 \\} , } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 266, + 183, + 730, + 203 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "and ", + "bbox": [ + 173, + 208, + 200, + 222 + ], + "page_idx": 32 + }, + { + "type": "equation", + "img_path": "images/5f142bbd37a6d13571c5c73e38796564f96a9147885f69e0454ea746e642b5cb.jpg", + "text": "$$\nA _ { 2 } ( z ) \\doteq \\{ \\mathbf { 0 } _ { 1 \\times 1 } \\} \\times c \\partial \\| \\beta \\| _ { 1 } \\times \\{ \\mathbf { 0 } _ { m \\times 1 } \\} .\n$$", + "text_format": "latex", + "bbox": [ + 366, + 229, + 630, + 247 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Here, the notation ${ \\bf 0 } _ { p \\times 1 }$ denotes the $p$ -dimensional vector of all zeros. 
$\\mathcal { C } _ { 1 }$ is a scaled version of the second-order cone, well known to be a closed convex set, while $\\mathcal { C } _ { 2 }$ is the unit ball of the $\\ell _ { \\infty }$ norm, also closed and convex. Since $A _ { 1 }$ is a normal cone map of a closed convex set and $A _ { 2 }$ is the subgradient map of a closed proper convex function (the scaled 1-norm), both of these operators are maximal monotone and problem (77) is a special case of (1) for $n = 2$ . ", + "bbox": [ + 173, + 252, + 825, + 324 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Stochastic oracle implementation The operator $B : \\mathbb { R } ^ { m + d + 1 } \\mapsto \\mathbb { R } ^ { m + d + 1 }$ , defined in (78), can be written as ", + "bbox": [ + 173, + 337, + 823, + 367 + ], + "page_idx": 32 + }, + { + "type": "equation", + "img_path": "images/8f96bdf635a021974b7492a256c1ebf04f386ff90e14544720b8c68faafa5170.jpg", + "text": "$$\nB ( z ) = \\frac { 1 } { m } \\sum _ { i = 1 } ^ { m } B _ { i } ( z )\n$$", + "text_format": "latex", + "bbox": [ + 426, + 371, + 571, + 412 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "where ", + "bbox": [ + 173, + 419, + 215, + 433 + ], + "page_idx": 32 + }, + { + "type": "equation", + "img_path": "images/96a27dbf9aabec2390f8cc0626bed5c130859ad8934cc845adcc6d97bec8f517.jpg", + "text": "$$\nB _ { i } ( z ) \\doteq \\left[ \\begin{array} { c } { \\delta - \\kappa ( 1 + \\gamma _ { i } ) } \\\\ { \\Psi ^ { \\prime } ( \\langle \\hat { x } _ { i } , \\beta \\rangle ) \\hat { x } _ { i } + \\gamma _ { i } \\hat { y } _ { i } \\hat { x } _ { i } } \\\\ { \\mathbf { 0 } _ { ( i - 1 ) \\times 1 } } \\\\ { - ( \\hat { y } _ { i } \\langle \\hat { x } _ { i } , \\beta \\rangle - \\lambda \\kappa ) } \\\\ { \\mathbf { 0 } _ { ( m - i ) \\times 1 } } \\end{array} \\right] .\n$$", + "text_format": "latex", + "bbox": [ + 367, + 439, + 627, + 537 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "In our SPS experiments, the 
stochastic oracle for $B$ is simply $\\begin{array} { r } { \\tilde { B } ( z ) = \\frac { 1 } { | \\mathbf { B } | } \\sum _ { i \\in \\mathbf { B } } B _ { i } ( z ) } \\end{array}$ for some minibatch $\\mathbf { B } \\subseteq \\{ 1 , \\dots , m \\}$ . We used a batchsize of 100. ", + "bbox": [ + 173, + 545, + 823, + 579 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Resolvent computations The resolvent of $A _ { 1 }$ is readily constructed from the projection maps of the simple sets $\\mathcal { C } _ { 1 }$ and $\\mathcal { C } _ { 2 }$ , while the resolvent $A _ { 2 }$ involves the proximal operator of the $\\ell _ { 1 }$ norm. Specifically, ", + "bbox": [ + 176, + 593, + 826, + 637 + ], + "page_idx": 32 + }, + { + "type": "equation", + "img_path": "images/49a3b38017b4b6b3fb6ec9fa19ad7b9373e672d849628227aee85bf3d001b5c7.jpg", + "text": "$$\nJ _ { \\rho A _ { 1 } } ( z ) = \\left[ \\begin{array} { c } { \\mathrm { p r o j } _ { \\mathcal { C } _ { 1 } } ( \\lambda , \\beta ) } \\\\ { \\mathrm { p r o j } _ { \\mathcal { C } _ { 2 } } ( \\gamma ) } \\end{array} \\right] \\quad \\mathrm { a n d } \\quad J _ { \\rho A _ { 2 } } ( z ) = \\left[ \\begin{array} { c } { \\mathbf { 0 } _ { 1 \\times 1 } } \\\\ { \\mathrm { p r o x } _ { \\rho c \\| \\cdot \\| _ { 1 } } ( \\beta ) } \\\\ { \\mathbf { 0 } _ { m \\times 1 } } \\end{array} \\right] .\n$$", + "text_format": "latex", + "bbox": [ + 266, + 645, + 730, + 704 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "The constraint $\\mathcal { C } _ { 1 }$ is a scaled second-order cone and $\\mathcal { C } _ { 2 }$ is the $\\ell _ { \\infty }$ ball, both of which have closed-form projections. The proximal operator of the $\\ell _ { 1 }$ norm is the well-known soft-thresholding operator (Parikh & Boyd, 2013, Section 6.5.2). Therefore all resolvents in the formulation may be computed quickly and accurately. 
", + "bbox": [ + 176, + 710, + 825, + 768 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "SPS stepsize choices For the stepsize in SPS, we ordinarily require $\\rho _ { k } \\le \\overline { { \\rho } } < 1 / L$ for the global Lipschitz constant $L$ of $B$ . However, since the global Lipschitz constant may be pessimistic, better performance can often be achieved by experimenting with larger stepsizes. If divergence is observed, then the stepsize can be decreased. This type of strategy is common for SGD and similar stochastic methods. Thus, for SPS-decay we set $\\alpha _ { k } ^ { - \\pm } = C _ { d } k ^ { - 0 . 5 1 }$ and $\\rho _ { k } = C _ { d } k ^ { - 0 . 2 5 }$ , and performed a grid search to select the best $C _ { d }$ from $\\{ 0 . 1 , 0 . 5 , 1 , 5 , 1 0 \\}$ , arriving at $C _ { d } = 1$ for epsilon and SUSY, and $C _ { d } = 0 . 5$ for real-sim. For SPS-fixed we used $\\rho = K ^ { - 1 / 4 }$ and $\\alpha = C _ { f } \\rho ^ { 2 }$ , and performed a grid search to select $C _ { f }$ over $\\{ 0 . 1 , 0 . 5 , 1 , 5 , 1 0 \\}$ , arriving at $C _ { f } = 1$ for epsilon and real-sim, and $C _ { f } = 5$ for SUSY. The total number of iterations for SPS-fixed was chosen as follows: For the epsilon dataset, we used $K = 5 0 0 0$ , for SUSY we used $K = 2 0 0$ , and for real-sim we used $K = 1 0 0 0$ . ", + "bbox": [ + 173, + 781, + 826, + 924 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/c94c000fa07b86ebc660cc1004df85b2b9b8c4f0b3338fc20eb74e1d9a379c4e.jpg", + "image_caption": [ + "Figure 2: Approximation residual versus epoch for three LIBSVM benchmark datasets. Left: epsilon, middle: SUSY, right: real-sim. " + ], + "image_footnote": [], + "bbox": [ + 176, + 113, + 808, + 227 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Parameter choices for the other algorithms All methods are initialized at the same random point. 
For Tseng’s method, we used the backtracking linesearch variant with an initial stepsize of 1, $\theta = 0 . 8$ , and a stepsize reduction factor of 0.7. For FRB, we used the backtracking linesearch variant with the same settings as for Tseng’s method. For deterministic PS, we used a fixed stepsize of $0 . 9 / L$ . For the stochastic Tseng’s method of Böhm et al. (2020), the stepsize $\alpha _ { k }$ must satisfy: $\textstyle \sum _ { k = 1 } ^ { \infty } \alpha _ { k } = \infty$ and $\textstyle \sum _ { k = 1 } ^ { \infty } \alpha _ { k } ^ { 2 } < \infty$ . So we set $\alpha _ { k } = C k ^ { - d }$ and perform a grid search over $\{ C , d \}$ in the range $[ 1 0 ^ { - 4 } , 1 0 ] \times [ 0 . 5 1 , 1 ]$ , checking $5 \times 5$ values to find the best setting for each of the three problems. The selected values are in Table 1. ", + "bbox": [ + 173, + 296, + 826, + 412 + ], + "page_idx": 33 + }, + { + "type": "table", + "img_path": "images/dd0e452b28fcdce907b3cc14b3e92d306bb1210d5ecdd036a064d82a6b7c8165.jpg", + "table_caption": [ + "Table 1: Parameter Values for S-Tseng " + ], + "table_footnote": [], + "table_body": "
epsilonSUSYreal-sim
C0.560.560.77
d0.60.60.55
", + "bbox": [ + 377, + 429, + 614, + 493 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "The work of BΓΆhm et al. (2020) also introduced $\\mathrm { F B F p }$ , a stochastic version of Tseng’s method that reuses a previously-computed gradient and therefore only needs one additional gradient calculation per iteration. In our experiments, the performance of the two methods was about the same, so we only report the performance of stoch. Tseng’s method. ", + "bbox": [ + 174, + 560, + 825, + 617 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "For variance-reduced FRB, the main parameter is the probability $p$ . We hand-tuned $p$ ,arriving at $p = 0 . 0 1$ for all problems. We set the stepsize to its maximum allowed value of ", + "bbox": [ + 173, + 623, + 825, + 652 + ], + "page_idx": 33 + }, + { + "type": "equation", + "img_path": "images/1d2d594ab5b0a4bd7eba2426ebd3b854f23aa6cc165fa808793d71d980fa5604.jpg", + "text": "$$\n\\tau = { \\frac { 1 - \\sqrt { 1 - p } } { 2 L } } .\n$$", + "text_format": "latex", + "bbox": [ + 437, + 665, + 560, + 698 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Plots versus Epoch Figure 2 plots the performance of each method versus epoch (i.e. data pass). This shows an even more dramatic benefit for the stochastic methods than the plots versus time, since at each iteration the stochastic methods only need to process small amounts of data, whereas deterministic methods must process all of it. We believe these benefits do not fully manifest themselves in the plots versus time due to overheads in each iteration of the stochastic methods, multithreading providing a boost for the deterministic methods, memory access patterns, and other practical considerations. ", + "bbox": [ + 173, + 717, + 826, + 815 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Fraction of Nonzero Entries versus Running time Figure 3 plots the fraction of nonzero entries in the iterates of each method versus running time. 
For each method, we used the output of $\mathrm { p r o x } _ { c \| \cdot \| _ { 1 } }$ . We observe that our methods produce sparse intermediate iterates for two of the three problems. This is one of the benefits of proximal splitting algorithms in general, including our method. For the other problem, SUSY, no method produces sparse iterates, suggesting that $c$ should be increased if sparse solutions are desired. ", + "bbox": [ + 173, + 838, + 825, + 924 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": "images/7829a04c81fd4e0356905a55be1d4c62aec1a4bb1bef565b9369bc3a3ec4170b.jpg", + "image_caption": [ + "Figure 3: Fraction of nonzero entries versus running time for the three datasets. Left: epsilon, middle: SUSY, right: real-sim. " + ], + "image_footnote": [], + "bbox": [ + 179, + 113, + 805, + 228 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "J LOCAL CONVERGENCE ON NON-MONOTONE PROBLEMS ", + "text_level": 1, + "bbox": [ + 174, + 289, + 674, + 305 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "The work by Hsieh et al. (2020) provides a local convergence analysis for DSEG applied to locally monotone problems. Recall that DSEG is equivalent to the special case of SPS for which $n = 0$ . While extending this result to the more general setting of SPS is beyond the scope of this manuscript, we next provide a preliminary sketch of how the analysis of Hsieh et al. (2020) might be generalized to our setting. We leave a formal proof to future work. ", + "bbox": [ + 174, + 320, + 826, + 390 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Sketch of assumptions and main result The first assumption needed is the existence of an isolated solution $p ^ { * } = ( z ^ { * } , w _ { 1 } ^ { * } , \ldots , w _ { n + 1 } ^ { * } ) \in \mathcal { S }$ . 
We then require that there exists a ball $\\mathbb { B } _ { r } \\big ( z ^ { * } \\big )$ , centered at $z ^ { * }$ , throughout which the operator $B$ is β€œwell-behaved”, meaning that it satisfies monotonicity and Lipschitz continuity. In addition, we need each $A _ { i }$ , for $i \\in 1 . . n$ , to be maximal monotone within this ball. Outside of the ball, the operators do not need to be monotone or Lipschitz. ", + "bbox": [ + 174, + 405, + 825, + 476 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Following (Hsieh et al., 2020, Assumption $2 ^ { \\prime }$ ), the noise variance assumptions are slightly stronger than in the monotone case. In particular, we require that $\\mathbb { E } [ \\| \\epsilon ^ { k } \\| ^ { q } | \\mathcal { F } _ { k } ] \\le \\dot { N } ^ { q }$ and $\\mathbb { E } [ \\| e ^ { k } \\| ^ { q } | \\dot { \\mathcal { F } } _ { k } ] \\le \\mathsf { \\bar { N } } ^ { q }$ for some $q > 2$ . As before, the noise must be zero-mean. Finally, the stepsize requirements are also slightly stronger than (12), having the added assumption that $\\textstyle \\sum _ { k = 1 } ^ { \\infty } \\rho _ { k } ^ { q } < \\infty$ . ", + "bbox": [ + 174, + 482, + 825, + 540 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "With these assumptions, the goal is to show that, so long as the initial point $p ^ { 1 }$ is sufficiently close to $p ^ { * }$ , then with high probability $p ^ { k }$ converges to $p ^ { * }$ . 
", + "bbox": [ + 173, + 546, + 821, + 575 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Proof strategy The initial strategy is to develop the following recursion, satisfied by SPS, that does not (yet) utilize local monotonicity or Lipschitz continuity: ", + "bbox": [ + 169, + 589, + 823, + 618 + ], + "page_idx": 34 + }, + { + "type": "equation", + "img_path": "images/7bf4da9c6f11162f41c376cf0be6ea4f7ad92bfa09374aa8db24c9a79394cf79.jpg", + "text": "$$\n\\begin{array} { r l } & { \\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } \\leq ( 1 + c _ { 1 } \\alpha _ { k } ^ { 2 } ) \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - c _ { 2 } \\alpha _ { k } \\rho _ { k } ( T _ { k } ^ { \\prime } + l _ { k } + r _ { k } ) - c _ { 3 } \\alpha _ { k } ( r _ { k } ^ { \\prime } + q _ { k } ) } \\\\ & { \\qquad + c _ { 1 } \\alpha _ { k } ^ { 2 } \\big ( \\| e ^ { k } \\| ^ { 2 } + \\| \\epsilon ^ { k } \\| ^ { 2 } + c _ { 4 } \\big ) + c _ { 5 } \\alpha _ { k } q _ { k } ^ { \\prime } } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 223, + 623, + 774, + 667 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "for appropriate constants $c _ { 1 } \\ldots c _ { 5 } \\geq 0$ . 
In this inequality, we use ", + "bbox": [ + 173, + 670, + 598, + 685 + ], + "page_idx": 34 + }, + { + "type": "equation", + "img_path": "images/f3f22522325ad822559e198d45d55b9d852139e6bd903ed78054b5cb1e6a4087.jpg", + "text": "$$\n\\begin{array} { l } { \\displaystyle T _ { k } ^ { \\prime } \\stackrel { \\prime } { = } \\frac { \\tau } { \\overline { { \\rho } } } \\displaystyle \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\frac { 1 } { \\overline { { \\rho } } \\tau } \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } , } \\\\ { \\displaystyle l _ { k } \\stackrel { \\prime } { = } \\displaystyle \\sum _ { i = 1 } ^ { n } \\langle z ^ { * } - x _ { i } ^ { k } , w _ { i } ^ { * } - y _ { i } ^ { k } \\rangle + \\big \\langle z ^ { * } - x _ { n + 1 } ^ { k } , w _ { i } ^ { * } - B ( x _ { n + 1 } ^ { k } ) \\big \\rangle , } \\\\ { \\displaystyle r _ { k } \\stackrel { \\prime } { = } \\big \\langle k ^ { \\ell } , B ( \\tilde { x } ^ { k } ) - w _ { n + 1 } ^ { k } \\big \\rangle , } \\\\ { \\displaystyle r _ { k } ^ { \\prime } \\stackrel { \\prime } { = } \\big \\langle z ^ { k } - z ^ { * } , e ^ { k } \\big \\rangle , } \\\\ { \\displaystyle q _ { k } \\triangleq \\big ( \\rho _ { k } ^ { - 1 } - d / 2 \\big ) \\| \\tilde { x } ^ { k } - z ^ { k } \\| ^ { 2 } - \\| \\tilde { x } ^ { k } - z ^ { k } \\| \\| B ( \\tilde { x } ^ { k } ) - B ( z ^ { k } ) \\| } \\\\ { \\displaystyle q _ { k } ^ { \\prime } \\stackrel { \\prime } { = } \\rho _ { k } \\| \\epsilon ^ { k } \\| \\| B x _ { n + 1 } ^ { k } - B \\tilde { x } ^ { k } \\| + \\frac { 1 } { 2 d } \\| B \\tilde { x } _ { n + 1 } ^ { k } - B x _ { n + 1 } ^ { k } \\| ^ { 2 } , } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 290, + 690, + 705, + 869 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "where ", + "bbox": [ + 173, + 871, + 217, + 885 + ], + "page_idx": 34 + }, + { + "type": "equation", + "img_path": 
"images/ed91628b31c8533c4865e193ba80f7b606977d0725547c8ef5a119d74de7a711.jpg", + "text": "$$\n\\tilde { x } ^ { k } \\doteq z ^ { k } - \\rho _ { k } \\bigl ( B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\bigr ) \\qquad d \\doteq \\frac { 1 - \\overline { { \\rho } } L } { 1 + \\overline { { \\rho } } / 2 } ,\n$$", + "text_format": "latex", + "bbox": [ + 290, + 890, + 707, + 921 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "with $L$ being the local Lipschitz constant of $B$ on $\\mathbb { B } _ { r } \\big ( z ^ { * } \\big )$ . The iterate $\\tilde { x } ^ { k }$ is the analog of the iterate $\\tilde { X } _ { t + 1 / 2 }$ used in Hsieh et al. (2020). ", + "bbox": [ + 173, + 102, + 825, + 135 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "The recursion (79) is derived by once again starting from (13) and following the arguments leading to (35), but this time not taking conditional expectations. In particular, the upper bounds on $\\| \\nabla _ { z } \\varphi _ { k } \\| ^ { 2 }$ and $\\| \\nabla _ { w _ { i } } \\varphi _ { k } \\| ^ { 2 }$ contribute the terms $c _ { 1 } \\alpha _ { k } ^ { 2 } ( \\| \\bar { e } ^ { k } \\| ^ { 2 } + \\| \\epsilon ^ { k } \\| ^ { 2 } + c _ { 4 } )$ and $c _ { 1 } \\alpha _ { k } ^ { 2 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 }$ . For $i \\in 1 . . n$ , the ${ } ^ { \\mathfrak { e } } \\varphi _ { i , k }$ -gap\" term, $\\varphi _ { i , k } ( p ^ { k } ) - \\varphi _ { i , k } ( p ^ { * } )$ , is dealt with in a similar manner to Section C.5, but this time not using monotonicity as in (36). This contributes $T _ { k } ^ { \\prime }$ and the first term in $l _ { k }$ . Finally, as we sketch below, the ${ ^ { \\circ } } \\varphi _ { n + 1 , k }$ -gap\" term contributes $r _ { k } , r _ { k } ^ { \\prime } , q _ { k } , q _ { k } ^ { \\prime }$ , and the last term in $l _ { k }$ . 
", + "bbox": [ + 173, + 140, + 826, + 231 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "For the $\\cdot \\circ _ { n + 1 , k } \\cdot \\mathbf { g } \\mathbf { a p } ^ { , , }$ , that is, $\\varphi _ { n + 1 , k } ( p ^ { k } ) - \\varphi _ { n + 1 , k } ( p ^ { * } )$ , we have to depart from the analysis in Section C.6 and use an alternative argument involving $\\tilde { x } ^ { k }$ . We now provide some details of this argument: in the following, we use $B z$ as shorthand for $B ( z )$ for any vector $z \\in \\mathbb { R } ^ { d }$ . We begin the analysis with ", + "bbox": [ + 174, + 236, + 825, + 295 + ], + "page_idx": 35 + }, + { + "type": "equation", + "img_path": "images/7a41c8543df977baa533186e1906ea9f82ac0387eb9048b6fbfc355353936664.jpg", + "text": "$$\n\\begin{array} { r l } & { \\varphi _ { n + 1 , k } ( p ^ { k } ) = \\langle z ^ { k } - x _ { n + 1 } ^ { k } , y _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\rangle } \\\\ & { \\qquad = \\langle z ^ { k } - x _ { n + 1 } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\rangle + \\underbrace { \\langle z ^ { k } - x _ { n + 1 } ^ { k } , e ^ { k } \\rangle } _ { \\mathrm { p a r t } \\mathrm { o f } r _ { k } ^ { \\prime } } . 
} \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 282, + 301, + 715, + 361 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "The final term will combine with the term $\\langle x _ { n + 1 } ^ { k } - z ^ { * } , e ^ { k } \\rangle$ coming from ", + "bbox": [ + 174, + 372, + 648, + 390 + ], + "page_idx": 35 + }, + { + "type": "equation", + "img_path": "images/c578f3123900feba8fdaeb554b4a6137548b55fb56b425257c0fcd79e2a1e4e3.jpg", + "text": "$$\n\\begin{array} { r l } & { - \\varphi _ { n + 1 , k } ( p ^ { * } ) = \\langle z ^ { * } - x _ { n + 1 } ^ { k } , w _ { n + 1 } ^ { * } - y _ { n + 1 } ^ { k } \\rangle } \\\\ & { \\qquad = \\langle z ^ { * } - x _ { n + 1 } ^ { k } , w _ { n + 1 } ^ { * } - B x _ { n + 1 } ^ { k } \\rangle + \\langle x _ { n + 1 } ^ { k } - z ^ { * } , e _ { n + 1 } ^ { k } \\rangle } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 269, + 395, + 725, + 439 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "to yield $r _ { k } ^ { \\prime }$ above. Equation (82) also yields the second term in $l _ { k }$ . 
Using that $\\tilde { x } ^ { k } - x _ { n + 1 } ^ { k } = \\rho _ { k } \\epsilon _ { k }$ , we rewrite the first term in (81) as ", + "bbox": [ + 178, + 444, + 823, + 474 + ], + "page_idx": 35 + }, + { + "type": "equation", + "img_path": "images/cc88216d34b3fd4bf4c4a1541ff95dbf9379765edad8df4b947cdd641883ac92.jpg", + "text": "$$\n\\begin{array} { r l } & { \\bigl \\langle z ^ { k } - x _ { n + 1 } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\bigr \\rangle = \\bigl \\langle z ^ { k } - \\tilde { x } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\bigr \\rangle + \\bigl \\langle \\tilde { x } ^ { k } - x _ { n + 1 } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\bigr \\rangle } \\\\ & { \\qquad = \\bigl \\langle z ^ { k } - \\tilde { x } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\bigr \\rangle + \\rho _ { k } \\bigl \\langle \\epsilon ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\bigr \\rangle \\qquad } \\\\ & { \\qquad = \\bigl \\langle z ^ { k } - \\tilde { x } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\bigr \\rangle + \\rho _ { k } \\bigl \\langle \\epsilon ^ { k } , B x _ { n + 1 } ^ { k } - B \\tilde { x } ^ { k } \\bigr \\rangle \\qquad ( 8 } \\\\ & { \\qquad + \\rho _ { k } \\underbrace { \\bigl \\langle \\epsilon ^ { k } , B \\tilde { x } ^ { k } - w _ { n + 1 } ^ { k } \\bigr \\rangle } _ { r _ { k } } . 
} \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 189, + 478, + 807, + 582 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Next, the terms in (83) admit the lower bound ", + "bbox": [ + 173, + 587, + 475, + 602 + ], + "page_idx": 35 + }, + { + "type": "equation", + "img_path": "images/24c5b535d5e1260db5d97d0bc677844097fec4c9d993e421704c387d232c8f1b.jpg", + "text": "$$\n\\begin{array} { r l } & { \\langle z ^ { k } - { \\tilde { x } } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\rangle + \\rho _ { k } \\langle \\epsilon ^ { k } , B x _ { n + 1 } ^ { k } - B { \\tilde { x } } ^ { k } \\rangle } \\\\ & { \\qquad \\geq \\langle z ^ { k } - { \\tilde { x } } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\rangle - \\underbrace { \\rho _ { k } \\| \\epsilon ^ { k } \\| \\| B x _ { n + 1 } ^ { k } - B { \\tilde { x } } ^ { k } \\| } _ { \\mathrm { ~ } } . } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 189, + 609, + 808, + 657 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Considering the first term on right-hand side of this bound, we also have ", + "bbox": [ + 173, + 679, + 648, + 695 + ], + "page_idx": 35 + }, + { + "type": "equation", + "img_path": "images/e2ed6a3f0f6da8fe7c33a178128929042bd9ce72ad8007fbde38da35bfdfb164.jpg", + "text": "$$\n\\begin{array} { r l } { { \\langle z ^ { k } - \\tilde { x } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\rangle = \\langle z ^ { k } - \\tilde { x } ^ { k } , B \\tilde { x } ^ { k } - w _ { n + 1 } ^ { k } \\rangle + \\langle z ^ { k } - \\tilde { x } ^ { k } , B x _ { n + 1 } ^ { k } - B \\tilde { x } ^ { k } \\rangle } } \\\\ & { \\geq \\langle z ^ { k } - \\tilde { x } ^ { k } , B \\tilde { x } ^ { k } - w _ { n + 1 } ^ { k } \\rangle - \\displaystyle \\frac { d } { 2 } \\| z ^ { k } - \\tilde { x } ^ { k } \\| ^ { 2 } - \\displaystyle \\frac { 1 } { \\underline { { 2 d } } } \\| B \\tilde { x } ^ { k } - B x _ { n + 1 } ^ { k } \\| ^ { 
2 } } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 181, + 700, + 820, + 761 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "for any $d > 0$ , using Young’s inequality. Finally, for the first two terms of the right-hand side of the above relation, we may write ", + "bbox": [ + 174, + 781, + 820, + 810 + ], + "page_idx": 35 + }, + { + "type": "equation", + "img_path": "images/74fa6f6938f7b925da332c981fd67157ffb313cbd333d8882c0638d5c2605496.jpg", + "text": "$$\n\\begin{array} { r l } { { \\langle z ^ { k } - \\tilde { x } ^ { k } , B \\tilde { x } ^ { k } - w _ { n + 1 } ^ { k } \\rangle - \\frac { d } { 2 } \\| z ^ { k } - \\tilde { x } ^ { k } \\| ^ { 2 } } } \\\\ & { = \\langle z ^ { k } - \\tilde { x } ^ { k } , B z ^ { k } - w _ { n + 1 } ^ { k } \\rangle + \\langle z ^ { k } - \\tilde { x } ^ { k } , B \\tilde { x } ^ { k } - B z ^ { k } \\rangle - \\frac { d } { 2 } \\| z ^ { k } - \\tilde { x } ^ { k } \\| ^ { 2 } } \\\\ & { \\quad \\quad \\geq \\underbrace { ( \\rho _ { k } ^ { - 1 } - d / 2 ) \\| z ^ { k } - \\tilde { x } ^ { k } \\| ^ { 2 } - \\| z ^ { k } - \\tilde { x } ^ { k } \\| \\| B \\tilde { x } ^ { k } - B z ^ { k } \\| } _ { q _ { k } } , } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 189, + 819, + 810, + 924 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "where in the final inequality we use the Cauchy-Schwartz inequality and substitute $B z ^ { k } - w _ { n + 1 } ^ { k } =$ $\\rho _ { k } ^ { - 1 } ( z ^ { k } - \\tilde { x } ^ { k } )$ , from the definition of $\\tilde { x } ^ { k }$ in (80). We have now accounted for all the terms appearing in (79). ", + "bbox": [ + 173, + 102, + 825, + 148 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "The recursion (79) is analogous to equation (F.7) on page 24 of Hsieh et al. (2020) and provides the starting point for the local convergence analysis. The next step would be to derive an analog of Theorem F.1. of Hsieh et al. (2020) using (79). 
The following translation to the notation of Theorem F.1. could be used (note that Hsieh et al. (2020) uses $t$ for iteration counter): ", + "bbox": [ + 173, + 155, + 825, + 212 + ], + "page_idx": 36 + }, + { + "type": "equation", + "img_path": "images/f2736fc8a856d56c68e3ef766dd4fd1aec18e41e3f04b682737e69e5727fa78a.jpg", + "text": "$$\n\\begin{array} { r l } & { D _ { k } = \\| p ^ { k } - p ^ { * } \\| ^ { 2 } , } \\\\ & { \\zeta _ { k } = c _ { 2 } \\alpha _ { k } \\rho _ { k } ( T _ { k } ^ { \\prime } + l _ { k } ) + c _ { 3 } \\alpha _ { k } q _ { k } , } \\\\ & { \\xi _ { k } = - c _ { 2 } \\alpha _ { k } \\rho _ { k } r _ { k } - c _ { 3 } \\alpha _ { k } r _ { k } ^ { \\prime } , } \\\\ & { \\chi _ { k } = c _ { 1 } \\alpha _ { k } ^ { 2 } \\big ( \\| e ^ { k } \\| ^ { 2 } + \\| \\epsilon ^ { k } \\| ^ { 2 } + \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + c _ { 4 } \\big ) + c _ { 5 } \\alpha _ { k } q _ { k } ^ { \\prime } , } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 300, + 215, + 696, + 297 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "and the event $E _ { \\infty } ^ { \\rho }$ is translated to ", + "bbox": [ + 173, + 301, + 392, + 316 + ], + "page_idx": 36 + }, + { + "type": "equation", + "img_path": "images/03ca7d5826d37d3dfa079241797f56a314e74e8ce752b65bfea63722f09d3976.jpg", + "text": "$$\n\\begin{array} { r } { E _ { \\infty } ^ { \\rho } = \\left\\{ x _ { n + 1 } ^ { k } \\in \\mathbb { B } _ { r } ( z ^ { * } ) , \\tilde { x } ^ { k } \\in \\mathbb { B } _ { \\rho r } ( z ^ { * } ) , p ^ { k } \\in \\mathbb { B } _ { \\rho r } ( p ^ { * } ) \\mathrm { ~ f o r ~ a l l ~ } k = 1 , 2 , \\ldots \\right\\} . } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 248, + 323, + 746, + 343 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "An analog of Theorem 2 of Hsieh et al. (2020) could then be developed based on this result. 
", + "bbox": [ + 168, + 348, + 774, + 363 + ], + "page_idx": 36 + } +] \ No newline at end of file diff --git a/parse/dev/a0SRWViFYW/a0SRWViFYW_middle.json b/parse/dev/a0SRWViFYW/a0SRWViFYW_middle.json new file mode 100644 index 0000000000000000000000000000000000000000..b26ff31fe21a8d3a532a09a793a28a15d217bb9b --- /dev/null +++ b/parse/dev/a0SRWViFYW/a0SRWViFYW_middle.json @@ -0,0 +1,117092 @@ +{ + "pdf_info": [ + { + "preproc_blocks": [ + { + "type": "title", + "bbox": [ + 108, + 78, + 504, + 136 + ], + "lines": [ + { + "bbox": [ + 105, + 78, + 389, + 98 + ], + "spans": [ + { + "bbox": [ + 105, + 78, + 389, + 98 + ], + "score": 1.0, + "content": "STOCHASTIC PROJECTIVE SPLITTING:", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 98, + 505, + 117 + ], + "spans": [ + { + "bbox": [ + 105, + 98, + 505, + 117 + ], + "score": 1.0, + "content": "SOLVING SADDLE-POINT PROBLEMS WITH MULTIPLE", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 119, + 225, + 137 + ], + "spans": [ + { + "bbox": [ + 106, + 119, + 225, + 137 + ], + "score": 1.0, + "content": "REGULARIZERS", + "type": "text" + } + ], + "index": 2 + } + ], + "index": 1 + }, + { + "type": "text", + "bbox": [ + 112, + 155, + 244, + 177 + ], + "lines": [ + { + "bbox": [ + 113, + 155, + 201, + 167 + ], + "spans": [ + { + "bbox": [ + 113, + 155, + 201, + 167 + ], + "score": 1.0, + "content": "Anonymous authors", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 166, + 245, + 178 + ], + "spans": [ + { + "bbox": [ + 111, + 166, + 245, + 178 + ], + "score": 1.0, + "content": "Paper under double-blind review", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 3.5 + }, + { + "type": "title", + "bbox": [ + 278, + 207, + 333, + 218 + ], + "lines": [ + { + "bbox": [ + 276, + 206, + 335, + 219 + ], + "spans": [ + { + "bbox": [ + 276, + 206, + 335, + 219 + ], + "score": 1.0, + "content": "ABSTRACT", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 5 + 
}, + { + "type": "text", + "bbox": [ + 143, + 234, + 469, + 388 + ], + "lines": [ + { + "bbox": [ + 142, + 235, + 470, + 248 + ], + "spans": [ + { + "bbox": [ + 142, + 235, + 470, + 248 + ], + "score": 1.0, + "content": "We present a new, stochastic variant of the projective splitting (PS) family of", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 141, + 245, + 471, + 259 + ], + "spans": [ + { + "bbox": [ + 141, + 245, + 471, + 259 + ], + "score": 1.0, + "content": "algorithms for monotone inclusion problems. It can solve min-max and noncoop-", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 141, + 257, + 469, + 268 + ], + "spans": [ + { + "bbox": [ + 141, + 257, + 469, + 268 + ], + "score": 1.0, + "content": "erative game formulations arising in applications such as robust ML without the", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 141, + 268, + 470, + 280 + ], + "spans": [ + { + "bbox": [ + 141, + 268, + 470, + 280 + ], + "score": 1.0, + "content": "convergence issues associated with gradient descent-ascent, the current de facto", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 141, + 279, + 470, + 290 + ], + "spans": [ + { + "bbox": [ + 141, + 279, + 470, + 290 + ], + "score": 1.0, + "content": "standard approach in ML applications. Our proposal is the first version of PS able", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 141, + 290, + 470, + 302 + ], + "spans": [ + { + "bbox": [ + 141, + 290, + 470, + 302 + ], + "score": 1.0, + "content": "to use stochastic gradient oracles. 
It can solve min-max games while handling", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 141, + 300, + 471, + 313 + ], + "spans": [ + { + "bbox": [ + 141, + 300, + 471, + 313 + ], + "score": 1.0, + "content": "multiple constraints and nonsmooth regularizers via projection and proximal op-", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 141, + 312, + 471, + 324 + ], + "spans": [ + { + "bbox": [ + 141, + 312, + 471, + 324 + ], + "score": 1.0, + "content": "erators. Unlike other stochastic splitting methods that can solve such problems,", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 141, + 322, + 471, + 335 + ], + "spans": [ + { + "bbox": [ + 141, + 322, + 471, + 335 + ], + "score": 1.0, + "content": "our method does not rely on a product-space reformulation of the original problem.", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 141, + 334, + 469, + 346 + ], + "spans": [ + { + "bbox": [ + 141, + 334, + 469, + 346 + ], + "score": 1.0, + "content": "We prove almost-sure convergence of the iterates to the solution and a convergence", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 141, + 345, + 469, + 356 + ], + "spans": [ + { + "bbox": [ + 141, + 345, + 469, + 356 + ], + "score": 1.0, + "content": "rate for the expected residual. By working with monotone inclusions rather than", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 141, + 355, + 470, + 368 + ], + "spans": [ + { + "bbox": [ + 141, + 355, + 470, + 368 + ], + "score": 1.0, + "content": "variational inequalities, our analysis avoids the drawbacks of measuring conver-", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 141, + 367, + 469, + 379 + ], + "spans": [ + { + "bbox": [ + 141, + 367, + 469, + 379 + ], + "score": 1.0, + "content": "gence through the restricted gap function. 
We close with numerical experiments on", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 141, + 378, + 380, + 390 + ], + "spans": [ + { + "bbox": [ + 141, + 378, + 380, + 390 + ], + "score": 1.0, + "content": "a distributionally robust sparse logistic regression problem.", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 12.5 + }, + { + "type": "title", + "bbox": [ + 108, + 417, + 206, + 430 + ], + "lines": [ + { + "bbox": [ + 105, + 416, + 208, + 433 + ], + "spans": [ + { + "bbox": [ + 105, + 416, + 208, + 433 + ], + "score": 1.0, + "content": "1 INTRODUCTION", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 20 + }, + { + "type": "text", + "bbox": [ + 107, + 445, + 505, + 544 + ], + "lines": [ + { + "bbox": [ + 105, + 444, + 507, + 459 + ], + "spans": [ + { + "bbox": [ + 105, + 444, + 507, + 459 + ], + "score": 1.0, + "content": "The most prominent application of optimization in ML is empirical risk minimization. However,", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 106, + 456, + 506, + 469 + ], + "spans": [ + { + "bbox": [ + 106, + 456, + 506, + 469 + ], + "score": 1.0, + "content": "inspired by the success of GANs (Goodfellow et al., 2014). 
, ML practitioners have developed more", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 106, + 467, + 506, + 480 + ], + "spans": [ + { + "bbox": [ + 106, + 467, + 506, + 480 + ], + "score": 1.0, + "content": "complicated min-max and adversarial optimization formulations (Yu et al., 2021; Kuhn et al., 2019;", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 478, + 506, + 491 + ], + "spans": [ + { + "bbox": [ + 106, + 478, + 506, + 491 + ], + "score": 1.0, + "content": "Shafieezadeh-Abadeh et al., 2015; Sinha et al., 2018; Lin et al., 2020; Namkoong & Duchi, 2016;", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 489, + 506, + 503 + ], + "spans": [ + { + "bbox": [ + 105, + 489, + 506, + 503 + ], + "score": 1.0, + "content": "Huang et al., 2017; Wadsworth et al., 2018; Zhang et al., 2018; Edwards & Storkey, 2015; Celis", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 499, + 506, + 514 + ], + "spans": [ + { + "bbox": [ + 105, + 499, + 506, + 514 + ], + "score": 1.0, + "content": "& Keswani, 2019). Solving these multi-player games leads to issues not seen when minimizing a", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 511, + 506, + 524 + ], + "spans": [ + { + "bbox": [ + 105, + 511, + 506, + 524 + ], + "score": 1.0, + "content": "single-player loss function. 
The competitive nature of a game leads to rotational dynamics that can", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 106, + 522, + 506, + 536 + ], + "spans": [ + { + "bbox": [ + 106, + 522, + 506, + 536 + ], + "score": 1.0, + "content": "cause intuitive gradient-based methods to fail to converge (Gidel et al., 2019; Daskalakis et al., 2018;", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 533, + 183, + 545 + ], + "spans": [ + { + "bbox": [ + 106, + 533, + 183, + 545 + ], + "score": 1.0, + "content": "Hsieh et al., 2020).", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 25 + }, + { + "type": "text", + "bbox": [ + 107, + 550, + 505, + 638 + ], + "lines": [ + { + "bbox": [ + 105, + 550, + 505, + 563 + ], + "spans": [ + { + "bbox": [ + 105, + 550, + 505, + 563 + ], + "score": 1.0, + "content": "A mathematical framework underlying both convex optimization and saddle-point problems is the", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 562, + 505, + 573 + ], + "spans": [ + { + "bbox": [ + 105, + 562, + 505, + 573 + ], + "score": 1.0, + "content": "monotone inclusion problem; see Ryu & Boyd (2016) for an introduction. Methods developed", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 573, + 505, + 585 + ], + "spans": [ + { + "bbox": [ + 105, + 573, + 505, + 585 + ], + "score": 1.0, + "content": "for monotone inclusions will converge for convex-concave, games as they are explicitly designed", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 583, + 506, + 595 + ], + "spans": [ + { + "bbox": [ + 105, + 583, + 506, + 595 + ], + "score": 1.0, + "content": "to handle such problems’ governing dynamics. 
In recent years, monotone inclusion methods and", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 594, + 506, + 606 + ], + "spans": [ + { + "bbox": [ + 105, + 594, + 506, + 606 + ], + "score": 1.0, + "content": "theory have started to receive attention in the ML community (Diakonikolas, 2020; Liu et al., 2021;", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 604, + 507, + 618 + ], + "spans": [ + { + "bbox": [ + 105, + 604, + 507, + 618 + ], + "score": 1.0, + "content": "Ryu et al., 2020; Pathak & Wainwright, 2020), with a focus on monotone variational inequalities,", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 616, + 506, + 628 + ], + "spans": [ + { + "bbox": [ + 105, + 616, + 506, + 628 + ], + "score": 1.0, + "content": "which form a special case of monotone inclusions (Antonakopoulos et al., 2019; Gidel et al., 2019;", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 627, + 391, + 639 + ], + "spans": [ + { + "bbox": [ + 105, + 627, + 391, + 639 + ], + "score": 1.0, + "content": "Daskalakis et al., 2018; Hsieh et al., 2020; Mertikopoulos et al., 2019).", + "type": "text" + } + ], + "index": 37 + } + ], + "index": 33.5 + }, + { + "type": "text", + "bbox": [ + 107, + 643, + 505, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 643, + 506, + 657 + ], + "spans": [ + { + "bbox": [ + 105, + 643, + 506, + 657 + ], + "score": 1.0, + "content": "The most prevalent methods for solving min-max games in ML are variants of gradient descent-ascent", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 655, + 505, + 667 + ], + "spans": [ + { + "bbox": [ + 105, + 655, + 505, + 667 + ], + "score": 1.0, + "content": "(GDA). 
This method alternates between a gradient-descent step for the minimizing player and a", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 106, + 666, + 505, + 678 + ], + "spans": [ + { + "bbox": [ + 106, + 666, + 505, + 678 + ], + "score": 1.0, + "content": "gradient-ascent step for the maximizing player. Unfortunately, GDA requires additional assumptions", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 677, + 505, + 689 + ], + "spans": [ + { + "bbox": [ + 105, + 677, + 505, + 689 + ], + "score": 1.0, + "content": "to converge on convex-concave games, and it even fails for some simple 2D bilinear games (Gidel", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 688, + 506, + 700 + ], + "spans": [ + { + "bbox": [ + 105, + 688, + 506, + 700 + ], + "score": 1.0, + "content": "et al., 2019, Prop. 1). While there have been several approaches to modify either GDA (Chavdarova", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 104, + 698, + 506, + 712 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 506, + 712 + ], + "score": 1.0, + "content": "et al., 2021; Grnarova et al., 2021; Balduzzi et al., 2018) or the underlying game objective (Mescheder", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 709, + 506, + 722 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 506, + 722 + ], + "score": 1.0, + "content": "et al., 2018; Nagarajan & Kolter, 2017; Mescheder et al., 2017) to ensure convergence, this paper", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 720, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 720, + 506, + 733 + ], + "score": 1.0, + "content": "instead develops a method for solving monotone inclusions that can naturally handle game dynamics.", + "type": "text" + } + ], + "index": 45 + } + ], + "index": 41.5 + } + ], + "page_idx": 0, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 
27, + 308, + 37 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 302, + 751, + 308, + 760 + ], + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 761 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 761 + ], + "score": 1.0, + "content": "1", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "title", + "bbox": [ + 108, + 78, + 504, + 136 + ], + "lines": [ + { + "bbox": [ + 105, + 78, + 389, + 98 + ], + "spans": [ + { + "bbox": [ + 105, + 78, + 389, + 98 + ], + "score": 1.0, + "content": "STOCHASTIC PROJECTIVE SPLITTING:", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 98, + 505, + 117 + ], + "spans": [ + { + "bbox": [ + 105, + 98, + 505, + 117 + ], + "score": 1.0, + "content": "SOLVING SADDLE-POINT PROBLEMS WITH MULTIPLE", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 119, + 225, + 137 + ], + "spans": [ + { + "bbox": [ + 106, + 119, + 225, + 137 + ], + "score": 1.0, + "content": "REGULARIZERS", + "type": "text" + } + ], + "index": 2 + } + ], + "index": 1 + }, + { + "type": "text", + "bbox": [ + 112, + 155, + 244, + 177 + ], + "lines": [ + { + "bbox": [ + 113, + 155, + 201, + 167 + ], + "spans": [ + { + "bbox": [ + 113, + 155, + 201, + 167 + ], + "score": 1.0, + "content": "Anonymous authors", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 166, + 245, + 178 + ], + "spans": [ + { + "bbox": [ + 111, + 166, + 245, + 178 + ], + "score": 1.0, + "content": "Paper under double-blind review", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 3.5, + "bbox_fs": [ + 111, + 155, + 245, + 178 + ] + }, + { + "type": "title", + "bbox": [ + 278, + 207, + 333, + 218 + ], + "lines": [ + { + "bbox": [ + 276, + 206, + 335, + 219 + ], + "spans": [ + { 
+ "bbox": [ + 276, + 206, + 335, + 219 + ], + "score": 1.0, + "content": "ABSTRACT", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 5 + }, + { + "type": "text", + "bbox": [ + 143, + 234, + 469, + 388 + ], + "lines": [ + { + "bbox": [ + 142, + 235, + 470, + 248 + ], + "spans": [ + { + "bbox": [ + 142, + 235, + 470, + 248 + ], + "score": 1.0, + "content": "We present a new, stochastic variant of the projective splitting (PS) family of", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 141, + 245, + 471, + 259 + ], + "spans": [ + { + "bbox": [ + 141, + 245, + 471, + 259 + ], + "score": 1.0, + "content": "algorithms for monotone inclusion problems. It can solve min-max and noncoop-", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 141, + 257, + 469, + 268 + ], + "spans": [ + { + "bbox": [ + 141, + 257, + 469, + 268 + ], + "score": 1.0, + "content": "erative game formulations arising in applications such as robust ML without the", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 141, + 268, + 470, + 280 + ], + "spans": [ + { + "bbox": [ + 141, + 268, + 470, + 280 + ], + "score": 1.0, + "content": "convergence issues associated with gradient descent-ascent, the current de facto", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 141, + 279, + 470, + 290 + ], + "spans": [ + { + "bbox": [ + 141, + 279, + 470, + 290 + ], + "score": 1.0, + "content": "standard approach in ML applications. Our proposal is the first version of PS able", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 141, + 290, + 470, + 302 + ], + "spans": [ + { + "bbox": [ + 141, + 290, + 470, + 302 + ], + "score": 1.0, + "content": "to use stochastic gradient oracles. 
It can solve min-max games while handling", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 141, + 300, + 471, + 313 + ], + "spans": [ + { + "bbox": [ + 141, + 300, + 471, + 313 + ], + "score": 1.0, + "content": "multiple constraints and nonsmooth regularizers via projection and proximal op-", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 141, + 312, + 471, + 324 + ], + "spans": [ + { + "bbox": [ + 141, + 312, + 471, + 324 + ], + "score": 1.0, + "content": "erators. Unlike other stochastic splitting methods that can solve such problems,", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 141, + 322, + 471, + 335 + ], + "spans": [ + { + "bbox": [ + 141, + 322, + 471, + 335 + ], + "score": 1.0, + "content": "our method does not rely on a product-space reformulation of the original problem.", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 141, + 334, + 469, + 346 + ], + "spans": [ + { + "bbox": [ + 141, + 334, + 469, + 346 + ], + "score": 1.0, + "content": "We prove almost-sure convergence of the iterates to the solution and a convergence", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 141, + 345, + 469, + 356 + ], + "spans": [ + { + "bbox": [ + 141, + 345, + 469, + 356 + ], + "score": 1.0, + "content": "rate for the expected residual. By working with monotone inclusions rather than", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 141, + 355, + 470, + 368 + ], + "spans": [ + { + "bbox": [ + 141, + 355, + 470, + 368 + ], + "score": 1.0, + "content": "variational inequalities, our analysis avoids the drawbacks of measuring conver-", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 141, + 367, + 469, + 379 + ], + "spans": [ + { + "bbox": [ + 141, + 367, + 469, + 379 + ], + "score": 1.0, + "content": "gence through the restricted gap function. 
We close with numerical experiments on", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 141, + 378, + 380, + 390 + ], + "spans": [ + { + "bbox": [ + 141, + 378, + 380, + 390 + ], + "score": 1.0, + "content": "a distributionally robust sparse logistic regression problem.", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 12.5, + "bbox_fs": [ + 141, + 235, + 471, + 390 + ] + }, + { + "type": "title", + "bbox": [ + 108, + 417, + 206, + 430 + ], + "lines": [ + { + "bbox": [ + 105, + 416, + 208, + 433 + ], + "spans": [ + { + "bbox": [ + 105, + 416, + 208, + 433 + ], + "score": 1.0, + "content": "1 INTRODUCTION", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 20 + }, + { + "type": "text", + "bbox": [ + 107, + 445, + 505, + 544 + ], + "lines": [ + { + "bbox": [ + 105, + 444, + 507, + 459 + ], + "spans": [ + { + "bbox": [ + 105, + 444, + 507, + 459 + ], + "score": 1.0, + "content": "The most prominent application of optimization in ML is empirical risk minimization. However,", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 106, + 456, + 506, + 469 + ], + "spans": [ + { + "bbox": [ + 106, + 456, + 506, + 469 + ], + "score": 1.0, + "content": "inspired by the success of GANs (Goodfellow et al., 2014). 
, ML practitioners have developed more", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 106, + 467, + 506, + 480 + ], + "spans": [ + { + "bbox": [ + 106, + 467, + 506, + 480 + ], + "score": 1.0, + "content": "complicated min-max and adversarial optimization formulations (Yu et al., 2021; Kuhn et al., 2019;", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 478, + 506, + 491 + ], + "spans": [ + { + "bbox": [ + 106, + 478, + 506, + 491 + ], + "score": 1.0, + "content": "Shafieezadeh-Abadeh et al., 2015; Sinha et al., 2018; Lin et al., 2020; Namkoong & Duchi, 2016;", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 489, + 506, + 503 + ], + "spans": [ + { + "bbox": [ + 105, + 489, + 506, + 503 + ], + "score": 1.0, + "content": "Huang et al., 2017; Wadsworth et al., 2018; Zhang et al., 2018; Edwards & Storkey, 2015; Celis", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 499, + 506, + 514 + ], + "spans": [ + { + "bbox": [ + 105, + 499, + 506, + 514 + ], + "score": 1.0, + "content": "& Keswani, 2019). Solving these multi-player games leads to issues not seen when minimizing a", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 511, + 506, + 524 + ], + "spans": [ + { + "bbox": [ + 105, + 511, + 506, + 524 + ], + "score": 1.0, + "content": "single-player loss function. 
The competitive nature of a game leads to rotational dynamics that can", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 106, + 522, + 506, + 536 + ], + "spans": [ + { + "bbox": [ + 106, + 522, + 506, + 536 + ], + "score": 1.0, + "content": "cause intuitive gradient-based methods to fail to converge (Gidel et al., 2019; Daskalakis et al., 2018;", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 533, + 183, + 545 + ], + "spans": [ + { + "bbox": [ + 106, + 533, + 183, + 545 + ], + "score": 1.0, + "content": "Hsieh et al., 2020).", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 25, + "bbox_fs": [ + 105, + 444, + 507, + 545 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 550, + 505, + 638 + ], + "lines": [ + { + "bbox": [ + 105, + 550, + 505, + 563 + ], + "spans": [ + { + "bbox": [ + 105, + 550, + 505, + 563 + ], + "score": 1.0, + "content": "A mathematical framework underlying both convex optimization and saddle-point problems is the", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 562, + 505, + 573 + ], + "spans": [ + { + "bbox": [ + 105, + 562, + 505, + 573 + ], + "score": 1.0, + "content": "monotone inclusion problem; see Ryu & Boyd (2016) for an introduction. Methods developed", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 573, + 505, + 585 + ], + "spans": [ + { + "bbox": [ + 105, + 573, + 505, + 585 + ], + "score": 1.0, + "content": "for monotone inclusions will converge for convex-concave, games as they are explicitly designed", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 583, + 506, + 595 + ], + "spans": [ + { + "bbox": [ + 105, + 583, + 506, + 595 + ], + "score": 1.0, + "content": "to handle such problems’ governing dynamics. 
In recent years, monotone inclusion methods and", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 594, + 506, + 606 + ], + "spans": [ + { + "bbox": [ + 105, + 594, + 506, + 606 + ], + "score": 1.0, + "content": "theory have started to receive attention in the ML community (Diakonikolas, 2020; Liu et al., 2021;", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 604, + 507, + 618 + ], + "spans": [ + { + "bbox": [ + 105, + 604, + 507, + 618 + ], + "score": 1.0, + "content": "Ryu et al., 2020; Pathak & Wainwright, 2020), with a focus on monotone variational inequalities,", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 616, + 506, + 628 + ], + "spans": [ + { + "bbox": [ + 105, + 616, + 506, + 628 + ], + "score": 1.0, + "content": "which form a special case of monotone inclusions (Antonakopoulos et al., 2019; Gidel et al., 2019;", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 627, + 391, + 639 + ], + "spans": [ + { + "bbox": [ + 105, + 627, + 391, + 639 + ], + "score": 1.0, + "content": "Daskalakis et al., 2018; Hsieh et al., 2020; Mertikopoulos et al., 2019).", + "type": "text" + } + ], + "index": 37 + } + ], + "index": 33.5, + "bbox_fs": [ + 105, + 550, + 507, + 639 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 643, + 505, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 643, + 506, + 657 + ], + "spans": [ + { + "bbox": [ + 105, + 643, + 506, + 657 + ], + "score": 1.0, + "content": "The most prevalent methods for solving min-max games in ML are variants of gradient descent-ascent", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 655, + 505, + 667 + ], + "spans": [ + { + "bbox": [ + 105, + 655, + 505, + 667 + ], + "score": 1.0, + "content": "(GDA). 
This method alternates between a gradient-descent step for the minimizing player and a", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 106, + 666, + 505, + 678 + ], + "spans": [ + { + "bbox": [ + 106, + 666, + 505, + 678 + ], + "score": 1.0, + "content": "gradient-ascent step for the maximizing player. Unfortunately, GDA requires additional assumptions", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 677, + 505, + 689 + ], + "spans": [ + { + "bbox": [ + 105, + 677, + 505, + 689 + ], + "score": 1.0, + "content": "to converge on convex-concave games, and it even fails for some simple 2D bilinear games (Gidel", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 688, + 506, + 700 + ], + "spans": [ + { + "bbox": [ + 105, + 688, + 506, + 700 + ], + "score": 1.0, + "content": "et al., 2019, Prop. 1). While there have been several approaches to modify either GDA (Chavdarova", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 104, + 698, + 506, + 712 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 506, + 712 + ], + "score": 1.0, + "content": "et al., 2021; Grnarova et al., 2021; Balduzzi et al., 2018) or the underlying game objective (Mescheder", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 709, + 506, + 722 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 506, + 722 + ], + "score": 1.0, + "content": "et al., 2018; Nagarajan & Kolter, 2017; Mescheder et al., 2017) to ensure convergence, this paper", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 720, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 720, + 506, + 733 + ], + "score": 1.0, + "content": "instead develops a method for solving monotone inclusions that can naturally handle game dynamics.", + "type": "text" + } + ], + "index": 45 + } + ], + "index": 41.5, + "bbox_fs": [ + 104, + 643, + 506, + 733 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 107, + 82, + 
505, + 237 + ], + "lines": [ + { + "bbox": [ + 106, + 82, + 506, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 82, + 506, + 95 + ], + "score": 1.0, + "content": "Our approach builds upon the recently proposed projective splitting (PS) method with forward", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 94, + 506, + 106 + ], + "spans": [ + { + "bbox": [ + 106, + 94, + 506, + 106 + ], + "score": 1.0, + "content": "steps (Johnstone & Eckstein, 2020b). PS is designed specifically for solving monotone inclusions,", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 104, + 507, + 117 + ], + "spans": [ + { + "bbox": [ + 106, + 104, + 507, + 117 + ], + "score": 1.0, + "content": "thus does not fall prey to the convergence issues that plague GDA, at least for convex-concave games.", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 114, + 506, + 129 + ], + "spans": [ + { + "bbox": [ + 105, + 114, + 506, + 129 + ], + "score": 1.0, + "content": "PS is within the general class of projective splitting methods invented by Eckstein & Svaiter (2008)", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 126, + 505, + 139 + ], + "spans": [ + { + "bbox": [ + 105, + 126, + 505, + 139 + ], + "score": 1.0, + "content": "and developed further in Eckstein & Svaiter (2009); Alotaibi et al. (2014); Combettes & Eckstein", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 136, + 506, + 151 + ], + "spans": [ + { + "bbox": [ + 105, + 136, + 506, + 151 + ], + "score": 1.0, + "content": "(2018); Eckstein (2017); Johnstone & Eckstein (2019; 2021; 2020a). 
These methods work by creating", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 148, + 505, + 161 + ], + "spans": [ + { + "bbox": [ + 105, + 148, + 505, + 161 + ], + "score": 1.0, + "content": "a separating hyperplane between the current iterate and the solution and then moving closer to the", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 159, + 506, + 172 + ], + "spans": [ + { + "bbox": [ + 106, + 159, + 506, + 172 + ], + "score": 1.0, + "content": "solution by projecting the current iterate onto this hyperplane (see Section 3 for an overview). Other", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 171, + 505, + 183 + ], + "spans": [ + { + "bbox": [ + 106, + 171, + 505, + 183 + ], + "score": 1.0, + "content": "than being able to natively handle game dynamics, the primary advantage of PS is that it fully splits", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 181, + 505, + 194 + ], + "spans": [ + { + "bbox": [ + 105, + 181, + 505, + 194 + ], + "score": 1.0, + "content": "problems involving an arbitrary number of regularizers and constraints. β€œFull splitting” means that the", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 192, + 505, + 205 + ], + "spans": [ + { + "bbox": [ + 106, + 192, + 505, + 205 + ], + "score": 1.0, + "content": "method can handle multiple regularizers and constraints through their respective individual proximal", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 203, + 506, + 216 + ], + "spans": [ + { + "bbox": [ + 105, + 203, + 506, + 216 + ], + "score": 1.0, + "content": "and projection operators, along with the smooth terms via gradients. 
What makes this useful is that", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 214, + 506, + 227 + ], + "spans": [ + { + "bbox": [ + 106, + 214, + 506, + 227 + ], + "score": 1.0, + "content": "many of the regularizers used in ML have proximal operators that are relatively easy to compute; see", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 225, + 251, + 238 + ], + "spans": [ + { + "bbox": [ + 106, + 225, + 251, + 238 + ], + "score": 1.0, + "content": "for example Parikh & Boyd (2013).", + "type": "text" + } + ], + "index": 13 + } + ], + "index": 6.5 + }, + { + "type": "text", + "bbox": [ + 107, + 241, + 505, + 275 + ], + "lines": [ + { + "bbox": [ + 105, + 241, + 505, + 255 + ], + "spans": [ + { + "bbox": [ + 105, + 241, + 505, + 255 + ], + "score": 1.0, + "content": "Despite these advantages, the preexisting PS framework has a significant drawback: it requires", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 254, + 505, + 264 + ], + "spans": [ + { + "bbox": [ + 106, + 254, + 505, + 264 + ], + "score": 1.0, + "content": "deterministic gradient oracles. 
This feature makes it impractical for application to large datasets for", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 263, + 335, + 276 + ], + "spans": [ + { + "bbox": [ + 105, + 263, + 335, + 276 + ], + "score": 1.0, + "content": "which stochastic oracles may be the only feasible option.", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 15 + }, + { + "type": "text", + "bbox": [ + 107, + 293, + 505, + 327 + ], + "lines": [ + { + "bbox": [ + 106, + 293, + 506, + 306 + ], + "spans": [ + { + "bbox": [ + 106, + 293, + 506, + 306 + ], + "score": 1.0, + "content": "Contributions The primary contribution of this work is a new projective splitting algorithm that", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 106, + 304, + 505, + 317 + ], + "spans": [ + { + "bbox": [ + 106, + 304, + 505, + 317 + ], + "score": 1.0, + "content": "allows for a stochastic gradient oracle. We call the method stochastic projective splitting (SPS). Our", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 106, + 316, + 324, + 328 + ], + "spans": [ + { + "bbox": [ + 106, + 316, + 324, + 328 + ], + "score": 1.0, + "content": "method β€œfully splits” the monotone inclusion problem", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 18 + }, + { + "type": "interline_equation", + "bbox": [ + 216, + 337, + 394, + 353 + ], + "lines": [ + { + "bbox": [ + 216, + 337, + 394, + 353 + ], + "spans": [ + { + "bbox": [ + 216, + 337, + 394, + 353 + ], + "score": 0.89, + "content": "\\begin{array} { r } { \\mathrm { F i n d } z \\in \\mathbb { R } ^ { d } \\mathrm { ~ s . t . 
~ } 0 \\in \\sum _ { i = 1 } ^ { n } A _ { i } ( z ) + B ( z ) , } \\end{array}", + "type": "interline_equation", + "image_path": "c1bd1c0f8dc091a9dfb57cd09b7ddaa5f6e265d93146a4b9d745949e39b5eee6.jpg" + } + ] + } + ], + "index": 20, + "virtual_lines": [ + { + "bbox": [ + 216, + 337, + 394, + 353 + ], + "spans": [], + "index": 20 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 362, + 505, + 516 + ], + "lines": [ + { + "bbox": [ + 105, + 363, + 506, + 375 + ], + "spans": [ + { + "bbox": [ + 105, + 363, + 133, + 375 + ], + "score": 1.0, + "content": "where", + "type": "text" + }, + { + "bbox": [ + 134, + 363, + 143, + 373 + ], + "score": 0.81, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 143, + 363, + 213, + 375 + ], + "score": 1.0, + "content": "is monotone and", + "type": "text" + }, + { + "bbox": [ + 214, + 363, + 221, + 373 + ], + "score": 0.77, + "content": "L", + "type": "inline_equation" + }, + { + "bbox": [ + 222, + 363, + 302, + 375 + ], + "score": 1.0, + "content": "-Lipschitz and each", + "type": "text" + }, + { + "bbox": [ + 302, + 363, + 314, + 374 + ], + "score": 0.89, + "content": "A _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 314, + 363, + 506, + 375 + ], + "score": 1.0, + "content": "is maximal monotone and typically set valued,", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 106, + 374, + 505, + 387 + ], + "spans": [ + { + "bbox": [ + 106, + 374, + 505, + 387 + ], + "score": 1.0, + "content": "usually arising from a constraint or a nonsmooth regularizer in the underlying optimization problem", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 385, + 505, + 397 + ], + "spans": [ + { + "bbox": [ + 105, + 385, + 505, + 397 + ], + "score": 1.0, + "content": "or game; see for example Ryu & Boyd (2016) for definitions. 
For some example ML applications", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 396, + 505, + 408 + ], + "spans": [ + { + "bbox": [ + 105, + 396, + 505, + 408 + ], + "score": 1.0, + "content": "of (1), see Section 2 and Appendix A. Here, an algorithm that β€œfully splits” (1) means one whose", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 407, + 506, + 420 + ], + "spans": [ + { + "bbox": [ + 105, + 407, + 370, + 420 + ], + "score": 1.0, + "content": "computational steps each involve only the individual operators", + "type": "text" + }, + { + "bbox": [ + 370, + 407, + 430, + 418 + ], + "score": 0.92, + "content": "A _ { 1 } , \\ldots , A _ { n } , B", + "type": "inline_equation" + }, + { + "bbox": [ + 430, + 407, + 506, + 420 + ], + "score": 1.0, + "content": ". Ours is the first", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 419, + 506, + 430 + ], + "spans": [ + { + "bbox": [ + 105, + 419, + 506, + 430 + ], + "score": 1.0, + "content": "method that can accomplish full splitting without a product-space reformulation that recasts (1) as a", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 429, + 506, + 442 + ], + "spans": [ + { + "bbox": [ + 105, + 429, + 506, + 442 + ], + "score": 1.0, + "content": "two-operator problem on a higher-dimensional space, a tactic whose disadvantages are discussed in", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 439, + 506, + 452 + ], + "spans": [ + { + "bbox": [ + 105, + 439, + 349, + 452 + ], + "score": 1.0, + "content": "Appendix F.7. Our method interrogates the Lipschitz operator", + "type": "text" + }, + { + "bbox": [ + 349, + 440, + 358, + 450 + ], + "score": 0.81, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 359, + 439, + 506, + 452 + ], + "score": 1.0, + "content": "through a stochastic oracle. 
Previous", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 450, + 506, + 463 + ], + "spans": [ + { + "bbox": [ + 105, + 450, + 392, + 463 + ], + "score": 1.0, + "content": "methods splitting (1) have either required a deterministic oracle for", + "type": "text" + }, + { + "bbox": [ + 392, + 451, + 401, + 460 + ], + "score": 0.81, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 402, + 450, + 506, + 463 + ], + "score": 1.0, + "content": ", or have made far more", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 462, + 506, + 473 + ], + "spans": [ + { + "bbox": [ + 105, + 462, + 506, + 473 + ], + "score": 1.0, + "content": "restrictive assumptions on the noise or the operators (BriceΓ±o-Arias & Combettes, 2011; Combettes &", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 471, + 506, + 485 + ], + "spans": [ + { + "bbox": [ + 105, + 471, + 506, + 485 + ], + "score": 1.0, + "content": "Pesquet, 2012; Malitsky & Tam, 2020; Bot et al., 2019; Van Dung & Vu, 2021) than we will require", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 106, + 483, + 505, + 495 + ], + "spans": [ + { + "bbox": [ + 106, + 483, + 505, + 495 + ], + "score": 1.0, + "content": "below. However, the stochastic methods of Alacaoglu et al. (2021) and BΓΆhm et al. 
(2020), when", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 106, + 495, + 505, + 506 + ], + "spans": [ + { + "bbox": [ + 106, + 495, + 401, + 506 + ], + "score": 1.0, + "content": "combined with a product-space reformulation, can solve (1) when all the", + "type": "text" + }, + { + "bbox": [ + 401, + 495, + 413, + 505 + ], + "score": 0.88, + "content": "A _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 413, + 495, + 505, + 506 + ], + "score": 1.0, + "content": "are subdifferentials of", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 506, + 237, + 516 + ], + "spans": [ + { + "bbox": [ + 105, + 506, + 237, + 516 + ], + "score": 1.0, + "content": "convex functions; see Section 6.", + "type": "text" + } + ], + "index": 34 + } + ], + "index": 27.5 + }, + { + "type": "text", + "bbox": [ + 107, + 522, + 505, + 610 + ], + "lines": [ + { + "bbox": [ + 105, + 522, + 506, + 536 + ], + "spans": [ + { + "bbox": [ + 105, + 522, + 506, + 536 + ], + "score": 1.0, + "content": "When moving away from a deterministic gradient oracle in projective splitting, a key difficulty is that", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 533, + 505, + 545 + ], + "spans": [ + { + "bbox": [ + 105, + 533, + 505, + 545 + ], + "score": 1.0, + "content": "the generated hyperplanes do not guarantee separation between the solution and the current point. 
We", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 544, + 505, + 557 + ], + "spans": [ + { + "bbox": [ + 105, + 544, + 505, + 557 + ], + "score": 1.0, + "content": "solve this issue by relaxing the projection: we only update each iterate in the direction of the noisy", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 556, + 505, + 567 + ], + "spans": [ + { + "bbox": [ + 105, + 556, + 505, + 567 + ], + "score": 1.0, + "content": "projection and scale its movement by a decreasing stepsize that allows for control of the stochastic", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 566, + 506, + 578 + ], + "spans": [ + { + "bbox": [ + 105, + 566, + 506, + 578 + ], + "score": 1.0, + "content": "error. Using the framework of stochastic quasi-FejΓ©r monotonicity (Combettes & Pesquet, 2015),", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 577, + 506, + 590 + ], + "spans": [ + { + "bbox": [ + 105, + 577, + 506, + 590 + ], + "score": 1.0, + "content": "we prove almost-sure convergence of the final iterate and do not require averaging of the iterates", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 588, + 505, + 601 + ], + "spans": [ + { + "bbox": [ + 105, + 588, + 505, + 601 + ], + "score": 1.0, + "content": "(Theorem 1, Section 5). 
We also provide a non-asymptotic convergence rate for the approximation", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 600, + 238, + 612 + ], + "spans": [ + { + "bbox": [ + 105, + 600, + 238, + 612 + ], + "score": 1.0, + "content": "residual (Theorem 2, Section 5).", + "type": "text" + } + ], + "index": 42 + } + ], + "index": 38.5 + }, + { + "type": "text", + "bbox": [ + 107, + 615, + 505, + 682 + ], + "lines": [ + { + "bbox": [ + 106, + 616, + 505, + 628 + ], + "spans": [ + { + "bbox": [ + 106, + 616, + 505, + 628 + ], + "score": 1.0, + "content": "A special case of SPS is the recently-developed Double Stepsize Extragradient Method (DSEG) (Hsieh", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 626, + 505, + 640 + ], + "spans": [ + { + "bbox": [ + 105, + 626, + 184, + 640 + ], + "score": 1.0, + "content": "et al., 2020). When", + "type": "text" + }, + { + "bbox": [ + 185, + 627, + 210, + 637 + ], + "score": 0.9, + "content": "n = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 211, + 626, + 286, + 640 + ], + "score": 1.0, + "content": "and therefore only", + "type": "text" + }, + { + "bbox": [ + 286, + 627, + 295, + 637 + ], + "score": 0.8, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 295, + 626, + 505, + 640 + ], + "score": 1.0, + "content": "is present in (1), DSEG and SPS coincide. Thus, our", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 639, + 505, + 650 + ], + "spans": [ + { + "bbox": [ + 105, + 639, + 505, + 650 + ], + "score": 1.0, + "content": "method extends DSEG to allow for regularizers and constraints. Our analysis also provides a new", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 649, + 505, + 662 + ], + "spans": [ + { + "bbox": [ + 105, + 649, + 505, + 662 + ], + "score": 1.0, + "content": "interpretation for DSEG as a special case of projective splitting. 
Our nonasymptotic convergence rate", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 105, + 659, + 505, + 673 + ], + "spans": [ + { + "bbox": [ + 105, + 659, + 505, + 673 + ], + "score": 1.0, + "content": "for SPS also applies to DSEG under no additional assumptions. By contrast, the original convergence", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 105, + 671, + 419, + 684 + ], + "spans": [ + { + "bbox": [ + 105, + 671, + 419, + 684 + ], + "score": 1.0, + "content": "rate analysis for DSEG requires either strong monotonicity or an error bound.", + "type": "text" + } + ], + "index": 48 + } + ], + "index": 45.5 + }, + { + "type": "text", + "bbox": [ + 107, + 687, + 505, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 687, + 506, + 700 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 506, + 700 + ], + "score": 1.0, + "content": "We close with numerical experiments on a distributionally robust sparse logistic regression problem.", + "type": "text" + } + ], + "index": 49 + }, + { + "bbox": [ + 106, + 699, + 504, + 711 + ], + "spans": [ + { + "bbox": [ + 106, + 699, + 478, + 711 + ], + "score": 1.0, + "content": "This is a nonsmooth convex-concave min-max problem which can be converted to (1) with", + "type": "text" + }, + { + "bbox": [ + 478, + 699, + 504, + 709 + ], + "score": 0.88, + "content": "n = 2", + "type": "inline_equation" + } + ], + "index": 50 + }, + { + "bbox": [ + 105, + 709, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 505, + 723 + ], + "score": 1.0, + "content": "set-valued operators. 
On this problems class, SPS compares well to the possible alternative splitting", + "type": "text" + } + ], + "index": 51 + }, + { + "bbox": [ + 105, + 721, + 145, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 721, + 145, + 732 + ], + "score": 1.0, + "content": "methods.", + "type": "text" + } + ], + "index": 52 + } + ], + "index": 50.5 + } + ], + "page_idx": 1, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 27, + 308, + 37 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 302, + 751, + 309, + 760 + ], + "lines": [ + { + "bbox": [ + 301, + 750, + 310, + 763 + ], + "spans": [ + { + "bbox": [ + 301, + 750, + 310, + 763 + ], + "score": 1.0, + "content": "2", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 107, + 82, + 505, + 237 + ], + "lines": [ + { + "bbox": [ + 106, + 82, + 506, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 82, + 506, + 95 + ], + "score": 1.0, + "content": "Our approach builds upon the recently proposed projective splitting (PS) method with forward", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 94, + 506, + 106 + ], + "spans": [ + { + "bbox": [ + 106, + 94, + 506, + 106 + ], + "score": 1.0, + "content": "steps (Johnstone & Eckstein, 2020b). 
PS is designed specifically for solving monotone inclusions,", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 104, + 507, + 117 + ], + "spans": [ + { + "bbox": [ + 106, + 104, + 507, + 117 + ], + "score": 1.0, + "content": "thus does not fall prey to the convergence issues that plague GDA, at least for convex-concave games.", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 114, + 506, + 129 + ], + "spans": [ + { + "bbox": [ + 105, + 114, + 506, + 129 + ], + "score": 1.0, + "content": "PS is within the general class of projective splitting methods invented by Eckstein & Svaiter (2008)", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 126, + 505, + 139 + ], + "spans": [ + { + "bbox": [ + 105, + 126, + 505, + 139 + ], + "score": 1.0, + "content": "and developed further in Eckstein & Svaiter (2009); Alotaibi et al. (2014); Combettes & Eckstein", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 136, + 506, + 151 + ], + "spans": [ + { + "bbox": [ + 105, + 136, + 506, + 151 + ], + "score": 1.0, + "content": "(2018); Eckstein (2017); Johnstone & Eckstein (2019; 2021; 2020a). These methods work by creating", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 148, + 505, + 161 + ], + "spans": [ + { + "bbox": [ + 105, + 148, + 505, + 161 + ], + "score": 1.0, + "content": "a separating hyperplane between the current iterate and the solution and then moving closer to the", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 159, + 506, + 172 + ], + "spans": [ + { + "bbox": [ + 106, + 159, + 506, + 172 + ], + "score": 1.0, + "content": "solution by projecting the current iterate onto this hyperplane (see Section 3 for an overview). 
Other", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 171, + 505, + 183 + ], + "spans": [ + { + "bbox": [ + 106, + 171, + 505, + 183 + ], + "score": 1.0, + "content": "than being able to natively handle game dynamics, the primary advantage of PS is that it fully splits", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 181, + 505, + 194 + ], + "spans": [ + { + "bbox": [ + 105, + 181, + 505, + 194 + ], + "score": 1.0, + "content": "problems involving an arbitrary number of regularizers and constraints. β€œFull splitting” means that the", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 192, + 505, + 205 + ], + "spans": [ + { + "bbox": [ + 106, + 192, + 505, + 205 + ], + "score": 1.0, + "content": "method can handle multiple regularizers and constraints through their respective individual proximal", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 203, + 506, + 216 + ], + "spans": [ + { + "bbox": [ + 105, + 203, + 506, + 216 + ], + "score": 1.0, + "content": "and projection operators, along with the smooth terms via gradients. 
What makes this useful is that", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 214, + 506, + 227 + ], + "spans": [ + { + "bbox": [ + 106, + 214, + 506, + 227 + ], + "score": 1.0, + "content": "many of the regularizers used in ML have proximal operators that are relatively easy to compute; see", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 225, + 251, + 238 + ], + "spans": [ + { + "bbox": [ + 106, + 225, + 251, + 238 + ], + "score": 1.0, + "content": "for example Parikh & Boyd (2013).", + "type": "text" + } + ], + "index": 13 + } + ], + "index": 6.5, + "bbox_fs": [ + 105, + 82, + 507, + 238 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 241, + 505, + 275 + ], + "lines": [ + { + "bbox": [ + 105, + 241, + 505, + 255 + ], + "spans": [ + { + "bbox": [ + 105, + 241, + 505, + 255 + ], + "score": 1.0, + "content": "Despite these advantages, the preexisting PS framework has a significant drawback: it requires", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 254, + 505, + 264 + ], + "spans": [ + { + "bbox": [ + 106, + 254, + 505, + 264 + ], + "score": 1.0, + "content": "deterministic gradient oracles. 
This feature makes it impractical for application to large datasets for", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 263, + 335, + 276 + ], + "spans": [ + { + "bbox": [ + 105, + 263, + 335, + 276 + ], + "score": 1.0, + "content": "which stochastic oracles may be the only feasible option.", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 15, + "bbox_fs": [ + 105, + 241, + 505, + 276 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 293, + 505, + 327 + ], + "lines": [ + { + "bbox": [ + 106, + 293, + 506, + 306 + ], + "spans": [ + { + "bbox": [ + 106, + 293, + 506, + 306 + ], + "score": 1.0, + "content": "Contributions The primary contribution of this work is a new projective splitting algorithm that", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 106, + 304, + 505, + 317 + ], + "spans": [ + { + "bbox": [ + 106, + 304, + 505, + 317 + ], + "score": 1.0, + "content": "allows for a stochastic gradient oracle. We call the method stochastic projective splitting (SPS). Our", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 106, + 316, + 324, + 328 + ], + "spans": [ + { + "bbox": [ + 106, + 316, + 324, + 328 + ], + "score": 1.0, + "content": "method β€œfully splits” the monotone inclusion problem", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 18, + "bbox_fs": [ + 106, + 293, + 506, + 328 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 216, + 337, + 394, + 353 + ], + "lines": [ + { + "bbox": [ + 216, + 337, + 394, + 353 + ], + "spans": [ + { + "bbox": [ + 216, + 337, + 394, + 353 + ], + "score": 0.89, + "content": "\\begin{array} { r } { \\mathrm { F i n d } z \\in \\mathbb { R } ^ { d } \\mathrm { ~ s . t . 
~ } 0 \\in \\sum _ { i = 1 } ^ { n } A _ { i } ( z ) + B ( z ) , } \\end{array}", + "type": "interline_equation", + "image_path": "c1bd1c0f8dc091a9dfb57cd09b7ddaa5f6e265d93146a4b9d745949e39b5eee6.jpg" + } + ] + } + ], + "index": 20, + "virtual_lines": [ + { + "bbox": [ + 216, + 337, + 394, + 353 + ], + "spans": [], + "index": 20 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 362, + 505, + 516 + ], + "lines": [ + { + "bbox": [ + 105, + 363, + 506, + 375 + ], + "spans": [ + { + "bbox": [ + 105, + 363, + 133, + 375 + ], + "score": 1.0, + "content": "where", + "type": "text" + }, + { + "bbox": [ + 134, + 363, + 143, + 373 + ], + "score": 0.81, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 143, + 363, + 213, + 375 + ], + "score": 1.0, + "content": "is monotone and", + "type": "text" + }, + { + "bbox": [ + 214, + 363, + 221, + 373 + ], + "score": 0.77, + "content": "L", + "type": "inline_equation" + }, + { + "bbox": [ + 222, + 363, + 302, + 375 + ], + "score": 1.0, + "content": "-Lipschitz and each", + "type": "text" + }, + { + "bbox": [ + 302, + 363, + 314, + 374 + ], + "score": 0.89, + "content": "A _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 314, + 363, + 506, + 375 + ], + "score": 1.0, + "content": "is maximal monotone and typically set valued,", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 106, + 374, + 505, + 387 + ], + "spans": [ + { + "bbox": [ + 106, + 374, + 505, + 387 + ], + "score": 1.0, + "content": "usually arising from a constraint or a nonsmooth regularizer in the underlying optimization problem", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 385, + 505, + 397 + ], + "spans": [ + { + "bbox": [ + 105, + 385, + 505, + 397 + ], + "score": 1.0, + "content": "or game; see for example Ryu & Boyd (2016) for definitions. 
For some example ML applications", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 396, + 505, + 408 + ], + "spans": [ + { + "bbox": [ + 105, + 396, + 505, + 408 + ], + "score": 1.0, + "content": "of (1), see Section 2 and Appendix A. Here, an algorithm that β€œfully splits” (1) means one whose", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 407, + 506, + 420 + ], + "spans": [ + { + "bbox": [ + 105, + 407, + 370, + 420 + ], + "score": 1.0, + "content": "computational steps each involve only the individual operators", + "type": "text" + }, + { + "bbox": [ + 370, + 407, + 430, + 418 + ], + "score": 0.92, + "content": "A _ { 1 } , \\ldots , A _ { n } , B", + "type": "inline_equation" + }, + { + "bbox": [ + 430, + 407, + 506, + 420 + ], + "score": 1.0, + "content": ". Ours is the first", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 419, + 506, + 430 + ], + "spans": [ + { + "bbox": [ + 105, + 419, + 506, + 430 + ], + "score": 1.0, + "content": "method that can accomplish full splitting without a product-space reformulation that recasts (1) as a", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 429, + 506, + 442 + ], + "spans": [ + { + "bbox": [ + 105, + 429, + 506, + 442 + ], + "score": 1.0, + "content": "two-operator problem on a higher-dimensional space, a tactic whose disadvantages are discussed in", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 439, + 506, + 452 + ], + "spans": [ + { + "bbox": [ + 105, + 439, + 349, + 452 + ], + "score": 1.0, + "content": "Appendix F.7. Our method interrogates the Lipschitz operator", + "type": "text" + }, + { + "bbox": [ + 349, + 440, + 358, + 450 + ], + "score": 0.81, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 359, + 439, + 506, + 452 + ], + "score": 1.0, + "content": "through a stochastic oracle. 
Previous", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 450, + 506, + 463 + ], + "spans": [ + { + "bbox": [ + 105, + 450, + 392, + 463 + ], + "score": 1.0, + "content": "methods splitting (1) have either required a deterministic oracle for", + "type": "text" + }, + { + "bbox": [ + 392, + 451, + 401, + 460 + ], + "score": 0.81, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 402, + 450, + 506, + 463 + ], + "score": 1.0, + "content": ", or have made far more", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 462, + 506, + 473 + ], + "spans": [ + { + "bbox": [ + 105, + 462, + 506, + 473 + ], + "score": 1.0, + "content": "restrictive assumptions on the noise or the operators (BriceΓ±o-Arias & Combettes, 2011; Combettes &", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 471, + 506, + 485 + ], + "spans": [ + { + "bbox": [ + 105, + 471, + 506, + 485 + ], + "score": 1.0, + "content": "Pesquet, 2012; Malitsky & Tam, 2020; Bot et al., 2019; Van Dung & Vu, 2021) than we will require", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 106, + 483, + 505, + 495 + ], + "spans": [ + { + "bbox": [ + 106, + 483, + 505, + 495 + ], + "score": 1.0, + "content": "below. However, the stochastic methods of Alacaoglu et al. (2021) and BΓΆhm et al. 
(2020), when", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 106, + 495, + 505, + 506 + ], + "spans": [ + { + "bbox": [ + 106, + 495, + 401, + 506 + ], + "score": 1.0, + "content": "combined with a product-space reformulation, can solve (1) when all the", + "type": "text" + }, + { + "bbox": [ + 401, + 495, + 413, + 505 + ], + "score": 0.88, + "content": "A _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 413, + 495, + 505, + 506 + ], + "score": 1.0, + "content": "are subdifferentials of", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 506, + 237, + 516 + ], + "spans": [ + { + "bbox": [ + 105, + 506, + 237, + 516 + ], + "score": 1.0, + "content": "convex functions; see Section 6.", + "type": "text" + } + ], + "index": 34 + } + ], + "index": 27.5, + "bbox_fs": [ + 105, + 363, + 506, + 516 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 522, + 505, + 610 + ], + "lines": [ + { + "bbox": [ + 105, + 522, + 506, + 536 + ], + "spans": [ + { + "bbox": [ + 105, + 522, + 506, + 536 + ], + "score": 1.0, + "content": "When moving away from a deterministic gradient oracle in projective splitting, a key difficulty is that", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 533, + 505, + 545 + ], + "spans": [ + { + "bbox": [ + 105, + 533, + 505, + 545 + ], + "score": 1.0, + "content": "the generated hyperplanes do not guarantee separation between the solution and the current point. 
We", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 544, + 505, + 557 + ], + "spans": [ + { + "bbox": [ + 105, + 544, + 505, + 557 + ], + "score": 1.0, + "content": "solve this issue by relaxing the projection: we only update each iterate in the direction of the noisy", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 556, + 505, + 567 + ], + "spans": [ + { + "bbox": [ + 105, + 556, + 505, + 567 + ], + "score": 1.0, + "content": "projection and scale its movement by a decreasing stepsize that allows for control of the stochastic", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 566, + 506, + 578 + ], + "spans": [ + { + "bbox": [ + 105, + 566, + 506, + 578 + ], + "score": 1.0, + "content": "error. Using the framework of stochastic quasi-FejΓ©r monotonicity (Combettes & Pesquet, 2015),", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 577, + 506, + 590 + ], + "spans": [ + { + "bbox": [ + 105, + 577, + 506, + 590 + ], + "score": 1.0, + "content": "we prove almost-sure convergence of the final iterate and do not require averaging of the iterates", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 588, + 505, + 601 + ], + "spans": [ + { + "bbox": [ + 105, + 588, + 505, + 601 + ], + "score": 1.0, + "content": "(Theorem 1, Section 5). 
We also provide a non-asymptotic convergence rate for the approximation", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 600, + 238, + 612 + ], + "spans": [ + { + "bbox": [ + 105, + 600, + 238, + 612 + ], + "score": 1.0, + "content": "residual (Theorem 2, Section 5).", + "type": "text" + } + ], + "index": 42 + } + ], + "index": 38.5, + "bbox_fs": [ + 105, + 522, + 506, + 612 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 615, + 505, + 682 + ], + "lines": [ + { + "bbox": [ + 106, + 616, + 505, + 628 + ], + "spans": [ + { + "bbox": [ + 106, + 616, + 505, + 628 + ], + "score": 1.0, + "content": "A special case of SPS is the recently-developed Double Stepsize Extragradient Method (DSEG) (Hsieh", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 626, + 505, + 640 + ], + "spans": [ + { + "bbox": [ + 105, + 626, + 184, + 640 + ], + "score": 1.0, + "content": "et al., 2020). When", + "type": "text" + }, + { + "bbox": [ + 185, + 627, + 210, + 637 + ], + "score": 0.9, + "content": "n = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 211, + 626, + 286, + 640 + ], + "score": 1.0, + "content": "and therefore only", + "type": "text" + }, + { + "bbox": [ + 286, + 627, + 295, + 637 + ], + "score": 0.8, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 295, + 626, + 505, + 640 + ], + "score": 1.0, + "content": "is present in (1), DSEG and SPS coincide. Thus, our", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 639, + 505, + 650 + ], + "spans": [ + { + "bbox": [ + 105, + 639, + 505, + 650 + ], + "score": 1.0, + "content": "method extends DSEG to allow for regularizers and constraints. Our analysis also provides a new", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 649, + 505, + 662 + ], + "spans": [ + { + "bbox": [ + 105, + 649, + 505, + 662 + ], + "score": 1.0, + "content": "interpretation for DSEG as a special case of projective splitting. 
Our nonasymptotic convergence rate", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 105, + 659, + 505, + 673 + ], + "spans": [ + { + "bbox": [ + 105, + 659, + 505, + 673 + ], + "score": 1.0, + "content": "for SPS also applies to DSEG under no additional assumptions. By contrast, the original convergence", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 105, + 671, + 419, + 684 + ], + "spans": [ + { + "bbox": [ + 105, + 671, + 419, + 684 + ], + "score": 1.0, + "content": "rate analysis for DSEG requires either strong monotonicity or an error bound.", + "type": "text" + } + ], + "index": 48 + } + ], + "index": 45.5, + "bbox_fs": [ + 105, + 616, + 505, + 684 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 687, + 505, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 687, + 506, + 700 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 506, + 700 + ], + "score": 1.0, + "content": "We close with numerical experiments on a distributionally robust sparse logistic regression problem.", + "type": "text" + } + ], + "index": 49 + }, + { + "bbox": [ + 106, + 699, + 504, + 711 + ], + "spans": [ + { + "bbox": [ + 106, + 699, + 478, + 711 + ], + "score": 1.0, + "content": "This is a nonsmooth convex-concave min-max problem which can be converted to (1) with", + "type": "text" + }, + { + "bbox": [ + 478, + 699, + 504, + 709 + ], + "score": 0.88, + "content": "n = 2", + "type": "inline_equation" + } + ], + "index": 50 + }, + { + "bbox": [ + 105, + 709, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 505, + 723 + ], + "score": 1.0, + "content": "set-valued operators. 
On this problems class, SPS compares well to the possible alternative splitting", + "type": "text" + } + ], + "index": 51 + }, + { + "bbox": [ + 105, + 721, + 145, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 721, + 145, + 732 + ], + "score": 1.0, + "content": "methods.", + "type": "text" + } + ], + "index": 52 + } + ], + "index": 50.5, + "bbox_fs": [ + 105, + 687, + 506, + 732 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 107, + 82, + 505, + 149 + ], + "lines": [ + { + "bbox": [ + 105, + 82, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 505, + 95 + ], + "score": 1.0, + "content": "Non-monotone problems The work of Hsieh et al. (2020) included a local convergence analysis", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 93, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 93, + 505, + 106 + ], + "score": 1.0, + "content": "for DSEG applied to locally monotone problems. For min-max problems, if the objective is locally", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 104, + 505, + 117 + ], + "spans": [ + { + "bbox": [ + 105, + 104, + 505, + 117 + ], + "score": 1.0, + "content": "convex-concave at a solution and DSEG is initialized in close proximity, then for small enough", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 115, + 506, + 127 + ], + "spans": [ + { + "bbox": [ + 105, + 115, + 506, + 127 + ], + "score": 1.0, + "content": "stepsizes it converges to the solution with high probability. It is possible to extend this result to SPS,", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 126, + 505, + 139 + ], + "spans": [ + { + "bbox": [ + 106, + 126, + 505, + 139 + ], + "score": 1.0, + "content": "along with our convergence rate analysis. 
This result is beyond the scope of this work, but Appendix", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 137, + 211, + 149 + ], + "spans": [ + { + "bbox": [ + 105, + 137, + 211, + 149 + ], + "score": 1.0, + "content": "J provides a proof sketch.", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 2.5 + }, + { + "type": "title", + "bbox": [ + 107, + 164, + 349, + 177 + ], + "lines": [ + { + "bbox": [ + 104, + 163, + 351, + 179 + ], + "spans": [ + { + "bbox": [ + 104, + 163, + 351, + 179 + ], + "score": 1.0, + "content": "2 BACKGROUND ON MONOTONE INCLUSIONS", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 6 + }, + { + "type": "text", + "bbox": [ + 107, + 187, + 505, + 244 + ], + "lines": [ + { + "bbox": [ + 106, + 189, + 506, + 201 + ], + "spans": [ + { + "bbox": [ + 106, + 189, + 506, + 201 + ], + "score": 1.0, + "content": "Since they are so important to SPS, this section provides some background material regarding mono-", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 199, + 505, + 211 + ], + "spans": [ + { + "bbox": [ + 106, + 199, + 505, + 211 + ], + "score": 1.0, + "content": "tone inclusions, along with their connections to convex optimization, games, and ML. Appendix G", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 210, + 506, + 222 + ], + "spans": [ + { + "bbox": [ + 106, + 210, + 506, + 222 + ], + "score": 1.0, + "content": "discusses their connections to variational inequalities. For a more thorough treatment, we refer", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 221, + 506, + 235 + ], + "spans": [ + { + "bbox": [ + 105, + 221, + 506, + 235 + ], + "score": 1.0, + "content": "to Bauschke & Combettes (2017). 
See Appendix A for a longer discussion of the applications of", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 232, + 336, + 245 + ], + "spans": [ + { + "bbox": [ + 106, + 232, + 336, + 245 + ], + "score": 1.0, + "content": "monotone inclusions to ML along with several examples.", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 9 + }, + { + "type": "text", + "bbox": [ + 106, + 254, + 505, + 289 + ], + "lines": [ + { + "bbox": [ + 106, + 254, + 505, + 267 + ], + "spans": [ + { + "bbox": [ + 106, + 254, + 194, + 267 + ], + "score": 1.0, + "content": "Fundamentals Let", + "type": "text" + }, + { + "bbox": [ + 195, + 254, + 283, + 267 + ], + "score": 0.91, + "content": "f : \\mathbb { R } ^ { d } \\mathbb { R } \\cup \\{ \\infty \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 284, + 254, + 505, + 267 + ], + "score": 1.0, + "content": "be closed, convex, and proper (CCP). Recall that its", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 265, + 505, + 280 + ], + "spans": [ + { + "bbox": [ + 105, + 265, + 168, + 280 + ], + "score": 1.0, + "content": "subdifferential", + "type": "text" + }, + { + "bbox": [ + 169, + 266, + 182, + 278 + ], + "score": 0.88, + "content": "\\partial f", + "type": "inline_equation" + }, + { + "bbox": [ + 182, + 265, + 232, + 280 + ], + "score": 1.0, + "content": "is given by", + "type": "text" + }, + { + "bbox": [ + 233, + 266, + 413, + 279 + ], + "score": 0.92, + "content": "\\partial f ( x ) \\ { \\overset { \\cdot } { = } } \\ \\left\\{ g : f ( y ) \\geq f ( x ) + g ^ { \\top } { \\big ( } { \\bar { y - x } } { \\big ) } \\right\\}", + "type": "inline_equation" + }, + { + "bbox": [ + 414, + 265, + 458, + 280 + ], + "score": 1.0, + "content": ". 
The map", + "type": "text" + }, + { + "bbox": [ + 458, + 266, + 471, + 278 + ], + "score": 0.88, + "content": "\\partial f", + "type": "inline_equation" + }, + { + "bbox": [ + 471, + 265, + 505, + 280 + ], + "score": 1.0, + "content": "has the", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 278, + 143, + 291 + ], + "spans": [ + { + "bbox": [ + 105, + 278, + 143, + 291 + ], + "score": 1.0, + "content": "property", + "type": "text" + } + ], + "index": 14 + } + ], + "index": 13 + }, + { + "type": "interline_equation", + "bbox": [ + 201, + 290, + 408, + 304 + ], + "lines": [ + { + "bbox": [ + 201, + 290, + 408, + 304 + ], + "spans": [ + { + "bbox": [ + 201, + 290, + 408, + 304 + ], + "score": 0.87, + "content": "u \\in \\partial f ( x ) , v \\in \\partial f ( y ) \\implies ( u - v ) ^ { \\top } ( x - y ) \\geq 0 ,", + "type": "interline_equation", + "image_path": "e5d9121f2c1a799e9c53e9c729f315d541087dc8bc4ecf0c54171c706b49f450.jpg" + } + ] + } + ], + "index": 15, + "virtual_lines": [ + { + "bbox": [ + 201, + 290, + 408, + 304 + ], + "spans": [], + "index": 15 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 305, + 505, + 394 + ], + "lines": [ + { + "bbox": [ + 105, + 304, + 506, + 318 + ], + "spans": [ + { + "bbox": [ + 105, + 304, + 506, + 318 + ], + "score": 1.0, + "content": "and any point-to-set map having this property is called a monotone operator. 
A monotone operator", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 315, + 504, + 329 + ], + "spans": [ + { + "bbox": [ + 106, + 317, + 115, + 326 + ], + "score": 0.8, + "content": "T", + "type": "inline_equation" + }, + { + "bbox": [ + 115, + 315, + 415, + 329 + ], + "score": 1.0, + "content": "is called maximal if no additional points can be included in the image", + "type": "text" + }, + { + "bbox": [ + 415, + 316, + 437, + 328 + ], + "score": 0.91, + "content": "T ( x )", + "type": "inline_equation" + }, + { + "bbox": [ + 437, + 315, + 469, + 329 + ], + "score": 1.0, + "content": "of any", + "type": "text" + }, + { + "bbox": [ + 469, + 316, + 504, + 326 + ], + "score": 0.9, + "content": "\\boldsymbol { x } ^ { \\mathrm { ~ \\scriptsize ~ \\in ~ } \\mathbb { R } ^ { d } }", + "type": "inline_equation" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 326, + 506, + 340 + ], + "spans": [ + { + "bbox": [ + 105, + 326, + 506, + 340 + ], + "score": 1.0, + "content": "without violating the above property (Bauschke & Combettes, 2017, Def. 20.20). Subgradient maps", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 106, + 338, + 505, + 350 + ], + "spans": [ + { + "bbox": [ + 106, + 338, + 487, + 350 + ], + "score": 1.0, + "content": "of CCP functions are maximal (Bauschke & Combettes, 2017, Thm. 20.25). 
A minimizer of", + "type": "text" + }, + { + "bbox": [ + 487, + 339, + 494, + 349 + ], + "score": 0.86, + "content": "f", + "type": "inline_equation" + }, + { + "bbox": [ + 495, + 338, + 505, + 350 + ], + "score": 1.0, + "content": "is", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 348, + 505, + 362 + ], + "spans": [ + { + "bbox": [ + 105, + 348, + 123, + 362 + ], + "score": 1.0, + "content": "any", + "type": "text" + }, + { + "bbox": [ + 124, + 349, + 135, + 359 + ], + "score": 0.86, + "content": "x ^ { * }", + "type": "inline_equation" + }, + { + "bbox": [ + 136, + 348, + 177, + 362 + ], + "score": 1.0, + "content": "such that", + "type": "text" + }, + { + "bbox": [ + 177, + 349, + 226, + 361 + ], + "score": 0.93, + "content": "0 \\in \\partial f ( x ^ { * } )", + "type": "inline_equation" + }, + { + "bbox": [ + 226, + 348, + 505, + 362 + ], + "score": 1.0, + "content": ". This is perhaps the simplest example of a monotone inclusion, the", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 360, + 505, + 372 + ], + "spans": [ + { + "bbox": [ + 106, + 360, + 186, + 372 + ], + "score": 1.0, + "content": "problem of finding", + "type": "text" + }, + { + "bbox": [ + 187, + 362, + 194, + 370 + ], + "score": 0.77, + "content": "x", + "type": "inline_equation" + }, + { + "bbox": [ + 194, + 360, + 235, + 372 + ], + "score": 1.0, + "content": "such that", + "type": "text" + }, + { + "bbox": [ + 236, + 360, + 276, + 372 + ], + "score": 0.93, + "content": "0 \\in T ( x )", + "type": "inline_equation" + }, + { + "bbox": [ + 276, + 360, + 308, + 372 + ], + "score": 1.0, + "content": ", where", + "type": "text" + }, + { + "bbox": [ + 309, + 361, + 317, + 370 + ], + "score": 0.83, + "content": "T", + "type": "inline_equation" + }, + { + "bbox": [ + 318, + 360, + 430, + 372 + ], + "score": 1.0, + "content": "is a monotone operator. 
If", + "type": "text" + }, + { + "bbox": [ + 431, + 361, + 438, + 372 + ], + "score": 0.87, + "content": "f", + "type": "inline_equation" + }, + { + "bbox": [ + 438, + 360, + 505, + 372 + ], + "score": 1.0, + "content": "is smooth, then", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 107, + 370, + 506, + 385 + ], + "spans": [ + { + "bbox": [ + 107, + 371, + 184, + 383 + ], + "score": 0.92, + "content": "\\bar { \\partial } f ( x ) = \\{ \\nabla f ( x ) \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 184, + 370, + 212, + 385 + ], + "score": 1.0, + "content": "for all", + "type": "text" + }, + { + "bbox": [ + 213, + 373, + 219, + 381 + ], + "score": 0.74, + "content": "x", + "type": "inline_equation" + }, + { + "bbox": [ + 219, + 370, + 338, + 385 + ], + "score": 1.0, + "content": ", and the monotone inclusion", + "type": "text" + }, + { + "bbox": [ + 338, + 371, + 381, + 383 + ], + "score": 0.93, + "content": "0 \\in \\partial f ( x )", + "type": "inline_equation" + }, + { + "bbox": [ + 382, + 370, + 506, + 385 + ], + "score": 1.0, + "content": "is equivalent to the first-order", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 382, + 241, + 396 + ], + "spans": [ + { + "bbox": [ + 105, + 382, + 190, + 396 + ], + "score": 1.0, + "content": "optimality condition", + "type": "text" + }, + { + "bbox": [ + 191, + 382, + 237, + 394 + ], + "score": 0.92, + "content": "0 = \\nabla f ( x )", + "type": "inline_equation" + }, + { + "bbox": [ + 237, + 382, + 241, + 396 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 23 + } + ], + "index": 19.5 + }, + { + "type": "text", + "bbox": [ + 107, + 398, + 504, + 432 + ], + "lines": [ + { + "bbox": [ + 106, + 398, + 506, + 411 + ], + "spans": [ + { + "bbox": [ + 106, + 398, + 506, + 411 + ], + "score": 1.0, + "content": "Under certain regularity conditions (Bauschke & Combettes, 2017, Cor. 
16.5), minimizing a sum of", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 408, + 506, + 423 + ], + "spans": [ + { + "bbox": [ + 105, + 408, + 168, + 423 + ], + "score": 1.0, + "content": "CCP functions", + "type": "text" + }, + { + "bbox": [ + 168, + 410, + 211, + 421 + ], + "score": 0.93, + "content": "f _ { 1 } , \\ldots , f _ { n }", + "type": "inline_equation" + }, + { + "bbox": [ + 211, + 408, + 506, + 423 + ], + "score": 1.0, + "content": "is equivalent to solving the monotone inclusion formed from the sum of", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 106, + 420, + 195, + 432 + ], + "spans": [ + { + "bbox": [ + 106, + 420, + 195, + 432 + ], + "score": 1.0, + "content": "their subdifferentials:", + "type": "text" + } + ], + "index": 26 + } + ], + "index": 25 + }, + { + "type": "interline_equation", + "bbox": [ + 207, + 431, + 403, + 464 + ], + "lines": [ + { + "bbox": [ + 207, + 431, + 403, + 464 + ], + "spans": [ + { + "bbox": [ + 207, + 431, + 403, + 464 + ], + "score": 0.93, + "content": "x ^ { * } \\in \\underset { x \\in \\mathbb { R } ^ { d } } { \\arg \\operatorname* { m i n } } \\sum _ { i = 1 } ^ { n } f _ { i } ( x ) \\iff 0 \\in \\sum _ { i = 1 } ^ { n } \\partial f _ { i } ( x ^ { * } ) .", + "type": "interline_equation", + "image_path": "14ae43db097d468948ccb4029a45248686ecdb8144b85eee0324894fd7a0d72e.jpg" + } + ] + } + ], + "index": 27.5, + "virtual_lines": [ + { + "bbox": [ + 207, + 431, + 403, + 447.5 + ], + "spans": [], + "index": 27 + }, + { + "bbox": [ + 207, + 447.5, + 403, + 464.0 + ], + "spans": [], + "index": 28 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 465, + 505, + 543 + ], + "lines": [ + { + "bbox": [ + 106, + 465, + 506, + 477 + ], + "spans": [ + { + "bbox": [ + 106, + 465, + 506, + 477 + ], + "score": 1.0, + "content": "As throughout this paper for all set addition operations, the summation on the right-hand side of (2)", + "type": "text" + } + ], + "index": 29 + }, + { + 
"bbox": [ + 101, + 470, + 509, + 493 + ], + "spans": [ + { + "bbox": [ + 101, + 470, + 196, + 493 + ], + "score": 1.0, + "content": "is the Minkowski sum", + "type": "text" + }, + { + "bbox": [ + 196, + 475, + 369, + 488 + ], + "score": 0.91, + "content": "\\textstyle \\sum _ { i = 1 } ^ { n } S _ { i } = \\{ \\sum _ { i = 1 } ^ { n } s _ { i } \\ | ^ { \\cdot } s _ { i } \\in S _ { i } \\forall i \\in { 1 . . n } \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 370, + 470, + 441, + 493 + ], + "score": 1.0, + "content": ". For a convex set", + "type": "text" + }, + { + "bbox": [ + 442, + 477, + 452, + 486 + ], + "score": 0.81, + "content": "X", + "type": "inline_equation" + }, + { + "bbox": [ + 452, + 470, + 509, + 493 + ], + "score": 1.0, + "content": ", a constraint", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 107, + 485, + 507, + 501 + ], + "spans": [ + { + "bbox": [ + 107, + 488, + 133, + 497 + ], + "score": 0.88, + "content": "x \\in C", + "type": "inline_equation" + }, + { + "bbox": [ + 133, + 485, + 214, + 501 + ], + "score": 1.0, + "content": "for some convex set", + "type": "text" + }, + { + "bbox": [ + 214, + 488, + 223, + 497 + ], + "score": 0.83, + "content": "C", + "type": "inline_equation" + }, + { + "bbox": [ + 223, + 485, + 372, + 501 + ], + "score": 1.0, + "content": "may be imposed by setting one of the", + "type": "text" + }, + { + "bbox": [ + 372, + 487, + 382, + 498 + ], + "score": 0.88, + "content": "f _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 382, + 485, + 491, + 501 + ], + "score": 1.0, + "content": "to be the indicator function", + "type": "text" + }, + { + "bbox": [ + 491, + 489, + 502, + 498 + ], + "score": 0.78, + "content": "\\iota _ { C }", + "type": "inline_equation" + }, + { + "bbox": [ + 502, + 485, + 507, + 501 + ], + "score": 1.0, + "content": ",", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 106, + 497, + 506, + 510 + ], + "spans": [ + { + "bbox": [ + 106, + 497, + 
151, + 510 + ], + "score": 1.0, + "content": "defined by", + "type": "text" + }, + { + "bbox": [ + 151, + 498, + 194, + 510 + ], + "score": 0.92, + "content": "\\iota _ { C } ( x ) = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 195, + 497, + 209, + 510 + ], + "score": 1.0, + "content": "for", + "type": "text" + }, + { + "bbox": [ + 210, + 498, + 236, + 508 + ], + "score": 0.9, + "content": "x \\in C", + "type": "inline_equation" + }, + { + "bbox": [ + 237, + 497, + 255, + 510 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 255, + 497, + 311, + 509 + ], + "score": 0.92, + "content": "\\iota _ { C } \\bar { ( } x ) = \\dot { + } \\infty", + "type": "inline_equation" + }, + { + "bbox": [ + 311, + 497, + 326, + 510 + ], + "score": 1.0, + "content": "for", + "type": "text" + }, + { + "bbox": [ + 326, + 498, + 353, + 509 + ], + "score": 0.91, + "content": "x \\not \\in C", + "type": "inline_equation" + }, + { + "bbox": [ + 353, + 497, + 506, + 510 + ], + "score": 1.0, + "content": ". Indicator functions of closed convex", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 509, + 505, + 521 + ], + "spans": [ + { + "bbox": [ + 105, + 509, + 432, + 521 + ], + "score": 1.0, + "content": "sets are CCP (Bauschke & Combettes, 2017, Ex. 
1.25), and the subgradient map of", + "type": "text" + }, + { + "bbox": [ + 432, + 510, + 443, + 520 + ], + "score": 0.85, + "content": "\\iota _ { C }", + "type": "inline_equation" + }, + { + "bbox": [ + 444, + 509, + 505, + 521 + ], + "score": 1.0, + "content": "is also referred", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 519, + 506, + 532 + ], + "spans": [ + { + "bbox": [ + 105, + 519, + 213, + 532 + ], + "score": 1.0, + "content": "to as the normal cone map", + "type": "text" + }, + { + "bbox": [ + 213, + 520, + 229, + 531 + ], + "score": 0.89, + "content": "N _ { C }", + "type": "inline_equation" + }, + { + "bbox": [ + 229, + 519, + 241, + 532 + ], + "score": 1.0, + "content": "of", + "type": "text" + }, + { + "bbox": [ + 241, + 520, + 250, + 530 + ], + "score": 0.82, + "content": "C", + "type": "inline_equation" + }, + { + "bbox": [ + 251, + 519, + 506, + 532 + ], + "score": 1.0, + "content": "(Bauschke & Combettes, 2017, Def. 6.37). Multiple constraints", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 106, + 531, + 367, + 543 + ], + "spans": [ + { + "bbox": [ + 106, + 531, + 367, + 543 + ], + "score": 1.0, + "content": "may be imposed by including multiple indicator functions in (2).", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 32 + }, + { + "type": "text", + "bbox": [ + 106, + 553, + 505, + 598 + ], + "lines": [ + { + "bbox": [ + 105, + 553, + 506, + 567 + ], + "spans": [ + { + "bbox": [ + 105, + 553, + 506, + 567 + ], + "score": 1.0, + "content": "ML applications The form (2) can be used to model ML problems with multiple constraints and/or", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 564, + 506, + 577 + ], + "spans": [ + { + "bbox": [ + 105, + 564, + 506, + 577 + ], + "score": 1.0, + "content": "nonsmooth regularizers, including sparse and overlapping group lasso (Jacob et al., 2009), sparse and", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 106, + 
576, + 506, + 587 + ], + "spans": [ + { + "bbox": [ + 106, + 576, + 506, + 587 + ], + "score": 1.0, + "content": "low-rank matrix estimation problems (Richard et al., 2012), and rare feature selection (Yan & Bien,", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 106, + 586, + 325, + 599 + ], + "spans": [ + { + "bbox": [ + 106, + 586, + 325, + 599 + ], + "score": 1.0, + "content": "2020); see Pedregosa & Gidel (2018) for an overview.", + "type": "text" + } + ], + "index": 39 + } + ], + "index": 37.5 + }, + { + "type": "text", + "bbox": [ + 106, + 609, + 505, + 643 + ], + "lines": [ + { + "bbox": [ + 106, + 609, + 505, + 622 + ], + "spans": [ + { + "bbox": [ + 106, + 609, + 505, + 622 + ], + "score": 1.0, + "content": "Games Consider a two-player noncooperative game in which each player tries to selfishly minimize", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 106, + 620, + 505, + 632 + ], + "spans": [ + { + "bbox": [ + 106, + 620, + 505, + 632 + ], + "score": 1.0, + "content": "its own loss, with each loss depending on the actions of both players. 
Typically, the goal is to find a", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 630, + 446, + 646 + ], + "spans": [ + { + "bbox": [ + 105, + 630, + 446, + 646 + ], + "score": 1.0, + "content": "Nash equilibrium, in which neither player can improve its loss by changing strategy:", + "type": "text" + } + ], + "index": 42 + } + ], + "index": 41 + }, + { + "type": "interline_equation", + "bbox": [ + 192, + 644, + 417, + 665 + ], + "lines": [ + { + "bbox": [ + 192, + 644, + 417, + 665 + ], + "spans": [ + { + "bbox": [ + 192, + 644, + 417, + 665 + ], + "score": 0.9, + "content": "x ^ { * } \\in \\arg \\operatorname* { m i n } _ { x \\in \\Theta } F ( x , y ^ { * } ) \\quad { \\mathrm { a n d } } \\quad y ^ { * } \\in \\arg \\operatorname* { m i n } _ { y \\in \\Omega } G ( x ^ { * } , y ) .", + "type": "interline_equation", + "image_path": "066c4967ce6cb2c152fa3e359cdecbd6b06a5ad13d851c8b82eaa76e4bf72cfe.jpg" + } + ] + } + ], + "index": 43, + "virtual_lines": [ + { + "bbox": [ + 192, + 644, + 417, + 665 + ], + "spans": [], + "index": 43 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 668, + 505, + 702 + ], + "lines": [ + { + "bbox": [ + 105, + 667, + 506, + 681 + ], + "spans": [ + { + "bbox": [ + 105, + 667, + 281, + 681 + ], + "score": 1.0, + "content": "Assuming that the admissible strategy sets", + "type": "text" + }, + { + "bbox": [ + 281, + 667, + 319, + 680 + ], + "score": 0.92, + "content": "\\Theta \\subseteq \\mathbb { R } ^ { d _ { x } }", + "type": "inline_equation" + }, + { + "bbox": [ + 320, + 667, + 338, + 681 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 339, + 667, + 376, + 680 + ], + "score": 0.93, + "content": "\\Omega \\subseteq \\mathbb { R } ^ { d _ { y } }", + "type": "inline_equation" + }, + { + "bbox": [ + 376, + 667, + 506, + 681 + ], + "score": 1.0, + "content": "are closed and convex and that", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 107, + 678, + 505, + 
692 + ], + "spans": [ + { + "bbox": [ + 107, + 680, + 115, + 689 + ], + "score": 0.84, + "content": "F", + "type": "inline_equation" + }, + { + "bbox": [ + 116, + 678, + 134, + 692 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 134, + 680, + 143, + 690 + ], + "score": 0.83, + "content": "G", + "type": "inline_equation" + }, + { + "bbox": [ + 144, + 678, + 505, + 692 + ], + "score": 1.0, + "content": "are differentiable, then writing the first-order necessary conditions for each optimization", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 690, + 193, + 703 + ], + "spans": [ + { + "bbox": [ + 105, + 690, + 193, + 703 + ], + "score": 1.0, + "content": "problem in (3) yields", + "type": "text" + } + ], + "index": 46 + } + ], + "index": 45 + }, + { + "type": "interline_equation", + "bbox": [ + 208, + 703, + 403, + 731 + ], + "lines": [ + { + "bbox": [ + 208, + 703, + 403, + 731 + ], + "spans": [ + { + "bbox": [ + 208, + 703, + 403, + 731 + ], + "score": 0.93, + "content": "0 \\in \\left[ \\begin{array} { l } { \\nabla _ { x } F ( x ^ { * } , y ^ { * } ) } \\\\ { \\nabla _ { y } G ( x ^ { * } , y ^ { * } ) } \\end{array} \\right] + \\big ( N _ { \\Theta } ( x ^ { * } ) \\times N _ { \\Omega } ( y ^ { * } ) \\big ) .", + "type": "interline_equation", + "image_path": "51b2c40c258ee95ad171fde6e9646d6c7b8aec1da1c21537eb6ca8f86d1adcd1.jpg" + } + ] + } + ], + "index": 47.5, + "virtual_lines": [ + { + "bbox": [ + 208, + 703, + 403, + 717.0 + ], + "spans": [], + "index": 47 + }, + { + "bbox": [ + 208, + 717.0, + 403, + 731.0 + ], + "spans": [], + "index": 48 + } + ] + } + ], + "page_idx": 2, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 27, + 308, + 37 + ], + "lines": [ + { + "bbox": [ + 107, + 26, + 308, + 38 + ], + "spans": [ + { + "bbox": [ + 107, + 26, + 308, + 38 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": 
"text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 302, + 751, + 309, + 760 + ], + "lines": [ + { + "bbox": [ + 301, + 750, + 310, + 762 + ], + "spans": [ + { + "bbox": [ + 301, + 750, + 310, + 762 + ], + "score": 1.0, + "content": "3", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 107, + 82, + 505, + 149 + ], + "lines": [ + { + "bbox": [ + 105, + 82, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 505, + 95 + ], + "score": 1.0, + "content": "Non-monotone problems The work of Hsieh et al. (2020) included a local convergence analysis", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 93, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 93, + 505, + 106 + ], + "score": 1.0, + "content": "for DSEG applied to locally monotone problems. For min-max problems, if the objective is locally", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 104, + 505, + 117 + ], + "spans": [ + { + "bbox": [ + 105, + 104, + 505, + 117 + ], + "score": 1.0, + "content": "convex-concave at a solution and DSEG is initialized in close proximity, then for small enough", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 115, + 506, + 127 + ], + "spans": [ + { + "bbox": [ + 105, + 115, + 506, + 127 + ], + "score": 1.0, + "content": "stepsizes it converges to the solution with high probability. It is possible to extend this result to SPS,", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 126, + 505, + 139 + ], + "spans": [ + { + "bbox": [ + 106, + 126, + 505, + 139 + ], + "score": 1.0, + "content": "along with our convergence rate analysis. 
This result is beyond the scope of this work, but Appendix", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 137, + 211, + 149 + ], + "spans": [ + { + "bbox": [ + 105, + 137, + 211, + 149 + ], + "score": 1.0, + "content": "J provides a proof sketch.", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 2.5, + "bbox_fs": [ + 105, + 82, + 506, + 149 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 164, + 349, + 177 + ], + "lines": [ + { + "bbox": [ + 104, + 163, + 351, + 179 + ], + "spans": [ + { + "bbox": [ + 104, + 163, + 351, + 179 + ], + "score": 1.0, + "content": "2 BACKGROUND ON MONOTONE INCLUSIONS", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 6 + }, + { + "type": "text", + "bbox": [ + 107, + 187, + 505, + 244 + ], + "lines": [ + { + "bbox": [ + 106, + 189, + 506, + 201 + ], + "spans": [ + { + "bbox": [ + 106, + 189, + 506, + 201 + ], + "score": 1.0, + "content": "Since they are so important to SPS, this section provides some background material regarding mono-", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 199, + 505, + 211 + ], + "spans": [ + { + "bbox": [ + 106, + 199, + 505, + 211 + ], + "score": 1.0, + "content": "tone inclusions, along with their connections to convex optimization, games, and ML. Appendix G", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 210, + 506, + 222 + ], + "spans": [ + { + "bbox": [ + 106, + 210, + 506, + 222 + ], + "score": 1.0, + "content": "discusses their connections to variational inequalities. For a more thorough treatment, we refer", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 221, + 506, + 235 + ], + "spans": [ + { + "bbox": [ + 105, + 221, + 506, + 235 + ], + "score": 1.0, + "content": "to Bauschke & Combettes (2017). 
See Appendix A for a longer discussion of the applications of", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 232, + 336, + 245 + ], + "spans": [ + { + "bbox": [ + 106, + 232, + 336, + 245 + ], + "score": 1.0, + "content": "monotone inclusions to ML along with several examples.", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 9, + "bbox_fs": [ + 105, + 189, + 506, + 245 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 254, + 505, + 289 + ], + "lines": [ + { + "bbox": [ + 106, + 254, + 505, + 267 + ], + "spans": [ + { + "bbox": [ + 106, + 254, + 194, + 267 + ], + "score": 1.0, + "content": "Fundamentals Let", + "type": "text" + }, + { + "bbox": [ + 195, + 254, + 283, + 267 + ], + "score": 0.91, + "content": "f : \\mathbb { R } ^ { d } \\mathbb { R } \\cup \\{ \\infty \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 284, + 254, + 505, + 267 + ], + "score": 1.0, + "content": "be closed, convex, and proper (CCP). Recall that its", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 265, + 505, + 280 + ], + "spans": [ + { + "bbox": [ + 105, + 265, + 168, + 280 + ], + "score": 1.0, + "content": "subdifferential", + "type": "text" + }, + { + "bbox": [ + 169, + 266, + 182, + 278 + ], + "score": 0.88, + "content": "\\partial f", + "type": "inline_equation" + }, + { + "bbox": [ + 182, + 265, + 232, + 280 + ], + "score": 1.0, + "content": "is given by", + "type": "text" + }, + { + "bbox": [ + 233, + 266, + 413, + 279 + ], + "score": 0.92, + "content": "\\partial f ( x ) \\ { \\overset { \\cdot } { = } } \\ \\left\\{ g : f ( y ) \\geq f ( x ) + g ^ { \\top } { \\big ( } { \\bar { y - x } } { \\big ) } \\right\\}", + "type": "inline_equation" + }, + { + "bbox": [ + 414, + 265, + 458, + 280 + ], + "score": 1.0, + "content": ". 
The map", + "type": "text" + }, + { + "bbox": [ + 458, + 266, + 471, + 278 + ], + "score": 0.88, + "content": "\\partial f", + "type": "inline_equation" + }, + { + "bbox": [ + 471, + 265, + 505, + 280 + ], + "score": 1.0, + "content": "has the", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 278, + 143, + 291 + ], + "spans": [ + { + "bbox": [ + 105, + 278, + 143, + 291 + ], + "score": 1.0, + "content": "property", + "type": "text" + } + ], + "index": 14 + } + ], + "index": 13, + "bbox_fs": [ + 105, + 254, + 505, + 291 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 201, + 290, + 408, + 304 + ], + "lines": [ + { + "bbox": [ + 201, + 290, + 408, + 304 + ], + "spans": [ + { + "bbox": [ + 201, + 290, + 408, + 304 + ], + "score": 0.87, + "content": "u \\in \\partial f ( x ) , v \\in \\partial f ( y ) \\implies ( u - v ) ^ { \\top } ( x - y ) \\geq 0 ,", + "type": "interline_equation", + "image_path": "e5d9121f2c1a799e9c53e9c729f315d541087dc8bc4ecf0c54171c706b49f450.jpg" + } + ] + } + ], + "index": 15, + "virtual_lines": [ + { + "bbox": [ + 201, + 290, + 408, + 304 + ], + "spans": [], + "index": 15 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 305, + 505, + 394 + ], + "lines": [ + { + "bbox": [ + 105, + 304, + 506, + 318 + ], + "spans": [ + { + "bbox": [ + 105, + 304, + 506, + 318 + ], + "score": 1.0, + "content": "and any point-to-set map having this property is called a monotone operator. 
A monotone operator", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 315, + 504, + 329 + ], + "spans": [ + { + "bbox": [ + 106, + 317, + 115, + 326 + ], + "score": 0.8, + "content": "T", + "type": "inline_equation" + }, + { + "bbox": [ + 115, + 315, + 415, + 329 + ], + "score": 1.0, + "content": "is called maximal if no additional points can be included in the image", + "type": "text" + }, + { + "bbox": [ + 415, + 316, + 437, + 328 + ], + "score": 0.91, + "content": "T ( x )", + "type": "inline_equation" + }, + { + "bbox": [ + 437, + 315, + 469, + 329 + ], + "score": 1.0, + "content": "of any", + "type": "text" + }, + { + "bbox": [ + 469, + 316, + 504, + 326 + ], + "score": 0.9, + "content": "\\boldsymbol { x } ^ { \\mathrm { ~ \\scriptsize ~ \\in ~ } \\mathbb { R } ^ { d } }", + "type": "inline_equation" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 326, + 506, + 340 + ], + "spans": [ + { + "bbox": [ + 105, + 326, + 506, + 340 + ], + "score": 1.0, + "content": "without violating the above property (Bauschke & Combettes, 2017, Def. 20.20). Subgradient maps", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 106, + 338, + 505, + 350 + ], + "spans": [ + { + "bbox": [ + 106, + 338, + 487, + 350 + ], + "score": 1.0, + "content": "of CCP functions are maximal (Bauschke & Combettes, 2017, Thm. 20.25). 
A minimizer of", + "type": "text" + }, + { + "bbox": [ + 487, + 339, + 494, + 349 + ], + "score": 0.86, + "content": "f", + "type": "inline_equation" + }, + { + "bbox": [ + 495, + 338, + 505, + 350 + ], + "score": 1.0, + "content": "is", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 348, + 505, + 362 + ], + "spans": [ + { + "bbox": [ + 105, + 348, + 123, + 362 + ], + "score": 1.0, + "content": "any", + "type": "text" + }, + { + "bbox": [ + 124, + 349, + 135, + 359 + ], + "score": 0.86, + "content": "x ^ { * }", + "type": "inline_equation" + }, + { + "bbox": [ + 136, + 348, + 177, + 362 + ], + "score": 1.0, + "content": "such that", + "type": "text" + }, + { + "bbox": [ + 177, + 349, + 226, + 361 + ], + "score": 0.93, + "content": "0 \\in \\partial f ( x ^ { * } )", + "type": "inline_equation" + }, + { + "bbox": [ + 226, + 348, + 505, + 362 + ], + "score": 1.0, + "content": ". This is perhaps the simplest example of a monotone inclusion, the", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 360, + 505, + 372 + ], + "spans": [ + { + "bbox": [ + 106, + 360, + 186, + 372 + ], + "score": 1.0, + "content": "problem of finding", + "type": "text" + }, + { + "bbox": [ + 187, + 362, + 194, + 370 + ], + "score": 0.77, + "content": "x", + "type": "inline_equation" + }, + { + "bbox": [ + 194, + 360, + 235, + 372 + ], + "score": 1.0, + "content": "such that", + "type": "text" + }, + { + "bbox": [ + 236, + 360, + 276, + 372 + ], + "score": 0.93, + "content": "0 \\in T ( x )", + "type": "inline_equation" + }, + { + "bbox": [ + 276, + 360, + 308, + 372 + ], + "score": 1.0, + "content": ", where", + "type": "text" + }, + { + "bbox": [ + 309, + 361, + 317, + 370 + ], + "score": 0.83, + "content": "T", + "type": "inline_equation" + }, + { + "bbox": [ + 318, + 360, + 430, + 372 + ], + "score": 1.0, + "content": "is a monotone operator. 
If", + "type": "text" + }, + { + "bbox": [ + 431, + 361, + 438, + 372 + ], + "score": 0.87, + "content": "f", + "type": "inline_equation" + }, + { + "bbox": [ + 438, + 360, + 505, + 372 + ], + "score": 1.0, + "content": "is smooth, then", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 107, + 370, + 506, + 385 + ], + "spans": [ + { + "bbox": [ + 107, + 371, + 184, + 383 + ], + "score": 0.92, + "content": "\\bar { \\partial } f ( x ) = \\{ \\nabla f ( x ) \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 184, + 370, + 212, + 385 + ], + "score": 1.0, + "content": "for all", + "type": "text" + }, + { + "bbox": [ + 213, + 373, + 219, + 381 + ], + "score": 0.74, + "content": "x", + "type": "inline_equation" + }, + { + "bbox": [ + 219, + 370, + 338, + 385 + ], + "score": 1.0, + "content": ", and the monotone inclusion", + "type": "text" + }, + { + "bbox": [ + 338, + 371, + 381, + 383 + ], + "score": 0.93, + "content": "0 \\in \\partial f ( x )", + "type": "inline_equation" + }, + { + "bbox": [ + 382, + 370, + 506, + 385 + ], + "score": 1.0, + "content": "is equivalent to the first-order", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 382, + 241, + 396 + ], + "spans": [ + { + "bbox": [ + 105, + 382, + 190, + 396 + ], + "score": 1.0, + "content": "optimality condition", + "type": "text" + }, + { + "bbox": [ + 191, + 382, + 237, + 394 + ], + "score": 0.92, + "content": "0 = \\nabla f ( x )", + "type": "inline_equation" + }, + { + "bbox": [ + 237, + 382, + 241, + 396 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 23 + } + ], + "index": 19.5, + "bbox_fs": [ + 105, + 304, + 506, + 396 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 398, + 504, + 432 + ], + "lines": [ + { + "bbox": [ + 106, + 398, + 506, + 411 + ], + "spans": [ + { + "bbox": [ + 106, + 398, + 506, + 411 + ], + "score": 1.0, + "content": "Under certain regularity conditions (Bauschke & Combettes, 2017, Cor. 
16.5), minimizing a sum of", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 408, + 506, + 423 + ], + "spans": [ + { + "bbox": [ + 105, + 408, + 168, + 423 + ], + "score": 1.0, + "content": "CCP functions", + "type": "text" + }, + { + "bbox": [ + 168, + 410, + 211, + 421 + ], + "score": 0.93, + "content": "f _ { 1 } , \\ldots , f _ { n }", + "type": "inline_equation" + }, + { + "bbox": [ + 211, + 408, + 506, + 423 + ], + "score": 1.0, + "content": "is equivalent to solving the monotone inclusion formed from the sum of", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 106, + 420, + 195, + 432 + ], + "spans": [ + { + "bbox": [ + 106, + 420, + 195, + 432 + ], + "score": 1.0, + "content": "their subdifferentials:", + "type": "text" + } + ], + "index": 26 + } + ], + "index": 25, + "bbox_fs": [ + 105, + 398, + 506, + 432 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 207, + 431, + 403, + 464 + ], + "lines": [ + { + "bbox": [ + 207, + 431, + 403, + 464 + ], + "spans": [ + { + "bbox": [ + 207, + 431, + 403, + 464 + ], + "score": 0.93, + "content": "x ^ { * } \\in \\underset { x \\in \\mathbb { R } ^ { d } } { \\arg \\operatorname* { m i n } } \\sum _ { i = 1 } ^ { n } f _ { i } ( x ) \\iff 0 \\in \\sum _ { i = 1 } ^ { n } \\partial f _ { i } ( x ^ { * } ) .", + "type": "interline_equation", + "image_path": "14ae43db097d468948ccb4029a45248686ecdb8144b85eee0324894fd7a0d72e.jpg" + } + ] + } + ], + "index": 27.5, + "virtual_lines": [ + { + "bbox": [ + 207, + 431, + 403, + 447.5 + ], + "spans": [], + "index": 27 + }, + { + "bbox": [ + 207, + 447.5, + 403, + 464.0 + ], + "spans": [], + "index": 28 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 465, + 505, + 543 + ], + "lines": [ + { + "bbox": [ + 106, + 465, + 506, + 477 + ], + "spans": [ + { + "bbox": [ + 106, + 465, + 506, + 477 + ], + "score": 1.0, + "content": "As throughout this paper for all set addition operations, the summation on the right-hand side of (2)", + 
"type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 101, + 470, + 509, + 493 + ], + "spans": [ + { + "bbox": [ + 101, + 470, + 196, + 493 + ], + "score": 1.0, + "content": "is the Minkowski sum", + "type": "text" + }, + { + "bbox": [ + 196, + 475, + 369, + 488 + ], + "score": 0.91, + "content": "\\textstyle \\sum _ { i = 1 } ^ { n } S _ { i } = \\{ \\sum _ { i = 1 } ^ { n } s _ { i } \\ | ^ { \\cdot } s _ { i } \\in S _ { i } \\forall i \\in { 1 . . n } \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 370, + 470, + 441, + 493 + ], + "score": 1.0, + "content": ". For a convex set", + "type": "text" + }, + { + "bbox": [ + 442, + 477, + 452, + 486 + ], + "score": 0.81, + "content": "X", + "type": "inline_equation" + }, + { + "bbox": [ + 452, + 470, + 509, + 493 + ], + "score": 1.0, + "content": ", a constraint", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 107, + 485, + 507, + 501 + ], + "spans": [ + { + "bbox": [ + 107, + 488, + 133, + 497 + ], + "score": 0.88, + "content": "x \\in C", + "type": "inline_equation" + }, + { + "bbox": [ + 133, + 485, + 214, + 501 + ], + "score": 1.0, + "content": "for some convex set", + "type": "text" + }, + { + "bbox": [ + 214, + 488, + 223, + 497 + ], + "score": 0.83, + "content": "C", + "type": "inline_equation" + }, + { + "bbox": [ + 223, + 485, + 372, + 501 + ], + "score": 1.0, + "content": "may be imposed by setting one of the", + "type": "text" + }, + { + "bbox": [ + 372, + 487, + 382, + 498 + ], + "score": 0.88, + "content": "f _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 382, + 485, + 491, + 501 + ], + "score": 1.0, + "content": "to be the indicator function", + "type": "text" + }, + { + "bbox": [ + 491, + 489, + 502, + 498 + ], + "score": 0.78, + "content": "\\iota _ { C }", + "type": "inline_equation" + }, + { + "bbox": [ + 502, + 485, + 507, + 501 + ], + "score": 1.0, + "content": ",", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 106, + 497, + 506, + 510 + 
], + "spans": [ + { + "bbox": [ + 106, + 497, + 151, + 510 + ], + "score": 1.0, + "content": "defined by", + "type": "text" + }, + { + "bbox": [ + 151, + 498, + 194, + 510 + ], + "score": 0.92, + "content": "\\iota _ { C } ( x ) = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 195, + 497, + 209, + 510 + ], + "score": 1.0, + "content": "for", + "type": "text" + }, + { + "bbox": [ + 210, + 498, + 236, + 508 + ], + "score": 0.9, + "content": "x \\in C", + "type": "inline_equation" + }, + { + "bbox": [ + 237, + 497, + 255, + 510 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 255, + 497, + 311, + 509 + ], + "score": 0.92, + "content": "\\iota _ { C } \\bar { ( } x ) = \\dot { + } \\infty", + "type": "inline_equation" + }, + { + "bbox": [ + 311, + 497, + 326, + 510 + ], + "score": 1.0, + "content": "for", + "type": "text" + }, + { + "bbox": [ + 326, + 498, + 353, + 509 + ], + "score": 0.91, + "content": "x \\not \\in C", + "type": "inline_equation" + }, + { + "bbox": [ + 353, + 497, + 506, + 510 + ], + "score": 1.0, + "content": ". Indicator functions of closed convex", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 509, + 505, + 521 + ], + "spans": [ + { + "bbox": [ + 105, + 509, + 432, + 521 + ], + "score": 1.0, + "content": "sets are CCP (Bauschke & Combettes, 2017, Ex. 
1.25), and the subgradient map of", + "type": "text" + }, + { + "bbox": [ + 432, + 510, + 443, + 520 + ], + "score": 0.85, + "content": "\\iota _ { C }", + "type": "inline_equation" + }, + { + "bbox": [ + 444, + 509, + 505, + 521 + ], + "score": 1.0, + "content": "is also referred", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 519, + 506, + 532 + ], + "spans": [ + { + "bbox": [ + 105, + 519, + 213, + 532 + ], + "score": 1.0, + "content": "to as the normal cone map", + "type": "text" + }, + { + "bbox": [ + 213, + 520, + 229, + 531 + ], + "score": 0.89, + "content": "N _ { C }", + "type": "inline_equation" + }, + { + "bbox": [ + 229, + 519, + 241, + 532 + ], + "score": 1.0, + "content": "of", + "type": "text" + }, + { + "bbox": [ + 241, + 520, + 250, + 530 + ], + "score": 0.82, + "content": "C", + "type": "inline_equation" + }, + { + "bbox": [ + 251, + 519, + 506, + 532 + ], + "score": 1.0, + "content": "(Bauschke & Combettes, 2017, Def. 6.37). Multiple constraints", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 106, + 531, + 367, + 543 + ], + "spans": [ + { + "bbox": [ + 106, + 531, + 367, + 543 + ], + "score": 1.0, + "content": "may be imposed by including multiple indicator functions in (2).", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 32, + "bbox_fs": [ + 101, + 465, + 509, + 543 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 553, + 505, + 598 + ], + "lines": [ + { + "bbox": [ + 105, + 553, + 506, + 567 + ], + "spans": [ + { + "bbox": [ + 105, + 553, + 506, + 567 + ], + "score": 1.0, + "content": "ML applications The form (2) can be used to model ML problems with multiple constraints and/or", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 564, + 506, + 577 + ], + "spans": [ + { + "bbox": [ + 105, + 564, + 506, + 577 + ], + "score": 1.0, + "content": "nonsmooth regularizers, including sparse and overlapping group lasso (Jacob et al., 2009), sparse and", + "type": "text" + } + ], 
+ "index": 37 + }, + { + "bbox": [ + 106, + 576, + 506, + 587 + ], + "spans": [ + { + "bbox": [ + 106, + 576, + 506, + 587 + ], + "score": 1.0, + "content": "low-rank matrix estimation problems (Richard et al., 2012), and rare feature selection (Yan & Bien,", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 106, + 586, + 325, + 599 + ], + "spans": [ + { + "bbox": [ + 106, + 586, + 325, + 599 + ], + "score": 1.0, + "content": "2020); see Pedregosa & Gidel (2018) for an overview.", + "type": "text" + } + ], + "index": 39 + } + ], + "index": 37.5, + "bbox_fs": [ + 105, + 553, + 506, + 599 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 609, + 505, + 643 + ], + "lines": [ + { + "bbox": [ + 106, + 609, + 505, + 622 + ], + "spans": [ + { + "bbox": [ + 106, + 609, + 505, + 622 + ], + "score": 1.0, + "content": "Games Consider a two-player noncooperative game in which each player tries to selfishly minimize", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 106, + 620, + 505, + 632 + ], + "spans": [ + { + "bbox": [ + 106, + 620, + 505, + 632 + ], + "score": 1.0, + "content": "its own loss, with each loss depending on the actions of both players. 
Typically, the goal is to find a", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 630, + 446, + 646 + ], + "spans": [ + { + "bbox": [ + 105, + 630, + 446, + 646 + ], + "score": 1.0, + "content": "Nash equilibrium, in which neither player can improve its loss by changing strategy:", + "type": "text" + } + ], + "index": 42 + } + ], + "index": 41, + "bbox_fs": [ + 105, + 609, + 505, + 646 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 192, + 644, + 417, + 665 + ], + "lines": [ + { + "bbox": [ + 192, + 644, + 417, + 665 + ], + "spans": [ + { + "bbox": [ + 192, + 644, + 417, + 665 + ], + "score": 0.9, + "content": "x ^ { * } \\in \\arg \\operatorname* { m i n } _ { x \\in \\Theta } F ( x , y ^ { * } ) \\quad { \\mathrm { a n d } } \\quad y ^ { * } \\in \\arg \\operatorname* { m i n } _ { y \\in \\Omega } G ( x ^ { * } , y ) .", + "type": "interline_equation", + "image_path": "066c4967ce6cb2c152fa3e359cdecbd6b06a5ad13d851c8b82eaa76e4bf72cfe.jpg" + } + ] + } + ], + "index": 43, + "virtual_lines": [ + { + "bbox": [ + 192, + 644, + 417, + 665 + ], + "spans": [], + "index": 43 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 668, + 505, + 702 + ], + "lines": [ + { + "bbox": [ + 105, + 667, + 506, + 681 + ], + "spans": [ + { + "bbox": [ + 105, + 667, + 281, + 681 + ], + "score": 1.0, + "content": "Assuming that the admissible strategy sets", + "type": "text" + }, + { + "bbox": [ + 281, + 667, + 319, + 680 + ], + "score": 0.92, + "content": "\\Theta \\subseteq \\mathbb { R } ^ { d _ { x } }", + "type": "inline_equation" + }, + { + "bbox": [ + 320, + 667, + 338, + 681 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 339, + 667, + 376, + 680 + ], + "score": 0.93, + "content": "\\Omega \\subseteq \\mathbb { R } ^ { d _ { y } }", + "type": "inline_equation" + }, + { + "bbox": [ + 376, + 667, + 506, + 681 + ], + "score": 1.0, + "content": "are closed and convex and that", + "type": "text" + } + ], + "index": 
44 + }, + { + "bbox": [ + 107, + 678, + 505, + 692 + ], + "spans": [ + { + "bbox": [ + 107, + 680, + 115, + 689 + ], + "score": 0.84, + "content": "F", + "type": "inline_equation" + }, + { + "bbox": [ + 116, + 678, + 134, + 692 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 134, + 680, + 143, + 690 + ], + "score": 0.83, + "content": "G", + "type": "inline_equation" + }, + { + "bbox": [ + 144, + 678, + 505, + 692 + ], + "score": 1.0, + "content": "are differentiable, then writing the first-order necessary conditions for each optimization", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 690, + 193, + 703 + ], + "spans": [ + { + "bbox": [ + 105, + 690, + 193, + 703 + ], + "score": 1.0, + "content": "problem in (3) yields", + "type": "text" + } + ], + "index": 46 + } + ], + "index": 45, + "bbox_fs": [ + 105, + 667, + 506, + 703 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 208, + 703, + 403, + 731 + ], + "lines": [ + { + "bbox": [ + 208, + 703, + 403, + 731 + ], + "spans": [ + { + "bbox": [ + 208, + 703, + 403, + 731 + ], + "score": 0.93, + "content": "0 \\in \\left[ \\begin{array} { l } { \\nabla _ { x } F ( x ^ { * } , y ^ { * } ) } \\\\ { \\nabla _ { y } G ( x ^ { * } , y ^ { * } ) } \\end{array} \\right] + \\big ( N _ { \\Theta } ( x ^ { * } ) \\times N _ { \\Omega } ( y ^ { * } ) \\big ) .", + "type": "interline_equation", + "image_path": "51b2c40c258ee95ad171fde6e9646d6c7b8aec1da1c21537eb6ca8f86d1adcd1.jpg" + } + ] + } + ], + "index": 47.5, + "virtual_lines": [ + { + "bbox": [ + 208, + 703, + 403, + 717.0 + ], + "spans": [], + "index": 47 + }, + { + "bbox": [ + 208, + 717.0, + 403, + 731.0 + ], + "spans": [], + "index": 48 + } + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 82, + 505, + 182 + ], + "lines": [ + { + "bbox": [ + 104, + 81, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 115, + 95 + ], + "score": 1.0, + "content": "If", + "type": 
"text" + }, + { + "bbox": [ + 116, + 83, + 153, + 93 + ], + "score": 0.9, + "content": "G = - F", + "type": "inline_equation" + }, + { + "bbox": [ + 154, + 81, + 277, + 95 + ], + "score": 1.0, + "content": ", then (3) is a min-max game. If", + "type": "text" + }, + { + "bbox": [ + 277, + 83, + 286, + 92 + ], + "score": 0.84, + "content": "F", + "type": "inline_equation" + }, + { + "bbox": [ + 287, + 81, + 353, + 95 + ], + "score": 1.0, + "content": "is also convex in", + "type": "text" + }, + { + "bbox": [ + 353, + 85, + 361, + 92 + ], + "score": 0.71, + "content": "x", + "type": "inline_equation" + }, + { + "bbox": [ + 361, + 81, + 421, + 95 + ], + "score": 1.0, + "content": "and concave in", + "type": "text" + }, + { + "bbox": [ + 421, + 84, + 428, + 94 + ], + "score": 0.69, + "content": "y", + "type": "inline_equation" + }, + { + "bbox": [ + 428, + 81, + 450, + 95 + ], + "score": 1.0, + "content": ", then", + "type": "text" + }, + { + "bbox": [ + 451, + 82, + 505, + 95 + ], + "score": 0.92, + "content": "B : ( x , y ) \\mapsto", + "type": "inline_equation" + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 90, + 504, + 108 + ], + "spans": [ + { + "bbox": [ + 106, + 93, + 221, + 106 + ], + "score": 0.89, + "content": "( \\nabla _ { x } F ( x , y ) , - \\nabla _ { y } F ( x , y ) ) ^ { \\top }", + "type": "inline_equation" + }, + { + "bbox": [ + 221, + 90, + 290, + 108 + ], + "score": 1.0, + "content": "is monotone1 on", + "type": "text" + }, + { + "bbox": [ + 290, + 93, + 322, + 104 + ], + "score": 0.91, + "content": "\\mathbb { R } ^ { d _ { x } + d _ { y } }", + "type": "inline_equation" + }, + { + "bbox": [ + 322, + 90, + 495, + 108 + ], + "score": 1.0, + "content": "(Rockafellar, 1970). 
In many applications,", + "type": "text" + }, + { + "bbox": [ + 495, + 94, + 504, + 104 + ], + "score": 0.75, + "content": "B", + "type": "inline_equation" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 104, + 506, + 118 + ], + "spans": [ + { + "bbox": [ + 104, + 104, + 506, + 118 + ], + "score": 1.0, + "content": "is also Lipschitz continuous. In this situation, (4) is a monotone inclusion involving two operators", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 115, + 506, + 128 + ], + "spans": [ + { + "bbox": [ + 107, + 116, + 116, + 126 + ], + "score": 0.82, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 116, + 115, + 134, + 128 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 134, + 116, + 162, + 127 + ], + "score": 0.92, + "content": "N _ { \\Theta \\times \\Omega }", + "type": "inline_equation" + }, + { + "bbox": [ + 162, + 115, + 185, + 128 + ], + "score": 1.0, + "content": ", with", + "type": "text" + }, + { + "bbox": [ + 186, + 116, + 195, + 126 + ], + "score": 0.79, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 195, + 115, + 506, + 128 + ], + "score": 1.0, + "content": "being Lipschitz. Using the simultaneous version of GDA on (3) is equivalent", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 126, + 506, + 139 + ], + "spans": [ + { + "bbox": [ + 105, + 126, + 506, + 139 + ], + "score": 1.0, + "content": "to applying the forward-backward method (FB) (Bauschke & Combettes, 2017, Thm. 
26.14) to (4).", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 136, + 507, + 150 + ], + "spans": [ + { + "bbox": [ + 105, + 136, + 332, + 150 + ], + "score": 1.0, + "content": "However, convergence of FB requires that the operator", + "type": "text" + }, + { + "bbox": [ + 333, + 138, + 342, + 147 + ], + "score": 0.82, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 342, + 136, + 507, + 150 + ], + "score": 1.0, + "content": "be cocoercive (Bauschke & Combettes,", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 147, + 507, + 161 + ], + "spans": [ + { + "bbox": [ + 105, + 147, + 507, + 161 + ], + "score": 1.0, + "content": "2017, Def. 4.10), and not merely Lipschitz (Bauschke & Combettes, 2017, Thm. 26.14). Thus,", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 159, + 507, + 172 + ], + "spans": [ + { + "bbox": [ + 105, + 159, + 507, + 172 + ], + "score": 1.0, + "content": "simultaneous GDA fails to converge for (3) without additional assumptions; see Gidel et al. (2019,", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 170, + 259, + 183 + ], + "spans": [ + { + "bbox": [ + 105, + 170, + 259, + 183 + ], + "score": 1.0, + "content": "Prop. 1) for a simple counterexample.", + "type": "text" + } + ], + "index": 8 + } + ], + "index": 4 + }, + { + "type": "text", + "bbox": [ + 107, + 187, + 505, + 232 + ], + "lines": [ + { + "bbox": [ + 105, + 187, + 506, + 200 + ], + "spans": [ + { + "bbox": [ + 105, + 187, + 506, + 200 + ], + "score": 1.0, + "content": "Regularizers and further constraints may be imposed by adding more operators to (4). 
For example,", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 196, + 506, + 211 + ], + "spans": [ + { + "bbox": [ + 105, + 196, + 338, + 211 + ], + "score": 1.0, + "content": "if one wished to apply a (nonsmooth) convex regularizer", + "type": "text" + }, + { + "bbox": [ + 339, + 198, + 431, + 210 + ], + "score": 0.92, + "content": "r : \\bar { \\mathbb { R } } ^ { d _ { x } } \\bar { \\mathbb { R } } \\cup \\{ + \\infty \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 432, + 196, + 457, + 211 + ], + "score": 1.0, + "content": "to the", + "type": "text" + }, + { + "bbox": [ + 458, + 200, + 465, + 208 + ], + "score": 0.75, + "content": "x", + "type": "inline_equation" + }, + { + "bbox": [ + 465, + 196, + 506, + 211 + ], + "score": 1.0, + "content": "variables", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 208, + 506, + 222 + ], + "spans": [ + { + "bbox": [ + 105, + 208, + 210, + 222 + ], + "score": 1.0, + "content": "and a similar regularizer", + "type": "text" + }, + { + "bbox": [ + 211, + 208, + 309, + 221 + ], + "score": 0.9, + "content": "d : \\mathbb { R } ^ { d _ { y } } \\mathbb { R } \\cup \\{ + \\infty \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 310, + 208, + 338, + 222 + ], + "score": 1.0, + "content": "to the", + "type": "text" + }, + { + "bbox": [ + 338, + 211, + 345, + 221 + ], + "score": 0.8, + "content": "y", + "type": "inline_equation" + }, + { + "bbox": [ + 345, + 208, + 506, + 222 + ], + "score": 1.0, + "content": "variables, one would add the operator", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 220, + 343, + 233 + ], + "spans": [ + { + "bbox": [ + 106, + 220, + 227, + 232 + ], + "score": 0.92, + "content": "A _ { 2 } : ( x , y ) \\mapsto \\bar { \\partial r } ( x ) \\times \\partial d ( y )", + "type": "inline_equation" + }, + { + "bbox": [ + 227, + 220, + 343, + 233 + ], + "score": 1.0, + "content": "to the right-hand side of (4).", + "type": "text" + 
} + ], + "index": 12 + } + ], + "index": 10.5 + }, + { + "type": "text", + "bbox": [ + 107, + 244, + 505, + 333 + ], + "lines": [ + { + "bbox": [ + 105, + 243, + 505, + 258 + ], + "spans": [ + { + "bbox": [ + 105, + 243, + 505, + 258 + ], + "score": 1.0, + "content": "ML applications of games Distributionally robust supervised learning (DRSL) is an emerging", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 255, + 505, + 268 + ], + "spans": [ + { + "bbox": [ + 105, + 255, + 505, + 268 + ], + "score": 1.0, + "content": "framework for improving the stability and reliability of ML models in the face of distributional", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 266, + 505, + 279 + ], + "spans": [ + { + "bbox": [ + 105, + 266, + 134, + 279 + ], + "score": 1.0, + "content": "shifts", + "type": "text" + }, + { + "bbox": [ + 134, + 266, + 147, + 276 + ], + "score": 0.26, + "content": "\\mathrm { T u }", + "type": "inline_equation" + }, + { + "bbox": [ + 147, + 266, + 505, + 279 + ], + "score": 1.0, + "content": "et al., 2021; Kuhn et al., 2019; Shafieezadeh-Abadeh et al., 2015; Sinha et al., 2018; Lin", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 277, + 505, + 290 + ], + "spans": [ + { + "bbox": [ + 105, + 277, + 505, + 290 + ], + "score": 1.0, + "content": "et al., 2020; Namkoong & Duchi, 2016). 
Common approaches to DRSL formulate the problem as", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 287, + 506, + 302 + ], + "spans": [ + { + "bbox": [ + 105, + 287, + 506, + 302 + ], + "score": 1.0, + "content": "a min-max game between a learner selecting the model parameters and an adversary selecting a", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 298, + 506, + 312 + ], + "spans": [ + { + "bbox": [ + 104, + 298, + 506, + 312 + ], + "score": 1.0, + "content": "worst-case distribution subject to some ambiguity set around the observed empirical distribution.", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 309, + 506, + 323 + ], + "spans": [ + { + "bbox": [ + 105, + 309, + 506, + 323 + ], + "score": 1.0, + "content": "This min-max problem is often further reduced to either a finite-dimensional saddlepoint problem or", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 322, + 235, + 333 + ], + "spans": [ + { + "bbox": [ + 105, + 322, + 235, + 333 + ], + "score": 1.0, + "content": "a convex optimization problem.", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 16.5 + }, + { + "type": "text", + "bbox": [ + 106, + 338, + 505, + 405 + ], + "lines": [ + { + "bbox": [ + 105, + 337, + 505, + 351 + ], + "spans": [ + { + "bbox": [ + 105, + 337, + 505, + 351 + ], + "score": 1.0, + "content": "DRSL is a source of games with multiple constraints/regularizers. One such formulation, based on", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 348, + 505, + 362 + ], + "spans": [ + { + "bbox": [ + 105, + 348, + 505, + 362 + ], + "score": 1.0, + "content": "Yu et al. (2021), is discussed in the experiments below. 
The work in Namkoong & Duchi (2016) uses", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 360, + 506, + 372 + ], + "spans": [ + { + "bbox": [ + 105, + 360, + 210, + 372 + ], + "score": 1.0, + "content": "an ambiguity set based on", + "type": "text" + }, + { + "bbox": [ + 211, + 361, + 218, + 372 + ], + "score": 0.86, + "content": "f", + "type": "inline_equation" + }, + { + "bbox": [ + 218, + 360, + 506, + 372 + ], + "score": 1.0, + "content": "-divergences, while Sinha et al. (2018) introduce a Lagrangian relaxation", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 371, + 506, + 383 + ], + "spans": [ + { + "bbox": [ + 105, + 371, + 506, + 383 + ], + "score": 1.0, + "content": "of the Wasserstein ball. When applied to models utilizing multiple regularizers (Jacob et al., 2009;", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 381, + 505, + 394 + ], + "spans": [ + { + "bbox": [ + 105, + 381, + 505, + 394 + ], + "score": 1.0, + "content": "Richard et al., 2012; Yan & Bien, 2020), both of these approaches lead to min-max problems with", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 392, + 195, + 406 + ], + "spans": [ + { + "bbox": [ + 105, + 392, + 195, + 406 + ], + "score": 1.0, + "content": "multiple regularizers.", + "type": "text" + } + ], + "index": 26 + } + ], + "index": 23.5 + }, + { + "type": "text", + "bbox": [ + 106, + 409, + 505, + 454 + ], + "lines": [ + { + "bbox": [ + 106, + 409, + 506, + 423 + ], + "spans": [ + { + "bbox": [ + 106, + 409, + 506, + 423 + ], + "score": 1.0, + "content": "Other applications of games in ML, although typically nonconvex, include generative adversarial net-", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 420, + 506, + 433 + ], + "spans": [ + { + "bbox": [ + 105, + 420, + 506, + 433 + ], + "score": 1.0, + "content": "works (GANs) (Goodfellow et al., 2014; Arjovsky et al., 2017; Loizou et al., 2020; 2021; 
Mishchenko", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 432, + 506, + 444 + ], + "spans": [ + { + "bbox": [ + 105, + 432, + 506, + 444 + ], + "score": 1.0, + "content": "et al., 2020), fair classification (Wadsworth et al., 2018; Zhang et al., 2018; Edwards & Storkey, 2015;", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 106, + 442, + 387, + 455 + ], + "spans": [ + { + "bbox": [ + 106, + 442, + 387, + 455 + ], + "score": 1.0, + "content": "Celis & Keswani, 2019), and adversarial privacy (Huang et al., 2017).", + "type": "text" + } + ], + "index": 30 + } + ], + "index": 28.5 + }, + { + "type": "text", + "bbox": [ + 106, + 466, + 505, + 546 + ], + "lines": [ + { + "bbox": [ + 106, + 467, + 506, + 479 + ], + "spans": [ + { + "bbox": [ + 106, + 467, + 506, + 479 + ], + "score": 1.0, + "content": "Resolvents, proximal operators, and projections A fundamental computational primitive for", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 106, + 478, + 505, + 489 + ], + "spans": [ + { + "bbox": [ + 106, + 478, + 443, + 489 + ], + "score": 1.0, + "content": "solving monotone inclusions is the resolvent. 
The resolvent of a monotone operator", + "type": "text" + }, + { + "bbox": [ + 444, + 478, + 452, + 488 + ], + "score": 0.81, + "content": "A", + "type": "inline_equation" + }, + { + "bbox": [ + 453, + 478, + 505, + 489 + ], + "score": 1.0, + "content": "is defined to", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 488, + 505, + 502 + ], + "spans": [ + { + "bbox": [ + 105, + 488, + 119, + 502 + ], + "score": 1.0, + "content": "be", + "type": "text" + }, + { + "bbox": [ + 119, + 488, + 190, + 500 + ], + "score": 0.91, + "content": "J _ { A } \\overset { \\cdot } { = } ( I + A ) ^ { - 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 190, + 488, + 222, + 502 + ], + "score": 1.0, + "content": ", where", + "type": "text" + }, + { + "bbox": [ + 222, + 489, + 229, + 499 + ], + "score": 0.78, + "content": "I", + "type": "inline_equation" + }, + { + "bbox": [ + 230, + 488, + 455, + 502 + ], + "score": 1.0, + "content": "is the identity operator and the inverse of any operator", + "type": "text" + }, + { + "bbox": [ + 456, + 489, + 464, + 499 + ], + "score": 0.83, + "content": "T", + "type": "inline_equation" + }, + { + "bbox": [ + 465, + 488, + 505, + 502 + ], + "score": 1.0, + "content": "is simply", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 106, + 498, + 506, + 514 + ], + "spans": [ + { + "bbox": [ + 106, + 500, + 215, + 512 + ], + "score": 0.89, + "content": "T ^ { - 1 } : x \\mapsto \\{ y : T y \\ni x \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 215, + 498, + 230, + 514 + ], + "score": 1.0, + "content": ". 
If", + "type": "text" + }, + { + "bbox": [ + 231, + 500, + 239, + 510 + ], + "score": 0.81, + "content": "A", + "type": "inline_equation" + }, + { + "bbox": [ + 240, + 498, + 388, + 514 + ], + "score": 1.0, + "content": "is maximal monotone, then for any", + "type": "text" + }, + { + "bbox": [ + 388, + 500, + 414, + 511 + ], + "score": 0.87, + "content": "\\rho > 0", + "type": "inline_equation" + }, + { + "bbox": [ + 414, + 498, + 418, + 514 + ], + "score": 1.0, + "content": ",", + "type": "text" + }, + { + "bbox": [ + 419, + 500, + 436, + 512 + ], + "score": 0.85, + "content": "J _ { \\rho A }", + "type": "inline_equation" + }, + { + "bbox": [ + 436, + 498, + 506, + 514 + ], + "score": 1.0, + "content": "is single valued,", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 104, + 510, + 507, + 526 + ], + "spans": [ + { + "bbox": [ + 104, + 510, + 271, + 526 + ], + "score": 1.0, + "content": "nonexpansive, and has domain equal to", + "type": "text" + }, + { + "bbox": [ + 271, + 511, + 284, + 522 + ], + "score": 0.88, + "content": "\\mathbb { R } ^ { d }", + "type": "inline_equation" + }, + { + "bbox": [ + 285, + 510, + 507, + 526 + ], + "score": 1.0, + "content": "(Bauschke & Combettes, 2017, Thm. 21.1 and Prop.", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 521, + 506, + 537 + ], + "spans": [ + { + "bbox": [ + 105, + 521, + 506, + 537 + ], + "score": 1.0, + "content": "23.8). 
Resolvents generalize proximal operators of convex functions: the proximal operator of a CCP", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 533, + 160, + 547 + ], + "spans": [ + { + "bbox": [ + 105, + 533, + 142, + 547 + ], + "score": 1.0, + "content": "function", + "type": "text" + }, + { + "bbox": [ + 142, + 534, + 149, + 545 + ], + "score": 0.85, + "content": "f", + "type": "inline_equation" + }, + { + "bbox": [ + 150, + 533, + 160, + 547 + ], + "score": 1.0, + "content": "is", + "type": "text" + } + ], + "index": 37 + } + ], + "index": 34 + }, + { + "type": "interline_equation", + "bbox": [ + 204, + 550, + 406, + 572 + ], + "lines": [ + { + "bbox": [ + 204, + 550, + 406, + 572 + ], + "spans": [ + { + "bbox": [ + 204, + 550, + 406, + 572 + ], + "score": 0.9, + "content": "\\operatorname { p r o x } _ { \\rho f } ( t ) \\doteq \\underset { x \\in \\mathbb { R } ^ { d } } { \\arg \\operatorname* { m i n } } \\left\\{ \\rho f ( x ) + ( 1 / 2 ) \\| x - t \\| ^ { 2 } \\right\\} .", + "type": "interline_equation", + "image_path": "0975033848d3dbfd934ac0caaff44d0d137a8ac48e7b915b93976889346d26b3.jpg" + } + ] + } + ], + "index": 38, + "virtual_lines": [ + { + "bbox": [ + 204, + 550, + 406, + 572 + ], + "spans": [], + "index": 38 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 578, + 506, + 623 + ], + "lines": [ + { + "bbox": [ + 104, + 577, + 506, + 592 + ], + "spans": [ + { + "bbox": [ + 104, + 577, + 201, + 592 + ], + "score": 1.0, + "content": "It is easily proved that", + "type": "text" + }, + { + "bbox": [ + 201, + 579, + 263, + 592 + ], + "score": 0.89, + "content": "\\mathrm { p r o x } _ { \\rho f } = \\underset { - } { J } _ { \\rho \\partial f }", + "type": "inline_equation" + }, + { + "bbox": [ + 264, + 577, + 506, + 592 + ], + "score": 1.0, + "content": ". 
Like proximal operators, resolvents generalize projection", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 104, + 588, + 508, + 604 + ], + "spans": [ + { + "bbox": [ + 104, + 588, + 187, + 604 + ], + "score": 1.0, + "content": "onto convex sets: if", + "type": "text" + }, + { + "bbox": [ + 187, + 590, + 216, + 601 + ], + "score": 0.91, + "content": "f = \\iota _ { \\mathcal { C } }", + "type": "inline_equation" + }, + { + "bbox": [ + 216, + 588, + 240, + 604 + ], + "score": 1.0, + "content": ", then", + "type": "text" + }, + { + "bbox": [ + 240, + 590, + 338, + 603 + ], + "score": 0.91, + "content": "J _ { \\rho N _ { C } } = \\mathrm { p r o x } _ { \\rho f } = \\mathrm { p r o j } _ { \\mathcal { C } }", + "type": "inline_equation" + }, + { + "bbox": [ + 339, + 588, + 371, + 604 + ], + "score": 1.0, + "content": "for any", + "type": "text" + }, + { + "bbox": [ + 371, + 590, + 396, + 601 + ], + "score": 0.9, + "content": "\\rho > 0", + "type": "inline_equation" + }, + { + "bbox": [ + 396, + 588, + 508, + 604 + ], + "score": 1.0, + "content": ". In many ML applications,", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 106, + 601, + 506, + 613 + ], + "spans": [ + { + "bbox": [ + 106, + 601, + 506, + 613 + ], + "score": 1.0, + "content": "proximal operators, and hence resolvents, are relatively straightforward to compute. For examples,", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 611, + 245, + 624 + ], + "spans": [ + { + "bbox": [ + 105, + 611, + 245, + 624 + ], + "score": 1.0, + "content": "see Parikh & Boyd (2013, Sec. 
6).", + "type": "text" + } + ], + "index": 42 + } + ], + "index": 40.5 + }, + { + "type": "text", + "bbox": [ + 106, + 635, + 505, + 703 + ], + "lines": [ + { + "bbox": [ + 105, + 635, + 506, + 648 + ], + "spans": [ + { + "bbox": [ + 105, + 635, + 506, + 648 + ], + "score": 1.0, + "content": "Operator splitting methods Operator splitting methods attempt to solve monotone inclusions such", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 646, + 505, + 659 + ], + "spans": [ + { + "bbox": [ + 105, + 646, + 417, + 659 + ], + "score": 1.0, + "content": "as (1) by a sequence of operations that each involve only one of the operators", + "type": "text" + }, + { + "bbox": [ + 418, + 647, + 478, + 658 + ], + "score": 0.91, + "content": "A _ { 1 } , \\ldots , A _ { n } , B", + "type": "inline_equation" + }, + { + "bbox": [ + 478, + 646, + 505, + 659 + ], + "score": 1.0, + "content": ". Such", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 657, + 505, + 670 + ], + "spans": [ + { + "bbox": [ + 105, + 657, + 505, + 670 + ], + "score": 1.0, + "content": "methods are often presented in the context of convex optimization problems like (2), but typically", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 668, + 506, + 681 + ], + "spans": [ + { + "bbox": [ + 105, + 668, + 506, + 681 + ], + "score": 1.0, + "content": "apply more generally to monotone inclusions such as (1). 
In the specific context of (1), each iteration", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 106, + 680, + 505, + 691 + ], + "spans": [ + { + "bbox": [ + 106, + 680, + 262, + 691 + ], + "score": 1.0, + "content": "of such a method ideally handles each", + "type": "text" + }, + { + "bbox": [ + 262, + 680, + 274, + 690 + ], + "score": 0.89, + "content": "A _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 275, + 680, + 449, + 691 + ], + "score": 1.0, + "content": "via its resolvent and the Lipschitz operator", + "type": "text" + }, + { + "bbox": [ + 450, + 680, + 459, + 689 + ], + "score": 0.82, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 459, + 680, + 505, + 691 + ], + "score": 1.0, + "content": "by explicit", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 106, + 691, + 504, + 702 + ], + "spans": [ + { + "bbox": [ + 106, + 691, + 504, + 702 + ], + "score": 1.0, + "content": "(not stochastic) evaluation. This is a feasible approach if the original problem can be decomposed in", + "type": "text" + } + ], + "index": 48 + } + ], + "index": 45.5 + } + ], + "page_idx": 3, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 105, + 711, + 504, + 732 + ], + "lines": [ + { + "bbox": [ + 119, + 709, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 119, + 709, + 366, + 723 + ], + "score": 1.0, + "content": "1Sufficient conditions for the monotonicity of (4) in the case where", + "type": "text" + }, + { + "bbox": [ + 366, + 713, + 400, + 722 + ], + "score": 0.9, + "content": "G \\neq - F", + "type": "inline_equation" + }, + { + "bbox": [ + 401, + 709, + 505, + 723 + ], + "score": 1.0, + "content": "are discussed in e.g. Scutari", + "type": "text" + } + ] + }, + { + "bbox": [ + 106, + 721, + 286, + 732 + ], + "spans": [ + { + "bbox": [ + 106, + 721, + 286, + 732 + ], + "score": 1.0, + "content": "et al. 
(2014); BriceΓ±o-Arias & Combettes (2013).", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 107, + 27, + 308, + 37 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 302, + 752, + 308, + 759 + ], + "lines": [] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 82, + 505, + 182 + ], + "lines": [ + { + "bbox": [ + 104, + 81, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 115, + 95 + ], + "score": 1.0, + "content": "If", + "type": "text" + }, + { + "bbox": [ + 116, + 83, + 153, + 93 + ], + "score": 0.9, + "content": "G = - F", + "type": "inline_equation" + }, + { + "bbox": [ + 154, + 81, + 277, + 95 + ], + "score": 1.0, + "content": ", then (3) is a min-max game. If", + "type": "text" + }, + { + "bbox": [ + 277, + 83, + 286, + 92 + ], + "score": 0.84, + "content": "F", + "type": "inline_equation" + }, + { + "bbox": [ + 287, + 81, + 353, + 95 + ], + "score": 1.0, + "content": "is also convex in", + "type": "text" + }, + { + "bbox": [ + 353, + 85, + 361, + 92 + ], + "score": 0.71, + "content": "x", + "type": "inline_equation" + }, + { + "bbox": [ + 361, + 81, + 421, + 95 + ], + "score": 1.0, + "content": "and concave in", + "type": "text" + }, + { + "bbox": [ + 421, + 84, + 428, + 94 + ], + "score": 0.69, + "content": "y", + "type": "inline_equation" + }, + { + "bbox": [ + 428, + 81, + 450, + 95 + ], + "score": 1.0, + "content": ", then", + "type": "text" + }, + { + "bbox": [ + 451, + 82, + 505, + 95 + ], + "score": 0.92, + "content": "B : ( x , y ) \\mapsto", + "type": "inline_equation" + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 90, + 504, + 108 + ], + "spans": [ + { + "bbox": [ + 106, + 93, + 221, + 106 + ], + "score": 0.89, + "content": "( \\nabla _ { x } F ( x , y ) 
, - \\nabla _ { y } F ( x , y ) ) ^ { \\top }", + "type": "inline_equation" + }, + { + "bbox": [ + 221, + 90, + 290, + 108 + ], + "score": 1.0, + "content": "is monotone1 on", + "type": "text" + }, + { + "bbox": [ + 290, + 93, + 322, + 104 + ], + "score": 0.91, + "content": "\\mathbb { R } ^ { d _ { x } + d _ { y } }", + "type": "inline_equation" + }, + { + "bbox": [ + 322, + 90, + 495, + 108 + ], + "score": 1.0, + "content": "(Rockafellar, 1970). In many applications,", + "type": "text" + }, + { + "bbox": [ + 495, + 94, + 504, + 104 + ], + "score": 0.75, + "content": "B", + "type": "inline_equation" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 104, + 506, + 118 + ], + "spans": [ + { + "bbox": [ + 104, + 104, + 506, + 118 + ], + "score": 1.0, + "content": "is also Lipschitz continuous. In this situation, (4) is a monotone inclusion involving two operators", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 115, + 506, + 128 + ], + "spans": [ + { + "bbox": [ + 107, + 116, + 116, + 126 + ], + "score": 0.82, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 116, + 115, + 134, + 128 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 134, + 116, + 162, + 127 + ], + "score": 0.92, + "content": "N _ { \\Theta \\times \\Omega }", + "type": "inline_equation" + }, + { + "bbox": [ + 162, + 115, + 185, + 128 + ], + "score": 1.0, + "content": ", with", + "type": "text" + }, + { + "bbox": [ + 186, + 116, + 195, + 126 + ], + "score": 0.79, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 195, + 115, + 506, + 128 + ], + "score": 1.0, + "content": "being Lipschitz. Using the simultaneous version of GDA on (3) is equivalent", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 126, + 506, + 139 + ], + "spans": [ + { + "bbox": [ + 105, + 126, + 506, + 139 + ], + "score": 1.0, + "content": "to applying the forward-backward method (FB) (Bauschke & Combettes, 2017, Thm. 
26.14) to (4).", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 136, + 507, + 150 + ], + "spans": [ + { + "bbox": [ + 105, + 136, + 332, + 150 + ], + "score": 1.0, + "content": "However, convergence of FB requires that the operator", + "type": "text" + }, + { + "bbox": [ + 333, + 138, + 342, + 147 + ], + "score": 0.82, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 342, + 136, + 507, + 150 + ], + "score": 1.0, + "content": "be cocoercive (Bauschke & Combettes,", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 147, + 507, + 161 + ], + "spans": [ + { + "bbox": [ + 105, + 147, + 507, + 161 + ], + "score": 1.0, + "content": "2017, Def. 4.10), and not merely Lipschitz (Bauschke & Combettes, 2017, Thm. 26.14). Thus,", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 159, + 507, + 172 + ], + "spans": [ + { + "bbox": [ + 105, + 159, + 507, + 172 + ], + "score": 1.0, + "content": "simultaneous GDA fails to converge for (3) without additional assumptions; see Gidel et al. (2019,", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 170, + 259, + 183 + ], + "spans": [ + { + "bbox": [ + 105, + 170, + 259, + 183 + ], + "score": 1.0, + "content": "Prop. 1) for a simple counterexample.", + "type": "text" + } + ], + "index": 8 + } + ], + "index": 4, + "bbox_fs": [ + 104, + 81, + 507, + 183 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 187, + 505, + 232 + ], + "lines": [ + { + "bbox": [ + 105, + 187, + 506, + 200 + ], + "spans": [ + { + "bbox": [ + 105, + 187, + 506, + 200 + ], + "score": 1.0, + "content": "Regularizers and further constraints may be imposed by adding more operators to (4). 
For example,", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 196, + 506, + 211 + ], + "spans": [ + { + "bbox": [ + 105, + 196, + 338, + 211 + ], + "score": 1.0, + "content": "if one wished to apply a (nonsmooth) convex regularizer", + "type": "text" + }, + { + "bbox": [ + 339, + 198, + 431, + 210 + ], + "score": 0.92, + "content": "r : \\bar { \\mathbb { R } } ^ { d _ { x } } \\bar { \\mathbb { R } } \\cup \\{ + \\infty \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 432, + 196, + 457, + 211 + ], + "score": 1.0, + "content": "to the", + "type": "text" + }, + { + "bbox": [ + 458, + 200, + 465, + 208 + ], + "score": 0.75, + "content": "x", + "type": "inline_equation" + }, + { + "bbox": [ + 465, + 196, + 506, + 211 + ], + "score": 1.0, + "content": "variables", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 208, + 506, + 222 + ], + "spans": [ + { + "bbox": [ + 105, + 208, + 210, + 222 + ], + "score": 1.0, + "content": "and a similar regularizer", + "type": "text" + }, + { + "bbox": [ + 211, + 208, + 309, + 221 + ], + "score": 0.9, + "content": "d : \\mathbb { R } ^ { d _ { y } } \\mathbb { R } \\cup \\{ + \\infty \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 310, + 208, + 338, + 222 + ], + "score": 1.0, + "content": "to the", + "type": "text" + }, + { + "bbox": [ + 338, + 211, + 345, + 221 + ], + "score": 0.8, + "content": "y", + "type": "inline_equation" + }, + { + "bbox": [ + 345, + 208, + 506, + 222 + ], + "score": 1.0, + "content": "variables, one would add the operator", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 220, + 343, + 233 + ], + "spans": [ + { + "bbox": [ + 106, + 220, + 227, + 232 + ], + "score": 0.92, + "content": "A _ { 2 } : ( x , y ) \\mapsto \\bar { \\partial r } ( x ) \\times \\partial d ( y )", + "type": "inline_equation" + }, + { + "bbox": [ + 227, + 220, + 343, + 233 + ], + "score": 1.0, + "content": "to the right-hand side of (4).", + "type": "text" + 
} + ], + "index": 12 + } + ], + "index": 10.5, + "bbox_fs": [ + 105, + 187, + 506, + 233 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 244, + 505, + 333 + ], + "lines": [ + { + "bbox": [ + 105, + 243, + 505, + 258 + ], + "spans": [ + { + "bbox": [ + 105, + 243, + 505, + 258 + ], + "score": 1.0, + "content": "ML applications of games Distributionally robust supervised learning (DRSL) is an emerging", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 255, + 505, + 268 + ], + "spans": [ + { + "bbox": [ + 105, + 255, + 505, + 268 + ], + "score": 1.0, + "content": "framework for improving the stability and reliability of ML models in the face of distributional", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 266, + 505, + 279 + ], + "spans": [ + { + "bbox": [ + 105, + 266, + 134, + 279 + ], + "score": 1.0, + "content": "shifts", + "type": "text" + }, + { + "bbox": [ + 134, + 266, + 147, + 276 + ], + "score": 0.26, + "content": "\\mathrm { T u }", + "type": "inline_equation" + }, + { + "bbox": [ + 147, + 266, + 505, + 279 + ], + "score": 1.0, + "content": "et al., 2021; Kuhn et al., 2019; Shafieezadeh-Abadeh et al., 2015; Sinha et al., 2018; Lin", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 277, + 505, + 290 + ], + "spans": [ + { + "bbox": [ + 105, + 277, + 505, + 290 + ], + "score": 1.0, + "content": "et al., 2020; Namkoong & Duchi, 2016). 
Common approaches to DRSL formulate the problem as", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 287, + 506, + 302 + ], + "spans": [ + { + "bbox": [ + 105, + 287, + 506, + 302 + ], + "score": 1.0, + "content": "a min-max game between a learner selecting the model parameters and an adversary selecting a", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 298, + 506, + 312 + ], + "spans": [ + { + "bbox": [ + 104, + 298, + 506, + 312 + ], + "score": 1.0, + "content": "worst-case distribution subject to some ambiguity set around the observed empirical distribution.", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 309, + 506, + 323 + ], + "spans": [ + { + "bbox": [ + 105, + 309, + 506, + 323 + ], + "score": 1.0, + "content": "This min-max problem is often further reduced to either a finite-dimensional saddlepoint problem or", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 322, + 235, + 333 + ], + "spans": [ + { + "bbox": [ + 105, + 322, + 235, + 333 + ], + "score": 1.0, + "content": "a convex optimization problem.", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 16.5, + "bbox_fs": [ + 104, + 243, + 506, + 333 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 338, + 505, + 405 + ], + "lines": [ + { + "bbox": [ + 105, + 337, + 505, + 351 + ], + "spans": [ + { + "bbox": [ + 105, + 337, + 505, + 351 + ], + "score": 1.0, + "content": "DRSL is a source of games with multiple constraints/regularizers. One such formulation, based on", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 348, + 505, + 362 + ], + "spans": [ + { + "bbox": [ + 105, + 348, + 505, + 362 + ], + "score": 1.0, + "content": "Yu et al. (2021), is discussed in the experiments below. 
The work in Namkoong & Duchi (2016) uses", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 360, + 506, + 372 + ], + "spans": [ + { + "bbox": [ + 105, + 360, + 210, + 372 + ], + "score": 1.0, + "content": "an ambiguity set based on", + "type": "text" + }, + { + "bbox": [ + 211, + 361, + 218, + 372 + ], + "score": 0.86, + "content": "f", + "type": "inline_equation" + }, + { + "bbox": [ + 218, + 360, + 506, + 372 + ], + "score": 1.0, + "content": "-divergences, while Sinha et al. (2018) introduce a Lagrangian relaxation", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 371, + 506, + 383 + ], + "spans": [ + { + "bbox": [ + 105, + 371, + 506, + 383 + ], + "score": 1.0, + "content": "of the Wasserstein ball. When applied to models utilizing multiple regularizers (Jacob et al., 2009;", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 381, + 505, + 394 + ], + "spans": [ + { + "bbox": [ + 105, + 381, + 505, + 394 + ], + "score": 1.0, + "content": "Richard et al., 2012; Yan & Bien, 2020), both of these approaches lead to min-max problems with", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 392, + 195, + 406 + ], + "spans": [ + { + "bbox": [ + 105, + 392, + 195, + 406 + ], + "score": 1.0, + "content": "multiple regularizers.", + "type": "text" + } + ], + "index": 26 + } + ], + "index": 23.5, + "bbox_fs": [ + 105, + 337, + 506, + 406 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 409, + 505, + 454 + ], + "lines": [ + { + "bbox": [ + 106, + 409, + 506, + 423 + ], + "spans": [ + { + "bbox": [ + 106, + 409, + 506, + 423 + ], + "score": 1.0, + "content": "Other applications of games in ML, although typically nonconvex, include generative adversarial net-", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 420, + 506, + 433 + ], + "spans": [ + { + "bbox": [ + 105, + 420, + 506, + 433 + ], + "score": 1.0, + "content": "works (GANs) (Goodfellow et al., 2014; Arjovsky et 
al., 2017; Loizou et al., 2020; 2021; Mishchenko", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 432, + 506, + 444 + ], + "spans": [ + { + "bbox": [ + 105, + 432, + 506, + 444 + ], + "score": 1.0, + "content": "et al., 2020), fair classification (Wadsworth et al., 2018; Zhang et al., 2018; Edwards & Storkey, 2015;", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 106, + 442, + 387, + 455 + ], + "spans": [ + { + "bbox": [ + 106, + 442, + 387, + 455 + ], + "score": 1.0, + "content": "Celis & Keswani, 2019), and adversarial privacy (Huang et al., 2017).", + "type": "text" + } + ], + "index": 30 + } + ], + "index": 28.5, + "bbox_fs": [ + 105, + 409, + 506, + 455 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 466, + 505, + 546 + ], + "lines": [ + { + "bbox": [ + 106, + 467, + 506, + 479 + ], + "spans": [ + { + "bbox": [ + 106, + 467, + 506, + 479 + ], + "score": 1.0, + "content": "Resolvents, proximal operators, and projections A fundamental computational primitive for", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 106, + 478, + 505, + 489 + ], + "spans": [ + { + "bbox": [ + 106, + 478, + 443, + 489 + ], + "score": 1.0, + "content": "solving monotone inclusions is the resolvent. 
The resolvent of a monotone operator", + "type": "text" + }, + { + "bbox": [ + 444, + 478, + 452, + 488 + ], + "score": 0.81, + "content": "A", + "type": "inline_equation" + }, + { + "bbox": [ + 453, + 478, + 505, + 489 + ], + "score": 1.0, + "content": "is defined to", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 488, + 505, + 502 + ], + "spans": [ + { + "bbox": [ + 105, + 488, + 119, + 502 + ], + "score": 1.0, + "content": "be", + "type": "text" + }, + { + "bbox": [ + 119, + 488, + 190, + 500 + ], + "score": 0.91, + "content": "J _ { A } \\overset { \\cdot } { = } ( I + A ) ^ { - 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 190, + 488, + 222, + 502 + ], + "score": 1.0, + "content": ", where", + "type": "text" + }, + { + "bbox": [ + 222, + 489, + 229, + 499 + ], + "score": 0.78, + "content": "I", + "type": "inline_equation" + }, + { + "bbox": [ + 230, + 488, + 455, + 502 + ], + "score": 1.0, + "content": "is the identity operator and the inverse of any operator", + "type": "text" + }, + { + "bbox": [ + 456, + 489, + 464, + 499 + ], + "score": 0.83, + "content": "T", + "type": "inline_equation" + }, + { + "bbox": [ + 465, + 488, + 505, + 502 + ], + "score": 1.0, + "content": "is simply", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 106, + 498, + 506, + 514 + ], + "spans": [ + { + "bbox": [ + 106, + 500, + 215, + 512 + ], + "score": 0.89, + "content": "T ^ { - 1 } : x \\mapsto \\{ y : T y \\ni x \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 215, + 498, + 230, + 514 + ], + "score": 1.0, + "content": ". 
If", + "type": "text" + }, + { + "bbox": [ + 231, + 500, + 239, + 510 + ], + "score": 0.81, + "content": "A", + "type": "inline_equation" + }, + { + "bbox": [ + 240, + 498, + 388, + 514 + ], + "score": 1.0, + "content": "is maximal monotone, then for any", + "type": "text" + }, + { + "bbox": [ + 388, + 500, + 414, + 511 + ], + "score": 0.87, + "content": "\\rho > 0", + "type": "inline_equation" + }, + { + "bbox": [ + 414, + 498, + 418, + 514 + ], + "score": 1.0, + "content": ",", + "type": "text" + }, + { + "bbox": [ + 419, + 500, + 436, + 512 + ], + "score": 0.85, + "content": "J _ { \\rho A }", + "type": "inline_equation" + }, + { + "bbox": [ + 436, + 498, + 506, + 514 + ], + "score": 1.0, + "content": "is single valued,", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 104, + 510, + 507, + 526 + ], + "spans": [ + { + "bbox": [ + 104, + 510, + 271, + 526 + ], + "score": 1.0, + "content": "nonexpansive, and has domain equal to", + "type": "text" + }, + { + "bbox": [ + 271, + 511, + 284, + 522 + ], + "score": 0.88, + "content": "\\mathbb { R } ^ { d }", + "type": "inline_equation" + }, + { + "bbox": [ + 285, + 510, + 507, + 526 + ], + "score": 1.0, + "content": "(Bauschke & Combettes, 2017, Thm. 21.1 and Prop.", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 521, + 506, + 537 + ], + "spans": [ + { + "bbox": [ + 105, + 521, + 506, + 537 + ], + "score": 1.0, + "content": "23.8). 
Resolvents generalize proximal operators of convex functions: the proximal operator of a CCP", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 533, + 160, + 547 + ], + "spans": [ + { + "bbox": [ + 105, + 533, + 142, + 547 + ], + "score": 1.0, + "content": "function", + "type": "text" + }, + { + "bbox": [ + 142, + 534, + 149, + 545 + ], + "score": 0.85, + "content": "f", + "type": "inline_equation" + }, + { + "bbox": [ + 150, + 533, + 160, + 547 + ], + "score": 1.0, + "content": "is", + "type": "text" + } + ], + "index": 37 + } + ], + "index": 34, + "bbox_fs": [ + 104, + 467, + 507, + 547 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 204, + 550, + 406, + 572 + ], + "lines": [ + { + "bbox": [ + 204, + 550, + 406, + 572 + ], + "spans": [ + { + "bbox": [ + 204, + 550, + 406, + 572 + ], + "score": 0.9, + "content": "\\operatorname { p r o x } _ { \\rho f } ( t ) \\doteq \\underset { x \\in \\mathbb { R } ^ { d } } { \\arg \\operatorname* { m i n } } \\left\\{ \\rho f ( x ) + ( 1 / 2 ) \\| x - t \\| ^ { 2 } \\right\\} .", + "type": "interline_equation", + "image_path": "0975033848d3dbfd934ac0caaff44d0d137a8ac48e7b915b93976889346d26b3.jpg" + } + ] + } + ], + "index": 38, + "virtual_lines": [ + { + "bbox": [ + 204, + 550, + 406, + 572 + ], + "spans": [], + "index": 38 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 578, + 506, + 623 + ], + "lines": [ + { + "bbox": [ + 104, + 577, + 506, + 592 + ], + "spans": [ + { + "bbox": [ + 104, + 577, + 201, + 592 + ], + "score": 1.0, + "content": "It is easily proved that", + "type": "text" + }, + { + "bbox": [ + 201, + 579, + 263, + 592 + ], + "score": 0.89, + "content": "\\mathrm { p r o x } _ { \\rho f } = \\underset { - } { J } _ { \\rho \\partial f }", + "type": "inline_equation" + }, + { + "bbox": [ + 264, + 577, + 506, + 592 + ], + "score": 1.0, + "content": ". 
Like proximal operators, resolvents generalize projection", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 104, + 588, + 508, + 604 + ], + "spans": [ + { + "bbox": [ + 104, + 588, + 187, + 604 + ], + "score": 1.0, + "content": "onto convex sets: if", + "type": "text" + }, + { + "bbox": [ + 187, + 590, + 216, + 601 + ], + "score": 0.91, + "content": "f = \\iota _ { \\mathcal { C } }", + "type": "inline_equation" + }, + { + "bbox": [ + 216, + 588, + 240, + 604 + ], + "score": 1.0, + "content": ", then", + "type": "text" + }, + { + "bbox": [ + 240, + 590, + 338, + 603 + ], + "score": 0.91, + "content": "J _ { \\rho N _ { C } } = \\mathrm { p r o x } _ { \\rho f } = \\mathrm { p r o j } _ { \\mathcal { C } }", + "type": "inline_equation" + }, + { + "bbox": [ + 339, + 588, + 371, + 604 + ], + "score": 1.0, + "content": "for any", + "type": "text" + }, + { + "bbox": [ + 371, + 590, + 396, + 601 + ], + "score": 0.9, + "content": "\\rho > 0", + "type": "inline_equation" + }, + { + "bbox": [ + 396, + 588, + 508, + 604 + ], + "score": 1.0, + "content": ". In many ML applications,", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 106, + 601, + 506, + 613 + ], + "spans": [ + { + "bbox": [ + 106, + 601, + 506, + 613 + ], + "score": 1.0, + "content": "proximal operators, and hence resolvents, are relatively straightforward to compute. For examples,", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 611, + 245, + 624 + ], + "spans": [ + { + "bbox": [ + 105, + 611, + 245, + 624 + ], + "score": 1.0, + "content": "see Parikh & Boyd (2013, Sec. 
6).", + "type": "text" + } + ], + "index": 42 + } + ], + "index": 40.5, + "bbox_fs": [ + 104, + 577, + 508, + 624 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 635, + 505, + 703 + ], + "lines": [ + { + "bbox": [ + 105, + 635, + 506, + 648 + ], + "spans": [ + { + "bbox": [ + 105, + 635, + 506, + 648 + ], + "score": 1.0, + "content": "Operator splitting methods Operator splitting methods attempt to solve monotone inclusions such", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 646, + 505, + 659 + ], + "spans": [ + { + "bbox": [ + 105, + 646, + 417, + 659 + ], + "score": 1.0, + "content": "as (1) by a sequence of operations that each involve only one of the operators", + "type": "text" + }, + { + "bbox": [ + 418, + 647, + 478, + 658 + ], + "score": 0.91, + "content": "A _ { 1 } , \\ldots , A _ { n } , B", + "type": "inline_equation" + }, + { + "bbox": [ + 478, + 646, + 505, + 659 + ], + "score": 1.0, + "content": ". Such", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 657, + 505, + 670 + ], + "spans": [ + { + "bbox": [ + 105, + 657, + 505, + 670 + ], + "score": 1.0, + "content": "methods are often presented in the context of convex optimization problems like (2), but typically", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 668, + 506, + 681 + ], + "spans": [ + { + "bbox": [ + 105, + 668, + 506, + 681 + ], + "score": 1.0, + "content": "apply more generally to monotone inclusions such as (1). 
In the specific context of (1), each iteration", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 106, + 680, + 505, + 691 + ], + "spans": [ + { + "bbox": [ + 106, + 680, + 262, + 691 + ], + "score": 1.0, + "content": "of such a method ideally handles each", + "type": "text" + }, + { + "bbox": [ + 262, + 680, + 274, + 690 + ], + "score": 0.89, + "content": "A _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 275, + 680, + 449, + 691 + ], + "score": 1.0, + "content": "via its resolvent and the Lipschitz operator", + "type": "text" + }, + { + "bbox": [ + 450, + 680, + 459, + 689 + ], + "score": 0.82, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 459, + 680, + 505, + 691 + ], + "score": 1.0, + "content": "by explicit", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 106, + 691, + 504, + 702 + ], + "spans": [ + { + "bbox": [ + 106, + 691, + 504, + 702 + ], + "score": 1.0, + "content": "(not stochastic) evaluation. This is a feasible approach if the original problem can be decomposed in", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 106, + 82, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 82, + 258, + 95 + ], + "score": 1.0, + "content": "such a way that the resolvents of each", + "type": "text", + "cross_page": true + }, + { + "bbox": [ + 259, + 83, + 271, + 93 + ], + "score": 0.89, + "content": "A _ { i }", + "type": "inline_equation", + "cross_page": true + }, + { + "bbox": [ + 271, + 82, + 504, + 95 + ], + "score": 1.0, + "content": "are relatively inexpensive to compute, and full evaluations", + "type": "text", + "cross_page": true + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 94, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 106, + 94, + 117, + 106 + ], + "score": 1.0, + "content": "of", + "type": "text", + "cross_page": true + }, + { + "bbox": [ + 117, + 94, + 127, + 104 + ], + "score": 0.79, + "content": "B", + "type": "inline_equation", + "cross_page": true + }, + 
{ + "bbox": [ + 127, + 94, + 505, + 106 + ], + "score": 1.0, + "content": "are possible. Although not discussed here, more general formulations in which matrices couple", + "type": "text", + "cross_page": true + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 104, + 468, + 118 + ], + "spans": [ + { + "bbox": [ + 106, + 104, + 468, + 118 + ], + "score": 1.0, + "content": "the arguments of the operators can broaden the applicability of operator splitting methods.", + "type": "text", + "cross_page": true + } + ], + "index": 2 + } + ], + "index": 45.5, + "bbox_fs": [ + 105, + 635, + 506, + 702 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 82, + 505, + 117 + ], + "lines": [ + { + "bbox": [ + 106, + 82, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 82, + 258, + 95 + ], + "score": 1.0, + "content": "such a way that the resolvents of each", + "type": "text" + }, + { + "bbox": [ + 259, + 83, + 271, + 93 + ], + "score": 0.89, + "content": "A _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 271, + 82, + 504, + 95 + ], + "score": 1.0, + "content": "are relatively inexpensive to compute, and full evaluations", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 94, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 106, + 94, + 117, + 106 + ], + "score": 1.0, + "content": "of", + "type": "text" + }, + { + "bbox": [ + 117, + 94, + 127, + 104 + ], + "score": 0.79, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 127, + 94, + 505, + 106 + ], + "score": 1.0, + "content": "are possible. 
Although not discussed here, more general formulations in which matrices couple", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 104, + 468, + 118 + ], + "spans": [ + { + "bbox": [ + 106, + 104, + 468, + 118 + ], + "score": 1.0, + "content": "the arguments of the operators can broaden the applicability of operator splitting methods.", + "type": "text" + } + ], + "index": 2 + } + ], + "index": 1 + }, + { + "type": "title", + "bbox": [ + 106, + 131, + 345, + 145 + ], + "lines": [ + { + "bbox": [ + 104, + 130, + 345, + 146 + ], + "spans": [ + { + "bbox": [ + 104, + 130, + 345, + 146 + ], + "score": 1.0, + "content": "3 THE PROJECTIVE SPLITTING FRAMEWORK", + "type": "text" + } + ], + "index": 3 + } + ], + "index": 3 + }, + { + "type": "text", + "bbox": [ + 106, + 155, + 505, + 179 + ], + "lines": [ + { + "bbox": [ + 105, + 155, + 505, + 170 + ], + "spans": [ + { + "bbox": [ + 105, + 155, + 505, + 170 + ], + "score": 1.0, + "content": "Before introducing our proposed method, we give a brief introduction to the projective splitting class", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 167, + 156, + 179 + ], + "spans": [ + { + "bbox": [ + 106, + 167, + 156, + 179 + ], + "score": 1.0, + "content": "of methods.", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 4.5 + }, + { + "type": "text", + "bbox": [ + 106, + 190, + 505, + 224 + ], + "lines": [ + { + "bbox": [ + 105, + 190, + 506, + 204 + ], + "spans": [ + { + "bbox": [ + 105, + 190, + 506, + 204 + ], + "score": 1.0, + "content": "The extended solution set Projective splitting is a primal-dual framework and operates in an", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 202, + 505, + 213 + ], + "spans": [ + { + "bbox": [ + 106, + 202, + 505, + 213 + ], + "score": 1.0, + "content": "extended space of primal and dual variables. 
Rather than directly finding a solution to (1), we find a", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 213, + 326, + 225 + ], + "spans": [ + { + "bbox": [ + 105, + 213, + 326, + 225 + ], + "score": 1.0, + "content": "point in the extended solution set (or Kuhn-Tucker set)", + "type": "text" + } + ], + "index": 8 + } + ], + "index": 7 + }, + { + "type": "interline_equation", + "bbox": [ + 139, + 228, + 471, + 250 + ], + "lines": [ + { + "bbox": [ + 139, + 228, + 471, + 250 + ], + "spans": [ + { + "bbox": [ + 139, + 228, + 471, + 250 + ], + "score": 0.91, + "content": "\\begin{array} { r } { \\mathcal { S } \\doteq \\left\\{ ( z , w _ { 1 } , \\ldots , w _ { n + 1 } ) \\ \\middle | \\ w _ { i } \\in A _ { i } ( z ) \\forall i \\in 1 . . n , w _ { n + 1 } = B ( z ) , \\sum _ { i = 1 } ^ { n + 1 } w _ { i } = 0 \\right\\} . } \\end{array}", + "type": "interline_equation", + "image_path": "79069065cd35ebc2f3c96ad607dc0153687512f38137f08b268f350ae363cd2c.jpg" + } + ] + } + ], + "index": 9, + "virtual_lines": [ + { + "bbox": [ + 139, + 228, + 471, + 250 + ], + "spans": [], + "index": 9 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 252, + 506, + 320 + ], + "lines": [ + { + "bbox": [ + 105, + 251, + 506, + 268 + ], + "spans": [ + { + "bbox": [ + 105, + 251, + 132, + 268 + ], + "score": 1.0, + "content": "Given", + "type": "text" + }, + { + "bbox": [ + 133, + 253, + 252, + 266 + ], + "score": 0.91, + "content": "p ^ { * } = ( z ^ { * } , w _ { 1 } ^ { * } \\ldots , w _ { n + 1 } ^ { * } ) \\in \\mathcal { S }", + "type": "inline_equation" + }, + { + "bbox": [ + 252, + 251, + 377, + 268 + ], + "score": 1.0, + "content": ", it is straightforward to see that", + "type": "text" + }, + { + "bbox": [ + 377, + 254, + 388, + 263 + ], + "score": 0.87, + "content": "z ^ { * }", + "type": "inline_equation" + }, + { + "bbox": [ + 388, + 251, + 506, + 268 + ], + "score": 1.0, + "content": "solves (1). 
Conversely, given", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 262, + 507, + 280 + ], + "spans": [ + { + "bbox": [ + 104, + 262, + 150, + 280 + ], + "score": 1.0, + "content": "a solution", + "type": "text" + }, + { + "bbox": [ + 150, + 266, + 160, + 274 + ], + "score": 0.84, + "content": "z ^ { * }", + "type": "inline_equation" + }, + { + "bbox": [ + 161, + 262, + 260, + 280 + ], + "score": 1.0, + "content": "to (1), there must exist", + "type": "text" + }, + { + "bbox": [ + 261, + 265, + 318, + 277 + ], + "score": 0.91, + "content": "w _ { 1 } ^ { * } , \\ldots , w _ { n + 1 } ^ { * }", + "type": "inline_equation" + }, + { + "bbox": [ + 318, + 262, + 361, + 280 + ], + "score": 1.0, + "content": "such that", + "type": "text" + }, + { + "bbox": [ + 361, + 264, + 461, + 277 + ], + "score": 0.93, + "content": "( z ^ { \\ast } , w _ { 1 } ^ { \\ast } , \\dots , w _ { n + 1 } ^ { \\ast } ) \\in \\mathcal { S }", + "type": "inline_equation" + }, + { + "bbox": [ + 462, + 262, + 507, + 280 + ], + "score": 1.0, + "content": ". Suppose", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 273, + 508, + 290 + ], + "spans": [ + { + "bbox": [ + 106, + 275, + 227, + 287 + ], + "score": 0.89, + "content": "p ^ { * } = ( z ^ { * } , w _ { 1 } ^ { * } \\ldots , w _ { n + 1 } ^ { * } ) \\in \\mathcal { S }", + "type": "inline_equation" + }, + { + "bbox": [ + 228, + 273, + 257, + 290 + ], + "score": 1.0, + "content": ". 
Since", + "type": "text" + }, + { + "bbox": [ + 258, + 276, + 268, + 285 + ], + "score": 0.84, + "content": "z ^ { * }", + "type": "inline_equation" + }, + { + "bbox": [ + 268, + 273, + 315, + 290 + ], + "score": 1.0, + "content": "solves (1),", + "type": "text" + }, + { + "bbox": [ + 315, + 276, + 326, + 285 + ], + "score": 0.83, + "content": "z ^ { * }", + "type": "inline_equation" + }, + { + "bbox": [ + 326, + 273, + 508, + 290 + ], + "score": 1.0, + "content": "is typically referred to as a primal solution.", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 285, + 507, + 300 + ], + "spans": [ + { + "bbox": [ + 104, + 285, + 156, + 300 + ], + "score": 1.0, + "content": "The vectors", + "type": "text" + }, + { + "bbox": [ + 156, + 288, + 213, + 299 + ], + "score": 0.9, + "content": "w _ { 1 } ^ { * } , \\ldots , w _ { n + 1 } ^ { * }", + "type": "inline_equation" + }, + { + "bbox": [ + 213, + 285, + 507, + 300 + ], + "score": 1.0, + "content": "solve a dual inclusion not described here, and are therefore called a dual", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 297, + 506, + 309 + ], + "spans": [ + { + "bbox": [ + 106, + 297, + 224, + 309 + ], + "score": 1.0, + "content": "solution. 
It can be shown that", + "type": "text" + }, + { + "bbox": [ + 225, + 298, + 232, + 307 + ], + "score": 0.82, + "content": "s", + "type": "inline_equation" + }, + { + "bbox": [ + 233, + 297, + 425, + 309 + ], + "score": 1.0, + "content": "is closed and convex; see for example Johnstone", + "type": "text" + }, + { + "bbox": [ + 425, + 298, + 434, + 307 + ], + "score": 0.31, + "content": "\\&", + "type": "inline_equation" + }, + { + "bbox": [ + 434, + 297, + 506, + 309 + ], + "score": 1.0, + "content": "Eckstein (2020b).", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 306, + 458, + 322 + ], + "spans": [ + { + "bbox": [ + 104, + 306, + 395, + 322 + ], + "score": 1.0, + "content": "We will assume throughout that a solution to (1) exists, therefore the set", + "type": "text" + }, + { + "bbox": [ + 395, + 309, + 403, + 318 + ], + "score": 0.82, + "content": "s", + "type": "inline_equation" + }, + { + "bbox": [ + 403, + 306, + 458, + 322 + ], + "score": 1.0, + "content": "is nonempty.", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 12.5 + }, + { + "type": "text", + "bbox": [ + 106, + 330, + 505, + 398 + ], + "lines": [ + { + "bbox": [ + 105, + 331, + 505, + 344 + ], + "spans": [ + { + "bbox": [ + 105, + 331, + 505, + 344 + ], + "score": 1.0, + "content": "Separator-projection framework Projective splitting methods are instances of the general", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 343, + 505, + 354 + ], + "spans": [ + { + "bbox": [ + 105, + 343, + 467, + 354 + ], + "score": 1.0, + "content": "separator-projection algorithmic framework for locating a member of a closed convex set", + "type": "text" + }, + { + "bbox": [ + 468, + 343, + 476, + 352 + ], + "score": 0.81, + "content": "s", + "type": "inline_equation" + }, + { + "bbox": [ + 476, + 343, + 505, + 354 + ], + "score": 1.0, + "content": "within", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 352, + 506, + 367 + ], + 
"spans": [ + { + "bbox": [ + 104, + 352, + 164, + 367 + ], + "score": 1.0, + "content": "a linear space", + "type": "text" + }, + { + "bbox": [ + 165, + 354, + 173, + 363 + ], + "score": 0.84, + "content": "\\mathcal { P }", + "type": "inline_equation" + }, + { + "bbox": [ + 173, + 352, + 237, + 367 + ], + "score": 1.0, + "content": ". Each iteration", + "type": "text" + }, + { + "bbox": [ + 238, + 354, + 244, + 363 + ], + "score": 0.81, + "content": "k", + "type": "inline_equation" + }, + { + "bbox": [ + 245, + 352, + 506, + 367 + ], + "score": 1.0, + "content": "of algorithms drawn from this framework operates by finding a", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 106, + 364, + 505, + 376 + ], + "spans": [ + { + "bbox": [ + 106, + 365, + 120, + 376 + ], + "score": 1.0, + "content": "set", + "type": "text" + }, + { + "bbox": [ + 121, + 365, + 135, + 375 + ], + "score": 0.88, + "content": "H _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 135, + 365, + 270, + 376 + ], + "score": 1.0, + "content": "that separates the current iterate", + "type": "text" + }, + { + "bbox": [ + 270, + 364, + 302, + 376 + ], + "score": 0.92, + "content": "p ^ { k } \\in \\mathcal { P }", + "type": "inline_equation" + }, + { + "bbox": [ + 302, + 365, + 326, + 376 + ], + "score": 1.0, + "content": "from", + "type": "text" + }, + { + "bbox": [ + 326, + 365, + 334, + 374 + ], + "score": 0.78, + "content": "s", + "type": "inline_equation" + }, + { + "bbox": [ + 335, + 365, + 394, + 376 + ], + "score": 1.0, + "content": ", meaning that", + "type": "text" + }, + { + "bbox": [ + 395, + 365, + 403, + 374 + ], + "score": 0.81, + "content": "s", + "type": "inline_equation" + }, + { + "bbox": [ + 403, + 365, + 505, + 376 + ], + "score": 1.0, + "content": "is entirely in the set and", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 107, + 374, + 506, + 389 + ], + "spans": [ + { + "bbox": [ + 107, + 375, + 117, + 387 + ], + "score": 0.89, + "content": 
"p ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 118, + 374, + 344, + 389 + ], + "score": 1.0, + "content": "typically is not. One then attempts to β€œmove closer\" to", + "type": "text" + }, + { + "bbox": [ + 345, + 376, + 353, + 385 + ], + "score": 0.82, + "content": "s", + "type": "inline_equation" + }, + { + "bbox": [ + 353, + 374, + 426, + 389 + ], + "score": 1.0, + "content": "by projecting the", + "type": "text" + }, + { + "bbox": [ + 426, + 375, + 437, + 387 + ], + "score": 0.9, + "content": "p ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 438, + 374, + 459, + 389 + ], + "score": 1.0, + "content": "onto", + "type": "text" + }, + { + "bbox": [ + 460, + 375, + 474, + 386 + ], + "score": 0.88, + "content": "H _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 474, + 374, + 506, + 389 + ], + "score": 1.0, + "content": ". In the", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 104, + 386, + 505, + 399 + ], + "spans": [ + { + "bbox": [ + 104, + 386, + 473, + 399 + ], + "score": 1.0, + "content": "particular case of projective splitting applied to the problem (1) using (5), we select the space", + "type": "text" + }, + { + "bbox": [ + 473, + 387, + 482, + 396 + ], + "score": 0.85, + "content": "\\mathcal { P }", + "type": "inline_equation" + }, + { + "bbox": [ + 482, + 386, + 505, + 399 + ], + "score": 1.0, + "content": "to be", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 18.5 + }, + { + "type": "interline_equation", + "bbox": [ + 192, + 401, + 417, + 423 + ], + "lines": [ + { + "bbox": [ + 192, + 401, + 417, + 423 + ], + "spans": [ + { + "bbox": [ + 192, + 401, + 417, + 423 + ], + "score": 0.92, + "content": "\\begin{array} { r } { \\mathcal { P } \\doteq \\left\\{ ( z , w _ { 1 } , \\ldots , w _ { n + 1 } ) \\in \\mathbb { R } ^ { ( n + 2 ) d } \\ \\Big | \\ \\sum _ { i = 1 } ^ { n + 1 } w _ { i } = 0 \\right\\} , } \\end{array}", + "type": "interline_equation", + "image_path": 
"a1095377e5ae298c91988ac15955b56f07116ecc2f868c284cdf4c1a9a693f68.jpg" + } + ] + } + ], + "index": 22, + "virtual_lines": [ + { + "bbox": [ + 192, + 401, + 417, + 423 + ], + "spans": [], + "index": 22 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 426, + 505, + 460 + ], + "lines": [ + { + "bbox": [ + 105, + 425, + 506, + 440 + ], + "spans": [ + { + "bbox": [ + 105, + 425, + 199, + 440 + ], + "score": 1.0, + "content": "and each separating set", + "type": "text" + }, + { + "bbox": [ + 199, + 427, + 213, + 438 + ], + "score": 0.91, + "content": "H _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 214, + 425, + 293, + 440 + ], + "score": 1.0, + "content": "to be the half space", + "type": "text" + }, + { + "bbox": [ + 294, + 426, + 380, + 439 + ], + "score": 0.91, + "content": "\\{ p \\in { \\mathcal { P } } \\mid \\varphi _ { k } ( p ) \\leq 0 \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 380, + 425, + 506, + 440 + ], + "score": 1.0, + "content": "generated by an affine function", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 107, + 437, + 506, + 451 + ], + "spans": [ + { + "bbox": [ + 107, + 438, + 158, + 449 + ], + "score": 0.9, + "content": "\\varphi _ { k } : \\mathscr { P } \\mathbb { R }", + "type": "inline_equation" + }, + { + "bbox": [ + 158, + 437, + 309, + 451 + ], + "score": 1.0, + "content": ". 
The general intention is to construct", + "type": "text" + }, + { + "bbox": [ + 309, + 440, + 322, + 449 + ], + "score": 0.85, + "content": "\\varphi _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 322, + 437, + 361, + 451 + ], + "score": 1.0, + "content": "such that", + "type": "text" + }, + { + "bbox": [ + 362, + 438, + 410, + 450 + ], + "score": 0.91, + "content": "\\varphi _ { k } \\tilde { ( p ^ { k } ) } > 0", + "type": "inline_equation" + }, + { + "bbox": [ + 410, + 437, + 429, + 451 + ], + "score": 1.0, + "content": ", but", + "type": "text" + }, + { + "bbox": [ + 429, + 438, + 477, + 450 + ], + "score": 0.93, + "content": "\\varphi _ { k } ( p ^ { * } ) \\leq 0", + "type": "inline_equation" + }, + { + "bbox": [ + 478, + 437, + 506, + 451 + ], + "score": 1.0, + "content": "for all", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 107, + 448, + 438, + 461 + ], + "spans": [ + { + "bbox": [ + 107, + 450, + 136, + 460 + ], + "score": 0.89, + "content": "p ^ { * } \\in { \\mathcal { S } }", + "type": "inline_equation" + }, + { + "bbox": [ + 136, + 448, + 266, + 461 + ], + "score": 1.0, + "content": ". 
The construction employed for", + "type": "text" + }, + { + "bbox": [ + 266, + 450, + 279, + 460 + ], + "score": 0.85, + "content": "\\varphi _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 279, + 448, + 438, + 461 + ], + "score": 1.0, + "content": "in the case of (1) and (5) is of the form", + "type": "text" + } + ], + "index": 25 + } + ], + "index": 24 + }, + { + "type": "interline_equation", + "bbox": [ + 205, + 464, + 405, + 480 + ], + "lines": [ + { + "bbox": [ + 205, + 464, + 405, + 480 + ], + "spans": [ + { + "bbox": [ + 205, + 464, + 405, + 480 + ], + "score": 0.9, + "content": "\\begin{array} { r } { \\varphi _ { k } ( z , w _ { 1 } , \\ldots , w _ { n + 1 } ) \\doteq \\sum _ { i = 1 } ^ { n + 1 } \\langle z - x _ { i } ^ { k } , y _ { i } ^ { k } - w _ { i } \\rangle } \\end{array}", + "type": "interline_equation", + "image_path": "d98303aa83501011e691713f8aca89e9bf4bd1b0fcade3c1504fc37c2ae25db5.jpg" + } + ] + } + ], + "index": 26, + "virtual_lines": [ + { + "bbox": [ + 205, + 464, + 405, + 480 + ], + "spans": [], + "index": 26 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 484, + 505, + 529 + ], + "lines": [ + { + "bbox": [ + 104, + 482, + 506, + 499 + ], + "spans": [ + { + "bbox": [ + 104, + 482, + 174, + 499 + ], + "score": 1.0, + "content": "for some points", + "type": "text" + }, + { + "bbox": [ + 174, + 484, + 237, + 497 + ], + "score": 0.89, + "content": "( x _ { i } ^ { k } , y _ { i } ^ { k } ) \\in \\mathbb { R } ^ { 2 d }", + "type": "inline_equation" + }, + { + "bbox": [ + 237, + 482, + 242, + 499 + ], + "score": 1.0, + "content": ",", + "type": "text" + }, + { + "bbox": [ + 242, + 484, + 302, + 497 + ], + "score": 0.9, + "content": "i \\in { 1 . . ( n + 1 ) }", + "type": "inline_equation" + }, + { + "bbox": [ + 303, + 482, + 506, + 499 + ], + "score": 1.0, + "content": ", that must be carefully chosen (see below). 
Any", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 106, + 496, + 505, + 508 + ], + "spans": [ + { + "bbox": [ + 106, + 496, + 387, + 508 + ], + "score": 1.0, + "content": "function of the form (7) can be shown to be affine when restricted to", + "type": "text" + }, + { + "bbox": [ + 388, + 497, + 396, + 506 + ], + "score": 0.83, + "content": "\\mathcal { P }", + "type": "inline_equation" + }, + { + "bbox": [ + 397, + 496, + 505, + 508 + ], + "score": 1.0, + "content": ". As mentioned above, the", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 104, + 506, + 506, + 519 + ], + "spans": [ + { + "bbox": [ + 104, + 506, + 356, + 519 + ], + "score": 1.0, + "content": "standard separator-projection algorithm obtains its next iterate", + "type": "text" + }, + { + "bbox": [ + 356, + 506, + 377, + 519 + ], + "score": 0.92, + "content": "p ^ { k + 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 377, + 506, + 433, + 519 + ], + "score": 1.0, + "content": "by projecting", + "type": "text" + }, + { + "bbox": [ + 434, + 506, + 444, + 519 + ], + "score": 0.89, + "content": "p ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 445, + 506, + 466, + 519 + ], + "score": 1.0, + "content": "onto", + "type": "text" + }, + { + "bbox": [ + 466, + 507, + 480, + 518 + ], + "score": 0.89, + "content": "H _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 480, + 506, + 506, + 519 + ], + "score": 1.0, + "content": ". 
This", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 106, + 518, + 385, + 530 + ], + "spans": [ + { + "bbox": [ + 106, + 518, + 385, + 530 + ], + "score": 1.0, + "content": "calculation involves the usual projection step for a half space, namely", + "type": "text" + } + ], + "index": 30 + } + ], + "index": 28.5 + }, + { + "type": "interline_equation", + "bbox": [ + 186, + 533, + 424, + 548 + ], + "lines": [ + { + "bbox": [ + 186, + 533, + 424, + 548 + ], + "spans": [ + { + "bbox": [ + 186, + 533, + 424, + 548 + ], + "score": 0.89, + "content": "p ^ { k + 1 } = p ^ { k } - \\alpha _ { k } \\nabla \\varphi _ { k } , \\quad \\mathrm { ~ w h e r e ~ } \\quad \\alpha _ { k } = \\varphi _ { k } ( p ^ { k } ) / \\| \\nabla \\varphi _ { k } \\| ^ { 2 } ,", + "type": "interline_equation", + "image_path": "f451a3c0c9822fe7b123aa3e52a07a6e8507a51f6d3a70528a872e52a6055a3d.jpg" + } + ] + } + ], + "index": 31, + "virtual_lines": [ + { + "bbox": [ + 186, + 533, + 424, + 548 + ], + "spans": [], + "index": 31 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 553, + 505, + 583 + ], + "lines": [ + { + "bbox": [ + 104, + 550, + 505, + 568 + ], + "spans": [ + { + "bbox": [ + 104, + 550, + 180, + 568 + ], + "score": 1.0, + "content": "and the gradient", + "type": "text" + }, + { + "bbox": [ + 180, + 554, + 201, + 565 + ], + "score": 0.89, + "content": "\\nabla \\varphi _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 201, + 550, + 305, + 568 + ], + "score": 1.0, + "content": "is computed relative to", + "type": "text" + }, + { + "bbox": [ + 306, + 554, + 315, + 564 + ], + "score": 0.83, + "content": "\\mathcal { P }", + "type": "inline_equation" + }, + { + "bbox": [ + 315, + 550, + 393, + 568 + ], + "score": 1.0, + "content": ", thus resulting in", + "type": "text" + }, + { + "bbox": [ + 394, + 552, + 442, + 565 + ], + "score": 0.92, + "content": "p ^ { k + 1 } \\ \\in \\ { \\mathcal { P } }", + "type": "inline_equation" + }, + { + "bbox": [ + 442, + 
550, + 468, + 568 + ], + "score": 1.0, + "content": ", i.e.", + "type": "text" + }, + { + "bbox": [ + 469, + 554, + 505, + 566 + ], + "score": 0.87, + "content": "\\nabla \\varphi _ { k } \\ =", + "type": "inline_equation" + } + ], + "index": 32 + }, + { + "bbox": [ + 108, + 560, + 381, + 588 + ], + "spans": [ + { + "bbox": [ + 108, + 565, + 261, + 585 + ], + "score": 0.89, + "content": "\\left( \\sum _ { i = 1 } ^ { n + 1 } y _ { i } ^ { k } , x _ { 1 } ^ { k } - { \\bar { x } } ^ { k } , \\dots , x _ { n + 1 } - { \\bar { x } } ^ { k } \\right)", + "type": "inline_equation" + }, + { + "bbox": [ + 261, + 560, + 289, + 588 + ], + "score": 1.0, + "content": "where", + "type": "text" + }, + { + "bbox": [ + 290, + 566, + 372, + 582 + ], + "score": 0.92, + "content": "\\begin{array} { r } { \\bar { x } ^ { k } = \\frac { 1 } { n + 1 } \\sum _ { i = 1 } ^ { n + 1 } x _ { i } ^ { k } } \\end{array}", + "type": "inline_equation" + }, + { + "bbox": [ + 372, + 560, + 381, + 588 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 33 + } + ], + "index": 32.5 + }, + { + "type": "title", + "bbox": [ + 108, + 596, + 231, + 610 + ], + "lines": [ + { + "bbox": [ + 105, + 596, + 231, + 611 + ], + "spans": [ + { + "bbox": [ + 105, + 596, + 231, + 611 + ], + "score": 1.0, + "content": "4 PROPOSED METHOD", + "type": "text" + } + ], + "index": 34 + } + ], + "index": 34 + }, + { + "type": "text", + "bbox": [ + 106, + 621, + 505, + 734 + ], + "lines": [ + { + "bbox": [ + 106, + 622, + 505, + 633 + ], + "spans": [ + { + "bbox": [ + 106, + 622, + 505, + 633 + ], + "score": 1.0, + "content": "The proposed method is given in Algorithm 1 and called Stochastic Projective Splitting (SPS). 
Unlike", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 632, + 505, + 645 + ], + "spans": [ + { + "bbox": [ + 105, + 632, + 388, + 645 + ], + "score": 1.0, + "content": "prior versions of projective splitting, SPS does not employ the stepsize", + "type": "text" + }, + { + "bbox": [ + 388, + 634, + 401, + 644 + ], + "score": 0.86, + "content": "\\alpha _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 401, + 632, + 505, + 645 + ], + "score": 1.0, + "content": "of (8) that places the next", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 104, + 642, + 504, + 658 + ], + "spans": [ + { + "bbox": [ + 104, + 642, + 270, + 658 + ], + "score": 1.0, + "content": "iterate exactly on the hyperplane given by", + "type": "text" + }, + { + "bbox": [ + 270, + 644, + 313, + 656 + ], + "score": 0.93, + "content": "\\varphi _ { k } ( p ) = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 314, + 642, + 475, + 658 + ], + "score": 1.0, + "content": ". Instead, it simply moves in the direction", + "type": "text" + }, + { + "bbox": [ + 475, + 644, + 504, + 655 + ], + "score": 0.91, + "content": "- \\nabla \\varphi _ { k }", + "type": "inline_equation" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 654, + 505, + 667 + ], + "spans": [ + { + "bbox": [ + 105, + 654, + 218, + 667 + ], + "score": 1.0, + "content": "with a pre-defined stepsize", + "type": "text" + }, + { + "bbox": [ + 218, + 655, + 240, + 667 + ], + "score": 0.92, + "content": "\\{ \\alpha _ { k } \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 240, + 654, + 505, + 667 + ], + "score": 1.0, + "content": ". This fundamental change is required to deal with the stochastic", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 106, + 666, + 505, + 678 + ], + "spans": [ + { + "bbox": [ + 106, + 666, + 374, + 678 + ], + "score": 1.0, + "content": "noise on lines 6 and 8. 
This noise could lead to the usual choice of", + "type": "text" + }, + { + "bbox": [ + 375, + 667, + 387, + 677 + ], + "score": 0.86, + "content": "\\alpha _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 387, + 666, + 505, + 678 + ], + "score": 1.0, + "content": "defined in (8) being unstable", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 677, + 505, + 690 + ], + "spans": [ + { + "bbox": [ + 105, + 677, + 399, + 690 + ], + "score": 1.0, + "content": "and difficult to analyze. In order to guarantee convergence, the parameters", + "type": "text" + }, + { + "bbox": [ + 399, + 679, + 412, + 688 + ], + "score": 0.86, + "content": "\\alpha _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 412, + 677, + 429, + 690 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 430, + 678, + 441, + 688 + ], + "score": 0.86, + "content": "\\rho _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 441, + 677, + 505, + 690 + ], + "score": 1.0, + "content": "must be chosen", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 688, + 505, + 699 + ], + "spans": [ + { + "bbox": [ + 105, + 688, + 505, + 699 + ], + "score": 1.0, + "content": "to satisfy certain conditions given below. 
Note that the gradient is calculated with respect to the", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 699, + 505, + 711 + ], + "spans": [ + { + "bbox": [ + 105, + 699, + 144, + 711 + ], + "score": 1.0, + "content": "subspace", + "type": "text" + }, + { + "bbox": [ + 145, + 699, + 154, + 709 + ], + "score": 0.8, + "content": "\\mathcal { P }", + "type": "inline_equation" + }, + { + "bbox": [ + 154, + 699, + 372, + 711 + ], + "score": 1.0, + "content": "defined in (6); since the algorithm is initialized within", + "type": "text" + }, + { + "bbox": [ + 373, + 699, + 381, + 709 + ], + "score": 0.84, + "content": "\\mathcal { P }", + "type": "inline_equation" + }, + { + "bbox": [ + 381, + 699, + 437, + 711 + ], + "score": 1.0, + "content": ", it remains in", + "type": "text" + }, + { + "bbox": [ + 438, + 699, + 446, + 709 + ], + "score": 0.82, + "content": "\\mathcal { P }", + "type": "inline_equation" + }, + { + "bbox": [ + 447, + 699, + 505, + 711 + ], + "score": 1.0, + "content": ", within which", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 106, + 707, + 506, + 738 + ], + "spans": [ + { + "bbox": [ + 106, + 711, + 119, + 721 + ], + "score": 0.83, + "content": "\\varphi _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 210, + 707, + 386, + 738 + ], + "score": 1.0, + "content": "the updates on lines 9-10 are equivalent to .", + "type": "text" + }, + { + "bbox": [ + 386, + 709, + 474, + 721 + ], + "score": 0.93, + "content": "\\boldsymbol { p } ^ { k + 1 } = \\boldsymbol { p } ^ { k } - \\alpha _ { k } \\nabla \\varphi _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 474, + 707, + 506, + 738 + ], + "score": 1.0, + "content": ", where", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 106, + 720, + 209, + 733 + ], + "spans": [ + { + "bbox": [ + 106, + 720, + 209, + 733 + ], + "score": 0.91, + "content": "\\mathbf { \\chi } ^ { \\dot { k } } = ( z ^ { k } , w _ { 1 } ^ { k } , \\dots , w _ 
{ n + 1 } ^ { k } )", + "type": "inline_equation" + } + ], + "index": 44 + } + ], + "index": 39.5 + } + ], + "page_idx": 4, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 26, + 308, + 37 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 302, + 751, + 309, + 760 + ], + "lines": [ + { + "bbox": [ + 301, + 750, + 310, + 762 + ], + "spans": [ + { + "bbox": [ + 301, + 750, + 310, + 762 + ], + "score": 1.0, + "content": "5", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 82, + 505, + 117 + ], + "lines": [], + "index": 1, + "bbox_fs": [ + 106, + 82, + 505, + 118 + ], + "lines_deleted": true + }, + { + "type": "title", + "bbox": [ + 106, + 131, + 345, + 145 + ], + "lines": [ + { + "bbox": [ + 104, + 130, + 345, + 146 + ], + "spans": [ + { + "bbox": [ + 104, + 130, + 345, + 146 + ], + "score": 1.0, + "content": "3 THE PROJECTIVE SPLITTING FRAMEWORK", + "type": "text" + } + ], + "index": 3 + } + ], + "index": 3 + }, + { + "type": "text", + "bbox": [ + 106, + 155, + 505, + 179 + ], + "lines": [ + { + "bbox": [ + 105, + 155, + 505, + 170 + ], + "spans": [ + { + "bbox": [ + 105, + 155, + 505, + 170 + ], + "score": 1.0, + "content": "Before introducing our proposed method, we give a brief introduction to the projective splitting class", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 167, + 156, + 179 + ], + "spans": [ + { + "bbox": [ + 106, + 167, + 156, + 179 + ], + "score": 1.0, + "content": "of methods.", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 4.5, + "bbox_fs": [ + 105, + 155, + 505, + 179 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 190, + 505, + 224 + ], + "lines": [ + { + "bbox": [ 
+ 105, + 190, + 506, + 204 + ], + "spans": [ + { + "bbox": [ + 105, + 190, + 506, + 204 + ], + "score": 1.0, + "content": "The extended solution set Projective splitting is a primal-dual framework and operates in an", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 202, + 505, + 213 + ], + "spans": [ + { + "bbox": [ + 106, + 202, + 505, + 213 + ], + "score": 1.0, + "content": "extended space of primal and dual variables. Rather than directly finding a solution to (1), we find a", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 213, + 326, + 225 + ], + "spans": [ + { + "bbox": [ + 105, + 213, + 326, + 225 + ], + "score": 1.0, + "content": "point in the extended solution set (or Kuhn-Tucker set)", + "type": "text" + } + ], + "index": 8 + } + ], + "index": 7, + "bbox_fs": [ + 105, + 190, + 506, + 225 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 139, + 228, + 471, + 250 + ], + "lines": [ + { + "bbox": [ + 139, + 228, + 471, + 250 + ], + "spans": [ + { + "bbox": [ + 139, + 228, + 471, + 250 + ], + "score": 0.91, + "content": "\\begin{array} { r } { \\mathcal { S } \\doteq \\left\\{ ( z , w _ { 1 } , \\ldots , w _ { n + 1 } ) \\ \\middle | \\ w _ { i } \\in A _ { i } ( z ) \\forall i \\in 1 . . n , w _ { n + 1 } = B ( z ) , \\sum _ { i = 1 } ^ { n + 1 } w _ { i } = 0 \\right\\} . 
} \\end{array}", + "type": "interline_equation", + "image_path": "79069065cd35ebc2f3c96ad607dc0153687512f38137f08b268f350ae363cd2c.jpg" + } + ] + } + ], + "index": 9, + "virtual_lines": [ + { + "bbox": [ + 139, + 228, + 471, + 250 + ], + "spans": [], + "index": 9 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 252, + 506, + 320 + ], + "lines": [ + { + "bbox": [ + 105, + 251, + 506, + 268 + ], + "spans": [ + { + "bbox": [ + 105, + 251, + 132, + 268 + ], + "score": 1.0, + "content": "Given", + "type": "text" + }, + { + "bbox": [ + 133, + 253, + 252, + 266 + ], + "score": 0.91, + "content": "p ^ { * } = ( z ^ { * } , w _ { 1 } ^ { * } \\ldots , w _ { n + 1 } ^ { * } ) \\in \\mathcal { S }", + "type": "inline_equation" + }, + { + "bbox": [ + 252, + 251, + 377, + 268 + ], + "score": 1.0, + "content": ", it is straightforward to see that", + "type": "text" + }, + { + "bbox": [ + 377, + 254, + 388, + 263 + ], + "score": 0.87, + "content": "z ^ { * }", + "type": "inline_equation" + }, + { + "bbox": [ + 388, + 251, + 506, + 268 + ], + "score": 1.0, + "content": "solves (1). 
Conversely, given", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 262, + 507, + 280 + ], + "spans": [ + { + "bbox": [ + 104, + 262, + 150, + 280 + ], + "score": 1.0, + "content": "a solution", + "type": "text" + }, + { + "bbox": [ + 150, + 266, + 160, + 274 + ], + "score": 0.84, + "content": "z ^ { * }", + "type": "inline_equation" + }, + { + "bbox": [ + 161, + 262, + 260, + 280 + ], + "score": 1.0, + "content": "to (1), there must exist", + "type": "text" + }, + { + "bbox": [ + 261, + 265, + 318, + 277 + ], + "score": 0.91, + "content": "w _ { 1 } ^ { * } , \\ldots , w _ { n + 1 } ^ { * }", + "type": "inline_equation" + }, + { + "bbox": [ + 318, + 262, + 361, + 280 + ], + "score": 1.0, + "content": "such that", + "type": "text" + }, + { + "bbox": [ + 361, + 264, + 461, + 277 + ], + "score": 0.93, + "content": "( z ^ { \\ast } , w _ { 1 } ^ { \\ast } , \\dots , w _ { n + 1 } ^ { \\ast } ) \\in \\mathcal { S }", + "type": "inline_equation" + }, + { + "bbox": [ + 462, + 262, + 507, + 280 + ], + "score": 1.0, + "content": ". Suppose", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 273, + 508, + 290 + ], + "spans": [ + { + "bbox": [ + 106, + 275, + 227, + 287 + ], + "score": 0.89, + "content": "p ^ { * } = ( z ^ { * } , w _ { 1 } ^ { * } \\ldots , w _ { n + 1 } ^ { * } ) \\in \\mathcal { S }", + "type": "inline_equation" + }, + { + "bbox": [ + 228, + 273, + 257, + 290 + ], + "score": 1.0, + "content": ". 
Since", + "type": "text" + }, + { + "bbox": [ + 258, + 276, + 268, + 285 + ], + "score": 0.84, + "content": "z ^ { * }", + "type": "inline_equation" + }, + { + "bbox": [ + 268, + 273, + 315, + 290 + ], + "score": 1.0, + "content": "solves (1),", + "type": "text" + }, + { + "bbox": [ + 315, + 276, + 326, + 285 + ], + "score": 0.83, + "content": "z ^ { * }", + "type": "inline_equation" + }, + { + "bbox": [ + 326, + 273, + 508, + 290 + ], + "score": 1.0, + "content": "is typically referred to as a primal solution.", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 285, + 507, + 300 + ], + "spans": [ + { + "bbox": [ + 104, + 285, + 156, + 300 + ], + "score": 1.0, + "content": "The vectors", + "type": "text" + }, + { + "bbox": [ + 156, + 288, + 213, + 299 + ], + "score": 0.9, + "content": "w _ { 1 } ^ { * } , \\ldots , w _ { n + 1 } ^ { * }", + "type": "inline_equation" + }, + { + "bbox": [ + 213, + 285, + 507, + 300 + ], + "score": 1.0, + "content": "solve a dual inclusion not described here, and are therefore called a dual", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 297, + 506, + 309 + ], + "spans": [ + { + "bbox": [ + 106, + 297, + 224, + 309 + ], + "score": 1.0, + "content": "solution. 
It can be shown that", + "type": "text" + }, + { + "bbox": [ + 225, + 298, + 232, + 307 + ], + "score": 0.82, + "content": "s", + "type": "inline_equation" + }, + { + "bbox": [ + 233, + 297, + 425, + 309 + ], + "score": 1.0, + "content": "is closed and convex; see for example Johnstone", + "type": "text" + }, + { + "bbox": [ + 425, + 298, + 434, + 307 + ], + "score": 0.31, + "content": "\\&", + "type": "inline_equation" + }, + { + "bbox": [ + 434, + 297, + 506, + 309 + ], + "score": 1.0, + "content": "Eckstein (2020b).", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 306, + 458, + 322 + ], + "spans": [ + { + "bbox": [ + 104, + 306, + 395, + 322 + ], + "score": 1.0, + "content": "We will assume throughout that a solution to (1) exists, therefore the set", + "type": "text" + }, + { + "bbox": [ + 395, + 309, + 403, + 318 + ], + "score": 0.82, + "content": "s", + "type": "inline_equation" + }, + { + "bbox": [ + 403, + 306, + 458, + 322 + ], + "score": 1.0, + "content": "is nonempty.", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 12.5, + "bbox_fs": [ + 104, + 251, + 508, + 322 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 330, + 505, + 398 + ], + "lines": [ + { + "bbox": [ + 105, + 331, + 505, + 344 + ], + "spans": [ + { + "bbox": [ + 105, + 331, + 505, + 344 + ], + "score": 1.0, + "content": "Separator-projection framework Projective splitting methods are instances of the general", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 343, + 505, + 354 + ], + "spans": [ + { + "bbox": [ + 105, + 343, + 467, + 354 + ], + "score": 1.0, + "content": "separator-projection algorithmic framework for locating a member of a closed convex set", + "type": "text" + }, + { + "bbox": [ + 468, + 343, + 476, + 352 + ], + "score": 0.81, + "content": "s", + "type": "inline_equation" + }, + { + "bbox": [ + 476, + 343, + 505, + 354 + ], + "score": 1.0, + "content": "within", + "type": "text" + } + ], + "index": 17 + }, + { + 
"bbox": [ + 104, + 352, + 506, + 367 + ], + "spans": [ + { + "bbox": [ + 104, + 352, + 164, + 367 + ], + "score": 1.0, + "content": "a linear space", + "type": "text" + }, + { + "bbox": [ + 165, + 354, + 173, + 363 + ], + "score": 0.84, + "content": "\\mathcal { P }", + "type": "inline_equation" + }, + { + "bbox": [ + 173, + 352, + 237, + 367 + ], + "score": 1.0, + "content": ". Each iteration", + "type": "text" + }, + { + "bbox": [ + 238, + 354, + 244, + 363 + ], + "score": 0.81, + "content": "k", + "type": "inline_equation" + }, + { + "bbox": [ + 245, + 352, + 506, + 367 + ], + "score": 1.0, + "content": "of algorithms drawn from this framework operates by finding a", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 106, + 364, + 505, + 376 + ], + "spans": [ + { + "bbox": [ + 106, + 365, + 120, + 376 + ], + "score": 1.0, + "content": "set", + "type": "text" + }, + { + "bbox": [ + 121, + 365, + 135, + 375 + ], + "score": 0.88, + "content": "H _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 135, + 365, + 270, + 376 + ], + "score": 1.0, + "content": "that separates the current iterate", + "type": "text" + }, + { + "bbox": [ + 270, + 364, + 302, + 376 + ], + "score": 0.92, + "content": "p ^ { k } \\in \\mathcal { P }", + "type": "inline_equation" + }, + { + "bbox": [ + 302, + 365, + 326, + 376 + ], + "score": 1.0, + "content": "from", + "type": "text" + }, + { + "bbox": [ + 326, + 365, + 334, + 374 + ], + "score": 0.78, + "content": "s", + "type": "inline_equation" + }, + { + "bbox": [ + 335, + 365, + 394, + 376 + ], + "score": 1.0, + "content": ", meaning that", + "type": "text" + }, + { + "bbox": [ + 395, + 365, + 403, + 374 + ], + "score": 0.81, + "content": "s", + "type": "inline_equation" + }, + { + "bbox": [ + 403, + 365, + 505, + 376 + ], + "score": 1.0, + "content": "is entirely in the set and", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 107, + 374, + 506, + 389 + ], + "spans": [ + { + "bbox": [ + 107, + 375, + 
117, + 387 + ], + "score": 0.89, + "content": "p ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 118, + 374, + 344, + 389 + ], + "score": 1.0, + "content": "typically is not. One then attempts to β€œmove closer\" to", + "type": "text" + }, + { + "bbox": [ + 345, + 376, + 353, + 385 + ], + "score": 0.82, + "content": "s", + "type": "inline_equation" + }, + { + "bbox": [ + 353, + 374, + 426, + 389 + ], + "score": 1.0, + "content": "by projecting the", + "type": "text" + }, + { + "bbox": [ + 426, + 375, + 437, + 387 + ], + "score": 0.9, + "content": "p ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 438, + 374, + 459, + 389 + ], + "score": 1.0, + "content": "onto", + "type": "text" + }, + { + "bbox": [ + 460, + 375, + 474, + 386 + ], + "score": 0.88, + "content": "H _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 474, + 374, + 506, + 389 + ], + "score": 1.0, + "content": ". In the", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 104, + 386, + 505, + 399 + ], + "spans": [ + { + "bbox": [ + 104, + 386, + 473, + 399 + ], + "score": 1.0, + "content": "particular case of projective splitting applied to the problem (1) using (5), we select the space", + "type": "text" + }, + { + "bbox": [ + 473, + 387, + 482, + 396 + ], + "score": 0.85, + "content": "\\mathcal { P }", + "type": "inline_equation" + }, + { + "bbox": [ + 482, + 386, + 505, + 399 + ], + "score": 1.0, + "content": "to be", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 18.5, + "bbox_fs": [ + 104, + 331, + 506, + 399 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 192, + 401, + 417, + 423 + ], + "lines": [ + { + "bbox": [ + 192, + 401, + 417, + 423 + ], + "spans": [ + { + "bbox": [ + 192, + 401, + 417, + 423 + ], + "score": 0.92, + "content": "\\begin{array} { r } { \\mathcal { P } \\doteq \\left\\{ ( z , w _ { 1 } , \\ldots , w _ { n + 1 } ) \\in \\mathbb { R } ^ { ( n + 2 ) d } \\ \\Big | \\ \\sum _ { i = 1 } ^ { n + 1 } w _ { i } = 
0 \\right\\} , } \\end{array}", + "type": "interline_equation", + "image_path": "a1095377e5ae298c91988ac15955b56f07116ecc2f868c284cdf4c1a9a693f68.jpg" + } + ] + } + ], + "index": 22, + "virtual_lines": [ + { + "bbox": [ + 192, + 401, + 417, + 423 + ], + "spans": [], + "index": 22 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 426, + 505, + 460 + ], + "lines": [ + { + "bbox": [ + 105, + 425, + 506, + 440 + ], + "spans": [ + { + "bbox": [ + 105, + 425, + 199, + 440 + ], + "score": 1.0, + "content": "and each separating set", + "type": "text" + }, + { + "bbox": [ + 199, + 427, + 213, + 438 + ], + "score": 0.91, + "content": "H _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 214, + 425, + 293, + 440 + ], + "score": 1.0, + "content": "to be the half space", + "type": "text" + }, + { + "bbox": [ + 294, + 426, + 380, + 439 + ], + "score": 0.91, + "content": "\\{ p \\in { \\mathcal { P } } \\mid \\varphi _ { k } ( p ) \\leq 0 \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 380, + 425, + 506, + 440 + ], + "score": 1.0, + "content": "generated by an affine function", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 107, + 437, + 506, + 451 + ], + "spans": [ + { + "bbox": [ + 107, + 438, + 158, + 449 + ], + "score": 0.9, + "content": "\\varphi _ { k } : \\mathscr { P } \\mathbb { R }", + "type": "inline_equation" + }, + { + "bbox": [ + 158, + 437, + 309, + 451 + ], + "score": 1.0, + "content": ". 
The general intention is to construct", + "type": "text" + }, + { + "bbox": [ + 309, + 440, + 322, + 449 + ], + "score": 0.85, + "content": "\\varphi _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 322, + 437, + 361, + 451 + ], + "score": 1.0, + "content": "such that", + "type": "text" + }, + { + "bbox": [ + 362, + 438, + 410, + 450 + ], + "score": 0.91, + "content": "\\varphi _ { k } \\tilde { ( p ^ { k } ) } > 0", + "type": "inline_equation" + }, + { + "bbox": [ + 410, + 437, + 429, + 451 + ], + "score": 1.0, + "content": ", but", + "type": "text" + }, + { + "bbox": [ + 429, + 438, + 477, + 450 + ], + "score": 0.93, + "content": "\\varphi _ { k } ( p ^ { * } ) \\leq 0", + "type": "inline_equation" + }, + { + "bbox": [ + 478, + 437, + 506, + 451 + ], + "score": 1.0, + "content": "for all", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 107, + 448, + 438, + 461 + ], + "spans": [ + { + "bbox": [ + 107, + 450, + 136, + 460 + ], + "score": 0.89, + "content": "p ^ { * } \\in { \\mathcal { S } }", + "type": "inline_equation" + }, + { + "bbox": [ + 136, + 448, + 266, + 461 + ], + "score": 1.0, + "content": ". 
The construction employed for", + "type": "text" + }, + { + "bbox": [ + 266, + 450, + 279, + 460 + ], + "score": 0.85, + "content": "\\varphi _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 279, + 448, + 438, + 461 + ], + "score": 1.0, + "content": "in the case of (1) and (5) is of the form", + "type": "text" + } + ], + "index": 25 + } + ], + "index": 24, + "bbox_fs": [ + 105, + 425, + 506, + 461 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 205, + 464, + 405, + 480 + ], + "lines": [ + { + "bbox": [ + 205, + 464, + 405, + 480 + ], + "spans": [ + { + "bbox": [ + 205, + 464, + 405, + 480 + ], + "score": 0.9, + "content": "\\begin{array} { r } { \\varphi _ { k } ( z , w _ { 1 } , \\ldots , w _ { n + 1 } ) \\doteq \\sum _ { i = 1 } ^ { n + 1 } \\langle z - x _ { i } ^ { k } , y _ { i } ^ { k } - w _ { i } \\rangle } \\end{array}", + "type": "interline_equation", + "image_path": "d98303aa83501011e691713f8aca89e9bf4bd1b0fcade3c1504fc37c2ae25db5.jpg" + } + ] + } + ], + "index": 26, + "virtual_lines": [ + { + "bbox": [ + 205, + 464, + 405, + 480 + ], + "spans": [], + "index": 26 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 484, + 505, + 529 + ], + "lines": [ + { + "bbox": [ + 104, + 482, + 506, + 499 + ], + "spans": [ + { + "bbox": [ + 104, + 482, + 174, + 499 + ], + "score": 1.0, + "content": "for some points", + "type": "text" + }, + { + "bbox": [ + 174, + 484, + 237, + 497 + ], + "score": 0.89, + "content": "( x _ { i } ^ { k } , y _ { i } ^ { k } ) \\in \\mathbb { R } ^ { 2 d }", + "type": "inline_equation" + }, + { + "bbox": [ + 237, + 482, + 242, + 499 + ], + "score": 1.0, + "content": ",", + "type": "text" + }, + { + "bbox": [ + 242, + 484, + 302, + 497 + ], + "score": 0.9, + "content": "i \\in { 1 . . ( n + 1 ) }", + "type": "inline_equation" + }, + { + "bbox": [ + 303, + 482, + 506, + 499 + ], + "score": 1.0, + "content": ", that must be carefully chosen (see below). 
Any", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 106, + 496, + 505, + 508 + ], + "spans": [ + { + "bbox": [ + 106, + 496, + 387, + 508 + ], + "score": 1.0, + "content": "function of the form (7) can be shown to be affine when restricted to", + "type": "text" + }, + { + "bbox": [ + 388, + 497, + 396, + 506 + ], + "score": 0.83, + "content": "\\mathcal { P }", + "type": "inline_equation" + }, + { + "bbox": [ + 397, + 496, + 505, + 508 + ], + "score": 1.0, + "content": ". As mentioned above, the", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 104, + 506, + 506, + 519 + ], + "spans": [ + { + "bbox": [ + 104, + 506, + 356, + 519 + ], + "score": 1.0, + "content": "standard separator-projection algorithm obtains its next iterate", + "type": "text" + }, + { + "bbox": [ + 356, + 506, + 377, + 519 + ], + "score": 0.92, + "content": "p ^ { k + 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 377, + 506, + 433, + 519 + ], + "score": 1.0, + "content": "by projecting", + "type": "text" + }, + { + "bbox": [ + 434, + 506, + 444, + 519 + ], + "score": 0.89, + "content": "p ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 445, + 506, + 466, + 519 + ], + "score": 1.0, + "content": "onto", + "type": "text" + }, + { + "bbox": [ + 466, + 507, + 480, + 518 + ], + "score": 0.89, + "content": "H _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 480, + 506, + 506, + 519 + ], + "score": 1.0, + "content": ". 
This", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 106, + 518, + 385, + 530 + ], + "spans": [ + { + "bbox": [ + 106, + 518, + 385, + 530 + ], + "score": 1.0, + "content": "calculation involves the usual projection step for a half space, namely", + "type": "text" + } + ], + "index": 30 + } + ], + "index": 28.5, + "bbox_fs": [ + 104, + 482, + 506, + 530 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 186, + 533, + 424, + 548 + ], + "lines": [ + { + "bbox": [ + 186, + 533, + 424, + 548 + ], + "spans": [ + { + "bbox": [ + 186, + 533, + 424, + 548 + ], + "score": 0.89, + "content": "p ^ { k + 1 } = p ^ { k } - \\alpha _ { k } \\nabla \\varphi _ { k } , \\quad \\mathrm { ~ w h e r e ~ } \\quad \\alpha _ { k } = \\varphi _ { k } ( p ^ { k } ) / \\| \\nabla \\varphi _ { k } \\| ^ { 2 } ,", + "type": "interline_equation", + "image_path": "f451a3c0c9822fe7b123aa3e52a07a6e8507a51f6d3a70528a872e52a6055a3d.jpg" + } + ] + } + ], + "index": 31, + "virtual_lines": [ + { + "bbox": [ + 186, + 533, + 424, + 548 + ], + "spans": [], + "index": 31 + } + ] + }, + { + "type": "list", + "bbox": [ + 107, + 553, + 505, + 583 + ], + "lines": [ + { + "bbox": [ + 104, + 550, + 505, + 568 + ], + "spans": [ + { + "bbox": [ + 104, + 550, + 180, + 568 + ], + "score": 1.0, + "content": "and the gradient", + "type": "text" + }, + { + "bbox": [ + 180, + 554, + 201, + 565 + ], + "score": 0.89, + "content": "\\nabla \\varphi _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 201, + 550, + 305, + 568 + ], + "score": 1.0, + "content": "is computed relative to", + "type": "text" + }, + { + "bbox": [ + 306, + 554, + 315, + 564 + ], + "score": 0.83, + "content": "\\mathcal { P }", + "type": "inline_equation" + }, + { + "bbox": [ + 315, + 550, + 393, + 568 + ], + "score": 1.0, + "content": ", thus resulting in", + "type": "text" + }, + { + "bbox": [ + 394, + 552, + 442, + 565 + ], + "score": 0.92, + "content": "p ^ { k + 1 } \\ \\in \\ { \\mathcal { P } }", + "type": 
"inline_equation" + }, + { + "bbox": [ + 442, + 550, + 468, + 568 + ], + "score": 1.0, + "content": ", i.e.", + "type": "text" + }, + { + "bbox": [ + 469, + 554, + 505, + 566 + ], + "score": 0.87, + "content": "\\nabla \\varphi _ { k } \\ =", + "type": "inline_equation" + } + ], + "index": 32, + "is_list_end_line": true + }, + { + "bbox": [ + 108, + 560, + 381, + 588 + ], + "spans": [ + { + "bbox": [ + 108, + 565, + 261, + 585 + ], + "score": 0.89, + "content": "\\left( \\sum _ { i = 1 } ^ { n + 1 } y _ { i } ^ { k } , x _ { 1 } ^ { k } - { \\bar { x } } ^ { k } , \\dots , x _ { n + 1 } - { \\bar { x } } ^ { k } \\right)", + "type": "inline_equation" + }, + { + "bbox": [ + 261, + 560, + 289, + 588 + ], + "score": 1.0, + "content": "where", + "type": "text" + }, + { + "bbox": [ + 290, + 566, + 372, + 582 + ], + "score": 0.92, + "content": "\\begin{array} { r } { \\bar { x } ^ { k } = \\frac { 1 } { n + 1 } \\sum _ { i = 1 } ^ { n + 1 } x _ { i } ^ { k } } \\end{array}", + "type": "inline_equation" + }, + { + "bbox": [ + 372, + 560, + 381, + 588 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 33, + "is_list_start_line": true, + "is_list_end_line": true + } + ], + "index": 32.5, + "bbox_fs": [ + 104, + 550, + 505, + 588 + ] + }, + { + "type": "title", + "bbox": [ + 108, + 596, + 231, + 610 + ], + "lines": [ + { + "bbox": [ + 105, + 596, + 231, + 611 + ], + "spans": [ + { + "bbox": [ + 105, + 596, + 231, + 611 + ], + "score": 1.0, + "content": "4 PROPOSED METHOD", + "type": "text" + } + ], + "index": 34 + } + ], + "index": 34 + }, + { + "type": "text", + "bbox": [ + 106, + 621, + 505, + 734 + ], + "lines": [ + { + "bbox": [ + 106, + 622, + 505, + 633 + ], + "spans": [ + { + "bbox": [ + 106, + 622, + 505, + 633 + ], + "score": 1.0, + "content": "The proposed method is given in Algorithm 1 and called Stochastic Projective Splitting (SPS). 
Unlike", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 632, + 505, + 645 + ], + "spans": [ + { + "bbox": [ + 105, + 632, + 388, + 645 + ], + "score": 1.0, + "content": "prior versions of projective splitting, SPS does not employ the stepsize", + "type": "text" + }, + { + "bbox": [ + 388, + 634, + 401, + 644 + ], + "score": 0.86, + "content": "\\alpha _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 401, + 632, + 505, + 645 + ], + "score": 1.0, + "content": "of (8) that places the next", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 104, + 642, + 504, + 658 + ], + "spans": [ + { + "bbox": [ + 104, + 642, + 270, + 658 + ], + "score": 1.0, + "content": "iterate exactly on the hyperplane given by", + "type": "text" + }, + { + "bbox": [ + 270, + 644, + 313, + 656 + ], + "score": 0.93, + "content": "\\varphi _ { k } ( p ) = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 314, + 642, + 475, + 658 + ], + "score": 1.0, + "content": ". Instead, it simply moves in the direction", + "type": "text" + }, + { + "bbox": [ + 475, + 644, + 504, + 655 + ], + "score": 0.91, + "content": "- \\nabla \\varphi _ { k }", + "type": "inline_equation" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 654, + 505, + 667 + ], + "spans": [ + { + "bbox": [ + 105, + 654, + 218, + 667 + ], + "score": 1.0, + "content": "with a pre-defined stepsize", + "type": "text" + }, + { + "bbox": [ + 218, + 655, + 240, + 667 + ], + "score": 0.92, + "content": "\\{ \\alpha _ { k } \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 240, + 654, + 505, + 667 + ], + "score": 1.0, + "content": ". This fundamental change is required to deal with the stochastic", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 106, + 666, + 505, + 678 + ], + "spans": [ + { + "bbox": [ + 106, + 666, + 374, + 678 + ], + "score": 1.0, + "content": "noise on lines 6 and 8. 
This noise could lead to the usual choice of", + "type": "text" + }, + { + "bbox": [ + 375, + 667, + 387, + 677 + ], + "score": 0.86, + "content": "\\alpha _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 387, + 666, + 505, + 678 + ], + "score": 1.0, + "content": "defined in (8) being unstable", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 677, + 505, + 690 + ], + "spans": [ + { + "bbox": [ + 105, + 677, + 399, + 690 + ], + "score": 1.0, + "content": "and difficult to analyze. In order to guarantee convergence, the parameters", + "type": "text" + }, + { + "bbox": [ + 399, + 679, + 412, + 688 + ], + "score": 0.86, + "content": "\\alpha _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 412, + 677, + 429, + 690 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 430, + 678, + 441, + 688 + ], + "score": 0.86, + "content": "\\rho _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 441, + 677, + 505, + 690 + ], + "score": 1.0, + "content": "must be chosen", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 688, + 505, + 699 + ], + "spans": [ + { + "bbox": [ + 105, + 688, + 505, + 699 + ], + "score": 1.0, + "content": "to satisfy certain conditions given below. 
Note that the gradient is calculated with respect to the", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 699, + 505, + 711 + ], + "spans": [ + { + "bbox": [ + 105, + 699, + 144, + 711 + ], + "score": 1.0, + "content": "subspace", + "type": "text" + }, + { + "bbox": [ + 145, + 699, + 154, + 709 + ], + "score": 0.8, + "content": "\\mathcal { P }", + "type": "inline_equation" + }, + { + "bbox": [ + 154, + 699, + 372, + 711 + ], + "score": 1.0, + "content": "defined in (6); since the algorithm is initialized within", + "type": "text" + }, + { + "bbox": [ + 373, + 699, + 381, + 709 + ], + "score": 0.84, + "content": "\\mathcal { P }", + "type": "inline_equation" + }, + { + "bbox": [ + 381, + 699, + 437, + 711 + ], + "score": 1.0, + "content": ", it remains in", + "type": "text" + }, + { + "bbox": [ + 438, + 699, + 446, + 709 + ], + "score": 0.82, + "content": "\\mathcal { P }", + "type": "inline_equation" + }, + { + "bbox": [ + 447, + 699, + 505, + 711 + ], + "score": 1.0, + "content": ", within which", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 106, + 707, + 506, + 738 + ], + "spans": [ + { + "bbox": [ + 106, + 711, + 119, + 721 + ], + "score": 0.83, + "content": "\\varphi _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 210, + 707, + 386, + 738 + ], + "score": 1.0, + "content": "the updates on lines 9-10 are equivalent to .", + "type": "text" + }, + { + "bbox": [ + 386, + 709, + 474, + 721 + ], + "score": 0.93, + "content": "\\boldsymbol { p } ^ { k + 1 } = \\boldsymbol { p } ^ { k } - \\alpha _ { k } \\nabla \\varphi _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 474, + 707, + 506, + 738 + ], + "score": 1.0, + "content": ", where", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 106, + 720, + 209, + 733 + ], + "spans": [ + { + "bbox": [ + 106, + 720, + 209, + 733 + ], + "score": 0.91, + "content": "\\mathbf { \\chi } ^ { \\dot { k } } = ( z ^ { k } , w _ { 1 } ^ { k } , \\dots , w _ 
{ n + 1 } ^ { k } )", + "type": "inline_equation" + } + ], + "index": 44 + } + ], + "index": 39.5, + "bbox_fs": [ + 104, + 622, + 506, + 738 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 82, + 505, + 138 + ], + "lines": [ + { + "bbox": [ + 106, + 82, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 82, + 279, + 95 + ], + "score": 1.0, + "content": "Note that SPS does not explicitly evaluate", + "type": "text" + }, + { + "bbox": [ + 280, + 84, + 292, + 94 + ], + "score": 0.86, + "content": "\\varphi _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 293, + 82, + 505, + 95 + ], + "score": 1.0, + "content": ", which is only used in the analysis, but it does keep", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 92, + 506, + 108 + ], + "spans": [ + { + "bbox": [ + 105, + 92, + 141, + 108 + ], + "score": 1.0, + "content": "track of", + "type": "text" + }, + { + "bbox": [ + 141, + 93, + 174, + 106 + ], + "score": 0.93, + "content": "( x _ { i } ^ { k } , y _ { i } ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 174, + 92, + 190, + 108 + ], + "score": 1.0, + "content": "for", + "type": "text" + }, + { + "bbox": [ + 191, + 93, + 249, + 106 + ], + "score": 0.92, + "content": "i \\in { 1 . . ( n + 1 ) }", + "type": "inline_equation" + }, + { + "bbox": [ + 249, + 92, + 506, + 108 + ], + "score": 1.0, + "content": ". 
The algorithm’s memory requirements scale linearly with the", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 103, + 505, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 103, + 241, + 118 + ], + "score": 1.0, + "content": "number of nonsmooth operators", + "type": "text" + }, + { + "bbox": [ + 241, + 107, + 249, + 114 + ], + "score": 0.74, + "content": "n", + "type": "inline_equation" + }, + { + "bbox": [ + 249, + 103, + 505, + 118 + ], + "score": 1.0, + "content": "in the inclusion (1), with the simplest implementation storing", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 114, + 505, + 129 + ], + "spans": [ + { + "bbox": [ + 106, + 115, + 146, + 127 + ], + "score": 0.91, + "content": "( 3 n + 5 ) d", + "type": "inline_equation" + }, + { + "bbox": [ + 146, + 114, + 389, + 129 + ], + "score": 1.0, + "content": "working-vector elements. This requirement can be reduced to", + "type": "text" + }, + { + "bbox": [ + 389, + 115, + 424, + 127 + ], + "score": 0.92, + "content": "( n + 7 ) d", + "type": "inline_equation" + }, + { + "bbox": [ + 425, + 114, + 505, + 129 + ], + "score": 1.0, + "content": "through a technique", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 125, + 441, + 139 + ], + "spans": [ + { + "bbox": [ + 105, + 125, + 297, + 139 + ], + "score": 1.0, + "content": "discussed in Appendix H. 
In most applications,", + "type": "text" + }, + { + "bbox": [ + 297, + 129, + 304, + 136 + ], + "score": 0.72, + "content": "n", + "type": "inline_equation" + }, + { + "bbox": [ + 305, + 125, + 441, + 139 + ], + "score": 1.0, + "content": "will be small, for example 2 or 3.", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 2 + }, + { + "type": "text", + "bbox": [ + 107, + 148, + 505, + 228 + ], + "lines": [ + { + "bbox": [ + 105, + 147, + 504, + 163 + ], + "spans": [ + { + "bbox": [ + 105, + 147, + 149, + 163 + ], + "score": 1.0, + "content": "Updating", + "type": "text" + }, + { + "bbox": [ + 150, + 149, + 183, + 162 + ], + "score": 0.91, + "content": "( x _ { i } ^ { k } , y _ { i } ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 183, + 147, + 250, + 163 + ], + "score": 1.0, + "content": "The variables", + "type": "text" + }, + { + "bbox": [ + 250, + 149, + 284, + 162 + ], + "score": 0.93, + "content": "( x _ { i } ^ { k } , y _ { i } ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 284, + 147, + 493, + 163 + ], + "score": 1.0, + "content": "are updated on lines 3-8 of Algorithm 1, in which", + "type": "text" + }, + { + "bbox": [ + 493, + 149, + 504, + 160 + ], + "score": 0.86, + "content": "e ^ { k }", + "type": "inline_equation" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 160, + 506, + 174 + ], + "spans": [ + { + "bbox": [ + 105, + 160, + 123, + 174 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 124, + 161, + 134, + 172 + ], + "score": 0.87, + "content": "\\epsilon ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 135, + 160, + 150, + 174 + ], + "score": 1.0, + "content": "are", + "type": "text" + }, + { + "bbox": [ + 151, + 162, + 164, + 172 + ], + "score": 0.87, + "content": "\\mathbb { R } ^ { d }", + "type": "inline_equation" + }, + { + "bbox": [ + 164, + 160, + 394, + 174 + ], + "score": 1.0, + "content": "-valued random variables defined on a probability space", + 
"type": "text" + }, + { + "bbox": [ + 394, + 162, + 434, + 174 + ], + "score": 0.92, + "content": "( \\Omega , { \\mathcal { F } } , P )", + "type": "inline_equation" + }, + { + "bbox": [ + 435, + 160, + 456, + 174 + ], + "score": 1.0, + "content": ". For", + "type": "text" + }, + { + "bbox": [ + 457, + 162, + 466, + 172 + ], + "score": 0.82, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 466, + 160, + 506, + 174 + ], + "score": 1.0, + "content": "we use a", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 173, + 505, + 185 + ], + "spans": [ + { + "bbox": [ + 105, + 173, + 505, + 185 + ], + "score": 1.0, + "content": "new, noisy version of the two-forward-step procedure from Johnstone & Eckstein (2020b). For each", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 182, + 506, + 198 + ], + "spans": [ + { + "bbox": [ + 107, + 184, + 118, + 195 + ], + "score": 0.74, + "content": "A _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 119, + 182, + 122, + 198 + ], + "score": 1.0, + "content": ",", + "type": "text" + }, + { + "bbox": [ + 122, + 184, + 155, + 194 + ], + "score": 0.77, + "content": "i \\in 1 . . n", + "type": "inline_equation" + }, + { + "bbox": [ + 156, + 182, + 506, + 198 + ], + "score": 1.0, + "content": ", we use the same resolvent step used in previous projective splitting papers, originating", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 192, + 507, + 209 + ], + "spans": [ + { + "bbox": [ + 104, + 192, + 281, + 209 + ], + "score": 1.0, + "content": "with (Eckstein & Svaiter, 2008). 
In the case", + "type": "text" + }, + { + "bbox": [ + 282, + 194, + 333, + 205 + ], + "score": 0.92, + "content": "\\epsilon ^ { k } = e ^ { k } = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 333, + 192, + 414, + 209 + ], + "score": 1.0, + "content": ", the selection of the", + "type": "text" + }, + { + "bbox": [ + 415, + 194, + 448, + 206 + ], + "score": 0.91, + "content": "( \\bar { x _ { i } ^ { k } } , y _ { i } ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 448, + 192, + 507, + 209 + ], + "score": 1.0, + "content": "is identical to", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 204, + 505, + 219 + ], + "spans": [ + { + "bbox": [ + 105, + 204, + 407, + 219 + ], + "score": 1.0, + "content": "that proposed by Johnstone & Eckstein (2020b), resulting in the hyperplane", + "type": "text" + }, + { + "bbox": [ + 408, + 206, + 474, + 218 + ], + "score": 0.92, + "content": "\\{ p : { \\varphi } _ { k } ( p ) = 0 \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 474, + 204, + 505, + 219 + ], + "score": 1.0, + "content": "strictly", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 216, + 197, + 229 + ], + "spans": [ + { + "bbox": [ + 105, + 216, + 149, + 229 + ], + "score": 1.0, + "content": "separating", + "type": "text" + }, + { + "bbox": [ + 150, + 216, + 161, + 228 + ], + "score": 0.89, + "content": "p ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 161, + 216, + 184, + 229 + ], + "score": 1.0, + "content": "from", + "type": "text" + }, + { + "bbox": [ + 184, + 217, + 192, + 226 + ], + "score": 0.74, + "content": "s", + "type": "inline_equation" + }, + { + "bbox": [ + 192, + 216, + 197, + 229 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 8 + }, + { + "type": "text", + "bbox": [ + 107, + 233, + 505, + 278 + ], + "lines": [ + { + "bbox": [ + 105, + 233, + 505, + 246 + ], + "spans": [ + { + "bbox": [ + 105, + 233, + 260, + 246 + 
], + "score": 1.0, + "content": "SPS achieves full splitting of (1): each", + "type": "text" + }, + { + "bbox": [ + 261, + 234, + 272, + 244 + ], + "score": 0.88, + "content": "A _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 273, + 233, + 505, + 246 + ], + "score": 1.0, + "content": "is processed separately using a resolvent and the Lipschitz", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 245, + 505, + 257 + ], + "spans": [ + { + "bbox": [ + 106, + 245, + 129, + 257 + ], + "score": 1.0, + "content": "term", + "type": "text" + }, + { + "bbox": [ + 129, + 245, + 138, + 254 + ], + "score": 0.8, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 139, + 245, + 381, + 257 + ], + "score": 1.0, + "content": "is processed via a stochastic gradient oracle. When the", + "type": "text" + }, + { + "bbox": [ + 381, + 245, + 393, + 255 + ], + "score": 0.88, + "content": "A _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 393, + 245, + 505, + 257 + ], + "score": 1.0, + "content": "arise from regularizers or", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 256, + 505, + 268 + ], + "spans": [ + { + "bbox": [ + 105, + 256, + 505, + 268 + ], + "score": 1.0, + "content": "constraints, as discussed in Section 2, their resolvents can be readily computed so long as their", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 267, + 369, + 279 + ], + "spans": [ + { + "bbox": [ + 105, + 267, + 369, + 279 + ], + "score": 1.0, + "content": "respective proximal/projection operators have a convenient form.", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 13.5 + }, + { + "type": "text", + "bbox": [ + 107, + 288, + 505, + 323 + ], + "lines": [ + { + "bbox": [ + 105, + 288, + 505, + 302 + ], + "spans": [ + { + "bbox": [ + 105, + 288, + 212, + 302 + ], + "score": 1.0, + "content": "Noise assumptions Let", + "type": "text" + }, + { + "bbox": [ + 212, + 289, + 295, + 302 + ], + 
"score": 0.95, + "content": "\\mathcal { F } _ { k } \\doteq \\sigma ( p ^ { 1 } , \\ldots , p ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 295, + 288, + 314, + 302 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 315, + 289, + 363, + 302 + ], + "score": 0.93, + "content": "\\mathcal { E } _ { k } \\doteq \\sigma ( \\epsilon ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 363, + 288, + 505, + 302 + ], + "score": 1.0, + "content": ". The stochastic estimators for the", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 103, + 296, + 508, + 317 + ], + "spans": [ + { + "bbox": [ + 103, + 296, + 147, + 317 + ], + "score": 1.0, + "content": "gradients,", + "type": "text" + }, + { + "bbox": [ + 147, + 300, + 158, + 311 + ], + "score": 0.87, + "content": "r ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 159, + 296, + 176, + 317 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 176, + 300, + 198, + 313 + ], + "score": 0.94, + "content": "y _ { n + 1 } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 198, + 296, + 508, + 317 + ], + "score": 1.0, + "content": ", are assumed to be unbiased, that is, the noise terms have mean 0 conditioned", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 311, + 156, + 325 + ], + "spans": [ + { + "bbox": [ + 105, + 311, + 156, + 325 + ], + "score": 1.0, + "content": "on the past:", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 17 + }, + { + "type": "interline_equation", + "bbox": [ + 229, + 326, + 381, + 342 + ], + "lines": [ + { + "bbox": [ + 229, + 326, + 381, + 342 + ], + "spans": [ + { + "bbox": [ + 229, + 326, + 381, + 342 + ], + "score": 0.91, + "content": "\\mathbb { E } [ \\epsilon ^ { k } | \\mathcal { F } _ { k } ] = 0 , \\quad \\mathbb { E } [ e ^ { k } | \\mathcal { F } _ { k } ] = 0 \\quad a . 
s .", + "type": "interline_equation", + "image_path": "3081ebd6cddf1d97a7d70c51bac354bb263f320314ed46ca2c98672af94e55e6.jpg" + } + ] + } + ], + "index": 19, + "virtual_lines": [ + { + "bbox": [ + 229, + 326, + 381, + 342 + ], + "spans": [], + "index": 19 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 345, + 396, + 356 + ], + "lines": [ + { + "bbox": [ + 106, + 344, + 396, + 358 + ], + "spans": [ + { + "bbox": [ + 106, + 344, + 396, + 358 + ], + "score": 1.0, + "content": "We impose the following mild assumptions on the variance of the noise:", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 20 + }, + { + "type": "interline_equation", + "bbox": [ + 203, + 359, + 406, + 393 + ], + "lines": [ + { + "bbox": [ + 203, + 359, + 406, + 393 + ], + "spans": [ + { + "bbox": [ + 203, + 359, + 406, + 393 + ], + "score": 0.91, + "content": "\\begin{array} { r l } & { \\mathbb { E } \\left[ \\| \\epsilon ^ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } \\right] \\leq N _ { 1 } + N _ { 2 } \\| B ( z ^ { k } ) \\| ^ { 2 } \\quad a . s . } \\\\ & { \\mathbb { E } \\left[ \\| e ^ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } , \\mathcal { E } _ { k } \\right] \\leq N _ { 3 } + N _ { 4 } \\| B ( x _ { n + 1 } ^ { k } ) \\| ^ { 2 } \\quad a . s . 
, } \\end{array}", + "type": "interline_equation", + "image_path": "cdc25a2da250f527e64eb0fe1341d28f6d7f3c22cb3672361763f89c38ed5842.jpg" + } + ] + } + ], + "index": 21.5, + "virtual_lines": [ + { + "bbox": [ + 203, + 359, + 406, + 376.0 + ], + "spans": [], + "index": 21 + }, + { + "bbox": [ + 203, + 376.0, + 406, + 393.0 + ], + "spans": [], + "index": 22 + } + ] + }, + { + "type": "text", + "bbox": [ + 104, + 395, + 490, + 408 + ], + "lines": [ + { + "bbox": [ + 106, + 395, + 493, + 410 + ], + "spans": [ + { + "bbox": [ + 106, + 395, + 133, + 410 + ], + "score": 1.0, + "content": "where", + "type": "text" + }, + { + "bbox": [ + 133, + 397, + 239, + 408 + ], + "score": 0.91, + "content": "0 \\le N _ { 1 } , N _ { 2 } , N _ { 3 } , N _ { 4 } < \\infty", + "type": "inline_equation" + }, + { + "bbox": [ + 239, + 395, + 317, + 410 + ], + "score": 1.0, + "content": ". We do not require", + "type": "text" + }, + { + "bbox": [ + 317, + 396, + 328, + 406 + ], + "score": 0.88, + "content": "e ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 328, + 395, + 346, + 410 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 346, + 396, + 356, + 406 + ], + "score": 0.87, + "content": "\\epsilon ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 356, + 395, + 493, + 410 + ], + "score": 1.0, + "content": "to be independent of one another.", + "type": "text" + } + ], + "index": 23 + } + ], + "index": 23 + }, + { + "type": "text", + "bbox": [ + 106, + 419, + 504, + 464 + ], + "lines": [ + { + "bbox": [ + 105, + 419, + 506, + 432 + ], + "spans": [ + { + "bbox": [ + 105, + 419, + 241, + 432 + ], + "score": 1.0, + "content": "Stepsize choices The stepsizes", + "type": "text" + }, + { + "bbox": [ + 242, + 422, + 253, + 432 + ], + "score": 0.86, + "content": "\\rho _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 254, + 419, + 272, + 432 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 272, + 422, 
+ 285, + 431 + ], + "score": 0.86, + "content": "\\alpha _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 285, + 419, + 506, + 432 + ], + "score": 1.0, + "content": "are assumed to be deterministic. A constant stepsize", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 106, + 430, + 505, + 443 + ], + "spans": [ + { + "bbox": [ + 106, + 430, + 505, + 443 + ], + "score": 1.0, + "content": "choice which attains a non-asymptotic convergence rate will be considered in the next section", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 104, + 441, + 506, + 455 + ], + "spans": [ + { + "bbox": [ + 104, + 441, + 506, + 455 + ], + "score": 1.0, + "content": "(Theorem 2). The stepsize conditions we will impose to guarantee almost-sure convergence (Theorem", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 104, + 451, + 133, + 465 + ], + "spans": [ + { + "bbox": [ + 104, + 451, + 133, + 465 + ], + "score": 1.0, + "content": "1) are", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 25.5 + }, + { + "type": "interline_equation", + "bbox": [ + 140, + 467, + 469, + 482 + ], + "lines": [ + { + "bbox": [ + 140, + 467, + 469, + 482 + ], + "spans": [ + { + "bbox": [ + 140, + 467, + 469, + 482 + ], + "score": 0.85, + "content": "\\begin{array} { r } { \\sum _ { k = 1 } ^ { \\infty } \\alpha _ { k } \\rho _ { k } = \\infty , \\quad \\sum _ { k = 1 } ^ { \\infty } \\alpha _ { k } ^ { 2 } < \\infty , \\quad \\sum _ { k = 1 } ^ { \\infty } \\alpha _ { k } \\rho _ { k } ^ { 2 } < \\infty , \\mathrm { a n d } \\rho _ { k } \\leq \\overline { \\rho } < 1 / L . 
} \\end{array}", + "type": "interline_equation", + "image_path": "4c73912963288c7d940343cfda19b4261b37d3e3d2936871d49cc104e58e93db.jpg" + } + ] + } + ], + "index": 28, + "virtual_lines": [ + { + "bbox": [ + 140, + 467, + 469, + 482 + ], + "spans": [], + "index": 28 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 485, + 452, + 496 + ], + "lines": [ + { + "bbox": [ + 105, + 484, + 451, + 497 + ], + "spans": [ + { + "bbox": [ + 105, + 484, + 205, + 497 + ], + "score": 1.0, + "content": "For example, in the case", + "type": "text" + }, + { + "bbox": [ + 206, + 486, + 232, + 495 + ], + "score": 0.9, + "content": "L = 1", + "type": "inline_equation" + }, + { + "bbox": [ + 232, + 484, + 451, + 497 + ], + "score": 1.0, + "content": ", a particular choice which satisfies these constraints is", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 29 + }, + { + "type": "interline_equation", + "bbox": [ + 142, + 498, + 469, + 513 + ], + "lines": [ + { + "bbox": [ + 142, + 498, + 469, + 513 + ], + "spans": [ + { + "bbox": [ + 142, + 498, + 469, + 513 + ], + "score": 0.62, + "content": "\\alpha _ { k } = k ^ { - 0 . 5 - p } \\mathrm { f o r } 0 < p < 0 . 5 , \\mathrm { a n d } \\rho _ { k } = k ^ { - 0 . 5 + t } \\mathrm { f o r } p \\leq t < 0 . 5 p + 0 . 
2 5 .", + "type": "interline_equation", + "image_path": "2a93bfcc389ad2be8fe6c2716602414c175dacdc0527cd1f6bf434b8d6a3b4ca.jpg" + } + ] + } + ], + "index": 30, + "virtual_lines": [ + { + "bbox": [ + 142, + 498, + 469, + 513 + ], + "spans": [], + "index": 30 + } + ] + }, + { + "type": "text", + "bbox": [ + 105, + 516, + 504, + 540 + ], + "lines": [ + { + "bbox": [ + 106, + 516, + 505, + 529 + ], + "spans": [ + { + "bbox": [ + 106, + 516, + 218, + 529 + ], + "score": 1.0, + "content": "For simplicity, the stepsizes", + "type": "text" + }, + { + "bbox": [ + 219, + 519, + 226, + 527 + ], + "score": 0.78, + "content": "\\tau", + "type": "inline_equation" + }, + { + "bbox": [ + 226, + 516, + 505, + 529 + ], + "score": 1.0, + "content": "used for the resolvent updates in lines 3-5 are fixed, but they could be", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 106, + 528, + 482, + 541 + ], + "spans": [ + { + "bbox": [ + 106, + 528, + 210, + 541 + ], + "score": 1.0, + "content": "allowed to vary with both", + "type": "text" + }, + { + "bbox": [ + 211, + 529, + 216, + 538 + ], + "score": 0.78, + "content": "i", + "type": "inline_equation" + }, + { + "bbox": [ + 216, + 528, + 234, + 541 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 234, + 528, + 241, + 538 + ], + "score": 0.82, + "content": "k", + "type": "inline_equation" + }, + { + "bbox": [ + 241, + 528, + 482, + 541 + ], + "score": 1.0, + "content": "so long as they have finite positive lower and upper bounds.", + "type": "text" + } + ], + "index": 32 + } + ], + "index": 31.5 + }, + { + "type": "title", + "bbox": [ + 106, + 560, + 312, + 573 + ], + "lines": [ + { + "bbox": [ + 105, + 559, + 313, + 576 + ], + "spans": [ + { + "bbox": [ + 105, + 559, + 313, + 576 + ], + "score": 1.0, + "content": "Algorithm 1: Stochastic Projective Splitting (SPS)", + "type": "text" + } + ], + "index": 33 + } + ], + "index": 33 + } + ], + "page_idx": 5, + "page_size": [ + 612, + 792 + ], + 
"discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 27, + 308, + 37 + ], + "lines": [ + { + "bbox": [ + 107, + 26, + 308, + 38 + ], + "spans": [ + { + "bbox": [ + 107, + 26, + 308, + 38 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 302, + 751, + 309, + 760 + ], + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 762 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 762 + ], + "score": 1.0, + "content": "6", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 82, + 505, + 138 + ], + "lines": [ + { + "bbox": [ + 106, + 82, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 82, + 279, + 95 + ], + "score": 1.0, + "content": "Note that SPS does not explicitly evaluate", + "type": "text" + }, + { + "bbox": [ + 280, + 84, + 292, + 94 + ], + "score": 0.86, + "content": "\\varphi _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 293, + 82, + 505, + 95 + ], + "score": 1.0, + "content": ", which is only used in the analysis, but it does keep", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 92, + 506, + 108 + ], + "spans": [ + { + "bbox": [ + 105, + 92, + 141, + 108 + ], + "score": 1.0, + "content": "track of", + "type": "text" + }, + { + "bbox": [ + 141, + 93, + 174, + 106 + ], + "score": 0.93, + "content": "( x _ { i } ^ { k } , y _ { i } ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 174, + 92, + 190, + 108 + ], + "score": 1.0, + "content": "for", + "type": "text" + }, + { + "bbox": [ + 191, + 93, + 249, + 106 + ], + "score": 0.92, + "content": "i \\in { 1 . . ( n + 1 ) }", + "type": "inline_equation" + }, + { + "bbox": [ + 249, + 92, + 506, + 108 + ], + "score": 1.0, + "content": ". 
The algorithm’s memory requirements scale linearly with the", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 103, + 505, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 103, + 241, + 118 + ], + "score": 1.0, + "content": "number of nonsmooth operators", + "type": "text" + }, + { + "bbox": [ + 241, + 107, + 249, + 114 + ], + "score": 0.74, + "content": "n", + "type": "inline_equation" + }, + { + "bbox": [ + 249, + 103, + 505, + 118 + ], + "score": 1.0, + "content": "in the inclusion (1), with the simplest implementation storing", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 114, + 505, + 129 + ], + "spans": [ + { + "bbox": [ + 106, + 115, + 146, + 127 + ], + "score": 0.91, + "content": "( 3 n + 5 ) d", + "type": "inline_equation" + }, + { + "bbox": [ + 146, + 114, + 389, + 129 + ], + "score": 1.0, + "content": "working-vector elements. This requirement can be reduced to", + "type": "text" + }, + { + "bbox": [ + 389, + 115, + 424, + 127 + ], + "score": 0.92, + "content": "( n + 7 ) d", + "type": "inline_equation" + }, + { + "bbox": [ + 425, + 114, + 505, + 129 + ], + "score": 1.0, + "content": "through a technique", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 125, + 441, + 139 + ], + "spans": [ + { + "bbox": [ + 105, + 125, + 297, + 139 + ], + "score": 1.0, + "content": "discussed in Appendix H. 
In most applications,", + "type": "text" + }, + { + "bbox": [ + 297, + 129, + 304, + 136 + ], + "score": 0.72, + "content": "n", + "type": "inline_equation" + }, + { + "bbox": [ + 305, + 125, + 441, + 139 + ], + "score": 1.0, + "content": "will be small, for example 2 or 3.", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 2, + "bbox_fs": [ + 105, + 82, + 506, + 139 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 148, + 505, + 228 + ], + "lines": [ + { + "bbox": [ + 105, + 147, + 504, + 163 + ], + "spans": [ + { + "bbox": [ + 105, + 147, + 149, + 163 + ], + "score": 1.0, + "content": "Updating", + "type": "text" + }, + { + "bbox": [ + 150, + 149, + 183, + 162 + ], + "score": 0.91, + "content": "( x _ { i } ^ { k } , y _ { i } ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 183, + 147, + 250, + 163 + ], + "score": 1.0, + "content": "The variables", + "type": "text" + }, + { + "bbox": [ + 250, + 149, + 284, + 162 + ], + "score": 0.93, + "content": "( x _ { i } ^ { k } , y _ { i } ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 284, + 147, + 493, + 163 + ], + "score": 1.0, + "content": "are updated on lines 3-8 of Algorithm 1, in which", + "type": "text" + }, + { + "bbox": [ + 493, + 149, + 504, + 160 + ], + "score": 0.86, + "content": "e ^ { k }", + "type": "inline_equation" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 160, + 506, + 174 + ], + "spans": [ + { + "bbox": [ + 105, + 160, + 123, + 174 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 124, + 161, + 134, + 172 + ], + "score": 0.87, + "content": "\\epsilon ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 135, + 160, + 150, + 174 + ], + "score": 1.0, + "content": "are", + "type": "text" + }, + { + "bbox": [ + 151, + 162, + 164, + 172 + ], + "score": 0.87, + "content": "\\mathbb { R } ^ { d }", + "type": "inline_equation" + }, + { + "bbox": [ + 164, + 160, + 394, + 174 + ], + "score": 1.0, + "content": "-valued 
random variables defined on a probability space", + "type": "text" + }, + { + "bbox": [ + 394, + 162, + 434, + 174 + ], + "score": 0.92, + "content": "( \\Omega , { \\mathcal { F } } , P )", + "type": "inline_equation" + }, + { + "bbox": [ + 435, + 160, + 456, + 174 + ], + "score": 1.0, + "content": ". For", + "type": "text" + }, + { + "bbox": [ + 457, + 162, + 466, + 172 + ], + "score": 0.82, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 466, + 160, + 506, + 174 + ], + "score": 1.0, + "content": "we use a", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 173, + 505, + 185 + ], + "spans": [ + { + "bbox": [ + 105, + 173, + 505, + 185 + ], + "score": 1.0, + "content": "new, noisy version of the two-forward-step procedure from Johnstone & Eckstein (2020b). For each", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 182, + 506, + 198 + ], + "spans": [ + { + "bbox": [ + 107, + 184, + 118, + 195 + ], + "score": 0.74, + "content": "A _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 119, + 182, + 122, + 198 + ], + "score": 1.0, + "content": ",", + "type": "text" + }, + { + "bbox": [ + 122, + 184, + 155, + 194 + ], + "score": 0.77, + "content": "i \\in 1 . . n", + "type": "inline_equation" + }, + { + "bbox": [ + 156, + 182, + 506, + 198 + ], + "score": 1.0, + "content": ", we use the same resolvent step used in previous projective splitting papers, originating", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 192, + 507, + 209 + ], + "spans": [ + { + "bbox": [ + 104, + 192, + 281, + 209 + ], + "score": 1.0, + "content": "with (Eckstein & Svaiter, 2008). 
In the case", + "type": "text" + }, + { + "bbox": [ + 282, + 194, + 333, + 205 + ], + "score": 0.92, + "content": "\\epsilon ^ { k } = e ^ { k } = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 333, + 192, + 414, + 209 + ], + "score": 1.0, + "content": ", the selection of the", + "type": "text" + }, + { + "bbox": [ + 415, + 194, + 448, + 206 + ], + "score": 0.91, + "content": "( \\bar { x _ { i } ^ { k } } , y _ { i } ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 448, + 192, + 507, + 209 + ], + "score": 1.0, + "content": "is identical to", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 204, + 505, + 219 + ], + "spans": [ + { + "bbox": [ + 105, + 204, + 407, + 219 + ], + "score": 1.0, + "content": "that proposed by Johnstone & Eckstein (2020b), resulting in the hyperplane", + "type": "text" + }, + { + "bbox": [ + 408, + 206, + 474, + 218 + ], + "score": 0.92, + "content": "\\{ p : { \\varphi } _ { k } ( p ) = 0 \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 474, + 204, + 505, + 219 + ], + "score": 1.0, + "content": "strictly", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 216, + 197, + 229 + ], + "spans": [ + { + "bbox": [ + 105, + 216, + 149, + 229 + ], + "score": 1.0, + "content": "separating", + "type": "text" + }, + { + "bbox": [ + 150, + 216, + 161, + 228 + ], + "score": 0.89, + "content": "p ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 161, + 216, + 184, + 229 + ], + "score": 1.0, + "content": "from", + "type": "text" + }, + { + "bbox": [ + 184, + 217, + 192, + 226 + ], + "score": 0.74, + "content": "s", + "type": "inline_equation" + }, + { + "bbox": [ + 192, + 216, + 197, + 229 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 8, + "bbox_fs": [ + 104, + 147, + 507, + 229 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 233, + 505, + 278 + ], + "lines": [ + { + "bbox": [ + 105, + 233, + 505, + 246 + ], + "spans": 
[ + { + "bbox": [ + 105, + 233, + 260, + 246 + ], + "score": 1.0, + "content": "SPS achieves full splitting of (1): each", + "type": "text" + }, + { + "bbox": [ + 261, + 234, + 272, + 244 + ], + "score": 0.88, + "content": "A _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 273, + 233, + 505, + 246 + ], + "score": 1.0, + "content": "is processed separately using a resolvent and the Lipschitz", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 245, + 505, + 257 + ], + "spans": [ + { + "bbox": [ + 106, + 245, + 129, + 257 + ], + "score": 1.0, + "content": "term", + "type": "text" + }, + { + "bbox": [ + 129, + 245, + 138, + 254 + ], + "score": 0.8, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 139, + 245, + 381, + 257 + ], + "score": 1.0, + "content": "is processed via a stochastic gradient oracle. When the", + "type": "text" + }, + { + "bbox": [ + 381, + 245, + 393, + 255 + ], + "score": 0.88, + "content": "A _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 393, + 245, + 505, + 257 + ], + "score": 1.0, + "content": "arise from regularizers or", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 256, + 505, + 268 + ], + "spans": [ + { + "bbox": [ + 105, + 256, + 505, + 268 + ], + "score": 1.0, + "content": "constraints, as discussed in Section 2, their resolvents can be readily computed so long as their", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 267, + 369, + 279 + ], + "spans": [ + { + "bbox": [ + 105, + 267, + 369, + 279 + ], + "score": 1.0, + "content": "respective proximal/projection operators have a convenient form.", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 13.5, + "bbox_fs": [ + 105, + 233, + 505, + 279 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 288, + 505, + 323 + ], + "lines": [ + { + "bbox": [ + 105, + 288, + 505, + 302 + ], + "spans": [ + { + "bbox": [ + 105, + 288, + 212, + 302 + ], + "score": 1.0, + "content": 
"Noise assumptions Let", + "type": "text" + }, + { + "bbox": [ + 212, + 289, + 295, + 302 + ], + "score": 0.95, + "content": "\\mathcal { F } _ { k } \\doteq \\sigma ( p ^ { 1 } , \\ldots , p ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 295, + 288, + 314, + 302 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 315, + 289, + 363, + 302 + ], + "score": 0.93, + "content": "\\mathcal { E } _ { k } \\doteq \\sigma ( \\epsilon ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 363, + 288, + 505, + 302 + ], + "score": 1.0, + "content": ". The stochastic estimators for the", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 103, + 296, + 508, + 317 + ], + "spans": [ + { + "bbox": [ + 103, + 296, + 147, + 317 + ], + "score": 1.0, + "content": "gradients,", + "type": "text" + }, + { + "bbox": [ + 147, + 300, + 158, + 311 + ], + "score": 0.87, + "content": "r ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 159, + 296, + 176, + 317 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 176, + 300, + 198, + 313 + ], + "score": 0.94, + "content": "y _ { n + 1 } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 198, + 296, + 508, + 317 + ], + "score": 1.0, + "content": ", are assumed to be unbiased, that is, the noise terms have mean 0 conditioned", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 311, + 156, + 325 + ], + "spans": [ + { + "bbox": [ + 105, + 311, + 156, + 325 + ], + "score": 1.0, + "content": "on the past:", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 17, + "bbox_fs": [ + 103, + 288, + 508, + 325 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 229, + 326, + 381, + 342 + ], + "lines": [ + { + "bbox": [ + 229, + 326, + 381, + 342 + ], + "spans": [ + { + "bbox": [ + 229, + 326, + 381, + 342 + ], + "score": 0.91, + "content": "\\mathbb { E } [ \\epsilon ^ { k } | \\mathcal { F } _ { k } ] = 0 , 
\\quad \\mathbb { E } [ e ^ { k } | \\mathcal { F } _ { k } ] = 0 \\quad a . s .", + "type": "interline_equation", + "image_path": "3081ebd6cddf1d97a7d70c51bac354bb263f320314ed46ca2c98672af94e55e6.jpg" + } + ] + } + ], + "index": 19, + "virtual_lines": [ + { + "bbox": [ + 229, + 326, + 381, + 342 + ], + "spans": [], + "index": 19 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 345, + 396, + 356 + ], + "lines": [ + { + "bbox": [ + 106, + 344, + 396, + 358 + ], + "spans": [ + { + "bbox": [ + 106, + 344, + 396, + 358 + ], + "score": 1.0, + "content": "We impose the following mild assumptions on the variance of the noise:", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 20, + "bbox_fs": [ + 106, + 344, + 396, + 358 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 203, + 359, + 406, + 393 + ], + "lines": [ + { + "bbox": [ + 203, + 359, + 406, + 393 + ], + "spans": [ + { + "bbox": [ + 203, + 359, + 406, + 393 + ], + "score": 0.91, + "content": "\\begin{array} { r l } & { \\mathbb { E } \\left[ \\| \\epsilon ^ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } \\right] \\leq N _ { 1 } + N _ { 2 } \\| B ( z ^ { k } ) \\| ^ { 2 } \\quad a . s . } \\\\ & { \\mathbb { E } \\left[ \\| e ^ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } , \\mathcal { E } _ { k } \\right] \\leq N _ { 3 } + N _ { 4 } \\| B ( x _ { n + 1 } ^ { k } ) \\| ^ { 2 } \\quad a . s . 
, } \\end{array}", + "type": "interline_equation", + "image_path": "cdc25a2da250f527e64eb0fe1341d28f6d7f3c22cb3672361763f89c38ed5842.jpg" + } + ] + } + ], + "index": 21.5, + "virtual_lines": [ + { + "bbox": [ + 203, + 359, + 406, + 376.0 + ], + "spans": [], + "index": 21 + }, + { + "bbox": [ + 203, + 376.0, + 406, + 393.0 + ], + "spans": [], + "index": 22 + } + ] + }, + { + "type": "text", + "bbox": [ + 104, + 395, + 490, + 408 + ], + "lines": [ + { + "bbox": [ + 106, + 395, + 493, + 410 + ], + "spans": [ + { + "bbox": [ + 106, + 395, + 133, + 410 + ], + "score": 1.0, + "content": "where", + "type": "text" + }, + { + "bbox": [ + 133, + 397, + 239, + 408 + ], + "score": 0.91, + "content": "0 \\le N _ { 1 } , N _ { 2 } , N _ { 3 } , N _ { 4 } < \\infty", + "type": "inline_equation" + }, + { + "bbox": [ + 239, + 395, + 317, + 410 + ], + "score": 1.0, + "content": ". We do not require", + "type": "text" + }, + { + "bbox": [ + 317, + 396, + 328, + 406 + ], + "score": 0.88, + "content": "e ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 328, + 395, + 346, + 410 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 346, + 396, + 356, + 406 + ], + "score": 0.87, + "content": "\\epsilon ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 356, + 395, + 493, + 410 + ], + "score": 1.0, + "content": "to be independent of one another.", + "type": "text" + } + ], + "index": 23 + } + ], + "index": 23, + "bbox_fs": [ + 106, + 395, + 493, + 410 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 419, + 504, + 464 + ], + "lines": [ + { + "bbox": [ + 105, + 419, + 506, + 432 + ], + "spans": [ + { + "bbox": [ + 105, + 419, + 241, + 432 + ], + "score": 1.0, + "content": "Stepsize choices The stepsizes", + "type": "text" + }, + { + "bbox": [ + 242, + 422, + 253, + 432 + ], + "score": 0.86, + "content": "\\rho _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 254, + 419, + 272, + 432 + ], + "score": 1.0, + "content": "and", + 
"type": "text" + }, + { + "bbox": [ + 272, + 422, + 285, + 431 + ], + "score": 0.86, + "content": "\\alpha _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 285, + 419, + 506, + 432 + ], + "score": 1.0, + "content": "are assumed to be deterministic. A constant stepsize", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 106, + 430, + 505, + 443 + ], + "spans": [ + { + "bbox": [ + 106, + 430, + 505, + 443 + ], + "score": 1.0, + "content": "choice which attains a non-asymptotic convergence rate will be considered in the next section", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 104, + 441, + 506, + 455 + ], + "spans": [ + { + "bbox": [ + 104, + 441, + 506, + 455 + ], + "score": 1.0, + "content": "(Theorem 2). The stepsize conditions we will impose to guarantee almost-sure convergence (Theorem", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 104, + 451, + 133, + 465 + ], + "spans": [ + { + "bbox": [ + 104, + 451, + 133, + 465 + ], + "score": 1.0, + "content": "1) are", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 25.5, + "bbox_fs": [ + 104, + 419, + 506, + 465 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 140, + 467, + 469, + 482 + ], + "lines": [ + { + "bbox": [ + 140, + 467, + 469, + 482 + ], + "spans": [ + { + "bbox": [ + 140, + 467, + 469, + 482 + ], + "score": 0.85, + "content": "\\begin{array} { r } { \\sum _ { k = 1 } ^ { \\infty } \\alpha _ { k } \\rho _ { k } = \\infty , \\quad \\sum _ { k = 1 } ^ { \\infty } \\alpha _ { k } ^ { 2 } < \\infty , \\quad \\sum _ { k = 1 } ^ { \\infty } \\alpha _ { k } \\rho _ { k } ^ { 2 } < \\infty , \\mathrm { a n d } \\rho _ { k } \\leq \\overline { \\rho } < 1 / L . 
} \\end{array}", + "type": "interline_equation", + "image_path": "4c73912963288c7d940343cfda19b4261b37d3e3d2936871d49cc104e58e93db.jpg" + } + ] + } + ], + "index": 28, + "virtual_lines": [ + { + "bbox": [ + 140, + 467, + 469, + 482 + ], + "spans": [], + "index": 28 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 485, + 452, + 496 + ], + "lines": [ + { + "bbox": [ + 105, + 484, + 451, + 497 + ], + "spans": [ + { + "bbox": [ + 105, + 484, + 205, + 497 + ], + "score": 1.0, + "content": "For example, in the case", + "type": "text" + }, + { + "bbox": [ + 206, + 486, + 232, + 495 + ], + "score": 0.9, + "content": "L = 1", + "type": "inline_equation" + }, + { + "bbox": [ + 232, + 484, + 451, + 497 + ], + "score": 1.0, + "content": ", a particular choice which satisfies these constraints is", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 29, + "bbox_fs": [ + 105, + 484, + 451, + 497 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 142, + 498, + 469, + 513 + ], + "lines": [ + { + "bbox": [ + 142, + 498, + 469, + 513 + ], + "spans": [ + { + "bbox": [ + 142, + 498, + 469, + 513 + ], + "score": 0.62, + "content": "\\alpha _ { k } = k ^ { - 0 . 5 - p } \\mathrm { f o r } 0 < p < 0 . 5 , \\mathrm { a n d } \\rho _ { k } = k ^ { - 0 . 5 + t } \\mathrm { f o r } p \\leq t < 0 . 5 p + 0 . 
2 5 .", + "type": "interline_equation", + "image_path": "2a93bfcc389ad2be8fe6c2716602414c175dacdc0527cd1f6bf434b8d6a3b4ca.jpg" + } + ] + } + ], + "index": 30, + "virtual_lines": [ + { + "bbox": [ + 142, + 498, + 469, + 513 + ], + "spans": [], + "index": 30 + } + ] + }, + { + "type": "text", + "bbox": [ + 105, + 516, + 504, + 540 + ], + "lines": [ + { + "bbox": [ + 106, + 516, + 505, + 529 + ], + "spans": [ + { + "bbox": [ + 106, + 516, + 218, + 529 + ], + "score": 1.0, + "content": "For simplicity, the stepsizes", + "type": "text" + }, + { + "bbox": [ + 219, + 519, + 226, + 527 + ], + "score": 0.78, + "content": "\\tau", + "type": "inline_equation" + }, + { + "bbox": [ + 226, + 516, + 505, + 529 + ], + "score": 1.0, + "content": "used for the resolvent updates in lines 3-5 are fixed, but they could be", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 106, + 528, + 482, + 541 + ], + "spans": [ + { + "bbox": [ + 106, + 528, + 210, + 541 + ], + "score": 1.0, + "content": "allowed to vary with both", + "type": "text" + }, + { + "bbox": [ + 211, + 529, + 216, + 538 + ], + "score": 0.78, + "content": "i", + "type": "inline_equation" + }, + { + "bbox": [ + 216, + 528, + 234, + 541 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 234, + 528, + 241, + 538 + ], + "score": 0.82, + "content": "k", + "type": "inline_equation" + }, + { + "bbox": [ + 241, + 528, + 482, + 541 + ], + "score": 1.0, + "content": "so long as they have finite positive lower and upper bounds.", + "type": "text" + } + ], + "index": 32 + } + ], + "index": 31.5, + "bbox_fs": [ + 106, + 516, + 505, + 541 + ] + }, + { + "type": "title", + "bbox": [ + 106, + 560, + 312, + 573 + ], + "lines": [ + { + "bbox": [ + 105, + 559, + 313, + 576 + ], + "spans": [ + { + "bbox": [ + 105, + 559, + 313, + 576 + ], + "score": 1.0, + "content": "Algorithm 1: Stochastic Projective Splitting (SPS)", + "type": "text" + } + ], + "index": 33 + } + ], + "index": 33 + } + ] + }, + { + 
"preproc_blocks": [ + { + "type": "title", + "bbox": [ + 107, + 81, + 282, + 94 + ], + "lines": [ + { + "bbox": [ + 105, + 81, + 284, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 284, + 95 + ], + "score": 1.0, + "content": "5 MAIN THEORETICAL RESULTS", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 106, + 105, + 505, + 139 + ], + "lines": [ + { + "bbox": [ + 106, + 105, + 506, + 119 + ], + "spans": [ + { + "bbox": [ + 106, + 105, + 197, + 119 + ], + "score": 1.0, + "content": "Theorem 1. Suppose", + "type": "text" + }, + { + "bbox": [ + 197, + 106, + 245, + 117 + ], + "score": 0.92, + "content": "A _ { 1 } , \\ldots , A _ { n }", + "type": "inline_equation" + }, + { + "bbox": [ + 245, + 105, + 345, + 119 + ], + "score": 1.0, + "content": "are maximal monotone,", + "type": "text" + }, + { + "bbox": [ + 346, + 106, + 355, + 115 + ], + "score": 0.7, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 356, + 105, + 366, + 119 + ], + "score": 1.0, + "content": "is", + "type": "text" + }, + { + "bbox": [ + 366, + 106, + 374, + 115 + ], + "score": 0.63, + "content": "L", + "type": "inline_equation" + }, + { + "bbox": [ + 375, + 105, + 506, + 119 + ], + "score": 1.0, + "content": "-Lipschitz and monotone, and a", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 117, + 505, + 129 + ], + "spans": [ + { + "bbox": [ + 106, + 117, + 250, + 129 + ], + "score": 1.0, + "content": "solution to (1) exists. For Algorithm", + "type": "text" + }, + { + "bbox": [ + 251, + 117, + 257, + 126 + ], + "score": 0.47, + "content": "I", + "type": "inline_equation" + }, + { + "bbox": [ + 257, + 117, + 505, + 129 + ], + "score": 1.0, + "content": ", suppose (9)-(12) hold. 
Then with probability one it holds that", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 124, + 457, + 142 + ], + "spans": [ + { + "bbox": [ + 107, + 127, + 142, + 137 + ], + "score": 0.92, + "content": "z ^ { k } \\to z ^ { * }", + "type": "inline_equation" + }, + { + "bbox": [ + 142, + 124, + 173, + 142 + ], + "score": 1.0, + "content": ", where", + "type": "text" + }, + { + "bbox": [ + 173, + 128, + 184, + 137 + ], + "score": 0.85, + "content": "z ^ { * }", + "type": "inline_equation" + }, + { + "bbox": [ + 184, + 124, + 349, + 142 + ], + "score": 1.0, + "content": "solves (1). Further, with probability one,", + "type": "text" + }, + { + "bbox": [ + 349, + 127, + 386, + 140 + ], + "score": 0.92, + "content": "x _ { i } ^ { k } \\to z ^ { * }", + "type": "inline_equation" + }, + { + "bbox": [ + 386, + 124, + 402, + 142 + ], + "score": 1.0, + "content": "for", + "type": "text" + }, + { + "bbox": [ + 402, + 128, + 452, + 139 + ], + "score": 0.92, + "content": "i = 1 , \\ldots , n", + "type": "inline_equation" + }, + { + "bbox": [ + 453, + 124, + 457, + 142 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 3 + } + ], + "index": 2 + }, + { + "type": "text", + "bbox": [ + 107, + 149, + 506, + 194 + ], + "lines": [ + { + "bbox": [ + 105, + 149, + 506, + 163 + ], + "spans": [ + { + "bbox": [ + 105, + 149, + 506, + 163 + ], + "score": 1.0, + "content": "Proof sketch Theorem 1 is proved in Appendix C, but we provide a brief sketch here. 
The proof", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 161, + 506, + 173 + ], + "spans": [ + { + "bbox": [ + 106, + 161, + 506, + 173 + ], + "score": 1.0, + "content": "begins by deriving a simple recursion inspired by the analysis of SGD (Robbins & Monro, 1951).", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 170, + 506, + 186 + ], + "spans": [ + { + "bbox": [ + 105, + 170, + 131, + 186 + ], + "score": 1.0, + "content": "Since", + "type": "text" + }, + { + "bbox": [ + 131, + 171, + 219, + 184 + ], + "score": 0.92, + "content": "p ^ { k + 1 } = p ^ { k } - \\alpha _ { k } \\nabla \\bar { \\varphi } _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 219, + 170, + 506, + 186 + ], + "score": 1.0, + "content": ", a step of projective splitting can be viewed as GD applied to the affine", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 183, + 337, + 195 + ], + "spans": [ + { + "bbox": [ + 106, + 183, + 229, + 195 + ], + "score": 1.0, + "content": "hyperplane generator function", + "type": "text" + }, + { + "bbox": [ + 229, + 185, + 241, + 195 + ], + "score": 0.86, + "content": "\\varphi _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 242, + 183, + 302, + 195 + ], + "score": 1.0, + "content": ". 
Thus, for any", + "type": "text" + }, + { + "bbox": [ + 302, + 183, + 332, + 194 + ], + "score": 0.92, + "content": "p ^ { * } \\in \\mathcal { P }", + "type": "inline_equation" + }, + { + "bbox": [ + 333, + 183, + 337, + 195 + ], + "score": 1.0, + "content": ",", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 5.5 + }, + { + "type": "interline_equation", + "bbox": [ + 163, + 195, + 440, + 227 + ], + "lines": [ + { + "bbox": [ + 163, + 195, + 440, + 227 + ], + "spans": [ + { + "bbox": [ + 163, + 195, + 440, + 227 + ], + "score": 0.9, + "content": "\\begin{array} { r l } & { \\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } = \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - 2 \\alpha _ { k } \\langle \\nabla \\varphi _ { k } , p ^ { k } - p ^ { * } \\rangle + \\alpha _ { k } ^ { 2 } \\| \\nabla \\varphi _ { k } \\| ^ { 2 } } \\\\ & { \\qquad = \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - 2 \\alpha _ { k } ( \\varphi _ { k } ( p ^ { k } ) - \\varphi _ { k } ( p ^ { * } ) ) + \\alpha _ { k } ^ { 2 } \\| \\nabla \\varphi _ { k } \\| ^ { 2 } } \\end{array}", + "type": "interline_equation", + "image_path": "e9ddae0d18b9ce62d16ecc0ec72b85c806b24ad288b9ad8f501dfc8a5fc344aa.jpg" + } + ] + } + ], + "index": 9, + "virtual_lines": [ + { + "bbox": [ + 163, + 195, + 440, + 205.66666666666666 + ], + "spans": [], + "index": 8 + }, + { + "bbox": [ + 163, + 205.66666666666666, + 440, + 216.33333333333331 + ], + "spans": [], + "index": 9 + }, + { + "bbox": [ + 163, + 216.33333333333331, + 440, + 226.99999999999997 + ], + "spans": [], + "index": 10 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 227, + 505, + 250 + ], + "lines": [ + { + "bbox": [ + 105, + 226, + 506, + 241 + ], + "spans": [ + { + "bbox": [ + 105, + 226, + 300, + 241 + ], + "score": 1.0, + "content": "where in the second equation we have used that", + "type": "text" + }, + { + "bbox": [ + 300, + 228, + 325, + 240 + ], + "score": 0.92, + "content": "\\varphi _ { k } ( p )", + "type": "inline_equation" + }, + { + "bbox": [ 
+ 326, + 226, + 373, + 241 + ], + "score": 1.0, + "content": "is affine on", + "type": "text" + }, + { + "bbox": [ + 374, + 228, + 382, + 238 + ], + "score": 0.83, + "content": "\\mathcal { P }", + "type": "inline_equation" + }, + { + "bbox": [ + 383, + 226, + 506, + 241 + ], + "score": 1.0, + "content": ". The basic strategy is to show", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 238, + 191, + 251 + ], + "spans": [ + { + "bbox": [ + 106, + 238, + 157, + 251 + ], + "score": 1.0, + "content": "that, for any", + "type": "text" + }, + { + "bbox": [ + 157, + 239, + 187, + 250 + ], + "score": 0.92, + "content": "p ^ { * } \\in { \\mathcal { S } }", + "type": "inline_equation" + }, + { + "bbox": [ + 187, + 238, + 191, + 251 + ], + "score": 1.0, + "content": ",", + "type": "text" + } + ], + "index": 12 + } + ], + "index": 11.5 + }, + { + "type": "interline_equation", + "bbox": [ + 213, + 251, + 398, + 266 + ], + "lines": [ + { + "bbox": [ + 213, + 251, + 398, + 266 + ], + "spans": [ + { + "bbox": [ + 213, + 251, + 398, + 266 + ], + "score": 0.92, + "content": "\\begin{array} { r } { \\mathbb { E } [ \\| \\nabla \\varphi _ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } ] \\le C _ { 1 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + C _ { 2 } \\quad a . s . 
} \\end{array}", + "type": "interline_equation", + "image_path": "f4535f8ce716c78230c228f54c23983fe999b54cec031462a4223df15a3eec87.jpg" + } + ] + } + ], + "index": 13, + "virtual_lines": [ + { + "bbox": [ + 213, + 251, + 398, + 266 + ], + "spans": [], + "index": 13 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 267, + 505, + 323 + ], + "lines": [ + { + "bbox": [ + 105, + 266, + 505, + 280 + ], + "spans": [ + { + "bbox": [ + 105, + 266, + 145, + 280 + ], + "score": 1.0, + "content": "for some", + "type": "text" + }, + { + "bbox": [ + 145, + 267, + 194, + 278 + ], + "score": 0.92, + "content": "C _ { 1 } , C _ { 2 } > 0", + "type": "inline_equation" + }, + { + "bbox": [ + 194, + 266, + 505, + 280 + ], + "score": 1.0, + "content": ". This condition allows one to establish stochastic quasi-FejΓ©r monotonicity", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 278, + 505, + 290 + ], + "spans": [ + { + "bbox": [ + 106, + 278, + 405, + 290 + ], + "score": 1.0, + "content": "(SQFM) (Combettes & Pesquet, 2015, Proposition 2.3) of the iterates to", + "type": "text" + }, + { + "bbox": [ + 406, + 278, + 414, + 288 + ], + "score": 0.79, + "content": "s", + "type": "inline_equation" + }, + { + "bbox": [ + 414, + 278, + 505, + 290 + ], + "score": 1.0, + "content": ". 
One consequence of", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 288, + 505, + 303 + ], + "spans": [ + { + "bbox": [ + 105, + 288, + 366, + 303 + ], + "score": 1.0, + "content": "SQFM is that with probability one there exists a subsequence", + "type": "text" + }, + { + "bbox": [ + 367, + 290, + 378, + 300 + ], + "score": 0.86, + "content": "v _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 378, + 288, + 420, + 303 + ], + "score": 1.0, + "content": "such that", + "type": "text" + }, + { + "bbox": [ + 421, + 289, + 505, + 301 + ], + "score": 0.91, + "content": "\\varphi _ { v _ { k } } ( p ^ { v _ { k } } ) - \\varphi _ { v _ { k } } ( p ^ { * } )", + "type": "inline_equation" + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 299, + 506, + 315 + ], + "spans": [ + { + "bbox": [ + 104, + 299, + 357, + 315 + ], + "score": 1.0, + "content": "converges to 0. Furthermore, roughly speaking, we show that", + "type": "text" + }, + { + "bbox": [ + 358, + 300, + 429, + 313 + ], + "score": 0.93, + "content": "\\varphi _ { k } ( p ^ { k } ) - \\varphi _ { k } ( p ^ { * } )", + "type": "inline_equation" + }, + { + "bbox": [ + 429, + 299, + 506, + 315 + ], + "score": 1.0, + "content": "provides an upper", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 311, + 341, + 324 + ], + "spans": [ + { + "bbox": [ + 105, + 311, + 341, + 324 + ], + "score": 1.0, + "content": "bound on the following β€œapproximation residual\" for SPS:", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 16 + }, + { + "type": "interline_equation", + "bbox": [ + 166, + 323, + 444, + 338 + ], + "lines": [ + { + "bbox": [ + 166, + 323, + 444, + 338 + ], + "spans": [ + { + "bbox": [ + 166, + 323, + 444, + 338 + ], + "score": 0.85, + "content": "\\begin{array} { r } { G _ { k } \\doteq \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + \\| B 
( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } . } \\end{array}", + "type": "interline_equation", + "image_path": "555c16bd22323c3486a9de0ea0a7f9817356db176ef7628602fe218a3e87fd0e.jpg" + } + ] + } + ], + "index": 19, + "virtual_lines": [ + { + "bbox": [ + 166, + 323, + 444, + 338 + ], + "spans": [], + "index": 19 + } + ] + }, + { + "type": "text", + "bbox": [ + 114, + 339, + 442, + 350 + ], + "lines": [ + { + "bbox": [ + 121, + 337, + 444, + 353 + ], + "spans": [ + { + "bbox": [ + 121, + 337, + 444, + 353 + ], + "score": 1.0, + "content": "provides an approximation error for SPS, as formalized in the following lemma:", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 20 + }, + { + "type": "text", + "bbox": [ + 107, + 351, + 403, + 365 + ], + "lines": [ + { + "bbox": [ + 105, + 348, + 401, + 367 + ], + "spans": [ + { + "bbox": [ + 105, + 348, + 191, + 367 + ], + "score": 1.0, + "content": "Lemma 1. For SPS,", + "type": "text" + }, + { + "bbox": [ + 192, + 351, + 314, + 365 + ], + "score": 0.92, + "content": "p ^ { k } = ( z ^ { k } , w _ { 1 } ^ { k } , \\ldots , w _ { n + 1 } ^ { k } ) \\in \\mathcal { S }", + "type": "inline_equation" + }, + { + "bbox": [ + 315, + 348, + 369, + 367 + ], + "score": 1.0, + "content": "if and only if", + "type": "text" + }, + { + "bbox": [ + 369, + 352, + 401, + 363 + ], + "score": 0.92, + "content": "G _ { k } = 0", + "type": "inline_equation" + } + ], + "index": 21 + } + ], + "index": 21 + }, + { + "type": "text", + "bbox": [ + 107, + 366, + 503, + 402 + ], + "lines": [ + { + "bbox": [ + 105, + 364, + 506, + 379 + ], + "spans": [ + { + "bbox": [ + 105, + 364, + 133, + 379 + ], + "score": 1.0, + "content": "Since", + "type": "text" + }, + { + "bbox": [ + 133, + 365, + 190, + 378 + ], + "score": 0.93, + "content": "y _ { i } ^ { k } \\ \\in \\ A _ { i } ( x _ { i } ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 190, + 364, + 208, + 379 + ], + "score": 1.0, + "content": "for", + "type": "text" + }, + { + 
"bbox": [ + 208, + 366, + 246, + 377 + ], + "score": 0.88, + "content": "i \\in 1 . . n", + "type": "inline_equation" + }, + { + "bbox": [ + 247, + 364, + 283, + 379 + ], + "score": 1.0, + "content": ", having", + "type": "text" + }, + { + "bbox": [ + 283, + 366, + 320, + 377 + ], + "score": 0.9, + "content": "G _ { k } ~ = ~ 0", + "type": "inline_equation" + }, + { + "bbox": [ + 320, + 364, + 375, + 379 + ], + "score": 1.0, + "content": "implies that", + "type": "text" + }, + { + "bbox": [ + 375, + 365, + 415, + 378 + ], + "score": 0.87, + "content": "z ^ { k } = x _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 415, + 364, + 420, + 379 + ], + "score": 1.0, + "content": ",", + "type": "text" + }, + { + "bbox": [ + 420, + 365, + 461, + 378 + ], + "score": 0.88, + "content": "w _ { i } ^ { k } \\ = \\ y _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 462, + 364, + 506, + 379 + ], + "score": 1.0, + "content": ", and thus", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 107, + 371, + 505, + 398 + ], + "spans": [ + { + "bbox": [ + 107, + 379, + 162, + 392 + ], + "score": 0.93, + "content": "w _ { i } ^ { k } \\in A _ { i } ( z ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 162, + 371, + 178, + 398 + ], + "score": 1.0, + "content": "for", + "type": "text" + }, + { + "bbox": [ + 178, + 379, + 212, + 390 + ], + "score": 0.89, + "content": "i \\in 1 . . n", + "type": "inline_equation" + }, + { + "bbox": [ + 212, + 371, + 243, + 398 + ], + "score": 1.0, + "content": ". 
Since", + "type": "text" + }, + { + "bbox": [ + 243, + 378, + 306, + 392 + ], + "score": 0.93, + "content": "w _ { n + 1 } ^ { k } = B ( z ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 307, + 371, + 325, + 398 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 326, + 377, + 386, + 392 + ], + "score": 0.94, + "content": "\\textstyle \\sum _ { i = 1 } ^ { n + 1 } w _ { i } ^ { k } = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 386, + 377, + 449, + 392 + ], + "score": 1.0, + "content": ", it follows that", + "type": "text" + }, + { + "bbox": [ + 450, + 379, + 461, + 389 + ], + "score": 0.87, + "content": "z ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 461, + 377, + 505, + 392 + ], + "score": 1.0, + "content": "solves (1).", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 390, + 297, + 402 + ], + "spans": [ + { + "bbox": [ + 106, + 390, + 284, + 402 + ], + "score": 1.0, + "content": "The reverse direction is proved in Appendix", + "type": "text" + }, + { + "bbox": [ + 284, + 392, + 292, + 400 + ], + "score": 0.34, + "content": "\\mathrm { D }", + "type": "inline_equation" + }, + { + "bbox": [ + 293, + 390, + 297, + 402 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 23 + }, + { + "type": "text", + "bbox": [ + 107, + 407, + 505, + 442 + ], + "lines": [ + { + "bbox": [ + 106, + 406, + 505, + 420 + ], + "spans": [ + { + "bbox": [ + 106, + 406, + 160, + 420 + ], + "score": 1.0, + "content": "The quantity", + "type": "text" + }, + { + "bbox": [ + 161, + 408, + 174, + 419 + ], + "score": 0.89, + "content": "G _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 175, + 406, + 505, + 420 + ], + "score": 1.0, + "content": "generalizes the role played by the norm of the gradient in algorithms for smooth", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 106, + 417, + 505, + 431 + ], + "spans": [ + { + "bbox": [ + 
106, + 417, + 324, + 431 + ], + "score": 1.0, + "content": "optimization. In particular, in the special case where", + "type": "text" + }, + { + "bbox": [ + 324, + 419, + 350, + 428 + ], + "score": 0.9, + "content": "n = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 351, + 417, + 369, + 431 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 369, + 418, + 432, + 430 + ], + "score": 0.94, + "content": "\\bar { B } ( z ) = \\nabla f ( z )", + "type": "inline_equation" + }, + { + "bbox": [ + 433, + 417, + 505, + 431 + ], + "score": 1.0, + "content": "for some smooth", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 428, + 295, + 442 + ], + "spans": [ + { + "bbox": [ + 105, + 428, + 173, + 442 + ], + "score": 1.0, + "content": "convex function", + "type": "text" + }, + { + "bbox": [ + 173, + 430, + 180, + 441 + ], + "score": 0.84, + "content": "f", + "type": "inline_equation" + }, + { + "bbox": [ + 180, + 428, + 216, + 442 + ], + "score": 1.0, + "content": ", one has", + "type": "text" + }, + { + "bbox": [ + 217, + 429, + 289, + 442 + ], + "score": 0.96, + "content": "G _ { k } = \\| \\bar { \\nabla } f ( z ^ { k } ) \\| ^ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 289, + 428, + 295, + 442 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 26 + }, + { + "type": "text", + "bbox": [ + 107, + 446, + 504, + 469 + ], + "lines": [ + { + "bbox": [ + 106, + 446, + 505, + 459 + ], + "spans": [ + { + "bbox": [ + 106, + 446, + 220, + 459 + ], + "score": 1.0, + "content": "Combining the properties of", + "type": "text" + }, + { + "bbox": [ + 220, + 446, + 234, + 457 + ], + "score": 0.9, + "content": "G _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 234, + 446, + 505, + 459 + ], + "score": 1.0, + "content": "with other results following from SQFM (such as boundedness) will", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 
457, + 416, + 469 + ], + "spans": [ + { + "bbox": [ + 106, + 457, + 416, + 469 + ], + "score": 1.0, + "content": "allow us to derive almost-sure convergence of the iterates to a solution of (1).", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 28.5 + }, + { + "type": "text", + "bbox": [ + 107, + 479, + 504, + 502 + ], + "lines": [ + { + "bbox": [ + 105, + 478, + 505, + 493 + ], + "spans": [ + { + "bbox": [ + 105, + 478, + 505, + 493 + ], + "score": 1.0, + "content": "Convergence rate We can also establish non-asymptotic convergence rates for the approximation", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 488, + 160, + 505 + ], + "spans": [ + { + "bbox": [ + 105, + 488, + 140, + 505 + ], + "score": 1.0, + "content": "residual", + "type": "text" + }, + { + "bbox": [ + 141, + 491, + 154, + 502 + ], + "score": 0.89, + "content": "G _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 155, + 488, + 160, + 505 + ], + "score": 1.0, + "content": ":", + "type": "text" + } + ], + "index": 31 + } + ], + "index": 30.5 + }, + { + "type": "text", + "bbox": [ + 107, + 504, + 374, + 516 + ], + "lines": [ + { + "bbox": [ + 105, + 502, + 373, + 517 + ], + "spans": [ + { + "bbox": [ + 105, + 502, + 251, + 517 + ], + "score": 1.0, + "content": "Theorem 2. 
Fix the total iterations", + "type": "text" + }, + { + "bbox": [ + 251, + 505, + 280, + 515 + ], + "score": 0.92, + "content": "K \\geq 1", + "type": "inline_equation" + }, + { + "bbox": [ + 280, + 502, + 373, + 517 + ], + "score": 1.0, + "content": "of Algorithm 1 and set", + "type": "text" + } + ], + "index": 32 + } + ], + "index": 32 + }, + { + "type": "interline_equation", + "bbox": [ + 161, + 516, + 450, + 538 + ], + "lines": [ + { + "bbox": [ + 161, + 516, + 450, + 538 + ], + "spans": [ + { + "bbox": [ + 161, + 516, + 450, + 538 + ], + "score": 0.92, + "content": "\\forall k = 1 , \\ldots , K : \\rho _ { k } = \\rho \\doteq \\operatorname* { m i n } \\left\\{ K ^ { - 1 / 4 } , 1 / 2 L \\right\\} \\quad \\ a n d \\quad \\alpha _ { k } = C _ { f } \\rho ^ { 2 }", + "type": "interline_equation", + "image_path": "68ff0df76206fb41826b63420b4d14be9fa2c1fc4096f719e7f57855a358d996.jpg" + } + ] + } + ], + "index": 33, + "virtual_lines": [ + { + "bbox": [ + 161, + 516, + 450, + 538 + ], + "spans": [], + "index": 33 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 538, + 295, + 550 + ], + "lines": [ + { + "bbox": [ + 104, + 536, + 297, + 552 + ], + "spans": [ + { + "bbox": [ + 104, + 536, + 144, + 552 + ], + "score": 1.0, + "content": "for some", + "type": "text" + }, + { + "bbox": [ + 144, + 538, + 175, + 550 + ], + "score": 0.93, + "content": "C _ { f } > 0", + "type": "inline_equation" + }, + { + "bbox": [ + 176, + 536, + 297, + 552 + ], + "score": 1.0, + "content": ". Suppose (9)-(11) hold. 
Then", + "type": "text" + } + ], + "index": 34 + } + ], + "index": 34 + }, + { + "type": "interline_equation", + "bbox": [ + 237, + 551, + 374, + 568 + ], + "lines": [ + { + "bbox": [ + 237, + 551, + 374, + 568 + ], + "spans": [ + { + "bbox": [ + 237, + 551, + 374, + 568 + ], + "score": 0.88, + "content": "\\begin{array} { r } { ( 1 / K ) { \\sum } _ { j = 1 } ^ { K } \\mathbb { E } [ G _ { j } ] = \\mathcal { O } ( K ^ { - 1 / 4 } ) } \\end{array}", + "type": "interline_equation", + "image_path": "122c1b59b2769b397d0f512f754d8c65538b5ab3a8389fe57dcecc2cdee4c54a.jpg" + } + ] + } + ], + "index": 35, + "virtual_lines": [ + { + "bbox": [ + 237, + 551, + 374, + 568 + ], + "spans": [], + "index": 35 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 569, + 377, + 581 + ], + "lines": [ + { + "bbox": [ + 105, + 568, + 378, + 582 + ], + "spans": [ + { + "bbox": [ + 105, + 568, + 367, + 582 + ], + "score": 1.0, + "content": "where the constants are given (along with the proof) in Appendix", + "type": "text" + }, + { + "bbox": [ + 367, + 570, + 375, + 579 + ], + "score": 0.51, + "content": "E", + "type": "inline_equation" + }, + { + "bbox": [ + 375, + 568, + 378, + 582 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 36 + } + ], + "index": 36 + }, + { + "type": "text", + "bbox": [ + 106, + 587, + 506, + 732 + ], + "lines": [ + { + "bbox": [ + 106, + 587, + 505, + 601 + ], + "spans": [ + { + "bbox": [ + 106, + 587, + 288, + 601 + ], + "score": 1.0, + "content": "Theorem 2 implies that if we pick an iterate", + "type": "text" + }, + { + "bbox": [ + 288, + 588, + 296, + 598 + ], + "score": 0.8, + "content": "J", + "type": "inline_equation" + }, + { + "bbox": [ + 296, + 587, + 406, + 601 + ], + "score": 1.0, + "content": "uniformly at random from", + "type": "text" + }, + { + "bbox": [ + 406, + 588, + 427, + 599 + ], + "score": 0.7, + "content": "1 . . 
K", + "type": "inline_equation" + }, + { + "bbox": [ + 428, + 587, + 505, + 601 + ], + "score": 1.0, + "content": ", then the expected", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 598, + 508, + 614 + ], + "spans": [ + { + "bbox": [ + 105, + 598, + 141, + 614 + ], + "score": 1.0, + "content": "value of", + "type": "text" + }, + { + "bbox": [ + 141, + 600, + 156, + 612 + ], + "score": 0.89, + "content": "G _ { J }", + "type": "inline_equation" + }, + { + "bbox": [ + 156, + 598, + 166, + 614 + ], + "score": 1.0, + "content": "is", + "type": "text" + }, + { + "bbox": [ + 167, + 599, + 211, + 612 + ], + "score": 0.93, + "content": "\\mathcal { O } ( K ^ { - 1 / 4 } )", + "type": "inline_equation" + }, + { + "bbox": [ + 211, + 598, + 508, + 614 + ], + "score": 1.0, + "content": ". As far as we know, this is the first convergence rate for a stochastic full-", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 609, + 507, + 625 + ], + "spans": [ + { + "bbox": [ + 105, + 609, + 507, + 625 + ], + "score": 1.0, + "content": "splitting method solving (1) in the general discontinuous (i.e. set-valued) monotone inclusion case,", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 106, + 622, + 506, + 634 + ], + "spans": [ + { + "bbox": [ + 106, + 622, + 506, + 634 + ], + "score": 1.0, + "content": "and it is not clear whether it can be improved, either by a better analysis or a better method. 
Faster", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 633, + 505, + 645 + ], + "spans": [ + { + "bbox": [ + 105, + 633, + 505, + 645 + ], + "score": 1.0, + "content": "rates are certainly possible for deterministic methods under various continuity assumptions; Tseng’s", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 642, + 506, + 657 + ], + "spans": [ + { + "bbox": [ + 105, + 642, + 169, + 657 + ], + "score": 1.0, + "content": "method obtains", + "type": "text" + }, + { + "bbox": [ + 169, + 644, + 206, + 656 + ], + "score": 0.93, + "content": "\\bar { \\mathcal { O } } ( K ^ { - 1 } )", + "type": "inline_equation" + }, + { + "bbox": [ + 206, + 642, + 266, + 657 + ], + "score": 1.0, + "content": "rate (Monteiro", + "type": "text" + }, + { + "bbox": [ + 267, + 644, + 275, + 654 + ], + "score": 0.3, + "content": "\\&", + "type": "inline_equation" + }, + { + "bbox": [ + 276, + 642, + 506, + 657 + ], + "score": 1.0, + "content": "Svaiter, 2010) and the accelerated Halpern iteration under", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 653, + 506, + 668 + ], + "spans": [ + { + "bbox": [ + 105, + 653, + 222, + 668 + ], + "score": 1.0, + "content": "Lipschitz continuity obtains", + "type": "text" + }, + { + "bbox": [ + 222, + 655, + 259, + 667 + ], + "score": 0.93, + "content": "\\mathcal { O } ( K ^ { - 2 } )", + "type": "inline_equation" + }, + { + "bbox": [ + 259, + 653, + 506, + 668 + ], + "score": 1.0, + "content": "rate (Diakonikolas, 2020). 
While our rate may seem slow, it", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 666, + 506, + 679 + ], + "spans": [ + { + "bbox": [ + 105, + 666, + 264, + 679 + ], + "score": 1.0, + "content": "is worth remembering that (1) features", + "type": "text" + }, + { + "bbox": [ + 264, + 668, + 271, + 676 + ], + "score": 0.77, + "content": "n", + "type": "inline_equation" + }, + { + "bbox": [ + 272, + 666, + 371, + 679 + ], + "score": 1.0, + "content": "discontinuous operators", + "type": "text" + }, + { + "bbox": [ + 372, + 666, + 383, + 677 + ], + "score": 0.89, + "content": "A _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 384, + 666, + 506, + 679 + ], + "score": 1.0, + "content": ", so we expect rates at least as", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 677, + 506, + 690 + ], + "spans": [ + { + "bbox": [ + 105, + 677, + 506, + 690 + ], + "score": 1.0, + "content": "slow as nonsmooth convex optimization, but perhaps worse because (1) is far more general than", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 687, + 506, + 700 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 506, + 700 + ], + "score": 1.0, + "content": "convex optimization. For a different error metric, the restricted gap function, in the special case of", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 106, + 699, + 506, + 711 + ], + "spans": [ + { + "bbox": [ + 106, + 699, + 506, + 711 + ], + "score": 1.0, + "content": "variational inequalities, faster rates have been established in Juditsky et al. (2011) and BΓΆhm et al.", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 105, + 709, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 397, + 722 + ], + "score": 1.0, + "content": "(2020). 
However, it is unclear how to relate the restricted gap function to", + "type": "text" + }, + { + "bbox": [ + 398, + 710, + 411, + 721 + ], + "score": 0.89, + "content": "G _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 411, + 709, + 505, + 722 + ], + "score": 1.0, + "content": ", so these rates may not", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 105, + 721, + 258, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 721, + 258, + 732 + ], + "score": 1.0, + "content": "be directly comparable to Theorem 2.", + "type": "text" + } + ], + "index": 49 + } + ], + "index": 43 + } + ], + "page_idx": 6, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 106, + 26, + 308, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 309, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 309, + 39 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 302, + 751, + 308, + 759 + ], + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 762 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 762 + ], + "score": 1.0, + "content": "7", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "title", + "bbox": [ + 107, + 81, + 282, + 94 + ], + "lines": [ + { + "bbox": [ + 105, + 81, + 284, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 284, + 95 + ], + "score": 1.0, + "content": "5 MAIN THEORETICAL RESULTS", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 106, + 105, + 505, + 139 + ], + "lines": [ + { + "bbox": [ + 106, + 105, + 506, + 119 + ], + "spans": [ + { + "bbox": [ + 106, + 105, + 197, + 119 + ], + "score": 1.0, + "content": "Theorem 1. 
Suppose", + "type": "text" + }, + { + "bbox": [ + 197, + 106, + 245, + 117 + ], + "score": 0.92, + "content": "A _ { 1 } , \\ldots , A _ { n }", + "type": "inline_equation" + }, + { + "bbox": [ + 245, + 105, + 345, + 119 + ], + "score": 1.0, + "content": "are maximal monotone,", + "type": "text" + }, + { + "bbox": [ + 346, + 106, + 355, + 115 + ], + "score": 0.7, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 356, + 105, + 366, + 119 + ], + "score": 1.0, + "content": "is", + "type": "text" + }, + { + "bbox": [ + 366, + 106, + 374, + 115 + ], + "score": 0.63, + "content": "L", + "type": "inline_equation" + }, + { + "bbox": [ + 375, + 105, + 506, + 119 + ], + "score": 1.0, + "content": "-Lipschitz and monotone, and a", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 117, + 505, + 129 + ], + "spans": [ + { + "bbox": [ + 106, + 117, + 250, + 129 + ], + "score": 1.0, + "content": "solution to (1) exists. For Algorithm", + "type": "text" + }, + { + "bbox": [ + 251, + 117, + 257, + 126 + ], + "score": 0.47, + "content": "I", + "type": "inline_equation" + }, + { + "bbox": [ + 257, + 117, + 505, + 129 + ], + "score": 1.0, + "content": ", suppose (9)-(12) hold. Then with probability one it holds that", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 124, + 457, + 142 + ], + "spans": [ + { + "bbox": [ + 107, + 127, + 142, + 137 + ], + "score": 0.92, + "content": "z ^ { k } \\to z ^ { * }", + "type": "inline_equation" + }, + { + "bbox": [ + 142, + 124, + 173, + 142 + ], + "score": 1.0, + "content": ", where", + "type": "text" + }, + { + "bbox": [ + 173, + 128, + 184, + 137 + ], + "score": 0.85, + "content": "z ^ { * }", + "type": "inline_equation" + }, + { + "bbox": [ + 184, + 124, + 349, + 142 + ], + "score": 1.0, + "content": "solves (1). 
Further, with probability one,", + "type": "text" + }, + { + "bbox": [ + 349, + 127, + 386, + 140 + ], + "score": 0.92, + "content": "x _ { i } ^ { k } \\to z ^ { * }", + "type": "inline_equation" + }, + { + "bbox": [ + 386, + 124, + 402, + 142 + ], + "score": 1.0, + "content": "for", + "type": "text" + }, + { + "bbox": [ + 402, + 128, + 452, + 139 + ], + "score": 0.92, + "content": "i = 1 , \\ldots , n", + "type": "inline_equation" + }, + { + "bbox": [ + 453, + 124, + 457, + 142 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 3 + } + ], + "index": 2, + "bbox_fs": [ + 106, + 105, + 506, + 142 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 149, + 506, + 194 + ], + "lines": [ + { + "bbox": [ + 105, + 149, + 506, + 163 + ], + "spans": [ + { + "bbox": [ + 105, + 149, + 506, + 163 + ], + "score": 1.0, + "content": "Proof sketch Theorem 1 is proved in Appendix C, but we provide a brief sketch here. The proof", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 161, + 506, + 173 + ], + "spans": [ + { + "bbox": [ + 106, + 161, + 506, + 173 + ], + "score": 1.0, + "content": "begins by deriving a simple recursion inspired by the analysis of SGD (Robbins & Monro, 1951).", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 170, + 506, + 186 + ], + "spans": [ + { + "bbox": [ + 105, + 170, + 131, + 186 + ], + "score": 1.0, + "content": "Since", + "type": "text" + }, + { + "bbox": [ + 131, + 171, + 219, + 184 + ], + "score": 0.92, + "content": "p ^ { k + 1 } = p ^ { k } - \\alpha _ { k } \\nabla \\bar { \\varphi } _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 219, + 170, + 506, + 186 + ], + "score": 1.0, + "content": ", a step of projective splitting can be viewed as GD applied to the affine", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 183, + 337, + 195 + ], + "spans": [ + { + "bbox": [ + 106, + 183, + 229, + 195 + ], + "score": 1.0, + "content": "hyperplane generator 
function", + "type": "text" + }, + { + "bbox": [ + 229, + 185, + 241, + 195 + ], + "score": 0.86, + "content": "\\varphi _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 242, + 183, + 302, + 195 + ], + "score": 1.0, + "content": ". Thus, for any", + "type": "text" + }, + { + "bbox": [ + 302, + 183, + 332, + 194 + ], + "score": 0.92, + "content": "p ^ { * } \\in \\mathcal { P }", + "type": "inline_equation" + }, + { + "bbox": [ + 333, + 183, + 337, + 195 + ], + "score": 1.0, + "content": ",", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 5.5, + "bbox_fs": [ + 105, + 149, + 506, + 195 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 163, + 195, + 440, + 227 + ], + "lines": [ + { + "bbox": [ + 163, + 195, + 440, + 227 + ], + "spans": [ + { + "bbox": [ + 163, + 195, + 440, + 227 + ], + "score": 0.9, + "content": "\\begin{array} { r l } & { \\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } = \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - 2 \\alpha _ { k } \\langle \\nabla \\varphi _ { k } , p ^ { k } - p ^ { * } \\rangle + \\alpha _ { k } ^ { 2 } \\| \\nabla \\varphi _ { k } \\| ^ { 2 } } \\\\ & { \\qquad = \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - 2 \\alpha _ { k } ( \\varphi _ { k } ( p ^ { k } ) - \\varphi _ { k } ( p ^ { * } ) ) + \\alpha _ { k } ^ { 2 } \\| \\nabla \\varphi _ { k } \\| ^ { 2 } } \\end{array}", + "type": "interline_equation", + "image_path": "e9ddae0d18b9ce62d16ecc0ec72b85c806b24ad288b9ad8f501dfc8a5fc344aa.jpg" + } + ] + } + ], + "index": 9, + "virtual_lines": [ + { + "bbox": [ + 163, + 195, + 440, + 205.66666666666666 + ], + "spans": [], + "index": 8 + }, + { + "bbox": [ + 163, + 205.66666666666666, + 440, + 216.33333333333331 + ], + "spans": [], + "index": 9 + }, + { + "bbox": [ + 163, + 216.33333333333331, + 440, + 226.99999999999997 + ], + "spans": [], + "index": 10 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 227, + 505, + 250 + ], + "lines": [ + { + "bbox": [ + 105, + 226, + 506, + 241 + ], + "spans": [ + { + 
"bbox": [ + 105, + 226, + 300, + 241 + ], + "score": 1.0, + "content": "where in the second equation we have used that", + "type": "text" + }, + { + "bbox": [ + 300, + 228, + 325, + 240 + ], + "score": 0.92, + "content": "\\varphi _ { k } ( p )", + "type": "inline_equation" + }, + { + "bbox": [ + 326, + 226, + 373, + 241 + ], + "score": 1.0, + "content": "is affine on", + "type": "text" + }, + { + "bbox": [ + 374, + 228, + 382, + 238 + ], + "score": 0.83, + "content": "\\mathcal { P }", + "type": "inline_equation" + }, + { + "bbox": [ + 383, + 226, + 506, + 241 + ], + "score": 1.0, + "content": ". The basic strategy is to show", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 238, + 191, + 251 + ], + "spans": [ + { + "bbox": [ + 106, + 238, + 157, + 251 + ], + "score": 1.0, + "content": "that, for any", + "type": "text" + }, + { + "bbox": [ + 157, + 239, + 187, + 250 + ], + "score": 0.92, + "content": "p ^ { * } \\in { \\mathcal { S } }", + "type": "inline_equation" + }, + { + "bbox": [ + 187, + 238, + 191, + 251 + ], + "score": 1.0, + "content": ",", + "type": "text" + } + ], + "index": 12 + } + ], + "index": 11.5, + "bbox_fs": [ + 105, + 226, + 506, + 251 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 213, + 251, + 398, + 266 + ], + "lines": [ + { + "bbox": [ + 213, + 251, + 398, + 266 + ], + "spans": [ + { + "bbox": [ + 213, + 251, + 398, + 266 + ], + "score": 0.92, + "content": "\\begin{array} { r } { \\mathbb { E } [ \\| \\nabla \\varphi _ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } ] \\le C _ { 1 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + C _ { 2 } \\quad a . s . 
} \\end{array}", + "type": "interline_equation", + "image_path": "f4535f8ce716c78230c228f54c23983fe999b54cec031462a4223df15a3eec87.jpg" + } + ] + } + ], + "index": 13, + "virtual_lines": [ + { + "bbox": [ + 213, + 251, + 398, + 266 + ], + "spans": [], + "index": 13 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 267, + 505, + 323 + ], + "lines": [ + { + "bbox": [ + 105, + 266, + 505, + 280 + ], + "spans": [ + { + "bbox": [ + 105, + 266, + 145, + 280 + ], + "score": 1.0, + "content": "for some", + "type": "text" + }, + { + "bbox": [ + 145, + 267, + 194, + 278 + ], + "score": 0.92, + "content": "C _ { 1 } , C _ { 2 } > 0", + "type": "inline_equation" + }, + { + "bbox": [ + 194, + 266, + 505, + 280 + ], + "score": 1.0, + "content": ". This condition allows one to establish stochastic quasi-FejΓ©r monotonicity", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 278, + 505, + 290 + ], + "spans": [ + { + "bbox": [ + 106, + 278, + 405, + 290 + ], + "score": 1.0, + "content": "(SQFM) (Combettes & Pesquet, 2015, Proposition 2.3) of the iterates to", + "type": "text" + }, + { + "bbox": [ + 406, + 278, + 414, + 288 + ], + "score": 0.79, + "content": "s", + "type": "inline_equation" + }, + { + "bbox": [ + 414, + 278, + 505, + 290 + ], + "score": 1.0, + "content": ". 
One consequence of", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 288, + 505, + 303 + ], + "spans": [ + { + "bbox": [ + 105, + 288, + 366, + 303 + ], + "score": 1.0, + "content": "SQFM is that with probability one there exists a subsequence", + "type": "text" + }, + { + "bbox": [ + 367, + 290, + 378, + 300 + ], + "score": 0.86, + "content": "v _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 378, + 288, + 420, + 303 + ], + "score": 1.0, + "content": "such that", + "type": "text" + }, + { + "bbox": [ + 421, + 289, + 505, + 301 + ], + "score": 0.91, + "content": "\\varphi _ { v _ { k } } ( p ^ { v _ { k } } ) - \\varphi _ { v _ { k } } ( p ^ { * } )", + "type": "inline_equation" + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 299, + 506, + 315 + ], + "spans": [ + { + "bbox": [ + 104, + 299, + 357, + 315 + ], + "score": 1.0, + "content": "converges to 0. Furthermore, roughly speaking, we show that", + "type": "text" + }, + { + "bbox": [ + 358, + 300, + 429, + 313 + ], + "score": 0.93, + "content": "\\varphi _ { k } ( p ^ { k } ) - \\varphi _ { k } ( p ^ { * } )", + "type": "inline_equation" + }, + { + "bbox": [ + 429, + 299, + 506, + 315 + ], + "score": 1.0, + "content": "provides an upper", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 311, + 341, + 324 + ], + "spans": [ + { + "bbox": [ + 105, + 311, + 341, + 324 + ], + "score": 1.0, + "content": "bound on the following β€œapproximation residual\" for SPS:", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 16, + "bbox_fs": [ + 104, + 266, + 506, + 324 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 166, + 323, + 444, + 338 + ], + "lines": [ + { + "bbox": [ + 166, + 323, + 444, + 338 + ], + "spans": [ + { + "bbox": [ + 166, + 323, + 444, + 338 + ], + "score": 0.85, + "content": "\\begin{array} { r } { G _ { k } \\doteq \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\sum _ { i = 1 } ^ { n } \\| z 
^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } . } \\end{array}", + "type": "interline_equation", + "image_path": "555c16bd22323c3486a9de0ea0a7f9817356db176ef7628602fe218a3e87fd0e.jpg" + } + ] + } + ], + "index": 19, + "virtual_lines": [ + { + "bbox": [ + 166, + 323, + 444, + 338 + ], + "spans": [], + "index": 19 + } + ] + }, + { + "type": "text", + "bbox": [ + 114, + 339, + 442, + 350 + ], + "lines": [ + { + "bbox": [ + 121, + 337, + 444, + 353 + ], + "spans": [ + { + "bbox": [ + 121, + 337, + 444, + 353 + ], + "score": 1.0, + "content": "provides an approximation error for SPS, as formalized in the following lemma:", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 20, + "bbox_fs": [ + 121, + 337, + 444, + 353 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 351, + 403, + 365 + ], + "lines": [ + { + "bbox": [ + 105, + 348, + 401, + 367 + ], + "spans": [ + { + "bbox": [ + 105, + 348, + 191, + 367 + ], + "score": 1.0, + "content": "Lemma 1. 
For SPS,", + "type": "text" + }, + { + "bbox": [ + 192, + 351, + 314, + 365 + ], + "score": 0.92, + "content": "p ^ { k } = ( z ^ { k } , w _ { 1 } ^ { k } , \\ldots , w _ { n + 1 } ^ { k } ) \\in \\mathcal { S }", + "type": "inline_equation" + }, + { + "bbox": [ + 315, + 348, + 369, + 367 + ], + "score": 1.0, + "content": "if and only if", + "type": "text" + }, + { + "bbox": [ + 369, + 352, + 401, + 363 + ], + "score": 0.92, + "content": "G _ { k } = 0", + "type": "inline_equation" + } + ], + "index": 21 + } + ], + "index": 21, + "bbox_fs": [ + 105, + 348, + 401, + 367 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 366, + 503, + 402 + ], + "lines": [ + { + "bbox": [ + 105, + 364, + 506, + 379 + ], + "spans": [ + { + "bbox": [ + 105, + 364, + 133, + 379 + ], + "score": 1.0, + "content": "Since", + "type": "text" + }, + { + "bbox": [ + 133, + 365, + 190, + 378 + ], + "score": 0.93, + "content": "y _ { i } ^ { k } \\ \\in \\ A _ { i } ( x _ { i } ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 190, + 364, + 208, + 379 + ], + "score": 1.0, + "content": "for", + "type": "text" + }, + { + "bbox": [ + 208, + 366, + 246, + 377 + ], + "score": 0.88, + "content": "i \\in 1 . . 
n", + "type": "inline_equation" + }, + { + "bbox": [ + 247, + 364, + 283, + 379 + ], + "score": 1.0, + "content": ", having", + "type": "text" + }, + { + "bbox": [ + 283, + 366, + 320, + 377 + ], + "score": 0.9, + "content": "G _ { k } ~ = ~ 0", + "type": "inline_equation" + }, + { + "bbox": [ + 320, + 364, + 375, + 379 + ], + "score": 1.0, + "content": "implies that", + "type": "text" + }, + { + "bbox": [ + 375, + 365, + 415, + 378 + ], + "score": 0.87, + "content": "z ^ { k } = x _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 415, + 364, + 420, + 379 + ], + "score": 1.0, + "content": ",", + "type": "text" + }, + { + "bbox": [ + 420, + 365, + 461, + 378 + ], + "score": 0.88, + "content": "w _ { i } ^ { k } \\ = \\ y _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 462, + 364, + 506, + 379 + ], + "score": 1.0, + "content": ", and thus", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 107, + 371, + 505, + 398 + ], + "spans": [ + { + "bbox": [ + 107, + 379, + 162, + 392 + ], + "score": 0.93, + "content": "w _ { i } ^ { k } \\in A _ { i } ( z ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 162, + 371, + 178, + 398 + ], + "score": 1.0, + "content": "for", + "type": "text" + }, + { + "bbox": [ + 178, + 379, + 212, + 390 + ], + "score": 0.89, + "content": "i \\in 1 . . n", + "type": "inline_equation" + }, + { + "bbox": [ + 212, + 371, + 243, + 398 + ], + "score": 1.0, + "content": ". 
Since", + "type": "text" + }, + { + "bbox": [ + 243, + 378, + 306, + 392 + ], + "score": 0.93, + "content": "w _ { n + 1 } ^ { k } = B ( z ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 307, + 371, + 325, + 398 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 326, + 377, + 386, + 392 + ], + "score": 0.94, + "content": "\\textstyle \\sum _ { i = 1 } ^ { n + 1 } w _ { i } ^ { k } = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 386, + 377, + 449, + 392 + ], + "score": 1.0, + "content": ", it follows that", + "type": "text" + }, + { + "bbox": [ + 450, + 379, + 461, + 389 + ], + "score": 0.87, + "content": "z ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 461, + 377, + 505, + 392 + ], + "score": 1.0, + "content": "solves (1).", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 390, + 297, + 402 + ], + "spans": [ + { + "bbox": [ + 106, + 390, + 284, + 402 + ], + "score": 1.0, + "content": "The reverse direction is proved in Appendix", + "type": "text" + }, + { + "bbox": [ + 284, + 392, + 292, + 400 + ], + "score": 0.34, + "content": "\\mathrm { D }", + "type": "inline_equation" + }, + { + "bbox": [ + 293, + 390, + 297, + 402 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 23, + "bbox_fs": [ + 105, + 364, + 506, + 402 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 407, + 505, + 442 + ], + "lines": [ + { + "bbox": [ + 106, + 406, + 505, + 420 + ], + "spans": [ + { + "bbox": [ + 106, + 406, + 160, + 420 + ], + "score": 1.0, + "content": "The quantity", + "type": "text" + }, + { + "bbox": [ + 161, + 408, + 174, + 419 + ], + "score": 0.89, + "content": "G _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 175, + 406, + 505, + 420 + ], + "score": 1.0, + "content": "generalizes the role played by the norm of the gradient in algorithms for smooth", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 106, + 417, + 
505, + 431 + ], + "spans": [ + { + "bbox": [ + 106, + 417, + 324, + 431 + ], + "score": 1.0, + "content": "optimization. In particular, in the special case where", + "type": "text" + }, + { + "bbox": [ + 324, + 419, + 350, + 428 + ], + "score": 0.9, + "content": "n = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 351, + 417, + 369, + 431 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 369, + 418, + 432, + 430 + ], + "score": 0.94, + "content": "\\bar { B } ( z ) = \\nabla f ( z )", + "type": "inline_equation" + }, + { + "bbox": [ + 433, + 417, + 505, + 431 + ], + "score": 1.0, + "content": "for some smooth", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 428, + 295, + 442 + ], + "spans": [ + { + "bbox": [ + 105, + 428, + 173, + 442 + ], + "score": 1.0, + "content": "convex function", + "type": "text" + }, + { + "bbox": [ + 173, + 430, + 180, + 441 + ], + "score": 0.84, + "content": "f", + "type": "inline_equation" + }, + { + "bbox": [ + 180, + 428, + 216, + 442 + ], + "score": 1.0, + "content": ", one has", + "type": "text" + }, + { + "bbox": [ + 217, + 429, + 289, + 442 + ], + "score": 0.96, + "content": "G _ { k } = \\| \\bar { \\nabla } f ( z ^ { k } ) \\| ^ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 289, + 428, + 295, + 442 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 26, + "bbox_fs": [ + 105, + 406, + 505, + 442 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 446, + 504, + 469 + ], + "lines": [ + { + "bbox": [ + 106, + 446, + 505, + 459 + ], + "spans": [ + { + "bbox": [ + 106, + 446, + 220, + 459 + ], + "score": 1.0, + "content": "Combining the properties of", + "type": "text" + }, + { + "bbox": [ + 220, + 446, + 234, + 457 + ], + "score": 0.9, + "content": "G _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 234, + 446, + 505, + 459 + ], + "score": 1.0, + "content": "with other results following from SQFM (such 
as boundedness) will", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 457, + 416, + 469 + ], + "spans": [ + { + "bbox": [ + 106, + 457, + 416, + 469 + ], + "score": 1.0, + "content": "allow us to derive almost-sure convergence of the iterates to a solution of (1).", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 28.5, + "bbox_fs": [ + 106, + 446, + 505, + 469 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 479, + 504, + 502 + ], + "lines": [ + { + "bbox": [ + 105, + 478, + 505, + 493 + ], + "spans": [ + { + "bbox": [ + 105, + 478, + 505, + 493 + ], + "score": 1.0, + "content": "Convergence rate We can also establish non-asymptotic convergence rates for the approximation", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 488, + 160, + 505 + ], + "spans": [ + { + "bbox": [ + 105, + 488, + 140, + 505 + ], + "score": 1.0, + "content": "residual", + "type": "text" + }, + { + "bbox": [ + 141, + 491, + 154, + 502 + ], + "score": 0.89, + "content": "G _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 155, + 488, + 160, + 505 + ], + "score": 1.0, + "content": ":", + "type": "text" + } + ], + "index": 31 + } + ], + "index": 30.5, + "bbox_fs": [ + 105, + 478, + 505, + 505 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 504, + 374, + 516 + ], + "lines": [ + { + "bbox": [ + 105, + 502, + 373, + 517 + ], + "spans": [ + { + "bbox": [ + 105, + 502, + 251, + 517 + ], + "score": 1.0, + "content": "Theorem 2. 
Fix the total iterations", + "type": "text" + }, + { + "bbox": [ + 251, + 505, + 280, + 515 + ], + "score": 0.92, + "content": "K \\geq 1", + "type": "inline_equation" + }, + { + "bbox": [ + 280, + 502, + 373, + 517 + ], + "score": 1.0, + "content": "of Algorithm 1 and set", + "type": "text" + } + ], + "index": 32 + } + ], + "index": 32, + "bbox_fs": [ + 105, + 502, + 373, + 517 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 161, + 516, + 450, + 538 + ], + "lines": [ + { + "bbox": [ + 161, + 516, + 450, + 538 + ], + "spans": [ + { + "bbox": [ + 161, + 516, + 450, + 538 + ], + "score": 0.92, + "content": "\\forall k = 1 , \\ldots , K : \\rho _ { k } = \\rho \\doteq \\operatorname* { m i n } \\left\\{ K ^ { - 1 / 4 } , 1 / 2 L \\right\\} \\quad \\ a n d \\quad \\alpha _ { k } = C _ { f } \\rho ^ { 2 }", + "type": "interline_equation", + "image_path": "68ff0df76206fb41826b63420b4d14be9fa2c1fc4096f719e7f57855a358d996.jpg" + } + ] + } + ], + "index": 33, + "virtual_lines": [ + { + "bbox": [ + 161, + 516, + 450, + 538 + ], + "spans": [], + "index": 33 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 538, + 295, + 550 + ], + "lines": [ + { + "bbox": [ + 104, + 536, + 297, + 552 + ], + "spans": [ + { + "bbox": [ + 104, + 536, + 144, + 552 + ], + "score": 1.0, + "content": "for some", + "type": "text" + }, + { + "bbox": [ + 144, + 538, + 175, + 550 + ], + "score": 0.93, + "content": "C _ { f } > 0", + "type": "inline_equation" + }, + { + "bbox": [ + 176, + 536, + 297, + 552 + ], + "score": 1.0, + "content": ". Suppose (9)-(11) hold. 
Then", + "type": "text" + } + ], + "index": 34 + } + ], + "index": 34, + "bbox_fs": [ + 104, + 536, + 297, + 552 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 237, + 551, + 374, + 568 + ], + "lines": [ + { + "bbox": [ + 237, + 551, + 374, + 568 + ], + "spans": [ + { + "bbox": [ + 237, + 551, + 374, + 568 + ], + "score": 0.88, + "content": "\\begin{array} { r } { ( 1 / K ) { \\sum } _ { j = 1 } ^ { K } \\mathbb { E } [ G _ { j } ] = \\mathcal { O } ( K ^ { - 1 / 4 } ) } \\end{array}", + "type": "interline_equation", + "image_path": "122c1b59b2769b397d0f512f754d8c65538b5ab3a8389fe57dcecc2cdee4c54a.jpg" + } + ] + } + ], + "index": 35, + "virtual_lines": [ + { + "bbox": [ + 237, + 551, + 374, + 568 + ], + "spans": [], + "index": 35 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 569, + 377, + 581 + ], + "lines": [ + { + "bbox": [ + 105, + 568, + 378, + 582 + ], + "spans": [ + { + "bbox": [ + 105, + 568, + 367, + 582 + ], + "score": 1.0, + "content": "where the constants are given (along with the proof) in Appendix", + "type": "text" + }, + { + "bbox": [ + 367, + 570, + 375, + 579 + ], + "score": 0.51, + "content": "E", + "type": "inline_equation" + }, + { + "bbox": [ + 375, + 568, + 378, + 582 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 36 + } + ], + "index": 36, + "bbox_fs": [ + 105, + 568, + 378, + 582 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 587, + 506, + 732 + ], + "lines": [ + { + "bbox": [ + 106, + 587, + 505, + 601 + ], + "spans": [ + { + "bbox": [ + 106, + 587, + 288, + 601 + ], + "score": 1.0, + "content": "Theorem 2 implies that if we pick an iterate", + "type": "text" + }, + { + "bbox": [ + 288, + 588, + 296, + 598 + ], + "score": 0.8, + "content": "J", + "type": "inline_equation" + }, + { + "bbox": [ + 296, + 587, + 406, + 601 + ], + "score": 1.0, + "content": "uniformly at random from", + "type": "text" + }, + { + "bbox": [ + 406, + 588, + 427, + 599 + ], + "score": 0.7, + "content": "1 . . 
K", + "type": "inline_equation" + }, + { + "bbox": [ + 428, + 587, + 505, + 601 + ], + "score": 1.0, + "content": ", then the expected", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 598, + 508, + 614 + ], + "spans": [ + { + "bbox": [ + 105, + 598, + 141, + 614 + ], + "score": 1.0, + "content": "value of", + "type": "text" + }, + { + "bbox": [ + 141, + 600, + 156, + 612 + ], + "score": 0.89, + "content": "G _ { J }", + "type": "inline_equation" + }, + { + "bbox": [ + 156, + 598, + 166, + 614 + ], + "score": 1.0, + "content": "is", + "type": "text" + }, + { + "bbox": [ + 167, + 599, + 211, + 612 + ], + "score": 0.93, + "content": "\\mathcal { O } ( K ^ { - 1 / 4 } )", + "type": "inline_equation" + }, + { + "bbox": [ + 211, + 598, + 508, + 614 + ], + "score": 1.0, + "content": ". As far as we know, this is the first convergence rate for a stochastic full-", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 609, + 507, + 625 + ], + "spans": [ + { + "bbox": [ + 105, + 609, + 507, + 625 + ], + "score": 1.0, + "content": "splitting method solving (1) in the general discontinuous (i.e. set-valued) monotone inclusion case,", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 106, + 622, + 506, + 634 + ], + "spans": [ + { + "bbox": [ + 106, + 622, + 506, + 634 + ], + "score": 1.0, + "content": "and it is not clear whether it can be improved, either by a better analysis or a better method. 
Faster", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 633, + 505, + 645 + ], + "spans": [ + { + "bbox": [ + 105, + 633, + 505, + 645 + ], + "score": 1.0, + "content": "rates are certainly possible for deterministic methods under various continuity assumptions; Tseng’s", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 642, + 506, + 657 + ], + "spans": [ + { + "bbox": [ + 105, + 642, + 169, + 657 + ], + "score": 1.0, + "content": "method obtains", + "type": "text" + }, + { + "bbox": [ + 169, + 644, + 206, + 656 + ], + "score": 0.93, + "content": "\\bar { \\mathcal { O } } ( K ^ { - 1 } )", + "type": "inline_equation" + }, + { + "bbox": [ + 206, + 642, + 266, + 657 + ], + "score": 1.0, + "content": "rate (Monteiro", + "type": "text" + }, + { + "bbox": [ + 267, + 644, + 275, + 654 + ], + "score": 0.3, + "content": "\\&", + "type": "inline_equation" + }, + { + "bbox": [ + 276, + 642, + 506, + 657 + ], + "score": 1.0, + "content": "Svaiter, 2010) and the accelerated Halpern iteration under", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 653, + 506, + 668 + ], + "spans": [ + { + "bbox": [ + 105, + 653, + 222, + 668 + ], + "score": 1.0, + "content": "Lipschitz continuity obtains", + "type": "text" + }, + { + "bbox": [ + 222, + 655, + 259, + 667 + ], + "score": 0.93, + "content": "\\mathcal { O } ( K ^ { - 2 } )", + "type": "inline_equation" + }, + { + "bbox": [ + 259, + 653, + 506, + 668 + ], + "score": 1.0, + "content": "rate (Diakonikolas, 2020). 
While our rate may seem slow, it", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 666, + 506, + 679 + ], + "spans": [ + { + "bbox": [ + 105, + 666, + 264, + 679 + ], + "score": 1.0, + "content": "is worth remembering that (1) features", + "type": "text" + }, + { + "bbox": [ + 264, + 668, + 271, + 676 + ], + "score": 0.77, + "content": "n", + "type": "inline_equation" + }, + { + "bbox": [ + 272, + 666, + 371, + 679 + ], + "score": 1.0, + "content": "discontinuous operators", + "type": "text" + }, + { + "bbox": [ + 372, + 666, + 383, + 677 + ], + "score": 0.89, + "content": "A _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 384, + 666, + 506, + 679 + ], + "score": 1.0, + "content": ", so we expect rates at least as", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 677, + 506, + 690 + ], + "spans": [ + { + "bbox": [ + 105, + 677, + 506, + 690 + ], + "score": 1.0, + "content": "slow as nonsmooth convex optimization, but perhaps worse because (1) is far more general than", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 687, + 506, + 700 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 506, + 700 + ], + "score": 1.0, + "content": "convex optimization. For a different error metric, the restricted gap function, in the special case of", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 106, + 699, + 506, + 711 + ], + "spans": [ + { + "bbox": [ + 106, + 699, + 506, + 711 + ], + "score": 1.0, + "content": "variational inequalities, faster rates have been established in Juditsky et al. (2011) and BΓΆhm et al.", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 105, + 709, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 397, + 722 + ], + "score": 1.0, + "content": "(2020). 
However, it is unclear how to relate the restricted gap function to", + "type": "text" + }, + { + "bbox": [ + 398, + 710, + 411, + 721 + ], + "score": 0.89, + "content": "G _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 411, + 709, + 505, + 722 + ], + "score": 1.0, + "content": ", so these rates may not", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 105, + 721, + 258, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 721, + 258, + 732 + ], + "score": 1.0, + "content": "be directly comparable to Theorem 2.", + "type": "text" + } + ], + "index": 49 + } + ], + "index": 43, + "bbox_fs": [ + 105, + 587, + 508, + 732 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "title", + "bbox": [ + 108, + 81, + 210, + 93 + ], + "lines": [ + { + "bbox": [ + 105, + 79, + 213, + 96 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 213, + 96 + ], + "score": 1.0, + "content": "6 RELATED WORK", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 107, + 108, + 505, + 272 + ], + "lines": [ + { + "bbox": [ + 106, + 108, + 505, + 120 + ], + "spans": [ + { + "bbox": [ + 106, + 108, + 505, + 120 + ], + "score": 1.0, + "content": "Arguably the three most popular classes of operator splitting algorithms are forward-backward", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 119, + 507, + 132 + ], + "spans": [ + { + "bbox": [ + 105, + 119, + 507, + 132 + ], + "score": 1.0, + "content": "splitting (FB) (Combettes & Pesquet, 2011), Douglas-Rachford splitting (DR) (Lions & Mercier,", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 128, + 506, + 143 + ], + "spans": [ + { + "bbox": [ + 105, + 128, + 506, + 143 + ], + "score": 1.0, + "content": "1979), and Tseng’s method (Tseng, 2000). 
The extragradient method (EG) is similar to Tseng’s", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 141, + 505, + 154 + ], + "spans": [ + { + "bbox": [ + 105, + 141, + 505, + 154 + ], + "score": 1.0, + "content": "method, but has more projection steps per iteration and only applies to variational inequalities", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 151, + 506, + 165 + ], + "spans": [ + { + "bbox": [ + 105, + 151, + 506, + 165 + ], + "score": 1.0, + "content": "(Korpelevich, 1977; Nemirovski, 2004; Li et al., 2021). The popular Alternating Direction Method", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 162, + 506, + 176 + ], + "spans": [ + { + "bbox": [ + 105, + 162, + 506, + 176 + ], + "score": 1.0, + "content": "of Multipliers (ADMM), in its standard form, is a dual application of DR (Gabay, 1983). The", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 174, + 505, + 186 + ], + "spans": [ + { + "bbox": [ + 106, + 174, + 439, + 186 + ], + "score": 1.0, + "content": "three-operator splitting method (Davis & Yin, 2017) can only be applied to (1) if", + "type": "text" + }, + { + "bbox": [ + 439, + 174, + 448, + 183 + ], + "score": 0.81, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 448, + 174, + 505, + 186 + ], + "score": 1.0, + "content": "is cocoercive", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 184, + 505, + 198 + ], + "spans": [ + { + "bbox": [ + 105, + 184, + 505, + 198 + ], + "score": 1.0, + "content": "rather than merely Lipchitz, and thus its usefulness is mostly limited to optimization applications and", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 196, + 505, + 208 + ], + "spans": [ + { + "bbox": [ + 106, + 196, + 505, + 208 + ], + "score": 1.0, + "content": "not games. 
FB, DR, and Tseng’s method apply to monotone inclusions involving two operators, with", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 207, + 506, + 220 + ], + "spans": [ + { + "bbox": [ + 105, + 207, + 506, + 220 + ], + "score": 1.0, + "content": "varying assumptions on one of the operators. It is possible to derive splitting methods for the more", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 218, + 506, + 231 + ], + "spans": [ + { + "bbox": [ + 105, + 218, + 506, + 231 + ], + "score": 1.0, + "content": "complicated inclusion (1), involving more than two operators, by applying an appropriate 2-operator", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 228, + 506, + 241 + ], + "spans": [ + { + "bbox": [ + 105, + 228, + 506, + 241 + ], + "score": 1.0, + "content": "splitting method such as Tseng’s method to a product-space reformulation (PSR) (BriceΓ±o-Arias &", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 238, + 506, + 253 + ], + "spans": [ + { + "bbox": [ + 105, + 238, + 506, + 253 + ], + "score": 1.0, + "content": "Combettes, 2011; Combettes & Pesquet, 2012) (for more on PSR, see Appendix F). The recently", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 250, + 505, + 263 + ], + "spans": [ + { + "bbox": [ + 105, + 250, + 505, + 263 + ], + "score": 1.0, + "content": "developed forward-reflected-backward (FRB) method (Malitsky & Tam, 2020) can be used in the", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 261, + 503, + 274 + ], + "spans": [ + { + "bbox": [ + 105, + 261, + 503, + 274 + ], + "score": 1.0, + "content": "same way. 
However, there are several disadvantages to using a PSR, as discussed in Appendix F.7.", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 8 + }, + { + "type": "text", + "bbox": [ + 107, + 277, + 505, + 344 + ], + "lines": [ + { + "bbox": [ + 105, + 277, + 505, + 290 + ], + "spans": [ + { + "bbox": [ + 105, + 277, + 505, + 290 + ], + "score": 1.0, + "content": "By using a PSR, the stochastic methods of Alacaoglu et al. (2021) and BΓΆhm et al. (2020) can be", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 288, + 506, + 302 + ], + "spans": [ + { + "bbox": [ + 105, + 288, + 241, + 302 + ], + "score": 1.0, + "content": "applied to (1) in the case that each", + "type": "text" + }, + { + "bbox": [ + 241, + 290, + 253, + 300 + ], + "score": 0.89, + "content": "A _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 253, + 288, + 506, + 302 + ], + "score": 1.0, + "content": "is a subdifferential. Both of these methods are analyzed in terms", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 299, + 505, + 313 + ], + "spans": [ + { + "bbox": [ + 105, + 299, + 505, + 313 + ], + "score": 1.0, + "content": "of the restricted gap function. This merit function has a drawback compared with our approximation", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 311, + 505, + 323 + ], + "spans": [ + { + "bbox": [ + 105, + 311, + 505, + 323 + ], + "score": 1.0, + "content": "residual in that it requires one to find a bound for the iterates. However, Alacaoglu et al. (2021) and", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 321, + 505, + 335 + ], + "spans": [ + { + "bbox": [ + 105, + 321, + 505, + 335 + ], + "score": 1.0, + "content": "BΓΆhm et al. 
(2020) do not provide such a bound, meaning that their convergence rate results are", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 333, + 349, + 345 + ], + "spans": [ + { + "bbox": [ + 105, + 333, + 349, + 345 + ], + "score": 1.0, + "content": "somewhat incomplete. We discuss this issue in Appendix G.", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 18.5 + }, + { + "type": "text", + "bbox": [ + 107, + 350, + 505, + 427 + ], + "lines": [ + { + "bbox": [ + 105, + 350, + 505, + 362 + ], + "spans": [ + { + "bbox": [ + 105, + 350, + 505, + 362 + ], + "score": 1.0, + "content": "Theoretical convergence of the method of BΓΆhm et al. (2020) requires the use of averaging, since", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 104, + 358, + 506, + 375 + ], + "spans": [ + { + "bbox": [ + 104, + 358, + 506, + 375 + ], + "score": 1.0, + "content": "the final iterate does not converge for certain problems (Hsieh et al., 2020). Empirically, averaging", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 371, + 505, + 385 + ], + "spans": [ + { + "bbox": [ + 105, + 371, + 505, + 385 + ], + "score": 1.0, + "content": "tends to be slow and to destroy regularizer-induced structural properties such as sparsity or low", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 382, + 507, + 396 + ], + "spans": [ + { + "bbox": [ + 105, + 382, + 507, + 396 + ], + "score": 1.0, + "content": "matrix rank, so its utility is largely theoretical and it is usually avoided in practice. 
Furthermore,", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 394, + 506, + 406 + ], + "spans": [ + { + "bbox": [ + 105, + 394, + 506, + 406 + ], + "score": 1.0, + "content": "averaging loses even its theoretical benefits for nonconvex problems, so its use in such cases is rarer", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 404, + 506, + 417 + ], + "spans": [ + { + "bbox": [ + 105, + 404, + 506, + 417 + ], + "score": 1.0, + "content": "still. Another drawback of the analysis of BΓΆhm et al. (2020) is that, unlike in SPS, the resolvent", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 416, + 270, + 428 + ], + "spans": [ + { + "bbox": [ + 105, + 416, + 270, + 428 + ], + "score": 1.0, + "content": "(proximal) stepsizes also need to vanish.", + "type": "text" + } + ], + "index": 28 + } + ], + "index": 25 + }, + { + "type": "text", + "bbox": [ + 107, + 432, + 505, + 498 + ], + "lines": [ + { + "bbox": [ + 106, + 432, + 505, + 445 + ], + "spans": [ + { + "bbox": [ + 106, + 432, + 505, + 445 + ], + "score": 1.0, + "content": "The method of Alacaoglu et al. (2021) applies variance reduction techniques to FRB. It only applies", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 443, + 506, + 456 + ], + "spans": [ + { + "bbox": [ + 105, + 443, + 506, + 456 + ], + "score": 1.0, + "content": "to finite-sum problems and requires the periodic computation of a full batch gradient, making it", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 455, + 505, + 467 + ], + "spans": [ + { + "bbox": [ + 105, + 455, + 505, + 467 + ], + "score": 1.0, + "content": "somewhat less flexible and scalable than our method. 
On the other hand, it has an accelerated ergodic", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 465, + 505, + 478 + ], + "spans": [ + { + "bbox": [ + 105, + 465, + 505, + 478 + ], + "score": 1.0, + "content": "rate for the restricted gap function in the variational inequality setting. We compare the empirical", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 476, + 505, + 489 + ], + "spans": [ + { + "bbox": [ + 105, + 476, + 505, + 489 + ], + "score": 1.0, + "content": "performance of SPS with Alacaoglu et al. (2021), BΓΆhm et al. (2020), and several deterministic", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 488, + 399, + 499 + ], + "spans": [ + { + "bbox": [ + 105, + 488, + 399, + 499 + ], + "score": 1.0, + "content": "methods using PSR in the numerical experiments described in Section 7.", + "type": "text" + } + ], + "index": 34 + } + ], + "index": 31.5 + }, + { + "type": "text", + "bbox": [ + 108, + 504, + 316, + 515 + ], + "lines": [ + { + "bbox": [ + 106, + 504, + 317, + 517 + ], + "spans": [ + { + "bbox": [ + 106, + 504, + 317, + 517 + ], + "score": 1.0, + "content": "Additional related work is discussed in Appendix B.", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 35 + }, + { + "type": "title", + "bbox": [ + 108, + 534, + 200, + 547 + ], + "lines": [ + { + "bbox": [ + 105, + 532, + 202, + 550 + ], + "spans": [ + { + "bbox": [ + 105, + 532, + 202, + 550 + ], + "score": 1.0, + "content": "7 EXPERIMENTS", + "type": "text" + } + ], + "index": 36 + } + ], + "index": 36 + }, + { + "type": "text", + "bbox": [ + 107, + 561, + 505, + 660 + ], + "lines": [ + { + "bbox": [ + 106, + 561, + 505, + 573 + ], + "spans": [ + { + "bbox": [ + 106, + 561, + 505, + 573 + ], + "score": 1.0, + "content": "We now present some numerical results on distributionally robust supervised learning (DRSL)", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 106, + 572, + 506, + 584 + ], + 
"spans": [ + { + "bbox": [ + 106, + 572, + 506, + 584 + ], + "score": 1.0, + "content": "problems. We follow the approach of Yu et al. (2021), which introduced a min-max formulation of", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 582, + 506, + 596 + ], + "spans": [ + { + "bbox": [ + 105, + 582, + 506, + 596 + ], + "score": 1.0, + "content": "Wasserstein DRSL. While other approaches reduce the problem to convex optimization, Yu et al.", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 594, + 506, + 607 + ], + "spans": [ + { + "bbox": [ + 105, + 594, + 506, + 607 + ], + "score": 1.0, + "content": "(2021) reduce it to a finite-dimensional min-max problem amenable to the use of stochastic methods", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 106, + 605, + 505, + 617 + ], + "spans": [ + { + "bbox": [ + 106, + 605, + 505, + 617 + ], + "score": 1.0, + "content": "on large datasets. However, unlike our proposed SPS method, the variance-reduced extragradient", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 615, + 506, + 630 + ], + "spans": [ + { + "bbox": [ + 105, + 615, + 506, + 630 + ], + "score": 1.0, + "content": "method that Yu et al. (2021) propose cannot handle multiple nonsmooth regularizers or constraints on", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 626, + 505, + 640 + ], + "spans": [ + { + "bbox": [ + 105, + 626, + 505, + 640 + ], + "score": 1.0, + "content": "the model parameters. Consequently, we consider distributionally robust sparse logistic regression", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 637, + 505, + 651 + ], + "spans": [ + { + "bbox": [ + 105, + 637, + 494, + 651 + ], + "score": 1.0, + "content": "(DRSLR), a problem class equivalent to that considered in Yu et al. 
(2021), but with an added", + "type": "text" + }, + { + "bbox": [ + 495, + 638, + 505, + 649 + ], + "score": 0.86, + "content": "\\ell _ { 1 }", + "type": "inline_equation" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 649, + 493, + 662 + ], + "spans": [ + { + "bbox": [ + 105, + 649, + 493, + 662 + ], + "score": 1.0, + "content": "regularizer, a standard tool to induce sparsity. See the Appendix I for the full problem definition.", + "type": "text" + } + ], + "index": 45 + } + ], + "index": 41 + }, + { + "type": "text", + "bbox": [ + 107, + 666, + 505, + 731 + ], + "lines": [ + { + "bbox": [ + 106, + 665, + 505, + 678 + ], + "spans": [ + { + "bbox": [ + 106, + 665, + 505, + 678 + ], + "score": 1.0, + "content": "We compared our SPS method to several methods for solving DRSLR for a collection of real datasets", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 104, + 674, + 508, + 691 + ], + "spans": [ + { + "bbox": [ + 104, + 674, + 423, + 691 + ], + "score": 1.0, + "content": "from the LIBSVM repository (Chang & Lin, 2011). We implemented SPS with", + "type": "text" + }, + { + "bbox": [ + 423, + 676, + 487, + 688 + ], + "score": 0.93, + "content": "\\alpha _ { k } = C _ { d } k ^ { - 0 . 5 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 487, + 674, + 508, + 691 + ], + "score": 1.0, + "content": "and", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 107, + 684, + 508, + 703 + ], + "spans": [ + { + "bbox": [ + 107, + 687, + 169, + 699 + ], + "score": 0.93, + "content": "\\rho _ { k } = C _ { d } k ^ { - 0 . 2 5 }", + "type": "inline_equation" + }, + { + "bbox": [ + 169, + 684, + 508, + 703 + ], + "score": 1.0, + "content": "and called it SPS-decay. We also implement SPS with the fixed stepsize given in (15)", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 105, + 698, + 506, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 506, + 712 + ], + "score": 1.0, + "content": "and called it SPS-fixed. 
We compared the method to deterministic projective splitting (Johnstone &", + "type": "text" + } + ], + "index": 49 + }, + { + "bbox": [ + 105, + 709, + 506, + 722 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 506, + 722 + ], + "score": 1.0, + "content": "Eckstein, 2020b) and the following methods based on PSR: Tseng’s method (Tseng, 2000; Combettes", + "type": "text" + } + ], + "index": 50 + }, + { + "bbox": [ + 105, + 720, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 720, + 505, + 732 + ], + "score": 1.0, + "content": "& Pesquet, 2012), the forward-reflected-backward (FRB) method (Malitsky & Tam, 2020), the", + "type": "text" + } + ], + "index": 51 + } + ], + "index": 48.5 + } + ], + "page_idx": 7, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 27, + 308, + 37 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 302, + 752, + 308, + 759 + ], + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 761 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 761 + ], + "score": 1.0, + "content": "8", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "title", + "bbox": [ + 108, + 81, + 210, + 93 + ], + "lines": [ + { + "bbox": [ + 105, + 79, + 213, + 96 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 213, + 96 + ], + "score": 1.0, + "content": "6 RELATED WORK", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 107, + 108, + 505, + 272 + ], + "lines": [ + { + "bbox": [ + 106, + 108, + 505, + 120 + ], + "spans": [ + { + "bbox": [ + 106, + 108, + 505, + 120 + ], + "score": 1.0, + "content": "Arguably the three most popular classes of operator splitting algorithms are forward-backward", + "type": 
"text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 119, + 507, + 132 + ], + "spans": [ + { + "bbox": [ + 105, + 119, + 507, + 132 + ], + "score": 1.0, + "content": "splitting (FB) (Combettes & Pesquet, 2011), Douglas-Rachford splitting (DR) (Lions & Mercier,", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 128, + 506, + 143 + ], + "spans": [ + { + "bbox": [ + 105, + 128, + 506, + 143 + ], + "score": 1.0, + "content": "1979), and Tseng’s method (Tseng, 2000). The extragradient method (EG) is similar to Tseng’s", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 141, + 505, + 154 + ], + "spans": [ + { + "bbox": [ + 105, + 141, + 505, + 154 + ], + "score": 1.0, + "content": "method, but has more projection steps per iteration and only applies to variational inequalities", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 151, + 506, + 165 + ], + "spans": [ + { + "bbox": [ + 105, + 151, + 506, + 165 + ], + "score": 1.0, + "content": "(Korpelevich, 1977; Nemirovski, 2004; Li et al., 2021). The popular Alternating Direction Method", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 162, + 506, + 176 + ], + "spans": [ + { + "bbox": [ + 105, + 162, + 506, + 176 + ], + "score": 1.0, + "content": "of Multipliers (ADMM), in its standard form, is a dual application of DR (Gabay, 1983). 
The", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 174, + 505, + 186 + ], + "spans": [ + { + "bbox": [ + 106, + 174, + 439, + 186 + ], + "score": 1.0, + "content": "three-operator splitting method (Davis & Yin, 2017) can only be applied to (1) if", + "type": "text" + }, + { + "bbox": [ + 439, + 174, + 448, + 183 + ], + "score": 0.81, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 448, + 174, + 505, + 186 + ], + "score": 1.0, + "content": "is cocoercive", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 184, + 505, + 198 + ], + "spans": [ + { + "bbox": [ + 105, + 184, + 505, + 198 + ], + "score": 1.0, + "content": "rather than merely Lipchitz, and thus its usefulness is mostly limited to optimization applications and", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 196, + 505, + 208 + ], + "spans": [ + { + "bbox": [ + 106, + 196, + 505, + 208 + ], + "score": 1.0, + "content": "not games. FB, DR, and Tseng’s method apply to monotone inclusions involving two operators, with", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 207, + 506, + 220 + ], + "spans": [ + { + "bbox": [ + 105, + 207, + 506, + 220 + ], + "score": 1.0, + "content": "varying assumptions on one of the operators. 
It is possible to derive splitting methods for the more", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 218, + 506, + 231 + ], + "spans": [ + { + "bbox": [ + 105, + 218, + 506, + 231 + ], + "score": 1.0, + "content": "complicated inclusion (1), involving more than two operators, by applying an appropriate 2-operator", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 228, + 506, + 241 + ], + "spans": [ + { + "bbox": [ + 105, + 228, + 506, + 241 + ], + "score": 1.0, + "content": "splitting method such as Tseng’s method to a product-space reformulation (PSR) (BriceΓ±o-Arias &", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 238, + 506, + 253 + ], + "spans": [ + { + "bbox": [ + 105, + 238, + 506, + 253 + ], + "score": 1.0, + "content": "Combettes, 2011; Combettes & Pesquet, 2012) (for more on PSR, see Appendix F). The recently", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 250, + 505, + 263 + ], + "spans": [ + { + "bbox": [ + 105, + 250, + 505, + 263 + ], + "score": 1.0, + "content": "developed forward-reflected-backward (FRB) method (Malitsky & Tam, 2020) can be used in the", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 261, + 503, + 274 + ], + "spans": [ + { + "bbox": [ + 105, + 261, + 503, + 274 + ], + "score": 1.0, + "content": "same way. However, there are several disadvantages to using a PSR, as discussed in Appendix F.7.", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 8, + "bbox_fs": [ + 105, + 108, + 507, + 274 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 277, + 505, + 344 + ], + "lines": [ + { + "bbox": [ + 105, + 277, + 505, + 290 + ], + "spans": [ + { + "bbox": [ + 105, + 277, + 505, + 290 + ], + "score": 1.0, + "content": "By using a PSR, the stochastic methods of Alacaoglu et al. (2021) and BΓΆhm et al. 
(2020) can be", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 288, + 506, + 302 + ], + "spans": [ + { + "bbox": [ + 105, + 288, + 241, + 302 + ], + "score": 1.0, + "content": "applied to (1) in the case that each", + "type": "text" + }, + { + "bbox": [ + 241, + 290, + 253, + 300 + ], + "score": 0.89, + "content": "A _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 253, + 288, + 506, + 302 + ], + "score": 1.0, + "content": "is a subdifferential. Both of these methods are analyzed in terms", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 299, + 505, + 313 + ], + "spans": [ + { + "bbox": [ + 105, + 299, + 505, + 313 + ], + "score": 1.0, + "content": "of the restricted gap function. This merit function has a drawback compared with our approximation", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 311, + 505, + 323 + ], + "spans": [ + { + "bbox": [ + 105, + 311, + 505, + 323 + ], + "score": 1.0, + "content": "residual in that it requires one to find a bound for the iterates. However, Alacaoglu et al. (2021) and", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 321, + 505, + 335 + ], + "spans": [ + { + "bbox": [ + 105, + 321, + 505, + 335 + ], + "score": 1.0, + "content": "BΓΆhm et al. (2020) do not provide such a bound, meaning that their convergence rate results are", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 333, + 349, + 345 + ], + "spans": [ + { + "bbox": [ + 105, + 333, + 349, + 345 + ], + "score": 1.0, + "content": "somewhat incomplete. 
We discuss this issue in Appendix G.", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 18.5, + "bbox_fs": [ + 105, + 277, + 506, + 345 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 350, + 505, + 427 + ], + "lines": [ + { + "bbox": [ + 105, + 350, + 505, + 362 + ], + "spans": [ + { + "bbox": [ + 105, + 350, + 505, + 362 + ], + "score": 1.0, + "content": "Theoretical convergence of the method of BΓΆhm et al. (2020) requires the use of averaging, since", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 104, + 358, + 506, + 375 + ], + "spans": [ + { + "bbox": [ + 104, + 358, + 506, + 375 + ], + "score": 1.0, + "content": "the final iterate does not converge for certain problems (Hsieh et al., 2020). Empirically, averaging", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 371, + 505, + 385 + ], + "spans": [ + { + "bbox": [ + 105, + 371, + 505, + 385 + ], + "score": 1.0, + "content": "tends to be slow and to destroy regularizer-induced structural properties such as sparsity or low", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 382, + 507, + 396 + ], + "spans": [ + { + "bbox": [ + 105, + 382, + 507, + 396 + ], + "score": 1.0, + "content": "matrix rank, so its utility is largely theoretical and it is usually avoided in practice. Furthermore,", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 394, + 506, + 406 + ], + "spans": [ + { + "bbox": [ + 105, + 394, + 506, + 406 + ], + "score": 1.0, + "content": "averaging loses even its theoretical benefits for nonconvex problems, so its use in such cases is rarer", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 404, + 506, + 417 + ], + "spans": [ + { + "bbox": [ + 105, + 404, + 506, + 417 + ], + "score": 1.0, + "content": "still. Another drawback of the analysis of BΓΆhm et al. 
(2020) is that, unlike in SPS, the resolvent", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 416, + 270, + 428 + ], + "spans": [ + { + "bbox": [ + 105, + 416, + 270, + 428 + ], + "score": 1.0, + "content": "(proximal) stepsizes also need to vanish.", + "type": "text" + } + ], + "index": 28 + } + ], + "index": 25, + "bbox_fs": [ + 104, + 350, + 507, + 428 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 432, + 505, + 498 + ], + "lines": [ + { + "bbox": [ + 106, + 432, + 505, + 445 + ], + "spans": [ + { + "bbox": [ + 106, + 432, + 505, + 445 + ], + "score": 1.0, + "content": "The method of Alacaoglu et al. (2021) applies variance reduction techniques to FRB. It only applies", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 443, + 506, + 456 + ], + "spans": [ + { + "bbox": [ + 105, + 443, + 506, + 456 + ], + "score": 1.0, + "content": "to finite-sum problems and requires the periodic computation of a full batch gradient, making it", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 455, + 505, + 467 + ], + "spans": [ + { + "bbox": [ + 105, + 455, + 505, + 467 + ], + "score": 1.0, + "content": "somewhat less flexible and scalable than our method. On the other hand, it has an accelerated ergodic", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 465, + 505, + 478 + ], + "spans": [ + { + "bbox": [ + 105, + 465, + 505, + 478 + ], + "score": 1.0, + "content": "rate for the restricted gap function in the variational inequality setting. We compare the empirical", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 476, + 505, + 489 + ], + "spans": [ + { + "bbox": [ + 105, + 476, + 505, + 489 + ], + "score": 1.0, + "content": "performance of SPS with Alacaoglu et al. (2021), BΓΆhm et al. 
(2020), and several deterministic", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 488, + 399, + 499 + ], + "spans": [ + { + "bbox": [ + 105, + 488, + 399, + 499 + ], + "score": 1.0, + "content": "methods using PSR in the numerical experiments described in Section 7.", + "type": "text" + } + ], + "index": 34 + } + ], + "index": 31.5, + "bbox_fs": [ + 105, + 432, + 506, + 499 + ] + }, + { + "type": "text", + "bbox": [ + 108, + 504, + 316, + 515 + ], + "lines": [ + { + "bbox": [ + 106, + 504, + 317, + 517 + ], + "spans": [ + { + "bbox": [ + 106, + 504, + 317, + 517 + ], + "score": 1.0, + "content": "Additional related work is discussed in Appendix B.", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 35, + "bbox_fs": [ + 106, + 504, + 317, + 517 + ] + }, + { + "type": "title", + "bbox": [ + 108, + 534, + 200, + 547 + ], + "lines": [ + { + "bbox": [ + 105, + 532, + 202, + 550 + ], + "spans": [ + { + "bbox": [ + 105, + 532, + 202, + 550 + ], + "score": 1.0, + "content": "7 EXPERIMENTS", + "type": "text" + } + ], + "index": 36 + } + ], + "index": 36 + }, + { + "type": "text", + "bbox": [ + 107, + 561, + 505, + 660 + ], + "lines": [ + { + "bbox": [ + 106, + 561, + 505, + 573 + ], + "spans": [ + { + "bbox": [ + 106, + 561, + 505, + 573 + ], + "score": 1.0, + "content": "We now present some numerical results on distributionally robust supervised learning (DRSL)", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 106, + 572, + 506, + 584 + ], + "spans": [ + { + "bbox": [ + 106, + 572, + 506, + 584 + ], + "score": 1.0, + "content": "problems. We follow the approach of Yu et al. (2021), which introduced a min-max formulation of", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 582, + 506, + 596 + ], + "spans": [ + { + "bbox": [ + 105, + 582, + 506, + 596 + ], + "score": 1.0, + "content": "Wasserstein DRSL. 
While other approaches reduce the problem to convex optimization, Yu et al.", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 594, + 506, + 607 + ], + "spans": [ + { + "bbox": [ + 105, + 594, + 506, + 607 + ], + "score": 1.0, + "content": "(2021) reduce it to a finite-dimensional min-max problem amenable to the use of stochastic methods", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 106, + 605, + 505, + 617 + ], + "spans": [ + { + "bbox": [ + 106, + 605, + 505, + 617 + ], + "score": 1.0, + "content": "on large datasets. However, unlike our proposed SPS method, the variance-reduced extragradient", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 615, + 506, + 630 + ], + "spans": [ + { + "bbox": [ + 105, + 615, + 506, + 630 + ], + "score": 1.0, + "content": "method that Yu et al. (2021) propose cannot handle multiple nonsmooth regularizers or constraints on", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 626, + 505, + 640 + ], + "spans": [ + { + "bbox": [ + 105, + 626, + 505, + 640 + ], + "score": 1.0, + "content": "the model parameters. Consequently, we consider distributionally robust sparse logistic regression", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 637, + 505, + 651 + ], + "spans": [ + { + "bbox": [ + 105, + 637, + 494, + 651 + ], + "score": 1.0, + "content": "(DRSLR), a problem class equivalent to that considered in Yu et al. (2021), but with an added", + "type": "text" + }, + { + "bbox": [ + 495, + 638, + 505, + 649 + ], + "score": 0.86, + "content": "\\ell _ { 1 }", + "type": "inline_equation" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 649, + 493, + 662 + ], + "spans": [ + { + "bbox": [ + 105, + 649, + 493, + 662 + ], + "score": 1.0, + "content": "regularizer, a standard tool to induce sparsity. 
See the Appendix I for the full problem definition.", + "type": "text" + } + ], + "index": 45 + } + ], + "index": 41, + "bbox_fs": [ + 105, + 561, + 506, + 662 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 666, + 505, + 731 + ], + "lines": [ + { + "bbox": [ + 106, + 665, + 505, + 678 + ], + "spans": [ + { + "bbox": [ + 106, + 665, + 505, + 678 + ], + "score": 1.0, + "content": "We compared our SPS method to several methods for solving DRSLR for a collection of real datasets", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 104, + 674, + 508, + 691 + ], + "spans": [ + { + "bbox": [ + 104, + 674, + 423, + 691 + ], + "score": 1.0, + "content": "from the LIBSVM repository (Chang & Lin, 2011). We implemented SPS with", + "type": "text" + }, + { + "bbox": [ + 423, + 676, + 487, + 688 + ], + "score": 0.93, + "content": "\\alpha _ { k } = C _ { d } k ^ { - 0 . 5 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 487, + 674, + 508, + 691 + ], + "score": 1.0, + "content": "and", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 107, + 684, + 508, + 703 + ], + "spans": [ + { + "bbox": [ + 107, + 687, + 169, + 699 + ], + "score": 0.93, + "content": "\\rho _ { k } = C _ { d } k ^ { - 0 . 2 5 }", + "type": "inline_equation" + }, + { + "bbox": [ + 169, + 684, + 508, + 703 + ], + "score": 1.0, + "content": "and called it SPS-decay. We also implement SPS with the fixed stepsize given in (15)", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 105, + 698, + 506, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 506, + 712 + ], + "score": 1.0, + "content": "and called it SPS-fixed. 
We compared the method to deterministic projective splitting (Johnstone &", + "type": "text" + } + ], + "index": 49 + }, + { + "bbox": [ + 105, + 709, + 506, + 722 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 506, + 722 + ], + "score": 1.0, + "content": "Eckstein, 2020b) and the following methods based on PSR: Tseng’s method (Tseng, 2000; Combettes", + "type": "text" + } + ], + "index": 50 + }, + { + "bbox": [ + 105, + 720, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 720, + 505, + 732 + ], + "score": 1.0, + "content": "& Pesquet, 2012), the forward-reflected-backward (FRB) method (Malitsky & Tam, 2020), the", + "type": "text" + } + ], + "index": 51 + }, + { + "bbox": [ + 106, + 279, + 505, + 291 + ], + "spans": [ + { + "bbox": [ + 106, + 279, + 505, + 291 + ], + "score": 1.0, + "content": "stochastic Tseng (S-Tseng) method of BΓΆhm et al. (2020), and the variance-reduced stochastic FRB", + "type": "text", + "cross_page": true + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 290, + 505, + 303 + ], + "spans": [ + { + "bbox": [ + 105, + 290, + 505, + 303 + ], + "score": 1.0, + "content": "method (Alacaoglu et al., 2021), abbreviated FRB-VR. 
The S-Tseng and FRB-VR algorithms appear", + "type": "text", + "cross_page": true + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 301, + 492, + 313 + ], + "spans": [ + { + "bbox": [ + 106, + 301, + 492, + 313 + ], + "score": 1.0, + "content": "to be the only stochastic splitting methods other than SPS applicable to the tested problem class.", + "type": "text", + "cross_page": true + } + ], + "index": 11 + } + ], + "index": 48.5, + "bbox_fs": [ + 104, + 665, + 508, + 732 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "image", + "bbox": [ + 109, + 82, + 501, + 177 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 109, + 82, + 501, + 177 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 109, + 82, + 501, + 177 + ], + "spans": [ + { + "bbox": [ + 109, + 82, + 501, + 177 + ], + "score": 0.969, + "type": "image", + "image_path": "5a3352bf1be8622af7e8437ef0309b7c8e82b213e948eae1d016add4ab16fe33.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 109, + 82, + 501, + 113.66666666666667 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 109, + 113.66666666666667, + 501, + 145.33333333333334 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 109, + 145.33333333333334, + 501, + 177.0 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 186, + 506, + 253 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 186, + 506, + 199 + ], + "spans": [ + { + "bbox": [ + 105, + 186, + 506, + 199 + ], + "score": 1.0, + "content": "Figure 1: Approximation residual versus running time for three LIBSVM benchmark datasets,", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 198, + 505, + 210 + ], + "spans": [ + { + "bbox": [ + 106, + 198, + 505, + 210 + ], + "score": 1.0, + "content": "with the markers at 10-iteration intervals. Left: epsilon, middle: SUSY, right: real-sim. 
For the", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 208, + 505, + 221 + ], + "spans": [ + { + "bbox": [ + 105, + 208, + 505, + 221 + ], + "score": 1.0, + "content": "stochastic algorithms (SPS, S-Tseng, and FRB-VR), we plot the median results over 10 trials, with", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 218, + 506, + 234 + ], + "spans": [ + { + "bbox": [ + 105, + 218, + 506, + 234 + ], + "score": 1.0, + "content": "unit standard deviation horizontal error bars for the running time and the vertical error bars displaying", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 229, + 506, + 245 + ], + "spans": [ + { + "bbox": [ + 104, + 229, + 506, + 245 + ], + "score": 1.0, + "content": "the min-to-max range of the approximation residual. The code is provided in the supplementary", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 241, + 144, + 253 + ], + "spans": [ + { + "bbox": [ + 105, + 241, + 144, + 253 + ], + "score": 1.0, + "content": "material.", + "type": "text" + } + ], + "index": 8 + } + ], + "index": 5.5 + } + ], + "index": 3.25 + }, + { + "type": "text", + "bbox": [ + 108, + 279, + 503, + 312 + ], + "lines": [ + { + "bbox": [ + 106, + 279, + 505, + 291 + ], + "spans": [ + { + "bbox": [ + 106, + 279, + 505, + 291 + ], + "score": 1.0, + "content": "stochastic Tseng (S-Tseng) method of BΓΆhm et al. (2020), and the variance-reduced stochastic FRB", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 290, + 505, + 303 + ], + "spans": [ + { + "bbox": [ + 105, + 290, + 505, + 303 + ], + "score": 1.0, + "content": "method (Alacaoglu et al., 2021), abbreviated FRB-VR. 
The S-Tseng and FRB-VR algorithms appear", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 301, + 492, + 313 + ], + "spans": [ + { + "bbox": [ + 106, + 301, + 492, + 313 + ], + "score": 1.0, + "content": "to be the only stochastic splitting methods other than SPS applicable to the tested problem class.", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 10 + }, + { + "type": "text", + "bbox": [ + 106, + 317, + 506, + 351 + ], + "lines": [ + { + "bbox": [ + 105, + 317, + 506, + 330 + ], + "spans": [ + { + "bbox": [ + 105, + 317, + 396, + 330 + ], + "score": 1.0, + "content": "Figure 1 show results for three LIBSVM standard datasets: epsilon2", + "type": "text" + }, + { + "bbox": [ + 397, + 317, + 451, + 328 + ], + "score": 0.86, + "content": "m = 4 \\cdot 1 0 ^ { 5 }", + "type": "inline_equation" + }, + { + "bbox": [ + 451, + 317, + 456, + 330 + ], + "score": 1.0, + "content": ",", + "type": "text" + }, + { + "bbox": [ + 456, + 318, + 500, + 329 + ], + "score": 0.84, + "content": "d = 2 0 0 0 \\mathrm { \\Omega }", + "type": "inline_equation" + }, + { + "bbox": [ + 501, + 317, + 506, + 330 + ], + "score": 1.0, + "content": "),", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 327, + 507, + 342 + ], + "spans": [ + { + "bbox": [ + 104, + 327, + 298, + 342 + ], + "score": 1.0, + "content": "SUSY (Baldi et al., 2014; Dua & Graff, 2017)", + "type": "text" + }, + { + "bbox": [ + 298, + 329, + 348, + 340 + ], + "score": 0.86, + "content": "m = 2 \\cdot 1 0 ^ { 6 }", + "type": "inline_equation" + }, + { + "bbox": [ + 349, + 327, + 352, + 342 + ], + "score": 1.0, + "content": ",", + "type": "text" + }, + { + "bbox": [ + 353, + 329, + 383, + 339 + ], + "score": 0.85, + "content": "d = 1 8", + "type": "inline_equation" + }, + { + "bbox": [ + 384, + 327, + 451, + 342 + ], + "score": 1.0, + "content": "), and real-sim3 (", + "type": "text" + }, + { + "bbox": [ + 451, + 329, + 502, + 340 + ], + "score": 0.82, + "content": "m = 
7 2 { , } 3 0 9", + "type": "inline_equation" + }, + { + "bbox": [ + 503, + 327, + 507, + 342 + ], + "score": 1.0, + "content": ",", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 339, + 162, + 352 + ], + "spans": [ + { + "bbox": [ + 107, + 340, + 155, + 351 + ], + "score": 0.85, + "content": "d = 2 0 { , } 9 5 8 _ { , }", + "type": "inline_equation" + }, + { + "bbox": [ + 155, + 339, + 162, + 352 + ], + "score": 1.0, + "content": ").", + "type": "text" + } + ], + "index": 14 + } + ], + "index": 13 + }, + { + "type": "text", + "bbox": [ + 106, + 357, + 505, + 423 + ], + "lines": [ + { + "bbox": [ + 106, + 356, + 505, + 369 + ], + "spans": [ + { + "bbox": [ + 106, + 356, + 446, + 369 + ], + "score": 1.0, + "content": "To measure the progress of the algorithms, we used the β€œapproximation residual”", + "type": "text" + }, + { + "bbox": [ + 446, + 357, + 460, + 368 + ], + "score": 0.89, + "content": "R _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 460, + 356, + 505, + 369 + ], + "score": 1.0, + "content": "defined in", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 367, + 504, + 381 + ], + "spans": [ + { + "bbox": [ + 105, + 367, + 196, + 381 + ], + "score": 1.0, + "content": "Appendix F. As with", + "type": "text" + }, + { + "bbox": [ + 197, + 368, + 210, + 379 + ], + "score": 0.88, + "content": "G _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 211, + 367, + 246, + 381 + ], + "score": 1.0, + "content": ", having", + "type": "text" + }, + { + "bbox": [ + 246, + 368, + 282, + 379 + ], + "score": 0.92, + "content": "R _ { k } = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 282, + 367, + 335, + 381 + ], + "score": 1.0, + "content": "implies that", + "type": "text" + }, + { + "bbox": [ + 335, + 367, + 347, + 378 + ], + "score": 0.87, + "content": "z ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 347, + 367, + 430, + 381 + ], + "score": 1.0, + "content": "solves (1). 
We use", + "type": "text" + }, + { + "bbox": [ + 430, + 369, + 444, + 379 + ], + "score": 0.89, + "content": "R _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 444, + 367, + 489, + 381 + ], + "score": 1.0, + "content": "instead of", + "type": "text" + }, + { + "bbox": [ + 490, + 368, + 504, + 379 + ], + "score": 0.88, + "content": "G _ { k }", + "type": "inline_equation" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 379, + 505, + 392 + ], + "spans": [ + { + "bbox": [ + 105, + 379, + 505, + 392 + ], + "score": 1.0, + "content": "because it is also possible to compute essentially the same measure of convergence from the iterates", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 389, + 505, + 402 + ], + "spans": [ + { + "bbox": [ + 105, + 389, + 505, + 402 + ], + "score": 1.0, + "content": "of the other tested algorithms, establishing a fair comparison. Appendix F provides the details of the", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 399, + 506, + 414 + ], + "spans": [ + { + "bbox": [ + 105, + 399, + 456, + 414 + ], + "score": 1.0, + "content": "derivation of the residual measure for each algorithm, explores the relationship between", + "type": "text" + }, + { + "bbox": [ + 457, + 401, + 470, + 412 + ], + "score": 0.89, + "content": "R _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 471, + 399, + 488, + 414 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 489, + 401, + 502, + 412 + ], + "score": 0.88, + "content": "G _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 502, + 399, + 506, + 414 + ], + "score": 1.0, + "content": ",", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 106, + 412, + 298, + 424 + ], + "spans": [ + { + "bbox": [ + 106, + 412, + 298, + 424 + ], + "score": 1.0, + "content": "and provides additional implementation details.", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 17.5 + }, + { + "type": 
"text", + "bbox": [ + 107, + 429, + 505, + 483 + ], + "lines": [ + { + "bbox": [ + 106, + 429, + 506, + 442 + ], + "spans": [ + { + "bbox": [ + 106, + 429, + 506, + 442 + ], + "score": 1.0, + "content": "Figure 1 plots the approximation residual versus running time for all seven algorithms under consid-", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 439, + 505, + 453 + ], + "spans": [ + { + "bbox": [ + 105, + 439, + 505, + 453 + ], + "score": 1.0, + "content": "eration. The computations were performed using Python 3.8.3 and numpy on a 2019 MacBook Pro", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 104, + 449, + 506, + 465 + ], + "spans": [ + { + "bbox": [ + 104, + 449, + 506, + 465 + ], + "score": 1.0, + "content": "with a 2.4GHz 8-core Intel I9 processor and 32GB of RAM . Being a stochastic method, SPS-decay", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 104, + 461, + 506, + 475 + ], + "spans": [ + { + "bbox": [ + 104, + 461, + 506, + 475 + ], + "score": 1.0, + "content": "seems to outperform the deterministic methods at obtaining a medium-accuracy solution quickly. 
It", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 472, + 441, + 485 + ], + "spans": [ + { + "bbox": [ + 105, + 472, + 441, + 485 + ], + "score": 1.0, + "content": "also seems to outperform the stochastic PSR-based methods S-Tseng and FRB-VR.", + "type": "text" + } + ], + "index": 25 + } + ], + "index": 23 + }, + { + "type": "title", + "bbox": [ + 107, + 505, + 309, + 518 + ], + "lines": [ + { + "bbox": [ + 105, + 504, + 310, + 520 + ], + "spans": [ + { + "bbox": [ + 105, + 504, + 310, + 520 + ], + "score": 1.0, + "content": "8 CONCLUSIONS AND FUTURE WORK", + "type": "text" + } + ], + "index": 26 + } + ], + "index": 26 + }, + { + "type": "text", + "bbox": [ + 107, + 533, + 505, + 566 + ], + "lines": [ + { + "bbox": [ + 105, + 533, + 506, + 546 + ], + "spans": [ + { + "bbox": [ + 105, + 533, + 506, + 546 + ], + "score": 1.0, + "content": "We have developed and analyzed a stochastic splitting method that can handle min-max problems", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 106, + 545, + 505, + 557 + ], + "spans": [ + { + "bbox": [ + 106, + 545, + 505, + 557 + ], + "score": 1.0, + "content": "with multiple regularizers and constraints. 
Going forward, this development should make it possible", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 555, + 505, + 569 + ], + "spans": [ + { + "bbox": [ + 105, + 555, + 505, + 569 + ], + "score": 1.0, + "content": "to incorporate regularizers and constraints into adversarial formulations trained from large datasets.", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 28 + }, + { + "type": "text", + "bbox": [ + 107, + 572, + 505, + 627 + ], + "lines": [ + { + "bbox": [ + 105, + 572, + 506, + 585 + ], + "spans": [ + { + "bbox": [ + 105, + 572, + 506, + 585 + ], + "score": 1.0, + "content": "Recent versions of deterministic projective splitting (Combettes & Eckstein, 2018; Johnstone &", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 106, + 583, + 505, + 595 + ], + "spans": [ + { + "bbox": [ + 106, + 583, + 505, + 595 + ], + "score": 1.0, + "content": "Eckstein, 2020b) allow for asynchronous and incremental operation, meaning that not all operators", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 594, + 505, + 607 + ], + "spans": [ + { + "bbox": [ + 105, + 594, + 505, + 607 + ], + "score": 1.0, + "content": "need to be activated at every iteration, with some calculations proceeding with stale inputs. Such", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 106, + 605, + 505, + 618 + ], + "spans": [ + { + "bbox": [ + 106, + 605, + 505, + 618 + ], + "score": 1.0, + "content": "characteristics make projective splitting well-suited to distributed implementations. 
Many of our SPS", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 106, + 617, + 503, + 628 + ], + "spans": [ + { + "bbox": [ + 106, + 617, + 503, + 628 + ], + "score": 1.0, + "content": "results may be extended to allow for these variations, but we leave those extensions to future work.", + "type": "text" + } + ], + "index": 34 + } + ], + "index": 32 + }, + { + "type": "title", + "bbox": [ + 108, + 649, + 175, + 661 + ], + "lines": [ + { + "bbox": [ + 106, + 649, + 176, + 662 + ], + "spans": [ + { + "bbox": [ + 106, + 649, + 176, + 662 + ], + "score": 1.0, + "content": "REFERENCES", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 35 + }, + { + "type": "text", + "bbox": [ + 107, + 671, + 505, + 693 + ], + "lines": [ + { + "bbox": [ + 106, + 671, + 505, + 683 + ], + "spans": [ + { + "bbox": [ + 106, + 671, + 505, + 683 + ], + "score": 1.0, + "content": "Ahmet Alacaoglu, Yura Malitsky, and Volkan Cevher. Forward-reflected-backward method with", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 116, + 681, + 481, + 694 + ], + "spans": [ + { + "bbox": [ + 116, + 681, + 481, + 694 + ], + "score": 1.0, + "content": "variance reduction. Computational Optimization and Applications, 2021. 
Available online.", + "type": "text" + } + ], + "index": 37 + } + ], + "index": 36.5 + } + ], + "page_idx": 8, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 117, + 711, + 453, + 732 + ], + "lines": [ + { + "bbox": [ + 118, + 709, + 453, + 722 + ], + "spans": [ + { + "bbox": [ + 118, + 709, + 453, + 722 + ], + "score": 1.0, + "content": "2Original data source http://largescale.ml.tu-berlin.de/instructions/", + "type": "text" + } + ] + }, + { + "bbox": [ + 118, + 720, + 452, + 733 + ], + "spans": [ + { + "bbox": [ + 118, + 720, + 452, + 733 + ], + "score": 1.0, + "content": "3Original data source https://people.cs.umass.edu/~mccallum/data.html", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 106, + 27, + 308, + 37 + ], + "lines": [ + { + "bbox": [ + 106, + 26, + 309, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 309, + 38 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 302, + 751, + 309, + 759 + ], + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 762 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 762 + ], + "score": 1.0, + "content": "9", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "image", + "bbox": [ + 109, + 82, + 501, + 177 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 109, + 82, + 501, + 177 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 109, + 82, + 501, + 177 + ], + "spans": [ + { + "bbox": [ + 109, + 82, + 501, + 177 + ], + "score": 0.969, + "type": "image", + "image_path": "5a3352bf1be8622af7e8437ef0309b7c8e82b213e948eae1d016add4ab16fe33.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 109, + 82, + 501, + 113.66666666666667 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 109, + 113.66666666666667, + 501, + 145.33333333333334 + ], + "spans": [], + "index": 
1 + }, + { + "bbox": [ + 109, + 145.33333333333334, + 501, + 177.0 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 186, + 506, + 253 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 186, + 506, + 199 + ], + "spans": [ + { + "bbox": [ + 105, + 186, + 506, + 199 + ], + "score": 1.0, + "content": "Figure 1: Approximation residual versus running time for three LIBSVM benchmark datasets,", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 198, + 505, + 210 + ], + "spans": [ + { + "bbox": [ + 106, + 198, + 505, + 210 + ], + "score": 1.0, + "content": "with the markers at 10-iteration intervals. Left: epsilon, middle: SUSY, right: real-sim. For the", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 208, + 505, + 221 + ], + "spans": [ + { + "bbox": [ + 105, + 208, + 505, + 221 + ], + "score": 1.0, + "content": "stochastic algorithms (SPS, S-Tseng, and FRB-VR), we plot the median results over 10 trials, with", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 218, + 506, + 234 + ], + "spans": [ + { + "bbox": [ + 105, + 218, + 506, + 234 + ], + "score": 1.0, + "content": "unit standard deviation horizontal error bars for the running time and the vertical error bars displaying", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 229, + 506, + 245 + ], + "spans": [ + { + "bbox": [ + 104, + 229, + 506, + 245 + ], + "score": 1.0, + "content": "the min-to-max range of the approximation residual. 
The code is provided in the supplementary", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 241, + 144, + 253 + ], + "spans": [ + { + "bbox": [ + 105, + 241, + 144, + 253 + ], + "score": 1.0, + "content": "material.", + "type": "text" + } + ], + "index": 8 + } + ], + "index": 5.5 + } + ], + "index": 3.25 + }, + { + "type": "text", + "bbox": [ + 108, + 279, + 503, + 312 + ], + "lines": [], + "index": 10, + "bbox_fs": [ + 105, + 279, + 505, + 313 + ], + "lines_deleted": true + }, + { + "type": "text", + "bbox": [ + 106, + 317, + 506, + 351 + ], + "lines": [ + { + "bbox": [ + 105, + 317, + 506, + 330 + ], + "spans": [ + { + "bbox": [ + 105, + 317, + 396, + 330 + ], + "score": 1.0, + "content": "Figure 1 show results for three LIBSVM standard datasets: epsilon2", + "type": "text" + }, + { + "bbox": [ + 397, + 317, + 451, + 328 + ], + "score": 0.86, + "content": "m = 4 \\cdot 1 0 ^ { 5 }", + "type": "inline_equation" + }, + { + "bbox": [ + 451, + 317, + 456, + 330 + ], + "score": 1.0, + "content": ",", + "type": "text" + }, + { + "bbox": [ + 456, + 318, + 500, + 329 + ], + "score": 0.84, + "content": "d = 2 0 0 0 \\mathrm { \\Omega }", + "type": "inline_equation" + }, + { + "bbox": [ + 501, + 317, + 506, + 330 + ], + "score": 1.0, + "content": "),", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 327, + 507, + 342 + ], + "spans": [ + { + "bbox": [ + 104, + 327, + 298, + 342 + ], + "score": 1.0, + "content": "SUSY (Baldi et al., 2014; Dua & Graff, 2017)", + "type": "text" + }, + { + "bbox": [ + 298, + 329, + 348, + 340 + ], + "score": 0.86, + "content": "m = 2 \\cdot 1 0 ^ { 6 }", + "type": "inline_equation" + }, + { + "bbox": [ + 349, + 327, + 352, + 342 + ], + "score": 1.0, + "content": ",", + "type": "text" + }, + { + "bbox": [ + 353, + 329, + 383, + 339 + ], + "score": 0.85, + "content": "d = 1 8", + "type": "inline_equation" + }, + { + "bbox": [ + 384, + 327, + 451, + 342 + ], + "score": 1.0, + "content": "), and real-sim3 
(", + "type": "text" + }, + { + "bbox": [ + 451, + 329, + 502, + 340 + ], + "score": 0.82, + "content": "m = 7 2 { , } 3 0 9", + "type": "inline_equation" + }, + { + "bbox": [ + 503, + 327, + 507, + 342 + ], + "score": 1.0, + "content": ",", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 339, + 162, + 352 + ], + "spans": [ + { + "bbox": [ + 107, + 340, + 155, + 351 + ], + "score": 0.85, + "content": "d = 2 0 { , } 9 5 8 _ { , }", + "type": "inline_equation" + }, + { + "bbox": [ + 155, + 339, + 162, + 352 + ], + "score": 1.0, + "content": ").", + "type": "text" + } + ], + "index": 14 + } + ], + "index": 13, + "bbox_fs": [ + 104, + 317, + 507, + 352 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 357, + 505, + 423 + ], + "lines": [ + { + "bbox": [ + 106, + 356, + 505, + 369 + ], + "spans": [ + { + "bbox": [ + 106, + 356, + 446, + 369 + ], + "score": 1.0, + "content": "To measure the progress of the algorithms, we used the β€œapproximation residual”", + "type": "text" + }, + { + "bbox": [ + 446, + 357, + 460, + 368 + ], + "score": 0.89, + "content": "R _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 460, + 356, + 505, + 369 + ], + "score": 1.0, + "content": "defined in", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 367, + 504, + 381 + ], + "spans": [ + { + "bbox": [ + 105, + 367, + 196, + 381 + ], + "score": 1.0, + "content": "Appendix F. 
As with", + "type": "text" + }, + { + "bbox": [ + 197, + 368, + 210, + 379 + ], + "score": 0.88, + "content": "G _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 211, + 367, + 246, + 381 + ], + "score": 1.0, + "content": ", having", + "type": "text" + }, + { + "bbox": [ + 246, + 368, + 282, + 379 + ], + "score": 0.92, + "content": "R _ { k } = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 282, + 367, + 335, + 381 + ], + "score": 1.0, + "content": "implies that", + "type": "text" + }, + { + "bbox": [ + 335, + 367, + 347, + 378 + ], + "score": 0.87, + "content": "z ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 347, + 367, + 430, + 381 + ], + "score": 1.0, + "content": "solves (1). We use", + "type": "text" + }, + { + "bbox": [ + 430, + 369, + 444, + 379 + ], + "score": 0.89, + "content": "R _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 444, + 367, + 489, + 381 + ], + "score": 1.0, + "content": "instead of", + "type": "text" + }, + { + "bbox": [ + 490, + 368, + 504, + 379 + ], + "score": 0.88, + "content": "G _ { k }", + "type": "inline_equation" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 379, + 505, + 392 + ], + "spans": [ + { + "bbox": [ + 105, + 379, + 505, + 392 + ], + "score": 1.0, + "content": "because it is also possible to compute essentially the same measure of convergence from the iterates", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 389, + 505, + 402 + ], + "spans": [ + { + "bbox": [ + 105, + 389, + 505, + 402 + ], + "score": 1.0, + "content": "of the other tested algorithms, establishing a fair comparison. 
Appendix F provides the details of the", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 399, + 506, + 414 + ], + "spans": [ + { + "bbox": [ + 105, + 399, + 456, + 414 + ], + "score": 1.0, + "content": "derivation of the residual measure for each algorithm, explores the relationship between", + "type": "text" + }, + { + "bbox": [ + 457, + 401, + 470, + 412 + ], + "score": 0.89, + "content": "R _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 471, + 399, + 488, + 414 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 489, + 401, + 502, + 412 + ], + "score": 0.88, + "content": "G _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 502, + 399, + 506, + 414 + ], + "score": 1.0, + "content": ",", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 106, + 412, + 298, + 424 + ], + "spans": [ + { + "bbox": [ + 106, + 412, + 298, + 424 + ], + "score": 1.0, + "content": "and provides additional implementation details.", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 17.5, + "bbox_fs": [ + 105, + 356, + 506, + 424 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 429, + 505, + 483 + ], + "lines": [ + { + "bbox": [ + 106, + 429, + 506, + 442 + ], + "spans": [ + { + "bbox": [ + 106, + 429, + 506, + 442 + ], + "score": 1.0, + "content": "Figure 1 plots the approximation residual versus running time for all seven algorithms under consid-", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 439, + 505, + 453 + ], + "spans": [ + { + "bbox": [ + 105, + 439, + 505, + 453 + ], + "score": 1.0, + "content": "eration. The computations were performed using Python 3.8.3 and numpy on a 2019 MacBook Pro", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 104, + 449, + 506, + 465 + ], + "spans": [ + { + "bbox": [ + 104, + 449, + 506, + 465 + ], + "score": 1.0, + "content": "with a 2.4GHz 8-core Intel I9 processor and 32GB of RAM . 
Being a stochastic method, SPS-decay", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 104, + 461, + 506, + 475 + ], + "spans": [ + { + "bbox": [ + 104, + 461, + 506, + 475 + ], + "score": 1.0, + "content": "seems to outperform the deterministic methods at obtaining a medium-accuracy solution quickly. It", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 472, + 441, + 485 + ], + "spans": [ + { + "bbox": [ + 105, + 472, + 441, + 485 + ], + "score": 1.0, + "content": "also seems to outperform the stochastic PSR-based methods S-Tseng and FRB-VR.", + "type": "text" + } + ], + "index": 25 + } + ], + "index": 23, + "bbox_fs": [ + 104, + 429, + 506, + 485 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 505, + 309, + 518 + ], + "lines": [ + { + "bbox": [ + 105, + 504, + 310, + 520 + ], + "spans": [ + { + "bbox": [ + 105, + 504, + 310, + 520 + ], + "score": 1.0, + "content": "8 CONCLUSIONS AND FUTURE WORK", + "type": "text" + } + ], + "index": 26 + } + ], + "index": 26 + }, + { + "type": "text", + "bbox": [ + 107, + 533, + 505, + 566 + ], + "lines": [ + { + "bbox": [ + 105, + 533, + 506, + 546 + ], + "spans": [ + { + "bbox": [ + 105, + 533, + 506, + 546 + ], + "score": 1.0, + "content": "We have developed and analyzed a stochastic splitting method that can handle min-max problems", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 106, + 545, + 505, + 557 + ], + "spans": [ + { + "bbox": [ + 106, + 545, + 505, + 557 + ], + "score": 1.0, + "content": "with multiple regularizers and constraints. 
Going forward, this development should make it possible", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 555, + 505, + 569 + ], + "spans": [ + { + "bbox": [ + 105, + 555, + 505, + 569 + ], + "score": 1.0, + "content": "to incorporate regularizers and constraints into adversarial formulations trained from large datasets.", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 28, + "bbox_fs": [ + 105, + 533, + 506, + 569 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 572, + 505, + 627 + ], + "lines": [ + { + "bbox": [ + 105, + 572, + 506, + 585 + ], + "spans": [ + { + "bbox": [ + 105, + 572, + 506, + 585 + ], + "score": 1.0, + "content": "Recent versions of deterministic projective splitting (Combettes & Eckstein, 2018; Johnstone &", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 106, + 583, + 505, + 595 + ], + "spans": [ + { + "bbox": [ + 106, + 583, + 505, + 595 + ], + "score": 1.0, + "content": "Eckstein, 2020b) allow for asynchronous and incremental operation, meaning that not all operators", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 594, + 505, + 607 + ], + "spans": [ + { + "bbox": [ + 105, + 594, + 505, + 607 + ], + "score": 1.0, + "content": "need to be activated at every iteration, with some calculations proceeding with stale inputs. Such", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 106, + 605, + 505, + 618 + ], + "spans": [ + { + "bbox": [ + 106, + 605, + 505, + 618 + ], + "score": 1.0, + "content": "characteristics make projective splitting well-suited to distributed implementations. 
Many of our SPS", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 106, + 617, + 503, + 628 + ], + "spans": [ + { + "bbox": [ + 106, + 617, + 503, + 628 + ], + "score": 1.0, + "content": "results may be extended to allow for these variations, but we leave those extensions to future work.", + "type": "text" + } + ], + "index": 34 + } + ], + "index": 32, + "bbox_fs": [ + 105, + 572, + 506, + 628 + ] + }, + { + "type": "title", + "bbox": [ + 108, + 649, + 175, + 661 + ], + "lines": [ + { + "bbox": [ + 106, + 649, + 176, + 662 + ], + "spans": [ + { + "bbox": [ + 106, + 649, + 176, + 662 + ], + "score": 1.0, + "content": "REFERENCES", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 35 + }, + { + "type": "text", + "bbox": [ + 107, + 671, + 505, + 693 + ], + "lines": [ + { + "bbox": [ + 106, + 671, + 505, + 683 + ], + "spans": [ + { + "bbox": [ + 106, + 671, + 505, + 683 + ], + "score": 1.0, + "content": "Ahmet Alacaoglu, Yura Malitsky, and Volkan Cevher. Forward-reflected-backward method with", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 116, + 681, + 481, + 694 + ], + "spans": [ + { + "bbox": [ + 116, + 681, + 481, + 694 + ], + "score": 1.0, + "content": "variance reduction. Computational Optimization and Applications, 2021. Available online.", + "type": "text" + } + ], + "index": 37 + } + ], + "index": 36.5, + "bbox_fs": [ + 106, + 671, + 505, + 694 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 108, + 82, + 505, + 116 + ], + "lines": [ + { + "bbox": [ + 105, + 81, + 506, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 506, + 95 + ], + "score": 1.0, + "content": "Abdullah Alotaibi, Patrick L Combettes, and Naseer Shahzad. 
Solving coupled composite mono-", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 115, + 93, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 115, + 93, + 505, + 106 + ], + "score": 1.0, + "content": "tone inclusions by successive FejΓ©r approximations of their Kuhn-Tucker set. SIAM Journal on", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 115, + 105, + 273, + 116 + ], + "spans": [ + { + "bbox": [ + 115, + 105, + 273, + 116 + ], + "score": 1.0, + "content": "Optimization, 24(4):2076–2095, 2014.", + "type": "text" + } + ], + "index": 2 + } + ], + "index": 1 + }, + { + "type": "text", + "bbox": [ + 107, + 123, + 506, + 167 + ], + "lines": [ + { + "bbox": [ + 106, + 123, + 506, + 135 + ], + "spans": [ + { + "bbox": [ + 106, + 123, + 506, + 135 + ], + "score": 1.0, + "content": "Kimon Antonakopoulos, Veronica Belmega, and Panayotis Mertikopoulos. An adaptive mirror-", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 115, + 134, + 506, + 146 + ], + "spans": [ + { + "bbox": [ + 115, + 134, + 506, + 146 + ], + "score": 1.0, + "content": "prox method for variational inequalities with singular operators. In H. Wallach, H. Larochelle,", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 115, + 145, + 506, + 158 + ], + "spans": [ + { + "bbox": [ + 115, + 145, + 506, + 158 + ], + "score": 1.0, + "content": "A. Beygelzimer, F. d'AlchΓ©-Buc, E. Fox, and R. Garnett (eds.), Advances in Neural Information", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 114, + 155, + 349, + 168 + ], + "spans": [ + { + "bbox": [ + 114, + 155, + 349, + 168 + ], + "score": 1.0, + "content": "Processing Systems, volume 32. 
Curran Associates, 2019.", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 4.5 + }, + { + "type": "text", + "bbox": [ + 106, + 174, + 505, + 219 + ], + "lines": [ + { + "bbox": [ + 106, + 174, + 506, + 186 + ], + "spans": [ + { + "bbox": [ + 106, + 174, + 506, + 186 + ], + "score": 1.0, + "content": "Martin Arjovsky, Soumith Chintala, and LΓ©on Bottou. Wasserstein generative adversarial networks.", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 115, + 185, + 506, + 198 + ], + "spans": [ + { + "bbox": [ + 115, + 185, + 506, + 198 + ], + "score": 1.0, + "content": "In Doina Precup and Yee Whye Teh (eds.), Proceedings of the 34th International Conference on", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 114, + 196, + 505, + 209 + ], + "spans": [ + { + "bbox": [ + 114, + 196, + 505, + 209 + ], + "score": 1.0, + "content": "Machine Learning, volume 70 of Proceedings of Machine Learning Research, pp. 214–223, 06–11", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 115, + 207, + 162, + 220 + ], + "spans": [ + { + "bbox": [ + 115, + 207, + 162, + 220 + ], + "score": 1.0, + "content": "Aug 2017.", + "type": "text" + } + ], + "index": 10 + } + ], + "index": 8.5 + }, + { + "type": "text", + "bbox": [ + 106, + 225, + 505, + 249 + ], + "lines": [ + { + "bbox": [ + 105, + 223, + 505, + 240 + ], + "spans": [ + { + "bbox": [ + 105, + 223, + 505, + 240 + ], + "score": 1.0, + "content": "Pierre Baldi, Peter Sadowski, and Daniel Whiteson. Searching for exotic particles in high-energy", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 115, + 237, + 394, + 249 + ], + "spans": [ + { + "bbox": [ + 115, + 237, + 394, + 249 + ], + "score": 1.0, + "content": "physics with deep learning. 
Nature communications, 5(1):1–9, 2014.", + "type": "text" + } + ], + "index": 12 + } + ], + "index": 11.5 + }, + { + "type": "text", + "bbox": [ + 106, + 255, + 506, + 300 + ], + "lines": [ + { + "bbox": [ + 106, + 255, + 506, + 267 + ], + "spans": [ + { + "bbox": [ + 106, + 255, + 506, + 267 + ], + "score": 1.0, + "content": "David Balduzzi, Sebastien Racaniere, James Martens, Jakob Foerster, Karl Tuyls, and Thore Graepel.", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 116, + 267, + 506, + 279 + ], + "spans": [ + { + "bbox": [ + 116, + 267, + 194, + 279 + ], + "score": 1.0, + "content": "The mechanics of", + "type": "text" + }, + { + "bbox": [ + 194, + 268, + 201, + 277 + ], + "score": 0.76, + "content": "n", + "type": "inline_equation" + }, + { + "bbox": [ + 201, + 267, + 506, + 279 + ], + "score": 1.0, + "content": "-player differentiable games. In Jennifer Dy and Andreas Krause (eds.),", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 115, + 277, + 505, + 290 + ], + "spans": [ + { + "bbox": [ + 115, + 277, + 505, + 290 + ], + "score": 1.0, + "content": "Proceedings of the 35th International Conference on Machine Learning, volume 80 of Proceedings", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 116, + 289, + 398, + 300 + ], + "spans": [ + { + "bbox": [ + 116, + 289, + 398, + 300 + ], + "score": 1.0, + "content": "of Machine Learning Research, pp. 354–363. PMLR, 10–15 Jul 2018.", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 14.5 + }, + { + "type": "text", + "bbox": [ + 107, + 306, + 504, + 330 + ], + "lines": [ + { + "bbox": [ + 105, + 306, + 505, + 321 + ], + "spans": [ + { + "bbox": [ + 105, + 306, + 505, + 321 + ], + "score": 1.0, + "content": "Heinz H Bauschke and Patrick L Combettes. 
Convex analysis and monotone operator theory in", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 116, + 318, + 294, + 331 + ], + "spans": [ + { + "bbox": [ + 116, + 318, + 294, + 331 + ], + "score": 1.0, + "content": "Hilbert spaces. Springer, 2nd edition, 2017.", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 17.5 + }, + { + "type": "text", + "bbox": [ + 106, + 336, + 506, + 360 + ], + "lines": [ + { + "bbox": [ + 107, + 337, + 496, + 349 + ], + "spans": [ + { + "bbox": [ + 107, + 337, + 496, + 349 + ], + "score": 1.0, + "content": "Axel BΓΆhm, Michael Sedlmayer, ErnΓΆ Robert Csetnek, and Radu Ioan BoΒΈt. Two steps at a time β€”", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 115, + 347, + 488, + 360 + ], + "spans": [ + { + "bbox": [ + 115, + 347, + 488, + 360 + ], + "score": 1.0, + "content": "taking GAN training in stride with Tseng’s method. arXiv preprint arXiv:2006.09033, 2020.", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 19.5 + }, + { + "type": "text", + "bbox": [ + 106, + 366, + 506, + 400 + ], + "lines": [ + { + "bbox": [ + 105, + 366, + 506, + 379 + ], + "spans": [ + { + "bbox": [ + 105, + 366, + 506, + 379 + ], + "score": 1.0, + "content": "Radu Ioan Bot, Panayotis Mertikopoulos, Mathias Staudigl, and Phan Tu Vuong. Forward-backward-", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 116, + 376, + 505, + 390 + ], + "spans": [ + { + "bbox": [ + 116, + 376, + 505, + 390 + ], + "score": 1.0, + "content": "forward methods with variance reduction for stochastic variational inequalities. 
arXiv preprint", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 115, + 388, + 219, + 399 + ], + "spans": [ + { + "bbox": [ + 115, + 388, + 219, + 399 + ], + "score": 1.0, + "content": "arXiv:1902.03355, 2019.", + "type": "text" + } + ], + "index": 23 + } + ], + "index": 22 + }, + { + "type": "text", + "bbox": [ + 105, + 406, + 504, + 430 + ], + "lines": [ + { + "bbox": [ + 106, + 407, + 504, + 419 + ], + "spans": [ + { + "bbox": [ + 106, + 407, + 504, + 419 + ], + "score": 1.0, + "content": "Luis M BriceΓ±o-Arias and Patrick L Combettes. A monotone+skew splitting model for composite", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 115, + 418, + 473, + 430 + ], + "spans": [ + { + "bbox": [ + 115, + 418, + 473, + 430 + ], + "score": 1.0, + "content": "monotone inclusions in duality. SIAM Journal on Optimization, 21(4):1230–1250, 2011.", + "type": "text" + } + ], + "index": 25 + } + ], + "index": 24.5 + }, + { + "type": "text", + "bbox": [ + 106, + 436, + 503, + 471 + ], + "lines": [ + { + "bbox": [ + 105, + 435, + 505, + 450 + ], + "spans": [ + { + "bbox": [ + 105, + 435, + 505, + 450 + ], + "score": 1.0, + "content": "Luis M BriceΓ±o-Arias and Patrick L Combettes. Monotone operator methods for Nash equilibria", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 115, + 447, + 505, + 461 + ], + "spans": [ + { + "bbox": [ + 115, + 447, + 505, + 461 + ], + "score": 1.0, + "content": "in non-potential games. In Computational and Analytical Mathematics, volume 50 of Springer", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 115, + 458, + 409, + 471 + ], + "spans": [ + { + "bbox": [ + 115, + 458, + 409, + 471 + ], + "score": 1.0, + "content": "Proceedings in Mathematics and Statistics, pp. 143–159. 
Springer, 2013.", + "type": "text" + } + ], + "index": 28 + } + ], + "index": 27 + }, + { + "type": "text", + "bbox": [ + 104, + 477, + 505, + 500 + ], + "lines": [ + { + "bbox": [ + 105, + 475, + 505, + 490 + ], + "spans": [ + { + "bbox": [ + 105, + 475, + 505, + 490 + ], + "score": 1.0, + "content": "L Elisa Celis and Vijay Keswani. Improved adversarial learning for fair classification. arXiv preprint", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 115, + 488, + 219, + 500 + ], + "spans": [ + { + "bbox": [ + 115, + 488, + 219, + 500 + ], + "score": 1.0, + "content": "arXiv:1901.10443, 2019.", + "type": "text" + } + ], + "index": 30 + } + ], + "index": 29.5 + }, + { + "type": "text", + "bbox": [ + 108, + 506, + 504, + 540 + ], + "lines": [ + { + "bbox": [ + 106, + 507, + 506, + 519 + ], + "spans": [ + { + "bbox": [ + 106, + 507, + 506, + 519 + ], + "score": 1.0, + "content": "Chih-Chung Chang and Chih-Jen Lin. LIBSVM: A library for support vector machines. ACM", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 115, + 518, + 506, + 529 + ], + "spans": [ + { + "bbox": [ + 115, + 518, + 506, + 529 + ], + "score": 1.0, + "content": "Transactions on Intelligent Systems and Technology, 2:27:1–27:27, 2011. 
Software available at", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 115, + 528, + 361, + 541 + ], + "spans": [ + { + "bbox": [ + 115, + 528, + 361, + 541 + ], + "score": 1.0, + "content": "http://www.csie.ntu.edu.tw/~cjlin/libsvm.", + "type": "text" + } + ], + "index": 33 + } + ], + "index": 32 + }, + { + "type": "text", + "bbox": [ + 107, + 546, + 504, + 581 + ], + "lines": [ + { + "bbox": [ + 105, + 546, + 506, + 561 + ], + "spans": [ + { + "bbox": [ + 105, + 546, + 506, + 561 + ], + "score": 1.0, + "content": "Tatjana Chavdarova, Matteo Pagliardini, Sebastian U Stich, FranΓ§ois Fleuret, and Martin Jaggi.", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 115, + 558, + 507, + 571 + ], + "spans": [ + { + "bbox": [ + 115, + 558, + 507, + 571 + ], + "score": 1.0, + "content": "Taming GANs with lookahead-minmax. In International Conference on Learning Representations,", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 115, + 569, + 427, + 582 + ], + "spans": [ + { + "bbox": [ + 115, + 569, + 351, + 582 + ], + "score": 1.0, + "content": "2021. URL https://openreview.net/forum?id", + "type": "text" + }, + { + "bbox": [ + 351, + 570, + 357, + 578 + ], + "score": 0.37, + "content": "=", + "type": "inline_equation" + }, + { + "bbox": [ + 358, + 569, + 427, + 582 + ], + "score": 1.0, + "content": "ZW0yXJyNmoG.", + "type": "text" + } + ], + "index": 36 + } + ], + "index": 35 + }, + { + "type": "text", + "bbox": [ + 104, + 587, + 504, + 610 + ], + "lines": [ + { + "bbox": [ + 105, + 587, + 505, + 601 + ], + "spans": [ + { + "bbox": [ + 105, + 587, + 505, + 601 + ], + "score": 1.0, + "content": "Patrick L. Combettes and Jonathan Eckstein. Asynchronous block-iterative primal-dual decomposition", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 116, + 598, + 476, + 611 + ], + "spans": [ + { + "bbox": [ + 116, + 598, + 476, + 611 + ], + "score": 1.0, + "content": "methods for monotone inclusions. 
Mathematical Programming, 168(1-2):645–672, 2018.", + "type": "text" + } + ], + "index": 38 + } + ], + "index": 37.5 + }, + { + "type": "text", + "bbox": [ + 106, + 617, + 506, + 662 + ], + "lines": [ + { + "bbox": [ + 105, + 616, + 507, + 631 + ], + "spans": [ + { + "bbox": [ + 105, + 616, + 507, + 631 + ], + "score": 1.0, + "content": "Patrick L Combettes and Jean-Christophe Pesquet. Proximal splitting methods in signal processing.", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 116, + 628, + 506, + 641 + ], + "spans": [ + { + "bbox": [ + 116, + 628, + 506, + 641 + ], + "score": 1.0, + "content": "In H.H. Bauschke, R.S.S. Burachik, P.L. Combettes, V. Elser, D.R. Luke, and H. Wolkowicz (eds.),", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 114, + 639, + 507, + 653 + ], + "spans": [ + { + "bbox": [ + 114, + 639, + 507, + 653 + ], + "score": 1.0, + "content": "Fixed-Point Algorithms for Inverse Problems in Science and Engineering, pp. 185–212. Springer,", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 115, + 650, + 142, + 662 + ], + "spans": [ + { + "bbox": [ + 115, + 650, + 142, + 662 + ], + "score": 1.0, + "content": "2011.", + "type": "text" + } + ], + "index": 42 + } + ], + "index": 40.5 + }, + { + "type": "text", + "bbox": [ + 106, + 668, + 504, + 703 + ], + "lines": [ + { + "bbox": [ + 105, + 667, + 505, + 682 + ], + "spans": [ + { + "bbox": [ + 105, + 667, + 505, + 682 + ], + "score": 1.0, + "content": "Patrick L Combettes and Jean-Christophe Pesquet. 
Primal-dual splitting algorithm for solving", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 115, + 679, + 505, + 693 + ], + "spans": [ + { + "bbox": [ + 115, + 679, + 505, + 693 + ], + "score": 1.0, + "content": "inclusions with mixtures of composite, Lipschitzian, and parallel-sum type monotone operators.", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 115, + 691, + 352, + 703 + ], + "spans": [ + { + "bbox": [ + 115, + 691, + 352, + 703 + ], + "score": 1.0, + "content": "Set-Valued and variational analysis, 20(2):307–330, 2012.", + "type": "text" + } + ], + "index": 45 + } + ], + "index": 44 + }, + { + "type": "text", + "bbox": [ + 106, + 709, + 502, + 732 + ], + "lines": [ + { + "bbox": [ + 106, + 709, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 106, + 709, + 505, + 722 + ], + "score": 1.0, + "content": "Patrick L Combettes and Jean-Christophe Pesquet. Stochastic quasi-FejΓ©r block-coordinate fixed", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 115, + 721, + 503, + 732 + ], + "spans": [ + { + "bbox": [ + 115, + 721, + 503, + 732 + ], + "score": 1.0, + "content": "point iterations with random sweeping. 
SIAM Journal on Optimization, 25(2):1221–1248, 2015.", + "type": "text" + } + ], + "index": 47 + } + ], + "index": 46.5 + } + ], + "page_idx": 9, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 760 + ], + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 764 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 764 + ], + "score": 1.0, + "content": "10", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 107, + 27, + 308, + 37 + ], + "lines": [ + { + "bbox": [ + 107, + 26, + 308, + 38 + ], + "spans": [ + { + "bbox": [ + 107, + 26, + 308, + 38 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 108, + 82, + 505, + 116 + ], + "lines": [ + { + "bbox": [ + 105, + 81, + 506, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 506, + 95 + ], + "score": 1.0, + "content": "Abdullah Alotaibi, Patrick L Combettes, and Naseer Shahzad. Solving coupled composite mono-", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 115, + 93, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 115, + 93, + 505, + 106 + ], + "score": 1.0, + "content": "tone inclusions by successive FejΓ©r approximations of their Kuhn-Tucker set. 
SIAM Journal on", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 115, + 105, + 273, + 116 + ], + "spans": [ + { + "bbox": [ + 115, + 105, + 273, + 116 + ], + "score": 1.0, + "content": "Optimization, 24(4):2076–2095, 2014.", + "type": "text" + } + ], + "index": 2 + } + ], + "index": 1, + "bbox_fs": [ + 105, + 81, + 506, + 116 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 123, + 506, + 167 + ], + "lines": [ + { + "bbox": [ + 106, + 123, + 506, + 135 + ], + "spans": [ + { + "bbox": [ + 106, + 123, + 506, + 135 + ], + "score": 1.0, + "content": "Kimon Antonakopoulos, Veronica Belmega, and Panayotis Mertikopoulos. An adaptive mirror-", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 115, + 134, + 506, + 146 + ], + "spans": [ + { + "bbox": [ + 115, + 134, + 506, + 146 + ], + "score": 1.0, + "content": "prox method for variational inequalities with singular operators. In H. Wallach, H. Larochelle,", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 115, + 145, + 506, + 158 + ], + "spans": [ + { + "bbox": [ + 115, + 145, + 506, + 158 + ], + "score": 1.0, + "content": "A. Beygelzimer, F. d'AlchΓ©-Buc, E. Fox, and R. Garnett (eds.), Advances in Neural Information", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 114, + 155, + 349, + 168 + ], + "spans": [ + { + "bbox": [ + 114, + 155, + 349, + 168 + ], + "score": 1.0, + "content": "Processing Systems, volume 32. Curran Associates, 2019.", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 4.5, + "bbox_fs": [ + 106, + 123, + 506, + 168 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 174, + 505, + 219 + ], + "lines": [ + { + "bbox": [ + 106, + 174, + 506, + 186 + ], + "spans": [ + { + "bbox": [ + 106, + 174, + 506, + 186 + ], + "score": 1.0, + "content": "Martin Arjovsky, Soumith Chintala, and LΓ©on Bottou. 
Wasserstein generative adversarial networks.", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 115, + 185, + 506, + 198 + ], + "spans": [ + { + "bbox": [ + 115, + 185, + 506, + 198 + ], + "score": 1.0, + "content": "In Doina Precup and Yee Whye Teh (eds.), Proceedings of the 34th International Conference on", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 114, + 196, + 505, + 209 + ], + "spans": [ + { + "bbox": [ + 114, + 196, + 505, + 209 + ], + "score": 1.0, + "content": "Machine Learning, volume 70 of Proceedings of Machine Learning Research, pp. 214–223, 06–11", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 115, + 207, + 162, + 220 + ], + "spans": [ + { + "bbox": [ + 115, + 207, + 162, + 220 + ], + "score": 1.0, + "content": "Aug 2017.", + "type": "text" + } + ], + "index": 10 + } + ], + "index": 8.5, + "bbox_fs": [ + 106, + 174, + 506, + 220 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 225, + 505, + 249 + ], + "lines": [ + { + "bbox": [ + 105, + 223, + 505, + 240 + ], + "spans": [ + { + "bbox": [ + 105, + 223, + 505, + 240 + ], + "score": 1.0, + "content": "Pierre Baldi, Peter Sadowski, and Daniel Whiteson. Searching for exotic particles in high-energy", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 115, + 237, + 394, + 249 + ], + "spans": [ + { + "bbox": [ + 115, + 237, + 394, + 249 + ], + "score": 1.0, + "content": "physics with deep learning. 
Nature communications, 5(1):1–9, 2014.", + "type": "text" + } + ], + "index": 12 + } + ], + "index": 11.5, + "bbox_fs": [ + 105, + 223, + 505, + 249 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 255, + 506, + 300 + ], + "lines": [ + { + "bbox": [ + 106, + 255, + 506, + 267 + ], + "spans": [ + { + "bbox": [ + 106, + 255, + 506, + 267 + ], + "score": 1.0, + "content": "David Balduzzi, Sebastien Racaniere, James Martens, Jakob Foerster, Karl Tuyls, and Thore Graepel.", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 116, + 267, + 506, + 279 + ], + "spans": [ + { + "bbox": [ + 116, + 267, + 194, + 279 + ], + "score": 1.0, + "content": "The mechanics of", + "type": "text" + }, + { + "bbox": [ + 194, + 268, + 201, + 277 + ], + "score": 0.76, + "content": "n", + "type": "inline_equation" + }, + { + "bbox": [ + 201, + 267, + 506, + 279 + ], + "score": 1.0, + "content": "-player differentiable games. In Jennifer Dy and Andreas Krause (eds.),", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 115, + 277, + 505, + 290 + ], + "spans": [ + { + "bbox": [ + 115, + 277, + 505, + 290 + ], + "score": 1.0, + "content": "Proceedings of the 35th International Conference on Machine Learning, volume 80 of Proceedings", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 116, + 289, + 398, + 300 + ], + "spans": [ + { + "bbox": [ + 116, + 289, + 398, + 300 + ], + "score": 1.0, + "content": "of Machine Learning Research, pp. 354–363. PMLR, 10–15 Jul 2018.", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 14.5, + "bbox_fs": [ + 106, + 255, + 506, + 300 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 306, + 504, + 330 + ], + "lines": [ + { + "bbox": [ + 105, + 306, + 505, + 321 + ], + "spans": [ + { + "bbox": [ + 105, + 306, + 505, + 321 + ], + "score": 1.0, + "content": "Heinz H Bauschke and Patrick L Combettes. 
Convex analysis and monotone operator theory in", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 116, + 318, + 294, + 331 + ], + "spans": [ + { + "bbox": [ + 116, + 318, + 294, + 331 + ], + "score": 1.0, + "content": "Hilbert spaces. Springer, 2nd edition, 2017.", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 17.5, + "bbox_fs": [ + 105, + 306, + 505, + 331 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 336, + 506, + 360 + ], + "lines": [ + { + "bbox": [ + 107, + 337, + 496, + 349 + ], + "spans": [ + { + "bbox": [ + 107, + 337, + 496, + 349 + ], + "score": 1.0, + "content": "Axel BΓΆhm, Michael Sedlmayer, ErnΓΆ Robert Csetnek, and Radu Ioan BoΒΈt. Two steps at a time β€”", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 115, + 347, + 488, + 360 + ], + "spans": [ + { + "bbox": [ + 115, + 347, + 488, + 360 + ], + "score": 1.0, + "content": "taking GAN training in stride with Tseng’s method. arXiv preprint arXiv:2006.09033, 2020.", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 19.5, + "bbox_fs": [ + 107, + 337, + 496, + 360 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 366, + 506, + 400 + ], + "lines": [ + { + "bbox": [ + 105, + 366, + 506, + 379 + ], + "spans": [ + { + "bbox": [ + 105, + 366, + 506, + 379 + ], + "score": 1.0, + "content": "Radu Ioan Bot, Panayotis Mertikopoulos, Mathias Staudigl, and Phan Tu Vuong. Forward-backward-", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 116, + 376, + 505, + 390 + ], + "spans": [ + { + "bbox": [ + 116, + 376, + 505, + 390 + ], + "score": 1.0, + "content": "forward methods with variance reduction for stochastic variational inequalities. 
arXiv preprint", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 115, + 388, + 219, + 399 + ], + "spans": [ + { + "bbox": [ + 115, + 388, + 219, + 399 + ], + "score": 1.0, + "content": "arXiv:1902.03355, 2019.", + "type": "text" + } + ], + "index": 23 + } + ], + "index": 22, + "bbox_fs": [ + 105, + 366, + 506, + 399 + ] + }, + { + "type": "text", + "bbox": [ + 105, + 406, + 504, + 430 + ], + "lines": [ + { + "bbox": [ + 106, + 407, + 504, + 419 + ], + "spans": [ + { + "bbox": [ + 106, + 407, + 504, + 419 + ], + "score": 1.0, + "content": "Luis M BriceΓ±o-Arias and Patrick L Combettes. A monotone+skew splitting model for composite", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 115, + 418, + 473, + 430 + ], + "spans": [ + { + "bbox": [ + 115, + 418, + 473, + 430 + ], + "score": 1.0, + "content": "monotone inclusions in duality. SIAM Journal on Optimization, 21(4):1230–1250, 2011.", + "type": "text" + } + ], + "index": 25 + } + ], + "index": 24.5, + "bbox_fs": [ + 106, + 407, + 504, + 430 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 436, + 503, + 471 + ], + "lines": [ + { + "bbox": [ + 105, + 435, + 505, + 450 + ], + "spans": [ + { + "bbox": [ + 105, + 435, + 505, + 450 + ], + "score": 1.0, + "content": "Luis M BriceΓ±o-Arias and Patrick L Combettes. Monotone operator methods for Nash equilibria", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 115, + 447, + 505, + 461 + ], + "spans": [ + { + "bbox": [ + 115, + 447, + 505, + 461 + ], + "score": 1.0, + "content": "in non-potential games. In Computational and Analytical Mathematics, volume 50 of Springer", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 115, + 458, + 409, + 471 + ], + "spans": [ + { + "bbox": [ + 115, + 458, + 409, + 471 + ], + "score": 1.0, + "content": "Proceedings in Mathematics and Statistics, pp. 143–159. 
Springer, 2013.", + "type": "text" + } + ], + "index": 28 + } + ], + "index": 27, + "bbox_fs": [ + 105, + 435, + 505, + 471 + ] + }, + { + "type": "text", + "bbox": [ + 104, + 477, + 505, + 500 + ], + "lines": [ + { + "bbox": [ + 105, + 475, + 505, + 490 + ], + "spans": [ + { + "bbox": [ + 105, + 475, + 505, + 490 + ], + "score": 1.0, + "content": "L Elisa Celis and Vijay Keswani. Improved adversarial learning for fair classification. arXiv preprint", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 115, + 488, + 219, + 500 + ], + "spans": [ + { + "bbox": [ + 115, + 488, + 219, + 500 + ], + "score": 1.0, + "content": "arXiv:1901.10443, 2019.", + "type": "text" + } + ], + "index": 30 + } + ], + "index": 29.5, + "bbox_fs": [ + 105, + 475, + 505, + 500 + ] + }, + { + "type": "text", + "bbox": [ + 108, + 506, + 504, + 540 + ], + "lines": [ + { + "bbox": [ + 106, + 507, + 506, + 519 + ], + "spans": [ + { + "bbox": [ + 106, + 507, + 506, + 519 + ], + "score": 1.0, + "content": "Chih-Chung Chang and Chih-Jen Lin. LIBSVM: A library for support vector machines. ACM", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 115, + 518, + 506, + 529 + ], + "spans": [ + { + "bbox": [ + 115, + 518, + 506, + 529 + ], + "score": 1.0, + "content": "Transactions on Intelligent Systems and Technology, 2:27:1–27:27, 2011. 
Software available at", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 115, + 528, + 361, + 541 + ], + "spans": [ + { + "bbox": [ + 115, + 528, + 361, + 541 + ], + "score": 1.0, + "content": "http://www.csie.ntu.edu.tw/~cjlin/libsvm.", + "type": "text" + } + ], + "index": 33 + } + ], + "index": 32, + "bbox_fs": [ + 106, + 507, + 506, + 541 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 546, + 504, + 581 + ], + "lines": [ + { + "bbox": [ + 105, + 546, + 506, + 561 + ], + "spans": [ + { + "bbox": [ + 105, + 546, + 506, + 561 + ], + "score": 1.0, + "content": "Tatjana Chavdarova, Matteo Pagliardini, Sebastian U Stich, FranΓ§ois Fleuret, and Martin Jaggi.", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 115, + 558, + 507, + 571 + ], + "spans": [ + { + "bbox": [ + 115, + 558, + 507, + 571 + ], + "score": 1.0, + "content": "Taming GANs with lookahead-minmax. In International Conference on Learning Representations,", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 115, + 569, + 427, + 582 + ], + "spans": [ + { + "bbox": [ + 115, + 569, + 351, + 582 + ], + "score": 1.0, + "content": "2021. URL https://openreview.net/forum?id", + "type": "text" + }, + { + "bbox": [ + 351, + 570, + 357, + 578 + ], + "score": 0.37, + "content": "=", + "type": "inline_equation" + }, + { + "bbox": [ + 358, + 569, + 427, + 582 + ], + "score": 1.0, + "content": "ZW0yXJyNmoG.", + "type": "text" + } + ], + "index": 36 + } + ], + "index": 35, + "bbox_fs": [ + 105, + 546, + 507, + 582 + ] + }, + { + "type": "text", + "bbox": [ + 104, + 587, + 504, + 610 + ], + "lines": [ + { + "bbox": [ + 105, + 587, + 505, + 601 + ], + "spans": [ + { + "bbox": [ + 105, + 587, + 505, + 601 + ], + "score": 1.0, + "content": "Patrick L. Combettes and Jonathan Eckstein. 
Asynchronous block-iterative primal-dual decomposition", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 116, + 598, + 476, + 611 + ], + "spans": [ + { + "bbox": [ + 116, + 598, + 476, + 611 + ], + "score": 1.0, + "content": "methods for monotone inclusions. Mathematical Programming, 168(1-2):645–672, 2018.", + "type": "text" + } + ], + "index": 38 + } + ], + "index": 37.5, + "bbox_fs": [ + 105, + 587, + 505, + 611 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 617, + 506, + 662 + ], + "lines": [ + { + "bbox": [ + 105, + 616, + 507, + 631 + ], + "spans": [ + { + "bbox": [ + 105, + 616, + 507, + 631 + ], + "score": 1.0, + "content": "Patrick L Combettes and Jean-Christophe Pesquet. Proximal splitting methods in signal processing.", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 116, + 628, + 506, + 641 + ], + "spans": [ + { + "bbox": [ + 116, + 628, + 506, + 641 + ], + "score": 1.0, + "content": "In H.H. Bauschke, R.S.S. Burachik, P.L. Combettes, V. Elser, D.R. Luke, and H. Wolkowicz (eds.),", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 114, + 639, + 507, + 653 + ], + "spans": [ + { + "bbox": [ + 114, + 639, + 507, + 653 + ], + "score": 1.0, + "content": "Fixed-Point Algorithms for Inverse Problems in Science and Engineering, pp. 185–212. Springer,", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 115, + 650, + 142, + 662 + ], + "spans": [ + { + "bbox": [ + 115, + 650, + 142, + 662 + ], + "score": 1.0, + "content": "2011.", + "type": "text" + } + ], + "index": 42 + } + ], + "index": 40.5, + "bbox_fs": [ + 105, + 616, + 507, + 662 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 668, + 504, + 703 + ], + "lines": [ + { + "bbox": [ + 105, + 667, + 505, + 682 + ], + "spans": [ + { + "bbox": [ + 105, + 667, + 505, + 682 + ], + "score": 1.0, + "content": "Patrick L Combettes and Jean-Christophe Pesquet. 
Primal-dual splitting algorithm for solving", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 115, + 679, + 505, + 693 + ], + "spans": [ + { + "bbox": [ + 115, + 679, + 505, + 693 + ], + "score": 1.0, + "content": "inclusions with mixtures of composite, Lipschitzian, and parallel-sum type monotone operators.", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 115, + 691, + 352, + 703 + ], + "spans": [ + { + "bbox": [ + 115, + 691, + 352, + 703 + ], + "score": 1.0, + "content": "Set-Valued and variational analysis, 20(2):307–330, 2012.", + "type": "text" + } + ], + "index": 45 + } + ], + "index": 44, + "bbox_fs": [ + 105, + 667, + 505, + 703 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 709, + 502, + 732 + ], + "lines": [ + { + "bbox": [ + 106, + 709, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 106, + 709, + 505, + 722 + ], + "score": 1.0, + "content": "Patrick L Combettes and Jean-Christophe Pesquet. Stochastic quasi-FejΓ©r block-coordinate fixed", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 115, + 721, + 503, + 732 + ], + "spans": [ + { + "bbox": [ + 115, + 721, + 503, + 732 + ], + "score": 1.0, + "content": "point iterations with random sweeping. SIAM Journal on Optimization, 25(2):1221–1248, 2015.", + "type": "text" + } + ], + "index": 47 + } + ], + "index": 46.5, + "bbox_fs": [ + 106, + 709, + 505, + 732 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 108, + 82, + 505, + 116 + ], + "lines": [ + { + "bbox": [ + 106, + 82, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 82, + 505, + 95 + ], + "score": 1.0, + "content": "Constantinos Daskalakis, Andrew Ilyas, Vasilis Syrgkanis, and Haoyang Zeng. Training GANs", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 115, + 93, + 507, + 107 + ], + "spans": [ + { + "bbox": [ + 115, + 93, + 507, + 107 + ], + "score": 1.0, + "content": "with optimism. In International Conference on Learning Representations, 2018. 
URL https:", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 115, + 104, + 330, + 117 + ], + "spans": [ + { + "bbox": [ + 115, + 104, + 265, + 117 + ], + "score": 1.0, + "content": "//openreview.net/forum?id", + "type": "text" + }, + { + "bbox": [ + 266, + 106, + 273, + 114 + ], + "score": 0.45, + "content": "{ . } =", + "type": "inline_equation" + }, + { + "bbox": [ + 273, + 104, + 330, + 117 + ], + "score": 1.0, + "content": "SJJySbbAZ.", + "type": "text" + } + ], + "index": 2 + } + ], + "index": 1 + }, + { + "type": "text", + "bbox": [ + 107, + 124, + 504, + 148 + ], + "lines": [ + { + "bbox": [ + 105, + 123, + 505, + 138 + ], + "spans": [ + { + "bbox": [ + 105, + 123, + 505, + 138 + ], + "score": 1.0, + "content": "Damek Davis and Wotao Yin. A three-operator splitting scheme and its optimization applications.", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 116, + 136, + 353, + 148 + ], + "spans": [ + { + "bbox": [ + 116, + 136, + 353, + 148 + ], + "score": 1.0, + "content": "Set-Valued and Variational Analysis, 25(4):829–858, 2017.", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 3.5 + }, + { + "type": "text", + "bbox": [ + 106, + 156, + 506, + 190 + ], + "lines": [ + { + "bbox": [ + 105, + 156, + 505, + 168 + ], + "spans": [ + { + "bbox": [ + 105, + 156, + 505, + 168 + ], + "score": 1.0, + "content": "Jelena Diakonikolas. Halpern iteration for near-optimal and parameter-free monotone inclusion and", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 116, + 168, + 507, + 180 + ], + "spans": [ + { + "bbox": [ + 116, + 168, + 507, + 180 + ], + "score": 1.0, + "content": "strong solutions to variational inequalities. In Conference on Learning Theory, pp. 
1428–1451.", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 116, + 178, + 173, + 190 + ], + "spans": [ + { + "bbox": [ + 116, + 178, + 173, + 190 + ], + "score": 1.0, + "content": "PMLR, 2020.", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 6 + }, + { + "type": "text", + "bbox": [ + 105, + 198, + 505, + 222 + ], + "lines": [ + { + "bbox": [ + 105, + 198, + 507, + 212 + ], + "spans": [ + { + "bbox": [ + 105, + 198, + 507, + 212 + ], + "score": 1.0, + "content": "Dheeru Dua and Casey Graff. UCI machine learning repository, 2017. URL http://archive.", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 116, + 210, + 205, + 222 + ], + "spans": [ + { + "bbox": [ + 116, + 210, + 205, + 222 + ], + "score": 1.0, + "content": "ics.uci.edu/ml.", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 8.5 + }, + { + "type": "text", + "bbox": [ + 106, + 230, + 505, + 265 + ], + "lines": [ + { + "bbox": [ + 105, + 230, + 505, + 244 + ], + "spans": [ + { + "bbox": [ + 105, + 230, + 505, + 244 + ], + "score": 1.0, + "content": "Jonathan Eckstein. A simplified form of block-iterative operator splitting and an asynchronous", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 115, + 241, + 506, + 255 + ], + "spans": [ + { + "bbox": [ + 115, + 241, + 506, + 255 + ], + "score": 1.0, + "content": "algorithm resembling the multi-block alternating direction method of multipliers. 
Journal of", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 116, + 253, + 369, + 265 + ], + "spans": [ + { + "bbox": [ + 116, + 253, + 369, + 265 + ], + "score": 1.0, + "content": "Optimization Theory and Applications, 173(1):155–182, 2017.", + "type": "text" + } + ], + "index": 12 + } + ], + "index": 11 + }, + { + "type": "text", + "bbox": [ + 106, + 272, + 505, + 296 + ], + "lines": [ + { + "bbox": [ + 106, + 273, + 506, + 286 + ], + "spans": [ + { + "bbox": [ + 106, + 273, + 506, + 286 + ], + "score": 1.0, + "content": "Jonathan Eckstein and Benar Fux Svaiter. A family of projective splitting methods for the sum of", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 115, + 284, + 469, + 296 + ], + "spans": [ + { + "bbox": [ + 115, + 284, + 469, + 296 + ], + "score": 1.0, + "content": "two maximal monotone operators. Mathematical Programming, 111(1):173–199, 2008.", + "type": "text" + } + ], + "index": 14 + } + ], + "index": 13.5 + }, + { + "type": "text", + "bbox": [ + 106, + 304, + 504, + 327 + ], + "lines": [ + { + "bbox": [ + 105, + 305, + 505, + 318 + ], + "spans": [ + { + "bbox": [ + 105, + 305, + 505, + 318 + ], + "score": 1.0, + "content": "Jonathan Eckstein and Benar Fux Svaiter. General projective splitting methods for sums of maximal", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 115, + 316, + 469, + 328 + ], + "spans": [ + { + "bbox": [ + 115, + 316, + 469, + 328 + ], + "score": 1.0, + "content": "monotone operators. SIAM Journal on Control and Optimization, 48(2):787–811, 2009.", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 15.5 + }, + { + "type": "text", + "bbox": [ + 106, + 335, + 505, + 359 + ], + "lines": [ + { + "bbox": [ + 105, + 334, + 505, + 350 + ], + "spans": [ + { + "bbox": [ + 105, + 334, + 505, + 350 + ], + "score": 1.0, + "content": "Harrison Edwards and Amos Storkey. Censoring representations with an adversary. 
arXiv preprint", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 116, + 347, + 219, + 358 + ], + "spans": [ + { + "bbox": [ + 116, + 347, + 219, + 358 + ], + "score": 1.0, + "content": "arXiv:1511.05897, 2015.", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 17.5 + }, + { + "type": "text", + "bbox": [ + 107, + 367, + 505, + 402 + ], + "lines": [ + { + "bbox": [ + 105, + 367, + 506, + 381 + ], + "spans": [ + { + "bbox": [ + 105, + 367, + 506, + 381 + ], + "score": 1.0, + "content": "Daniel Gabay. Applications of the method of multipliers to variational inequalities. In M. Fortin and", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 115, + 379, + 505, + 392 + ], + "spans": [ + { + "bbox": [ + 115, + 379, + 505, + 392 + ], + "score": 1.0, + "content": "R. Glowinski (eds.), Augmented Lagrangian Methods: Applications to the Solution of Boundary", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 115, + 389, + 426, + 402 + ], + "spans": [ + { + "bbox": [ + 115, + 389, + 426, + 402 + ], + "score": 1.0, + "content": "Value Problems, chapter IX, pp. 299–340. North-Holland, Amsterdam, 1983.", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 20 + }, + { + "type": "text", + "bbox": [ + 106, + 410, + 506, + 455 + ], + "lines": [ + { + "bbox": [ + 106, + 411, + 506, + 423 + ], + "spans": [ + { + "bbox": [ + 106, + 411, + 506, + 423 + ], + "score": 1.0, + "content": "Gauthier Gidel, Hugo Berard, GaΓ«tan Vignoud, Pascal Vincent, and Simon Lacoste-Julien. A", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 115, + 422, + 507, + 434 + ], + "spans": [ + { + "bbox": [ + 115, + 422, + 507, + 434 + ], + "score": 1.0, + "content": "variational inequality perspective on generative adversarial networks. 
In International Confer-", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 115, + 432, + 505, + 445 + ], + "spans": [ + { + "bbox": [ + 115, + 432, + 497, + 445 + ], + "score": 1.0, + "content": "ence on Learning Representations, 2019. URL https://openreview.net/forum?id", + "type": "text" + }, + { + "bbox": [ + 497, + 434, + 505, + 442 + ], + "score": 0.39, + "content": "=", + "type": "inline_equation" + } + ], + "index": 24 + }, + { + "bbox": [ + 116, + 443, + 181, + 455 + ], + "spans": [ + { + "bbox": [ + 116, + 443, + 181, + 455 + ], + "score": 1.0, + "content": "r1laEnA5Ym.", + "type": "text" + } + ], + "index": 25 + } + ], + "index": 23.5 + }, + { + "type": "text", + "bbox": [ + 107, + 463, + 506, + 508 + ], + "lines": [ + { + "bbox": [ + 105, + 464, + 507, + 477 + ], + "spans": [ + { + "bbox": [ + 105, + 464, + 507, + 477 + ], + "score": 1.0, + "content": "Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair,", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 114, + 473, + 507, + 489 + ], + "spans": [ + { + "bbox": [ + 114, + 473, + 507, + 489 + ], + "score": 1.0, + "content": "Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In Z. Ghahramani, M. Welling,", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 115, + 485, + 506, + 500 + ], + "spans": [ + { + "bbox": [ + 115, + 485, + 506, + 500 + ], + "score": 1.0, + "content": "C. Cortes, N. Lawrence, and K. Q. Weinberger (eds.), Advances in Neural Information Processing", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 115, + 497, + 302, + 509 + ], + "spans": [ + { + "bbox": [ + 115, + 497, + 302, + 509 + ], + "score": 1.0, + "content": "Systems, volume 27. 
Curran Associates, 2014.", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 27.5 + }, + { + "type": "text", + "bbox": [ + 107, + 516, + 505, + 551 + ], + "lines": [ + { + "bbox": [ + 105, + 516, + 505, + 530 + ], + "spans": [ + { + "bbox": [ + 105, + 516, + 505, + 530 + ], + "score": 1.0, + "content": "Paulina Grnarova, Yannic Kilcher, Kfir Y Levy, Aurelien Lucchi, and Thomas Hofmann. Generative", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 115, + 529, + 506, + 541 + ], + "spans": [ + { + "bbox": [ + 115, + 529, + 506, + 541 + ], + "score": 1.0, + "content": "minimization networks: Training GANs without competition. arXiv preprint arXiv:2103.12685,", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 115, + 537, + 143, + 552 + ], + "spans": [ + { + "bbox": [ + 115, + 537, + 143, + 552 + ], + "score": 1.0, + "content": "2021.", + "type": "text" + } + ], + "index": 32 + } + ], + "index": 31 + }, + { + "type": "text", + "bbox": [ + 107, + 560, + 506, + 593 + ], + "lines": [ + { + "bbox": [ + 105, + 559, + 506, + 573 + ], + "spans": [ + { + "bbox": [ + 105, + 559, + 506, + 573 + ], + "score": 1.0, + "content": "Patrick T Harker and Jong-Shi Pang. Finite-dimensional variational inequality and nonlinear comple-", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 115, + 571, + 506, + 584 + ], + "spans": [ + { + "bbox": [ + 115, + 571, + 506, + 584 + ], + "score": 1.0, + "content": "mentarity problems: a survey of theory, algorithms and applications. 
Mathematical programming,", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 115, + 582, + 205, + 594 + ], + "spans": [ + { + "bbox": [ + 115, + 582, + 205, + 594 + ], + "score": 1.0, + "content": "48(1):161–220, 1990.", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 34 + }, + { + "type": "text", + "bbox": [ + 107, + 602, + 506, + 647 + ], + "lines": [ + { + "bbox": [ + 106, + 602, + 505, + 615 + ], + "spans": [ + { + "bbox": [ + 106, + 602, + 505, + 615 + ], + "score": 1.0, + "content": "Yu-Guan Hsieh, Franck Iutzeler, JΓ©rΓ΄me Malick, and Panayotis Mertikopoulos. On the convergence", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 115, + 613, + 507, + 626 + ], + "spans": [ + { + "bbox": [ + 115, + 613, + 507, + 626 + ], + "score": 1.0, + "content": "of single-call stochastic extra-gradient methods. In H. Wallach, H. Larochelle, A. Beygelzimer,", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 114, + 624, + 507, + 638 + ], + "spans": [ + { + "bbox": [ + 114, + 624, + 507, + 638 + ], + "score": 1.0, + "content": "F. d'AlchΓ©-Buc, E. Fox, and R. Garnett (eds.), Advances in Neural Information Processing Systems,", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 116, + 636, + 266, + 647 + ], + "spans": [ + { + "bbox": [ + 116, + 636, + 266, + 647 + ], + "score": 1.0, + "content": "volume 32. Curran Associates, 2019.", + "type": "text" + } + ], + "index": 39 + } + ], + "index": 37.5 + }, + { + "type": "text", + "bbox": [ + 107, + 655, + 505, + 701 + ], + "lines": [ + { + "bbox": [ + 106, + 654, + 507, + 669 + ], + "spans": [ + { + "bbox": [ + 106, + 654, + 507, + 669 + ], + "score": 1.0, + "content": "Yu-Guan Hsieh, Franck Iutzeler, JΓ©rΓ΄me Malick, and Panayotis Mertikopoulos. 
Explore aggres-", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 114, + 666, + 507, + 680 + ], + "spans": [ + { + "bbox": [ + 114, + 666, + 507, + 680 + ], + "score": 1.0, + "content": "sively, update conservatively: Stochastic extragradient methods with variable stepsize scaling.", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 115, + 677, + 506, + 690 + ], + "spans": [ + { + "bbox": [ + 115, + 677, + 506, + 690 + ], + "score": 1.0, + "content": "In H. Larochelle, M. Ranzato, R. Hadsell, M. F. Balcan, and H. Lin (eds.), Advances in Neural", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 115, + 689, + 474, + 702 + ], + "spans": [ + { + "bbox": [ + 115, + 689, + 474, + 702 + ], + "score": 1.0, + "content": "Information Processing Systems, volume 33, pp. 16223–16234. Curran Associates, 2020.", + "type": "text" + } + ], + "index": 43 + } + ], + "index": 41.5 + }, + { + "type": "text", + "bbox": [ + 107, + 709, + 504, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 708, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 505, + 723 + ], + "score": 1.0, + "content": "Chong Huang, Peter Kairouz, Xiao Chen, Lalitha Sankar, and Ram Rajagopal. Context-aware", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 115, + 721, + 352, + 732 + ], + "spans": [ + { + "bbox": [ + 115, + 721, + 352, + 732 + ], + "score": 1.0, + "content": "generative adversarial privacy. 
Entropy, 19(12):656, 2017.", + "type": "text" + } + ], + "index": 45 + } + ], + "index": 44.5 + } + ], + "page_idx": 10, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 300, + 751, + 309, + 760 + ], + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 765 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 765 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 15, + "width": 13 + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 107, + 27, + 308, + 37 + ], + "lines": [ + { + "bbox": [ + 107, + 26, + 308, + 38 + ], + "spans": [ + { + "bbox": [ + 107, + 26, + 308, + 38 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 108, + 82, + 505, + 116 + ], + "lines": [ + { + "bbox": [ + 106, + 82, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 82, + 505, + 95 + ], + "score": 1.0, + "content": "Constantinos Daskalakis, Andrew Ilyas, Vasilis Syrgkanis, and Haoyang Zeng. Training GANs", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 115, + 93, + 507, + 107 + ], + "spans": [ + { + "bbox": [ + 115, + 93, + 507, + 107 + ], + "score": 1.0, + "content": "with optimism. In International Conference on Learning Representations, 2018. URL https:", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 115, + 104, + 330, + 117 + ], + "spans": [ + { + "bbox": [ + 115, + 104, + 265, + 117 + ], + "score": 1.0, + "content": "//openreview.net/forum?id", + "type": "text" + }, + { + "bbox": [ + 266, + 106, + 273, + 114 + ], + "score": 0.45, + "content": "{ . 
} =", + "type": "inline_equation" + }, + { + "bbox": [ + 273, + 104, + 330, + 117 + ], + "score": 1.0, + "content": "SJJySbbAZ.", + "type": "text" + } + ], + "index": 2 + } + ], + "index": 1, + "bbox_fs": [ + 106, + 82, + 507, + 117 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 124, + 504, + 148 + ], + "lines": [ + { + "bbox": [ + 105, + 123, + 505, + 138 + ], + "spans": [ + { + "bbox": [ + 105, + 123, + 505, + 138 + ], + "score": 1.0, + "content": "Damek Davis and Wotao Yin. A three-operator splitting scheme and its optimization applications.", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 116, + 136, + 353, + 148 + ], + "spans": [ + { + "bbox": [ + 116, + 136, + 353, + 148 + ], + "score": 1.0, + "content": "Set-Valued and Variational Analysis, 25(4):829–858, 2017.", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 3.5, + "bbox_fs": [ + 105, + 123, + 505, + 148 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 156, + 506, + 190 + ], + "lines": [ + { + "bbox": [ + 105, + 156, + 505, + 168 + ], + "spans": [ + { + "bbox": [ + 105, + 156, + 505, + 168 + ], + "score": 1.0, + "content": "Jelena Diakonikolas. Halpern iteration for near-optimal and parameter-free monotone inclusion and", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 116, + 168, + 507, + 180 + ], + "spans": [ + { + "bbox": [ + 116, + 168, + 507, + 180 + ], + "score": 1.0, + "content": "strong solutions to variational inequalities. In Conference on Learning Theory, pp. 
1428–1451.", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 116, + 178, + 173, + 190 + ], + "spans": [ + { + "bbox": [ + 116, + 178, + 173, + 190 + ], + "score": 1.0, + "content": "PMLR, 2020.", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 6, + "bbox_fs": [ + 105, + 156, + 507, + 190 + ] + }, + { + "type": "text", + "bbox": [ + 105, + 198, + 505, + 222 + ], + "lines": [ + { + "bbox": [ + 105, + 198, + 507, + 212 + ], + "spans": [ + { + "bbox": [ + 105, + 198, + 507, + 212 + ], + "score": 1.0, + "content": "Dheeru Dua and Casey Graff. UCI machine learning repository, 2017. URL http://archive.", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 116, + 210, + 205, + 222 + ], + "spans": [ + { + "bbox": [ + 116, + 210, + 205, + 222 + ], + "score": 1.0, + "content": "ics.uci.edu/ml.", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 8.5, + "bbox_fs": [ + 105, + 198, + 507, + 222 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 230, + 505, + 265 + ], + "lines": [ + { + "bbox": [ + 105, + 230, + 505, + 244 + ], + "spans": [ + { + "bbox": [ + 105, + 230, + 505, + 244 + ], + "score": 1.0, + "content": "Jonathan Eckstein. A simplified form of block-iterative operator splitting and an asynchronous", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 115, + 241, + 506, + 255 + ], + "spans": [ + { + "bbox": [ + 115, + 241, + 506, + 255 + ], + "score": 1.0, + "content": "algorithm resembling the multi-block alternating direction method of multipliers. 
Journal of", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 116, + 253, + 369, + 265 + ], + "spans": [ + { + "bbox": [ + 116, + 253, + 369, + 265 + ], + "score": 1.0, + "content": "Optimization Theory and Applications, 173(1):155–182, 2017.", + "type": "text" + } + ], + "index": 12 + } + ], + "index": 11, + "bbox_fs": [ + 105, + 230, + 506, + 265 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 272, + 505, + 296 + ], + "lines": [ + { + "bbox": [ + 106, + 273, + 506, + 286 + ], + "spans": [ + { + "bbox": [ + 106, + 273, + 506, + 286 + ], + "score": 1.0, + "content": "Jonathan Eckstein and Benar Fux Svaiter. A family of projective splitting methods for the sum of", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 115, + 284, + 469, + 296 + ], + "spans": [ + { + "bbox": [ + 115, + 284, + 469, + 296 + ], + "score": 1.0, + "content": "two maximal monotone operators. Mathematical Programming, 111(1):173–199, 2008.", + "type": "text" + } + ], + "index": 14 + } + ], + "index": 13.5, + "bbox_fs": [ + 106, + 273, + 506, + 296 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 304, + 504, + 327 + ], + "lines": [ + { + "bbox": [ + 105, + 305, + 505, + 318 + ], + "spans": [ + { + "bbox": [ + 105, + 305, + 505, + 318 + ], + "score": 1.0, + "content": "Jonathan Eckstein and Benar Fux Svaiter. General projective splitting methods for sums of maximal", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 115, + 316, + 469, + 328 + ], + "spans": [ + { + "bbox": [ + 115, + 316, + 469, + 328 + ], + "score": 1.0, + "content": "monotone operators. 
SIAM Journal on Control and Optimization, 48(2):787–811, 2009.", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 15.5, + "bbox_fs": [ + 105, + 305, + 505, + 328 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 335, + 505, + 359 + ], + "lines": [ + { + "bbox": [ + 105, + 334, + 505, + 350 + ], + "spans": [ + { + "bbox": [ + 105, + 334, + 505, + 350 + ], + "score": 1.0, + "content": "Harrison Edwards and Amos Storkey. Censoring representations with an adversary. arXiv preprint", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 116, + 347, + 219, + 358 + ], + "spans": [ + { + "bbox": [ + 116, + 347, + 219, + 358 + ], + "score": 1.0, + "content": "arXiv:1511.05897, 2015.", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 17.5, + "bbox_fs": [ + 105, + 334, + 505, + 358 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 367, + 505, + 402 + ], + "lines": [ + { + "bbox": [ + 105, + 367, + 506, + 381 + ], + "spans": [ + { + "bbox": [ + 105, + 367, + 506, + 381 + ], + "score": 1.0, + "content": "Daniel Gabay. Applications of the method of multipliers to variational inequalities. In M. Fortin and", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 115, + 379, + 505, + 392 + ], + "spans": [ + { + "bbox": [ + 115, + 379, + 505, + 392 + ], + "score": 1.0, + "content": "R. Glowinski (eds.), Augmented Lagrangian Methods: Applications to the Solution of Boundary", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 115, + 389, + 426, + 402 + ], + "spans": [ + { + "bbox": [ + 115, + 389, + 426, + 402 + ], + "score": 1.0, + "content": "Value Problems, chapter IX, pp. 299–340. 
North-Holland, Amsterdam, 1983.", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 20, + "bbox_fs": [ + 105, + 367, + 506, + 402 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 410, + 506, + 455 + ], + "lines": [ + { + "bbox": [ + 106, + 411, + 506, + 423 + ], + "spans": [ + { + "bbox": [ + 106, + 411, + 506, + 423 + ], + "score": 1.0, + "content": "Gauthier Gidel, Hugo Berard, GaΓ«tan Vignoud, Pascal Vincent, and Simon Lacoste-Julien. A", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 115, + 422, + 507, + 434 + ], + "spans": [ + { + "bbox": [ + 115, + 422, + 507, + 434 + ], + "score": 1.0, + "content": "variational inequality perspective on generative adversarial networks. In International Confer-", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 115, + 432, + 505, + 445 + ], + "spans": [ + { + "bbox": [ + 115, + 432, + 497, + 445 + ], + "score": 1.0, + "content": "ence on Learning Representations, 2019. URL https://openreview.net/forum?id", + "type": "text" + }, + { + "bbox": [ + 497, + 434, + 505, + 442 + ], + "score": 0.39, + "content": "=", + "type": "inline_equation" + } + ], + "index": 24 + }, + { + "bbox": [ + 116, + 443, + 181, + 455 + ], + "spans": [ + { + "bbox": [ + 116, + 443, + 181, + 455 + ], + "score": 1.0, + "content": "r1laEnA5Ym.", + "type": "text" + } + ], + "index": 25 + } + ], + "index": 23.5, + "bbox_fs": [ + 106, + 411, + 507, + 455 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 463, + 506, + 508 + ], + "lines": [ + { + "bbox": [ + 105, + 464, + 507, + 477 + ], + "spans": [ + { + "bbox": [ + 105, + 464, + 507, + 477 + ], + "score": 1.0, + "content": "Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair,", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 114, + 473, + 507, + 489 + ], + "spans": [ + { + "bbox": [ + 114, + 473, + 507, + 489 + ], + "score": 1.0, + "content": "Aaron Courville, and Yoshua Bengio. Generative adversarial nets. 
In Z. Ghahramani, M. Welling,", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 115, + 485, + 506, + 500 + ], + "spans": [ + { + "bbox": [ + 115, + 485, + 506, + 500 + ], + "score": 1.0, + "content": "C. Cortes, N. Lawrence, and K. Q. Weinberger (eds.), Advances in Neural Information Processing", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 115, + 497, + 302, + 509 + ], + "spans": [ + { + "bbox": [ + 115, + 497, + 302, + 509 + ], + "score": 1.0, + "content": "Systems, volume 27. Curran Associates, 2014.", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 27.5, + "bbox_fs": [ + 105, + 464, + 507, + 509 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 516, + 505, + 551 + ], + "lines": [ + { + "bbox": [ + 105, + 516, + 505, + 530 + ], + "spans": [ + { + "bbox": [ + 105, + 516, + 505, + 530 + ], + "score": 1.0, + "content": "Paulina Grnarova, Yannic Kilcher, Kfir Y Levy, Aurelien Lucchi, and Thomas Hofmann. Generative", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 115, + 529, + 506, + 541 + ], + "spans": [ + { + "bbox": [ + 115, + 529, + 506, + 541 + ], + "score": 1.0, + "content": "minimization networks: Training GANs without competition. arXiv preprint arXiv:2103.12685,", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 115, + 537, + 143, + 552 + ], + "spans": [ + { + "bbox": [ + 115, + 537, + 143, + 552 + ], + "score": 1.0, + "content": "2021.", + "type": "text" + } + ], + "index": 32 + } + ], + "index": 31, + "bbox_fs": [ + 105, + 516, + 506, + 552 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 560, + 506, + 593 + ], + "lines": [ + { + "bbox": [ + 105, + 559, + 506, + 573 + ], + "spans": [ + { + "bbox": [ + 105, + 559, + 506, + 573 + ], + "score": 1.0, + "content": "Patrick T Harker and Jong-Shi Pang. 
Finite-dimensional variational inequality and nonlinear comple-", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 115, + 571, + 506, + 584 + ], + "spans": [ + { + "bbox": [ + 115, + 571, + 506, + 584 + ], + "score": 1.0, + "content": "mentarity problems: a survey of theory, algorithms and applications. Mathematical programming,", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 115, + 582, + 205, + 594 + ], + "spans": [ + { + "bbox": [ + 115, + 582, + 205, + 594 + ], + "score": 1.0, + "content": "48(1):161–220, 1990.", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 34, + "bbox_fs": [ + 105, + 559, + 506, + 594 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 602, + 506, + 647 + ], + "lines": [ + { + "bbox": [ + 106, + 602, + 505, + 615 + ], + "spans": [ + { + "bbox": [ + 106, + 602, + 505, + 615 + ], + "score": 1.0, + "content": "Yu-Guan Hsieh, Franck Iutzeler, JΓ©rΓ΄me Malick, and Panayotis Mertikopoulos. On the convergence", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 115, + 613, + 507, + 626 + ], + "spans": [ + { + "bbox": [ + 115, + 613, + 507, + 626 + ], + "score": 1.0, + "content": "of single-call stochastic extra-gradient methods. In H. Wallach, H. Larochelle, A. Beygelzimer,", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 114, + 624, + 507, + 638 + ], + "spans": [ + { + "bbox": [ + 114, + 624, + 507, + 638 + ], + "score": 1.0, + "content": "F. d'AlchΓ©-Buc, E. Fox, and R. Garnett (eds.), Advances in Neural Information Processing Systems,", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 116, + 636, + 266, + 647 + ], + "spans": [ + { + "bbox": [ + 116, + 636, + 266, + 647 + ], + "score": 1.0, + "content": "volume 32. 
Curran Associates, 2019.", + "type": "text" + } + ], + "index": 39 + } + ], + "index": 37.5, + "bbox_fs": [ + 106, + 602, + 507, + 647 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 655, + 505, + 701 + ], + "lines": [ + { + "bbox": [ + 106, + 654, + 507, + 669 + ], + "spans": [ + { + "bbox": [ + 106, + 654, + 507, + 669 + ], + "score": 1.0, + "content": "Yu-Guan Hsieh, Franck Iutzeler, JΓ©rΓ΄me Malick, and Panayotis Mertikopoulos. Explore aggres-", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 114, + 666, + 507, + 680 + ], + "spans": [ + { + "bbox": [ + 114, + 666, + 507, + 680 + ], + "score": 1.0, + "content": "sively, update conservatively: Stochastic extragradient methods with variable stepsize scaling.", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 115, + 677, + 506, + 690 + ], + "spans": [ + { + "bbox": [ + 115, + 677, + 506, + 690 + ], + "score": 1.0, + "content": "In H. Larochelle, M. Ranzato, R. Hadsell, M. F. Balcan, and H. Lin (eds.), Advances in Neural", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 115, + 689, + 474, + 702 + ], + "spans": [ + { + "bbox": [ + 115, + 689, + 474, + 702 + ], + "score": 1.0, + "content": "Information Processing Systems, volume 33, pp. 16223–16234. Curran Associates, 2020.", + "type": "text" + } + ], + "index": 43 + } + ], + "index": 41.5, + "bbox_fs": [ + 106, + 654, + 507, + 702 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 709, + 504, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 708, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 505, + 723 + ], + "score": 1.0, + "content": "Chong Huang, Peter Kairouz, Xiao Chen, Lalitha Sankar, and Ram Rajagopal. Context-aware", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 115, + 721, + 352, + 732 + ], + "spans": [ + { + "bbox": [ + 115, + 721, + 352, + 732 + ], + "score": 1.0, + "content": "generative adversarial privacy. 
Entropy, 19(12):656, 2017.", + "type": "text" + } + ], + "index": 45 + } + ], + "index": 44.5, + "bbox_fs": [ + 105, + 708, + 505, + 732 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 107, + 82, + 504, + 116 + ], + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 96 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 96 + ], + "score": 1.0, + "content": "Laurent Jacob, Guillaume Obozinski, and Jean-Philippe Vert. Group lasso with overlaps and graph", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 115, + 93, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 115, + 93, + 505, + 106 + ], + "score": 1.0, + "content": "lasso. In LΓ©on Bottou and Michael Littman (eds.), Proceedings of the 26th International Conference", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 115, + 104, + 398, + 118 + ], + "spans": [ + { + "bbox": [ + 115, + 104, + 398, + 118 + ], + "score": 1.0, + "content": "on Machine Learning, pp. 433–440, Montreal, June 2009. Omnipress.", + "type": "text" + } + ], + "index": 2 + } + ], + "index": 1 + }, + { + "type": "text", + "bbox": [ + 105, + 124, + 504, + 147 + ], + "lines": [ + { + "bbox": [ + 105, + 123, + 505, + 137 + ], + "spans": [ + { + "bbox": [ + 105, + 123, + 505, + 137 + ], + "score": 1.0, + "content": "Patrick R Johnstone and Jonathan Eckstein. Convergence rates for projective splitting. SIAM Journal", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 116, + 136, + 285, + 147 + ], + "spans": [ + { + "bbox": [ + 116, + 136, + 285, + 147 + ], + "score": 1.0, + "content": "on Optimization, 29(3):1931–1957, 2019.", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 3.5 + }, + { + "type": "text", + "bbox": [ + 106, + 155, + 504, + 178 + ], + "lines": [ + { + "bbox": [ + 105, + 155, + 505, + 168 + ], + "spans": [ + { + "bbox": [ + 105, + 155, + 505, + 168 + ], + "score": 1.0, + "content": "Patrick R Johnstone and Jonathan Eckstein. 
Projective splitting with forward steps only requires", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 116, + 166, + 343, + 179 + ], + "spans": [ + { + "bbox": [ + 116, + 166, + 343, + 179 + ], + "score": 1.0, + "content": "continuity. Optimization Letters, 14(1):229–247, 2020a.", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 5.5 + }, + { + "type": "text", + "bbox": [ + 107, + 186, + 504, + 209 + ], + "lines": [ + { + "bbox": [ + 105, + 186, + 506, + 200 + ], + "spans": [ + { + "bbox": [ + 105, + 186, + 506, + 200 + ], + "score": 1.0, + "content": "Patrick R Johnstone and Jonathan Eckstein. Projective splitting with forward steps. Mathematical", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 116, + 197, + 352, + 210 + ], + "spans": [ + { + "bbox": [ + 116, + 197, + 352, + 210 + ], + "score": 1.0, + "content": "Programming, 2020b. Published online, to appear in print.", + "type": "text" + } + ], + "index": 8 + } + ], + "index": 7.5 + }, + { + "type": "text", + "bbox": [ + 106, + 217, + 504, + 240 + ], + "lines": [ + { + "bbox": [ + 105, + 216, + 505, + 231 + ], + "spans": [ + { + "bbox": [ + 105, + 216, + 505, + 231 + ], + "score": 1.0, + "content": "Patrick R Johnstone and Jonathan Eckstein. Single-forward-step projective splitting: exploiting", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 116, + 228, + 451, + 240 + ], + "spans": [ + { + "bbox": [ + 116, + 228, + 451, + 240 + ], + "score": 1.0, + "content": "cocoercivity. Computational Optimization and Applications, 78(1):125–166, 2021.", + "type": "text" + } + ], + "index": 10 + } + ], + "index": 9.5 + }, + { + "type": "text", + "bbox": [ + 106, + 248, + 505, + 271 + ], + "lines": [ + { + "bbox": [ + 106, + 247, + 505, + 262 + ], + "spans": [ + { + "bbox": [ + 106, + 247, + 505, + 262 + ], + "score": 1.0, + "content": "Anatoli Juditsky, Arkadi Nemirovski, and Claire Tauvel. 
Solving variational inequalities with", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 115, + 259, + 406, + 272 + ], + "spans": [ + { + "bbox": [ + 115, + 259, + 406, + 272 + ], + "score": 1.0, + "content": "stochastic mirror-prox algorithm. Stochastic Systems, 1(1):17–58, 2011.", + "type": "text" + } + ], + "index": 12 + } + ], + "index": 11.5 + }, + { + "type": "text", + "bbox": [ + 105, + 279, + 504, + 302 + ], + "lines": [ + { + "bbox": [ + 105, + 277, + 505, + 292 + ], + "spans": [ + { + "bbox": [ + 105, + 277, + 505, + 292 + ], + "score": 1.0, + "content": "GM Korpelevich. Extragradient method for finding saddle points and other problems. Matekon, 13", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 115, + 290, + 187, + 303 + ], + "spans": [ + { + "bbox": [ + 115, + 290, + 187, + 303 + ], + "score": 1.0, + "content": "(4):35–49, 1977.", + "type": "text" + } + ], + "index": 14 + } + ], + "index": 13.5 + }, + { + "type": "text", + "bbox": [ + 107, + 309, + 506, + 355 + ], + "lines": [ + { + "bbox": [ + 106, + 311, + 506, + 322 + ], + "spans": [ + { + "bbox": [ + 106, + 311, + 506, + 322 + ], + "score": 1.0, + "content": "Daniel Kuhn, Peyman Mohajerin Esfahani, Viet Anh Nguyen, and Soroosh Shafieezadeh-Abadeh.", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 115, + 320, + 507, + 335 + ], + "spans": [ + { + "bbox": [ + 115, + 320, + 507, + 335 + ], + "score": 1.0, + "content": "Wasserstein distributionally robust optimization: Theory and applications in machine learning.", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 115, + 332, + 507, + 346 + ], + "spans": [ + { + "bbox": [ + 115, + 332, + 507, + 346 + ], + "score": 1.0, + "content": "In Serguei Netessine (ed.), Operations Research & Management Science in the Age of Analytics,", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 115, + 343, + 382, + 356 + ], + "spans": [ + { + "bbox": [ + 115, + 343, + 382, + 356 + ], + "score": 1.0, + 
"content": "Tutorials in Operations Research, pp. 130–166. INFORMS, 2019.", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 16.5 + }, + { + "type": "text", + "bbox": [ + 107, + 363, + 505, + 397 + ], + "lines": [ + { + "bbox": [ + 106, + 363, + 505, + 375 + ], + "spans": [ + { + "bbox": [ + 106, + 363, + 505, + 375 + ], + "score": 1.0, + "content": "Chris Junchi Li, Yaodong Yu, Nicolas Loizou, Gauthier Gidel, Yi Ma, Nicolas Le Roux, and Michael I", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 115, + 374, + 505, + 387 + ], + "spans": [ + { + "bbox": [ + 115, + 374, + 505, + 387 + ], + "score": 1.0, + "content": "Jordan. On the convergence of stochastic extragradient for bilinear games with restarted iteration", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 116, + 386, + 324, + 397 + ], + "spans": [ + { + "bbox": [ + 116, + 386, + 324, + 397 + ], + "score": 1.0, + "content": "averaging. arXiv preprint arXiv:2107.00464, 2021.", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 20 + }, + { + "type": "text", + "bbox": [ + 107, + 404, + 506, + 449 + ], + "lines": [ + { + "bbox": [ + 106, + 405, + 505, + 417 + ], + "spans": [ + { + "bbox": [ + 106, + 405, + 505, + 417 + ], + "score": 1.0, + "content": "Tianyi Lin, Chi Jin, and Michael Jordan. On gradient descent ascent for nonconvex-concave minimax", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 115, + 416, + 506, + 428 + ], + "spans": [ + { + "bbox": [ + 115, + 416, + 506, + 428 + ], + "score": 1.0, + "content": "problems. 
In Hal DaumΓ© III and Aarti Singh (eds.), Proceedings of the 37th International", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 115, + 426, + 507, + 441 + ], + "spans": [ + { + "bbox": [ + 115, + 426, + 507, + 441 + ], + "score": 1.0, + "content": "Conference on Machine Learning, volume 119 of Proceedings of Machine Learning Research, pp.", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 116, + 438, + 223, + 450 + ], + "spans": [ + { + "bbox": [ + 116, + 438, + 223, + 450 + ], + "score": 1.0, + "content": "6083–6093. PMLR, 2020.", + "type": "text" + } + ], + "index": 25 + } + ], + "index": 23.5 + }, + { + "type": "text", + "bbox": [ + 103, + 457, + 505, + 481 + ], + "lines": [ + { + "bbox": [ + 105, + 457, + 506, + 470 + ], + "spans": [ + { + "bbox": [ + 105, + 457, + 506, + 470 + ], + "score": 1.0, + "content": "Pierre-Louis Lions and Bertrand Mercier. Splitting algorithms for the sum of two nonlinear operators.", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 115, + 469, + 360, + 482 + ], + "spans": [ + { + "bbox": [ + 115, + 469, + 360, + 482 + ], + "score": 1.0, + "content": "SIAM Journal on Numerical Analysis, 16(6):964–979, 1979.", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 26.5 + }, + { + "type": "text", + "bbox": [ + 106, + 488, + 505, + 523 + ], + "lines": [ + { + "bbox": [ + 106, + 488, + 506, + 502 + ], + "spans": [ + { + "bbox": [ + 106, + 488, + 506, + 502 + ], + "score": 1.0, + "content": "Mingrui Liu, Hassan Rafique, Qihang Lin, and Tianbao Yang. First-order convergence theory for", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 116, + 500, + 505, + 512 + ], + "spans": [ + { + "bbox": [ + 116, + 500, + 505, + 512 + ], + "score": 1.0, + "content": "weakly-convex-weakly-concave min-max problems. 
Journal of Machine Learning Research, 22", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 116, + 511, + 191, + 523 + ], + "spans": [ + { + "bbox": [ + 116, + 511, + 191, + 523 + ], + "score": 1.0, + "content": "(169):1–34, 2021.", + "type": "text" + } + ], + "index": 30 + } + ], + "index": 29 + }, + { + "type": "text", + "bbox": [ + 107, + 530, + 506, + 564 + ], + "lines": [ + { + "bbox": [ + 105, + 530, + 506, + 544 + ], + "spans": [ + { + "bbox": [ + 105, + 530, + 506, + 544 + ], + "score": 1.0, + "content": "Nicolas Loizou, Hugo Berard, Alexia Jolicoeur-Martineau, Pascal Vincent, Simon Lacoste-Julien, and", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 116, + 542, + 506, + 554 + ], + "spans": [ + { + "bbox": [ + 116, + 542, + 506, + 554 + ], + "score": 1.0, + "content": "Ioannis Mitliagkas. Stochastic hamiltonian gradient methods for smooth games. In International", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 116, + 552, + 377, + 566 + ], + "spans": [ + { + "bbox": [ + 116, + 552, + 377, + 566 + ], + "score": 1.0, + "content": "Conference on Machine Learning, pp. 6370–6381. PMLR, 2020.", + "type": "text" + } + ], + "index": 33 + } + ], + "index": 32 + }, + { + "type": "text", + "bbox": [ + 107, + 572, + 506, + 606 + ], + "lines": [ + { + "bbox": [ + 106, + 573, + 506, + 585 + ], + "spans": [ + { + "bbox": [ + 106, + 573, + 506, + 585 + ], + "score": 1.0, + "content": "Nicolas Loizou, Hugo Berard, Gauthier Gidel, Ioannis Mitliagkas, and Simon Lacoste-Julien. 
Stochas-", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 116, + 585, + 505, + 597 + ], + "spans": [ + { + "bbox": [ + 116, + 585, + 505, + 597 + ], + "score": 1.0, + "content": "tic gradient descent-ascent and consensus optimization for smooth games: Convergence analysis", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 116, + 595, + 399, + 608 + ], + "spans": [ + { + "bbox": [ + 116, + 595, + 399, + 608 + ], + "score": 1.0, + "content": "under expected co-coercivity. arXiv preprint arXiv:2107.00052, 2021.", + "type": "text" + } + ], + "index": 36 + } + ], + "index": 35 + }, + { + "type": "text", + "bbox": [ + 106, + 614, + 504, + 637 + ], + "lines": [ + { + "bbox": [ + 106, + 614, + 505, + 627 + ], + "spans": [ + { + "bbox": [ + 106, + 614, + 505, + 627 + ], + "score": 1.0, + "content": "Yura Malitsky and Matthew K Tam. A forward-backward splitting method for monotone inclusions", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 115, + 626, + 430, + 638 + ], + "spans": [ + { + "bbox": [ + 115, + 626, + 430, + 638 + ], + "score": 1.0, + "content": "without cocoercivity. SIAM Journal on Optimization, 30(2):1451–1472, 2020.", + "type": "text" + } + ], + "index": 38 + } + ], + "index": 37.5 + }, + { + "type": "text", + "bbox": [ + 107, + 645, + 506, + 691 + ], + "lines": [ + { + "bbox": [ + 105, + 646, + 506, + 659 + ], + "spans": [ + { + "bbox": [ + 105, + 646, + 506, + 659 + ], + "score": 1.0, + "content": "Panayotis Mertikopoulos, Bruno Lecouat, Houssam Zenati, Chuan-Sheng Foo, Vijay Chandrasekhar,", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 115, + 657, + 507, + 670 + ], + "spans": [ + { + "bbox": [ + 115, + 657, + 507, + 670 + ], + "score": 1.0, + "content": "and Georgios Piliouras. 
Optimistic mirror descent in saddle-point problems: Going the extra(-", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 115, + 668, + 508, + 681 + ], + "spans": [ + { + "bbox": [ + 115, + 668, + 508, + 681 + ], + "score": 1.0, + "content": "gradient) mile. In International Conference on Learning Representations, 2019. URL https:", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 115, + 678, + 324, + 691 + ], + "spans": [ + { + "bbox": [ + 115, + 678, + 324, + 691 + ], + "score": 1.0, + "content": "//openreview.net/pdf?id=Bkg8jjC9KQ.", + "type": "text" + } + ], + "index": 42 + } + ], + "index": 40.5 + }, + { + "type": "text", + "bbox": [ + 107, + 699, + 505, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 698, + 507, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 507, + 712 + ], + "score": 1.0, + "content": "Lars Mescheder, Sebastian Nowozin, and Andreas Geiger. The numerics of GANs. In I. Guyon, U. V.", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 115, + 710, + 506, + 722 + ], + "spans": [ + { + "bbox": [ + 115, + 710, + 506, + 722 + ], + "score": 1.0, + "content": "Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett (eds.), Advances in", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 115, + 720, + 429, + 732 + ], + "spans": [ + { + "bbox": [ + 115, + 720, + 429, + 732 + ], + "score": 1.0, + "content": "Neural Information Processing Systems, volume 30. 
Curran Associates, 2017.", + "type": "text" + } + ], + "index": 45 + } + ], + "index": 44 + } + ], + "page_idx": 11, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 27, + 308, + 37 + ], + "lines": [ + { + "bbox": [ + 107, + 26, + 308, + 38 + ], + "spans": [ + { + "bbox": [ + 107, + 26, + 308, + 38 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 760 + ], + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 764 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 764 + ], + "score": 1.0, + "content": "12", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 107, + 82, + 504, + 116 + ], + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 96 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 96 + ], + "score": 1.0, + "content": "Laurent Jacob, Guillaume Obozinski, and Jean-Philippe Vert. Group lasso with overlaps and graph", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 115, + 93, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 115, + 93, + 505, + 106 + ], + "score": 1.0, + "content": "lasso. In LΓ©on Bottou and Michael Littman (eds.), Proceedings of the 26th International Conference", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 115, + 104, + 398, + 118 + ], + "spans": [ + { + "bbox": [ + 115, + 104, + 398, + 118 + ], + "score": 1.0, + "content": "on Machine Learning, pp. 433–440, Montreal, June 2009. Omnipress.", + "type": "text" + } + ], + "index": 2 + } + ], + "index": 1, + "bbox_fs": [ + 105, + 81, + 505, + 118 + ] + }, + { + "type": "text", + "bbox": [ + 105, + 124, + 504, + 147 + ], + "lines": [ + { + "bbox": [ + 105, + 123, + 505, + 137 + ], + "spans": [ + { + "bbox": [ + 105, + 123, + 505, + 137 + ], + "score": 1.0, + "content": "Patrick R Johnstone and Jonathan Eckstein. 
Convergence rates for projective splitting. SIAM Journal", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 116, + 136, + 285, + 147 + ], + "spans": [ + { + "bbox": [ + 116, + 136, + 285, + 147 + ], + "score": 1.0, + "content": "on Optimization, 29(3):1931–1957, 2019.", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 3.5, + "bbox_fs": [ + 105, + 123, + 505, + 147 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 155, + 504, + 178 + ], + "lines": [ + { + "bbox": [ + 105, + 155, + 505, + 168 + ], + "spans": [ + { + "bbox": [ + 105, + 155, + 505, + 168 + ], + "score": 1.0, + "content": "Patrick R Johnstone and Jonathan Eckstein. Projective splitting with forward steps only requires", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 116, + 166, + 343, + 179 + ], + "spans": [ + { + "bbox": [ + 116, + 166, + 343, + 179 + ], + "score": 1.0, + "content": "continuity. Optimization Letters, 14(1):229–247, 2020a.", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 5.5, + "bbox_fs": [ + 105, + 155, + 505, + 179 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 186, + 504, + 209 + ], + "lines": [ + { + "bbox": [ + 105, + 186, + 506, + 200 + ], + "spans": [ + { + "bbox": [ + 105, + 186, + 506, + 200 + ], + "score": 1.0, + "content": "Patrick R Johnstone and Jonathan Eckstein. Projective splitting with forward steps. Mathematical", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 116, + 197, + 352, + 210 + ], + "spans": [ + { + "bbox": [ + 116, + 197, + 352, + 210 + ], + "score": 1.0, + "content": "Programming, 2020b. 
Published online, to appear in print.", + "type": "text" + } + ], + "index": 8 + } + ], + "index": 7.5, + "bbox_fs": [ + 105, + 186, + 506, + 210 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 217, + 504, + 240 + ], + "lines": [ + { + "bbox": [ + 105, + 216, + 505, + 231 + ], + "spans": [ + { + "bbox": [ + 105, + 216, + 505, + 231 + ], + "score": 1.0, + "content": "Patrick R Johnstone and Jonathan Eckstein. Single-forward-step projective splitting: exploiting", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 116, + 228, + 451, + 240 + ], + "spans": [ + { + "bbox": [ + 116, + 228, + 451, + 240 + ], + "score": 1.0, + "content": "cocoercivity. Computational Optimization and Applications, 78(1):125–166, 2021.", + "type": "text" + } + ], + "index": 10 + } + ], + "index": 9.5, + "bbox_fs": [ + 105, + 216, + 505, + 240 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 248, + 505, + 271 + ], + "lines": [ + { + "bbox": [ + 106, + 247, + 505, + 262 + ], + "spans": [ + { + "bbox": [ + 106, + 247, + 505, + 262 + ], + "score": 1.0, + "content": "Anatoli Juditsky, Arkadi Nemirovski, and Claire Tauvel. Solving variational inequalities with", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 115, + 259, + 406, + 272 + ], + "spans": [ + { + "bbox": [ + 115, + 259, + 406, + 272 + ], + "score": 1.0, + "content": "stochastic mirror-prox algorithm. Stochastic Systems, 1(1):17–58, 2011.", + "type": "text" + } + ], + "index": 12 + } + ], + "index": 11.5, + "bbox_fs": [ + 106, + 247, + 505, + 272 + ] + }, + { + "type": "text", + "bbox": [ + 105, + 279, + 504, + 302 + ], + "lines": [ + { + "bbox": [ + 105, + 277, + 505, + 292 + ], + "spans": [ + { + "bbox": [ + 105, + 277, + 505, + 292 + ], + "score": 1.0, + "content": "GM Korpelevich. Extragradient method for finding saddle points and other problems. 
Matekon, 13", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 115, + 290, + 187, + 303 + ], + "spans": [ + { + "bbox": [ + 115, + 290, + 187, + 303 + ], + "score": 1.0, + "content": "(4):35–49, 1977.", + "type": "text" + } + ], + "index": 14 + } + ], + "index": 13.5, + "bbox_fs": [ + 105, + 277, + 505, + 303 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 309, + 506, + 355 + ], + "lines": [ + { + "bbox": [ + 106, + 311, + 506, + 322 + ], + "spans": [ + { + "bbox": [ + 106, + 311, + 506, + 322 + ], + "score": 1.0, + "content": "Daniel Kuhn, Peyman Mohajerin Esfahani, Viet Anh Nguyen, and Soroosh Shafieezadeh-Abadeh.", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 115, + 320, + 507, + 335 + ], + "spans": [ + { + "bbox": [ + 115, + 320, + 507, + 335 + ], + "score": 1.0, + "content": "Wasserstein distributionally robust optimization: Theory and applications in machine learning.", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 115, + 332, + 507, + 346 + ], + "spans": [ + { + "bbox": [ + 115, + 332, + 507, + 346 + ], + "score": 1.0, + "content": "In Serguei Netessine (ed.), Operations Research & Management Science in the Age of Analytics,", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 115, + 343, + 382, + 356 + ], + "spans": [ + { + "bbox": [ + 115, + 343, + 382, + 356 + ], + "score": 1.0, + "content": "Tutorials in Operations Research, pp. 130–166. 
INFORMS, 2019.", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 16.5, + "bbox_fs": [ + 106, + 311, + 507, + 356 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 363, + 505, + 397 + ], + "lines": [ + { + "bbox": [ + 106, + 363, + 505, + 375 + ], + "spans": [ + { + "bbox": [ + 106, + 363, + 505, + 375 + ], + "score": 1.0, + "content": "Chris Junchi Li, Yaodong Yu, Nicolas Loizou, Gauthier Gidel, Yi Ma, Nicolas Le Roux, and Michael I", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 115, + 374, + 505, + 387 + ], + "spans": [ + { + "bbox": [ + 115, + 374, + 505, + 387 + ], + "score": 1.0, + "content": "Jordan. On the convergence of stochastic extragradient for bilinear games with restarted iteration", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 116, + 386, + 324, + 397 + ], + "spans": [ + { + "bbox": [ + 116, + 386, + 324, + 397 + ], + "score": 1.0, + "content": "averaging. arXiv preprint arXiv:2107.00464, 2021.", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 20, + "bbox_fs": [ + 106, + 363, + 505, + 397 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 404, + 506, + 449 + ], + "lines": [ + { + "bbox": [ + 106, + 405, + 505, + 417 + ], + "spans": [ + { + "bbox": [ + 106, + 405, + 505, + 417 + ], + "score": 1.0, + "content": "Tianyi Lin, Chi Jin, and Michael Jordan. On gradient descent ascent for nonconvex-concave minimax", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 115, + 416, + 506, + 428 + ], + "spans": [ + { + "bbox": [ + 115, + 416, + 506, + 428 + ], + "score": 1.0, + "content": "problems. 
In Hal DaumΓ© III and Aarti Singh (eds.), Proceedings of the 37th International", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 115, + 426, + 507, + 441 + ], + "spans": [ + { + "bbox": [ + 115, + 426, + 507, + 441 + ], + "score": 1.0, + "content": "Conference on Machine Learning, volume 119 of Proceedings of Machine Learning Research, pp.", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 116, + 438, + 223, + 450 + ], + "spans": [ + { + "bbox": [ + 116, + 438, + 223, + 450 + ], + "score": 1.0, + "content": "6083–6093. PMLR, 2020.", + "type": "text" + } + ], + "index": 25 + } + ], + "index": 23.5, + "bbox_fs": [ + 106, + 405, + 507, + 450 + ] + }, + { + "type": "text", + "bbox": [ + 103, + 457, + 505, + 481 + ], + "lines": [ + { + "bbox": [ + 105, + 457, + 506, + 470 + ], + "spans": [ + { + "bbox": [ + 105, + 457, + 506, + 470 + ], + "score": 1.0, + "content": "Pierre-Louis Lions and Bertrand Mercier. Splitting algorithms for the sum of two nonlinear operators.", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 115, + 469, + 360, + 482 + ], + "spans": [ + { + "bbox": [ + 115, + 469, + 360, + 482 + ], + "score": 1.0, + "content": "SIAM Journal on Numerical Analysis, 16(6):964–979, 1979.", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 26.5, + "bbox_fs": [ + 105, + 457, + 506, + 482 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 488, + 505, + 523 + ], + "lines": [ + { + "bbox": [ + 106, + 488, + 506, + 502 + ], + "spans": [ + { + "bbox": [ + 106, + 488, + 506, + 502 + ], + "score": 1.0, + "content": "Mingrui Liu, Hassan Rafique, Qihang Lin, and Tianbao Yang. First-order convergence theory for", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 116, + 500, + 505, + 512 + ], + "spans": [ + { + "bbox": [ + 116, + 500, + 505, + 512 + ], + "score": 1.0, + "content": "weakly-convex-weakly-concave min-max problems. 
Journal of Machine Learning Research, 22", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 116, + 511, + 191, + 523 + ], + "spans": [ + { + "bbox": [ + 116, + 511, + 191, + 523 + ], + "score": 1.0, + "content": "(169):1–34, 2021.", + "type": "text" + } + ], + "index": 30 + } + ], + "index": 29, + "bbox_fs": [ + 106, + 488, + 506, + 523 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 530, + 506, + 564 + ], + "lines": [ + { + "bbox": [ + 105, + 530, + 506, + 544 + ], + "spans": [ + { + "bbox": [ + 105, + 530, + 506, + 544 + ], + "score": 1.0, + "content": "Nicolas Loizou, Hugo Berard, Alexia Jolicoeur-Martineau, Pascal Vincent, Simon Lacoste-Julien, and", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 116, + 542, + 506, + 554 + ], + "spans": [ + { + "bbox": [ + 116, + 542, + 506, + 554 + ], + "score": 1.0, + "content": "Ioannis Mitliagkas. Stochastic hamiltonian gradient methods for smooth games. In International", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 116, + 552, + 377, + 566 + ], + "spans": [ + { + "bbox": [ + 116, + 552, + 377, + 566 + ], + "score": 1.0, + "content": "Conference on Machine Learning, pp. 6370–6381. PMLR, 2020.", + "type": "text" + } + ], + "index": 33 + } + ], + "index": 32, + "bbox_fs": [ + 105, + 530, + 506, + 566 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 572, + 506, + 606 + ], + "lines": [ + { + "bbox": [ + 106, + 573, + 506, + 585 + ], + "spans": [ + { + "bbox": [ + 106, + 573, + 506, + 585 + ], + "score": 1.0, + "content": "Nicolas Loizou, Hugo Berard, Gauthier Gidel, Ioannis Mitliagkas, and Simon Lacoste-Julien. 
Stochas-", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 116, + 585, + 505, + 597 + ], + "spans": [ + { + "bbox": [ + 116, + 585, + 505, + 597 + ], + "score": 1.0, + "content": "tic gradient descent-ascent and consensus optimization for smooth games: Convergence analysis", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 116, + 595, + 399, + 608 + ], + "spans": [ + { + "bbox": [ + 116, + 595, + 399, + 608 + ], + "score": 1.0, + "content": "under expected co-coercivity. arXiv preprint arXiv:2107.00052, 2021.", + "type": "text" + } + ], + "index": 36 + } + ], + "index": 35, + "bbox_fs": [ + 106, + 573, + 506, + 608 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 614, + 504, + 637 + ], + "lines": [ + { + "bbox": [ + 106, + 614, + 505, + 627 + ], + "spans": [ + { + "bbox": [ + 106, + 614, + 505, + 627 + ], + "score": 1.0, + "content": "Yura Malitsky and Matthew K Tam. A forward-backward splitting method for monotone inclusions", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 115, + 626, + 430, + 638 + ], + "spans": [ + { + "bbox": [ + 115, + 626, + 430, + 638 + ], + "score": 1.0, + "content": "without cocoercivity. SIAM Journal on Optimization, 30(2):1451–1472, 2020.", + "type": "text" + } + ], + "index": 38 + } + ], + "index": 37.5, + "bbox_fs": [ + 106, + 614, + 505, + 638 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 645, + 506, + 691 + ], + "lines": [ + { + "bbox": [ + 105, + 646, + 506, + 659 + ], + "spans": [ + { + "bbox": [ + 105, + 646, + 506, + 659 + ], + "score": 1.0, + "content": "Panayotis Mertikopoulos, Bruno Lecouat, Houssam Zenati, Chuan-Sheng Foo, Vijay Chandrasekhar,", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 115, + 657, + 507, + 670 + ], + "spans": [ + { + "bbox": [ + 115, + 657, + 507, + 670 + ], + "score": 1.0, + "content": "and Georgios Piliouras. 
Optimistic mirror descent in saddle-point problems: Going the extra(-", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 115, + 668, + 508, + 681 + ], + "spans": [ + { + "bbox": [ + 115, + 668, + 508, + 681 + ], + "score": 1.0, + "content": "gradient) mile. In International Conference on Learning Representations, 2019. URL https:", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 115, + 678, + 324, + 691 + ], + "spans": [ + { + "bbox": [ + 115, + 678, + 324, + 691 + ], + "score": 1.0, + "content": "//openreview.net/pdf?id=Bkg8jjC9KQ.", + "type": "text" + } + ], + "index": 42 + } + ], + "index": 40.5, + "bbox_fs": [ + 105, + 646, + 508, + 691 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 699, + 505, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 698, + 507, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 507, + 712 + ], + "score": 1.0, + "content": "Lars Mescheder, Sebastian Nowozin, and Andreas Geiger. The numerics of GANs. In I. Guyon, U. V.", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 115, + 710, + 506, + 722 + ], + "spans": [ + { + "bbox": [ + 115, + 710, + 506, + 722 + ], + "score": 1.0, + "content": "Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett (eds.), Advances in", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 115, + 720, + 429, + 732 + ], + "spans": [ + { + "bbox": [ + 115, + 720, + 429, + 732 + ], + "score": 1.0, + "content": "Neural Information Processing Systems, volume 30. Curran Associates, 2017.", + "type": "text" + } + ], + "index": 45 + } + ], + "index": 44, + "bbox_fs": [ + 105, + 698, + 507, + 732 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 107, + 82, + 506, + 127 + ], + "lines": [ + { + "bbox": [ + 106, + 83, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 83, + 505, + 95 + ], + "score": 1.0, + "content": "Lars Mescheder, Andreas Geiger, and Sebastian Nowozin. 
Which training methods for GANs do", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 94, + 506, + 106 + ], + "spans": [ + { + "bbox": [ + 116, + 94, + 506, + 106 + ], + "score": 1.0, + "content": "actually converge? In Jennifer Dy and Andreas Krause (eds.), Proceedings of the 35th International", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 115, + 104, + 507, + 118 + ], + "spans": [ + { + "bbox": [ + 115, + 104, + 507, + 118 + ], + "score": 1.0, + "content": "Conference on Machine Learning, volume 80 of Proceedings of Machine Learning Research, pp.", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 115, + 114, + 223, + 127 + ], + "spans": [ + { + "bbox": [ + 115, + 114, + 223, + 127 + ], + "score": 1.0, + "content": "3481–3490. PMLR, 2018.", + "type": "text" + } + ], + "index": 3 + } + ], + "index": 1.5 + }, + { + "type": "text", + "bbox": [ + 107, + 135, + 506, + 169 + ], + "lines": [ + { + "bbox": [ + 106, + 135, + 505, + 147 + ], + "spans": [ + { + "bbox": [ + 106, + 135, + 505, + 147 + ], + "score": 1.0, + "content": "Konstantin Mishchenko, Dmitry Kovalev, Egor Shulgin, Peter RichtΓ‘rik, and Yura Malitsky. Revisit-", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 115, + 146, + 507, + 160 + ], + "spans": [ + { + "bbox": [ + 115, + 146, + 507, + 160 + ], + "score": 1.0, + "content": "ing stochastic extragradient. In International Conference on Artificial Intelligence and Statistics,", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 115, + 157, + 239, + 169 + ], + "spans": [ + { + "bbox": [ + 115, + 157, + 239, + 169 + ], + "score": 1.0, + "content": "pp. 4573–4582. 
PMLR, 2020.", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 5 + }, + { + "type": "text", + "bbox": [ + 106, + 177, + 506, + 210 + ], + "lines": [ + { + "bbox": [ + 106, + 177, + 505, + 189 + ], + "spans": [ + { + "bbox": [ + 106, + 177, + 423, + 189 + ], + "score": 1.0, + "content": "Aryan Mokhtari, Asuman E Ozdaglar, and Sarath Pattathil. Convergence rate of", + "type": "text" + }, + { + "bbox": [ + 423, + 177, + 448, + 189 + ], + "score": 0.69, + "content": "\\mathbf { o } ( 1 / \\mathrm { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 448, + 177, + 505, + 189 + ], + "score": 1.0, + "content": "for optimistic", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 115, + 188, + 506, + 200 + ], + "spans": [ + { + "bbox": [ + 115, + 188, + 506, + 200 + ], + "score": 1.0, + "content": "gradient and extragradient methods in smooth convex-concave saddle point problems. SIAM", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 115, + 199, + 317, + 211 + ], + "spans": [ + { + "bbox": [ + 115, + 199, + 317, + 211 + ], + "score": 1.0, + "content": "Journal on Optimization, 30(4):3230–3251, 2020.", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 8 + }, + { + "type": "text", + "bbox": [ + 106, + 218, + 505, + 252 + ], + "lines": [ + { + "bbox": [ + 105, + 218, + 505, + 231 + ], + "spans": [ + { + "bbox": [ + 105, + 218, + 505, + 231 + ], + "score": 1.0, + "content": "Renato DC Monteiro and Benar Fux Svaiter. On the complexity of the hybrid proximal extragradient", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 115, + 229, + 507, + 243 + ], + "spans": [ + { + "bbox": [ + 115, + 229, + 507, + 243 + ], + "score": 1.0, + "content": "method for the iterates and the ergodic mean. 
SIAM Journal on Optimization, 20(6):2755–2787,", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 115, + 239, + 143, + 254 + ], + "spans": [ + { + "bbox": [ + 115, + 239, + 143, + 254 + ], + "score": 1.0, + "content": "2010.", + "type": "text" + } + ], + "index": 12 + } + ], + "index": 11 + }, + { + "type": "text", + "bbox": [ + 110, + 260, + 505, + 295 + ], + "lines": [ + { + "bbox": [ + 108, + 261, + 505, + 273 + ], + "spans": [ + { + "bbox": [ + 108, + 261, + 505, + 273 + ], + "score": 1.0, + "content": "Vaishnavh Nagarajan and J. Zico Kolter. Gradient descent GAN optimization is locally stable. In", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 114, + 271, + 506, + 284 + ], + "spans": [ + { + "bbox": [ + 114, + 271, + 506, + 284 + ], + "score": 1.0, + "content": "I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett (eds.),", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 114, + 282, + 480, + 295 + ], + "spans": [ + { + "bbox": [ + 114, + 282, + 480, + 295 + ], + "score": 1.0, + "content": "Advances in Neural Information Processing Systems, volume 30. Curran Associates, 2017.", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 14 + }, + { + "type": "text", + "bbox": [ + 107, + 302, + 506, + 336 + ], + "lines": [ + { + "bbox": [ + 106, + 302, + 506, + 316 + ], + "spans": [ + { + "bbox": [ + 106, + 302, + 506, + 316 + ], + "score": 1.0, + "content": "Hongseok Namkoong and John C Duchi. Stochastic gradient methods for distributionally robust", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 115, + 313, + 506, + 327 + ], + "spans": [ + { + "bbox": [ + 115, + 313, + 189, + 327 + ], + "score": 1.0, + "content": "optimization with", + "type": "text" + }, + { + "bbox": [ + 190, + 314, + 196, + 325 + ], + "score": 0.85, + "content": "f", + "type": "inline_equation" + }, + { + "bbox": [ + 197, + 313, + 506, + 327 + ], + "score": 1.0, + "content": "-divergences. 
In D. Lee, M. Sugiyama, U. Luxburg, I. Guyon, and R. Garnett", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 116, + 325, + 506, + 337 + ], + "spans": [ + { + "bbox": [ + 116, + 325, + 506, + 337 + ], + "score": 1.0, + "content": "(eds.), Advances in Neural Information Processing Systems, volume 29. Curran Associates, 2016.", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 17 + }, + { + "type": "text", + "bbox": [ + 107, + 344, + 506, + 378 + ], + "lines": [ + { + "bbox": [ + 106, + 344, + 505, + 357 + ], + "spans": [ + { + "bbox": [ + 106, + 344, + 345, + 357 + ], + "score": 1.0, + "content": "Arkadi Nemirovski. Prox-method with rate of convergence", + "type": "text" + }, + { + "bbox": [ + 345, + 344, + 375, + 356 + ], + "score": 0.9, + "content": "\\mathrm { O } ( 1 / t )", + "type": "inline_equation" + }, + { + "bbox": [ + 376, + 344, + 505, + 357 + ], + "score": 1.0, + "content": "for variational inequalities with", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 115, + 356, + 506, + 367 + ], + "spans": [ + { + "bbox": [ + 115, + 356, + 506, + 367 + ], + "score": 1.0, + "content": "Lipschitz continuous monotone operators and smooth convex-concave saddle point problems.", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 115, + 366, + 333, + 378 + ], + "spans": [ + { + "bbox": [ + 115, + 366, + 333, + 378 + ], + "score": 1.0, + "content": "SIAM Journal on Optimization, 15(1):229–251, 2004.", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 20 + }, + { + "type": "text", + "bbox": [ + 107, + 386, + 504, + 409 + ], + "lines": [ + { + "bbox": [ + 106, + 385, + 506, + 399 + ], + "spans": [ + { + "bbox": [ + 106, + 385, + 506, + 399 + ], + "score": 1.0, + "content": "Yurii Nesterov. 
Dual extrapolation and its applications to solving variational inequalities and related", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 115, + 397, + 372, + 409 + ], + "spans": [ + { + "bbox": [ + 115, + 397, + 372, + 409 + ], + "score": 1.0, + "content": "problems. Mathematical Programming, 109(2):319–344, 2007.", + "type": "text" + } + ], + "index": 23 + } + ], + "index": 22.5 + }, + { + "type": "text", + "bbox": [ + 106, + 416, + 504, + 439 + ], + "lines": [ + { + "bbox": [ + 105, + 415, + 506, + 430 + ], + "spans": [ + { + "bbox": [ + 105, + 415, + 506, + 430 + ], + "score": 1.0, + "content": "Neal Parikh and Stephen Boyd. Proximal algorithms. Foundations and Trends in Optimization, 1(3):", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 116, + 428, + 181, + 439 + ], + "spans": [ + { + "bbox": [ + 116, + 428, + 181, + 439 + ], + "score": 1.0, + "content": "123–231, 2013.", + "type": "text" + } + ], + "index": 25 + } + ], + "index": 24.5 + }, + { + "type": "text", + "bbox": [ + 107, + 447, + 506, + 504 + ], + "lines": [ + { + "bbox": [ + 106, + 448, + 505, + 460 + ], + "spans": [ + { + "bbox": [ + 106, + 448, + 505, + 460 + ], + "score": 1.0, + "content": "Reese Pathak and Martin J Wainwright. Fedsplit: an algorithmic framework for fast federated", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 115, + 458, + 506, + 471 + ], + "spans": [ + { + "bbox": [ + 115, + 458, + 506, + 471 + ], + "score": 1.0, + "content": "optimization. In H. Larochelle, M. Ranzato, R. Hadsell, M. F. Balcan, and H. Lin (eds.), Ad-", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 115, + 469, + 507, + 483 + ], + "spans": [ + { + "bbox": [ + 115, + 469, + 507, + 483 + ], + "score": 1.0, + "content": "vances in Neural Information Processing Systems, volume 33, pp. 7057–7066. 
Curran Asso-", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 115, + 480, + 505, + 493 + ], + "spans": [ + { + "bbox": [ + 115, + 480, + 505, + 493 + ], + "score": 1.0, + "content": "ciates, Inc., 2020. URL https://proceedings.neurips.cc/paper/2020/file/", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 115, + 491, + 373, + 504 + ], + "spans": [ + { + "bbox": [ + 115, + 491, + 373, + 504 + ], + "score": 1.0, + "content": "4ebd440d99504722d80de606ea8507da-Paper.pdf.", + "type": "text" + } + ], + "index": 30 + } + ], + "index": 28 + }, + { + "type": "text", + "bbox": [ + 106, + 511, + 505, + 545 + ], + "lines": [ + { + "bbox": [ + 105, + 511, + 505, + 525 + ], + "spans": [ + { + "bbox": [ + 105, + 511, + 505, + 525 + ], + "score": 1.0, + "content": "Fabian Pedregosa and Gauthier Gidel. Adaptive three-operator splitting. In Jennifer Dy and Andreas", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 116, + 523, + 505, + 535 + ], + "spans": [ + { + "bbox": [ + 116, + 523, + 505, + 535 + ], + "score": 1.0, + "content": "Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, volume 80", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 115, + 533, + 470, + 546 + ], + "spans": [ + { + "bbox": [ + 115, + 533, + 470, + 546 + ], + "score": 1.0, + "content": "of Proceedings of Machine Learning Research, pp. 4085–4094. PMLR, 10–15 Jul 2018.", + "type": "text" + } + ], + "index": 33 + } + ], + "index": 32 + }, + { + "type": "text", + "bbox": [ + 106, + 552, + 505, + 598 + ], + "lines": [ + { + "bbox": [ + 106, + 553, + 505, + 565 + ], + "spans": [ + { + "bbox": [ + 106, + 553, + 505, + 565 + ], + "score": 1.0, + "content": "Fabian Pedregosa, Kilian Fatras, and Mattia Casotto. Proximal splitting meets variance reduction. 
In", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 115, + 564, + 506, + 577 + ], + "spans": [ + { + "bbox": [ + 115, + 564, + 506, + 577 + ], + "score": 1.0, + "content": "Kamalika Chaudhuri and Masashi Sugiyama (eds.), Proceedings of the Twenty-Second Interna-", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 116, + 576, + 505, + 588 + ], + "spans": [ + { + "bbox": [ + 116, + 576, + 505, + 588 + ], + "score": 1.0, + "content": "tional Conference on Artificial Intelligence and Statistics, volume 89 of Proceedings of Machine", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 115, + 586, + 339, + 598 + ], + "spans": [ + { + "bbox": [ + 115, + 586, + 339, + 598 + ], + "score": 1.0, + "content": "Learning Research, pp. 1–10. PMLR, 16–18 Apr 2019.", + "type": "text" + } + ], + "index": 37 + } + ], + "index": 35.5 + }, + { + "type": "text", + "bbox": [ + 107, + 605, + 504, + 640 + ], + "lines": [ + { + "bbox": [ + 105, + 606, + 505, + 618 + ], + "spans": [ + { + "bbox": [ + 105, + 606, + 505, + 618 + ], + "score": 1.0, + "content": "Emile Richard, Pierre-Andre Savalle, and Nicolas Vayatis. Estimation of simultaneously sparse and", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 117, + 618, + 505, + 629 + ], + "spans": [ + { + "bbox": [ + 117, + 618, + 505, + 629 + ], + "score": 1.0, + "content": "low rank matrices. In John Langford and Joelle Pineau (eds.), Proceedings of the 29th International", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 117, + 628, + 394, + 640 + ], + "spans": [ + { + "bbox": [ + 117, + 628, + 394, + 640 + ], + "score": 1.0, + "content": "Conference on Machine Learning, pp. 1351–1358. 
Omnipress, 2012.", + "type": "text" + } + ], + "index": 40 + } + ], + "index": 39 + }, + { + "type": "text", + "bbox": [ + 104, + 648, + 505, + 671 + ], + "lines": [ + { + "bbox": [ + 106, + 648, + 505, + 660 + ], + "spans": [ + { + "bbox": [ + 106, + 648, + 505, + 660 + ], + "score": 1.0, + "content": "Herbert Robbins and Sutton Monro. A stochastic approximation method. The annals of mathematical", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 115, + 659, + 237, + 671 + ], + "spans": [ + { + "bbox": [ + 115, + 659, + 237, + 671 + ], + "score": 1.0, + "content": "statistics, pp. 400–407, 1951.", + "type": "text" + } + ], + "index": 42 + } + ], + "index": 41.5 + }, + { + "type": "text", + "bbox": [ + 105, + 678, + 504, + 702 + ], + "lines": [ + { + "bbox": [ + 106, + 678, + 506, + 691 + ], + "spans": [ + { + "bbox": [ + 106, + 678, + 506, + 691 + ], + "score": 1.0, + "content": "R Tyrrell Rockafellar. Monotone operators associated with saddle-functions and minimax problems.", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 115, + 690, + 346, + 703 + ], + "spans": [ + { + "bbox": [ + 115, + 690, + 346, + 703 + ], + "score": 1.0, + "content": "Nonlinear functional analysis, 18(part 1):397–407, 1970.", + "type": "text" + } + ], + "index": 44 + } + ], + "index": 43.5 + }, + { + "type": "text", + "bbox": [ + 105, + 709, + 504, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 708, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 506, + 723 + ], + "score": 1.0, + "content": "Ernest K Ryu and Stephen Boyd. Primer on monotone operator methods. Appl. Comput. 
Math, 15(1):", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 115, + 720, + 166, + 732 + ], + "spans": [ + { + "bbox": [ + 115, + 720, + 166, + 732 + ], + "score": 1.0, + "content": "3–43, 2016.", + "type": "text" + } + ], + "index": 46 + } + ], + "index": 45.5 + } + ], + "page_idx": 12, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 300, + 751, + 310, + 760 + ], + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 764 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 764 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 14, + "width": 13 + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 107, + 27, + 308, + 37 + ], + "lines": [ + { + "bbox": [ + 107, + 26, + 308, + 38 + ], + "spans": [ + { + "bbox": [ + 107, + 26, + 308, + 38 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 107, + 82, + 506, + 127 + ], + "lines": [ + { + "bbox": [ + 106, + 83, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 83, + 505, + 95 + ], + "score": 1.0, + "content": "Lars Mescheder, Andreas Geiger, and Sebastian Nowozin. Which training methods for GANs do", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 94, + 506, + 106 + ], + "spans": [ + { + "bbox": [ + 116, + 94, + 506, + 106 + ], + "score": 1.0, + "content": "actually converge? 
In Jennifer Dy and Andreas Krause (eds.), Proceedings of the 35th International", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 115, + 104, + 507, + 118 + ], + "spans": [ + { + "bbox": [ + 115, + 104, + 507, + 118 + ], + "score": 1.0, + "content": "Conference on Machine Learning, volume 80 of Proceedings of Machine Learning Research, pp.", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 115, + 114, + 223, + 127 + ], + "spans": [ + { + "bbox": [ + 115, + 114, + 223, + 127 + ], + "score": 1.0, + "content": "3481–3490. PMLR, 2018.", + "type": "text" + } + ], + "index": 3 + } + ], + "index": 1.5, + "bbox_fs": [ + 106, + 83, + 507, + 127 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 135, + 506, + 169 + ], + "lines": [ + { + "bbox": [ + 106, + 135, + 505, + 147 + ], + "spans": [ + { + "bbox": [ + 106, + 135, + 505, + 147 + ], + "score": 1.0, + "content": "Konstantin Mishchenko, Dmitry Kovalev, Egor Shulgin, Peter RichtΓ‘rik, and Yura Malitsky. Revisit-", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 115, + 146, + 507, + 160 + ], + "spans": [ + { + "bbox": [ + 115, + 146, + 507, + 160 + ], + "score": 1.0, + "content": "ing stochastic extragradient. In International Conference on Artificial Intelligence and Statistics,", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 115, + 157, + 239, + 169 + ], + "spans": [ + { + "bbox": [ + 115, + 157, + 239, + 169 + ], + "score": 1.0, + "content": "pp. 4573–4582. PMLR, 2020.", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 5, + "bbox_fs": [ + 106, + 135, + 507, + 169 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 177, + 506, + 210 + ], + "lines": [ + { + "bbox": [ + 106, + 177, + 505, + 189 + ], + "spans": [ + { + "bbox": [ + 106, + 177, + 423, + 189 + ], + "score": 1.0, + "content": "Aryan Mokhtari, Asuman E Ozdaglar, and Sarath Pattathil. 
Convergence rate of", + "type": "text" + }, + { + "bbox": [ + 423, + 177, + 448, + 189 + ], + "score": 0.69, + "content": "\\mathbf { o } ( 1 / \\mathrm { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 448, + 177, + 505, + 189 + ], + "score": 1.0, + "content": "for optimistic", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 115, + 188, + 506, + 200 + ], + "spans": [ + { + "bbox": [ + 115, + 188, + 506, + 200 + ], + "score": 1.0, + "content": "gradient and extragradient methods in smooth convex-concave saddle point problems. SIAM", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 115, + 199, + 317, + 211 + ], + "spans": [ + { + "bbox": [ + 115, + 199, + 317, + 211 + ], + "score": 1.0, + "content": "Journal on Optimization, 30(4):3230–3251, 2020.", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 8, + "bbox_fs": [ + 106, + 177, + 506, + 211 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 218, + 505, + 252 + ], + "lines": [ + { + "bbox": [ + 105, + 218, + 505, + 231 + ], + "spans": [ + { + "bbox": [ + 105, + 218, + 505, + 231 + ], + "score": 1.0, + "content": "Renato DC Monteiro and Benar Fux Svaiter. On the complexity of the hybrid proximal extragradient", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 115, + 229, + 507, + 243 + ], + "spans": [ + { + "bbox": [ + 115, + 229, + 507, + 243 + ], + "score": 1.0, + "content": "method for the iterates and the ergodic mean. 
SIAM Journal on Optimization, 20(6):2755–2787,", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 115, + 239, + 143, + 254 + ], + "spans": [ + { + "bbox": [ + 115, + 239, + 143, + 254 + ], + "score": 1.0, + "content": "2010.", + "type": "text" + } + ], + "index": 12 + } + ], + "index": 11, + "bbox_fs": [ + 105, + 218, + 507, + 254 + ] + }, + { + "type": "text", + "bbox": [ + 110, + 260, + 505, + 295 + ], + "lines": [ + { + "bbox": [ + 108, + 261, + 505, + 273 + ], + "spans": [ + { + "bbox": [ + 108, + 261, + 505, + 273 + ], + "score": 1.0, + "content": "Vaishnavh Nagarajan and J. Zico Kolter. Gradient descent GAN optimization is locally stable. In", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 114, + 271, + 506, + 284 + ], + "spans": [ + { + "bbox": [ + 114, + 271, + 506, + 284 + ], + "score": 1.0, + "content": "I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett (eds.),", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 114, + 282, + 480, + 295 + ], + "spans": [ + { + "bbox": [ + 114, + 282, + 480, + 295 + ], + "score": 1.0, + "content": "Advances in Neural Information Processing Systems, volume 30. Curran Associates, 2017.", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 14, + "bbox_fs": [ + 108, + 261, + 506, + 295 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 302, + 506, + 336 + ], + "lines": [ + { + "bbox": [ + 106, + 302, + 506, + 316 + ], + "spans": [ + { + "bbox": [ + 106, + 302, + 506, + 316 + ], + "score": 1.0, + "content": "Hongseok Namkoong and John C Duchi. 
Stochastic gradient methods for distributionally robust", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 115, + 313, + 506, + 327 + ], + "spans": [ + { + "bbox": [ + 115, + 313, + 189, + 327 + ], + "score": 1.0, + "content": "optimization with", + "type": "text" + }, + { + "bbox": [ + 190, + 314, + 196, + 325 + ], + "score": 0.85, + "content": "f", + "type": "inline_equation" + }, + { + "bbox": [ + 197, + 313, + 506, + 327 + ], + "score": 1.0, + "content": "-divergences. In D. Lee, M. Sugiyama, U. Luxburg, I. Guyon, and R. Garnett", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 116, + 325, + 506, + 337 + ], + "spans": [ + { + "bbox": [ + 116, + 325, + 506, + 337 + ], + "score": 1.0, + "content": "(eds.), Advances in Neural Information Processing Systems, volume 29. Curran Associates, 2016.", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 17, + "bbox_fs": [ + 106, + 302, + 506, + 337 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 344, + 506, + 378 + ], + "lines": [ + { + "bbox": [ + 106, + 344, + 505, + 357 + ], + "spans": [ + { + "bbox": [ + 106, + 344, + 345, + 357 + ], + "score": 1.0, + "content": "Arkadi Nemirovski. 
Prox-method with rate of convergence", + "type": "text" + }, + { + "bbox": [ + 345, + 344, + 375, + 356 + ], + "score": 0.9, + "content": "\\mathrm { O } ( 1 / t )", + "type": "inline_equation" + }, + { + "bbox": [ + 376, + 344, + 505, + 357 + ], + "score": 1.0, + "content": "for variational inequalities with", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 115, + 356, + 506, + 367 + ], + "spans": [ + { + "bbox": [ + 115, + 356, + 506, + 367 + ], + "score": 1.0, + "content": "Lipschitz continuous monotone operators and smooth convex-concave saddle point problems.", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 115, + 366, + 333, + 378 + ], + "spans": [ + { + "bbox": [ + 115, + 366, + 333, + 378 + ], + "score": 1.0, + "content": "SIAM Journal on Optimization, 15(1):229–251, 2004.", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 20, + "bbox_fs": [ + 106, + 344, + 506, + 378 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 386, + 504, + 409 + ], + "lines": [ + { + "bbox": [ + 106, + 385, + 506, + 399 + ], + "spans": [ + { + "bbox": [ + 106, + 385, + 506, + 399 + ], + "score": 1.0, + "content": "Yurii Nesterov. Dual extrapolation and its applications to solving variational inequalities and related", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 115, + 397, + 372, + 409 + ], + "spans": [ + { + "bbox": [ + 115, + 397, + 372, + 409 + ], + "score": 1.0, + "content": "problems. Mathematical Programming, 109(2):319–344, 2007.", + "type": "text" + } + ], + "index": 23 + } + ], + "index": 22.5, + "bbox_fs": [ + 106, + 385, + 506, + 409 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 416, + 504, + 439 + ], + "lines": [ + { + "bbox": [ + 105, + 415, + 506, + 430 + ], + "spans": [ + { + "bbox": [ + 105, + 415, + 506, + 430 + ], + "score": 1.0, + "content": "Neal Parikh and Stephen Boyd. Proximal algorithms. 
Foundations and Trends in Optimization, 1(3):", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 116, + 428, + 181, + 439 + ], + "spans": [ + { + "bbox": [ + 116, + 428, + 181, + 439 + ], + "score": 1.0, + "content": "123–231, 2013.", + "type": "text" + } + ], + "index": 25 + } + ], + "index": 24.5, + "bbox_fs": [ + 105, + 415, + 506, + 439 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 447, + 506, + 504 + ], + "lines": [ + { + "bbox": [ + 106, + 448, + 505, + 460 + ], + "spans": [ + { + "bbox": [ + 106, + 448, + 505, + 460 + ], + "score": 1.0, + "content": "Reese Pathak and Martin J Wainwright. Fedsplit: an algorithmic framework for fast federated", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 115, + 458, + 506, + 471 + ], + "spans": [ + { + "bbox": [ + 115, + 458, + 506, + 471 + ], + "score": 1.0, + "content": "optimization. In H. Larochelle, M. Ranzato, R. Hadsell, M. F. Balcan, and H. Lin (eds.), Ad-", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 115, + 469, + 507, + 483 + ], + "spans": [ + { + "bbox": [ + 115, + 469, + 507, + 483 + ], + "score": 1.0, + "content": "vances in Neural Information Processing Systems, volume 33, pp. 7057–7066. Curran Asso-", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 115, + 480, + 505, + 493 + ], + "spans": [ + { + "bbox": [ + 115, + 480, + 505, + 493 + ], + "score": 1.0, + "content": "ciates, Inc., 2020. 
URL https://proceedings.neurips.cc/paper/2020/file/", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 115, + 491, + 373, + 504 + ], + "spans": [ + { + "bbox": [ + 115, + 491, + 373, + 504 + ], + "score": 1.0, + "content": "4ebd440d99504722d80de606ea8507da-Paper.pdf.", + "type": "text" + } + ], + "index": 30 + } + ], + "index": 28, + "bbox_fs": [ + 106, + 448, + 507, + 504 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 511, + 505, + 545 + ], + "lines": [ + { + "bbox": [ + 105, + 511, + 505, + 525 + ], + "spans": [ + { + "bbox": [ + 105, + 511, + 505, + 525 + ], + "score": 1.0, + "content": "Fabian Pedregosa and Gauthier Gidel. Adaptive three-operator splitting. In Jennifer Dy and Andreas", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 116, + 523, + 505, + 535 + ], + "spans": [ + { + "bbox": [ + 116, + 523, + 505, + 535 + ], + "score": 1.0, + "content": "Krause (eds.), Proceedings of the 35th International Conference on Machine Learning, volume 80", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 115, + 533, + 470, + 546 + ], + "spans": [ + { + "bbox": [ + 115, + 533, + 470, + 546 + ], + "score": 1.0, + "content": "of Proceedings of Machine Learning Research, pp. 4085–4094. PMLR, 10–15 Jul 2018.", + "type": "text" + } + ], + "index": 33 + } + ], + "index": 32, + "bbox_fs": [ + 105, + 511, + 505, + 546 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 552, + 505, + 598 + ], + "lines": [ + { + "bbox": [ + 106, + 553, + 505, + 565 + ], + "spans": [ + { + "bbox": [ + 106, + 553, + 505, + 565 + ], + "score": 1.0, + "content": "Fabian Pedregosa, Kilian Fatras, and Mattia Casotto. Proximal splitting meets variance reduction. 
In", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 115, + 564, + 506, + 577 + ], + "spans": [ + { + "bbox": [ + 115, + 564, + 506, + 577 + ], + "score": 1.0, + "content": "Kamalika Chaudhuri and Masashi Sugiyama (eds.), Proceedings of the Twenty-Second Interna-", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 116, + 576, + 505, + 588 + ], + "spans": [ + { + "bbox": [ + 116, + 576, + 505, + 588 + ], + "score": 1.0, + "content": "tional Conference on Artificial Intelligence and Statistics, volume 89 of Proceedings of Machine", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 115, + 586, + 339, + 598 + ], + "spans": [ + { + "bbox": [ + 115, + 586, + 339, + 598 + ], + "score": 1.0, + "content": "Learning Research, pp. 1–10. PMLR, 16–18 Apr 2019.", + "type": "text" + } + ], + "index": 37 + } + ], + "index": 35.5, + "bbox_fs": [ + 106, + 553, + 506, + 598 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 605, + 504, + 640 + ], + "lines": [ + { + "bbox": [ + 105, + 606, + 505, + 618 + ], + "spans": [ + { + "bbox": [ + 105, + 606, + 505, + 618 + ], + "score": 1.0, + "content": "Emile Richard, Pierre-Andre Savalle, and Nicolas Vayatis. Estimation of simultaneously sparse and", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 117, + 618, + 505, + 629 + ], + "spans": [ + { + "bbox": [ + 117, + 618, + 505, + 629 + ], + "score": 1.0, + "content": "low rank matrices. In John Langford and Joelle Pineau (eds.), Proceedings of the 29th International", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 117, + 628, + 394, + 640 + ], + "spans": [ + { + "bbox": [ + 117, + 628, + 394, + 640 + ], + "score": 1.0, + "content": "Conference on Machine Learning, pp. 1351–1358. 
Omnipress, 2012.", + "type": "text" + } + ], + "index": 40 + } + ], + "index": 39, + "bbox_fs": [ + 105, + 606, + 505, + 640 + ] + }, + { + "type": "text", + "bbox": [ + 104, + 648, + 505, + 671 + ], + "lines": [ + { + "bbox": [ + 106, + 648, + 505, + 660 + ], + "spans": [ + { + "bbox": [ + 106, + 648, + 505, + 660 + ], + "score": 1.0, + "content": "Herbert Robbins and Sutton Monro. A stochastic approximation method. The annals of mathematical", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 115, + 659, + 237, + 671 + ], + "spans": [ + { + "bbox": [ + 115, + 659, + 237, + 671 + ], + "score": 1.0, + "content": "statistics, pp. 400–407, 1951.", + "type": "text" + } + ], + "index": 42 + } + ], + "index": 41.5, + "bbox_fs": [ + 106, + 648, + 505, + 671 + ] + }, + { + "type": "text", + "bbox": [ + 105, + 678, + 504, + 702 + ], + "lines": [ + { + "bbox": [ + 106, + 678, + 506, + 691 + ], + "spans": [ + { + "bbox": [ + 106, + 678, + 506, + 691 + ], + "score": 1.0, + "content": "R Tyrrell Rockafellar. Monotone operators associated with saddle-functions and minimax problems.", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 115, + 690, + 346, + 703 + ], + "spans": [ + { + "bbox": [ + 115, + 690, + 346, + 703 + ], + "score": 1.0, + "content": "Nonlinear functional analysis, 18(part 1):397–407, 1970.", + "type": "text" + } + ], + "index": 44 + } + ], + "index": 43.5, + "bbox_fs": [ + 106, + 678, + 506, + 703 + ] + }, + { + "type": "text", + "bbox": [ + 105, + 709, + 504, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 708, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 506, + 723 + ], + "score": 1.0, + "content": "Ernest K Ryu and Stephen Boyd. Primer on monotone operator methods. Appl. Comput. 
Math, 15(1):", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 115, + 720, + 166, + 732 + ], + "spans": [ + { + "bbox": [ + 115, + 720, + 166, + 732 + ], + "score": 1.0, + "content": "3–43, 2016.", + "type": "text" + } + ], + "index": 46 + } + ], + "index": 45.5, + "bbox_fs": [ + 105, + 708, + 506, + 732 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 105, + 82, + 505, + 105 + ], + "lines": [ + { + "bbox": [ + 106, + 82, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 82, + 505, + 95 + ], + "score": 1.0, + "content": "Ernest K. Ryu, Kun Yuan, and Wotao Yin. Ode analysis of stochastic gradient methods with optimism", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 115, + 93, + 294, + 106 + ], + "spans": [ + { + "bbox": [ + 115, + 93, + 294, + 106 + ], + "score": 1.0, + "content": "and anchoring for minimax problems, 2020.", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 0.5 + }, + { + "type": "text", + "bbox": [ + 107, + 112, + 504, + 146 + ], + "lines": [ + { + "bbox": [ + 106, + 112, + 506, + 126 + ], + "spans": [ + { + "bbox": [ + 106, + 112, + 506, + 126 + ], + "score": 1.0, + "content": "Gesualdo Scutari, Francisco Facchinei, Jong-Shi Pang, and Daniel P Palomar. Real and complex", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 115, + 124, + 507, + 137 + ], + "spans": [ + { + "bbox": [ + 115, + 124, + 507, + 137 + ], + "score": 1.0, + "content": "monotone communication games. 
IEEE Transactions on Information Theory, 60(7):4197–4231,", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 115, + 133, + 143, + 147 + ], + "spans": [ + { + "bbox": [ + 115, + 133, + 143, + 147 + ], + "score": 1.0, + "content": "2014.", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 3 + }, + { + "type": "text", + "bbox": [ + 107, + 154, + 506, + 199 + ], + "lines": [ + { + "bbox": [ + 105, + 154, + 506, + 168 + ], + "spans": [ + { + "bbox": [ + 105, + 154, + 506, + 168 + ], + "score": 1.0, + "content": "Soroosh Shafieezadeh-Abadeh, Peyman Mohajerin Esfahani, and Daniel Kuhn. Distributionally", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 115, + 165, + 507, + 178 + ], + "spans": [ + { + "bbox": [ + 115, + 165, + 507, + 178 + ], + "score": 1.0, + "content": "robust logistic regression. In Corinna Cortes, Neil D. Lawrence, Daniel D. Lee, Masashi Sugiyama,", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 115, + 176, + 507, + 190 + ], + "spans": [ + { + "bbox": [ + 115, + 176, + 507, + 190 + ], + "score": 1.0, + "content": "and Roman Garnett (eds.), Advances in Neural Information Processing Systems, volume 28, pp.", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 116, + 188, + 267, + 199 + ], + "spans": [ + { + "bbox": [ + 116, + 188, + 267, + 199 + ], + "score": 1.0, + "content": "1576–1584. Curran Associates, 2015.", + "type": "text" + } + ], + "index": 8 + } + ], + "index": 6.5 + }, + { + "type": "text", + "bbox": [ + 106, + 206, + 506, + 241 + ], + "lines": [ + { + "bbox": [ + 106, + 206, + 505, + 220 + ], + "spans": [ + { + "bbox": [ + 106, + 206, + 505, + 220 + ], + "score": 1.0, + "content": "Aman Sinha, Hongseok Namkoong, and John Duchi. 
Certifying some distributional robustness with", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 116, + 219, + 507, + 230 + ], + "spans": [ + { + "bbox": [ + 116, + 219, + 507, + 230 + ], + "score": 1.0, + "content": "principled adversarial training. In International Conference on Learning Representations, 2018.", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 116, + 228, + 389, + 243 + ], + "spans": [ + { + "bbox": [ + 116, + 228, + 325, + 243 + ], + "score": 1.0, + "content": "URL https://openreview.net/forum?id", + "type": "text" + }, + { + "bbox": [ + 325, + 231, + 331, + 239 + ], + "score": 0.49, + "content": "=", + "type": "inline_equation" + }, + { + "bbox": [ + 332, + 228, + 389, + 243 + ], + "score": 1.0, + "content": "Hk6kPgZA-.", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 10 + }, + { + "type": "text", + "bbox": [ + 107, + 248, + 505, + 271 + ], + "lines": [ + { + "bbox": [ + 105, + 248, + 505, + 262 + ], + "spans": [ + { + "bbox": [ + 105, + 248, + 505, + 262 + ], + "score": 1.0, + "content": "Paul Tseng. A modified forward-backward splitting method for maximal monotone mappings. SIAM", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 115, + 260, + 358, + 271 + ], + "spans": [ + { + "bbox": [ + 115, + 260, + 358, + 271 + ], + "score": 1.0, + "content": "Journal on Control and Optimization, 38(2):431–446, 2000.", + "type": "text" + } + ], + "index": 13 + } + ], + "index": 12.5 + }, + { + "type": "text", + "bbox": [ + 106, + 279, + 505, + 302 + ], + "lines": [ + { + "bbox": [ + 106, + 279, + 506, + 291 + ], + "spans": [ + { + "bbox": [ + 106, + 279, + 506, + 291 + ], + "score": 1.0, + "content": "Nguyen Van Dung and Bang Cong Vu. 
Convergence analysis of the stochastic reflected forward-", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 115, + 290, + 400, + 302 + ], + "spans": [ + { + "bbox": [ + 115, + 290, + 400, + 302 + ], + "score": 1.0, + "content": "backward splitting algorithm. arXiv preprint arXiv:2102.08906, 2021.", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 14.5 + }, + { + "type": "text", + "bbox": [ + 107, + 309, + 504, + 333 + ], + "lines": [ + { + "bbox": [ + 106, + 309, + 505, + 322 + ], + "spans": [ + { + "bbox": [ + 106, + 309, + 505, + 322 + ], + "score": 1.0, + "content": "Christina Wadsworth, Francesca Vera, and Chris Piech. Achieving fairness through adversarial", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 115, + 321, + 477, + 333 + ], + "spans": [ + { + "bbox": [ + 115, + 321, + 477, + 333 + ], + "score": 1.0, + "content": "learning: an application to recidivism prediction. arXiv preprint arXiv:1807.00199, 2018.", + "type": "text" + } + ], + "index": 17 + } + ], + "index": 16.5 + }, + { + "type": "text", + "bbox": [ + 106, + 340, + 504, + 363 + ], + "lines": [ + { + "bbox": [ + 105, + 339, + 505, + 353 + ], + "spans": [ + { + "bbox": [ + 105, + 339, + 505, + 353 + ], + "score": 1.0, + "content": "Xiaohan Yan and Jacob Bien. Rare feature selection in high dimensions. Journal of the American", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 115, + 350, + 381, + 365 + ], + "spans": [ + { + "bbox": [ + 115, + 350, + 381, + 365 + ], + "score": 1.0, + "content": "Statistical Association, 2020. Published online, to appear in print.", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 18.5 + }, + { + "type": "text", + "bbox": [ + 105, + 370, + 505, + 394 + ], + "lines": [ + { + "bbox": [ + 105, + 369, + 506, + 385 + ], + "spans": [ + { + "bbox": [ + 105, + 369, + 506, + 385 + ], + "score": 1.0, + "content": "Yaodong Yu, Tianyi Lin, Eric Mazumdar, and Michael I Jordan. 
Fast distributionally robust learning", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 115, + 382, + 464, + 394 + ], + "spans": [ + { + "bbox": [ + 115, + 382, + 464, + 394 + ], + "score": 1.0, + "content": "with variance reduced min-max optimization. arXiv preprint arXiv:2104.13326, 2021.", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 20.5 + }, + { + "type": "text", + "bbox": [ + 107, + 401, + 505, + 435 + ], + "lines": [ + { + "bbox": [ + 105, + 401, + 506, + 414 + ], + "spans": [ + { + "bbox": [ + 105, + 401, + 506, + 414 + ], + "score": 1.0, + "content": "Alp Yurtsever, Bang Cong Vu, and Volkan Cevher. Stochastic three-composite convex minimization.", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 116, + 413, + 505, + 425 + ], + "spans": [ + { + "bbox": [ + 116, + 413, + 505, + 425 + ], + "score": 1.0, + "content": "In D. Lee, M. Sugiyama, U. Luxburg, I. Guyon, and R. Garnett (eds.), Advances in Neural", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 116, + 424, + 399, + 436 + ], + "spans": [ + { + "bbox": [ + 116, + 424, + 399, + 436 + ], + "score": 1.0, + "content": "Information Processing Systems, volume 29. Curran Associates, 2016.", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 23 + }, + { + "type": "text", + "bbox": [ + 107, + 443, + 506, + 477 + ], + "lines": [ + { + "bbox": [ + 106, + 443, + 505, + 456 + ], + "spans": [ + { + "bbox": [ + 106, + 443, + 505, + 456 + ], + "score": 1.0, + "content": "Brian Hu Zhang, Blake Lemoine, and Margaret Mitchell. Mitigating unwanted biases with adversarial", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 115, + 454, + 506, + 466 + ], + "spans": [ + { + "bbox": [ + 115, + 454, + 506, + 466 + ], + "score": 1.0, + "content": "learning. In Proceedings of the 2018 AAAI/ACM Conference on AI, Ethics, and Society, pp. 
335–", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 115, + 464, + 162, + 477 + ], + "spans": [ + { + "bbox": [ + 115, + 464, + 162, + 477 + ], + "score": 1.0, + "content": "340, 2018.", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 26 + }, + { + "type": "title", + "bbox": [ + 108, + 498, + 408, + 511 + ], + "lines": [ + { + "bbox": [ + 105, + 497, + 410, + 514 + ], + "spans": [ + { + "bbox": [ + 105, + 497, + 410, + 514 + ], + "score": 1.0, + "content": "A ML APPLICATIONS OF THE MONOTONE INCLUSION (1)", + "type": "text" + } + ], + "index": 28 + } + ], + "index": 28 + }, + { + "type": "text", + "bbox": [ + 107, + 523, + 505, + 546 + ], + "lines": [ + { + "bbox": [ + 105, + 523, + 505, + 537 + ], + "spans": [ + { + "bbox": [ + 105, + 523, + 505, + 537 + ], + "score": 1.0, + "content": "There are two main classes of applications of (1) in ML: optimization problems and saddle-point", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 104, + 535, + 138, + 547 + ], + "spans": [ + { + "bbox": [ + 104, + 535, + 138, + 547 + ], + "score": 1.0, + "content": "games.", + "type": "text" + } + ], + "index": 30 + } + ], + "index": 29.5 + }, + { + "type": "text", + "bbox": [ + 106, + 558, + 505, + 592 + ], + "lines": [ + { + "bbox": [ + 106, + 558, + 505, + 571 + ], + "spans": [ + { + "bbox": [ + 106, + 558, + 505, + 571 + ], + "score": 1.0, + "content": "Optimization Problems In this case the monotone inclusion arises from finding the zero of a", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 106, + 570, + 504, + 582 + ], + "spans": [ + { + "bbox": [ + 106, + 570, + 504, + 582 + ], + "score": 1.0, + "content": "sum of subgradients of convex functions, as discussed in Section 2. 
It is typical in ML to solve the", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 106, + 581, + 255, + 593 + ], + "spans": [ + { + "bbox": [ + 106, + 581, + 255, + 593 + ], + "score": 1.0, + "content": "empirical risk minimization problem", + "type": "text" + } + ], + "index": 33 + } + ], + "index": 32 + }, + { + "type": "interline_equation", + "bbox": [ + 243, + 597, + 368, + 631 + ], + "lines": [ + { + "bbox": [ + 243, + 597, + 368, + 631 + ], + "spans": [ + { + "bbox": [ + 243, + 597, + 368, + 631 + ], + "score": 0.94, + "content": "\\operatorname* { m i n } _ { x \\in \\mathbb { R } ^ { d } } \\frac { 1 } { m } \\sum _ { j = 1 } ^ { m } f _ { j } ( x ) + \\sum _ { i = 1 } ^ { n } r _ { i } ( x )", + "type": "interline_equation", + "image_path": "ffa360eabdcc6b4c5c52b0b2d269d8c4934e3297e827c1444fa980cafc1ff442.jpg" + } + ] + } + ], + "index": 34.5, + "virtual_lines": [ + { + "bbox": [ + 243, + 597, + 368, + 614.0 + ], + "spans": [], + "index": 34 + }, + { + "bbox": [ + 243, + 614.0, + 368, + 631.0 + ], + "spans": [], + "index": 35 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 636, + 505, + 693 + ], + "lines": [ + { + "bbox": [ + 106, + 637, + 505, + 649 + ], + "spans": [ + { + "bbox": [ + 106, + 637, + 153, + 649 + ], + "score": 1.0, + "content": "over a size-", + "type": "text" + }, + { + "bbox": [ + 153, + 639, + 162, + 647 + ], + "score": 0.71, + "content": "m", + "type": "inline_equation" + }, + { + "bbox": [ + 163, + 637, + 362, + 649 + ], + "score": 1.0, + "content": "dataset. 
Usually, the gradient of the loss function", + "type": "text" + }, + { + "bbox": [ + 362, + 637, + 372, + 649 + ], + "score": 0.89, + "content": "f _ { j }", + "type": "inline_equation" + }, + { + "bbox": [ + 373, + 637, + 449, + 649 + ], + "score": 1.0, + "content": "for each datapoint", + "type": "text" + }, + { + "bbox": [ + 449, + 638, + 455, + 649 + ], + "score": 0.84, + "content": "j", + "type": "inline_equation" + }, + { + "bbox": [ + 455, + 637, + 505, + 649 + ], + "score": 1.0, + "content": "is Lipschitz", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 106, + 648, + 505, + 660 + ], + "spans": [ + { + "bbox": [ + 106, + 648, + 203, + 660 + ], + "score": 1.0, + "content": "continuous. The terms", + "type": "text" + }, + { + "bbox": [ + 203, + 650, + 213, + 659 + ], + "score": 0.83, + "content": "r _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 213, + 648, + 505, + 660 + ], + "score": 1.0, + "content": "may be regularizers used to reduce overfitting or encourage structural", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 658, + 506, + 672 + ], + "spans": [ + { + "bbox": [ + 105, + 658, + 506, + 672 + ], + "score": 1.0, + "content": "properties such as sparsity or low matrix rank. They also may represent constraints on the parameters", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 106, + 671, + 504, + 682 + ], + "spans": [ + { + "bbox": [ + 106, + 671, + 504, + 682 + ], + "score": 1.0, + "content": "such as nonnegativity or the being in the probability simplex. Crucially, these regularizers are rarely", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 680, + 410, + 694 + ], + "spans": [ + { + "bbox": [ + 105, + 680, + 410, + 694 + ], + "score": 1.0, + "content": "differentiable. 
The first-order necessary condition for the solution of (16) is", + "type": "text" + } + ], + "index": 40 + } + ], + "index": 38 + }, + { + "type": "interline_equation", + "bbox": [ + 248, + 698, + 362, + 730 + ], + "lines": [ + { + "bbox": [ + 248, + 698, + 362, + 730 + ], + "spans": [ + { + "bbox": [ + 248, + 698, + 362, + 730 + ], + "score": 0.94, + "content": "0 \\in \\nabla f ( x ^ { * } ) + \\sum _ { i = 1 } ^ { n } \\partial r _ { i } ( x ^ { * } ) ,", + "type": "interline_equation", + "image_path": "d195774013d745d0c2effdff7e030c1cff3ad83794cc046f14aa129a6de7e84c.jpg" + } + ] + } + ], + "index": 41.5, + "virtual_lines": [ + { + "bbox": [ + 248, + 698, + 362, + 714.0 + ], + "spans": [], + "index": 41 + }, + { + "bbox": [ + 248, + 714.0, + 362, + 730.0 + ], + "spans": [], + "index": 42 + } + ] + } + ], + "page_idx": 13, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 27, + 308, + 37 + ], + "lines": [ + { + "bbox": [ + 107, + 26, + 308, + 38 + ], + "spans": [ + { + "bbox": [ + 107, + 26, + 308, + 38 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 760 + ], + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 764 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 764 + ], + "score": 1.0, + "content": "14", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 105, + 82, + 505, + 105 + ], + "lines": [ + { + "bbox": [ + 106, + 82, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 82, + 505, + 95 + ], + "score": 1.0, + "content": "Ernest K. Ryu, Kun Yuan, and Wotao Yin. 
Ode analysis of stochastic gradient methods with optimism", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 115, + 93, + 294, + 106 + ], + "spans": [ + { + "bbox": [ + 115, + 93, + 294, + 106 + ], + "score": 1.0, + "content": "and anchoring for minimax problems, 2020.", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 0.5, + "bbox_fs": [ + 106, + 82, + 505, + 106 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 112, + 504, + 146 + ], + "lines": [ + { + "bbox": [ + 106, + 112, + 506, + 126 + ], + "spans": [ + { + "bbox": [ + 106, + 112, + 506, + 126 + ], + "score": 1.0, + "content": "Gesualdo Scutari, Francisco Facchinei, Jong-Shi Pang, and Daniel P Palomar. Real and complex", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 115, + 124, + 507, + 137 + ], + "spans": [ + { + "bbox": [ + 115, + 124, + 507, + 137 + ], + "score": 1.0, + "content": "monotone communication games. IEEE Transactions on Information Theory, 60(7):4197–4231,", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 115, + 133, + 143, + 147 + ], + "spans": [ + { + "bbox": [ + 115, + 133, + 143, + 147 + ], + "score": 1.0, + "content": "2014.", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 3, + "bbox_fs": [ + 106, + 112, + 507, + 147 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 154, + 506, + 199 + ], + "lines": [ + { + "bbox": [ + 105, + 154, + 506, + 168 + ], + "spans": [ + { + "bbox": [ + 105, + 154, + 506, + 168 + ], + "score": 1.0, + "content": "Soroosh Shafieezadeh-Abadeh, Peyman Mohajerin Esfahani, and Daniel Kuhn. Distributionally", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 115, + 165, + 507, + 178 + ], + "spans": [ + { + "bbox": [ + 115, + 165, + 507, + 178 + ], + "score": 1.0, + "content": "robust logistic regression. In Corinna Cortes, Neil D. Lawrence, Daniel D. 
Lee, Masashi Sugiyama,", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 115, + 176, + 507, + 190 + ], + "spans": [ + { + "bbox": [ + 115, + 176, + 507, + 190 + ], + "score": 1.0, + "content": "and Roman Garnett (eds.), Advances in Neural Information Processing Systems, volume 28, pp.", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 116, + 188, + 267, + 199 + ], + "spans": [ + { + "bbox": [ + 116, + 188, + 267, + 199 + ], + "score": 1.0, + "content": "1576–1584. Curran Associates, 2015.", + "type": "text" + } + ], + "index": 8 + } + ], + "index": 6.5, + "bbox_fs": [ + 105, + 154, + 507, + 199 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 206, + 506, + 241 + ], + "lines": [ + { + "bbox": [ + 106, + 206, + 505, + 220 + ], + "spans": [ + { + "bbox": [ + 106, + 206, + 505, + 220 + ], + "score": 1.0, + "content": "Aman Sinha, Hongseok Namkoong, and John Duchi. Certifying some distributional robustness with", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 116, + 219, + 507, + 230 + ], + "spans": [ + { + "bbox": [ + 116, + 219, + 507, + 230 + ], + "score": 1.0, + "content": "principled adversarial training. 
In International Conference on Learning Representations, 2018.", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 116, + 228, + 389, + 243 + ], + "spans": [ + { + "bbox": [ + 116, + 228, + 325, + 243 + ], + "score": 1.0, + "content": "URL https://openreview.net/forum?id", + "type": "text" + }, + { + "bbox": [ + 325, + 231, + 331, + 239 + ], + "score": 0.49, + "content": "=", + "type": "inline_equation" + }, + { + "bbox": [ + 332, + 228, + 389, + 243 + ], + "score": 1.0, + "content": "Hk6kPgZA-.", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 10, + "bbox_fs": [ + 106, + 206, + 507, + 243 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 248, + 505, + 271 + ], + "lines": [ + { + "bbox": [ + 105, + 248, + 505, + 262 + ], + "spans": [ + { + "bbox": [ + 105, + 248, + 505, + 262 + ], + "score": 1.0, + "content": "Paul Tseng. A modified forward-backward splitting method for maximal monotone mappings. SIAM", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 115, + 260, + 358, + 271 + ], + "spans": [ + { + "bbox": [ + 115, + 260, + 358, + 271 + ], + "score": 1.0, + "content": "Journal on Control and Optimization, 38(2):431–446, 2000.", + "type": "text" + } + ], + "index": 13 + } + ], + "index": 12.5, + "bbox_fs": [ + 105, + 248, + 505, + 271 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 279, + 505, + 302 + ], + "lines": [ + { + "bbox": [ + 106, + 279, + 506, + 291 + ], + "spans": [ + { + "bbox": [ + 106, + 279, + 506, + 291 + ], + "score": 1.0, + "content": "Nguyen Van Dung and Bang Cong Vu. Convergence analysis of the stochastic reflected forward-", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 115, + 290, + 400, + 302 + ], + "spans": [ + { + "bbox": [ + 115, + 290, + 400, + 302 + ], + "score": 1.0, + "content": "backward splitting algorithm. 
arXiv preprint arXiv:2102.08906, 2021.", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 14.5, + "bbox_fs": [ + 106, + 279, + 506, + 302 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 309, + 504, + 333 + ], + "lines": [ + { + "bbox": [ + 106, + 309, + 505, + 322 + ], + "spans": [ + { + "bbox": [ + 106, + 309, + 505, + 322 + ], + "score": 1.0, + "content": "Christina Wadsworth, Francesca Vera, and Chris Piech. Achieving fairness through adversarial", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 115, + 321, + 477, + 333 + ], + "spans": [ + { + "bbox": [ + 115, + 321, + 477, + 333 + ], + "score": 1.0, + "content": "learning: an application to recidivism prediction. arXiv preprint arXiv:1807.00199, 2018.", + "type": "text" + } + ], + "index": 17 + } + ], + "index": 16.5, + "bbox_fs": [ + 106, + 309, + 505, + 333 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 340, + 504, + 363 + ], + "lines": [ + { + "bbox": [ + 105, + 339, + 505, + 353 + ], + "spans": [ + { + "bbox": [ + 105, + 339, + 505, + 353 + ], + "score": 1.0, + "content": "Xiaohan Yan and Jacob Bien. Rare feature selection in high dimensions. Journal of the American", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 115, + 350, + 381, + 365 + ], + "spans": [ + { + "bbox": [ + 115, + 350, + 381, + 365 + ], + "score": 1.0, + "content": "Statistical Association, 2020. Published online, to appear in print.", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 18.5, + "bbox_fs": [ + 105, + 339, + 505, + 365 + ] + }, + { + "type": "text", + "bbox": [ + 105, + 370, + 505, + 394 + ], + "lines": [ + { + "bbox": [ + 105, + 369, + 506, + 385 + ], + "spans": [ + { + "bbox": [ + 105, + 369, + 506, + 385 + ], + "score": 1.0, + "content": "Yaodong Yu, Tianyi Lin, Eric Mazumdar, and Michael I Jordan. 
Fast distributionally robust learning", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 115, + 382, + 464, + 394 + ], + "spans": [ + { + "bbox": [ + 115, + 382, + 464, + 394 + ], + "score": 1.0, + "content": "with variance reduced min-max optimization. arXiv preprint arXiv:2104.13326, 2021.", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 20.5, + "bbox_fs": [ + 105, + 369, + 506, + 394 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 401, + 505, + 435 + ], + "lines": [ + { + "bbox": [ + 105, + 401, + 506, + 414 + ], + "spans": [ + { + "bbox": [ + 105, + 401, + 506, + 414 + ], + "score": 1.0, + "content": "Alp Yurtsever, Bang Cong Vu, and Volkan Cevher. Stochastic three-composite convex minimization.", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 116, + 413, + 505, + 425 + ], + "spans": [ + { + "bbox": [ + 116, + 413, + 505, + 425 + ], + "score": 1.0, + "content": "In D. Lee, M. Sugiyama, U. Luxburg, I. Guyon, and R. Garnett (eds.), Advances in Neural", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 116, + 424, + 399, + 436 + ], + "spans": [ + { + "bbox": [ + 116, + 424, + 399, + 436 + ], + "score": 1.0, + "content": "Information Processing Systems, volume 29. Curran Associates, 2016.", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 23, + "bbox_fs": [ + 105, + 401, + 506, + 436 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 443, + 506, + 477 + ], + "lines": [ + { + "bbox": [ + 106, + 443, + 505, + 456 + ], + "spans": [ + { + "bbox": [ + 106, + 443, + 505, + 456 + ], + "score": 1.0, + "content": "Brian Hu Zhang, Blake Lemoine, and Margaret Mitchell. Mitigating unwanted biases with adversarial", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 115, + 454, + 506, + 466 + ], + "spans": [ + { + "bbox": [ + 115, + 454, + 506, + 466 + ], + "score": 1.0, + "content": "learning. In Proceedings of the 2018 AAAI/ACM Conference on AI, Ethics, and Society, pp. 
335–", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 115, + 464, + 162, + 477 + ], + "spans": [ + { + "bbox": [ + 115, + 464, + 162, + 477 + ], + "score": 1.0, + "content": "340, 2018.", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 26, + "bbox_fs": [ + 106, + 443, + 506, + 477 + ] + }, + { + "type": "title", + "bbox": [ + 108, + 498, + 408, + 511 + ], + "lines": [ + { + "bbox": [ + 105, + 497, + 410, + 514 + ], + "spans": [ + { + "bbox": [ + 105, + 497, + 410, + 514 + ], + "score": 1.0, + "content": "A ML APPLICATIONS OF THE MONOTONE INCLUSION (1)", + "type": "text" + } + ], + "index": 28 + } + ], + "index": 28 + }, + { + "type": "text", + "bbox": [ + 107, + 523, + 505, + 546 + ], + "lines": [ + { + "bbox": [ + 105, + 523, + 505, + 537 + ], + "spans": [ + { + "bbox": [ + 105, + 523, + 505, + 537 + ], + "score": 1.0, + "content": "There are two main classes of applications of (1) in ML: optimization problems and saddle-point", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 104, + 535, + 138, + 547 + ], + "spans": [ + { + "bbox": [ + 104, + 535, + 138, + 547 + ], + "score": 1.0, + "content": "games.", + "type": "text" + } + ], + "index": 30 + } + ], + "index": 29.5, + "bbox_fs": [ + 104, + 523, + 505, + 547 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 558, + 505, + 592 + ], + "lines": [ + { + "bbox": [ + 106, + 558, + 505, + 571 + ], + "spans": [ + { + "bbox": [ + 106, + 558, + 505, + 571 + ], + "score": 1.0, + "content": "Optimization Problems In this case the monotone inclusion arises from finding the zero of a", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 106, + 570, + 504, + 582 + ], + "spans": [ + { + "bbox": [ + 106, + 570, + 504, + 582 + ], + "score": 1.0, + "content": "sum of subgradients of convex functions, as discussed in Section 2. 
It is typical in ML to solve the", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 106, + 581, + 255, + 593 + ], + "spans": [ + { + "bbox": [ + 106, + 581, + 255, + 593 + ], + "score": 1.0, + "content": "empirical risk minimization problem", + "type": "text" + } + ], + "index": 33 + } + ], + "index": 32, + "bbox_fs": [ + 106, + 558, + 505, + 593 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 243, + 597, + 368, + 631 + ], + "lines": [ + { + "bbox": [ + 243, + 597, + 368, + 631 + ], + "spans": [ + { + "bbox": [ + 243, + 597, + 368, + 631 + ], + "score": 0.94, + "content": "\\operatorname* { m i n } _ { x \\in \\mathbb { R } ^ { d } } \\frac { 1 } { m } \\sum _ { j = 1 } ^ { m } f _ { j } ( x ) + \\sum _ { i = 1 } ^ { n } r _ { i } ( x )", + "type": "interline_equation", + "image_path": "ffa360eabdcc6b4c5c52b0b2d269d8c4934e3297e827c1444fa980cafc1ff442.jpg" + } + ] + } + ], + "index": 34.5, + "virtual_lines": [ + { + "bbox": [ + 243, + 597, + 368, + 614.0 + ], + "spans": [], + "index": 34 + }, + { + "bbox": [ + 243, + 614.0, + 368, + 631.0 + ], + "spans": [], + "index": 35 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 636, + 505, + 693 + ], + "lines": [ + { + "bbox": [ + 106, + 637, + 505, + 649 + ], + "spans": [ + { + "bbox": [ + 106, + 637, + 153, + 649 + ], + "score": 1.0, + "content": "over a size-", + "type": "text" + }, + { + "bbox": [ + 153, + 639, + 162, + 647 + ], + "score": 0.71, + "content": "m", + "type": "inline_equation" + }, + { + "bbox": [ + 163, + 637, + 362, + 649 + ], + "score": 1.0, + "content": "dataset. 
Usually, the gradient of the loss function", + "type": "text" + }, + { + "bbox": [ + 362, + 637, + 372, + 649 + ], + "score": 0.89, + "content": "f _ { j }", + "type": "inline_equation" + }, + { + "bbox": [ + 373, + 637, + 449, + 649 + ], + "score": 1.0, + "content": "for each datapoint", + "type": "text" + }, + { + "bbox": [ + 449, + 638, + 455, + 649 + ], + "score": 0.84, + "content": "j", + "type": "inline_equation" + }, + { + "bbox": [ + 455, + 637, + 505, + 649 + ], + "score": 1.0, + "content": "is Lipschitz", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 106, + 648, + 505, + 660 + ], + "spans": [ + { + "bbox": [ + 106, + 648, + 203, + 660 + ], + "score": 1.0, + "content": "continuous. The terms", + "type": "text" + }, + { + "bbox": [ + 203, + 650, + 213, + 659 + ], + "score": 0.83, + "content": "r _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 213, + 648, + 505, + 660 + ], + "score": 1.0, + "content": "may be regularizers used to reduce overfitting or encourage structural", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 658, + 506, + 672 + ], + "spans": [ + { + "bbox": [ + 105, + 658, + 506, + 672 + ], + "score": 1.0, + "content": "properties such as sparsity or low matrix rank. They also may represent constraints on the parameters", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 106, + 671, + 504, + 682 + ], + "spans": [ + { + "bbox": [ + 106, + 671, + 504, + 682 + ], + "score": 1.0, + "content": "such as nonnegativity or the being in the probability simplex. Crucially, these regularizers are rarely", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 680, + 410, + 694 + ], + "spans": [ + { + "bbox": [ + 105, + 680, + 410, + 694 + ], + "score": 1.0, + "content": "differentiable. 
The first-order necessary condition for the solution of (16) is", + "type": "text" + } + ], + "index": 40 + } + ], + "index": 38, + "bbox_fs": [ + 105, + 637, + 506, + 694 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 248, + 698, + 362, + 730 + ], + "lines": [ + { + "bbox": [ + 248, + 698, + 362, + 730 + ], + "spans": [ + { + "bbox": [ + 248, + 698, + 362, + 730 + ], + "score": 0.94, + "content": "0 \\in \\nabla f ( x ^ { * } ) + \\sum _ { i = 1 } ^ { n } \\partial r _ { i } ( x ^ { * } ) ,", + "type": "interline_equation", + "image_path": "d195774013d745d0c2effdff7e030c1cff3ad83794cc046f14aa129a6de7e84c.jpg" + } + ] + } + ], + "index": 41.5, + "virtual_lines": [ + { + "bbox": [ + 248, + 698, + 362, + 714.0 + ], + "spans": [], + "index": 41 + }, + { + "bbox": [ + 248, + 714.0, + 362, + 730.0 + ], + "spans": [], + "index": 42 + } + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 104, + 81, + 505, + 108 + ], + "lines": [ + { + "bbox": [ + 101, + 76, + 509, + 101 + ], + "spans": [ + { + "bbox": [ + 101, + 76, + 133, + 101 + ], + "score": 1.0, + "content": "where", + "type": "text" + }, + { + "bbox": [ + 133, + 81, + 227, + 96 + ], + "score": 0.91, + "content": "\\begin{array} { r } { f ( x ) \\doteq \\frac { 1 } { m } \\sum _ { j = 1 } ^ { m } f _ { j } ( x ) } \\end{array}", + "type": "inline_equation" + }, + { + "bbox": [ + 227, + 76, + 249, + 101 + ], + "score": 1.0, + "content": ", thus", + "type": "text" + }, + { + "bbox": [ + 250, + 81, + 360, + 96 + ], + "score": 0.93, + "content": "\\begin{array} { r } { \\nabla f ( x ) \\doteq \\frac { 1 } { m } \\sum _ { j = 1 } ^ { m } \\nabla f _ { j } ( x ) } \\end{array}", + "type": "inline_equation" + }, + { + "bbox": [ + 360, + 76, + 509, + 101 + ], + "score": 1.0, + "content": ". 
The inclusion (17) is a special case", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 95, + 432, + 108 + ], + "spans": [ + { + "bbox": [ + 106, + 96, + 368, + 108 + ], + "score": 1.0, + "content": "of (1), and our method may use the standard stochastic oracle for", + "type": "text" + }, + { + "bbox": [ + 368, + 95, + 397, + 108 + ], + "score": 0.92, + "content": "\\nabla f ( x )", + "type": "inline_equation" + }, + { + "bbox": [ + 397, + 96, + 432, + 108 + ], + "score": 1.0, + "content": ", namely", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 0.5 + }, + { + "type": "interline_equation", + "bbox": [ + 271, + 111, + 339, + 143 + ], + "lines": [ + { + "bbox": [ + 271, + 111, + 339, + 143 + ], + "spans": [ + { + "bbox": [ + 271, + 111, + 339, + 143 + ], + "score": 0.93, + "content": "\\frac { 1 } { | \\mathbf { B } | } \\sum _ { j \\in \\mathbf { B } } \\nabla f _ { j } ( z )", + "type": "interline_equation", + "image_path": "1abda774af20927066c1bb8d4cd95642bf6bbabf6bfd459bd0cbabc54a594cbd.jpg" + } + ] + } + ], + "index": 2.5, + "virtual_lines": [ + { + "bbox": [ + 271, + 111, + 339, + 127.0 + ], + "spans": [], + "index": 2 + }, + { + "bbox": [ + 271, + 127.0, + 339, + 143.0 + ], + "spans": [], + "index": 3 + } + ] + }, + { + "type": "text", + "bbox": [ + 105, + 147, + 431, + 160 + ], + "lines": [ + { + "bbox": [ + 105, + 146, + 432, + 162 + ], + "spans": [ + { + "bbox": [ + 105, + 146, + 360, + 162 + ], + "score": 1.0, + "content": "which subsamples a randomly selected minibatch of datapoints", + "type": "text" + }, + { + "bbox": [ + 360, + 148, + 428, + 160 + ], + "score": 0.92, + "content": "\\mathbf { B } \\in \\{ 1 , \\dots , m \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 428, + 146, + 432, + 162 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 4 + }, + { + "type": "text", + "bbox": [ + 105, + 171, + 395, + 184 + ], + "lines": [ + { + "bbox": [ + 105, + 169, + 395, + 
187 + ], + "spans": [ + { + "bbox": [ + 105, + 169, + 395, + 187 + ], + "score": 1.0, + "content": "Games Consider the following nonsmooth Nash equilibrium problem", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 5 + }, + { + "type": "interline_equation", + "bbox": [ + 143, + 187, + 468, + 221 + ], + "lines": [ + { + "bbox": [ + 143, + 187, + 468, + 221 + ], + "spans": [ + { + "bbox": [ + 143, + 187, + 468, + 221 + ], + "score": 0.93, + "content": "x ^ { * } \\in \\underset { x \\in \\mathbb { R } ^ { d _ { x } } } { \\arg \\operatorname* { m i n } } F ( x , y ^ { * } ) + \\underset { i = 1 } { \\overset { n _ { 1 } } { \\sum } } r _ { i } ( x ) \\quad \\mathrm { a n d } \\quad y ^ { * } \\in \\underset { y \\in \\mathbb { R } ^ { d _ { y } } } { \\arg \\operatorname* { m i n } } G ( x ^ { * } , y ) + \\underset { i = 1 } { \\overset { n _ { 2 } } { \\sum } } d _ { i } ( y ) .", + "type": "interline_equation", + "image_path": "3835fb4d271dfca3981f89d019dadd6c40e1099ade9cd99e3cb6abe535e31b94.jpg" + } + ] + } + ], + "index": 7, + "virtual_lines": [ + { + "bbox": [ + 143, + 187, + 468, + 198.33333333333334 + ], + "spans": [], + "index": 6 + }, + { + "bbox": [ + 143, + 198.33333333333334, + 468, + 209.66666666666669 + ], + "spans": [], + "index": 7 + }, + { + "bbox": [ + 143, + 209.66666666666669, + 468, + 221.00000000000003 + ], + "spans": [], + "index": 8 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 226, + 504, + 271 + ], + "lines": [ + { + "bbox": [ + 101, + 220, + 459, + 250 + ], + "spans": [ + { + "bbox": [ + 101, + 220, + 150, + 250 + ], + "score": 1.0, + "content": "The terms player’s st", + "type": "text" + }, + { + "bbox": [ + 151, + 226, + 198, + 239 + ], + "score": 0.93, + "content": "\\scriptstyle \\sum _ { i = 1 } ^ { n _ { 1 } } r _ { i } ( x )", + "type": "inline_equation" + }, + { + "bbox": [ + 199, + 220, + 218, + 250 + ], + "score": 1.0, + "content": "and e tha", + "type": "text" + }, + { + "bbox": [ + 218, + 226, + 266, + 240 + 
], + "score": 0.92, + "content": "\\textstyle \\sum _ { i = 1 } ^ { n _ { 2 } } d _ { i } ( y )", + "type": "inline_equation" + }, + { + "bbox": [ + 266, + 220, + 459, + 250 + ], + "score": 1.0, + "content": "once again represent regularizers and constrai (saddle-point) problems correspond to having", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 459, + 237, + 505, + 250 + ], + "spans": [ + { + "bbox": [ + 459, + 237, + 505, + 250 + ], + "score": 0.91, + "content": "F ( x , y ) =", + "type": "inline_equation" + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 248, + 506, + 261 + ], + "spans": [ + { + "bbox": [ + 107, + 249, + 146, + 261 + ], + "score": 0.92, + "content": "- G ( x , y )", + "type": "inline_equation" + }, + { + "bbox": [ + 146, + 248, + 506, + 261 + ], + "score": 1.0, + "content": ". Under appropriate convexity conditions and constraint qualifications, the solutions of (18)", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 259, + 439, + 271 + ], + "spans": [ + { + "bbox": [ + 105, + 259, + 439, + 271 + ], + "score": 1.0, + "content": "correspond to the solutions of the following monotone inclusion in the form of (1):", + "type": "text" + } + ], + "index": 12 + } + ], + "index": 10.5 + }, + { + "type": "interline_equation", + "bbox": [ + 187, + 276, + 423, + 312 + ], + "lines": [ + { + "bbox": [ + 187, + 276, + 423, + 312 + ], + "spans": [ + { + "bbox": [ + 187, + 276, + 423, + 312 + ], + "score": 0.94, + "content": "0 \\in \\left[ \\begin{array} { l } { \\nabla _ { x } F ( x ^ { * } , y ^ { * } ) } \\\\ { \\nabla _ { y } G ( x ^ { * } , y ^ { * } ) } \\end{array} \\right] + \\sum _ { i = 1 } ^ { \\operatorname* { m a x } \\{ n _ { 1 } , n _ { 2 } \\} } \\left( \\partial r _ { i } ( x ^ { * } ) \\times \\partial d _ { i } ( y ^ { * } ) \\right)", + "type": "interline_equation", + "image_path": "644d2f5d529de231d7c117704777c5bd94a9573875ac622a3b507fc340e62377.jpg" + } + ] + } + ], + "index": 13.5, + 
"virtual_lines": [ + { + "bbox": [ + 187, + 276, + 423, + 294.0 + ], + "spans": [], + "index": 13 + }, + { + "bbox": [ + 187, + 294.0, + 423, + 312.0 + ], + "spans": [], + "index": 14 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 316, + 506, + 351 + ], + "lines": [ + { + "bbox": [ + 105, + 316, + 506, + 330 + ], + "spans": [ + { + "bbox": [ + 105, + 316, + 149, + 330 + ], + "score": 1.0, + "content": "where for", + "type": "text" + }, + { + "bbox": [ + 150, + 317, + 221, + 329 + ], + "score": 0.92, + "content": "i > \\operatorname* { m i n } \\{ n _ { 1 } , n _ { 2 } \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 222, + 316, + 384, + 330 + ], + "score": 1.0, + "content": "we include β€œdummy functions\", either", + "type": "text" + }, + { + "bbox": [ + 384, + 317, + 428, + 329 + ], + "score": 0.93, + "content": "r _ { i } ( x ) = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 428, + 316, + 454, + 330 + ], + "score": 1.0, + "content": "when", + "type": "text" + }, + { + "bbox": [ + 455, + 318, + 492, + 328 + ], + "score": 0.89, + "content": "n _ { 1 } < n _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 493, + 316, + 506, + 330 + ], + "score": 1.0, + "content": "or", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 327, + 506, + 340 + ], + "spans": [ + { + "bbox": [ + 107, + 327, + 149, + 340 + ], + "score": 0.92, + "content": "d _ { i } ( y ) = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 149, + 328, + 174, + 340 + ], + "score": 1.0, + "content": "when", + "type": "text" + }, + { + "bbox": [ + 174, + 329, + 210, + 339 + ], + "score": 0.9, + "content": "n _ { 1 } < n _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 211, + 328, + 281, + 340 + ], + "score": 1.0, + "content": ". 
If the functions", + "type": "text" + }, + { + "bbox": [ + 281, + 328, + 290, + 338 + ], + "score": 0.84, + "content": "F", + "type": "inline_equation" + }, + { + "bbox": [ + 290, + 328, + 308, + 340 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 309, + 328, + 318, + 338 + ], + "score": 0.83, + "content": "G", + "type": "inline_equation" + }, + { + "bbox": [ + 318, + 328, + 465, + 340 + ], + "score": 1.0, + "content": "arise as averages in the same we as", + "type": "text" + }, + { + "bbox": [ + 465, + 329, + 473, + 339 + ], + "score": 0.84, + "content": "f", + "type": "inline_equation" + }, + { + "bbox": [ + 473, + 328, + 506, + 340 + ], + "score": 1.0, + "content": "in (16),", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 339, + 348, + 351 + ], + "spans": [ + { + "bbox": [ + 106, + 339, + 348, + 351 + ], + "score": 1.0, + "content": "then our method may again use a stochastic oracle for them.", + "type": "text" + } + ], + "index": 17 + } + ], + "index": 16 + }, + { + "type": "text", + "bbox": [ + 106, + 361, + 506, + 396 + ], + "lines": [ + { + "bbox": [ + 105, + 361, + 506, + 375 + ], + "spans": [ + { + "bbox": [ + 105, + 361, + 506, + 375 + ], + "score": 1.0, + "content": "Distributionally-Robust ML One example application of (19) is distributionally-robust ML, as", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 106, + 373, + 505, + 386 + ], + "spans": [ + { + "bbox": [ + 106, + 373, + 505, + 386 + ], + "score": 1.0, + "content": "demonstrated in the numerical experiment in Section 7. 
The full problem statement is given in", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 384, + 157, + 397 + ], + "spans": [ + { + "bbox": [ + 105, + 384, + 157, + 397 + ], + "score": 1.0, + "content": "Appendix I.", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 19 + }, + { + "type": "text", + "bbox": [ + 105, + 407, + 505, + 430 + ], + "lines": [ + { + "bbox": [ + 105, + 406, + 507, + 421 + ], + "spans": [ + { + "bbox": [ + 105, + 406, + 507, + 421 + ], + "score": 1.0, + "content": "Lagrangian Duality Another application of (19) is constrained optimization via Lagrangian duality.", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 417, + 146, + 431 + ], + "spans": [ + { + "bbox": [ + 105, + 417, + 146, + 431 + ], + "score": 1.0, + "content": "Consider", + "type": "text" + } + ], + "index": 22 + } + ], + "index": 21.5 + }, + { + "type": "interline_equation", + "bbox": [ + 182, + 433, + 429, + 467 + ], + "lines": [ + { + "bbox": [ + 182, + 433, + 429, + 467 + ], + "spans": [ + { + "bbox": [ + 182, + 433, + 429, + 467 + ], + "score": 0.94, + "content": "\\operatorname* { m i n } _ { x \\in \\mathbb { R } ^ { d } } \\left\\{ f ( x ) + \\sum _ { i = 1 } ^ { n } r _ { i } ( x ) \\right\\} \\quad { \\mathrm { s . t . 
} } \\quad h _ { j } ( x ) \\leq 0 \\quad j = 1 , \\ldots , p .", + "type": "interline_equation", + "image_path": "d905604cf0d9c0fd12213290386cea68bb91f729ffa0dcacd0ca4687d3b34ba3.jpg" + } + ] + } + ], + "index": 24, + "virtual_lines": [ + { + "bbox": [ + 182, + 433, + 429, + 444.3333333333333 + ], + "spans": [], + "index": 23 + }, + { + "bbox": [ + 182, + 444.3333333333333, + 429, + 455.66666666666663 + ], + "spans": [], + "index": 24 + }, + { + "bbox": [ + 182, + 455.66666666666663, + 429, + 466.99999999999994 + ], + "spans": [], + "index": 25 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 471, + 506, + 505 + ], + "lines": [ + { + "bbox": [ + 105, + 470, + 506, + 483 + ], + "spans": [ + { + "bbox": [ + 105, + 470, + 154, + 483 + ], + "score": 1.0, + "content": "As in (16),", + "type": "text" + }, + { + "bbox": [ + 154, + 472, + 161, + 483 + ], + "score": 0.85, + "content": "f", + "type": "inline_equation" + }, + { + "bbox": [ + 162, + 470, + 269, + 483 + ], + "score": 1.0, + "content": "is a loss function and the", + "type": "text" + }, + { + "bbox": [ + 270, + 473, + 279, + 483 + ], + "score": 0.84, + "content": "r _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 279, + 470, + 506, + 483 + ], + "score": 1.0, + "content": "may represent regularizers and (β€œsimple”) constraints;", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 482, + 505, + 495 + ], + "spans": [ + { + "bbox": [ + 105, + 482, + 195, + 495 + ], + "score": 1.0, + "content": "in addition, there are", + "type": "text" + }, + { + "bbox": [ + 195, + 484, + 202, + 494 + ], + "score": 0.81, + "content": "p", + "type": "inline_equation" + }, + { + "bbox": [ + 203, + 482, + 400, + 495 + ], + "score": 1.0, + "content": "functional constraints on the model parameters", + "type": "text" + }, + { + "bbox": [ + 401, + 485, + 407, + 492 + ], + "score": 0.75, + "content": "x", + "type": "inline_equation" + }, + { + "bbox": [ + 407, + 482, + 505, + 495 + ], + "score": 
1.0, + "content": ". Introducing Lagrange", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 106, + 493, + 308, + 506 + ], + "spans": [ + { + "bbox": [ + 106, + 493, + 152, + 506 + ], + "score": 1.0, + "content": "multipliers", + "type": "text" + }, + { + "bbox": [ + 153, + 493, + 183, + 505 + ], + "score": 0.92, + "content": "\\gamma \\in \\mathbb { R } ^ { p }", + "type": "inline_equation" + }, + { + "bbox": [ + 183, + 493, + 308, + 506 + ], + "score": 1.0, + "content": ", the problem can be written as", + "type": "text" + } + ], + "index": 28 + } + ], + "index": 27 + }, + { + "type": "interline_equation", + "bbox": [ + 206, + 509, + 404, + 549 + ], + "lines": [ + { + "bbox": [ + 206, + 509, + 404, + 549 + ], + "spans": [ + { + "bbox": [ + 206, + 509, + 404, + 549 + ], + "score": 0.93, + "content": "\\operatorname* { m i n } _ { x \\in \\mathbb { R } ^ { d } } \\operatorname* { m a x } _ { \\gamma \\in \\mathbb { R } _ { + } ^ { p } } \\left\\{ f ( x ) + \\sum _ { i = 1 } ^ { n } r _ { i } ( x ) + \\sum _ { j = 1 } ^ { p } \\gamma _ { j } h _ { j } ( x ) \\right\\} .", + "type": "interline_equation", + "image_path": "6f1d0d2520d76b3f705f5e0e4853e9aef959d5ce1f0ffce0278502590125761a.jpg" + } + ] + } + ], + "index": 29.5, + "virtual_lines": [ + { + "bbox": [ + 206, + 509, + 404, + 529.0 + ], + "spans": [], + "index": 29 + }, + { + "bbox": [ + 206, + 529.0, + 404, + 549.0 + ], + "spans": [], + "index": 30 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 554, + 504, + 576 + ], + "lines": [ + { + "bbox": [ + 105, + 551, + 506, + 568 + ], + "spans": [ + { + "bbox": [ + 105, + 551, + 506, + 568 + ], + "score": 1.0, + "content": "Under appropriate convexity conditions and constraint-qualifications, this reduces to the following", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 106, + 565, + 220, + 576 + ], + "spans": [ + { + "bbox": [ + 106, + 565, + 220, + 576 + ], + "score": 1.0, + "content": "inclusion in the form of (1):", + "type": 
"text" + } + ], + "index": 32 + } + ], + "index": 31.5 + }, + { + "type": "interline_equation", + "bbox": [ + 182, + 580, + 427, + 614 + ], + "lines": [ + { + "bbox": [ + 182, + 580, + 427, + 614 + ], + "spans": [ + { + "bbox": [ + 182, + 580, + 427, + 614 + ], + "score": 0.93, + "content": "0 \\in \\left[ \\begin{array} { c } { \\nabla f ( x ) + \\sum _ { j = 1 } ^ { p } \\gamma _ { j } \\nabla h _ { j } ( x ) } \\\\ { - h ( x ) } \\end{array} \\right] + \\sum _ { i = 1 } ^ { n } \\left( \\partial r _ { i } ( x ^ { * } ) \\times \\{ 0 \\} \\right)", + "type": "interline_equation", + "image_path": "b24a37447f0a36eb774b118eba4af8fa43b5567c971ac8b148aed8ad4587ab58.jpg" + } + ] + } + ], + "index": 34, + "virtual_lines": [ + { + "bbox": [ + 182, + 580, + 427, + 591.3333333333334 + ], + "spans": [], + "index": 33 + }, + { + "bbox": [ + 182, + 591.3333333333334, + 427, + 602.6666666666667 + ], + "spans": [], + "index": 34 + }, + { + "bbox": [ + 182, + 602.6666666666667, + 427, + 614.0000000000001 + ], + "spans": [], + "index": 35 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 618, + 506, + 653 + ], + "lines": [ + { + "bbox": [ + 105, + 618, + 506, + 632 + ], + "spans": [ + { + "bbox": [ + 105, + 618, + 134, + 632 + ], + "score": 1.0, + "content": "where", + "type": "text" + }, + { + "bbox": [ + 134, + 618, + 281, + 632 + ], + "score": 0.92, + "content": "h ( \\boldsymbol { x } ) = [ h _ { 1 } ( \\boldsymbol { x } ) , h _ { 2 } ( \\boldsymbol { x } ) , \\ldots , h _ { p } ( \\boldsymbol { x } ) ] ^ { \\top }", + "type": "inline_equation" + }, + { + "bbox": [ + 281, + 618, + 381, + 632 + ], + "score": 1.0, + "content": ". 
For certain choices of", + "type": "text" + }, + { + "bbox": [ + 381, + 620, + 388, + 629 + ], + "score": 0.82, + "content": "h", + "type": "inline_equation" + }, + { + "bbox": [ + 388, + 618, + 506, + 632 + ], + "score": 1.0, + "content": ", such as linear or quadratic", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 629, + 507, + 643 + ], + "spans": [ + { + "bbox": [ + 105, + 629, + 507, + 643 + ], + "score": 1.0, + "content": "functions, the first term above is monotone and (locally) Lipschitz continuous (Alacaoglu et al.,", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 639, + 136, + 654 + ], + "spans": [ + { + "bbox": [ + 105, + 639, + 136, + 654 + ], + "score": 1.0, + "content": "2021).", + "type": "text" + } + ], + "index": 38 + } + ], + "index": 37 + }, + { + "type": "text", + "bbox": [ + 106, + 664, + 504, + 687 + ], + "lines": [ + { + "bbox": [ + 105, + 664, + 505, + 678 + ], + "spans": [ + { + "bbox": [ + 105, + 664, + 505, + 678 + ], + "score": 1.0, + "content": "Bilinear Games with Many Constraints Finally, consider the bilinear saddlepoint problem subject", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 106, + 676, + 201, + 688 + ], + "spans": [ + { + "bbox": [ + 106, + 676, + 201, + 688 + ], + "score": 1.0, + "content": "to multiple constraints:", + "type": "text" + } + ], + "index": 40 + } + ], + "index": 39.5 + }, + { + "type": "interline_equation", + "bbox": [ + 205, + 691, + 405, + 732 + ], + "lines": [ + { + "bbox": [ + 205, + 691, + 405, + 732 + ], + "spans": [ + { + "bbox": [ + 205, + 691, + 405, + 732 + ], + "score": 0.91, + "content": "\\begin{array} { l l l } { \\underset { x \\in \\mathbb { R } ^ { d } } { \\operatorname* { m i n } } \\underset { y \\in \\mathbb { R } ^ { d } } { \\operatorname* { m a x } } x ^ { \\top } D y } & { \\mathrm { s . t . 
} } & { x \\in \\mathcal { C } _ { j } ^ { 1 } } & { j = 1 , \\dots , n _ { 1 } , } \\\\ & { } & { y \\in \\mathcal { C } _ { j } ^ { 2 } } & { j = 1 , \\dots , n _ { 2 } . } \\end{array}", + "type": "interline_equation", + "image_path": "a7f8278313a1b7a9eeda762e5abb2c69250ff81b90c6b828cfcb59e258f2e904.jpg" + } + ] + } + ], + "index": 42, + "virtual_lines": [ + { + "bbox": [ + 205, + 691, + 405, + 704.6666666666666 + ], + "spans": [], + "index": 41 + }, + { + "bbox": [ + 205, + 704.6666666666666, + 405, + 718.3333333333333 + ], + "spans": [], + "index": 42 + }, + { + "bbox": [ + 205, + 718.3333333333333, + 405, + 731.9999999999999 + ], + "spans": [], + "index": 43 + } + ] + } + ], + "page_idx": 14, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 106, + 26, + 308, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 760 + ], + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 764 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 764 + ], + "score": 1.0, + "content": "15", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 104, + 81, + 505, + 108 + ], + "lines": [ + { + "bbox": [ + 101, + 76, + 509, + 101 + ], + "spans": [ + { + "bbox": [ + 101, + 76, + 133, + 101 + ], + "score": 1.0, + "content": "where", + "type": "text" + }, + { + "bbox": [ + 133, + 81, + 227, + 96 + ], + "score": 0.91, + "content": "\\begin{array} { r } { f ( x ) \\doteq \\frac { 1 } { m } \\sum _ { j = 1 } ^ { m } f _ { j } ( x ) } \\end{array}", + "type": "inline_equation" + }, + { + "bbox": [ + 227, + 76, + 249, + 101 + ], + "score": 1.0, + "content": ", thus", + "type": "text" + }, + { + "bbox": [ + 250, + 81, + 360, + 96 + ], + "score": 
0.93, + "content": "\\begin{array} { r } { \\nabla f ( x ) \\doteq \\frac { 1 } { m } \\sum _ { j = 1 } ^ { m } \\nabla f _ { j } ( x ) } \\end{array}", + "type": "inline_equation" + }, + { + "bbox": [ + 360, + 76, + 509, + 101 + ], + "score": 1.0, + "content": ". The inclusion (17) is a special case", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 95, + 432, + 108 + ], + "spans": [ + { + "bbox": [ + 106, + 96, + 368, + 108 + ], + "score": 1.0, + "content": "of (1), and our method may use the standard stochastic oracle for", + "type": "text" + }, + { + "bbox": [ + 368, + 95, + 397, + 108 + ], + "score": 0.92, + "content": "\\nabla f ( x )", + "type": "inline_equation" + }, + { + "bbox": [ + 397, + 96, + 432, + 108 + ], + "score": 1.0, + "content": ", namely", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 0.5, + "bbox_fs": [ + 101, + 76, + 509, + 108 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 271, + 111, + 339, + 143 + ], + "lines": [ + { + "bbox": [ + 271, + 111, + 339, + 143 + ], + "spans": [ + { + "bbox": [ + 271, + 111, + 339, + 143 + ], + "score": 0.93, + "content": "\\frac { 1 } { | \\mathbf { B } | } \\sum _ { j \\in \\mathbf { B } } \\nabla f _ { j } ( z )", + "type": "interline_equation", + "image_path": "1abda774af20927066c1bb8d4cd95642bf6bbabf6bfd459bd0cbabc54a594cbd.jpg" + } + ] + } + ], + "index": 2.5, + "virtual_lines": [ + { + "bbox": [ + 271, + 111, + 339, + 127.0 + ], + "spans": [], + "index": 2 + }, + { + "bbox": [ + 271, + 127.0, + 339, + 143.0 + ], + "spans": [], + "index": 3 + } + ] + }, + { + "type": "text", + "bbox": [ + 105, + 147, + 431, + 160 + ], + "lines": [ + { + "bbox": [ + 105, + 146, + 432, + 162 + ], + "spans": [ + { + "bbox": [ + 105, + 146, + 360, + 162 + ], + "score": 1.0, + "content": "which subsamples a randomly selected minibatch of datapoints", + "type": "text" + }, + { + "bbox": [ + 360, + 148, + 428, + 160 + ], + "score": 0.92, + "content": "\\mathbf { B } \\in \\{ 1 , 
\\dots , m \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 428, + 146, + 432, + 162 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 4, + "bbox_fs": [ + 105, + 146, + 432, + 162 + ] + }, + { + "type": "text", + "bbox": [ + 105, + 171, + 395, + 184 + ], + "lines": [ + { + "bbox": [ + 105, + 169, + 395, + 187 + ], + "spans": [ + { + "bbox": [ + 105, + 169, + 395, + 187 + ], + "score": 1.0, + "content": "Games Consider the following nonsmooth Nash equilibrium problem", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 5, + "bbox_fs": [ + 105, + 169, + 395, + 187 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 143, + 187, + 468, + 221 + ], + "lines": [ + { + "bbox": [ + 143, + 187, + 468, + 221 + ], + "spans": [ + { + "bbox": [ + 143, + 187, + 468, + 221 + ], + "score": 0.93, + "content": "x ^ { * } \\in \\underset { x \\in \\mathbb { R } ^ { d _ { x } } } { \\arg \\operatorname* { m i n } } F ( x , y ^ { * } ) + \\underset { i = 1 } { \\overset { n _ { 1 } } { \\sum } } r _ { i } ( x ) \\quad \\mathrm { a n d } \\quad y ^ { * } \\in \\underset { y \\in \\mathbb { R } ^ { d _ { y } } } { \\arg \\operatorname* { m i n } } G ( x ^ { * } , y ) + \\underset { i = 1 } { \\overset { n _ { 2 } } { \\sum } } d _ { i } ( y ) .", + "type": "interline_equation", + "image_path": "3835fb4d271dfca3981f89d019dadd6c40e1099ade9cd99e3cb6abe535e31b94.jpg" + } + ] + } + ], + "index": 7, + "virtual_lines": [ + { + "bbox": [ + 143, + 187, + 468, + 198.33333333333334 + ], + "spans": [], + "index": 6 + }, + { + "bbox": [ + 143, + 198.33333333333334, + 468, + 209.66666666666669 + ], + "spans": [], + "index": 7 + }, + { + "bbox": [ + 143, + 209.66666666666669, + 468, + 221.00000000000003 + ], + "spans": [], + "index": 8 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 226, + 504, + 271 + ], + "lines": [ + { + "bbox": [ + 101, + 220, + 459, + 250 + ], + "spans": [ + { + "bbox": [ + 101, + 220, + 150, + 250 + ], 
+ "score": 1.0, + "content": "The terms player’s st", + "type": "text" + }, + { + "bbox": [ + 151, + 226, + 198, + 239 + ], + "score": 0.93, + "content": "\\scriptstyle \\sum _ { i = 1 } ^ { n _ { 1 } } r _ { i } ( x )", + "type": "inline_equation" + }, + { + "bbox": [ + 199, + 220, + 218, + 250 + ], + "score": 1.0, + "content": "and e tha", + "type": "text" + }, + { + "bbox": [ + 218, + 226, + 266, + 240 + ], + "score": 0.92, + "content": "\\textstyle \\sum _ { i = 1 } ^ { n _ { 2 } } d _ { i } ( y )", + "type": "inline_equation" + }, + { + "bbox": [ + 266, + 220, + 459, + 250 + ], + "score": 1.0, + "content": "once again represent regularizers and constrai (saddle-point) problems correspond to having", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 459, + 237, + 505, + 250 + ], + "spans": [ + { + "bbox": [ + 459, + 237, + 505, + 250 + ], + "score": 0.91, + "content": "F ( x , y ) =", + "type": "inline_equation" + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 248, + 506, + 261 + ], + "spans": [ + { + "bbox": [ + 107, + 249, + 146, + 261 + ], + "score": 0.92, + "content": "- G ( x , y )", + "type": "inline_equation" + }, + { + "bbox": [ + 146, + 248, + 506, + 261 + ], + "score": 1.0, + "content": ". 
Under appropriate convexity conditions and constraint qualifications, the solutions of (18)", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 259, + 439, + 271 + ], + "spans": [ + { + "bbox": [ + 105, + 259, + 439, + 271 + ], + "score": 1.0, + "content": "correspond to the solutions of the following monotone inclusion in the form of (1):", + "type": "text" + } + ], + "index": 12 + } + ], + "index": 10.5, + "bbox_fs": [ + 101, + 220, + 506, + 271 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 187, + 276, + 423, + 312 + ], + "lines": [ + { + "bbox": [ + 187, + 276, + 423, + 312 + ], + "spans": [ + { + "bbox": [ + 187, + 276, + 423, + 312 + ], + "score": 0.94, + "content": "0 \\in \\left[ \\begin{array} { l } { \\nabla _ { x } F ( x ^ { * } , y ^ { * } ) } \\\\ { \\nabla _ { y } G ( x ^ { * } , y ^ { * } ) } \\end{array} \\right] + \\sum _ { i = 1 } ^ { \\operatorname* { m a x } \\{ n _ { 1 } , n _ { 2 } \\} } \\left( \\partial r _ { i } ( x ^ { * } ) \\times \\partial d _ { i } ( y ^ { * } ) \\right)", + "type": "interline_equation", + "image_path": "644d2f5d529de231d7c117704777c5bd94a9573875ac622a3b507fc340e62377.jpg" + } + ] + } + ], + "index": 13.5, + "virtual_lines": [ + { + "bbox": [ + 187, + 276, + 423, + 294.0 + ], + "spans": [], + "index": 13 + }, + { + "bbox": [ + 187, + 294.0, + 423, + 312.0 + ], + "spans": [], + "index": 14 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 316, + 506, + 351 + ], + "lines": [ + { + "bbox": [ + 105, + 316, + 506, + 330 + ], + "spans": [ + { + "bbox": [ + 105, + 316, + 149, + 330 + ], + "score": 1.0, + "content": "where for", + "type": "text" + }, + { + "bbox": [ + 150, + 317, + 221, + 329 + ], + "score": 0.92, + "content": "i > \\operatorname* { m i n } \\{ n _ { 1 } , n _ { 2 } \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 222, + 316, + 384, + 330 + ], + "score": 1.0, + "content": "we include β€œdummy functions\", either", + "type": "text" + }, + { + "bbox": [ + 384, + 317, 
+ 428, + 329 + ], + "score": 0.93, + "content": "r _ { i } ( x ) = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 428, + 316, + 454, + 330 + ], + "score": 1.0, + "content": "when", + "type": "text" + }, + { + "bbox": [ + 455, + 318, + 492, + 328 + ], + "score": 0.89, + "content": "n _ { 1 } < n _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 493, + 316, + 506, + 330 + ], + "score": 1.0, + "content": "or", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 327, + 506, + 340 + ], + "spans": [ + { + "bbox": [ + 107, + 327, + 149, + 340 + ], + "score": 0.92, + "content": "d _ { i } ( y ) = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 149, + 328, + 174, + 340 + ], + "score": 1.0, + "content": "when", + "type": "text" + }, + { + "bbox": [ + 174, + 329, + 210, + 339 + ], + "score": 0.9, + "content": "n _ { 1 } < n _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 211, + 328, + 281, + 340 + ], + "score": 1.0, + "content": ". If the functions", + "type": "text" + }, + { + "bbox": [ + 281, + 328, + 290, + 338 + ], + "score": 0.84, + "content": "F", + "type": "inline_equation" + }, + { + "bbox": [ + 290, + 328, + 308, + 340 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 309, + 328, + 318, + 338 + ], + "score": 0.83, + "content": "G", + "type": "inline_equation" + }, + { + "bbox": [ + 318, + 328, + 465, + 340 + ], + "score": 1.0, + "content": "arise as averages in the same we as", + "type": "text" + }, + { + "bbox": [ + 465, + 329, + 473, + 339 + ], + "score": 0.84, + "content": "f", + "type": "inline_equation" + }, + { + "bbox": [ + 473, + 328, + 506, + 340 + ], + "score": 1.0, + "content": "in (16),", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 339, + 348, + 351 + ], + "spans": [ + { + "bbox": [ + 106, + 339, + 348, + 351 + ], + "score": 1.0, + "content": "then our method may again use a stochastic oracle for them.", + "type": "text" + } + ], + "index": 17 
+ } + ], + "index": 16, + "bbox_fs": [ + 105, + 316, + 506, + 351 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 361, + 506, + 396 + ], + "lines": [ + { + "bbox": [ + 105, + 361, + 506, + 375 + ], + "spans": [ + { + "bbox": [ + 105, + 361, + 506, + 375 + ], + "score": 1.0, + "content": "Distributionally-Robust ML One example application of (19) is distributionally-robust ML, as", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 106, + 373, + 505, + 386 + ], + "spans": [ + { + "bbox": [ + 106, + 373, + 505, + 386 + ], + "score": 1.0, + "content": "demonstrated in the numerical experiment in Section 7. The full problem statement is given in", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 384, + 157, + 397 + ], + "spans": [ + { + "bbox": [ + 105, + 384, + 157, + 397 + ], + "score": 1.0, + "content": "Appendix I.", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 19, + "bbox_fs": [ + 105, + 361, + 506, + 397 + ] + }, + { + "type": "text", + "bbox": [ + 105, + 407, + 505, + 430 + ], + "lines": [ + { + "bbox": [ + 105, + 406, + 507, + 421 + ], + "spans": [ + { + "bbox": [ + 105, + 406, + 507, + 421 + ], + "score": 1.0, + "content": "Lagrangian Duality Another application of (19) is constrained optimization via Lagrangian duality.", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 417, + 146, + 431 + ], + "spans": [ + { + "bbox": [ + 105, + 417, + 146, + 431 + ], + "score": 1.0, + "content": "Consider", + "type": "text" + } + ], + "index": 22 + } + ], + "index": 21.5, + "bbox_fs": [ + 105, + 406, + 507, + 431 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 182, + 433, + 429, + 467 + ], + "lines": [ + { + "bbox": [ + 182, + 433, + 429, + 467 + ], + "spans": [ + { + "bbox": [ + 182, + 433, + 429, + 467 + ], + "score": 0.94, + "content": "\\operatorname* { m i n } _ { x \\in \\mathbb { R } ^ { d } } \\left\\{ f ( x ) + \\sum _ { i = 1 } ^ { n } r _ { i } ( x ) \\right\\} \\quad { \\mathrm { 
s . t . } } \\quad h _ { j } ( x ) \\leq 0 \\quad j = 1 , \\ldots , p .", + "type": "interline_equation", + "image_path": "d905604cf0d9c0fd12213290386cea68bb91f729ffa0dcacd0ca4687d3b34ba3.jpg" + } + ] + } + ], + "index": 24, + "virtual_lines": [ + { + "bbox": [ + 182, + 433, + 429, + 444.3333333333333 + ], + "spans": [], + "index": 23 + }, + { + "bbox": [ + 182, + 444.3333333333333, + 429, + 455.66666666666663 + ], + "spans": [], + "index": 24 + }, + { + "bbox": [ + 182, + 455.66666666666663, + 429, + 466.99999999999994 + ], + "spans": [], + "index": 25 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 471, + 506, + 505 + ], + "lines": [ + { + "bbox": [ + 105, + 470, + 506, + 483 + ], + "spans": [ + { + "bbox": [ + 105, + 470, + 154, + 483 + ], + "score": 1.0, + "content": "As in (16),", + "type": "text" + }, + { + "bbox": [ + 154, + 472, + 161, + 483 + ], + "score": 0.85, + "content": "f", + "type": "inline_equation" + }, + { + "bbox": [ + 162, + 470, + 269, + 483 + ], + "score": 1.0, + "content": "is a loss function and the", + "type": "text" + }, + { + "bbox": [ + 270, + 473, + 279, + 483 + ], + "score": 0.84, + "content": "r _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 279, + 470, + 506, + 483 + ], + "score": 1.0, + "content": "may represent regularizers and (β€œsimple”) constraints;", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 482, + 505, + 495 + ], + "spans": [ + { + "bbox": [ + 105, + 482, + 195, + 495 + ], + "score": 1.0, + "content": "in addition, there are", + "type": "text" + }, + { + "bbox": [ + 195, + 484, + 202, + 494 + ], + "score": 0.81, + "content": "p", + "type": "inline_equation" + }, + { + "bbox": [ + 203, + 482, + 400, + 495 + ], + "score": 1.0, + "content": "functional constraints on the model parameters", + "type": "text" + }, + { + "bbox": [ + 401, + 485, + 407, + 492 + ], + "score": 0.75, + "content": "x", + "type": "inline_equation" + }, + { + "bbox": [ + 407, + 482, + 505, + 495 + ], + 
"score": 1.0, + "content": ". Introducing Lagrange", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 106, + 493, + 308, + 506 + ], + "spans": [ + { + "bbox": [ + 106, + 493, + 152, + 506 + ], + "score": 1.0, + "content": "multipliers", + "type": "text" + }, + { + "bbox": [ + 153, + 493, + 183, + 505 + ], + "score": 0.92, + "content": "\\gamma \\in \\mathbb { R } ^ { p }", + "type": "inline_equation" + }, + { + "bbox": [ + 183, + 493, + 308, + 506 + ], + "score": 1.0, + "content": ", the problem can be written as", + "type": "text" + } + ], + "index": 28 + } + ], + "index": 27, + "bbox_fs": [ + 105, + 470, + 506, + 506 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 206, + 509, + 404, + 549 + ], + "lines": [ + { + "bbox": [ + 206, + 509, + 404, + 549 + ], + "spans": [ + { + "bbox": [ + 206, + 509, + 404, + 549 + ], + "score": 0.93, + "content": "\\operatorname* { m i n } _ { x \\in \\mathbb { R } ^ { d } } \\operatorname* { m a x } _ { \\gamma \\in \\mathbb { R } _ { + } ^ { p } } \\left\\{ f ( x ) + \\sum _ { i = 1 } ^ { n } r _ { i } ( x ) + \\sum _ { j = 1 } ^ { p } \\gamma _ { j } h _ { j } ( x ) \\right\\} .", + "type": "interline_equation", + "image_path": "6f1d0d2520d76b3f705f5e0e4853e9aef959d5ce1f0ffce0278502590125761a.jpg" + } + ] + } + ], + "index": 29.5, + "virtual_lines": [ + { + "bbox": [ + 206, + 509, + 404, + 529.0 + ], + "spans": [], + "index": 29 + }, + { + "bbox": [ + 206, + 529.0, + 404, + 549.0 + ], + "spans": [], + "index": 30 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 554, + 504, + 576 + ], + "lines": [ + { + "bbox": [ + 105, + 551, + 506, + 568 + ], + "spans": [ + { + "bbox": [ + 105, + 551, + 506, + 568 + ], + "score": 1.0, + "content": "Under appropriate convexity conditions and constraint-qualifications, this reduces to the following", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 106, + 565, + 220, + 576 + ], + "spans": [ + { + "bbox": [ + 106, + 565, + 220, + 576 + ], + "score": 1.0, 
+ "content": "inclusion in the form of (1):", + "type": "text" + } + ], + "index": 32 + } + ], + "index": 31.5, + "bbox_fs": [ + 105, + 551, + 506, + 576 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 182, + 580, + 427, + 614 + ], + "lines": [ + { + "bbox": [ + 182, + 580, + 427, + 614 + ], + "spans": [ + { + "bbox": [ + 182, + 580, + 427, + 614 + ], + "score": 0.93, + "content": "0 \\in \\left[ \\begin{array} { c } { \\nabla f ( x ) + \\sum _ { j = 1 } ^ { p } \\gamma _ { j } \\nabla h _ { j } ( x ) } \\\\ { - h ( x ) } \\end{array} \\right] + \\sum _ { i = 1 } ^ { n } \\left( \\partial r _ { i } ( x ^ { * } ) \\times \\{ 0 \\} \\right)", + "type": "interline_equation", + "image_path": "b24a37447f0a36eb774b118eba4af8fa43b5567c971ac8b148aed8ad4587ab58.jpg" + } + ] + } + ], + "index": 34, + "virtual_lines": [ + { + "bbox": [ + 182, + 580, + 427, + 591.3333333333334 + ], + "spans": [], + "index": 33 + }, + { + "bbox": [ + 182, + 591.3333333333334, + 427, + 602.6666666666667 + ], + "spans": [], + "index": 34 + }, + { + "bbox": [ + 182, + 602.6666666666667, + 427, + 614.0000000000001 + ], + "spans": [], + "index": 35 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 618, + 506, + 653 + ], + "lines": [ + { + "bbox": [ + 105, + 618, + 506, + 632 + ], + "spans": [ + { + "bbox": [ + 105, + 618, + 134, + 632 + ], + "score": 1.0, + "content": "where", + "type": "text" + }, + { + "bbox": [ + 134, + 618, + 281, + 632 + ], + "score": 0.92, + "content": "h ( \\boldsymbol { x } ) = [ h _ { 1 } ( \\boldsymbol { x } ) , h _ { 2 } ( \\boldsymbol { x } ) , \\ldots , h _ { p } ( \\boldsymbol { x } ) ] ^ { \\top }", + "type": "inline_equation" + }, + { + "bbox": [ + 281, + 618, + 381, + 632 + ], + "score": 1.0, + "content": ". 
For certain choices of", + "type": "text" + }, + { + "bbox": [ + 381, + 620, + 388, + 629 + ], + "score": 0.82, + "content": "h", + "type": "inline_equation" + }, + { + "bbox": [ + 388, + 618, + 506, + 632 + ], + "score": 1.0, + "content": ", such as linear or quadratic", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 629, + 507, + 643 + ], + "spans": [ + { + "bbox": [ + 105, + 629, + 507, + 643 + ], + "score": 1.0, + "content": "functions, the first term above is monotone and (locally) Lipschitz continuous (Alacaoglu et al.,", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 639, + 136, + 654 + ], + "spans": [ + { + "bbox": [ + 105, + 639, + 136, + 654 + ], + "score": 1.0, + "content": "2021).", + "type": "text" + } + ], + "index": 38 + } + ], + "index": 37, + "bbox_fs": [ + 105, + 618, + 507, + 654 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 664, + 504, + 687 + ], + "lines": [ + { + "bbox": [ + 105, + 664, + 505, + 678 + ], + "spans": [ + { + "bbox": [ + 105, + 664, + 505, + 678 + ], + "score": 1.0, + "content": "Bilinear Games with Many Constraints Finally, consider the bilinear saddlepoint problem subject", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 106, + 676, + 201, + 688 + ], + "spans": [ + { + "bbox": [ + 106, + 676, + 201, + 688 + ], + "score": 1.0, + "content": "to multiple constraints:", + "type": "text" + } + ], + "index": 40 + } + ], + "index": 39.5, + "bbox_fs": [ + 105, + 664, + 505, + 688 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 205, + 691, + 405, + 732 + ], + "lines": [ + { + "bbox": [ + 205, + 691, + 405, + 732 + ], + "spans": [ + { + "bbox": [ + 205, + 691, + 405, + 732 + ], + "score": 0.91, + "content": "\\begin{array} { l l l } { \\underset { x \\in \\mathbb { R } ^ { d } } { \\operatorname* { m i n } } \\underset { y \\in \\mathbb { R } ^ { d } } { \\operatorname* { m a x } } x ^ { \\top } D y } & { \\mathrm { s . t . 
} } & { x \\in \\mathcal { C } _ { j } ^ { 1 } } & { j = 1 , \\dots , n _ { 1 } , } \\\\ & { } & { y \\in \\mathcal { C } _ { j } ^ { 2 } } & { j = 1 , \\dots , n _ { 2 } . } \\end{array}", + "type": "interline_equation", + "image_path": "a7f8278313a1b7a9eeda762e5abb2c69250ff81b90c6b828cfcb59e258f2e904.jpg" + } + ] + } + ], + "index": 42, + "virtual_lines": [ + { + "bbox": [ + 205, + 691, + 405, + 704.6666666666666 + ], + "spans": [], + "index": 41 + }, + { + "bbox": [ + 205, + 704.6666666666666, + 405, + 718.3333333333333 + ], + "spans": [], + "index": 42 + }, + { + "bbox": [ + 205, + 718.3333333333333, + 405, + 731.9999999999999 + ], + "spans": [], + "index": 43 + } + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 82, + 394, + 94 + ], + "lines": [ + { + "bbox": [ + 105, + 81, + 396, + 96 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 396, + 96 + ], + "score": 1.0, + "content": "Under some regularity conditions, this problem reduces to the inclusion", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "interline_equation", + "bbox": [ + 192, + 98, + 416, + 135 + ], + "lines": [ + { + "bbox": [ + 192, + 98, + 416, + 135 + ], + "spans": [ + { + "bbox": [ + 192, + 98, + 416, + 135 + ], + "score": 0.94, + "content": "0 \\in \\left[ \\begin{array} { c } { D y ^ { * } } \\\\ { - D ^ { \\top } x ^ { * } } \\end{array} \\right] + \\sum _ { j = 1 } ^ { \\operatorname* { m a x } \\{ n _ { 1 } , n _ { 2 } \\} } \\big ( N _ { { \\mathcal C } _ { j } ^ { 1 } } ( x ^ { * } ) \\times N _ { { \\mathcal C } _ { j } ^ { 2 } } ( y ^ { * } ) \\big ) ,", + "type": "interline_equation", + "image_path": "20fd1ca98bfe912a0917aa73b18d66131356e9479819ab4a7df1087f717f3a64.jpg" + } + ] + } + ], + "index": 1.5, + "virtual_lines": [ + { + "bbox": [ + 192, + 98, + 416, + 116.5 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 192, + 116.5, + 416, + 135.0 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "text", 
+ "bbox": [ + 106, + 138, + 506, + 195 + ], + "lines": [ + { + "bbox": [ + 105, + 137, + 505, + 153 + ], + "spans": [ + { + "bbox": [ + 105, + 137, + 293, + 153 + ], + "score": 1.0, + "content": "where we introduce additional β€œdummy” sets", + "type": "text" + }, + { + "bbox": [ + 293, + 138, + 330, + 153 + ], + "score": 0.95, + "content": "\\mathcal { C } _ { j } ^ { 1 } = \\mathbb { R } ^ { d }", + "type": "inline_equation" + }, + { + "bbox": [ + 330, + 137, + 342, + 153 + ], + "score": 1.0, + "content": "or", + "type": "text" + }, + { + "bbox": [ + 343, + 139, + 380, + 153 + ], + "score": 0.92, + "content": "\\mathcal { C } _ { j } ^ { 2 } = \\mathbb { R } ^ { d }", + "type": "inline_equation" + }, + { + "bbox": [ + 380, + 137, + 406, + 153 + ], + "score": 1.0, + "content": "when", + "type": "text" + }, + { + "bbox": [ + 406, + 140, + 442, + 151 + ], + "score": 0.91, + "content": "n _ { 1 } \\neq n _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 442, + 137, + 505, + 153 + ], + "score": 1.0, + "content": ". The first term", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 150, + 505, + 163 + ], + "spans": [ + { + "bbox": [ + 105, + 150, + 505, + 163 + ], + "score": 1.0, + "content": "is linear and skew symmetric, and therefore can easily be shown to be Lipschitz continuous and", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 162, + 505, + 173 + ], + "spans": [ + { + "bbox": [ + 105, + 162, + 505, + 173 + ], + "score": 1.0, + "content": "monotone. 
If all the constraint sets are closed and convex, then the rest of the terms are maximal", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 172, + 506, + 186 + ], + "spans": [ + { + "bbox": [ + 105, + 172, + 506, + 186 + ], + "score": 1.0, + "content": "monotone, then the problem is of the form (1), meaning that projective splitting may be applied,", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 183, + 312, + 196 + ], + "spans": [ + { + "bbox": [ + 105, + 183, + 312, + 196 + ], + "score": 1.0, + "content": "possibly using a stochastic oracle for the first term.", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 5 + }, + { + "type": "title", + "bbox": [ + 108, + 210, + 282, + 223 + ], + "lines": [ + { + "bbox": [ + 105, + 210, + 284, + 225 + ], + "spans": [ + { + "bbox": [ + 105, + 210, + 284, + 225 + ], + "score": 1.0, + "content": "B ADDITIONAL RELATED WORK", + "type": "text" + } + ], + "index": 8 + } + ], + "index": 8 + }, + { + "type": "text", + "bbox": [ + 106, + 235, + 506, + 324 + ], + "lines": [ + { + "bbox": [ + 106, + 236, + 506, + 248 + ], + "spans": [ + { + "bbox": [ + 106, + 236, + 506, + 248 + ], + "score": 1.0, + "content": "The preprint by Bot et al. (2019) develops a stochastic version of Tseng’s method under the require-", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 247, + 505, + 259 + ], + "spans": [ + { + "bbox": [ + 106, + 247, + 505, + 259 + ], + "score": 1.0, + "content": "ment that the noise variance converges to 0. In ML, this could be achieved with the use of perpetually", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 258, + 506, + 269 + ], + "spans": [ + { + "bbox": [ + 105, + 258, + 506, + 269 + ], + "score": 1.0, + "content": "increasing batch sizes, a strategy that is impractical in many scenarios. 
The stochastic version of", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 268, + 506, + 281 + ], + "spans": [ + { + "bbox": [ + 105, + 268, + 506, + 281 + ], + "score": 1.0, + "content": "FRB proposed by Van Dung & Vu (2021) has more practical noise requirements, but has stronger", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 279, + 506, + 293 + ], + "spans": [ + { + "bbox": [ + 105, + 279, + 506, + 293 + ], + "score": 1.0, + "content": "assumptions on the problem which are rarely satisfied in ML applications: either uniform/strong", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 290, + 506, + 303 + ], + "spans": [ + { + "bbox": [ + 105, + 290, + 506, + 303 + ], + "score": 1.0, + "content": "monotonicity or a bounded domain. The papers by Yurtsever et al. (2016) and Pedregosa et al.", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 301, + 507, + 314 + ], + "spans": [ + { + "bbox": [ + 105, + 301, + 401, + 314 + ], + "score": 1.0, + "content": "(2019) consider stochastic variants of three-operator splitting, but require", + "type": "text" + }, + { + "bbox": [ + 401, + 302, + 411, + 311 + ], + "score": 0.81, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 411, + 301, + 507, + 314 + ], + "score": 1.0, + "content": "in (1) to be cocoercive,", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 313, + 321, + 324 + ], + "spans": [ + { + "bbox": [ + 106, + 313, + 321, + 324 + ], + "score": 1.0, + "content": "essentially restricting them to optimization problems.", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 12.5 + }, + { + "type": "text", + "bbox": [ + 107, + 329, + 505, + 374 + ], + "lines": [ + { + "bbox": [ + 105, + 329, + 505, + 341 + ], + "spans": [ + { + "bbox": [ + 105, + 329, + 505, + 341 + ], + "score": 1.0, + "content": "There are several alternatives to the (stochastic) extragradient method that reduce the number 
of", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 340, + 506, + 352 + ], + "spans": [ + { + "bbox": [ + 105, + 340, + 506, + 352 + ], + "score": 1.0, + "content": "gradient evaluations per iteration from two to one (Hsieh et al., 2019; Malitsky & Tam, 2020; Gidel", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 351, + 506, + 364 + ], + "spans": [ + { + "bbox": [ + 105, + 351, + 506, + 364 + ], + "score": 1.0, + "content": "et al., 2019). However, these methods have more stringent stepsize limits, making it unclear a priori", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 106, + 362, + 301, + 374 + ], + "spans": [ + { + "bbox": [ + 106, + 362, + 301, + 374 + ], + "score": 1.0, + "content": "whether they will outperform two-step methods.", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 18.5 + }, + { + "type": "text", + "bbox": [ + 107, + 378, + 505, + 434 + ], + "lines": [ + { + "bbox": [ + 105, + 378, + 505, + 393 + ], + "spans": [ + { + "bbox": [ + 105, + 378, + 505, + 393 + ], + "score": 1.0, + "content": "DSEG is a stochastic version of EG (Hsieh et al., 2020). The primary innovation of DSEG is using", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 106, + 390, + 505, + 402 + ], + "spans": [ + { + "bbox": [ + 106, + 390, + 505, + 402 + ], + "score": 1.0, + "content": "different stepsizes for the extrapolation and update steps, thereby resolving some of the convergence", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 401, + 506, + 413 + ], + "spans": [ + { + "bbox": [ + 105, + 401, + 506, + 413 + ], + "score": 1.0, + "content": "issues affecting stochastic EG. 
As noted earlier, DSEG is the special case of our SPS method in which", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 412, + 505, + 424 + ], + "spans": [ + { + "bbox": [ + 106, + 412, + 132, + 422 + ], + "score": 0.88, + "content": "n = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 132, + 412, + 505, + 424 + ], + "score": 1.0, + "content": ", that is, no regularizers/constraints are present in the underlying game. The analysis in (Hsieh", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 423, + 424, + 434 + ], + "spans": [ + { + "bbox": [ + 105, + 423, + 424, + 434 + ], + "score": 1.0, + "content": "et al., 2020) also did not consider the fixed stepsize choice given in Theorem 2.", + "type": "text" + } + ], + "index": 25 + } + ], + "index": 23 + }, + { + "type": "text", + "bbox": [ + 107, + 439, + 505, + 495 + ], + "lines": [ + { + "bbox": [ + 105, + 439, + 507, + 452 + ], + "spans": [ + { + "bbox": [ + 105, + 439, + 507, + 452 + ], + "score": 1.0, + "content": "In the context of GANs, several methods have been developed based on a variational inequal-", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 106, + 451, + 506, + 463 + ], + "spans": [ + { + "bbox": [ + 106, + 451, + 506, + 463 + ], + "score": 1.0, + "content": "ity/monotone inclusion approach (Gidel et al., 2019; Daskalakis et al., 2018; Hsieh et al., 2019; 2020;", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 460, + 506, + 475 + ], + "spans": [ + { + "bbox": [ + 105, + 460, + 506, + 475 + ], + "score": 1.0, + "content": "BΓΆhm et al., 2020). 
Many of these papers point out that variational inequalities provide a principled", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 472, + 506, + 486 + ], + "spans": [ + { + "bbox": [ + 105, + 472, + 506, + 486 + ], + "score": 1.0, + "content": "framework for studying the GAN training problem and correcting some of the flaws in the standard", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 483, + 165, + 495 + ], + "spans": [ + { + "bbox": [ + 105, + 483, + 165, + 495 + ], + "score": 1.0, + "content": "method GDA.", + "type": "text" + } + ], + "index": 30 + } + ], + "index": 28 + }, + { + "type": "title", + "bbox": [ + 108, + 510, + 243, + 523 + ], + "lines": [ + { + "bbox": [ + 106, + 510, + 245, + 525 + ], + "spans": [ + { + "bbox": [ + 106, + 510, + 245, + 525 + ], + "score": 1.0, + "content": "C PROOF OF THEOREM 1", + "type": "text" + } + ], + "index": 31 + } + ], + "index": 31 + }, + { + "type": "title", + "bbox": [ + 107, + 535, + 320, + 547 + ], + "lines": [ + { + "bbox": [ + 106, + 535, + 322, + 549 + ], + "spans": [ + { + "bbox": [ + 106, + 535, + 322, + 549 + ], + "score": 1.0, + "content": "C.1 STOCHASTIC QUASI-FEJER MONOTONICITY", + "type": "text" + } + ], + "index": 32 + } + ], + "index": 32 + }, + { + "type": "text", + "bbox": [ + 106, + 556, + 504, + 579 + ], + "lines": [ + { + "bbox": [ + 105, + 554, + 506, + 569 + ], + "spans": [ + { + "bbox": [ + 105, + 554, + 506, + 569 + ], + "score": 1.0, + "content": "The key to the analysis is showing that the algorithm satisfies Stochastic Quasi-Fejer Monotonicity", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 106, + 567, + 228, + 579 + ], + "spans": [ + { + "bbox": [ + 106, + 567, + 228, + 579 + ], + "score": 1.0, + "content": "(Combettes & Pesquet, 2015).", + "type": "text" + } + ], + "index": 34 + } + ], + "index": 33.5 + }, + { + "type": "text", + "bbox": [ + 107, + 581, + 505, + 627 + ], + "lines": [ + { + "bbox": [ + 105, + 579, + 506, + 594 
+ ], + "spans": [ + { + "bbox": [ + 105, + 579, + 382, + 594 + ], + "score": 1.0, + "content": "Lemma 2 ((Combettes & Pesquet, 2015), Proposition 2.3). Suppose", + "type": "text" + }, + { + "bbox": [ + 383, + 581, + 394, + 592 + ], + "score": 0.87, + "content": "p ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 394, + 579, + 461, + 594 + ], + "score": 1.0, + "content": "is a sequence of", + "type": "text" + }, + { + "bbox": [ + 461, + 582, + 474, + 591 + ], + "score": 0.88, + "content": "\\mathbb { R } ^ { d }", + "type": "inline_equation" + }, + { + "bbox": [ + 474, + 579, + 506, + 594 + ], + "score": 1.0, + "content": "-valued", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 591, + 504, + 605 + ], + "spans": [ + { + "bbox": [ + 105, + 591, + 308, + 605 + ], + "score": 1.0, + "content": "random variables defined on a probability space", + "type": "text" + }, + { + "bbox": [ + 309, + 592, + 348, + 604 + ], + "score": 0.92, + "content": "( \\Omega , { \\mathcal { F } } , P )", + "type": "inline_equation" + }, + { + "bbox": [ + 349, + 591, + 370, + 605 + ], + "score": 1.0, + "content": ". Let", + "type": "text" + }, + { + "bbox": [ + 371, + 592, + 452, + 604 + ], + "score": 0.91, + "content": "\\mathcal { F } _ { k } \\overset { \\cdot } { = } \\sigma ( p ^ { 1 } , \\cdot \\cdot \\cdot , p ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 453, + 591, + 475, + 605 + ], + "score": 1.0, + "content": ". 
Let", + "type": "text" + }, + { + "bbox": [ + 475, + 594, + 483, + 602 + ], + "score": 0.82, + "content": "F", + "type": "inline_equation" + }, + { + "bbox": [ + 483, + 591, + 498, + 605 + ], + "score": 1.0, + "content": "be", + "type": "text" + }, + { + "bbox": [ + 498, + 597, + 504, + 602 + ], + "score": 0.39, + "content": "a", + "type": "inline_equation" + } + ], + "index": 36 + }, + { + "bbox": [ + 159, + 601, + 505, + 633 + ], + "spans": [ + { + "bbox": [ + 159, + 601, + 199, + 633 + ], + "score": 1.0, + "content": "osed subssuch that", + "type": "text" + }, + { + "bbox": [ + 220, + 603, + 232, + 613 + ], + "score": 0.85, + "content": "\\mathbb { R } ^ { d }", + "type": "inline_equation" + }, + { + "bbox": [ + 274, + 601, + 278, + 633 + ], + "score": 1.0, + "content": ",", + "type": "text" + }, + { + "bbox": [ + 337, + 604, + 365, + 615 + ], + "score": 0.89, + "content": "p \\in F", + "type": "inline_equation" + }, + { + "bbox": [ + 366, + 601, + 419, + 633 + ], + "score": 1.0, + "content": ", there exists d", + "type": "text" + }, + { + "bbox": [ + 419, + 603, + 505, + 615 + ], + "score": 0.91, + "content": "\\chi ^ { k } ( p ) \\geq 0 , \\eta ^ { k } ( p ) \\geq", + "type": "inline_equation" + } + ], + "index": 38 + }, + { + "bbox": [ + 107, + 614, + 352, + 627 + ], + "spans": [ + { + "bbox": [ + 107, + 614, + 159, + 626 + ], + "score": 0.92, + "content": "0 , \\nu ^ { k } ( \\hat { p } ) \\stackrel { \\cdot } { \\geq } 0", + "type": "inline_equation" + }, + { + "bbox": [ + 199, + 614, + 274, + 627 + ], + "score": 0.87, + "content": "\\scriptstyle \\sum _ { k = 1 } ^ { \\infty } \\chi ^ { k } ( p ) ^ { \\widehat { < } } \\infty", + "type": "inline_equation" + }, + { + "bbox": [ + 278, + 614, + 352, + 627 + ], + "score": 0.9, + "content": "\\scriptstyle \\sum _ { k = 1 } ^ { \\infty } \\eta ^ { k } ( p ) < \\infty", + "type": "inline_equation" + } + ], + "index": 37 + } + ], + "index": 36.5 + }, + { + "type": "interline_equation", + "bbox": [ + 151, + 630, + 
457, + 645 + ], + "lines": [ + { + "bbox": [ + 151, + 630, + 457, + 645 + ], + "spans": [ + { + "bbox": [ + 151, + 630, + 457, + 645 + ], + "score": 0.84, + "content": "\\begin{array} { r l } { ( \\forall k \\in \\mathbb { N } ) } & { \\mathbb { E } [ \\| p ^ { k + 1 } - p \\| ^ { 2 } | \\mathcal { F } _ { k } ] \\leq ( 1 + \\chi ^ { k } ( p ) ) \\| p ^ { k } - p \\| ^ { 2 } - \\nu ^ { k } ( p ) + \\eta ^ { k } ( p ) . } \\end{array}", + "type": "interline_equation", + "image_path": "de203b2891d1925c2f967e3f63b0b5a5324310df98ef3338c28a462b9d371536.jpg" + } + ] + } + ], + "index": 39, + "virtual_lines": [ + { + "bbox": [ + 151, + 630, + 457, + 645 + ], + "spans": [], + "index": 39 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 649, + 206, + 660 + ], + "lines": [ + { + "bbox": [ + 106, + 649, + 206, + 662 + ], + "spans": [ + { + "bbox": [ + 106, + 649, + 206, + 662 + ], + "score": 1.0, + "content": "Then the following hold:", + "type": "text" + } + ], + "index": 40 + } + ], + "index": 40 + }, + { + "type": "interline_equation", + "bbox": [ + 129, + 667, + 291, + 683 + ], + "lines": [ + { + "bbox": [ + 129, + 667, + 291, + 683 + ], + "spans": [ + { + "bbox": [ + 129, + 667, + 291, + 683 + ], + "score": 0.85, + "content": "\\begin{array} { r l } { I . \\ ( \\forall p \\in F ) : } & { { } \\sum _ { k = 1 } ^ { \\infty } \\nu ^ { k } ( p ) < \\infty a . s . 
} \\end{array}", + "type": "interline_equation", + "image_path": "be82c03ec9b18cab7103581c3370badb6567aadd8440c46dd7105782457d21dc.jpg" + } + ] + } + ], + "index": 41, + "virtual_lines": [ + { + "bbox": [ + 129, + 667, + 291, + 683 + ], + "spans": [], + "index": 41 + } + ] + }, + { + "type": "text", + "bbox": [ + 130, + 689, + 216, + 702 + ], + "lines": [ + { + "bbox": [ + 129, + 687, + 217, + 703 + ], + "spans": [ + { + "bbox": [ + 129, + 687, + 142, + 703 + ], + "score": 1.0, + "content": "2.", + "type": "text" + }, + { + "bbox": [ + 142, + 689, + 153, + 702 + ], + "score": 0.86, + "content": "p ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 154, + 687, + 217, + 703 + ], + "score": 1.0, + "content": "is bounded a.s.", + "type": "text" + } + ], + "index": 42 + } + ], + "index": 42 + }, + { + "type": "text", + "bbox": [ + 127, + 708, + 505, + 732 + ], + "lines": [ + { + "bbox": [ + 127, + 707, + 506, + 724 + ], + "spans": [ + { + "bbox": [ + 127, + 707, + 194, + 724 + ], + "score": 1.0, + "content": "3. 
There exists", + "type": "text" + }, + { + "bbox": [ + 194, + 708, + 203, + 720 + ], + "score": 0.83, + "content": "\\tilde { \\Omega }", + "type": "inline_equation" + }, + { + "bbox": [ + 203, + 707, + 245, + 724 + ], + "score": 1.0, + "content": "such that", + "type": "text" + }, + { + "bbox": [ + 245, + 708, + 287, + 722 + ], + "score": 0.93, + "content": "P [ \\tilde { \\Omega } ] = 1", + "type": "inline_equation" + }, + { + "bbox": [ + 287, + 707, + 307, + 724 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 307, + 708, + 371, + 723 + ], + "score": 0.95, + "content": "\\left\\{ \\| p ^ { k } ( \\omega ) - p \\| \\right\\}", + "type": "inline_equation" + }, + { + "bbox": [ + 372, + 707, + 456, + 724 + ], + "score": 1.0, + "content": "converges for every", + "type": "text" + }, + { + "bbox": [ + 456, + 708, + 486, + 720 + ], + "score": 0.91, + "content": "\\omega \\in \\tilde { \\Omega }", + "type": "inline_equation" + }, + { + "bbox": [ + 486, + 707, + 506, + 724 + ], + "score": 1.0, + "content": "and", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 142, + 721, + 173, + 732 + ], + "spans": [ + { + "bbox": [ + 142, + 721, + 168, + 732 + ], + "score": 0.91, + "content": "p \\in F", + "type": "inline_equation" + }, + { + "bbox": [ + 169, + 721, + 173, + 732 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 44 + } + ], + "index": 43.5 + } + ], + "page_idx": 15, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 106, + 26, + 308, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 309, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 309, + 39 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 760 + ], + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 764 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 
312, + 764 + ], + "score": 1.0, + "content": "16", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 82, + 394, + 94 + ], + "lines": [ + { + "bbox": [ + 105, + 81, + 396, + 96 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 396, + 96 + ], + "score": 1.0, + "content": "Under some regularity conditions, this problem reduces to the inclusion", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0, + "bbox_fs": [ + 105, + 81, + 396, + 96 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 192, + 98, + 416, + 135 + ], + "lines": [ + { + "bbox": [ + 192, + 98, + 416, + 135 + ], + "spans": [ + { + "bbox": [ + 192, + 98, + 416, + 135 + ], + "score": 0.94, + "content": "0 \\in \\left[ \\begin{array} { c } { D y ^ { * } } \\\\ { - D ^ { \\top } x ^ { * } } \\end{array} \\right] + \\sum _ { j = 1 } ^ { \\operatorname* { m a x } \\{ n _ { 1 } , n _ { 2 } \\} } \\big ( N _ { { \\mathcal C } _ { j } ^ { 1 } } ( x ^ { * } ) \\times N _ { { \\mathcal C } _ { j } ^ { 2 } } ( y ^ { * } ) \\big ) ,", + "type": "interline_equation", + "image_path": "20fd1ca98bfe912a0917aa73b18d66131356e9479819ab4a7df1087f717f3a64.jpg" + } + ] + } + ], + "index": 1.5, + "virtual_lines": [ + { + "bbox": [ + 192, + 98, + 416, + 116.5 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 192, + 116.5, + 416, + 135.0 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 138, + 506, + 195 + ], + "lines": [ + { + "bbox": [ + 105, + 137, + 505, + 153 + ], + "spans": [ + { + "bbox": [ + 105, + 137, + 293, + 153 + ], + "score": 1.0, + "content": "where we introduce additional β€œdummy” sets", + "type": "text" + }, + { + "bbox": [ + 293, + 138, + 330, + 153 + ], + "score": 0.95, + "content": "\\mathcal { C } _ { j } ^ { 1 } = \\mathbb { R } ^ { d }", + "type": "inline_equation" + }, + { + "bbox": [ + 330, + 137, + 342, + 153 + ], + "score": 1.0, + "content": "or", + "type": "text" + }, + { + "bbox": [ + 343, 
+ 139, + 380, + 153 + ], + "score": 0.92, + "content": "\\mathcal { C } _ { j } ^ { 2 } = \\mathbb { R } ^ { d }", + "type": "inline_equation" + }, + { + "bbox": [ + 380, + 137, + 406, + 153 + ], + "score": 1.0, + "content": "when", + "type": "text" + }, + { + "bbox": [ + 406, + 140, + 442, + 151 + ], + "score": 0.91, + "content": "n _ { 1 } \\neq n _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 442, + 137, + 505, + 153 + ], + "score": 1.0, + "content": ". The first term", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 150, + 505, + 163 + ], + "spans": [ + { + "bbox": [ + 105, + 150, + 505, + 163 + ], + "score": 1.0, + "content": "is linear and skew symmetric, and therefore can easily be shown to be Lipschitz continuous and", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 162, + 505, + 173 + ], + "spans": [ + { + "bbox": [ + 105, + 162, + 505, + 173 + ], + "score": 1.0, + "content": "monotone. If all the constraint sets are closed and convex, then the rest of the terms are maximal", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 172, + 506, + 186 + ], + "spans": [ + { + "bbox": [ + 105, + 172, + 506, + 186 + ], + "score": 1.0, + "content": "monotone, then the problem is of the form (1), meaning that projective splitting may be applied,", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 183, + 312, + 196 + ], + "spans": [ + { + "bbox": [ + 105, + 183, + 312, + 196 + ], + "score": 1.0, + "content": "possibly using a stochastic oracle for the first term.", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 5, + "bbox_fs": [ + 105, + 137, + 506, + 196 + ] + }, + { + "type": "title", + "bbox": [ + 108, + 210, + 282, + 223 + ], + "lines": [ + { + "bbox": [ + 105, + 210, + 284, + 225 + ], + "spans": [ + { + "bbox": [ + 105, + 210, + 284, + 225 + ], + "score": 1.0, + "content": "B ADDITIONAL RELATED WORK", + "type": "text" + } + ], + "index": 8 + } + ], + "index": 8 
+ }, + { + "type": "text", + "bbox": [ + 106, + 235, + 506, + 324 + ], + "lines": [ + { + "bbox": [ + 106, + 236, + 506, + 248 + ], + "spans": [ + { + "bbox": [ + 106, + 236, + 506, + 248 + ], + "score": 1.0, + "content": "The preprint by Bot et al. (2019) develops a stochastic version of Tseng’s method under the require-", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 247, + 505, + 259 + ], + "spans": [ + { + "bbox": [ + 106, + 247, + 505, + 259 + ], + "score": 1.0, + "content": "ment that the noise variance converges to 0. In ML, this could be achieved with the use of perpetually", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 258, + 506, + 269 + ], + "spans": [ + { + "bbox": [ + 105, + 258, + 506, + 269 + ], + "score": 1.0, + "content": "increasing batch sizes, a strategy that is impractical in many scenarios. The stochastic version of", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 268, + 506, + 281 + ], + "spans": [ + { + "bbox": [ + 105, + 268, + 506, + 281 + ], + "score": 1.0, + "content": "FRB proposed by Van Dung & Vu (2021) has more practical noise requirements, but has stronger", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 279, + 506, + 293 + ], + "spans": [ + { + "bbox": [ + 105, + 279, + 506, + 293 + ], + "score": 1.0, + "content": "assumptions on the problem which are rarely satisfied in ML applications: either uniform/strong", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 290, + 506, + 303 + ], + "spans": [ + { + "bbox": [ + 105, + 290, + 506, + 303 + ], + "score": 1.0, + "content": "monotonicity or a bounded domain. The papers by Yurtsever et al. 
(2016) and Pedregosa et al.", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 301, + 507, + 314 + ], + "spans": [ + { + "bbox": [ + 105, + 301, + 401, + 314 + ], + "score": 1.0, + "content": "(2019) consider stochastic variants of three-operator splitting, but require", + "type": "text" + }, + { + "bbox": [ + 401, + 302, + 411, + 311 + ], + "score": 0.81, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 411, + 301, + 507, + 314 + ], + "score": 1.0, + "content": "in (1) to be cocoercive,", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 313, + 321, + 324 + ], + "spans": [ + { + "bbox": [ + 106, + 313, + 321, + 324 + ], + "score": 1.0, + "content": "essentially restricting them to optimization problems.", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 12.5, + "bbox_fs": [ + 105, + 236, + 507, + 324 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 329, + 505, + 374 + ], + "lines": [ + { + "bbox": [ + 105, + 329, + 505, + 341 + ], + "spans": [ + { + "bbox": [ + 105, + 329, + 505, + 341 + ], + "score": 1.0, + "content": "There are several alternatives to the (stochastic) extragradient method that reduce the number of", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 340, + 506, + 352 + ], + "spans": [ + { + "bbox": [ + 105, + 340, + 506, + 352 + ], + "score": 1.0, + "content": "gradient evaluations per iteration from two to one (Hsieh et al., 2019; Malitsky & Tam, 2020; Gidel", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 351, + 506, + 364 + ], + "spans": [ + { + "bbox": [ + 105, + 351, + 506, + 364 + ], + "score": 1.0, + "content": "et al., 2019). 
However, these methods have more stringent stepsize limits, making it unclear a priori", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 106, + 362, + 301, + 374 + ], + "spans": [ + { + "bbox": [ + 106, + 362, + 301, + 374 + ], + "score": 1.0, + "content": "whether they will outperform two-step methods.", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 18.5, + "bbox_fs": [ + 105, + 329, + 506, + 374 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 378, + 505, + 434 + ], + "lines": [ + { + "bbox": [ + 105, + 378, + 505, + 393 + ], + "spans": [ + { + "bbox": [ + 105, + 378, + 505, + 393 + ], + "score": 1.0, + "content": "DSEG is a stochastic version of EG (Hsieh et al., 2020). The primary innovation of DSEG is using", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 106, + 390, + 505, + 402 + ], + "spans": [ + { + "bbox": [ + 106, + 390, + 505, + 402 + ], + "score": 1.0, + "content": "different stepsizes for the extrapolation and update steps, thereby resolving some of the convergence", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 401, + 506, + 413 + ], + "spans": [ + { + "bbox": [ + 105, + 401, + 506, + 413 + ], + "score": 1.0, + "content": "issues affecting stochastic EG. As noted earlier, DSEG is the special case of our SPS method in which", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 412, + 505, + 424 + ], + "spans": [ + { + "bbox": [ + 106, + 412, + 132, + 422 + ], + "score": 0.88, + "content": "n = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 132, + 412, + 505, + 424 + ], + "score": 1.0, + "content": ", that is, no regularizers/constraints are present in the underlying game. 
The analysis in (Hsieh", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 423, + 424, + 434 + ], + "spans": [ + { + "bbox": [ + 105, + 423, + 424, + 434 + ], + "score": 1.0, + "content": "et al., 2020) also did not consider the fixed stepsize choice given in Theorem 2.", + "type": "text" + } + ], + "index": 25 + } + ], + "index": 23, + "bbox_fs": [ + 105, + 378, + 506, + 434 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 439, + 505, + 495 + ], + "lines": [ + { + "bbox": [ + 105, + 439, + 507, + 452 + ], + "spans": [ + { + "bbox": [ + 105, + 439, + 507, + 452 + ], + "score": 1.0, + "content": "In the context of GANs, several methods have been developed based on a variational inequal-", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 106, + 451, + 506, + 463 + ], + "spans": [ + { + "bbox": [ + 106, + 451, + 506, + 463 + ], + "score": 1.0, + "content": "ity/monotone inclusion approach (Gidel et al., 2019; Daskalakis et al., 2018; Hsieh et al., 2019; 2020;", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 460, + 506, + 475 + ], + "spans": [ + { + "bbox": [ + 105, + 460, + 506, + 475 + ], + "score": 1.0, + "content": "BΓΆhm et al., 2020). 
Many of these papers point out that variational inequalities provide a principled", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 472, + 506, + 486 + ], + "spans": [ + { + "bbox": [ + 105, + 472, + 506, + 486 + ], + "score": 1.0, + "content": "framework for studying the GAN training problem and correcting some of the flaws in the standard", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 483, + 165, + 495 + ], + "spans": [ + { + "bbox": [ + 105, + 483, + 165, + 495 + ], + "score": 1.0, + "content": "method GDA.", + "type": "text" + } + ], + "index": 30 + } + ], + "index": 28, + "bbox_fs": [ + 105, + 439, + 507, + 495 + ] + }, + { + "type": "title", + "bbox": [ + 108, + 510, + 243, + 523 + ], + "lines": [ + { + "bbox": [ + 106, + 510, + 245, + 525 + ], + "spans": [ + { + "bbox": [ + 106, + 510, + 245, + 525 + ], + "score": 1.0, + "content": "C PROOF OF THEOREM 1", + "type": "text" + } + ], + "index": 31 + } + ], + "index": 31 + }, + { + "type": "title", + "bbox": [ + 107, + 535, + 320, + 547 + ], + "lines": [ + { + "bbox": [ + 106, + 535, + 322, + 549 + ], + "spans": [ + { + "bbox": [ + 106, + 535, + 322, + 549 + ], + "score": 1.0, + "content": "C.1 STOCHASTIC QUASI-FEJER MONOTONICITY", + "type": "text" + } + ], + "index": 32 + } + ], + "index": 32 + }, + { + "type": "text", + "bbox": [ + 106, + 556, + 504, + 579 + ], + "lines": [ + { + "bbox": [ + 105, + 554, + 506, + 569 + ], + "spans": [ + { + "bbox": [ + 105, + 554, + 506, + 569 + ], + "score": 1.0, + "content": "The key to the analysis is showing that the algorithm satisfies Stochastic Quasi-Fejer Monotonicity", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 106, + 567, + 228, + 579 + ], + "spans": [ + { + "bbox": [ + 106, + 567, + 228, + 579 + ], + "score": 1.0, + "content": "(Combettes & Pesquet, 2015).", + "type": "text" + } + ], + "index": 34 + } + ], + "index": 33.5, + "bbox_fs": [ + 105, + 554, + 506, + 579 + ] + }, + { + "type": "text", + 
"bbox": [ + 107, + 581, + 505, + 627 + ], + "lines": [ + { + "bbox": [ + 105, + 579, + 506, + 594 + ], + "spans": [ + { + "bbox": [ + 105, + 579, + 382, + 594 + ], + "score": 1.0, + "content": "Lemma 2 ((Combettes & Pesquet, 2015), Proposition 2.3). Suppose", + "type": "text" + }, + { + "bbox": [ + 383, + 581, + 394, + 592 + ], + "score": 0.87, + "content": "p ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 394, + 579, + 461, + 594 + ], + "score": 1.0, + "content": "is a sequence of", + "type": "text" + }, + { + "bbox": [ + 461, + 582, + 474, + 591 + ], + "score": 0.88, + "content": "\\mathbb { R } ^ { d }", + "type": "inline_equation" + }, + { + "bbox": [ + 474, + 579, + 506, + 594 + ], + "score": 1.0, + "content": "-valued", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 591, + 504, + 605 + ], + "spans": [ + { + "bbox": [ + 105, + 591, + 308, + 605 + ], + "score": 1.0, + "content": "random variables defined on a probability space", + "type": "text" + }, + { + "bbox": [ + 309, + 592, + 348, + 604 + ], + "score": 0.92, + "content": "( \\Omega , { \\mathcal { F } } , P )", + "type": "inline_equation" + }, + { + "bbox": [ + 349, + 591, + 370, + 605 + ], + "score": 1.0, + "content": ". Let", + "type": "text" + }, + { + "bbox": [ + 371, + 592, + 452, + 604 + ], + "score": 0.91, + "content": "\\mathcal { F } _ { k } \\overset { \\cdot } { = } \\sigma ( p ^ { 1 } , \\cdot \\cdot \\cdot , p ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 453, + 591, + 475, + 605 + ], + "score": 1.0, + "content": ". 
Let", + "type": "text" + }, + { + "bbox": [ + 475, + 594, + 483, + 602 + ], + "score": 0.82, + "content": "F", + "type": "inline_equation" + }, + { + "bbox": [ + 483, + 591, + 498, + 605 + ], + "score": 1.0, + "content": "be", + "type": "text" + }, + { + "bbox": [ + 498, + 597, + 504, + 602 + ], + "score": 0.39, + "content": "a", + "type": "inline_equation" + } + ], + "index": 36 + }, + { + "bbox": [ + 159, + 601, + 505, + 633 + ], + "spans": [ + { + "bbox": [ + 159, + 601, + 199, + 633 + ], + "score": 1.0, + "content": "osed subssuch that", + "type": "text" + }, + { + "bbox": [ + 220, + 603, + 232, + 613 + ], + "score": 0.85, + "content": "\\mathbb { R } ^ { d }", + "type": "inline_equation" + }, + { + "bbox": [ + 274, + 601, + 278, + 633 + ], + "score": 1.0, + "content": ",", + "type": "text" + }, + { + "bbox": [ + 337, + 604, + 365, + 615 + ], + "score": 0.89, + "content": "p \\in F", + "type": "inline_equation" + }, + { + "bbox": [ + 366, + 601, + 419, + 633 + ], + "score": 1.0, + "content": ", there exists d", + "type": "text" + }, + { + "bbox": [ + 419, + 603, + 505, + 615 + ], + "score": 0.91, + "content": "\\chi ^ { k } ( p ) \\geq 0 , \\eta ^ { k } ( p ) \\geq", + "type": "inline_equation" + } + ], + "index": 38 + }, + { + "bbox": [ + 107, + 614, + 352, + 627 + ], + "spans": [ + { + "bbox": [ + 107, + 614, + 159, + 626 + ], + "score": 0.92, + "content": "0 , \\nu ^ { k } ( \\hat { p } ) \\stackrel { \\cdot } { \\geq } 0", + "type": "inline_equation" + }, + { + "bbox": [ + 199, + 614, + 274, + 627 + ], + "score": 0.87, + "content": "\\scriptstyle \\sum _ { k = 1 } ^ { \\infty } \\chi ^ { k } ( p ) ^ { \\widehat { < } } \\infty", + "type": "inline_equation" + }, + { + "bbox": [ + 278, + 614, + 352, + 627 + ], + "score": 0.9, + "content": "\\scriptstyle \\sum _ { k = 1 } ^ { \\infty } \\eta ^ { k } ( p ) < \\infty", + "type": "inline_equation" + } + ], + "index": 37 + } + ], + "index": 36.5, + "bbox_fs": [ + 105, + 579, + 506, + 633 + ] + }, + { + "type": 
"interline_equation", + "bbox": [ + 151, + 630, + 457, + 645 + ], + "lines": [ + { + "bbox": [ + 151, + 630, + 457, + 645 + ], + "spans": [ + { + "bbox": [ + 151, + 630, + 457, + 645 + ], + "score": 0.84, + "content": "\\begin{array} { r l } { ( \\forall k \\in \\mathbb { N } ) } & { \\mathbb { E } [ \\| p ^ { k + 1 } - p \\| ^ { 2 } | \\mathcal { F } _ { k } ] \\leq ( 1 + \\chi ^ { k } ( p ) ) \\| p ^ { k } - p \\| ^ { 2 } - \\nu ^ { k } ( p ) + \\eta ^ { k } ( p ) . } \\end{array}", + "type": "interline_equation", + "image_path": "de203b2891d1925c2f967e3f63b0b5a5324310df98ef3338c28a462b9d371536.jpg" + } + ] + } + ], + "index": 39, + "virtual_lines": [ + { + "bbox": [ + 151, + 630, + 457, + 645 + ], + "spans": [], + "index": 39 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 649, + 206, + 660 + ], + "lines": [ + { + "bbox": [ + 106, + 649, + 206, + 662 + ], + "spans": [ + { + "bbox": [ + 106, + 649, + 206, + 662 + ], + "score": 1.0, + "content": "Then the following hold:", + "type": "text" + } + ], + "index": 40 + } + ], + "index": 40, + "bbox_fs": [ + 106, + 649, + 206, + 662 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 129, + 667, + 291, + 683 + ], + "lines": [ + { + "bbox": [ + 129, + 667, + 291, + 683 + ], + "spans": [ + { + "bbox": [ + 129, + 667, + 291, + 683 + ], + "score": 0.85, + "content": "\\begin{array} { r l } { I . \\ ( \\forall p \\in F ) : } & { { } \\sum _ { k = 1 } ^ { \\infty } \\nu ^ { k } ( p ) < \\infty a . s . 
} \\end{array}", + "type": "interline_equation", + "image_path": "be82c03ec9b18cab7103581c3370badb6567aadd8440c46dd7105782457d21dc.jpg" + } + ] + } + ], + "index": 41, + "virtual_lines": [ + { + "bbox": [ + 129, + 667, + 291, + 683 + ], + "spans": [], + "index": 41 + } + ] + }, + { + "type": "text", + "bbox": [ + 130, + 689, + 216, + 702 + ], + "lines": [ + { + "bbox": [ + 129, + 687, + 217, + 703 + ], + "spans": [ + { + "bbox": [ + 129, + 687, + 142, + 703 + ], + "score": 1.0, + "content": "2.", + "type": "text" + }, + { + "bbox": [ + 142, + 689, + 153, + 702 + ], + "score": 0.86, + "content": "p ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 154, + 687, + 217, + 703 + ], + "score": 1.0, + "content": "is bounded a.s.", + "type": "text" + } + ], + "index": 42 + } + ], + "index": 42, + "bbox_fs": [ + 129, + 687, + 217, + 703 + ] + }, + { + "type": "text", + "bbox": [ + 127, + 708, + 505, + 732 + ], + "lines": [ + { + "bbox": [ + 127, + 707, + 506, + 724 + ], + "spans": [ + { + "bbox": [ + 127, + 707, + 194, + 724 + ], + "score": 1.0, + "content": "3. 
There exists", + "type": "text" + }, + { + "bbox": [ + 194, + 708, + 203, + 720 + ], + "score": 0.83, + "content": "\\tilde { \\Omega }", + "type": "inline_equation" + }, + { + "bbox": [ + 203, + 707, + 245, + 724 + ], + "score": 1.0, + "content": "such that", + "type": "text" + }, + { + "bbox": [ + 245, + 708, + 287, + 722 + ], + "score": 0.93, + "content": "P [ \\tilde { \\Omega } ] = 1", + "type": "inline_equation" + }, + { + "bbox": [ + 287, + 707, + 307, + 724 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 307, + 708, + 371, + 723 + ], + "score": 0.95, + "content": "\\left\\{ \\| p ^ { k } ( \\omega ) - p \\| \\right\\}", + "type": "inline_equation" + }, + { + "bbox": [ + 372, + 707, + 456, + 724 + ], + "score": 1.0, + "content": "converges for every", + "type": "text" + }, + { + "bbox": [ + 456, + 708, + 486, + 720 + ], + "score": 0.91, + "content": "\\omega \\in \\tilde { \\Omega }", + "type": "inline_equation" + }, + { + "bbox": [ + 486, + 707, + 506, + 724 + ], + "score": 1.0, + "content": "and", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 142, + 721, + 173, + 732 + ], + "spans": [ + { + "bbox": [ + 142, + 721, + 168, + 732 + ], + "score": 0.91, + "content": "p \\in F", + "type": "inline_equation" + }, + { + "bbox": [ + 169, + 721, + 173, + 732 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 44 + } + ], + "index": 43.5, + "bbox_fs": [ + 127, + 707, + 506, + 732 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "title", + "bbox": [ + 108, + 82, + 277, + 94 + ], + "lines": [ + { + "bbox": [ + 106, + 82, + 278, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 82, + 278, + 95 + ], + "score": 1.0, + "content": "C.2 IMPORTANT RECURSION FOR SPS", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 106, + 102, + 504, + 126 + ], + "lines": [ + { + "bbox": [ + 105, + 100, + 505, + 117 + ], + "spans": [ + { + "bbox": [ + 105, + 100, 
+ 505, + 117 + ], + "score": 1.0, + "content": "The following lemma summarizes the key recursion satisfied by Algorithm 1, to which we will apply", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 114, + 327, + 126 + ], + "spans": [ + { + "bbox": [ + 106, + 114, + 195, + 126 + ], + "score": 1.0, + "content": "Lemma 2. Recall that", + "type": "text" + }, + { + "bbox": [ + 196, + 115, + 204, + 124 + ], + "score": 0.81, + "content": "L", + "type": "inline_equation" + }, + { + "bbox": [ + 204, + 114, + 315, + 126 + ], + "score": 1.0, + "content": "is the Lipschitz constant of", + "type": "text" + }, + { + "bbox": [ + 315, + 114, + 324, + 124 + ], + "score": 0.82, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 324, + 114, + 327, + 126 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 2 + } + ], + "index": 1.5 + }, + { + "type": "text", + "bbox": [ + 106, + 127, + 333, + 140 + ], + "lines": [ + { + "bbox": [ + 106, + 127, + 331, + 141 + ], + "spans": [ + { + "bbox": [ + 106, + 127, + 213, + 141 + ], + "score": 1.0, + "content": "Lemma 3. 
For Algorithm", + "type": "text" + }, + { + "bbox": [ + 214, + 128, + 219, + 138 + ], + "score": 0.34, + "content": "I", + "type": "inline_equation" + }, + { + "bbox": [ + 219, + 127, + 331, + 141 + ], + "score": 1.0, + "content": ", suppose (9)–(11) hold and", + "type": "text" + } + ], + "index": 3 + } + ], + "index": 3 + }, + { + "type": "interline_equation", + "bbox": [ + 273, + 142, + 337, + 155 + ], + "lines": [ + { + "bbox": [ + 273, + 142, + 337, + 155 + ], + "spans": [ + { + "bbox": [ + 273, + 142, + 337, + 155 + ], + "score": 0.91, + "content": "\\rho _ { k } \\leq \\overline { { \\rho } } < 1 / L .", + "type": "interline_equation", + "image_path": "c862cbc29560226484ac3f9ae67fa80a7012a73774759885a9a7b90983eb3ecf.jpg" + } + ] + } + ], + "index": 4, + "virtual_lines": [ + { + "bbox": [ + 273, + 142, + 337, + 155 + ], + "spans": [], + "index": 4 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 158, + 122, + 169 + ], + "lines": [ + { + "bbox": [ + 104, + 158, + 123, + 172 + ], + "spans": [ + { + "bbox": [ + 104, + 158, + 123, + 172 + ], + "score": 1.0, + "content": "Let", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 5 + }, + { + "type": "interline_equation", + "bbox": [ + 145, + 169, + 465, + 203 + ], + "lines": [ + { + "bbox": [ + 145, + 169, + 465, + 203 + ], + "spans": [ + { + "bbox": [ + 145, + 169, + 465, + 203 + ], + "score": 0.93, + "content": "T _ { k } \\doteq \\frac { \\tau } { \\overline { { \\rho } } } \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\frac { 1 } { \\overline { { \\rho } } \\tau } \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + 2 \\big ( 1 - \\overline { { \\rho } } L \\big ) \\| B \\big ( z ^ { k } \\big ) - w _ { n + 1 } ^ { k } \\| ^ { 2 }", + "type": "interline_equation", + "image_path": "5d23dc14d367047bfe73ff60a08031457aa8aeee336050d8f37988eb80795f5f.jpg" + } + ] + } + ], + "index": 7, + "virtual_lines": [ + { + "bbox": [ + 145, + 169, + 465, + 
180.33333333333334 + ], + "spans": [], + "index": 6 + }, + { + "bbox": [ + 145, + 180.33333333333334, + 465, + 191.66666666666669 + ], + "spans": [], + "index": 7 + }, + { + "bbox": [ + 145, + 191.66666666666669, + 465, + 203.00000000000003 + ], + "spans": [], + "index": 8 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 204, + 269, + 216 + ], + "lines": [ + { + "bbox": [ + 106, + 203, + 270, + 218 + ], + "spans": [ + { + "bbox": [ + 106, + 203, + 153, + 218 + ], + "score": 1.0, + "content": "then for all", + "type": "text" + }, + { + "bbox": [ + 153, + 205, + 183, + 216 + ], + "score": 0.91, + "content": "p ^ { * } \\in { \\mathcal { S } }", + "type": "inline_equation" + }, + { + "bbox": [ + 183, + 203, + 270, + 218 + ], + "score": 1.0, + "content": ", with probability one", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 9 + }, + { + "type": "text", + "bbox": [ + 105, + 218, + 503, + 248 + ], + "lines": [ + { + "bbox": [ + 115, + 216, + 506, + 236 + ], + "spans": [ + { + "bbox": [ + 115, + 218, + 478, + 233 + ], + "score": 0.9, + "content": "\\begin{array} { r } { \\mathbb { E } [ \\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } | \\mathcal { F } _ { k } ] \\le \\big ( 1 + C _ { 1 } \\alpha _ { k } ^ { 2 } + C _ { 3 } \\alpha _ { k } \\rho _ { k } ^ { 2 } \\big ) \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - \\alpha _ { k } \\rho _ { k } T _ { k } + C _ { 2 } \\alpha _ { k } ^ { 2 } + C _ { 4 } \\alpha _ { k } \\rho _ { k } ^ { 2 } } \\end{array}", + "type": "inline_equation", + "image_path": "c21990d1265024b692e80bc3d6bc1f7163f688481833e2c82477412bc3de38f0.jpg" + }, + { + "bbox": [ + 479, + 216, + 506, + 236 + ], + "score": 1.0, + "content": "(21)", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 233, + 501, + 249 + ], + "spans": [ + { + "bbox": [ + 105, + 233, + 133, + 249 + ], + "score": 1.0, + "content": "where", + "type": "text" + }, + { + "bbox": [ + 133, + 236, + 179, + 247 + ], + "score": 0.93, + "content": "C _ { 1 } , \\ldots 
, C _ { 4 }", + "type": "inline_equation" + }, + { + "bbox": [ + 180, + 233, + 501, + 249 + ], + "score": 1.0, + "content": "are nonegative constants defined in (33), (34), (48), and (49) below, respectively.", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 10.5 + }, + { + "type": "text", + "bbox": [ + 107, + 254, + 433, + 267 + ], + "lines": [ + { + "bbox": [ + 105, + 254, + 433, + 268 + ], + "spans": [ + { + "bbox": [ + 105, + 254, + 145, + 268 + ], + "score": 1.0, + "content": "Note that", + "type": "text" + }, + { + "bbox": [ + 146, + 256, + 158, + 267 + ], + "score": 0.89, + "content": "T _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 158, + 254, + 354, + 268 + ], + "score": 1.0, + "content": "is a scaled version of the approximation residual", + "type": "text" + }, + { + "bbox": [ + 354, + 255, + 368, + 267 + ], + "score": 0.89, + "content": "G _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 369, + 254, + 433, + 268 + ], + "score": 1.0, + "content": "defined in (14).", + "type": "text" + } + ], + "index": 12 + } + ], + "index": 12 + }, + { + "type": "text", + "bbox": [ + 106, + 272, + 506, + 306 + ], + "lines": [ + { + "bbox": [ + 105, + 271, + 506, + 285 + ], + "spans": [ + { + "bbox": [ + 105, + 271, + 506, + 285 + ], + "score": 1.0, + "content": "We proceed to first prove Lemma 3 and then exploit the implications of Lemma 2. Referring to (10)", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 283, + 506, + 297 + ], + "spans": [ + { + "bbox": [ + 105, + 283, + 159, + 297 + ], + "score": 1.0, + "content": "and (11), let", + "type": "text" + }, + { + "bbox": [ + 159, + 284, + 238, + 296 + ], + "score": 0.94, + "content": "N \\doteq \\mathrm { m a x } _ { j \\in 1 \\ldots 4 } N _ { j }", + "type": "inline_equation" + }, + { + "bbox": [ + 239, + 283, + 402, + 297 + ], + "score": 1.0, + "content": ". 
To simplify the constants, we will use", + "type": "text" + }, + { + "bbox": [ + 402, + 284, + 412, + 293 + ], + "score": 0.81, + "content": "N", + "type": "inline_equation" + }, + { + "bbox": [ + 413, + 283, + 460, + 297 + ], + "score": 1.0, + "content": "in place of", + "type": "text" + }, + { + "bbox": [ + 461, + 284, + 474, + 296 + ], + "score": 0.9, + "content": "N _ { j }", + "type": "inline_equation" + }, + { + "bbox": [ + 474, + 283, + 506, + 297 + ], + "score": 1.0, + "content": "for the", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 294, + 273, + 307 + ], + "spans": [ + { + "bbox": [ + 106, + 294, + 273, + 307 + ], + "score": 1.0, + "content": "noise variance bounds given in (10)-(11).", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 14 + }, + { + "type": "title", + "bbox": [ + 108, + 318, + 281, + 330 + ], + "lines": [ + { + "bbox": [ + 105, + 317, + 282, + 331 + ], + "spans": [ + { + "bbox": [ + 105, + 317, + 282, + 331 + ], + "score": 1.0, + "content": "C.3 UPPER BOUNDING THE GRADIENT", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 16 + }, + { + "type": "text", + "bbox": [ + 105, + 338, + 505, + 362 + ], + "lines": [ + { + "bbox": [ + 105, + 338, + 506, + 354 + ], + "spans": [ + { + "bbox": [ + 105, + 338, + 268, + 354 + ], + "score": 1.0, + "content": "Throughout the analysis, we fix some", + "type": "text" + }, + { + "bbox": [ + 269, + 339, + 397, + 352 + ], + "score": 0.93, + "content": "p ^ { * } = ( z ^ { * } , w _ { 1 } ^ { * } \\ldots , w _ { n + 1 } ^ { * } ) \\in \\mathcal { S }", + "type": "inline_equation" + }, + { + "bbox": [ + 397, + 338, + 506, + 354 + ], + "score": 1.0, + "content": ". 
All statements are with", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 350, + 498, + 362 + ], + "spans": [ + { + "bbox": [ + 105, + 350, + 498, + 362 + ], + "score": 1.0, + "content": "probability one (almost surely), but for brevity we will omit this unless it needs to be emphasized.", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 17.5 + }, + { + "type": "text", + "bbox": [ + 106, + 366, + 504, + 379 + ], + "lines": [ + { + "bbox": [ + 105, + 364, + 502, + 382 + ], + "spans": [ + { + "bbox": [ + 105, + 364, + 323, + 382 + ], + "score": 1.0, + "content": "In this section, we derive appropriate upper bounds for", + "type": "text" + }, + { + "bbox": [ + 324, + 367, + 358, + 379 + ], + "score": 0.93, + "content": "\\| \\nabla \\varphi _ { k } \\| ^ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 359, + 364, + 477, + 382 + ], + "score": 1.0, + "content": "to use in (13). We begin with", + "type": "text" + }, + { + "bbox": [ + 477, + 367, + 502, + 379 + ], + "score": 0.91, + "content": "\\nabla _ { z } \\varphi _ { k }", + "type": "inline_equation" + } + ], + "index": 19 + } + ], + "index": 19 + }, + { + "type": "interline_equation", + "bbox": [ + 112, + 382, + 500, + 449 + ], + "lines": [ + { + "bbox": [ + 112, + 382, + 500, + 449 + ], + "spans": [ + { + "bbox": [ + 112, + 382, + 500, + 449 + ], + "score": 0.93, + "content": "\\begin{array} { r l r } & { } & { \\displaystyle \\| \\nabla _ { z } \\varphi _ { k } \\| ^ { 2 } = \\Big \\| \\displaystyle \\sum _ { i = 1 } ^ { n + 1 } y _ { i } ^ { k } \\Big \\| ^ { 2 } \\leq 2 \\| y _ { n + 1 } ^ { k } \\| ^ { 2 } + 2 \\Big \\| \\displaystyle \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\Big \\| ^ { 2 } = 2 \\Big \\| B ( x _ { n + 1 } ^ { k } ) + e ^ { k } \\Big \\| ^ { 2 } + 2 \\Big \\| \\displaystyle \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\Big \\| ^ { 2 } } \\\\ & { } & { \\leq 4 \\| B ( x _ { n + 1 } ^ { k } ) \\| ^ { 2 } + 2 \\Big \\| \\displaystyle \\sum _ { i = 1 
} ^ { n } y _ { i } ^ { k } \\Big \\| ^ { 2 } + 4 \\| e ^ { k } \\| ^ { 2 } . } \\end{array}", + "type": "interline_equation", + "image_path": "2fda7ecb6ddff8f05e5d2ad354a335aa0e69097667148b35a03b6b9c5479dce8.jpg" + } + ] + } + ], + "index": 21, + "virtual_lines": [ + { + "bbox": [ + 112, + 382, + 500, + 404.3333333333333 + ], + "spans": [], + "index": 20 + }, + { + "bbox": [ + 112, + 404.3333333333333, + 500, + 426.66666666666663 + ], + "spans": [], + "index": 21 + }, + { + "bbox": [ + 112, + 426.66666666666663, + 500, + 448.99999999999994 + ], + "spans": [], + "index": 22 + } + ] + }, + { + "type": "text", + "bbox": [ + 105, + 450, + 504, + 474 + ], + "lines": [ + { + "bbox": [ + 105, + 450, + 506, + 464 + ], + "spans": [ + { + "bbox": [ + 105, + 450, + 276, + 464 + ], + "score": 1.0, + "content": "Now next take expectations with respect to", + "type": "text" + }, + { + "bbox": [ + 276, + 451, + 289, + 462 + ], + "score": 0.9, + "content": "\\mathcal { F } _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 290, + 450, + 307, + 464 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 307, + 451, + 318, + 462 + ], + "score": 0.88, + "content": "\\mathcal { E } _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 318, + 450, + 506, + 464 + ], + "score": 1.0, + "content": ", and use the bound on the variance of the noise", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 460, + 179, + 475 + ], + "spans": [ + { + "bbox": [ + 105, + 460, + 179, + 475 + ], + "score": 1.0, + "content": "in (11), obtaining", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 23.5 + }, + { + "type": "interline_equation", + "bbox": [ + 147, + 475, + 463, + 543 + ], + "lines": [ + { + "bbox": [ + 147, + 475, + 463, + 543 + ], + "spans": [ + { + "bbox": [ + 147, + 475, + 463, + 543 + ], + "score": 0.95, + "content": "\\begin{array} { r l r } { { \\mathbb { E } [ \\| \\nabla _ { z } \\varphi _ { k } \\| ^ { 2 } | 
\\mathcal { F } _ { k } , \\mathcal { E } _ { k } ] \\leq \\mathbb { E } [ 4 \\| B ( x _ { n + 1 } ^ { k } ) \\| ^ { 2 } + 2 \\Big \\| \\displaystyle \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\Big \\| ^ { 2 } + 4 \\| e ^ { k } \\| ^ { 2 } \\ \\Big | \\ \\mathcal { F } _ { k } , \\mathcal { E } _ { k } ] } } \\\\ & { } & { \\leq 4 ( N + 1 ) \\| B ( x _ { n + 1 } ^ { k } ) \\| ^ { 2 } + 2 \\Big \\| \\displaystyle \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\Big \\| ^ { 2 } + 4 N , ~ } \\end{array}", + "type": "interline_equation", + "image_path": "b8f7bd7c86a72deb4ba7e0b5cb74ab712446b5c629076a14ff36135012f0ce42.jpg" + } + ] + } + ], + "index": 26, + "virtual_lines": [ + { + "bbox": [ + 147, + 475, + 463, + 497.6666666666667 + ], + "spans": [], + "index": 25 + }, + { + "bbox": [ + 147, + 497.6666666666667, + 463, + 520.3333333333334 + ], + "spans": [], + "index": 26 + }, + { + "bbox": [ + 147, + 520.3333333333334, + 463, + 543.0 + ], + "spans": [], + "index": 27 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 545, + 505, + 569 + ], + "lines": [ + { + "bbox": [ + 105, + 543, + 504, + 559 + ], + "spans": [ + { + "bbox": [ + 105, + 543, + 213, + 559 + ], + "score": 1.0, + "content": "where we have used that", + "type": "text" + }, + { + "bbox": [ + 213, + 545, + 224, + 558 + ], + "score": 0.9, + "content": "y _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 225, + 543, + 236, + 559 + ], + "score": 1.0, + "content": "is", + "type": "text" + }, + { + "bbox": [ + 237, + 546, + 249, + 557 + ], + "score": 0.89, + "content": "\\mathcal { F } _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 250, + 543, + 317, + 559 + ], + "score": 1.0, + "content": "-measurable for", + "type": "text" + }, + { + "bbox": [ + 317, + 546, + 354, + 556 + ], + "score": 0.89, + "content": "i \\in 1 . . n", + "type": "inline_equation" + }, + { + "bbox": [ + 354, + 543, + 492, + 559 + ], + "score": 1.0, + "content": ". 
Thus, taking expectations over", + "type": "text" + }, + { + "bbox": [ + 493, + 546, + 504, + 557 + ], + "score": 0.87, + "content": "\\mathcal { E } _ { k }", + "type": "inline_equation" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 556, + 210, + 569 + ], + "spans": [ + { + "bbox": [ + 105, + 556, + 168, + 569 + ], + "score": 1.0, + "content": "conditioned on", + "type": "text" + }, + { + "bbox": [ + 169, + 558, + 182, + 568 + ], + "score": 0.89, + "content": "\\mathcal { F } _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 182, + 556, + 210, + 569 + ], + "score": 1.0, + "content": "yields", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 28.5 + }, + { + "type": "interline_equation", + "bbox": [ + 158, + 570, + 452, + 603 + ], + "lines": [ + { + "bbox": [ + 158, + 570, + 452, + 603 + ], + "spans": [ + { + "bbox": [ + 158, + 570, + 452, + 603 + ], + "score": 0.93, + "content": "\\mathbb { E } \\left[ \\| \\nabla _ { z } \\varphi _ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } \\right] \\leq 4 ( N + 1 ) \\mathbb { E } [ \\| B ( x _ { n + 1 } ^ { k } ) \\| ^ { 2 } | \\mathcal { F } _ { k } ] + 2 \\Big \\| \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\Big \\| ^ { 2 } + 4 N .", + "type": "interline_equation", + "image_path": "c3c8f9a6dab58330aeb949921964b22e45fc17b711843fde9cdea4fe9d522b4f.jpg" + } + ] + } + ], + "index": 31, + "virtual_lines": [ + { + "bbox": [ + 158, + 570, + 452, + 581.0 + ], + "spans": [], + "index": 30 + }, + { + "bbox": [ + 158, + 581.0, + 452, + 592.0 + ], + "spans": [], + "index": 31 + }, + { + "bbox": [ + 158, + 592.0, + 452, + 603.0 + ], + "spans": [], + "index": 32 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 611, + 342, + 623 + ], + "lines": [ + { + "bbox": [ + 105, + 609, + 342, + 624 + ], + "spans": [ + { + "bbox": [ + 105, + 609, + 342, + 624 + ], + "score": 1.0, + "content": "We will now bound the two terms on the right side of (22).", + "type": "text" + } + ], + "index": 33 + } + ], + "index": 33 
+ }, + { + "type": "title", + "bbox": [ + 107, + 634, + 227, + 645 + ], + "lines": [ + { + "bbox": [ + 106, + 631, + 227, + 648 + ], + "spans": [ + { + "bbox": [ + 106, + 631, + 227, + 648 + ], + "score": 1.0, + "content": "C.3.1 FIRST TERM IN (22)", + "type": "text" + } + ], + "index": 34 + } + ], + "index": 34 + }, + { + "type": "text", + "bbox": [ + 106, + 653, + 167, + 664 + ], + "lines": [ + { + "bbox": [ + 105, + 653, + 168, + 666 + ], + "spans": [ + { + "bbox": [ + 105, + 653, + 168, + 666 + ], + "score": 1.0, + "content": "First, note that", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 35 + }, + { + "type": "interline_equation", + "bbox": [ + 207, + 665, + 403, + 733 + ], + "lines": [ + { + "bbox": [ + 207, + 665, + 403, + 733 + ], + "spans": [ + { + "bbox": [ + 207, + 665, + 403, + 733 + ], + "score": 0.95, + "content": "\\begin{array} { r l } & { \\| B ( z ^ { k } ) \\| ^ { 2 } = \\| B ( z ^ { k } ) - B ( z ^ { * } ) + B ( z ^ { * } ) ) \\| ^ { 2 } } \\\\ & { \\qquad \\leq 2 \\| B ( z ^ { k } ) - B ( z ^ { * } ) \\| ^ { 2 } + 2 \\| B ( z ^ { * } ) \\| ^ { 2 } } \\\\ & { \\qquad \\leq 2 L ^ { 2 } \\| z ^ { k } - z ^ { * } \\| ^ { 2 } + 2 \\| B ( z ^ { * } ) \\| ^ { 2 } } \\\\ & { \\qquad \\leq 2 L ^ { 2 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + 2 \\| B ( z ^ { * } ) \\| ^ { 2 } . 
} \\end{array}", + "type": "interline_equation", + "image_path": "0690b55127f958d10263ac0a027e14bf6cefdfc368630bbe58f7258713d59c6f.jpg" + } + ] + } + ], + "index": 37.5, + "virtual_lines": [ + { + "bbox": [ + 207, + 665, + 403, + 682.0 + ], + "spans": [], + "index": 36 + }, + { + "bbox": [ + 207, + 682.0, + 403, + 699.0 + ], + "spans": [], + "index": 37 + }, + { + "bbox": [ + 207, + 699.0, + 403, + 716.0 + ], + "spans": [], + "index": 38 + }, + { + "bbox": [ + 207, + 716.0, + 403, + 733.0 + ], + "spans": [], + "index": 39 + } + ] + } + ], + "page_idx": 16, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 106, + 26, + 308, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 760 + ], + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 765 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 765 + ], + "score": 1.0, + "content": "17", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "title", + "bbox": [ + 108, + 82, + 277, + 94 + ], + "lines": [ + { + "bbox": [ + 106, + 82, + 278, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 82, + 278, + 95 + ], + "score": 1.0, + "content": "C.2 IMPORTANT RECURSION FOR SPS", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 106, + 102, + 504, + 126 + ], + "lines": [ + { + "bbox": [ + 105, + 100, + 505, + 117 + ], + "spans": [ + { + "bbox": [ + 105, + 100, + 505, + 117 + ], + "score": 1.0, + "content": "The following lemma summarizes the key recursion satisfied by Algorithm 1, to which we will apply", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 114, + 327, + 126 + ], + "spans": [ + { + "bbox": [ + 106, + 114, + 195, + 126 + ], 
+ "score": 1.0, + "content": "Lemma 2. Recall that", + "type": "text" + }, + { + "bbox": [ + 196, + 115, + 204, + 124 + ], + "score": 0.81, + "content": "L", + "type": "inline_equation" + }, + { + "bbox": [ + 204, + 114, + 315, + 126 + ], + "score": 1.0, + "content": "is the Lipschitz constant of", + "type": "text" + }, + { + "bbox": [ + 315, + 114, + 324, + 124 + ], + "score": 0.82, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 324, + 114, + 327, + 126 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 2 + } + ], + "index": 1.5, + "bbox_fs": [ + 105, + 100, + 505, + 126 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 127, + 333, + 140 + ], + "lines": [ + { + "bbox": [ + 106, + 127, + 331, + 141 + ], + "spans": [ + { + "bbox": [ + 106, + 127, + 213, + 141 + ], + "score": 1.0, + "content": "Lemma 3. For Algorithm", + "type": "text" + }, + { + "bbox": [ + 214, + 128, + 219, + 138 + ], + "score": 0.34, + "content": "I", + "type": "inline_equation" + }, + { + "bbox": [ + 219, + 127, + 331, + 141 + ], + "score": 1.0, + "content": ", suppose (9)–(11) hold and", + "type": "text" + } + ], + "index": 3 + } + ], + "index": 3, + "bbox_fs": [ + 106, + 127, + 331, + 141 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 273, + 142, + 337, + 155 + ], + "lines": [ + { + "bbox": [ + 273, + 142, + 337, + 155 + ], + "spans": [ + { + "bbox": [ + 273, + 142, + 337, + 155 + ], + "score": 0.91, + "content": "\\rho _ { k } \\leq \\overline { { \\rho } } < 1 / L .", + "type": "interline_equation", + "image_path": "c862cbc29560226484ac3f9ae67fa80a7012a73774759885a9a7b90983eb3ecf.jpg" + } + ] + } + ], + "index": 4, + "virtual_lines": [ + { + "bbox": [ + 273, + 142, + 337, + 155 + ], + "spans": [], + "index": 4 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 158, + 122, + 169 + ], + "lines": [ + { + "bbox": [ + 104, + 158, + 123, + 172 + ], + "spans": [ + { + "bbox": [ + 104, + 158, + 123, + 172 + ], + "score": 1.0, + 
"content": "Let", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 5, + "bbox_fs": [ + 104, + 158, + 123, + 172 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 145, + 169, + 465, + 203 + ], + "lines": [ + { + "bbox": [ + 145, + 169, + 465, + 203 + ], + "spans": [ + { + "bbox": [ + 145, + 169, + 465, + 203 + ], + "score": 0.93, + "content": "T _ { k } \\doteq \\frac { \\tau } { \\overline { { \\rho } } } \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\frac { 1 } { \\overline { { \\rho } } \\tau } \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + 2 \\big ( 1 - \\overline { { \\rho } } L \\big ) \\| B \\big ( z ^ { k } \\big ) - w _ { n + 1 } ^ { k } \\| ^ { 2 }", + "type": "interline_equation", + "image_path": "5d23dc14d367047bfe73ff60a08031457aa8aeee336050d8f37988eb80795f5f.jpg" + } + ] + } + ], + "index": 7, + "virtual_lines": [ + { + "bbox": [ + 145, + 169, + 465, + 180.33333333333334 + ], + "spans": [], + "index": 6 + }, + { + "bbox": [ + 145, + 180.33333333333334, + 465, + 191.66666666666669 + ], + "spans": [], + "index": 7 + }, + { + "bbox": [ + 145, + 191.66666666666669, + 465, + 203.00000000000003 + ], + "spans": [], + "index": 8 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 204, + 269, + 216 + ], + "lines": [ + { + "bbox": [ + 106, + 203, + 270, + 218 + ], + "spans": [ + { + "bbox": [ + 106, + 203, + 153, + 218 + ], + "score": 1.0, + "content": "then for all", + "type": "text" + }, + { + "bbox": [ + 153, + 205, + 183, + 216 + ], + "score": 0.91, + "content": "p ^ { * } \\in { \\mathcal { S } }", + "type": "inline_equation" + }, + { + "bbox": [ + 183, + 203, + 270, + 218 + ], + "score": 1.0, + "content": ", with probability one", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 9, + "bbox_fs": [ + 106, + 203, + 270, + 218 + ] + }, + { + "type": "text", + "bbox": [ + 105, + 218, + 503, + 248 + ], + "lines": [ + { + "bbox": [ + 115, + 216, + 506, + 236 + ], + 
"spans": [ + { + "bbox": [ + 115, + 218, + 478, + 233 + ], + "score": 0.9, + "content": "\\begin{array} { r } { \\mathbb { E } [ \\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } | \\mathcal { F } _ { k } ] \\le \\big ( 1 + C _ { 1 } \\alpha _ { k } ^ { 2 } + C _ { 3 } \\alpha _ { k } \\rho _ { k } ^ { 2 } \\big ) \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - \\alpha _ { k } \\rho _ { k } T _ { k } + C _ { 2 } \\alpha _ { k } ^ { 2 } + C _ { 4 } \\alpha _ { k } \\rho _ { k } ^ { 2 } } \\end{array}", + "type": "inline_equation", + "image_path": "c21990d1265024b692e80bc3d6bc1f7163f688481833e2c82477412bc3de38f0.jpg" + }, + { + "bbox": [ + 479, + 216, + 506, + 236 + ], + "score": 1.0, + "content": "(21)", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 233, + 501, + 249 + ], + "spans": [ + { + "bbox": [ + 105, + 233, + 133, + 249 + ], + "score": 1.0, + "content": "where", + "type": "text" + }, + { + "bbox": [ + 133, + 236, + 179, + 247 + ], + "score": 0.93, + "content": "C _ { 1 } , \\ldots , C _ { 4 }", + "type": "inline_equation" + }, + { + "bbox": [ + 180, + 233, + 501, + 249 + ], + "score": 1.0, + "content": "are nonegative constants defined in (33), (34), (48), and (49) below, respectively.", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 10.5, + "bbox_fs": [ + 105, + 216, + 506, + 249 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 254, + 433, + 267 + ], + "lines": [ + { + "bbox": [ + 105, + 254, + 433, + 268 + ], + "spans": [ + { + "bbox": [ + 105, + 254, + 145, + 268 + ], + "score": 1.0, + "content": "Note that", + "type": "text" + }, + { + "bbox": [ + 146, + 256, + 158, + 267 + ], + "score": 0.89, + "content": "T _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 158, + 254, + 354, + 268 + ], + "score": 1.0, + "content": "is a scaled version of the approximation residual", + "type": "text" + }, + { + "bbox": [ + 354, + 255, + 368, + 267 + ], + "score": 0.89, + "content": "G _ { k }", + "type": "inline_equation" + }, + { + 
"bbox": [ + 369, + 254, + 433, + 268 + ], + "score": 1.0, + "content": "defined in (14).", + "type": "text" + } + ], + "index": 12 + } + ], + "index": 12, + "bbox_fs": [ + 105, + 254, + 433, + 268 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 272, + 506, + 306 + ], + "lines": [ + { + "bbox": [ + 105, + 271, + 506, + 285 + ], + "spans": [ + { + "bbox": [ + 105, + 271, + 506, + 285 + ], + "score": 1.0, + "content": "We proceed to first prove Lemma 3 and then exploit the implications of Lemma 2. Referring to (10)", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 283, + 506, + 297 + ], + "spans": [ + { + "bbox": [ + 105, + 283, + 159, + 297 + ], + "score": 1.0, + "content": "and (11), let", + "type": "text" + }, + { + "bbox": [ + 159, + 284, + 238, + 296 + ], + "score": 0.94, + "content": "N \\doteq \\mathrm { m a x } _ { j \\in 1 \\ldots 4 } N _ { j }", + "type": "inline_equation" + }, + { + "bbox": [ + 239, + 283, + 402, + 297 + ], + "score": 1.0, + "content": ". 
To simplify the constants, we will use", + "type": "text" + }, + { + "bbox": [ + 402, + 284, + 412, + 293 + ], + "score": 0.81, + "content": "N", + "type": "inline_equation" + }, + { + "bbox": [ + 413, + 283, + 460, + 297 + ], + "score": 1.0, + "content": "in place of", + "type": "text" + }, + { + "bbox": [ + 461, + 284, + 474, + 296 + ], + "score": 0.9, + "content": "N _ { j }", + "type": "inline_equation" + }, + { + "bbox": [ + 474, + 283, + 506, + 297 + ], + "score": 1.0, + "content": "for the", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 294, + 273, + 307 + ], + "spans": [ + { + "bbox": [ + 106, + 294, + 273, + 307 + ], + "score": 1.0, + "content": "noise variance bounds given in (10)-(11).", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 14, + "bbox_fs": [ + 105, + 271, + 506, + 307 + ] + }, + { + "type": "title", + "bbox": [ + 108, + 318, + 281, + 330 + ], + "lines": [ + { + "bbox": [ + 105, + 317, + 282, + 331 + ], + "spans": [ + { + "bbox": [ + 105, + 317, + 282, + 331 + ], + "score": 1.0, + "content": "C.3 UPPER BOUNDING THE GRADIENT", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 16 + }, + { + "type": "text", + "bbox": [ + 105, + 338, + 505, + 362 + ], + "lines": [ + { + "bbox": [ + 105, + 338, + 506, + 354 + ], + "spans": [ + { + "bbox": [ + 105, + 338, + 268, + 354 + ], + "score": 1.0, + "content": "Throughout the analysis, we fix some", + "type": "text" + }, + { + "bbox": [ + 269, + 339, + 397, + 352 + ], + "score": 0.93, + "content": "p ^ { * } = ( z ^ { * } , w _ { 1 } ^ { * } \\ldots , w _ { n + 1 } ^ { * } ) \\in \\mathcal { S }", + "type": "inline_equation" + }, + { + "bbox": [ + 397, + 338, + 506, + 354 + ], + "score": 1.0, + "content": ". 
All statements are with", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 350, + 498, + 362 + ], + "spans": [ + { + "bbox": [ + 105, + 350, + 498, + 362 + ], + "score": 1.0, + "content": "probability one (almost surely), but for brevity we will omit this unless it needs to be emphasized.", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 17.5, + "bbox_fs": [ + 105, + 338, + 506, + 362 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 366, + 504, + 379 + ], + "lines": [ + { + "bbox": [ + 105, + 364, + 502, + 382 + ], + "spans": [ + { + "bbox": [ + 105, + 364, + 323, + 382 + ], + "score": 1.0, + "content": "In this section, we derive appropriate upper bounds for", + "type": "text" + }, + { + "bbox": [ + 324, + 367, + 358, + 379 + ], + "score": 0.93, + "content": "\\| \\nabla \\varphi _ { k } \\| ^ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 359, + 364, + 477, + 382 + ], + "score": 1.0, + "content": "to use in (13). We begin with", + "type": "text" + }, + { + "bbox": [ + 477, + 367, + 502, + 379 + ], + "score": 0.91, + "content": "\\nabla _ { z } \\varphi _ { k }", + "type": "inline_equation" + } + ], + "index": 19 + } + ], + "index": 19, + "bbox_fs": [ + 105, + 364, + 502, + 382 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 112, + 382, + 500, + 449 + ], + "lines": [ + { + "bbox": [ + 112, + 382, + 500, + 449 + ], + "spans": [ + { + "bbox": [ + 112, + 382, + 500, + 449 + ], + "score": 0.93, + "content": "\\begin{array} { r l r } & { } & { \\displaystyle \\| \\nabla _ { z } \\varphi _ { k } \\| ^ { 2 } = \\Big \\| \\displaystyle \\sum _ { i = 1 } ^ { n + 1 } y _ { i } ^ { k } \\Big \\| ^ { 2 } \\leq 2 \\| y _ { n + 1 } ^ { k } \\| ^ { 2 } + 2 \\Big \\| \\displaystyle \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\Big \\| ^ { 2 } = 2 \\Big \\| B ( x _ { n + 1 } ^ { k } ) + e ^ { k } \\Big \\| ^ { 2 } + 2 \\Big \\| \\displaystyle \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\Big \\| ^ { 2 } } \\\\ & { } & { 
\\leq 4 \\| B ( x _ { n + 1 } ^ { k } ) \\| ^ { 2 } + 2 \\Big \\| \\displaystyle \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\Big \\| ^ { 2 } + 4 \\| e ^ { k } \\| ^ { 2 } . } \\end{array}", + "type": "interline_equation", + "image_path": "2fda7ecb6ddff8f05e5d2ad354a335aa0e69097667148b35a03b6b9c5479dce8.jpg" + } + ] + } + ], + "index": 21, + "virtual_lines": [ + { + "bbox": [ + 112, + 382, + 500, + 404.3333333333333 + ], + "spans": [], + "index": 20 + }, + { + "bbox": [ + 112, + 404.3333333333333, + 500, + 426.66666666666663 + ], + "spans": [], + "index": 21 + }, + { + "bbox": [ + 112, + 426.66666666666663, + 500, + 448.99999999999994 + ], + "spans": [], + "index": 22 + } + ] + }, + { + "type": "text", + "bbox": [ + 105, + 450, + 504, + 474 + ], + "lines": [ + { + "bbox": [ + 105, + 450, + 506, + 464 + ], + "spans": [ + { + "bbox": [ + 105, + 450, + 276, + 464 + ], + "score": 1.0, + "content": "Now next take expectations with respect to", + "type": "text" + }, + { + "bbox": [ + 276, + 451, + 289, + 462 + ], + "score": 0.9, + "content": "\\mathcal { F } _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 290, + 450, + 307, + 464 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 307, + 451, + 318, + 462 + ], + "score": 0.88, + "content": "\\mathcal { E } _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 318, + 450, + 506, + 464 + ], + "score": 1.0, + "content": ", and use the bound on the variance of the noise", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 460, + 179, + 475 + ], + "spans": [ + { + "bbox": [ + 105, + 460, + 179, + 475 + ], + "score": 1.0, + "content": "in (11), obtaining", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 23.5, + "bbox_fs": [ + 105, + 450, + 506, + 475 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 147, + 475, + 463, + 543 + ], + "lines": [ + { + "bbox": [ + 147, + 475, + 463, + 543 + ], + "spans": [ + { + "bbox": [ + 147, + 475, + 463, 
+ 543 + ], + "score": 0.95, + "content": "\\begin{array} { r l r } { { \\mathbb { E } [ \\| \\nabla _ { z } \\varphi _ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } , \\mathcal { E } _ { k } ] \\leq \\mathbb { E } [ 4 \\| B ( x _ { n + 1 } ^ { k } ) \\| ^ { 2 } + 2 \\Big \\| \\displaystyle \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\Big \\| ^ { 2 } + 4 \\| e ^ { k } \\| ^ { 2 } \\ \\Big | \\ \\mathcal { F } _ { k } , \\mathcal { E } _ { k } ] } } \\\\ & { } & { \\leq 4 ( N + 1 ) \\| B ( x _ { n + 1 } ^ { k } ) \\| ^ { 2 } + 2 \\Big \\| \\displaystyle \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\Big \\| ^ { 2 } + 4 N , ~ } \\end{array}", + "type": "interline_equation", + "image_path": "b8f7bd7c86a72deb4ba7e0b5cb74ab712446b5c629076a14ff36135012f0ce42.jpg" + } + ] + } + ], + "index": 26, + "virtual_lines": [ + { + "bbox": [ + 147, + 475, + 463, + 497.6666666666667 + ], + "spans": [], + "index": 25 + }, + { + "bbox": [ + 147, + 497.6666666666667, + 463, + 520.3333333333334 + ], + "spans": [], + "index": 26 + }, + { + "bbox": [ + 147, + 520.3333333333334, + 463, + 543.0 + ], + "spans": [], + "index": 27 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 545, + 505, + 569 + ], + "lines": [ + { + "bbox": [ + 105, + 543, + 504, + 559 + ], + "spans": [ + { + "bbox": [ + 105, + 543, + 213, + 559 + ], + "score": 1.0, + "content": "where we have used that", + "type": "text" + }, + { + "bbox": [ + 213, + 545, + 224, + 558 + ], + "score": 0.9, + "content": "y _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 225, + 543, + 236, + 559 + ], + "score": 1.0, + "content": "is", + "type": "text" + }, + { + "bbox": [ + 237, + 546, + 249, + 557 + ], + "score": 0.89, + "content": "\\mathcal { F } _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 250, + 543, + 317, + 559 + ], + "score": 1.0, + "content": "-measurable for", + "type": "text" + }, + { + "bbox": [ + 317, + 546, + 354, + 556 + ], + "score": 0.89, + "content": "i \\in 1 . . 
n", + "type": "inline_equation" + }, + { + "bbox": [ + 354, + 543, + 492, + 559 + ], + "score": 1.0, + "content": ". Thus, taking expectations over", + "type": "text" + }, + { + "bbox": [ + 493, + 546, + 504, + 557 + ], + "score": 0.87, + "content": "\\mathcal { E } _ { k }", + "type": "inline_equation" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 556, + 210, + 569 + ], + "spans": [ + { + "bbox": [ + 105, + 556, + 168, + 569 + ], + "score": 1.0, + "content": "conditioned on", + "type": "text" + }, + { + "bbox": [ + 169, + 558, + 182, + 568 + ], + "score": 0.89, + "content": "\\mathcal { F } _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 182, + 556, + 210, + 569 + ], + "score": 1.0, + "content": "yields", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 28.5, + "bbox_fs": [ + 105, + 543, + 504, + 569 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 158, + 570, + 452, + 603 + ], + "lines": [ + { + "bbox": [ + 158, + 570, + 452, + 603 + ], + "spans": [ + { + "bbox": [ + 158, + 570, + 452, + 603 + ], + "score": 0.93, + "content": "\\mathbb { E } \\left[ \\| \\nabla _ { z } \\varphi _ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } \\right] \\leq 4 ( N + 1 ) \\mathbb { E } [ \\| B ( x _ { n + 1 } ^ { k } ) \\| ^ { 2 } | \\mathcal { F } _ { k } ] + 2 \\Big \\| \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\Big \\| ^ { 2 } + 4 N .", + "type": "interline_equation", + "image_path": "c3c8f9a6dab58330aeb949921964b22e45fc17b711843fde9cdea4fe9d522b4f.jpg" + } + ] + } + ], + "index": 31, + "virtual_lines": [ + { + "bbox": [ + 158, + 570, + 452, + 581.0 + ], + "spans": [], + "index": 30 + }, + { + "bbox": [ + 158, + 581.0, + 452, + 592.0 + ], + "spans": [], + "index": 31 + }, + { + "bbox": [ + 158, + 592.0, + 452, + 603.0 + ], + "spans": [], + "index": 32 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 611, + 342, + 623 + ], + "lines": [ + { + "bbox": [ + 105, + 609, + 342, + 624 + ], + "spans": [ + { + "bbox": [ + 105, + 609, + 342, 
+ 624 + ], + "score": 1.0, + "content": "We will now bound the two terms on the right side of (22).", + "type": "text" + } + ], + "index": 33 + } + ], + "index": 33, + "bbox_fs": [ + 105, + 609, + 342, + 624 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 634, + 227, + 645 + ], + "lines": [ + { + "bbox": [ + 106, + 631, + 227, + 648 + ], + "spans": [ + { + "bbox": [ + 106, + 631, + 227, + 648 + ], + "score": 1.0, + "content": "C.3.1 FIRST TERM IN (22)", + "type": "text" + } + ], + "index": 34 + } + ], + "index": 34 + }, + { + "type": "text", + "bbox": [ + 106, + 653, + 167, + 664 + ], + "lines": [ + { + "bbox": [ + 105, + 653, + 168, + 666 + ], + "spans": [ + { + "bbox": [ + 105, + 653, + 168, + 666 + ], + "score": 1.0, + "content": "First, note that", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 35, + "bbox_fs": [ + 105, + 653, + 168, + 666 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 207, + 665, + 403, + 733 + ], + "lines": [ + { + "bbox": [ + 207, + 665, + 403, + 733 + ], + "spans": [ + { + "bbox": [ + 207, + 665, + 403, + 733 + ], + "score": 0.95, + "content": "\\begin{array} { r l } & { \\| B ( z ^ { k } ) \\| ^ { 2 } = \\| B ( z ^ { k } ) - B ( z ^ { * } ) + B ( z ^ { * } ) ) \\| ^ { 2 } } \\\\ & { \\qquad \\leq 2 \\| B ( z ^ { k } ) - B ( z ^ { * } ) \\| ^ { 2 } + 2 \\| B ( z ^ { * } ) \\| ^ { 2 } } \\\\ & { \\qquad \\leq 2 L ^ { 2 } \\| z ^ { k } - z ^ { * } \\| ^ { 2 } + 2 \\| B ( z ^ { * } ) \\| ^ { 2 } } \\\\ & { \\qquad \\leq 2 L ^ { 2 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + 2 \\| B ( z ^ { * } ) \\| ^ { 2 } . 
} \\end{array}", + "type": "interline_equation", + "image_path": "0690b55127f958d10263ac0a027e14bf6cefdfc368630bbe58f7258713d59c6f.jpg" + } + ] + } + ], + "index": 37.5, + "virtual_lines": [ + { + "bbox": [ + 207, + 665, + 403, + 682.0 + ], + "spans": [], + "index": 36 + }, + { + "bbox": [ + 207, + 682.0, + 403, + 699.0 + ], + "spans": [], + "index": 37 + }, + { + "bbox": [ + 207, + 699.0, + 403, + 716.0 + ], + "spans": [], + "index": 38 + }, + { + "bbox": [ + 207, + 716.0, + 403, + 733.0 + ], + "spans": [], + "index": 39 + } + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 82, + 349, + 95 + ], + "lines": [ + { + "bbox": [ + 105, + 81, + 349, + 96 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 349, + 96 + ], + "score": 1.0, + "content": "Now, returning to the first term on the right of (22), we have", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "interline_equation", + "bbox": [ + 168, + 97, + 442, + 164 + ], + "lines": [ + { + "bbox": [ + 168, + 97, + 442, + 164 + ], + "spans": [ + { + "bbox": [ + 168, + 97, + 442, + 164 + ], + "score": 0.94, + "content": "\\begin{array} { r l } & { \\| B ( x _ { n + 1 } ^ { k } ) \\| ^ { 2 } = \\| B ( z ^ { k } ) + B ( x _ { n + 1 } ^ { k } ) - B ( z ^ { k } ) \\| ^ { 2 } } \\\\ & { \\qquad \\leq 2 \\| B ( z ^ { k } ) \\| ^ { 2 } + 2 \\| B ( x _ { n + 1 } ^ { k } ) - B ( z ^ { k } ) \\| ^ { 2 } } \\\\ & { \\qquad \\leq 2 \\| B ( z ^ { k } ) \\| ^ { 2 } + 2 L ^ { 2 } \\| x _ { n + 1 } ^ { k } - z ^ { k } \\| ^ { 2 } } \\\\ & { \\qquad \\leq 4 L ^ { 2 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + 4 \\| B ( z ^ { * } ) \\| ^ { 2 } + 2 L ^ { 2 } \\| x _ { n + 1 } ^ { k } - z ^ { k } \\| ^ { 2 } } \\end{array}", + "type": "interline_equation", + "image_path": "fc96ea33abc768a53fc35ed40b9ee3ab445a61dd4460fc11acc56273b242ed2f.jpg" + } + ] + } + ], + "index": 2, + "virtual_lines": [ + { + "bbox": [ + 168, + 97, + 442, + 119.33333333333333 + ], + "spans": [], + "index": 
1 + }, + { + "bbox": [ + 168, + 119.33333333333333, + 442, + 141.66666666666666 + ], + "spans": [], + "index": 2 + }, + { + "bbox": [ + 168, + 141.66666666666666, + 442, + 164.0 + ], + "spans": [], + "index": 3 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 166, + 266, + 178 + ], + "lines": [ + { + "bbox": [ + 105, + 165, + 267, + 180 + ], + "spans": [ + { + "bbox": [ + 105, + 165, + 267, + 180 + ], + "score": 1.0, + "content": "where we have used (23) to obtain (24).", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 4 + }, + { + "type": "text", + "bbox": [ + 107, + 183, + 451, + 196 + ], + "lines": [ + { + "bbox": [ + 105, + 183, + 451, + 196 + ], + "spans": [ + { + "bbox": [ + 105, + 183, + 451, + 196 + ], + "score": 1.0, + "content": "For the third term in (24), we have from the calculation on line 7 of the algorithm that", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 5 + }, + { + "type": "interline_equation", + "bbox": [ + 179, + 199, + 430, + 216 + ], + "lines": [ + { + "bbox": [ + 179, + 199, + 430, + 216 + ], + "spans": [ + { + "bbox": [ + 179, + 199, + 430, + 216 + ], + "score": 0.88, + "content": "\\begin{array} { r } { x _ { n + 1 } ^ { k } - z ^ { k } = - \\rho _ { k } ( r ^ { k } - w _ { n + 1 } ^ { k } ) = - \\rho _ { k } ( B ( z ^ { k } ) + \\epsilon ^ { k } - w _ { n + 1 } ^ { k } ) , } \\end{array}", + "type": "interline_equation", + "image_path": "a98ab18044da605ed31a3ee780bc35f157cae0ca014dcdff3dff4e5cc89dec91.jpg" + } + ] + } + ], + "index": 6, + "virtual_lines": [ + { + "bbox": [ + 179, + 199, + 430, + 216 + ], + "spans": [], + "index": 6 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 219, + 162, + 230 + ], + "lines": [ + { + "bbox": [ + 106, + 219, + 162, + 230 + ], + "spans": [ + { + "bbox": [ + 106, + 219, + 162, + 230 + ], + "score": 1.0, + "content": "and therefore", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 7 + }, + { + "type": "interline_equation", + "bbox": [ + 193, + 233, + 
417, + 284 + ], + "lines": [ + { + "bbox": [ + 193, + 233, + 417, + 284 + ], + "spans": [ + { + "bbox": [ + 193, + 233, + 417, + 284 + ], + "score": 0.93, + "content": "\\begin{array} { r l } & { \\| x _ { n + 1 } ^ { k } - z ^ { k } \\| ^ { 2 } = \\rho _ { k } ^ { 2 } \\| B ( z ^ { k } ) + \\epsilon ^ { k } - w _ { n + 1 } ^ { k } \\| ^ { 2 } } \\\\ & { \\qquad \\leq \\overline { { \\rho } } ^ { 2 } \\| B ( z ^ { k } ) + \\epsilon ^ { k } - w _ { n + 1 } ^ { k } \\| ^ { 2 } } \\\\ & { \\qquad \\leq 3 \\overline { { \\rho } } ^ { 2 } ( \\| B ( z ^ { k } ) \\| ^ { 2 } + \\| \\epsilon ^ { k } \\| ^ { 2 } + \\| w _ { n + 1 } ^ { k } \\| ^ { 2 } ) . } \\end{array}", + "type": "interline_equation", + "image_path": "db772aab9bf8c0b6e5652ce3a542f16efe9df2e178c894dcf9bec4cee67e4be2.jpg" + } + ] + } + ], + "index": 9, + "virtual_lines": [ + { + "bbox": [ + 193, + 233, + 417, + 250.0 + ], + "spans": [], + "index": 8 + }, + { + "bbox": [ + 193, + 250.0, + 417, + 267.0 + ], + "spans": [], + "index": 9 + }, + { + "bbox": [ + 193, + 267.0, + 417, + 284.0 + ], + "spans": [], + "index": 10 + } + ] + }, + { + "type": "text", + "bbox": [ + 104, + 285, + 479, + 298 + ], + "lines": [ + { + "bbox": [ + 105, + 285, + 480, + 299 + ], + "spans": [ + { + "bbox": [ + 105, + 285, + 274, + 299 + ], + "score": 1.0, + "content": "We next take expectations conditioned on", + "type": "text" + }, + { + "bbox": [ + 275, + 286, + 287, + 297 + ], + "score": 0.9, + "content": "\\mathcal { F } _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 288, + 285, + 480, + 299 + ], + "score": 1.0, + "content": "and use the noise variance bound (10) to obtain", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 11 + }, + { + "type": "interline_equation", + "bbox": [ + 160, + 300, + 450, + 335 + ], + "lines": [ + { + "bbox": [ + 160, + 300, + 450, + 335 + ], + "spans": [ + { + "bbox": [ + 160, + 300, + 450, + 335 + ], + "score": 0.93, + "content": "\\begin{array} { r l } & { \\mathbb { E } 
\\big [ \\| x _ { n + 1 } ^ { k } - z ^ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } \\big ] \\leq \\mathbb { E } \\big [ 3 \\overline { { \\rho } } ^ { 2 } \\big ( \\| B ( z ^ { k } ) \\| ^ { 2 } + \\| \\epsilon ^ { k } \\| ^ { 2 } + \\| w _ { n + 1 } ^ { k } \\| ^ { 2 } \\big ) | \\mathcal { F } _ { k } \\big ] } \\\\ & { \\qquad \\leq 3 \\overline { { \\rho } } ^ { 2 } \\big ( ( N + 1 ) \\| B ( z ^ { k } ) \\| ^ { 2 } + \\| w _ { n + 1 } ^ { k } \\| ^ { 2 } + N \\big ) . } \\end{array}", + "type": "interline_equation", + "image_path": "08aec390a4dbde35b7c1034a251928e02c538c6fc181a88f80a1e19b12af99bb.jpg" + } + ] + } + ], + "index": 13, + "virtual_lines": [ + { + "bbox": [ + 160, + 300, + 450, + 311.6666666666667 + ], + "spans": [], + "index": 12 + }, + { + "bbox": [ + 160, + 311.6666666666667, + 450, + 323.33333333333337 + ], + "spans": [], + "index": 13 + }, + { + "bbox": [ + 160, + 323.33333333333337, + 450, + 335.00000000000006 + ], + "spans": [], + "index": 14 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 338, + 148, + 349 + ], + "lines": [ + { + "bbox": [ + 106, + 337, + 148, + 349 + ], + "spans": [ + { + "bbox": [ + 106, + 337, + 148, + 349 + ], + "score": 1.0, + "content": "Therefore", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 15 + }, + { + "type": "interline_equation", + "bbox": [ + 118, + 351, + 493, + 462 + ], + "lines": [ + { + "bbox": [ + 118, + 351, + 493, + 462 + ], + "spans": [ + { + "bbox": [ + 118, + 351, + 493, + 462 + ], + "score": 0.95, + "content": "\\begin{array} { r l } & { \\mathbb { E } [ \\| x _ { n + 1 } ^ { k } - z ^ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } ] \\leq 6 \\bar { \\rho } ^ { 2 } \\big ( ( N + 1 ) \\| B ( z ^ { k } ) \\| ^ { 2 } + \\| w _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { * } \\| ^ { 2 } + \\| w _ { n + 1 } ^ { * } \\| ^ { 2 } \\big ) + 3 \\bar { \\rho } ^ { 2 } N } \\\\ & { \\qquad = 6 \\bar { \\rho } ^ { 2 } \\Big ( 2 ( N + 1 ) L ^ { 2 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + 2 ( N + 1 ) 
\\| B ( z ^ { * } ) \\| ^ { 2 } } \\\\ & { \\qquad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad } \\\\ & { \\qquad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad } \\\\ & { \\leq 6 \\bar { \\rho } ^ { 2 } \\big ( 2 ( N + 1 ) L ^ { 2 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + \\| w _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { * } \\| ^ { 2 } \\big ) } \\\\ & { \\qquad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad + 1 8 \\bar { \\rho } ^ { 2 } ( N + 1 ) \\| B ( z ^ { * } ) \\| ^ { 2 } + 3 \\bar { \\rho } ^ { 2 } N } \\\\ & { \\leq 1 8 \\bar { \\rho } ^ { 2 } ( N + 1 ) \\big ( ( L ^ { 2 } + 1 ) \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + \\| B ( z ^ { * } ) \\| ^ { 2 } \\big ) + 3 \\bar { \\rho } ^ { 2 } N } \\end{array}", + "type": "interline_equation", + "image_path": "42789262e30d48cfedbbc5971687ffc48b513ff47a65ac22b89674c97713ffdd.jpg" + } + ] + } + ], + "index": 17, + "virtual_lines": [ + { + "bbox": [ + 118, + 351, + 493, + 388.0 + ], + "spans": [], + "index": 16 + }, + { + "bbox": [ + 118, + 388.0, + 493, + 425.0 + ], + "spans": [], + "index": 17 + }, + { + "bbox": [ + 118, + 425.0, + 493, + 462.0 + ], + "spans": [], + "index": 18 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 465, + 470, + 478 + ], + "lines": [ + { + "bbox": [ + 105, + 464, + 471, + 480 + ], + "spans": [ + { + "bbox": [ + 105, + 464, + 248, + 480 + ], + "score": 1.0, + "content": "where in the equality uses (23) and", + "type": "text" + }, + { + "bbox": [ + 248, + 466, + 311, + 478 + ], + "score": 0.93, + "content": "w _ { n + 1 } ^ { * } = B ( z ^ { * } )", + "type": "inline_equation" + }, + { + "bbox": [ + 311, + 464, + 471, + 480 + 
], + "score": 1.0, + "content": ". Combining (24) and (25), we arrive at", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 19 + }, + { + "type": "interline_equation", + "bbox": [ + 157, + 482, + 454, + 521 + ], + "lines": [ + { + "bbox": [ + 157, + 482, + 454, + 521 + ], + "spans": [ + { + "bbox": [ + 157, + 482, + 454, + 521 + ], + "score": 0.91, + "content": "\\begin{array} { r l } & { \\mathbb { E } \\left[ \\left. B ( x _ { n + 1 } ^ { k } ) \\right. ^ { 2 } \\middle | \\mathcal { F } _ { k } \\right] \\leq 4 L ^ { 2 } \\left[ 1 + 9 \\overline { { \\rho } } ^ { 2 } ( L ^ { 2 } + 1 ) ( N + 1 ) \\right] \\Vert p ^ { k } - p ^ { * } \\Vert ^ { 2 } } \\\\ & { \\qquad + 4 \\big ( 1 + 9 \\overline { { \\rho } } ^ { 2 } L ^ { 2 } ( N + 1 ) \\big ) \\Vert B ( z ^ { * } ) \\Vert ^ { 2 } + 6 \\overline { { \\rho } } ^ { 2 } L ^ { 2 } N . } \\end{array}", + "type": "interline_equation", + "image_path": "c2b2419a8d9dba549c85dc9c915c00e304e96e2478c850da1c7b214ac7dae5d9.jpg" + } + ] + } + ], + "index": 21, + "virtual_lines": [ + { + "bbox": [ + 157, + 482, + 454, + 495.0 + ], + "spans": [], + "index": 20 + }, + { + "bbox": [ + 157, + 495.0, + 454, + 508.0 + ], + "spans": [], + "index": 21 + }, + { + "bbox": [ + 157, + 508.0, + 454, + 521.0 + ], + "spans": [], + "index": 22 + } + ] + }, + { + "type": "title", + "bbox": [ + 107, + 529, + 237, + 541 + ], + "lines": [ + { + "bbox": [ + 105, + 527, + 237, + 543 + ], + "spans": [ + { + "bbox": [ + 105, + 527, + 237, + 543 + ], + "score": 1.0, + "content": "C.3.2 SECOND TERM IN (22)", + "type": "text" + } + ], + "index": 23 + } + ], + "index": 23 + }, + { + "type": "text", + "bbox": [ + 105, + 547, + 468, + 561 + ], + "lines": [ + { + "bbox": [ + 104, + 545, + 468, + 563 + ], + "spans": [ + { + "bbox": [ + 104, + 545, + 122, + 563 + ], + "score": 1.0, + "content": "For", + "type": "text" + }, + { + "bbox": [ + 123, + 549, + 156, + 559 + ], + "score": 0.89, + "content": "i \\in 1 . . 
n", + "type": "inline_equation" + }, + { + "bbox": [ + 156, + 545, + 346, + 563 + ], + "score": 1.0, + "content": ", line 5 of the algorithm may be rearranged into", + "type": "text" + }, + { + "bbox": [ + 346, + 548, + 452, + 561 + ], + "score": 0.91, + "content": "y _ { i } ^ { k } = \\tau ^ { - 1 } ( z ^ { k } - x _ { i } ^ { k } ) + w _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 452, + 545, + 468, + 563 + ], + "score": 1.0, + "content": ", so", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 24 + }, + { + "type": "interline_equation", + "bbox": [ + 111, + 565, + 504, + 734 + ], + "lines": [ + { + "bbox": [ + 111, + 565, + 504, + 734 + ], + "spans": [ + { + "bbox": [ + 111, + 565, + 504, + 734 + ], + "score": 0.92, + "content": "\\begin{array} { r l r } { { \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\bigg \\| ^ { 2 } = \\bigg \\| \\sum _ { i = 1 } ^ { n } ( \\tau ^ { - 1 } ( z ^ { k } - x _ { i } ^ { k } ) + w _ { i } ^ { k } ) \\bigg \\| ^ { 2 } } } \\\\ & { } & { \\leq 2 \\bigg \\| \\tau ^ { - 1 } \\sum _ { i = 1 } ^ { n } ( z ^ { k } - x _ { i } ^ { k } ) \\bigg \\| ^ { 2 } + 2 \\bigg \\| \\sum _ { i = 1 } ^ { n } w _ { i } ^ { k } \\bigg \\| ^ { 2 } } \\\\ & { } & { \\leq 2 \\pi \\tau ^ { - 2 } \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + 2 \\bigg \\| \\sum _ { i = 1 } ^ { n } w _ { i } ^ { k } \\bigg \\| ^ { 2 } } \\\\ & { } & { \\leq 4 \\pi ^ { 2 } \\tau ^ { - 2 } \\| z ^ { k } - z ^ { * } \\| ^ { 2 } + 4 \\pi \\tau ^ { - 2 } \\sum _ { i = 1 } ^ { n } \\| z ^ { * } - x _ { i } ^ { k } \\| ^ { 2 } + 4 m \\sum _ { i = 1 } ^ { n } \\| w _ { i } ^ { k } - w _ { i } ^ { * } \\| ^ { 2 } + 4 \\bigg \\| \\sum _ { i = 1 } ^ { n } w _ { i } ^ { * } \\bigg \\| ^ { 2 } } \\\\ & { } & { \\leq 4 \\pi ^ { 2 } ( \\tau ^ { - 2 } + 1 ) \\| y ^ { k } - p ^ { * } \\| ^ { 2 } + 4 \\pi \\tau ^ { - 2 } \\sum _ { i = 1 } ^ { n } \\| z ^ { * } - x _ { i } ^ { k } \\| ^ { 2 } + 4 \\bigg \\| \\sum _ { i = 1 } ^ { n } 
w _ { i } ^ { * } \\bigg \\| ^ { 2 } . } \\end{array}", + "type": "interline_equation", + "image_path": "7d3cf101ab3ad11ddd41c7f652d6ff2a57f5deec296959da5119f272170259c8.jpg" + } + ] + } + ], + "index": 26, + "virtual_lines": [ + { + "bbox": [ + 111, + 565, + 504, + 621.3333333333334 + ], + "spans": [], + "index": 25 + }, + { + "bbox": [ + 111, + 621.3333333333334, + 504, + 677.6666666666667 + ], + "spans": [], + "index": 26 + }, + { + "bbox": [ + 111, + 677.6666666666667, + 504, + 734.0000000000001 + ], + "spans": [], + "index": 27 + } + ] + } + ], + "page_idx": 17, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 106, + 26, + 308, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 761 + ], + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 763 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 763 + ], + "score": 1.0, + "content": "18", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 82, + 349, + 95 + ], + "lines": [ + { + "bbox": [ + 105, + 81, + 349, + 96 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 349, + 96 + ], + "score": 1.0, + "content": "Now, returning to the first term on the right of (22), we have", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0, + "bbox_fs": [ + 105, + 81, + 349, + 96 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 168, + 97, + 442, + 164 + ], + "lines": [ + { + "bbox": [ + 168, + 97, + 442, + 164 + ], + "spans": [ + { + "bbox": [ + 168, + 97, + 442, + 164 + ], + "score": 0.94, + "content": "\\begin{array} { r l } & { \\| B ( x _ { n + 1 } ^ { k } ) \\| ^ { 2 } = \\| B ( z ^ { k } ) + B ( x _ { n + 1 } ^ { k } ) - B ( z ^ { k } ) \\| ^ { 2 } } 
\\\\ & { \\qquad \\leq 2 \\| B ( z ^ { k } ) \\| ^ { 2 } + 2 \\| B ( x _ { n + 1 } ^ { k } ) - B ( z ^ { k } ) \\| ^ { 2 } } \\\\ & { \\qquad \\leq 2 \\| B ( z ^ { k } ) \\| ^ { 2 } + 2 L ^ { 2 } \\| x _ { n + 1 } ^ { k } - z ^ { k } \\| ^ { 2 } } \\\\ & { \\qquad \\leq 4 L ^ { 2 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + 4 \\| B ( z ^ { * } ) \\| ^ { 2 } + 2 L ^ { 2 } \\| x _ { n + 1 } ^ { k } - z ^ { k } \\| ^ { 2 } } \\end{array}", + "type": "interline_equation", + "image_path": "fc96ea33abc768a53fc35ed40b9ee3ab445a61dd4460fc11acc56273b242ed2f.jpg" + } + ] + } + ], + "index": 2, + "virtual_lines": [ + { + "bbox": [ + 168, + 97, + 442, + 119.33333333333333 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 168, + 119.33333333333333, + 442, + 141.66666666666666 + ], + "spans": [], + "index": 2 + }, + { + "bbox": [ + 168, + 141.66666666666666, + 442, + 164.0 + ], + "spans": [], + "index": 3 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 166, + 266, + 178 + ], + "lines": [ + { + "bbox": [ + 105, + 165, + 267, + 180 + ], + "spans": [ + { + "bbox": [ + 105, + 165, + 267, + 180 + ], + "score": 1.0, + "content": "where we have used (23) to obtain (24).", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 4, + "bbox_fs": [ + 105, + 165, + 267, + 180 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 183, + 451, + 196 + ], + "lines": [ + { + "bbox": [ + 105, + 183, + 451, + 196 + ], + "spans": [ + { + "bbox": [ + 105, + 183, + 451, + 196 + ], + "score": 1.0, + "content": "For the third term in (24), we have from the calculation on line 7 of the algorithm that", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 5, + "bbox_fs": [ + 105, + 183, + 451, + 196 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 179, + 199, + 430, + 216 + ], + "lines": [ + { + "bbox": [ + 179, + 199, + 430, + 216 + ], + "spans": [ + { + "bbox": [ + 179, + 199, + 430, + 216 + ], + "score": 0.88, + "content": "\\begin{array} { r } { x _ { n + 1 } ^ { k } - z ^ 
{ k } = - \\rho _ { k } ( r ^ { k } - w _ { n + 1 } ^ { k } ) = - \\rho _ { k } ( B ( z ^ { k } ) + \\epsilon ^ { k } - w _ { n + 1 } ^ { k } ) , } \\end{array}", + "type": "interline_equation", + "image_path": "a98ab18044da605ed31a3ee780bc35f157cae0ca014dcdff3dff4e5cc89dec91.jpg" + } + ] + } + ], + "index": 6, + "virtual_lines": [ + { + "bbox": [ + 179, + 199, + 430, + 216 + ], + "spans": [], + "index": 6 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 219, + 162, + 230 + ], + "lines": [ + { + "bbox": [ + 106, + 219, + 162, + 230 + ], + "spans": [ + { + "bbox": [ + 106, + 219, + 162, + 230 + ], + "score": 1.0, + "content": "and therefore", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 7, + "bbox_fs": [ + 106, + 219, + 162, + 230 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 193, + 233, + 417, + 284 + ], + "lines": [ + { + "bbox": [ + 193, + 233, + 417, + 284 + ], + "spans": [ + { + "bbox": [ + 193, + 233, + 417, + 284 + ], + "score": 0.93, + "content": "\\begin{array} { r l } & { \\| x _ { n + 1 } ^ { k } - z ^ { k } \\| ^ { 2 } = \\rho _ { k } ^ { 2 } \\| B ( z ^ { k } ) + \\epsilon ^ { k } - w _ { n + 1 } ^ { k } \\| ^ { 2 } } \\\\ & { \\qquad \\leq \\overline { { \\rho } } ^ { 2 } \\| B ( z ^ { k } ) + \\epsilon ^ { k } - w _ { n + 1 } ^ { k } \\| ^ { 2 } } \\\\ & { \\qquad \\leq 3 \\overline { { \\rho } } ^ { 2 } ( \\| B ( z ^ { k } ) \\| ^ { 2 } + \\| \\epsilon ^ { k } \\| ^ { 2 } + \\| w _ { n + 1 } ^ { k } \\| ^ { 2 } ) . 
} \\end{array}", + "type": "interline_equation", + "image_path": "db772aab9bf8c0b6e5652ce3a542f16efe9df2e178c894dcf9bec4cee67e4be2.jpg" + } + ] + } + ], + "index": 9, + "virtual_lines": [ + { + "bbox": [ + 193, + 233, + 417, + 250.0 + ], + "spans": [], + "index": 8 + }, + { + "bbox": [ + 193, + 250.0, + 417, + 267.0 + ], + "spans": [], + "index": 9 + }, + { + "bbox": [ + 193, + 267.0, + 417, + 284.0 + ], + "spans": [], + "index": 10 + } + ] + }, + { + "type": "text", + "bbox": [ + 104, + 285, + 479, + 298 + ], + "lines": [ + { + "bbox": [ + 105, + 285, + 480, + 299 + ], + "spans": [ + { + "bbox": [ + 105, + 285, + 274, + 299 + ], + "score": 1.0, + "content": "We next take expectations conditioned on", + "type": "text" + }, + { + "bbox": [ + 275, + 286, + 287, + 297 + ], + "score": 0.9, + "content": "\\mathcal { F } _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 288, + 285, + 480, + 299 + ], + "score": 1.0, + "content": "and use the noise variance bound (10) to obtain", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 11, + "bbox_fs": [ + 105, + 285, + 480, + 299 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 160, + 300, + 450, + 335 + ], + "lines": [ + { + "bbox": [ + 160, + 300, + 450, + 335 + ], + "spans": [ + { + "bbox": [ + 160, + 300, + 450, + 335 + ], + "score": 0.93, + "content": "\\begin{array} { r l } & { \\mathbb { E } \\big [ \\| x _ { n + 1 } ^ { k } - z ^ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } \\big ] \\leq \\mathbb { E } \\big [ 3 \\overline { { \\rho } } ^ { 2 } \\big ( \\| B ( z ^ { k } ) \\| ^ { 2 } + \\| \\epsilon ^ { k } \\| ^ { 2 } + \\| w _ { n + 1 } ^ { k } \\| ^ { 2 } \\big ) | \\mathcal { F } _ { k } \\big ] } \\\\ & { \\qquad \\leq 3 \\overline { { \\rho } } ^ { 2 } \\big ( ( N + 1 ) \\| B ( z ^ { k } ) \\| ^ { 2 } + \\| w _ { n + 1 } ^ { k } \\| ^ { 2 } + N \\big ) . 
} \\end{array}", + "type": "interline_equation", + "image_path": "08aec390a4dbde35b7c1034a251928e02c538c6fc181a88f80a1e19b12af99bb.jpg" + } + ] + } + ], + "index": 13, + "virtual_lines": [ + { + "bbox": [ + 160, + 300, + 450, + 311.6666666666667 + ], + "spans": [], + "index": 12 + }, + { + "bbox": [ + 160, + 311.6666666666667, + 450, + 323.33333333333337 + ], + "spans": [], + "index": 13 + }, + { + "bbox": [ + 160, + 323.33333333333337, + 450, + 335.00000000000006 + ], + "spans": [], + "index": 14 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 338, + 148, + 349 + ], + "lines": [ + { + "bbox": [ + 106, + 337, + 148, + 349 + ], + "spans": [ + { + "bbox": [ + 106, + 337, + 148, + 349 + ], + "score": 1.0, + "content": "Therefore", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 15, + "bbox_fs": [ + 106, + 337, + 148, + 349 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 118, + 351, + 493, + 462 + ], + "lines": [ + { + "bbox": [ + 118, + 351, + 493, + 462 + ], + "spans": [ + { + "bbox": [ + 118, + 351, + 493, + 462 + ], + "score": 0.95, + "content": "\\begin{array} { r l } & { \\mathbb { E } [ \\| x _ { n + 1 } ^ { k } - z ^ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } ] \\leq 6 \\bar { \\rho } ^ { 2 } \\big ( ( N + 1 ) \\| B ( z ^ { k } ) \\| ^ { 2 } + \\| w _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { * } \\| ^ { 2 } + \\| w _ { n + 1 } ^ { * } \\| ^ { 2 } \\big ) + 3 \\bar { \\rho } ^ { 2 } N } \\\\ & { \\qquad = 6 \\bar { \\rho } ^ { 2 } \\Big ( 2 ( N + 1 ) L ^ { 2 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + 2 ( N + 1 ) \\| B ( z ^ { * } ) \\| ^ { 2 } } \\\\ & { \\qquad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad } \\\\ & { \\qquad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad 
\\quad \\quad \\quad \\quad \\quad \\quad \\quad } \\\\ & { \\leq 6 \\bar { \\rho } ^ { 2 } \\big ( 2 ( N + 1 ) L ^ { 2 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + \\| w _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { * } \\| ^ { 2 } \\big ) } \\\\ & { \\qquad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad + 1 8 \\bar { \\rho } ^ { 2 } ( N + 1 ) \\| B ( z ^ { * } ) \\| ^ { 2 } + 3 \\bar { \\rho } ^ { 2 } N } \\\\ & { \\leq 1 8 \\bar { \\rho } ^ { 2 } ( N + 1 ) \\big ( ( L ^ { 2 } + 1 ) \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + \\| B ( z ^ { * } ) \\| ^ { 2 } \\big ) + 3 \\bar { \\rho } ^ { 2 } N } \\end{array}", + "type": "interline_equation", + "image_path": "42789262e30d48cfedbbc5971687ffc48b513ff47a65ac22b89674c97713ffdd.jpg" + } + ] + } + ], + "index": 17, + "virtual_lines": [ + { + "bbox": [ + 118, + 351, + 493, + 388.0 + ], + "spans": [], + "index": 16 + }, + { + "bbox": [ + 118, + 388.0, + 493, + 425.0 + ], + "spans": [], + "index": 17 + }, + { + "bbox": [ + 118, + 425.0, + 493, + 462.0 + ], + "spans": [], + "index": 18 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 465, + 470, + 478 + ], + "lines": [ + { + "bbox": [ + 105, + 464, + 471, + 480 + ], + "spans": [ + { + "bbox": [ + 105, + 464, + 248, + 480 + ], + "score": 1.0, + "content": "where in the equality uses (23) and", + "type": "text" + }, + { + "bbox": [ + 248, + 466, + 311, + 478 + ], + "score": 0.93, + "content": "w _ { n + 1 } ^ { * } = B ( z ^ { * } )", + "type": "inline_equation" + }, + { + "bbox": [ + 311, + 464, + 471, + 480 + ], + "score": 1.0, + "content": ". 
Combining (24) and (25), we arrive at", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 19, + "bbox_fs": [ + 105, + 464, + 471, + 480 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 157, + 482, + 454, + 521 + ], + "lines": [ + { + "bbox": [ + 157, + 482, + 454, + 521 + ], + "spans": [ + { + "bbox": [ + 157, + 482, + 454, + 521 + ], + "score": 0.91, + "content": "\\begin{array} { r l } & { \\mathbb { E } \\left[ \\left. B ( x _ { n + 1 } ^ { k } ) \\right. ^ { 2 } \\middle | \\mathcal { F } _ { k } \\right] \\leq 4 L ^ { 2 } \\left[ 1 + 9 \\overline { { \\rho } } ^ { 2 } ( L ^ { 2 } + 1 ) ( N + 1 ) \\right] \\Vert p ^ { k } - p ^ { * } \\Vert ^ { 2 } } \\\\ & { \\qquad + 4 \\big ( 1 + 9 \\overline { { \\rho } } ^ { 2 } L ^ { 2 } ( N + 1 ) \\big ) \\Vert B ( z ^ { * } ) \\Vert ^ { 2 } + 6 \\overline { { \\rho } } ^ { 2 } L ^ { 2 } N . } \\end{array}", + "type": "interline_equation", + "image_path": "c2b2419a8d9dba549c85dc9c915c00e304e96e2478c850da1c7b214ac7dae5d9.jpg" + } + ] + } + ], + "index": 21, + "virtual_lines": [ + { + "bbox": [ + 157, + 482, + 454, + 495.0 + ], + "spans": [], + "index": 20 + }, + { + "bbox": [ + 157, + 495.0, + 454, + 508.0 + ], + "spans": [], + "index": 21 + }, + { + "bbox": [ + 157, + 508.0, + 454, + 521.0 + ], + "spans": [], + "index": 22 + } + ] + }, + { + "type": "title", + "bbox": [ + 107, + 529, + 237, + 541 + ], + "lines": [ + { + "bbox": [ + 105, + 527, + 237, + 543 + ], + "spans": [ + { + "bbox": [ + 105, + 527, + 237, + 543 + ], + "score": 1.0, + "content": "C.3.2 SECOND TERM IN (22)", + "type": "text" + } + ], + "index": 23 + } + ], + "index": 23 + }, + { + "type": "text", + "bbox": [ + 105, + 547, + 468, + 561 + ], + "lines": [ + { + "bbox": [ + 104, + 545, + 468, + 563 + ], + "spans": [ + { + "bbox": [ + 104, + 545, + 122, + 563 + ], + "score": 1.0, + "content": "For", + "type": "text" + }, + { + "bbox": [ + 123, + 549, + 156, + 559 + ], + "score": 0.89, + "content": "i \\in 1 . . 
n", + "type": "inline_equation" + }, + { + "bbox": [ + 156, + 545, + 346, + 563 + ], + "score": 1.0, + "content": ", line 5 of the algorithm may be rearranged into", + "type": "text" + }, + { + "bbox": [ + 346, + 548, + 452, + 561 + ], + "score": 0.91, + "content": "y _ { i } ^ { k } = \\tau ^ { - 1 } ( z ^ { k } - x _ { i } ^ { k } ) + w _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 452, + 545, + 468, + 563 + ], + "score": 1.0, + "content": ", so", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 24, + "bbox_fs": [ + 104, + 545, + 468, + 563 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 111, + 565, + 504, + 734 + ], + "lines": [ + { + "bbox": [ + 111, + 565, + 504, + 734 + ], + "spans": [ + { + "bbox": [ + 111, + 565, + 504, + 734 + ], + "score": 0.92, + "content": "\\begin{array} { r l r } { { \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\bigg \\| ^ { 2 } = \\bigg \\| \\sum _ { i = 1 } ^ { n } ( \\tau ^ { - 1 } ( z ^ { k } - x _ { i } ^ { k } ) + w _ { i } ^ { k } ) \\bigg \\| ^ { 2 } } } \\\\ & { } & { \\leq 2 \\bigg \\| \\tau ^ { - 1 } \\sum _ { i = 1 } ^ { n } ( z ^ { k } - x _ { i } ^ { k } ) \\bigg \\| ^ { 2 } + 2 \\bigg \\| \\sum _ { i = 1 } ^ { n } w _ { i } ^ { k } \\bigg \\| ^ { 2 } } \\\\ & { } & { \\leq 2 \\pi \\tau ^ { - 2 } \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + 2 \\bigg \\| \\sum _ { i = 1 } ^ { n } w _ { i } ^ { k } \\bigg \\| ^ { 2 } } \\\\ & { } & { \\leq 4 \\pi ^ { 2 } \\tau ^ { - 2 } \\| z ^ { k } - z ^ { * } \\| ^ { 2 } + 4 \\pi \\tau ^ { - 2 } \\sum _ { i = 1 } ^ { n } \\| z ^ { * } - x _ { i } ^ { k } \\| ^ { 2 } + 4 m \\sum _ { i = 1 } ^ { n } \\| w _ { i } ^ { k } - w _ { i } ^ { * } \\| ^ { 2 } + 4 \\bigg \\| \\sum _ { i = 1 } ^ { n } w _ { i } ^ { * } \\bigg \\| ^ { 2 } } \\\\ & { } & { \\leq 4 \\pi ^ { 2 } ( \\tau ^ { - 2 } + 1 ) \\| y ^ { k } - p ^ { * } \\| ^ { 2 } + 4 \\pi \\tau ^ { - 2 } \\sum _ { i = 1 } ^ { n } \\| z ^ { * } - x _ { i } ^ { k } \\| ^ 
{ 2 } + 4 \\bigg \\| \\sum _ { i = 1 } ^ { n } w _ { i } ^ { * } \\bigg \\| ^ { 2 } . } \\end{array}", + "type": "interline_equation", + "image_path": "7d3cf101ab3ad11ddd41c7f652d6ff2a57f5deec296959da5119f272170259c8.jpg" + } + ] + } + ], + "index": 26, + "virtual_lines": [ + { + "bbox": [ + 111, + 565, + 504, + 621.3333333333334 + ], + "spans": [], + "index": 25 + }, + { + "bbox": [ + 111, + 621.3333333333334, + 504, + 677.6666666666667 + ], + "spans": [], + "index": 26 + }, + { + "bbox": [ + 111, + 677.6666666666667, + 504, + 734.0000000000001 + ], + "spans": [], + "index": 27 + } + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 104, + 81, + 504, + 106 + ], + "lines": [ + { + "bbox": [ + 105, + 82, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 246, + 95 + ], + "score": 1.0, + "content": "By the definition of the solution set", + "type": "text" + }, + { + "bbox": [ + 247, + 83, + 255, + 93 + ], + "score": 0.81, + "content": "s", + "type": "inline_equation" + }, + { + "bbox": [ + 255, + 82, + 282, + 95 + ], + "score": 1.0, + "content": "in (5),", + "type": "text" + }, + { + "bbox": [ + 283, + 82, + 336, + 95 + ], + "score": 0.93, + "content": "w _ { i } ^ { * } \\in A _ { i } ( z ^ { * } )", + "type": "inline_equation" + }, + { + "bbox": [ + 336, + 82, + 351, + 95 + ], + "score": 1.0, + "content": ", so", + "type": "text" + }, + { + "bbox": [ + 351, + 82, + 462, + 95 + ], + "score": 0.92, + "content": "z ^ { * } + \\tau w _ { i } ^ { * } \\in ( I + \\tau A _ { i } ) ( z ^ { * } )", + "type": "inline_equation" + }, + { + "bbox": [ + 462, + 82, + 505, + 95 + ], + "score": 1.0, + "content": ", and since", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 93, + 476, + 106 + ], + "spans": [ + { + "bbox": [ + 106, + 93, + 476, + 106 + ], + "score": 1.0, + "content": "the resolvent is single-valued (Bauschke & Combettes, 2017, Cor. 
23.9) we therefore obtain", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 0.5 + }, + { + "type": "interline_equation", + "bbox": [ + 196, + 112, + 415, + 127 + ], + "lines": [ + { + "bbox": [ + 196, + 112, + 415, + 127 + ], + "spans": [ + { + "bbox": [ + 196, + 112, + 415, + 127 + ], + "score": 0.89, + "content": "z ^ { * } = ( I + \\tau A _ { i } ) ^ { - 1 } ( I + \\tau A _ { i } ) ( z ^ { * } ) = J _ { \\tau A _ { i } } ( z ^ { * } + \\tau w _ { i } ^ { * } ) .", + "type": "interline_equation", + "image_path": "db5125d1ddb028192cd3a9d7be6669dbce1f8e31e7578b0f223e98ded57a63a1.jpg" + } + ] + } + ], + "index": 2, + "virtual_lines": [ + { + "bbox": [ + 196, + 112, + 415, + 127 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 135, + 506, + 169 + ], + "lines": [ + { + "bbox": [ + 104, + 133, + 506, + 150 + ], + "spans": [ + { + "bbox": [ + 104, + 133, + 309, + 150 + ], + "score": 1.0, + "content": "From lines 3 and 4 of the algorithm, we also have", + "type": "text" + }, + { + "bbox": [ + 309, + 135, + 402, + 149 + ], + "score": 0.93, + "content": "x _ { i } ^ { k } = J _ { \\tau A _ { i } } ( z ^ { k } + \\tau w _ { i } ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 402, + 133, + 417, + 150 + ], + "score": 1.0, + "content": "for", + "type": "text" + }, + { + "bbox": [ + 418, + 137, + 451, + 147 + ], + "score": 0.89, + "content": "i \\in 1 . . n", + "type": "inline_equation" + }, + { + "bbox": [ + 451, + 133, + 506, + 150 + ], + "score": 1.0, + "content": ". Thus, using", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 147, + 505, + 160 + ], + "spans": [ + { + "bbox": [ + 106, + 147, + 505, + 160 + ], + "score": 1.0, + "content": "the nonexpansiveness of the resolvent (Bauschke & Combettes, 2017, Def. 4.1 and Cor. 
23.9), we", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 158, + 129, + 171 + ], + "spans": [ + { + "bbox": [ + 105, + 158, + 129, + 171 + ], + "score": 1.0, + "content": "have", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 4 + }, + { + "type": "interline_equation", + "bbox": [ + 176, + 176, + 433, + 326 + ], + "lines": [ + { + "bbox": [ + 176, + 176, + 433, + 326 + ], + "spans": [ + { + "bbox": [ + 176, + 176, + 433, + 326 + ], + "score": 0.97, + "content": "\\begin{array} { r l } { \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { * } - x _ { i } ^ { k } \\| ^ { 2 } = \\displaystyle \\sum _ { i = 1 } ^ { n } \\left\\| J _ { T , 4 _ { i } } ( z ^ { k } + \\tau w _ { i } ^ { k } ) - J _ { \\tau , 4 _ { i } } ( z ^ { * } + \\tau w _ { i } ^ { * } ) \\right\\| ^ { 2 } } & { } \\\\ { \\leq \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { k } + \\tau w _ { i } ^ { k } - z ^ { * } - \\tau w _ { i } ^ { * } \\| ^ { 2 } } & { } \\\\ { = \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - z ^ { * } + \\tau ( w _ { i } ^ { k } - w _ { i } ^ { * } ) \\| ^ { 2 } } & { } \\\\ { \\leq 2 n \\| z ^ { k } - z ^ { * } \\| ^ { 2 } + 2 \\tau ^ { 2 } \\displaystyle \\sum _ { i = 1 } ^ { n } \\| w _ { i } ^ { k } - w _ { i } ^ { * } \\| ^ { 2 } } & { } \\\\ { \\leq 2 ( n + \\tau ^ { 2 } ) \\| y ^ { k } - p ^ { * } \\| ^ { 2 } . 
} & { } \\end{array}", + "type": "interline_equation", + "image_path": "eb396dbda3e27676a945368d3c89fc99a8dd9c9083043cef34f280e2656cddf7.jpg" + } + ] + } + ], + "index": 7, + "virtual_lines": [ + { + "bbox": [ + 176, + 176, + 433, + 226.0 + ], + "spans": [], + "index": 6 + }, + { + "bbox": [ + 176, + 226.0, + 433, + 276.0 + ], + "spans": [], + "index": 7 + }, + { + "bbox": [ + 176, + 276.0, + 433, + 326.0 + ], + "spans": [], + "index": 8 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 331, + 235, + 344 + ], + "lines": [ + { + "bbox": [ + 106, + 331, + 235, + 345 + ], + "spans": [ + { + "bbox": [ + 106, + 331, + 235, + 345 + ], + "score": 1.0, + "content": "Combining (27) and (28) yields", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 9 + }, + { + "type": "interline_equation", + "bbox": [ + 183, + 351, + 427, + 384 + ], + "lines": [ + { + "bbox": [ + 183, + 351, + 427, + 384 + ], + "spans": [ + { + "bbox": [ + 183, + 351, + 427, + 384 + ], + "score": 0.93, + "content": "\\Big \\| \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\Big \\| ^ { 2 } \\leq 1 2 n ^ { 2 } \\tau ^ { - 2 } ( n + \\tau ^ { 2 } ) \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + 4 \\Big \\| \\sum _ { i = 1 } ^ { n } w _ { i } ^ { * } \\Big \\| ^ { 2 } .", + "type": "interline_equation", + "image_path": "e19c099610947f7954ffd8e965fac5644ea8f19726ee21ea405eeb72782628fe.jpg" + } + ] + } + ], + "index": 10.5, + "virtual_lines": [ + { + "bbox": [ + 183, + 351, + 427, + 367.5 + ], + "spans": [], + "index": 10 + }, + { + "bbox": [ + 183, + 367.5, + 427, + 384.0 + ], + "spans": [], + "index": 11 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 397, + 273, + 409 + ], + "lines": [ + { + "bbox": [ + 106, + 396, + 274, + 411 + ], + "spans": [ + { + "bbox": [ + 106, + 396, + 274, + 411 + ], + "score": 1.0, + "content": "Combining (26) and (29) with (22) yields", + "type": "text" + } + ], + "index": 12 + } + ], + "index": 12 + }, + { + "type": "interline_equation", + "bbox": [ + 130, + 415, + 
479, + 483 + ], + "lines": [ + { + "bbox": [ + 130, + 415, + 479, + 483 + ], + "spans": [ + { + "bbox": [ + 130, + 415, + 479, + 483 + ], + "score": 0.92, + "content": "\\begin{array} { r l } & { \\mathbb { E } \\left[ \\| \\nabla _ { z } \\varphi _ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } \\right] \\le 2 4 \\left[ ( 1 + 9 \\overline { { \\rho } } ^ { 2 } ) ( L ^ { 2 } + 1 ) ^ { 2 } ( N + 1 ) ^ { 2 } + n ^ { 2 } \\tau ^ { - 2 } ( n + \\tau ^ { 2 } ) \\right] \\| p ^ { k } - p ^ { * } \\| ^ { 2 } } \\\\ & { \\qquad + 1 6 ( N + 1 ) \\big ( 1 + 9 \\overline { { \\rho } } ^ { 2 } L ^ { 2 } ( N + 1 ) \\big ) \\| B ( z ^ { * } ) \\| ^ { 2 } + 8 \\bigg \\| \\displaystyle \\sum _ { i = 1 } ^ { n } w _ { i } ^ { * } \\bigg \\| ^ { 2 } } \\\\ & { \\qquad + 2 4 \\overline { { \\rho } } ^ { 2 } L ^ { 2 } ( N + 1 ) N + 4 N . } \\end{array}", + "type": "interline_equation", + "image_path": "11c9e257fcbf2170a2f61304fa0a6f971bb95d5038cc34d808dd6c744cbae9ef.jpg" + } + ] + } + ], + "index": 14, + "virtual_lines": [ + { + "bbox": [ + 130, + 415, + 479, + 437.6666666666667 + ], + "spans": [], + "index": 13 + }, + { + "bbox": [ + 130, + 437.6666666666667, + 479, + 460.33333333333337 + ], + "spans": [], + "index": 14 + }, + { + "bbox": [ + 130, + 460.33333333333337, + 479, + 483.00000000000006 + ], + "spans": [], + "index": 15 + } + ] + }, + { + "type": "title", + "bbox": [ + 107, + 495, + 246, + 506 + ], + "lines": [ + { + "bbox": [ + 106, + 493, + 247, + 508 + ], + "spans": [ + { + "bbox": [ + 106, + 493, + 247, + 508 + ], + "score": 1.0, + "content": "C.3.3 DUAL GRADIENT NORM", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 16 + }, + { + "type": "text", + "bbox": [ + 107, + 515, + 505, + 539 + ], + "lines": [ + { + "bbox": [ + 106, + 515, + 505, + 528 + ], + "spans": [ + { + "bbox": [ + 106, + 515, + 175, + 528 + ], + "score": 1.0, + "content": "Considering that", + "type": "text" + }, + { + "bbox": [ + 175, + 516, + 196, + 527 + ], + "score": 0.91, + "content": 
"\\nabla \\varphi _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 196, + 515, + 343, + 528 + ], + "score": 1.0, + "content": "is taken with respect to the subspace", + "type": "text" + }, + { + "bbox": [ + 344, + 516, + 353, + 526 + ], + "score": 0.83, + "content": "\\mathcal { P }", + "type": "inline_equation" + }, + { + "bbox": [ + 353, + 515, + 505, + 528 + ], + "score": 1.0, + "content": ", the gradients with respect to the dual", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 106, + 527, + 452, + 539 + ], + "spans": [ + { + "bbox": [ + 106, + 527, + 391, + 538 + ], + "score": 1.0, + "content": "variables are β€” see for example Eckstein & Svaiter (2009) β€” for each", + "type": "text" + }, + { + "bbox": [ + 391, + 527, + 448, + 539 + ], + "score": 0.91, + "content": "i \\in { 1 . . ( n + 1 ) }", + "type": "inline_equation" + }, + { + "bbox": [ + 449, + 527, + 452, + 538 + ], + "score": 1.0, + "content": ",", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 17.5 + }, + { + "type": "interline_equation", + "bbox": [ + 153, + 560, + 459, + 669 + ], + "lines": [ + { + "bbox": [ + 153, + 560, + 459, + 669 + ], + "spans": [ + { + "bbox": [ + 153, + 560, + 459, + 669 + ], + "score": 0.95, + "content": "\\begin{array} { l } { \\displaystyle \\| \\nabla _ { w _ { i } } \\varphi _ { k } \\| ^ { 2 } = \\left\\| x _ { i } ^ { k } - \\frac { 1 } { n + 1 } \\sum _ { j = 1 } ^ { n + 1 } x _ { j } ^ { k } \\right\\| ^ { 2 } = \\left\\| \\frac { 1 } { n + 1 } \\sum _ { j = 1 } ^ { n + 1 } ( x _ { i } ^ { k } - x _ { j } ^ { k } ) \\right\\| ^ { 2 } } \\\\ { \\displaystyle \\leq \\sum _ { j = 1 } ^ { n + 1 } \\| x _ { i } ^ { k } - x _ { j } ^ { k } \\| ^ { 2 } } \\\\ { \\displaystyle \\leq 2 \\sum _ { j = 1 } ^ { n + 1 } \\big ( \\| x _ { i } ^ { k } - z ^ { k } \\| ^ { 2 } + \\| z ^ { k } - x _ { j } ^ { k } \\| ^ { 2 } \\big ) } \\end{array}", + "type": "interline_equation", + "image_path": 
"1a1fa31bba74c41927b4f6301aa28599463ff4b0bb6758dac99a886bfae20aea.jpg" + } + ] + } + ], + "index": 20, + "virtual_lines": [ + { + "bbox": [ + 153, + 560, + 459, + 596.3333333333334 + ], + "spans": [], + "index": 19 + }, + { + "bbox": [ + 153, + 596.3333333333334, + 459, + 632.6666666666667 + ], + "spans": [], + "index": 20 + }, + { + "bbox": [ + 153, + 632.6666666666667, + 459, + 669.0000000000001 + ], + "spans": [], + "index": 21 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 676, + 390, + 689 + ], + "lines": [ + { + "bbox": [ + 106, + 675, + 390, + 691 + ], + "spans": [ + { + "bbox": [ + 106, + 675, + 221, + 691 + ], + "score": 1.0, + "content": "Summing this inequality for", + "type": "text" + }, + { + "bbox": [ + 221, + 677, + 279, + 689 + ], + "score": 0.92, + "content": "i \\in { 1 . . ( n + 1 ) }", + "type": "inline_equation" + }, + { + "bbox": [ + 279, + 675, + 390, + 691 + ], + "score": 1.0, + "content": "and collecting terms yields", + "type": "text" + } + ], + "index": 22 + } + ], + "index": 22 + }, + { + "type": "interline_equation", + "bbox": [ + 214, + 696, + 395, + 731 + ], + "lines": [ + { + "bbox": [ + 214, + 696, + 395, + 731 + ], + "spans": [ + { + "bbox": [ + 214, + 696, + 395, + 731 + ], + "score": 0.93, + "content": "\\sum _ { i = 1 } ^ { n + 1 } \\| \\nabla _ { w _ { i } } \\varphi _ { k } \\| ^ { 2 } \\leq 4 ( n + 1 ) \\sum _ { i = 1 } ^ { n + 1 } \\| x _ { i } ^ { k } - z ^ { k } \\| ^ { 2 } ,", + "type": "interline_equation", + "image_path": "ed986999f76c7947bd3840d828f3236344aa681c73273becba85527a8cd9d4ec.jpg" + } + ] + } + ], + "index": 23.5, + "virtual_lines": [ + { + "bbox": [ + 214, + 696, + 395, + 713.5 + ], + "spans": [], + "index": 23 + }, + { + "bbox": [ + 214, + 713.5, + 395, + 731.0 + ], + "spans": [], + "index": 24 + } + ] + } + ], + "page_idx": 18, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 106, + 26, + 308, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 
308, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 760 + ], + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 765 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 765 + ], + "score": 1.0, + "content": "19", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 104, + 81, + 504, + 106 + ], + "lines": [ + { + "bbox": [ + 105, + 82, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 246, + 95 + ], + "score": 1.0, + "content": "By the definition of the solution set", + "type": "text" + }, + { + "bbox": [ + 247, + 83, + 255, + 93 + ], + "score": 0.81, + "content": "s", + "type": "inline_equation" + }, + { + "bbox": [ + 255, + 82, + 282, + 95 + ], + "score": 1.0, + "content": "in (5),", + "type": "text" + }, + { + "bbox": [ + 283, + 82, + 336, + 95 + ], + "score": 0.93, + "content": "w _ { i } ^ { * } \\in A _ { i } ( z ^ { * } )", + "type": "inline_equation" + }, + { + "bbox": [ + 336, + 82, + 351, + 95 + ], + "score": 1.0, + "content": ", so", + "type": "text" + }, + { + "bbox": [ + 351, + 82, + 462, + 95 + ], + "score": 0.92, + "content": "z ^ { * } + \\tau w _ { i } ^ { * } \\in ( I + \\tau A _ { i } ) ( z ^ { * } )", + "type": "inline_equation" + }, + { + "bbox": [ + 462, + 82, + 505, + 95 + ], + "score": 1.0, + "content": ", and since", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 93, + 476, + 106 + ], + "spans": [ + { + "bbox": [ + 106, + 93, + 476, + 106 + ], + "score": 1.0, + "content": "the resolvent is single-valued (Bauschke & Combettes, 2017, Cor. 
23.9) we therefore obtain", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 0.5, + "bbox_fs": [ + 105, + 82, + 505, + 106 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 196, + 112, + 415, + 127 + ], + "lines": [ + { + "bbox": [ + 196, + 112, + 415, + 127 + ], + "spans": [ + { + "bbox": [ + 196, + 112, + 415, + 127 + ], + "score": 0.89, + "content": "z ^ { * } = ( I + \\tau A _ { i } ) ^ { - 1 } ( I + \\tau A _ { i } ) ( z ^ { * } ) = J _ { \\tau A _ { i } } ( z ^ { * } + \\tau w _ { i } ^ { * } ) .", + "type": "interline_equation", + "image_path": "db5125d1ddb028192cd3a9d7be6669dbce1f8e31e7578b0f223e98ded57a63a1.jpg" + } + ] + } + ], + "index": 2, + "virtual_lines": [ + { + "bbox": [ + 196, + 112, + 415, + 127 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 135, + 506, + 169 + ], + "lines": [ + { + "bbox": [ + 104, + 133, + 506, + 150 + ], + "spans": [ + { + "bbox": [ + 104, + 133, + 309, + 150 + ], + "score": 1.0, + "content": "From lines 3 and 4 of the algorithm, we also have", + "type": "text" + }, + { + "bbox": [ + 309, + 135, + 402, + 149 + ], + "score": 0.93, + "content": "x _ { i } ^ { k } = J _ { \\tau A _ { i } } ( z ^ { k } + \\tau w _ { i } ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 402, + 133, + 417, + 150 + ], + "score": 1.0, + "content": "for", + "type": "text" + }, + { + "bbox": [ + 418, + 137, + 451, + 147 + ], + "score": 0.89, + "content": "i \\in 1 . . n", + "type": "inline_equation" + }, + { + "bbox": [ + 451, + 133, + 506, + 150 + ], + "score": 1.0, + "content": ". Thus, using", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 147, + 505, + 160 + ], + "spans": [ + { + "bbox": [ + 106, + 147, + 505, + 160 + ], + "score": 1.0, + "content": "the nonexpansiveness of the resolvent (Bauschke & Combettes, 2017, Def. 4.1 and Cor. 
23.9), we", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 158, + 129, + 171 + ], + "spans": [ + { + "bbox": [ + 105, + 158, + 129, + 171 + ], + "score": 1.0, + "content": "have", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 4, + "bbox_fs": [ + 104, + 133, + 506, + 171 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 176, + 176, + 433, + 326 + ], + "lines": [ + { + "bbox": [ + 176, + 176, + 433, + 326 + ], + "spans": [ + { + "bbox": [ + 176, + 176, + 433, + 326 + ], + "score": 0.97, + "content": "\\begin{array} { r l } { \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { * } - x _ { i } ^ { k } \\| ^ { 2 } = \\displaystyle \\sum _ { i = 1 } ^ { n } \\left\\| J _ { T , 4 _ { i } } ( z ^ { k } + \\tau w _ { i } ^ { k } ) - J _ { \\tau , 4 _ { i } } ( z ^ { * } + \\tau w _ { i } ^ { * } ) \\right\\| ^ { 2 } } & { } \\\\ { \\leq \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { k } + \\tau w _ { i } ^ { k } - z ^ { * } - \\tau w _ { i } ^ { * } \\| ^ { 2 } } & { } \\\\ { = \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - z ^ { * } + \\tau ( w _ { i } ^ { k } - w _ { i } ^ { * } ) \\| ^ { 2 } } & { } \\\\ { \\leq 2 n \\| z ^ { k } - z ^ { * } \\| ^ { 2 } + 2 \\tau ^ { 2 } \\displaystyle \\sum _ { i = 1 } ^ { n } \\| w _ { i } ^ { k } - w _ { i } ^ { * } \\| ^ { 2 } } & { } \\\\ { \\leq 2 ( n + \\tau ^ { 2 } ) \\| y ^ { k } - p ^ { * } \\| ^ { 2 } . 
} & { } \\end{array}", + "type": "interline_equation", + "image_path": "eb396dbda3e27676a945368d3c89fc99a8dd9c9083043cef34f280e2656cddf7.jpg" + } + ] + } + ], + "index": 7, + "virtual_lines": [ + { + "bbox": [ + 176, + 176, + 433, + 226.0 + ], + "spans": [], + "index": 6 + }, + { + "bbox": [ + 176, + 226.0, + 433, + 276.0 + ], + "spans": [], + "index": 7 + }, + { + "bbox": [ + 176, + 276.0, + 433, + 326.0 + ], + "spans": [], + "index": 8 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 331, + 235, + 344 + ], + "lines": [ + { + "bbox": [ + 106, + 331, + 235, + 345 + ], + "spans": [ + { + "bbox": [ + 106, + 331, + 235, + 345 + ], + "score": 1.0, + "content": "Combining (27) and (28) yields", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 9, + "bbox_fs": [ + 106, + 331, + 235, + 345 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 183, + 351, + 427, + 384 + ], + "lines": [ + { + "bbox": [ + 183, + 351, + 427, + 384 + ], + "spans": [ + { + "bbox": [ + 183, + 351, + 427, + 384 + ], + "score": 0.93, + "content": "\\Big \\| \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\Big \\| ^ { 2 } \\leq 1 2 n ^ { 2 } \\tau ^ { - 2 } ( n + \\tau ^ { 2 } ) \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + 4 \\Big \\| \\sum _ { i = 1 } ^ { n } w _ { i } ^ { * } \\Big \\| ^ { 2 } .", + "type": "interline_equation", + "image_path": "e19c099610947f7954ffd8e965fac5644ea8f19726ee21ea405eeb72782628fe.jpg" + } + ] + } + ], + "index": 10.5, + "virtual_lines": [ + { + "bbox": [ + 183, + 351, + 427, + 367.5 + ], + "spans": [], + "index": 10 + }, + { + "bbox": [ + 183, + 367.5, + 427, + 384.0 + ], + "spans": [], + "index": 11 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 397, + 273, + 409 + ], + "lines": [ + { + "bbox": [ + 106, + 396, + 274, + 411 + ], + "spans": [ + { + "bbox": [ + 106, + 396, + 274, + 411 + ], + "score": 1.0, + "content": "Combining (26) and (29) with (22) yields", + "type": "text" + } + ], + "index": 12 + } + ], + "index": 12, + "bbox_fs": [ + 106, 
+ 396, + 274, + 411 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 130, + 415, + 479, + 483 + ], + "lines": [ + { + "bbox": [ + 130, + 415, + 479, + 483 + ], + "spans": [ + { + "bbox": [ + 130, + 415, + 479, + 483 + ], + "score": 0.92, + "content": "\\begin{array} { r l } & { \\mathbb { E } \\left[ \\| \\nabla _ { z } \\varphi _ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } \\right] \\le 2 4 \\left[ ( 1 + 9 \\overline { { \\rho } } ^ { 2 } ) ( L ^ { 2 } + 1 ) ^ { 2 } ( N + 1 ) ^ { 2 } + n ^ { 2 } \\tau ^ { - 2 } ( n + \\tau ^ { 2 } ) \\right] \\| p ^ { k } - p ^ { * } \\| ^ { 2 } } \\\\ & { \\qquad + 1 6 ( N + 1 ) \\big ( 1 + 9 \\overline { { \\rho } } ^ { 2 } L ^ { 2 } ( N + 1 ) \\big ) \\| B ( z ^ { * } ) \\| ^ { 2 } + 8 \\bigg \\| \\displaystyle \\sum _ { i = 1 } ^ { n } w _ { i } ^ { * } \\bigg \\| ^ { 2 } } \\\\ & { \\qquad + 2 4 \\overline { { \\rho } } ^ { 2 } L ^ { 2 } ( N + 1 ) N + 4 N . } \\end{array}", + "type": "interline_equation", + "image_path": "11c9e257fcbf2170a2f61304fa0a6f971bb95d5038cc34d808dd6c744cbae9ef.jpg" + } + ] + } + ], + "index": 14, + "virtual_lines": [ + { + "bbox": [ + 130, + 415, + 479, + 437.6666666666667 + ], + "spans": [], + "index": 13 + }, + { + "bbox": [ + 130, + 437.6666666666667, + 479, + 460.33333333333337 + ], + "spans": [], + "index": 14 + }, + { + "bbox": [ + 130, + 460.33333333333337, + 479, + 483.00000000000006 + ], + "spans": [], + "index": 15 + } + ] + }, + { + "type": "title", + "bbox": [ + 107, + 495, + 246, + 506 + ], + "lines": [ + { + "bbox": [ + 106, + 493, + 247, + 508 + ], + "spans": [ + { + "bbox": [ + 106, + 493, + 247, + 508 + ], + "score": 1.0, + "content": "C.3.3 DUAL GRADIENT NORM", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 16 + }, + { + "type": "text", + "bbox": [ + 107, + 515, + 505, + 539 + ], + "lines": [ + { + "bbox": [ + 106, + 515, + 505, + 528 + ], + "spans": [ + { + "bbox": [ + 106, + 515, + 175, + 528 + ], + "score": 1.0, + "content": "Considering that", + "type": 
"text" + }, + { + "bbox": [ + 175, + 516, + 196, + 527 + ], + "score": 0.91, + "content": "\\nabla \\varphi _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 196, + 515, + 343, + 528 + ], + "score": 1.0, + "content": "is taken with respect to the subspace", + "type": "text" + }, + { + "bbox": [ + 344, + 516, + 353, + 526 + ], + "score": 0.83, + "content": "\\mathcal { P }", + "type": "inline_equation" + }, + { + "bbox": [ + 353, + 515, + 505, + 528 + ], + "score": 1.0, + "content": ", the gradients with respect to the dual", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 106, + 527, + 452, + 539 + ], + "spans": [ + { + "bbox": [ + 106, + 527, + 391, + 538 + ], + "score": 1.0, + "content": "variables are β€” see for example Eckstein & Svaiter (2009) β€” for each", + "type": "text" + }, + { + "bbox": [ + 391, + 527, + 448, + 539 + ], + "score": 0.91, + "content": "i \\in { 1 . . ( n + 1 ) }", + "type": "inline_equation" + }, + { + "bbox": [ + 449, + 527, + 452, + 538 + ], + "score": 1.0, + "content": ",", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 17.5, + "bbox_fs": [ + 106, + 515, + 505, + 539 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 153, + 560, + 459, + 669 + ], + "lines": [ + { + "bbox": [ + 153, + 560, + 459, + 669 + ], + "spans": [ + { + "bbox": [ + 153, + 560, + 459, + 669 + ], + "score": 0.95, + "content": "\\begin{array} { l } { \\displaystyle \\| \\nabla _ { w _ { i } } \\varphi _ { k } \\| ^ { 2 } = \\left\\| x _ { i } ^ { k } - \\frac { 1 } { n + 1 } \\sum _ { j = 1 } ^ { n + 1 } x _ { j } ^ { k } \\right\\| ^ { 2 } = \\left\\| \\frac { 1 } { n + 1 } \\sum _ { j = 1 } ^ { n + 1 } ( x _ { i } ^ { k } - x _ { j } ^ { k } ) \\right\\| ^ { 2 } } \\\\ { \\displaystyle \\leq \\sum _ { j = 1 } ^ { n + 1 } \\| x _ { i } ^ { k } - x _ { j } ^ { k } \\| ^ { 2 } } \\\\ { \\displaystyle \\leq 2 \\sum _ { j = 1 } ^ { n + 1 } \\big ( \\| x _ { i } ^ { k } - z ^ { k } \\| ^ { 2 } + \\| z ^ { k } - x _ { j } ^ 
{ k } \\| ^ { 2 } \\big ) } \\end{array}", + "type": "interline_equation", + "image_path": "1a1fa31bba74c41927b4f6301aa28599463ff4b0bb6758dac99a886bfae20aea.jpg" + } + ] + } + ], + "index": 20, + "virtual_lines": [ + { + "bbox": [ + 153, + 560, + 459, + 596.3333333333334 + ], + "spans": [], + "index": 19 + }, + { + "bbox": [ + 153, + 596.3333333333334, + 459, + 632.6666666666667 + ], + "spans": [], + "index": 20 + }, + { + "bbox": [ + 153, + 632.6666666666667, + 459, + 669.0000000000001 + ], + "spans": [], + "index": 21 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 676, + 390, + 689 + ], + "lines": [ + { + "bbox": [ + 106, + 675, + 390, + 691 + ], + "spans": [ + { + "bbox": [ + 106, + 675, + 221, + 691 + ], + "score": 1.0, + "content": "Summing this inequality for", + "type": "text" + }, + { + "bbox": [ + 221, + 677, + 279, + 689 + ], + "score": 0.92, + "content": "i \\in { 1 . . ( n + 1 ) }", + "type": "inline_equation" + }, + { + "bbox": [ + 279, + 675, + 390, + 691 + ], + "score": 1.0, + "content": "and collecting terms yields", + "type": "text" + } + ], + "index": 22 + } + ], + "index": 22, + "bbox_fs": [ + 106, + 675, + 390, + 691 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 214, + 696, + 395, + 731 + ], + "lines": [ + { + "bbox": [ + 214, + 696, + 395, + 731 + ], + "spans": [ + { + "bbox": [ + 214, + 696, + 395, + 731 + ], + "score": 0.93, + "content": "\\sum _ { i = 1 } ^ { n + 1 } \\| \\nabla _ { w _ { i } } \\varphi _ { k } \\| ^ { 2 } \\leq 4 ( n + 1 ) \\sum _ { i = 1 } ^ { n + 1 } \\| x _ { i } ^ { k } - z ^ { k } \\| ^ { 2 } ,", + "type": "interline_equation", + "image_path": "ed986999f76c7947bd3840d828f3236344aa681c73273becba85527a8cd9d4ec.jpg" + } + ] + } + ], + "index": 23.5, + "virtual_lines": [ + { + "bbox": [ + 214, + 696, + 395, + 713.5 + ], + "spans": [], + "index": 23 + }, + { + "bbox": [ + 214, + 713.5, + 395, + 731.0 + ], + "spans": [], + "index": 24 + } + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", 
+ "bbox": [ + 106, + 82, + 312, + 95 + ], + "lines": [ + { + "bbox": [ + 105, + 81, + 313, + 96 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 259, + 96 + ], + "score": 1.0, + "content": "so taking expectations conditioned on", + "type": "text" + }, + { + "bbox": [ + 259, + 83, + 272, + 94 + ], + "score": 0.91, + "content": "\\mathcal { F } _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 273, + 81, + 313, + 96 + ], + "score": 1.0, + "content": "produces", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "interline_equation", + "bbox": [ + 118, + 101, + 495, + 302 + ], + "lines": [ + { + "bbox": [ + 118, + 101, + 495, + 302 + ], + "spans": [ + { + "bbox": [ + 118, + 101, + 495, + 302 + ], + "score": 0.94, + "content": "\\begin{array} { r l } { \\displaystyle \\sum _ { i = 1 } ^ { n + 1 } \\mathbb { E } \\| \\nabla _ { x _ { i } } \\varphi _ { i } \\| ^ { 2 } | \\mathcal { F } _ { k } | \\leq 4 ( n + 1 ) \\displaystyle \\sum _ { i = 1 } ^ { n + 1 } \\mathbb { E } \\| | x _ { i } ^ { k } - z ^ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } | } \\\\ & { \\leq 4 ( n + 1 ) \\mathbb { E } \\| | x _ { i } ^ { k } - z ^ { k } | ^ { 2 } | \\mathcal { F } _ { k } | + 4 ( n + 1 ) \\displaystyle \\sum _ { i = 1 } ^ { n } \\mathbb { E } \\| | x _ { i } ^ { k } - z ^ { k } | ^ { 2 } | \\mathcal { F } _ { k } | } \\\\ & { \\leq 4 ( n + 1 ) \\mathbb { E } \\| | x _ { i } ^ { k } - z ^ { k } | ^ { 2 } | \\mathcal { F } _ { k } | } \\\\ & { \\qquad + s ( n + 1 ) \\displaystyle \\sum _ { i = 1 } ^ { n } \\mathbb { E } \\| | x _ { i } ^ { k } - z ^ { k } | ^ { 2 } | \\mathcal { F } _ { k } | + 8 ( n + 1 ) ^ { 2 } \\| z ^ { k } - z ^ { k } | ^ { 2 } } \\\\ & { \\qquad + s ( n + 1 ) \\displaystyle \\sum _ { i = 1 } ^ { n } \\mathbb { E } \\| | x _ { i } ^ { k } - z ^ { k } | ^ { 2 } | \\mathcal { F } _ { k } | } \\\\ & { \\leq 4 ( n + 1 ) \\displaystyle \\sum _ { i = 1 } ^ { n } \\mathbb { E } \\| | x _ { i } ^ { k } - z ^ { k } | ^ { 2 } | 
\\mathcal { F } _ { k } | } \\\\ & { \\qquad + s ( n + 1 ) \\displaystyle \\sum _ { i = 1 } ^ { n } \\mathbb { E } \\| | x _ { i } ^ { k } - z ^ { k } | ^ { 2 } | \\mathcal { F } _ { k } | + 8 ( n + 1 ) ^ { 2 } \\| n ^ { k } - p ^ { k } | ^ { 2 } } \\\\ & { \\leq 8 ( n + 1 ) \\displaystyle \\sum _ { i = 1 } ^ { n } 2 \\tau ^ { 2 } + 1 + 9 s ^ { 2 } ( 2 ^ { k } + 1 ) | | b ^ { k } - z ^ { k } | ^ { 2 } | } \\\\ & \\leq 8 ( n + 1 ) \\displaystyle \\sum _ { i = 1 } ^ { n } 2 \\tau ^ { 2 } + 1 + 9 s ^ { 2 } ( 2 ^ { k } + 1 ) | | b ^ { k } - z \\end{array}", + "type": "interline_equation", + "image_path": "b0b75238fbd7c69b9219538cf3bdf1f40557f202386711687b1ac8c8f85fb99b.jpg" + } + ] + } + ], + "index": 2, + "virtual_lines": [ + { + "bbox": [ + 118, + 101, + 495, + 168.0 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 118, + 168.0, + 495, + 235.0 + ], + "spans": [], + "index": 2 + }, + { + "bbox": [ + 118, + 235.0, + 495, + 302.0 + ], + "spans": [], + "index": 3 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 305, + 302, + 317 + ], + "lines": [ + { + "bbox": [ + 106, + 305, + 304, + 319 + ], + "spans": [ + { + "bbox": [ + 106, + 305, + 304, + 319 + ], + "score": 1.0, + "content": "where the final inequality employs (25) and (28).", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 4 + }, + { + "type": "text", + "bbox": [ + 106, + 322, + 391, + 335 + ], + "lines": [ + { + "bbox": [ + 106, + 322, + 391, + 336 + ], + "spans": [ + { + "bbox": [ + 106, + 322, + 391, + 336 + ], + "score": 1.0, + "content": "All told, using (30) and (31) and simplifying the constants, one obtains", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 5 + }, + { + "type": "interline_equation", + "bbox": [ + 180, + 340, + 430, + 393 + ], + "lines": [ + { + "bbox": [ + 180, + 340, + 430, + 393 + ], + "spans": [ + { + "bbox": [ + 180, + 340, + 430, + 393 + ], + "score": 0.94, + "content": "\\begin{array} { r l r } { { \\mathbb { E } [ \\| \\nabla \\varphi _ { k } \\| 
^ { 2 } | \\mathcal { F } _ { k } ] = \\mathbb { E } [ \\| \\nabla _ { z } \\varphi _ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } ] + \\sum _ { i = 1 } ^ { n + 1 } \\mathbb { E } [ \\| \\nabla _ { w _ { i } } \\varphi _ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } ] } } \\\\ & { } & { \\leq C _ { 1 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + C _ { 2 } , } \\end{array}", + "type": "interline_equation", + "image_path": "0217ef0b4f5b5b4f5eef035ad32b486e027814bdc21defea076f02e797f48ca9.jpg" + } + ] + } + ], + "index": 7, + "virtual_lines": [ + { + "bbox": [ + 180, + 340, + 430, + 357.6666666666667 + ], + "spans": [], + "index": 6 + }, + { + "bbox": [ + 180, + 357.6666666666667, + 430, + 375.33333333333337 + ], + "spans": [], + "index": 7 + }, + { + "bbox": [ + 180, + 375.33333333333337, + 430, + 393.00000000000006 + ], + "spans": [], + "index": 8 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 398, + 133, + 409 + ], + "lines": [ + { + "bbox": [ + 105, + 396, + 135, + 410 + ], + "spans": [ + { + "bbox": [ + 105, + 396, + 135, + 410 + ], + "score": 1.0, + "content": "where", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 9 + }, + { + "type": "interline_equation", + "bbox": [ + 184, + 414, + 426, + 449 + ], + "lines": [ + { + "bbox": [ + 184, + 414, + 426, + 449 + ], + "spans": [ + { + "bbox": [ + 184, + 414, + 426, + 449 + ], + "score": 0.9, + "content": "\\begin{array} { c } { { C _ { 1 } = 2 4 ( 1 + 1 0 \\overline { { { \\rho } } } ^ { 2 } ) ( n + 1 ) ( L ^ { 2 } + 1 ) ^ { 2 } ( N + 1 ) ^ { 2 } } } \\\\ { { { } } } \\\\ { { + 8 ( n + 1 ) \\left( 2 \\tau ^ { 2 } + 6 ( n + 1 ) + 1 + 3 ( n + 1 ) ^ { 2 } \\tau ^ { - 2 } \\right) } } \\end{array}", + "type": "interline_equation", + "image_path": "48759a6394bd537ecf90c80d0c636bfbcce7e85be2075712e959bd63159ec8c3.jpg" + } + ] + } + ], + "index": 10.5, + "virtual_lines": [ + { + "bbox": [ + 184, + 414, + 426, + 431.5 + ], + "spans": [], + "index": 10 + }, + { + "bbox": [ + 184, + 431.5, + 426, + 449.0 + ], + 
"spans": [], + "index": 11 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 453, + 124, + 464 + ], + "lines": [ + { + "bbox": [ + 105, + 453, + 123, + 464 + ], + "spans": [ + { + "bbox": [ + 105, + 453, + 123, + 464 + ], + "score": 1.0, + "content": "and", + "type": "text" + } + ], + "index": 12 + } + ], + "index": 12 + }, + { + "type": "interline_equation", + "bbox": [ + 145, + 470, + 464, + 519 + ], + "lines": [ + { + "bbox": [ + 145, + 470, + 464, + 519 + ], + "spans": [ + { + "bbox": [ + 145, + 470, + 464, + 519 + ], + "score": 0.91, + "content": "\\begin{array} { l } { { C _ { 2 } = 1 6 ( N + 1 ) \\left[ 1 + 4 { \\overline { { \\rho } } } ^ { 2 } ( n + 1 ) + 9 { \\overline { { \\rho } } } ^ { 2 } L ^ { 2 } ( N + 1 ) \\right] \\| B ( z ^ { * } ) \\| ^ { 2 } + 8 \\| \\displaystyle \\sum _ { i = 1 } ^ { n } w _ { i } ^ { * } \\| ^ { 2 } } } \\\\ { { \\nonumber } } \\\\ { { \\qquad + 1 2 { \\overline { { \\rho } } } ^ { 2 } N ( 2 L ^ { 2 } ( N + 1 ) + n + 1 ) + 4 N . } } \\end{array}", + "type": "interline_equation", + "image_path": "c8f22239efd08b606b7b055333c56a780f6c8c5afa476bac5207185e4d1330e5.jpg" + } + ] + } + ], + "index": 14, + "virtual_lines": [ + { + "bbox": [ + 145, + 470, + 464, + 486.3333333333333 + ], + "spans": [], + "index": 13 + }, + { + "bbox": [ + 145, + 486.3333333333333, + 464, + 502.66666666666663 + ], + "spans": [], + "index": 14 + }, + { + "bbox": [ + 145, + 502.66666666666663, + 464, + 519.0 + ], + "spans": [], + "index": 15 + } + ] + }, + { + "type": "title", + "bbox": [ + 107, + 531, + 255, + 543 + ], + "lines": [ + { + "bbox": [ + 105, + 530, + 256, + 545 + ], + "spans": [ + { + "bbox": [ + 105, + 530, + 220, + 545 + ], + "score": 1.0, + "content": "C.4 LOWER BOUND FOR", + "type": "text" + }, + { + "bbox": [ + 221, + 533, + 233, + 543 + ], + "score": 0.84, + "content": "\\varphi _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 233, + 530, + 256, + 545 + ], + "score": 1.0, + "content": "-GAP", + "type": "text" + } + ], 
+ "index": 16 + } + ], + "index": 16 + }, + { + "type": "text", + "bbox": [ + 106, + 552, + 197, + 564 + ], + "lines": [ + { + "bbox": [ + 106, + 551, + 198, + 566 + ], + "spans": [ + { + "bbox": [ + 106, + 551, + 198, + 566 + ], + "score": 1.0, + "content": "Recalling (13), that is,", + "type": "text" + } + ], + "index": 17 + } + ], + "index": 17 + }, + { + "type": "interline_equation", + "bbox": [ + 163, + 570, + 447, + 586 + ], + "lines": [ + { + "bbox": [ + 163, + 570, + 447, + 586 + ], + "spans": [ + { + "bbox": [ + 163, + 570, + 447, + 586 + ], + "score": 0.89, + "content": "\\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } = \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - 2 \\alpha _ { k } ( \\varphi _ { k } ( p ^ { k } ) - \\varphi _ { k } ( p ^ { * } ) ) + \\alpha _ { k } ^ { 2 } \\| \\nabla \\varphi _ { k } \\| ^ { 2 } .", + "type": "interline_equation", + "image_path": "6b7313da379a20d7cf6014a35a92634d0fa03bbc9844da339a1bc13222e0177d.jpg" + } + ] + } + ], + "index": 18, + "virtual_lines": [ + { + "bbox": [ + 163, + 570, + 447, + 586 + ], + "spans": [], + "index": 18 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 592, + 313, + 605 + ], + "lines": [ + { + "bbox": [ + 105, + 592, + 313, + 606 + ], + "spans": [ + { + "bbox": [ + 105, + 592, + 313, + 606 + ], + "score": 1.0, + "content": "We may use the gradient bound from (32) to obtain", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 19 + }, + { + "type": "interline_equation", + "bbox": [ + 115, + 611, + 479, + 627 + ], + "lines": [ + { + "bbox": [ + 115, + 611, + 479, + 627 + ], + "spans": [ + { + "bbox": [ + 115, + 611, + 479, + 627 + ], + "score": 0.91, + "content": "\\begin{array} { r } { \\mathbb { E } [ \\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } | \\mathcal { F } _ { k } ] \\le ( 1 + C _ { 1 } \\alpha _ { k } ^ { 2 } ) \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - 2 \\alpha _ { k } \\mathbb { E } [ \\varphi _ { k } ( p ^ { k } ) - \\varphi _ { k } ( p ^ { * } ) | \\mathcal { F } _ { k } ] + C _ { 2 } 
\\alpha _ { k } ^ { 2 } . } \\end{array}", + "type": "interline_equation", + "image_path": "b400dde624bf2a28d00a84d70f846140dc3787e37736406bc30c3e9606e98a6e.jpg" + } + ] + } + ], + "index": 20, + "virtual_lines": [ + { + "bbox": [ + 115, + 611, + 479, + 627 + ], + "spans": [], + "index": 20 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 634, + 506, + 659 + ], + "lines": [ + { + "bbox": [ + 105, + 633, + 506, + 648 + ], + "spans": [ + { + "bbox": [ + 105, + 633, + 324, + 648 + ], + "score": 1.0, + "content": "We now focus on finding a lower bound for the term", + "type": "text" + }, + { + "bbox": [ + 325, + 634, + 426, + 648 + ], + "score": 0.93, + "content": "\\mathbb { E } [ \\varphi _ { k } ( p ^ { k } ) - \\varphi _ { k } ( p ^ { * } ) | \\mathcal { F } _ { k } ]", + "type": "inline_equation" + }, + { + "bbox": [ + 427, + 633, + 506, + 648 + ], + "score": 1.0, + "content": ", which we call the", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 104, + 645, + 306, + 660 + ], + "spans": [ + { + "bbox": [ + 104, + 645, + 109, + 660 + ], + "score": 1.0, + "content": "β€œ", + "type": "text" + }, + { + "bbox": [ + 110, + 647, + 122, + 658 + ], + "score": 0.74, + "content": "\\varphi _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 122, + 645, + 208, + 660 + ], + "score": 1.0, + "content": "-gap”. 
Recall that for", + "type": "text" + }, + { + "bbox": [ + 208, + 646, + 301, + 659 + ], + "score": 0.92, + "content": "p = ( z , w _ { 1 } , \\ldots , w _ { n + 1 } )", + "type": "inline_equation" + }, + { + "bbox": [ + 302, + 645, + 306, + 660 + ], + "score": 1.0, + "content": ",", + "type": "text" + } + ], + "index": 22 + } + ], + "index": 21.5 + }, + { + "type": "interline_equation", + "bbox": [ + 240, + 665, + 370, + 699 + ], + "lines": [ + { + "bbox": [ + 240, + 665, + 370, + 699 + ], + "spans": [ + { + "bbox": [ + 240, + 665, + 370, + 699 + ], + "score": 0.94, + "content": "\\varphi _ { k } ( p ) = \\sum _ { i = 1 } ^ { n + 1 } \\langle z - x _ { i } ^ { k } , y _ { i } ^ { k } - w _ { i } \\rangle .", + "type": "interline_equation", + "image_path": "db273c99f30b728b8227982916fa285f9ecb52884901e46f6cd7adef34ca3695.jpg" + } + ] + } + ], + "index": 23.5, + "virtual_lines": [ + { + "bbox": [ + 240, + 665, + 370, + 682.0 + ], + "spans": [], + "index": 23 + }, + { + "bbox": [ + 240, + 682.0, + 370, + 699.0 + ], + "spans": [], + "index": 24 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 705, + 504, + 735 + ], + "lines": [ + { + "bbox": [ + 105, + 704, + 504, + 722 + ], + "spans": [ + { + "bbox": [ + 105, + 704, + 142, + 722 + ], + "score": 1.0, + "content": "For each", + "type": "text" + }, + { + "bbox": [ + 142, + 707, + 198, + 719 + ], + "score": 0.92, + "content": "i \\in { 1 . . ( n + 1 ) }", + "type": "inline_equation" + }, + { + "bbox": [ + 199, + 704, + 228, + 722 + ], + "score": 1.0, + "content": ", define", + "type": "text" + }, + { + "bbox": [ + 228, + 706, + 342, + 720 + ], + "score": 0.91, + "content": "\\varphi _ { i , k } ( p ) \\doteq \\langle z - x _ { i } ^ { k } , y _ { i } ^ { k } - w _ { i } \\rangle", + "type": "inline_equation" + }, + { + "bbox": [ + 342, + 704, + 394, + 722 + ], + "score": 1.0, + "content": ". 
We will call", + "type": "text" + }, + { + "bbox": [ + 394, + 706, + 504, + 720 + ], + "score": 0.91, + "content": "\\mathbb { E } [ \\varphi _ { i , k } ( p ^ { k } ) - \\varphi _ { i , k } ( p ^ { * } ) \\vert \\mathcal { F } _ { k } ]", + "type": "inline_equation" + } + ], + "index": 25 + }, + { + "bbox": [ + 102, + 712, + 312, + 738 + ], + "spans": [ + { + "bbox": [ + 102, + 712, + 125, + 738 + ], + "score": 1.0, + "content": "the β€œ", + "type": "text" + }, + { + "bbox": [ + 125, + 721, + 143, + 733 + ], + "score": 0.81, + "content": "\\varphi _ { i , k }", + "type": "inline_equation" + }, + { + "bbox": [ + 144, + 712, + 209, + 738 + ], + "score": 1.0, + "content": "-gap”. Note that", + "type": "text" + }, + { + "bbox": [ + 209, + 719, + 304, + 734 + ], + "score": 0.92, + "content": "\\begin{array} { r } { \\varphi _ { k } ( p ) = \\sum _ { i = 1 } ^ { n + 1 } \\varphi _ { i , k } ( p ) } \\end{array}", + "type": "inline_equation" + }, + { + "bbox": [ + 305, + 712, + 312, + 738 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 26 + } + ], + "index": 25.5 + } + ], + "page_idx": 19, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 106, + 26, + 308, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 750, + 312, + 761 + ], + "lines": [ + { + "bbox": [ + 298, + 750, + 313, + 763 + ], + "spans": [ + { + "bbox": [ + 298, + 750, + 313, + 763 + ], + "score": 1.0, + "content": "20", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 82, + 312, + 95 + ], + "lines": [ + { + "bbox": [ + 105, + 81, + 313, + 96 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 259, + 96 + ], + "score": 1.0, + "content": "so 
taking expectations conditioned on", + "type": "text" + }, + { + "bbox": [ + 259, + 83, + 272, + 94 + ], + "score": 0.91, + "content": "\\mathcal { F } _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 273, + 81, + 313, + 96 + ], + "score": 1.0, + "content": "produces", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0, + "bbox_fs": [ + 105, + 81, + 313, + 96 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 118, + 101, + 495, + 302 + ], + "lines": [ + { + "bbox": [ + 118, + 101, + 495, + 302 + ], + "spans": [ + { + "bbox": [ + 118, + 101, + 495, + 302 + ], + "score": 0.94, + "content": "\\begin{array} { r l } { \\displaystyle \\sum _ { i = 1 } ^ { n + 1 } \\mathbb { E } \\| \\nabla _ { x _ { i } } \\varphi _ { i } \\| ^ { 2 } | \\mathcal { F } _ { k } | \\leq 4 ( n + 1 ) \\displaystyle \\sum _ { i = 1 } ^ { n + 1 } \\mathbb { E } \\| | x _ { i } ^ { k } - z ^ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } | } \\\\ & { \\leq 4 ( n + 1 ) \\mathbb { E } \\| | x _ { i } ^ { k } - z ^ { k } | ^ { 2 } | \\mathcal { F } _ { k } | + 4 ( n + 1 ) \\displaystyle \\sum _ { i = 1 } ^ { n } \\mathbb { E } \\| | x _ { i } ^ { k } - z ^ { k } | ^ { 2 } | \\mathcal { F } _ { k } | } \\\\ & { \\leq 4 ( n + 1 ) \\mathbb { E } \\| | x _ { i } ^ { k } - z ^ { k } | ^ { 2 } | \\mathcal { F } _ { k } | } \\\\ & { \\qquad + s ( n + 1 ) \\displaystyle \\sum _ { i = 1 } ^ { n } \\mathbb { E } \\| | x _ { i } ^ { k } - z ^ { k } | ^ { 2 } | \\mathcal { F } _ { k } | + 8 ( n + 1 ) ^ { 2 } \\| z ^ { k } - z ^ { k } | ^ { 2 } } \\\\ & { \\qquad + s ( n + 1 ) \\displaystyle \\sum _ { i = 1 } ^ { n } \\mathbb { E } \\| | x _ { i } ^ { k } - z ^ { k } | ^ { 2 } | \\mathcal { F } _ { k } | } \\\\ & { \\leq 4 ( n + 1 ) \\displaystyle \\sum _ { i = 1 } ^ { n } \\mathbb { E } \\| | x _ { i } ^ { k } - z ^ { k } | ^ { 2 } | \\mathcal { F } _ { k } | } \\\\ & { \\qquad + s ( n + 1 ) \\displaystyle \\sum _ { i = 1 } ^ { n } \\mathbb { E } \\| | x _ { i } ^ { k } - z ^ { k } | 
^ { 2 } | \\mathcal { F } _ { k } | + 8 ( n + 1 ) ^ { 2 } \\| n ^ { k } - p ^ { k } | ^ { 2 } } \\\\ & { \\leq 8 ( n + 1 ) \\displaystyle \\sum _ { i = 1 } ^ { n } 2 \\tau ^ { 2 } + 1 + 9 s ^ { 2 } ( 2 ^ { k } + 1 ) | | b ^ { k } - z ^ { k } | ^ { 2 } | } \\\\ & \\leq 8 ( n + 1 ) \\displaystyle \\sum _ { i = 1 } ^ { n } 2 \\tau ^ { 2 } + 1 + 9 s ^ { 2 } ( 2 ^ { k } + 1 ) | | b ^ { k } - z \\end{array}", + "type": "interline_equation", + "image_path": "b0b75238fbd7c69b9219538cf3bdf1f40557f202386711687b1ac8c8f85fb99b.jpg" + } + ] + } + ], + "index": 2, + "virtual_lines": [ + { + "bbox": [ + 118, + 101, + 495, + 168.0 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 118, + 168.0, + 495, + 235.0 + ], + "spans": [], + "index": 2 + }, + { + "bbox": [ + 118, + 235.0, + 495, + 302.0 + ], + "spans": [], + "index": 3 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 305, + 302, + 317 + ], + "lines": [ + { + "bbox": [ + 106, + 305, + 304, + 319 + ], + "spans": [ + { + "bbox": [ + 106, + 305, + 304, + 319 + ], + "score": 1.0, + "content": "where the final inequality employs (25) and (28).", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 4, + "bbox_fs": [ + 106, + 305, + 304, + 319 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 322, + 391, + 335 + ], + "lines": [ + { + "bbox": [ + 106, + 322, + 391, + 336 + ], + "spans": [ + { + "bbox": [ + 106, + 322, + 391, + 336 + ], + "score": 1.0, + "content": "All told, using (30) and (31) and simplifying the constants, one obtains", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 5, + "bbox_fs": [ + 106, + 322, + 391, + 336 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 180, + 340, + 430, + 393 + ], + "lines": [ + { + "bbox": [ + 180, + 340, + 430, + 393 + ], + "spans": [ + { + "bbox": [ + 180, + 340, + 430, + 393 + ], + "score": 0.94, + "content": "\\begin{array} { r l r } { { \\mathbb { E } [ \\| \\nabla \\varphi _ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } ] = \\mathbb { E } [ \\| 
\\nabla _ { z } \\varphi _ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } ] + \\sum _ { i = 1 } ^ { n + 1 } \\mathbb { E } [ \\| \\nabla _ { w _ { i } } \\varphi _ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } ] } } \\\\ & { } & { \\leq C _ { 1 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + C _ { 2 } , } \\end{array}", + "type": "interline_equation", + "image_path": "0217ef0b4f5b5b4f5eef035ad32b486e027814bdc21defea076f02e797f48ca9.jpg" + } + ] + } + ], + "index": 7, + "virtual_lines": [ + { + "bbox": [ + 180, + 340, + 430, + 357.6666666666667 + ], + "spans": [], + "index": 6 + }, + { + "bbox": [ + 180, + 357.6666666666667, + 430, + 375.33333333333337 + ], + "spans": [], + "index": 7 + }, + { + "bbox": [ + 180, + 375.33333333333337, + 430, + 393.00000000000006 + ], + "spans": [], + "index": 8 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 398, + 133, + 409 + ], + "lines": [ + { + "bbox": [ + 105, + 396, + 135, + 410 + ], + "spans": [ + { + "bbox": [ + 105, + 396, + 135, + 410 + ], + "score": 1.0, + "content": "where", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 9, + "bbox_fs": [ + 105, + 396, + 135, + 410 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 184, + 414, + 426, + 449 + ], + "lines": [ + { + "bbox": [ + 184, + 414, + 426, + 449 + ], + "spans": [ + { + "bbox": [ + 184, + 414, + 426, + 449 + ], + "score": 0.9, + "content": "\\begin{array} { c } { { C _ { 1 } = 2 4 ( 1 + 1 0 \\overline { { { \\rho } } } ^ { 2 } ) ( n + 1 ) ( L ^ { 2 } + 1 ) ^ { 2 } ( N + 1 ) ^ { 2 } } } \\\\ { { { } } } \\\\ { { + 8 ( n + 1 ) \\left( 2 \\tau ^ { 2 } + 6 ( n + 1 ) + 1 + 3 ( n + 1 ) ^ { 2 } \\tau ^ { - 2 } \\right) } } \\end{array}", + "type": "interline_equation", + "image_path": "48759a6394bd537ecf90c80d0c636bfbcce7e85be2075712e959bd63159ec8c3.jpg" + } + ] + } + ], + "index": 10.5, + "virtual_lines": [ + { + "bbox": [ + 184, + 414, + 426, + 431.5 + ], + "spans": [], + "index": 10 + }, + { + "bbox": [ + 184, + 431.5, + 426, + 449.0 + ], + "spans": [], + 
"index": 11 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 453, + 124, + 464 + ], + "lines": [ + { + "bbox": [ + 105, + 453, + 123, + 464 + ], + "spans": [ + { + "bbox": [ + 105, + 453, + 123, + 464 + ], + "score": 1.0, + "content": "and", + "type": "text" + } + ], + "index": 12 + } + ], + "index": 12, + "bbox_fs": [ + 105, + 453, + 123, + 464 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 145, + 470, + 464, + 519 + ], + "lines": [ + { + "bbox": [ + 145, + 470, + 464, + 519 + ], + "spans": [ + { + "bbox": [ + 145, + 470, + 464, + 519 + ], + "score": 0.91, + "content": "\\begin{array} { l } { { C _ { 2 } = 1 6 ( N + 1 ) \\left[ 1 + 4 { \\overline { { \\rho } } } ^ { 2 } ( n + 1 ) + 9 { \\overline { { \\rho } } } ^ { 2 } L ^ { 2 } ( N + 1 ) \\right] \\| B ( z ^ { * } ) \\| ^ { 2 } + 8 \\| \\displaystyle \\sum _ { i = 1 } ^ { n } w _ { i } ^ { * } \\| ^ { 2 } } } \\\\ { { \\nonumber } } \\\\ { { \\qquad + 1 2 { \\overline { { \\rho } } } ^ { 2 } N ( 2 L ^ { 2 } ( N + 1 ) + n + 1 ) + 4 N . 
} } \\end{array}", + "type": "interline_equation", + "image_path": "c8f22239efd08b606b7b055333c56a780f6c8c5afa476bac5207185e4d1330e5.jpg" + } + ] + } + ], + "index": 14, + "virtual_lines": [ + { + "bbox": [ + 145, + 470, + 464, + 486.3333333333333 + ], + "spans": [], + "index": 13 + }, + { + "bbox": [ + 145, + 486.3333333333333, + 464, + 502.66666666666663 + ], + "spans": [], + "index": 14 + }, + { + "bbox": [ + 145, + 502.66666666666663, + 464, + 519.0 + ], + "spans": [], + "index": 15 + } + ] + }, + { + "type": "title", + "bbox": [ + 107, + 531, + 255, + 543 + ], + "lines": [ + { + "bbox": [ + 105, + 530, + 256, + 545 + ], + "spans": [ + { + "bbox": [ + 105, + 530, + 220, + 545 + ], + "score": 1.0, + "content": "C.4 LOWER BOUND FOR", + "type": "text" + }, + { + "bbox": [ + 221, + 533, + 233, + 543 + ], + "score": 0.84, + "content": "\\varphi _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 233, + 530, + 256, + 545 + ], + "score": 1.0, + "content": "-GAP", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 16 + }, + { + "type": "text", + "bbox": [ + 106, + 552, + 197, + 564 + ], + "lines": [ + { + "bbox": [ + 106, + 551, + 198, + 566 + ], + "spans": [ + { + "bbox": [ + 106, + 551, + 198, + 566 + ], + "score": 1.0, + "content": "Recalling (13), that is,", + "type": "text" + } + ], + "index": 17 + } + ], + "index": 17, + "bbox_fs": [ + 106, + 551, + 198, + 566 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 163, + 570, + 447, + 586 + ], + "lines": [ + { + "bbox": [ + 163, + 570, + 447, + 586 + ], + "spans": [ + { + "bbox": [ + 163, + 570, + 447, + 586 + ], + "score": 0.89, + "content": "\\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } = \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - 2 \\alpha _ { k } ( \\varphi _ { k } ( p ^ { k } ) - \\varphi _ { k } ( p ^ { * } ) ) + \\alpha _ { k } ^ { 2 } \\| \\nabla \\varphi _ { k } \\| ^ { 2 } .", + "type": "interline_equation", + "image_path": 
"6b7313da379a20d7cf6014a35a92634d0fa03bbc9844da339a1bc13222e0177d.jpg" + } + ] + } + ], + "index": 18, + "virtual_lines": [ + { + "bbox": [ + 163, + 570, + 447, + 586 + ], + "spans": [], + "index": 18 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 592, + 313, + 605 + ], + "lines": [ + { + "bbox": [ + 105, + 592, + 313, + 606 + ], + "spans": [ + { + "bbox": [ + 105, + 592, + 313, + 606 + ], + "score": 1.0, + "content": "We may use the gradient bound from (32) to obtain", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 19, + "bbox_fs": [ + 105, + 592, + 313, + 606 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 115, + 611, + 479, + 627 + ], + "lines": [ + { + "bbox": [ + 115, + 611, + 479, + 627 + ], + "spans": [ + { + "bbox": [ + 115, + 611, + 479, + 627 + ], + "score": 0.91, + "content": "\\begin{array} { r } { \\mathbb { E } [ \\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } | \\mathcal { F } _ { k } ] \\le ( 1 + C _ { 1 } \\alpha _ { k } ^ { 2 } ) \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - 2 \\alpha _ { k } \\mathbb { E } [ \\varphi _ { k } ( p ^ { k } ) - \\varphi _ { k } ( p ^ { * } ) | \\mathcal { F } _ { k } ] + C _ { 2 } \\alpha _ { k } ^ { 2 } . 
} \\end{array}", + "type": "interline_equation", + "image_path": "b400dde624bf2a28d00a84d70f846140dc3787e37736406bc30c3e9606e98a6e.jpg" + } + ] + } + ], + "index": 20, + "virtual_lines": [ + { + "bbox": [ + 115, + 611, + 479, + 627 + ], + "spans": [], + "index": 20 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 634, + 506, + 659 + ], + "lines": [ + { + "bbox": [ + 105, + 633, + 506, + 648 + ], + "spans": [ + { + "bbox": [ + 105, + 633, + 324, + 648 + ], + "score": 1.0, + "content": "We now focus on finding a lower bound for the term", + "type": "text" + }, + { + "bbox": [ + 325, + 634, + 426, + 648 + ], + "score": 0.93, + "content": "\\mathbb { E } [ \\varphi _ { k } ( p ^ { k } ) - \\varphi _ { k } ( p ^ { * } ) | \\mathcal { F } _ { k } ]", + "type": "inline_equation" + }, + { + "bbox": [ + 427, + 633, + 506, + 648 + ], + "score": 1.0, + "content": ", which we call the", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 104, + 645, + 306, + 660 + ], + "spans": [ + { + "bbox": [ + 104, + 645, + 109, + 660 + ], + "score": 1.0, + "content": "β€œ", + "type": "text" + }, + { + "bbox": [ + 110, + 647, + 122, + 658 + ], + "score": 0.74, + "content": "\\varphi _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 122, + 645, + 208, + 660 + ], + "score": 1.0, + "content": "-gap”. 
Recall that for", + "type": "text" + }, + { + "bbox": [ + 208, + 646, + 301, + 659 + ], + "score": 0.92, + "content": "p = ( z , w _ { 1 } , \\ldots , w _ { n + 1 } )", + "type": "inline_equation" + }, + { + "bbox": [ + 302, + 645, + 306, + 660 + ], + "score": 1.0, + "content": ",", + "type": "text" + } + ], + "index": 22 + } + ], + "index": 21.5, + "bbox_fs": [ + 104, + 633, + 506, + 660 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 240, + 665, + 370, + 699 + ], + "lines": [ + { + "bbox": [ + 240, + 665, + 370, + 699 + ], + "spans": [ + { + "bbox": [ + 240, + 665, + 370, + 699 + ], + "score": 0.94, + "content": "\\varphi _ { k } ( p ) = \\sum _ { i = 1 } ^ { n + 1 } \\langle z - x _ { i } ^ { k } , y _ { i } ^ { k } - w _ { i } \\rangle .", + "type": "interline_equation", + "image_path": "db273c99f30b728b8227982916fa285f9ecb52884901e46f6cd7adef34ca3695.jpg" + } + ] + } + ], + "index": 23.5, + "virtual_lines": [ + { + "bbox": [ + 240, + 665, + 370, + 682.0 + ], + "spans": [], + "index": 23 + }, + { + "bbox": [ + 240, + 682.0, + 370, + 699.0 + ], + "spans": [], + "index": 24 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 705, + 504, + 735 + ], + "lines": [ + { + "bbox": [ + 105, + 704, + 504, + 722 + ], + "spans": [ + { + "bbox": [ + 105, + 704, + 142, + 722 + ], + "score": 1.0, + "content": "For each", + "type": "text" + }, + { + "bbox": [ + 142, + 707, + 198, + 719 + ], + "score": 0.92, + "content": "i \\in { 1 . . ( n + 1 ) }", + "type": "inline_equation" + }, + { + "bbox": [ + 199, + 704, + 228, + 722 + ], + "score": 1.0, + "content": ", define", + "type": "text" + }, + { + "bbox": [ + 228, + 706, + 342, + 720 + ], + "score": 0.91, + "content": "\\varphi _ { i , k } ( p ) \\doteq \\langle z - x _ { i } ^ { k } , y _ { i } ^ { k } - w _ { i } \\rangle", + "type": "inline_equation" + }, + { + "bbox": [ + 342, + 704, + 394, + 722 + ], + "score": 1.0, + "content": ". 
We will call", + "type": "text" + }, + { + "bbox": [ + 394, + 706, + 504, + 720 + ], + "score": 0.91, + "content": "\\mathbb { E } [ \\varphi _ { i , k } ( p ^ { k } ) - \\varphi _ { i , k } ( p ^ { * } ) \\vert \\mathcal { F } _ { k } ]", + "type": "inline_equation" + } + ], + "index": 25 + }, + { + "bbox": [ + 102, + 712, + 312, + 738 + ], + "spans": [ + { + "bbox": [ + 102, + 712, + 125, + 738 + ], + "score": 1.0, + "content": "the β€œ", + "type": "text" + }, + { + "bbox": [ + 125, + 721, + 143, + 733 + ], + "score": 0.81, + "content": "\\varphi _ { i , k }", + "type": "inline_equation" + }, + { + "bbox": [ + 144, + 712, + 209, + 738 + ], + "score": 1.0, + "content": "-gap”. Note that", + "type": "text" + }, + { + "bbox": [ + 209, + 719, + 304, + 734 + ], + "score": 0.92, + "content": "\\begin{array} { r } { \\varphi _ { k } ( p ) = \\sum _ { i = 1 } ^ { n + 1 } \\varphi _ { i , k } ( p ) } \\end{array}", + "type": "inline_equation" + }, + { + "bbox": [ + 305, + 712, + 312, + 738 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 26 + } + ], + "index": 25.5, + "bbox_fs": [ + 102, + 704, + 504, + 738 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 82, + 321, + 95 + ], + "lines": [ + { + "bbox": [ + 106, + 81, + 320, + 96 + ], + "spans": [ + { + "bbox": [ + 106, + 81, + 220, + 96 + ], + "score": 1.0, + "content": "C.5 LOWER BOUND FOR", + "type": "text" + }, + { + "bbox": [ + 221, + 84, + 238, + 95 + ], + "score": 0.86, + "content": "\\varphi _ { i , k }", + "type": "inline_equation" + }, + { + "bbox": [ + 238, + 81, + 286, + 96 + ], + "score": 1.0, + "content": "-GAP OVER", + "type": "text" + }, + { + "bbox": [ + 286, + 83, + 320, + 93 + ], + "score": 0.74, + "content": "i \\in 1 . . 
n", + "type": "inline_equation" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 106, + 102, + 326, + 115 + ], + "lines": [ + { + "bbox": [ + 105, + 102, + 326, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 102, + 122, + 116 + ], + "score": 1.0, + "content": "For", + "type": "text" + }, + { + "bbox": [ + 123, + 104, + 156, + 114 + ], + "score": 0.89, + "content": "i \\in 1 . . n", + "type": "inline_equation" + }, + { + "bbox": [ + 156, + 102, + 326, + 116 + ], + "score": 1.0, + "content": ", we have from line 5 of the algorithm that", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 1 + }, + { + "type": "interline_equation", + "bbox": [ + 255, + 118, + 354, + 134 + ], + "lines": [ + { + "bbox": [ + 255, + 118, + 354, + 134 + ], + "spans": [ + { + "bbox": [ + 255, + 118, + 354, + 134 + ], + "score": 0.91, + "content": "z ^ { k } - x _ { i } ^ { k } = \\tau ( y _ { i } ^ { k } - w _ { i } ^ { k } ) .", + "type": "interline_equation", + "image_path": "1278d9537922579f771741d1ea2fd252984be67bc9aea61233395d5f3286e109.jpg" + } + ] + } + ], + "index": 2, + "virtual_lines": [ + { + "bbox": [ + 255, + 118, + 354, + 134 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 138, + 406, + 153 + ], + "lines": [ + { + "bbox": [ + 106, + 138, + 403, + 154 + ], + "spans": [ + { + "bbox": [ + 106, + 138, + 131, + 154 + ], + "score": 1.0, + "content": "Since", + "type": "text" + }, + { + "bbox": [ + 132, + 139, + 259, + 153 + ], + "score": 0.93, + "content": "\\varphi _ { i , k } ( p ^ { k } ) = \\langle z ^ { k } - x _ { i } ^ { k } , y _ { i } ^ { k } - w _ { i } ^ { k } \\rangle", + "type": "inline_equation" + }, + { + "bbox": [ + 260, + 138, + 370, + 154 + ], + "score": 1.0, + "content": ", one may conclude that for", + "type": "text" + }, + { + "bbox": [ + 370, + 141, + 403, + 151 + ], + "score": 0.88, + "content": "i \\in 1 . . 
n", + "type": "inline_equation" + } + ], + "index": 3 + } + ], + "index": 3 + }, + { + "type": "interline_equation", + "bbox": [ + 214, + 156, + 396, + 181 + ], + "lines": [ + { + "bbox": [ + 214, + 156, + 396, + 181 + ], + "spans": [ + { + "bbox": [ + 214, + 156, + 396, + 181 + ], + "score": 0.92, + "content": "\\varphi _ { i , k } ( p ^ { k } ) = \\frac { \\tau } { 2 } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\frac { 1 } { 2 \\tau } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } .", + "type": "interline_equation", + "image_path": "390d901b63754f4eb1e58031fd91b54dda5d5d016bc60926b2990cb15211b85a.jpg" + } + ] + } + ], + "index": 4, + "virtual_lines": [ + { + "bbox": [ + 214, + 156, + 396, + 181 + ], + "spans": [], + "index": 4 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 189, + 334, + 201 + ], + "lines": [ + { + "bbox": [ + 106, + 188, + 334, + 203 + ], + "spans": [ + { + "bbox": [ + 106, + 188, + 197, + 203 + ], + "score": 1.0, + "content": "On the other hand, for", + "type": "text" + }, + { + "bbox": [ + 198, + 190, + 227, + 201 + ], + "score": 0.92, + "content": "p ^ { * } \\in { \\mathcal { S } }", + "type": "inline_equation" + }, + { + "bbox": [ + 227, + 188, + 245, + 203 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 245, + 190, + 279, + 200 + ], + "score": 0.9, + "content": "i \\in 1 . . n", + "type": "inline_equation" + }, + { + "bbox": [ + 279, + 188, + 334, + 203 + ], + "score": 1.0, + "content": ", one also has", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 5 + }, + { + "type": "interline_equation", + "bbox": [ + 227, + 204, + 383, + 219 + ], + "lines": [ + { + "bbox": [ + 227, + 204, + 383, + 219 + ], + "spans": [ + { + "bbox": [ + 227, + 204, + 383, + 219 + ], + "score": 0.91, + "content": "- \\varphi _ { i , k } \\mathopen { } \\mathclose \\bgroup \\left( p ^ { * } \\aftergroup \\egroup \\right) = \\mathopen { } \\mathclose \\bgroup \\left. 
z ^ { * } - x _ { i } ^ { k } , w _ { i } ^ { * } - y _ { i } ^ { k } \\aftergroup \\egroup \\right. \\geq 0", + "type": "interline_equation", + "image_path": "ffc9f525b71929e041e7477f14a95f253f613a27c9b72aa743a9a40cfb2a51c1.jpg" + } + ] + } + ], + "index": 6, + "virtual_lines": [ + { + "bbox": [ + 227, + 204, + 383, + 219 + ], + "spans": [], + "index": 6 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 223, + 361, + 236 + ], + "lines": [ + { + "bbox": [ + 105, + 222, + 362, + 236 + ], + "spans": [ + { + "bbox": [ + 105, + 222, + 200, + 236 + ], + "score": 1.0, + "content": "by the monotonicity of", + "type": "text" + }, + { + "bbox": [ + 200, + 224, + 212, + 235 + ], + "score": 0.88, + "content": "A _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 212, + 222, + 275, + 236 + ], + "score": 1.0, + "content": ". Therefore, for", + "type": "text" + }, + { + "bbox": [ + 275, + 224, + 308, + 234 + ], + "score": 0.89, + "content": "i \\in 1 . . n", + "type": "inline_equation" + }, + { + "bbox": [ + 308, + 222, + 362, + 236 + ], + "score": 1.0, + "content": ", it holds that", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 7 + }, + { + "type": "interline_equation", + "bbox": [ + 191, + 239, + 418, + 263 + ], + "lines": [ + { + "bbox": [ + 191, + 239, + 418, + 263 + ], + "spans": [ + { + "bbox": [ + 191, + 239, + 418, + 263 + ], + "score": 0.91, + "content": "\\varphi _ { i , k } ( p ^ { k } ) - \\varphi _ { i , k } ( p ^ { * } ) \\geq \\frac { \\tau } { 2 } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\frac { 1 } { 2 \\tau } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } ,", + "type": "interline_equation", + "image_path": "b9de66e401adf4c675eedf82b6a4c6374ccb9dab43acfa9b7e362589f65b25d7.jpg" + } + ] + } + ], + "index": 8, + "virtual_lines": [ + { + "bbox": [ + 191, + 239, + 418, + 263 + ], + "spans": [], + "index": 8 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 266, + 313, + 279 + ], + "lines": [ + { + "bbox": [ + 105, + 
266, + 313, + 279 + ], + "spans": [ + { + "bbox": [ + 105, + 266, + 265, + 279 + ], + "score": 1.0, + "content": "and taking expectations conditioned on", + "type": "text" + }, + { + "bbox": [ + 265, + 267, + 278, + 278 + ], + "score": 0.9, + "content": "\\mathcal { F } _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 278, + 266, + 313, + 279 + ], + "score": 1.0, + "content": "leads to", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 9 + }, + { + "type": "interline_equation", + "bbox": [ + 177, + 282, + 433, + 306 + ], + "lines": [ + { + "bbox": [ + 177, + 282, + 433, + 306 + ], + "spans": [ + { + "bbox": [ + 177, + 282, + 433, + 306 + ], + "score": 0.9, + "content": "\\mathbb { E } [ \\varphi _ { i , k } ( p ^ { k } ) - \\varphi _ { i , k } ( p ^ { * } ) | \\mathcal { F } _ { k } ] \\ge \\frac { \\tau } { 2 } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\frac { 1 } { 2 \\tau } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 }", + "type": "interline_equation", + "image_path": "796efc8de94808fd6e81122cb4be704764f41c0dcad51832b197b125404c4e9c.jpg" + } + ] + } + ], + "index": 10, + "virtual_lines": [ + { + "bbox": [ + 177, + 282, + 433, + 306 + ], + "spans": [], + "index": 10 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 311, + 398, + 324 + ], + "lines": [ + { + "bbox": [ + 106, + 310, + 399, + 324 + ], + "spans": [ + { + "bbox": [ + 106, + 310, + 206, + 324 + ], + "score": 1.0, + "content": "where we have used that", + "type": "text" + }, + { + "bbox": [ + 207, + 311, + 218, + 324 + ], + "score": 0.9, + "content": "x _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 218, + 310, + 236, + 324 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 236, + 311, + 247, + 324 + ], + "score": 0.91, + "content": "y _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 248, + 310, + 284, + 324 + ], + "score": 1.0, + "content": "are both", + "type": "text" + }, + { + "bbox": [ 
+ 284, + 312, + 297, + 322 + ], + "score": 0.89, + "content": "\\mathcal { F } _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 297, + 310, + 361, + 324 + ], + "score": 1.0, + "content": "-measurable for", + "type": "text" + }, + { + "bbox": [ + 361, + 312, + 394, + 322 + ], + "score": 0.88, + "content": "i \\in 1 . . n", + "type": "inline_equation" + }, + { + "bbox": [ + 394, + 310, + 399, + 324 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 11 + }, + { + "type": "title", + "bbox": [ + 108, + 336, + 272, + 348 + ], + "lines": [ + { + "bbox": [ + 105, + 334, + 273, + 350 + ], + "spans": [ + { + "bbox": [ + 105, + 334, + 220, + 350 + ], + "score": 1.0, + "content": "C.6 LOWER BOUND FOR", + "type": "text" + }, + { + "bbox": [ + 221, + 338, + 250, + 349 + ], + "score": 0.84, + "content": "\\varphi _ { n + 1 , k }", + "type": "inline_equation" + }, + { + "bbox": [ + 250, + 334, + 273, + 350 + ], + "score": 1.0, + "content": "-GAP", + "type": "text" + } + ], + "index": 12 + } + ], + "index": 12 + }, + { + "type": "text", + "bbox": [ + 107, + 356, + 272, + 368 + ], + "lines": [ + { + "bbox": [ + 105, + 355, + 272, + 370 + ], + "spans": [ + { + "bbox": [ + 105, + 355, + 272, + 370 + ], + "score": 1.0, + "content": "From lines 6-7 of the algorithm, we have", + "type": "text" + } + ], + "index": 13 + } + ], + "index": 13 + }, + { + "type": "interline_equation", + "bbox": [ + 225, + 372, + 386, + 388 + ], + "lines": [ + { + "bbox": [ + 225, + 372, + 386, + 388 + ], + "spans": [ + { + "bbox": [ + 225, + 372, + 386, + 388 + ], + "score": 0.91, + "content": "z ^ { k } - x _ { n + 1 } ^ { k } = \\rho _ { k } ( B ( z ^ { k } ) - w _ { n + 1 } ^ { k } + \\epsilon ^ { k } ) .", + "type": "interline_equation", + "image_path": "d3b94454758d38466af0ec4ae7c279789cc1fc2223355ccdc58d5dbe8236a818.jpg" + } + ] + } + ], + "index": 14, + "virtual_lines": [ + { + "bbox": [ + 225, + 372, + 386, + 388 + ], + "spans": [], + "index": 
14 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 392, + 149, + 403 + ], + "lines": [ + { + "bbox": [ + 105, + 390, + 151, + 405 + ], + "spans": [ + { + "bbox": [ + 105, + 390, + 151, + 405 + ], + "score": 1.0, + "content": "Therefore,", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 15 + }, + { + "type": "interline_equation", + "bbox": [ + 114, + 407, + 495, + 637 + ], + "lines": [ + { + "bbox": [ + 114, + 407, + 495, + 637 + ], + "spans": [ + { + "bbox": [ + 114, + 407, + 495, + 637 + ], + "score": 0.94, + "content": "\\begin{array} { r l } { \\hat { \\sigma } _ { \\beta 1 , 1 } \\hat { x } _ { \\beta ^ { \\prime } 1 , 1 } ^ { ( f ) } = \\langle \\boldsymbol { \\xi } ^ { 2 } \\boldsymbol { \\cdot } \\boldsymbol { x } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } - \\boldsymbol { u } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } \\rangle } & { \\mathrm { ~ C ~ e ~ } } \\\\ & { = \\langle \\boldsymbol { \\xi } ^ { 2 } \\boldsymbol { \\cdot } \\boldsymbol { x } _ { \\alpha + 1 , \\beta } ^ { ( f ) } \\boldsymbol { y } _ { \\beta ^ { \\prime } 1 } ^ { ( f ) } - \\boldsymbol { u } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } \\rangle + \\langle \\boldsymbol { \\xi } ^ { 2 } - \\boldsymbol { x } _ { \\alpha + 1 , \\beta } ^ { ( f ) } \\boldsymbol { y } _ { \\beta 1 } ^ { ( f ) } - \\boldsymbol { B } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } \\rangle } \\\\ & { - \\langle \\boldsymbol { \\xi } ^ { 2 } \\boldsymbol { \\cdot } \\boldsymbol { u } _ { \\alpha + 1 , \\beta } ^ { ( f ) } \\boldsymbol { x } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } \\rangle + \\langle \\boldsymbol { \\xi } ^ { 2 } \\boldsymbol { \\cdot } \\boldsymbol { u } _ { \\alpha + 1 , \\beta } ^ { ( f ) } \\boldsymbol { x } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } \\rangle + \\langle \\boldsymbol { \\xi } ^ { 2 } \\boldsymbol { \\cdot } \\boldsymbol { u } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } 
\\boldsymbol { y } _ { \\beta 1 } ^ { ( f ) } - \\boldsymbol { u } _ { \\alpha - 1 , \\beta ^ { \\prime } } ^ { ( f ) } \\rangle } \\\\ & - \\langle \\boldsymbol { u } _ { \\alpha } ^ { \\beta } \\boldsymbol { y } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } \\rangle + \\langle \\boldsymbol { \\xi } ^ { 4 } \\boldsymbol { x } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } \\rangle + \\langle \\boldsymbol { u } _ { \\alpha } ^ { \\beta } \\boldsymbol { u } _ { \\alpha + 1 , \\beta ^ { \\prime } } ^ { ( f ) } \\boldsymbol { y } _ { \\beta ^ { \\prime } 1 } ^ { ( f ) } \\rangle + \\langle \\boldsymbol { u } _ { \\alpha } ^ { \\beta } \\boldsymbol { u } _ \\alpha + 1 , \\end{array}", + "type": "interline_equation", + "image_path": "898e6ca4426be5f91cea15a620315a8abeff7916dfc36e2260dbbda9b3210241.jpg" + } + ] + } + ], + "index": 17, + "virtual_lines": [ + { + "bbox": [ + 114, + 407, + 495, + 483.6666666666667 + ], + "spans": [], + "index": 16 + }, + { + "bbox": [ + 114, + 483.6666666666667, + 495, + 560.3333333333334 + ], + "spans": [], + "index": 17 + }, + { + "bbox": [ + 114, + 560.3333333333334, + 495, + 637.0 + ], + "spans": [], + "index": 18 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 639, + 503, + 663 + ], + "lines": [ + { + "bbox": [ + 106, + 639, + 505, + 653 + ], + "spans": [ + { + "bbox": [ + 106, + 639, + 505, + 653 + ], + "score": 1.0, + "content": "where equality (a) uses line 8 of the algorithm and the inequality employs the Cauchy-Schwartz", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 106, + 650, + 306, + 663 + ], + "spans": [ + { + "bbox": [ + 106, + 650, + 292, + 663 + ], + "score": 1.0, + "content": "inequality followed by Lipschitz continuity of", + "type": "text" + }, + { + "bbox": [ + 293, + 652, + 301, + 661 + ], + "score": 0.84, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 302, + 650, + 306, + 663 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 20 
+ } + ], + "index": 19.5 + }, + { + "type": "text", + "bbox": [ + 107, + 668, + 182, + 679 + ], + "lines": [ + { + "bbox": [ + 106, + 667, + 183, + 680 + ], + "spans": [ + { + "bbox": [ + 106, + 667, + 183, + 680 + ], + "score": 1.0, + "content": "On the other hand,", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 21 + }, + { + "type": "interline_equation", + "bbox": [ + 171, + 682, + 439, + 732 + ], + "lines": [ + { + "bbox": [ + 171, + 682, + 439, + 732 + ], + "spans": [ + { + "bbox": [ + 171, + 682, + 439, + 732 + ], + "score": 0.93, + "content": "\\begin{array} { r l } & { - \\varphi _ { n + 1 , k } ( p ^ { * } ) = \\langle z ^ { * } - x _ { n + 1 } ^ { k } , w _ { n + 1 } ^ { * } - y _ { n + 1 } ^ { k } \\rangle } \\\\ & { \\qquad = \\langle z ^ { * } - x _ { n + 1 } ^ { k } , B ( z ^ { * } ) - B ( x _ { i } ^ { k } ) \\rangle + \\langle x _ { n + 1 } ^ { k } - z ^ { * } , e ^ { k } \\rangle } \\\\ & { \\qquad \\geq \\langle x _ { n + 1 } ^ { k } - z ^ { * } , e ^ { k } \\rangle , } \\end{array}", + "type": "interline_equation", + "image_path": "6a4e5417f7938f29831e57985545a264816ab553e42ce83944e5870df2859abe.jpg" + } + ] + } + ], + "index": 23, + "virtual_lines": [ + { + "bbox": [ + 171, + 682, + 439, + 698.6666666666666 + ], + "spans": [], + "index": 22 + }, + { + "bbox": [ + 171, + 698.6666666666666, + 439, + 715.3333333333333 + ], + "spans": [], + "index": 23 + }, + { + "bbox": [ + 171, + 715.3333333333333, + 439, + 731.9999999999999 + ], + "spans": [], + "index": 24 + } + ] + } + ], + "page_idx": 20, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 106, + 26, + 308, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 310, + 760 + ], + "lines": [ + 
{ + "bbox": [ + 298, + 750, + 312, + 764 + ], + "spans": [ + { + "bbox": [ + 298, + 750, + 312, + 764 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 14, + "width": 14 + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 82, + 321, + 95 + ], + "lines": [ + { + "bbox": [ + 106, + 81, + 320, + 96 + ], + "spans": [ + { + "bbox": [ + 106, + 81, + 220, + 96 + ], + "score": 1.0, + "content": "C.5 LOWER BOUND FOR", + "type": "text" + }, + { + "bbox": [ + 221, + 84, + 238, + 95 + ], + "score": 0.86, + "content": "\\varphi _ { i , k }", + "type": "inline_equation" + }, + { + "bbox": [ + 238, + 81, + 286, + 96 + ], + "score": 1.0, + "content": "-GAP OVER", + "type": "text" + }, + { + "bbox": [ + 286, + 83, + 320, + 93 + ], + "score": 0.74, + "content": "i \\in 1 . . n", + "type": "inline_equation" + } + ], + "index": 0 + } + ], + "index": 0, + "bbox_fs": [ + 106, + 81, + 320, + 96 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 102, + 326, + 115 + ], + "lines": [ + { + "bbox": [ + 105, + 102, + 326, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 102, + 122, + 116 + ], + "score": 1.0, + "content": "For", + "type": "text" + }, + { + "bbox": [ + 123, + 104, + 156, + 114 + ], + "score": 0.89, + "content": "i \\in 1 . . 
n", + "type": "inline_equation" + }, + { + "bbox": [ + 156, + 102, + 326, + 116 + ], + "score": 1.0, + "content": ", we have from line 5 of the algorithm that", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 1, + "bbox_fs": [ + 105, + 102, + 326, + 116 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 255, + 118, + 354, + 134 + ], + "lines": [ + { + "bbox": [ + 255, + 118, + 354, + 134 + ], + "spans": [ + { + "bbox": [ + 255, + 118, + 354, + 134 + ], + "score": 0.91, + "content": "z ^ { k } - x _ { i } ^ { k } = \\tau ( y _ { i } ^ { k } - w _ { i } ^ { k } ) .", + "type": "interline_equation", + "image_path": "1278d9537922579f771741d1ea2fd252984be67bc9aea61233395d5f3286e109.jpg" + } + ] + } + ], + "index": 2, + "virtual_lines": [ + { + "bbox": [ + 255, + 118, + 354, + 134 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 138, + 406, + 153 + ], + "lines": [ + { + "bbox": [ + 106, + 138, + 403, + 154 + ], + "spans": [ + { + "bbox": [ + 106, + 138, + 131, + 154 + ], + "score": 1.0, + "content": "Since", + "type": "text" + }, + { + "bbox": [ + 132, + 139, + 259, + 153 + ], + "score": 0.93, + "content": "\\varphi _ { i , k } ( p ^ { k } ) = \\langle z ^ { k } - x _ { i } ^ { k } , y _ { i } ^ { k } - w _ { i } ^ { k } \\rangle", + "type": "inline_equation" + }, + { + "bbox": [ + 260, + 138, + 370, + 154 + ], + "score": 1.0, + "content": ", one may conclude that for", + "type": "text" + }, + { + "bbox": [ + 370, + 141, + 403, + 151 + ], + "score": 0.88, + "content": "i \\in 1 . . 
n", + "type": "inline_equation" + } + ], + "index": 3 + } + ], + "index": 3, + "bbox_fs": [ + 106, + 138, + 403, + 154 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 214, + 156, + 396, + 181 + ], + "lines": [ + { + "bbox": [ + 214, + 156, + 396, + 181 + ], + "spans": [ + { + "bbox": [ + 214, + 156, + 396, + 181 + ], + "score": 0.92, + "content": "\\varphi _ { i , k } ( p ^ { k } ) = \\frac { \\tau } { 2 } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\frac { 1 } { 2 \\tau } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } .", + "type": "interline_equation", + "image_path": "390d901b63754f4eb1e58031fd91b54dda5d5d016bc60926b2990cb15211b85a.jpg" + } + ] + } + ], + "index": 4, + "virtual_lines": [ + { + "bbox": [ + 214, + 156, + 396, + 181 + ], + "spans": [], + "index": 4 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 189, + 334, + 201 + ], + "lines": [ + { + "bbox": [ + 106, + 188, + 334, + 203 + ], + "spans": [ + { + "bbox": [ + 106, + 188, + 197, + 203 + ], + "score": 1.0, + "content": "On the other hand, for", + "type": "text" + }, + { + "bbox": [ + 198, + 190, + 227, + 201 + ], + "score": 0.92, + "content": "p ^ { * } \\in { \\mathcal { S } }", + "type": "inline_equation" + }, + { + "bbox": [ + 227, + 188, + 245, + 203 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 245, + 190, + 279, + 200 + ], + "score": 0.9, + "content": "i \\in 1 . . 
n", + "type": "inline_equation" + }, + { + "bbox": [ + 279, + 188, + 334, + 203 + ], + "score": 1.0, + "content": ", one also has", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 5, + "bbox_fs": [ + 106, + 188, + 334, + 203 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 227, + 204, + 383, + 219 + ], + "lines": [ + { + "bbox": [ + 227, + 204, + 383, + 219 + ], + "spans": [ + { + "bbox": [ + 227, + 204, + 383, + 219 + ], + "score": 0.91, + "content": "- \\varphi _ { i , k } \\mathopen { } \\mathclose \\bgroup \\left( p ^ { * } \\aftergroup \\egroup \\right) = \\mathopen { } \\mathclose \\bgroup \\left. z ^ { * } - x _ { i } ^ { k } , w _ { i } ^ { * } - y _ { i } ^ { k } \\aftergroup \\egroup \\right. \\geq 0", + "type": "interline_equation", + "image_path": "ffc9f525b71929e041e7477f14a95f253f613a27c9b72aa743a9a40cfb2a51c1.jpg" + } + ] + } + ], + "index": 6, + "virtual_lines": [ + { + "bbox": [ + 227, + 204, + 383, + 219 + ], + "spans": [], + "index": 6 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 223, + 361, + 236 + ], + "lines": [ + { + "bbox": [ + 105, + 222, + 362, + 236 + ], + "spans": [ + { + "bbox": [ + 105, + 222, + 200, + 236 + ], + "score": 1.0, + "content": "by the monotonicity of", + "type": "text" + }, + { + "bbox": [ + 200, + 224, + 212, + 235 + ], + "score": 0.88, + "content": "A _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 212, + 222, + 275, + 236 + ], + "score": 1.0, + "content": ". Therefore, for", + "type": "text" + }, + { + "bbox": [ + 275, + 224, + 308, + 234 + ], + "score": 0.89, + "content": "i \\in 1 . . 
n", + "type": "inline_equation" + }, + { + "bbox": [ + 308, + 222, + 362, + 236 + ], + "score": 1.0, + "content": ", it holds that", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 7, + "bbox_fs": [ + 105, + 222, + 362, + 236 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 191, + 239, + 418, + 263 + ], + "lines": [ + { + "bbox": [ + 191, + 239, + 418, + 263 + ], + "spans": [ + { + "bbox": [ + 191, + 239, + 418, + 263 + ], + "score": 0.91, + "content": "\\varphi _ { i , k } ( p ^ { k } ) - \\varphi _ { i , k } ( p ^ { * } ) \\geq \\frac { \\tau } { 2 } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\frac { 1 } { 2 \\tau } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } ,", + "type": "interline_equation", + "image_path": "b9de66e401adf4c675eedf82b6a4c6374ccb9dab43acfa9b7e362589f65b25d7.jpg" + } + ] + } + ], + "index": 8, + "virtual_lines": [ + { + "bbox": [ + 191, + 239, + 418, + 263 + ], + "spans": [], + "index": 8 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 266, + 313, + 279 + ], + "lines": [ + { + "bbox": [ + 105, + 266, + 313, + 279 + ], + "spans": [ + { + "bbox": [ + 105, + 266, + 265, + 279 + ], + "score": 1.0, + "content": "and taking expectations conditioned on", + "type": "text" + }, + { + "bbox": [ + 265, + 267, + 278, + 278 + ], + "score": 0.9, + "content": "\\mathcal { F } _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 278, + 266, + 313, + 279 + ], + "score": 1.0, + "content": "leads to", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 9, + "bbox_fs": [ + 105, + 266, + 313, + 279 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 177, + 282, + 433, + 306 + ], + "lines": [ + { + "bbox": [ + 177, + 282, + 433, + 306 + ], + "spans": [ + { + "bbox": [ + 177, + 282, + 433, + 306 + ], + "score": 0.9, + "content": "\\mathbb { E } [ \\varphi _ { i , k } ( p ^ { k } ) - \\varphi _ { i , k } ( p ^ { * } ) | \\mathcal { F } _ { k } ] \\ge \\frac { \\tau } { 2 } \\| y _ { i } ^ { k } - w _ { i 
} ^ { k } \\| ^ { 2 } + \\frac { 1 } { 2 \\tau } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 }", + "type": "interline_equation", + "image_path": "796efc8de94808fd6e81122cb4be704764f41c0dcad51832b197b125404c4e9c.jpg" + } + ] + } + ], + "index": 10, + "virtual_lines": [ + { + "bbox": [ + 177, + 282, + 433, + 306 + ], + "spans": [], + "index": 10 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 311, + 398, + 324 + ], + "lines": [ + { + "bbox": [ + 106, + 310, + 399, + 324 + ], + "spans": [ + { + "bbox": [ + 106, + 310, + 206, + 324 + ], + "score": 1.0, + "content": "where we have used that", + "type": "text" + }, + { + "bbox": [ + 207, + 311, + 218, + 324 + ], + "score": 0.9, + "content": "x _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 218, + 310, + 236, + 324 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 236, + 311, + 247, + 324 + ], + "score": 0.91, + "content": "y _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 248, + 310, + 284, + 324 + ], + "score": 1.0, + "content": "are both", + "type": "text" + }, + { + "bbox": [ + 284, + 312, + 297, + 322 + ], + "score": 0.89, + "content": "\\mathcal { F } _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 297, + 310, + 361, + 324 + ], + "score": 1.0, + "content": "-measurable for", + "type": "text" + }, + { + "bbox": [ + 361, + 312, + 394, + 322 + ], + "score": 0.88, + "content": "i \\in 1 . . 
n", + "type": "inline_equation" + }, + { + "bbox": [ + 394, + 310, + 399, + 324 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 11, + "bbox_fs": [ + 106, + 310, + 399, + 324 + ] + }, + { + "type": "title", + "bbox": [ + 108, + 336, + 272, + 348 + ], + "lines": [ + { + "bbox": [ + 105, + 334, + 273, + 350 + ], + "spans": [ + { + "bbox": [ + 105, + 334, + 220, + 350 + ], + "score": 1.0, + "content": "C.6 LOWER BOUND FOR", + "type": "text" + }, + { + "bbox": [ + 221, + 338, + 250, + 349 + ], + "score": 0.84, + "content": "\\varphi _ { n + 1 , k }", + "type": "inline_equation" + }, + { + "bbox": [ + 250, + 334, + 273, + 350 + ], + "score": 1.0, + "content": "-GAP", + "type": "text" + } + ], + "index": 12 + } + ], + "index": 12 + }, + { + "type": "text", + "bbox": [ + 107, + 356, + 272, + 368 + ], + "lines": [ + { + "bbox": [ + 105, + 355, + 272, + 370 + ], + "spans": [ + { + "bbox": [ + 105, + 355, + 272, + 370 + ], + "score": 1.0, + "content": "From lines 6-7 of the algorithm, we have", + "type": "text" + } + ], + "index": 13 + } + ], + "index": 13, + "bbox_fs": [ + 105, + 355, + 272, + 370 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 225, + 372, + 386, + 388 + ], + "lines": [ + { + "bbox": [ + 225, + 372, + 386, + 388 + ], + "spans": [ + { + "bbox": [ + 225, + 372, + 386, + 388 + ], + "score": 0.91, + "content": "z ^ { k } - x _ { n + 1 } ^ { k } = \\rho _ { k } ( B ( z ^ { k } ) - w _ { n + 1 } ^ { k } + \\epsilon ^ { k } ) .", + "type": "interline_equation", + "image_path": "d3b94454758d38466af0ec4ae7c279789cc1fc2223355ccdc58d5dbe8236a818.jpg" + } + ] + } + ], + "index": 14, + "virtual_lines": [ + { + "bbox": [ + 225, + 372, + 386, + 388 + ], + "spans": [], + "index": 14 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 392, + 149, + 403 + ], + "lines": [ + { + "bbox": [ + 105, + 390, + 151, + 405 + ], + "spans": [ + { + "bbox": [ + 105, + 390, + 151, + 405 + ], + "score": 1.0, + "content": 
"Therefore,", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 15, + "bbox_fs": [ + 105, + 390, + 151, + 405 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 114, + 407, + 495, + 637 + ], + "lines": [ + { + "bbox": [ + 114, + 407, + 495, + 637 + ], + "spans": [ + { + "bbox": [ + 114, + 407, + 495, + 637 + ], + "score": 0.94, + "content": "\\begin{array} { r l } { \\hat { \\sigma } _ { \\beta 1 , 1 } \\hat { x } _ { \\beta ^ { \\prime } 1 , 1 } ^ { ( f ) } = \\langle \\boldsymbol { \\xi } ^ { 2 } \\boldsymbol { \\cdot } \\boldsymbol { x } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } - \\boldsymbol { u } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } \\rangle } & { \\mathrm { ~ C ~ e ~ } } \\\\ & { = \\langle \\boldsymbol { \\xi } ^ { 2 } \\boldsymbol { \\cdot } \\boldsymbol { x } _ { \\alpha + 1 , \\beta } ^ { ( f ) } \\boldsymbol { y } _ { \\beta ^ { \\prime } 1 } ^ { ( f ) } - \\boldsymbol { u } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } \\rangle + \\langle \\boldsymbol { \\xi } ^ { 2 } - \\boldsymbol { x } _ { \\alpha + 1 , \\beta } ^ { ( f ) } \\boldsymbol { y } _ { \\beta 1 } ^ { ( f ) } - \\boldsymbol { B } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } \\rangle } \\\\ & { - \\langle \\boldsymbol { \\xi } ^ { 2 } \\boldsymbol { \\cdot } \\boldsymbol { u } _ { \\alpha + 1 , \\beta } ^ { ( f ) } \\boldsymbol { x } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } \\rangle + \\langle \\boldsymbol { \\xi } ^ { 2 } \\boldsymbol { \\cdot } \\boldsymbol { u } _ { \\alpha + 1 , \\beta } ^ { ( f ) } \\boldsymbol { x } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } \\rangle + \\langle \\boldsymbol { \\xi } ^ { 2 } \\boldsymbol { \\cdot } \\boldsymbol { u } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } \\boldsymbol { y } _ { \\beta 1 } ^ { ( f ) } - \\boldsymbol { u } _ { \\alpha - 1 , \\beta ^ { \\prime } } ^ { ( f ) } \\rangle } \\\\ & - \\langle \\boldsymbol { u } _ { \\alpha } ^ { \\beta 
} \\boldsymbol { y } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } \\rangle + \\langle \\boldsymbol { \\xi } ^ { 4 } \\boldsymbol { x } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } \\rangle + \\langle \\boldsymbol { u } _ { \\alpha } ^ { \\beta } \\boldsymbol { u } _ { \\alpha + 1 , \\beta ^ { \\prime } } ^ { ( f ) } \\boldsymbol { y } _ { \\beta ^ { \\prime } 1 } ^ { ( f ) } \\rangle + \\langle \\boldsymbol { u } _ { \\alpha } ^ { \\beta } \\boldsymbol { u } _ \\alpha + 1 , \\end{array}", + "type": "interline_equation", + "image_path": "898e6ca4426be5f91cea15a620315a8abeff7916dfc36e2260dbbda9b3210241.jpg" + } + ] + } + ], + "index": 17, + "virtual_lines": [ + { + "bbox": [ + 114, + 407, + 495, + 483.6666666666667 + ], + "spans": [], + "index": 16 + }, + { + "bbox": [ + 114, + 483.6666666666667, + 495, + 560.3333333333334 + ], + "spans": [], + "index": 17 + }, + { + "bbox": [ + 114, + 560.3333333333334, + 495, + 637.0 + ], + "spans": [], + "index": 18 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 639, + 503, + 663 + ], + "lines": [ + { + "bbox": [ + 106, + 639, + 505, + 653 + ], + "spans": [ + { + "bbox": [ + 106, + 639, + 505, + 653 + ], + "score": 1.0, + "content": "where equality (a) uses line 8 of the algorithm and the inequality employs the Cauchy-Schwartz", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 106, + 650, + 306, + 663 + ], + "spans": [ + { + "bbox": [ + 106, + 650, + 292, + 663 + ], + "score": 1.0, + "content": "inequality followed by Lipschitz continuity of", + "type": "text" + }, + { + "bbox": [ + 293, + 652, + 301, + 661 + ], + "score": 0.84, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 302, + 650, + 306, + 663 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 19.5, + "bbox_fs": [ + 106, + 639, + 505, + 663 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 668, + 182, + 679 + ], + "lines": [ + { + "bbox": [ + 106, + 667, + 183, 
+ 680 + ], + "spans": [ + { + "bbox": [ + 106, + 667, + 183, + 680 + ], + "score": 1.0, + "content": "On the other hand,", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 21, + "bbox_fs": [ + 106, + 667, + 183, + 680 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 171, + 682, + 439, + 732 + ], + "lines": [ + { + "bbox": [ + 171, + 682, + 439, + 732 + ], + "spans": [ + { + "bbox": [ + 171, + 682, + 439, + 732 + ], + "score": 0.93, + "content": "\\begin{array} { r l } & { - \\varphi _ { n + 1 , k } ( p ^ { * } ) = \\langle z ^ { * } - x _ { n + 1 } ^ { k } , w _ { n + 1 } ^ { * } - y _ { n + 1 } ^ { k } \\rangle } \\\\ & { \\qquad = \\langle z ^ { * } - x _ { n + 1 } ^ { k } , B ( z ^ { * } ) - B ( x _ { i } ^ { k } ) \\rangle + \\langle x _ { n + 1 } ^ { k } - z ^ { * } , e ^ { k } \\rangle } \\\\ & { \\qquad \\geq \\langle x _ { n + 1 } ^ { k } - z ^ { * } , e ^ { k } \\rangle , } \\end{array}", + "type": "interline_equation", + "image_path": "6a4e5417f7938f29831e57985545a264816ab553e42ce83944e5870df2859abe.jpg" + } + ] + } + ], + "index": 23, + "virtual_lines": [ + { + "bbox": [ + 171, + 682, + 439, + 698.6666666666666 + ], + "spans": [], + "index": 22 + }, + { + "bbox": [ + 171, + 698.6666666666666, + 439, + 715.3333333333333 + ], + "spans": [], + "index": 23 + }, + { + "bbox": [ + 171, + 715.3333333333333, + 439, + 731.9999999999999 + ], + "spans": [], + "index": 24 + } + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 105, + 82, + 504, + 105 + ], + "lines": [ + { + "bbox": [ + 104, + 81, + 505, + 96 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 505, + 96 + ], + "score": 1.0, + "content": "where the second equality uses line 8 of the algorithm and the inequality follows from the monotonicity", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 92, + 131, + 105 + ], + "spans": [ + { + "bbox": [ + 104, + 92, + 117, + 105 + ], + "score": 1.0, + "content": "of", + "type": "text" + }, + { + 
"bbox": [ + 118, + 94, + 126, + 104 + ], + "score": 0.83, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 127, + 92, + 131, + 105 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 0.5 + }, + { + "type": "text", + "bbox": [ + 106, + 110, + 235, + 122 + ], + "lines": [ + { + "bbox": [ + 106, + 110, + 235, + 124 + ], + "spans": [ + { + "bbox": [ + 106, + 110, + 235, + 124 + ], + "score": 1.0, + "content": "Combining (39) and (40) yields", + "type": "text" + } + ], + "index": 2 + } + ], + "index": 2 + }, + { + "type": "interline_equation", + "bbox": [ + 111, + 122, + 506, + 189 + ], + "lines": [ + { + "bbox": [ + 111, + 122, + 506, + 189 + ], + "spans": [ + { + "bbox": [ + 111, + 122, + 506, + 189 + ], + "score": 0.93, + "content": "\\begin{array} { r l } & { \\circ _ { n + 1 , k } ( p ^ { k } ) - \\varphi _ { n + 1 , k } ( p ^ { * } ) \\geq \\rho _ { k } ( 1 - \\rho _ { k } L ) \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } + \\rho _ { k } ( 1 - 2 \\rho _ { k } L ) \\langle \\epsilon ^ { k } , B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\rangle } \\\\ & { \\qquad + \\langle z ^ { k } - x _ { n + 1 } ^ { k } , e ^ { k } \\rangle + \\langle x _ { n + 1 } ^ { k } - z ^ { * } , e ^ { k } \\rangle - \\rho _ { k } ^ { 2 } L \\| \\epsilon ^ { k } \\| ^ { 2 } } \\\\ & { \\qquad = \\rho _ { k } ( 1 - \\rho _ { k } L ) \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } - \\rho _ { k } ^ { 2 } L \\| \\epsilon ^ { k } \\| ^ { 2 } } \\\\ & { \\qquad + \\rho _ { k } ( 1 - 2 \\rho _ { k } L ) \\langle \\epsilon ^ { k } , B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\rangle + \\langle z ^ { k } - z ^ { * } , e ^ { k } \\rangle . 
\\qquad ( 4 1 ) } \\end{array}", + "type": "interline_equation", + "image_path": "2cef8eedbcd0916382969d399b818e62dc0ebe3405e9ef9217ea55ea2991d83d.jpg" + } + ] + } + ], + "index": 4, + "virtual_lines": [ + { + "bbox": [ + 111, + 122, + 506, + 144.33333333333334 + ], + "spans": [], + "index": 3 + }, + { + "bbox": [ + 111, + 144.33333333333334, + 506, + 166.66666666666669 + ], + "spans": [], + "index": 4 + }, + { + "bbox": [ + 111, + 166.66666666666669, + 506, + 189.00000000000003 + ], + "spans": [], + "index": 5 + } + ] + }, + { + "type": "text", + "bbox": [ + 111, + 193, + 391, + 205 + ], + "lines": [ + { + "bbox": [ + 109, + 192, + 391, + 207 + ], + "spans": [ + { + "bbox": [ + 109, + 192, + 286, + 207 + ], + "score": 1.0, + "content": "Now, if we take expectations conditioned on", + "type": "text" + }, + { + "bbox": [ + 286, + 194, + 299, + 205 + ], + "score": 0.89, + "content": "\\mathcal { F } _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 299, + 192, + 391, + 207 + ], + "score": 1.0, + "content": "and use (9), we obtain", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 6 + }, + { + "type": "interline_equation", + "bbox": [ + 199, + 206, + 410, + 222 + ], + "lines": [ + { + "bbox": [ + 199, + 206, + 410, + 222 + ], + "spans": [ + { + "bbox": [ + 199, + 206, + 410, + 222 + ], + "score": 0.86, + "content": "{ \\mathbb E } \\big [ \\langle z ^ { k } - z ^ { * } , e ^ { k } \\rangle \\bigm | \\mathcal F _ { k } \\big ] = \\langle z ^ { k } - z ^ { * } , { \\mathbb E } [ e ^ { k } | \\mathcal F _ { k } ] \\rangle = 0 .", + "type": "interline_equation", + "image_path": "a109b0dd57cd47be02010d6f26dcc8ed46492611dc1ba10ccc11675e82d12b9d.jpg" + } + ] + } + ], + "index": 7, + "virtual_lines": [ + { + "bbox": [ + 199, + 206, + 410, + 222 + ], + "spans": [], + "index": 7 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 222, + 207, + 234 + ], + "lines": [ + { + "bbox": [ + 106, + 221, + 207, + 236 + ], + "spans": [ + { + "bbox": [ + 106, + 
221, + 207, + 236 + ], + "score": 1.0, + "content": "Similarly, (9) also yields", + "type": "text" + } + ], + "index": 8 + } + ], + "index": 8 + }, + { + "type": "interline_equation", + "bbox": [ + 171, + 235, + 438, + 251 + ], + "lines": [ + { + "bbox": [ + 171, + 235, + 438, + 251 + ], + "spans": [ + { + "bbox": [ + 171, + 235, + 438, + 251 + ], + "score": 0.87, + "content": "\\begin{array} { r } { \\mathbb { E } \\big [ \\langle \\epsilon ^ { k } , B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\rangle \\big | \\mathcal { F } _ { k } \\big ] = \\langle \\mathbb { E } [ \\epsilon ^ { k } | \\mathcal { F } _ { k } ] , B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\rangle = 0 . } \\end{array}", + "type": "interline_equation", + "image_path": "a524547846e253ac644a2637a2aa66698e5bd1ed9aa153aa81a39c224fa4c1e5.jpg" + } + ] + } + ], + "index": 9, + "virtual_lines": [ + { + "bbox": [ + 171, + 235, + 438, + 251 + ], + "spans": [], + "index": 9 + } + ] + }, + { + "type": "text", + "bbox": [ + 113, + 251, + 360, + 263 + ], + "lines": [ + { + "bbox": [ + 111, + 249, + 362, + 265 + ], + "spans": [ + { + "bbox": [ + 111, + 249, + 362, + 265 + ], + "score": 1.0, + "content": "hus, using (42) and (43) and taking expectations of (41) yields", + "type": "text" + } + ], + "index": 10 + } + ], + "index": 10 + }, + { + "type": "interline_equation", + "bbox": [ + 116, + 262, + 494, + 298 + ], + "lines": [ + { + "bbox": [ + 116, + 262, + 494, + 298 + ], + "spans": [ + { + "bbox": [ + 116, + 262, + 494, + 298 + ], + "score": 0.92, + "content": "\\begin{array} { r l } & { \\mathbb { E } [ \\varphi _ { n + 1 , k } ( p ^ { k } ) - \\varphi _ { n + 1 , k } ( p ^ { * } ) \\mid \\mathcal { F } _ { k } ] \\ge \\rho _ { k } ( 1 - \\rho _ { k } L ) \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } - \\rho _ { k } ^ { 2 } L \\mathbb { E } [ \\| \\epsilon ^ { k } \\| ^ { 2 } \\vert \\mathcal { F } _ { k } ] } \\\\ & { \\qquad \\ge \\rho _ { k } ( 1 - \\bar { \\rho } L ) \\| B ( z ^ { k } ) - w _ { n + 
1 } ^ { k } \\| ^ { 2 } - \\rho _ { k } ^ { 2 } N L ( 1 + \\| B ( z ^ { k } ) \\| ^ { 2 } ) , } \\end{array}", + "type": "interline_equation", + "image_path": "fee85ee1b4f440fe08548cae6b0960b74f1d34127c39931c49651b80ddc21aad.jpg" + } + ] + } + ], + "index": 12, + "virtual_lines": [ + { + "bbox": [ + 116, + 262, + 494, + 274.0 + ], + "spans": [], + "index": 11 + }, + { + "bbox": [ + 116, + 274.0, + 494, + 286.0 + ], + "spans": [], + "index": 12 + }, + { + "bbox": [ + 116, + 286.0, + 494, + 298.0 + ], + "spans": [], + "index": 13 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 308, + 505, + 330 + ], + "lines": [ + { + "bbox": [ + 105, + 307, + 506, + 321 + ], + "spans": [ + { + "bbox": [ + 105, + 307, + 506, + 321 + ], + "score": 1.0, + "content": "where in the second inequality we used (12) and the noise variance bound (10). Recall from (12) that", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 318, + 159, + 331 + ], + "spans": [ + { + "bbox": [ + 106, + 319, + 154, + 331 + ], + "score": 0.9, + "content": "1 - \\overline { { \\rho } } L > 0", + "type": "inline_equation" + }, + { + "bbox": [ + 155, + 318, + 159, + 331 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 14.5 + }, + { + "type": "text", + "bbox": [ + 106, + 336, + 193, + 347 + ], + "lines": [ + { + "bbox": [ + 106, + 335, + 194, + 348 + ], + "spans": [ + { + "bbox": [ + 106, + 335, + 194, + 348 + ], + "score": 1.0, + "content": "Next, we remark that", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 16 + }, + { + "type": "interline_equation", + "bbox": [ + 149, + 347, + 462, + 380 + ], + "lines": [ + { + "bbox": [ + 149, + 347, + 462, + 380 + ], + "spans": [ + { + "bbox": [ + 149, + 347, + 462, + 380 + ], + "score": 0.88, + "content": "\\begin{array} { r l } & { \\| B ( z ^ { k } ) \\| ^ { 2 } = \\| B ( z ^ { k } ) - B ( z ^ { * } ) + B ( z ^ { * } ) \\| ^ { 2 } } \\\\ & { \\qquad \\leq 2 L ^ { 2 } \\| z ^ { k } - z 
^ { * } \\| ^ { 2 } + 2 \\| B ( z ^ { * } ) \\| ^ { 2 } \\leq 2 L ^ { 2 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + 2 \\| B ( z ^ { * } ) \\| ^ { 2 } . } \\end{array}", + "type": "interline_equation", + "image_path": "908750830336ba4611314739dfeef10958b281d08ec14b4328ed49b40e2d4aa7.jpg" + } + ] + } + ], + "index": 18, + "virtual_lines": [ + { + "bbox": [ + 149, + 347, + 462, + 358.0 + ], + "spans": [], + "index": 17 + }, + { + "bbox": [ + 149, + 358.0, + 462, + 369.0 + ], + "spans": [], + "index": 18 + }, + { + "bbox": [ + 149, + 369.0, + 462, + 380.0 + ], + "spans": [], + "index": 19 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 380, + 279, + 392 + ], + "lines": [ + { + "bbox": [ + 106, + 379, + 279, + 393 + ], + "spans": [ + { + "bbox": [ + 106, + 379, + 279, + 393 + ], + "score": 1.0, + "content": "Substituting this inequality into (44) yields", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 20 + }, + { + "type": "interline_equation", + "bbox": [ + 117, + 391, + 477, + 426 + ], + "lines": [ + { + "bbox": [ + 117, + 391, + 477, + 426 + ], + "spans": [ + { + "bbox": [ + 117, + 391, + 477, + 426 + ], + "score": 0.9, + "content": "\\begin{array} { r l } & { \\mathbb { E } [ \\varphi _ { n + 1 , k } ( p ^ { k } ) - \\varphi _ { n + 1 , k } ( p ^ { * } ) | \\mathcal { F } _ { k } ] \\geq \\rho _ { k } ( 1 - \\overline { { \\rho } } L ) \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } } \\\\ & { \\qquad - 2 \\rho _ { k } ^ { 2 } N L ^ { 3 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - \\rho _ { k } ^ { 2 } N L ( 1 + 2 \\| B ( z ^ { * } ) \\| ^ { 2 } ) . 
} \\end{array}", + "type": "interline_equation", + "image_path": "c185e6b0a8b9cd53f00cbcc89d0f4e975fa5fb7ce030633f594ce590b87d0e11.jpg" + } + ] + } + ], + "index": 22, + "virtual_lines": [ + { + "bbox": [ + 117, + 391, + 477, + 402.6666666666667 + ], + "spans": [], + "index": 21 + }, + { + "bbox": [ + 117, + 402.6666666666667, + 477, + 414.33333333333337 + ], + "spans": [], + "index": 22 + }, + { + "bbox": [ + 117, + 414.33333333333337, + 477, + 426.00000000000006 + ], + "spans": [], + "index": 23 + } + ] + }, + { + "type": "text", + "bbox": [ + 104, + 430, + 492, + 443 + ], + "lines": [ + { + "bbox": [ + 105, + 429, + 494, + 444 + ], + "spans": [ + { + "bbox": [ + 105, + 429, + 252, + 444 + ], + "score": 1.0, + "content": "Finalizing the lower bound on the", + "type": "text" + }, + { + "bbox": [ + 252, + 432, + 264, + 442 + ], + "score": 0.84, + "content": "\\varphi _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 264, + 429, + 372, + 444 + ], + "score": 1.0, + "content": "-gap Summing (37) over", + "type": "text" + }, + { + "bbox": [ + 372, + 431, + 406, + 441 + ], + "score": 0.9, + "content": "i \\in 1 . . 
n", + "type": "inline_equation" + }, + { + "bbox": [ + 406, + 429, + 494, + 444 + ], + "score": 1.0, + "content": "and using (45) yields", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 24 + }, + { + "type": "interline_equation", + "bbox": [ + 126, + 443, + 484, + 545 + ], + "lines": [ + { + "bbox": [ + 126, + 443, + 484, + 545 + ], + "spans": [ + { + "bbox": [ + 126, + 443, + 484, + 545 + ], + "score": 0.95, + "content": "\\begin{array} { r l r } { { \\mathbb { E } [ \\varphi _ { k } ( p ^ { k } ) - \\varphi _ { k } ( p ^ { * } ) | \\mathcal { F } _ { k } ] = \\sum _ { i = 1 } ^ { n + 1 } \\mathbb { E } [ \\varphi _ { i , k } ( p ^ { k } ) - \\varphi _ { i , k } ( p ^ { * } ) | \\mathcal { F } _ { k } ] } } \\\\ & { } & { \\geq \\frac { 7 } { 2 } \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\frac { 1 } { 2 \\tau } \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } } \\\\ & { } & { + \\rho _ { k } ( 1 - \\overline { { \\rho } } L ) \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } - 2 \\rho _ { k } ^ { 2 } N L ^ { 3 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } } \\\\ & { } & { - \\rho _ { k } ^ { 2 } N L ( 1 + 2 \\| B ( z ^ { * } ) \\| ^ { 2 } ) . 
} \\end{array}", + "type": "interline_equation", + "image_path": "9b68b90678ff9366588df480aa47f8d6e3c70a5dbfcde6d18ba165fc27734677.jpg" + } + ] + } + ], + "index": 26, + "virtual_lines": [ + { + "bbox": [ + 126, + 443, + 484, + 477.0 + ], + "spans": [], + "index": 25 + }, + { + "bbox": [ + 126, + 477.0, + 484, + 511.0 + ], + "spans": [], + "index": 26 + }, + { + "bbox": [ + 126, + 511.0, + 484, + 545.0 + ], + "spans": [], + "index": 27 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 552, + 387, + 565 + ], + "lines": [ + { + "bbox": [ + 105, + 552, + 388, + 567 + ], + "spans": [ + { + "bbox": [ + 105, + 552, + 388, + 567 + ], + "score": 1.0, + "content": "C.7 ESTABLISHING STOCHASTIC QUASI-FEJER MONOTONICITY", + "type": "text" + } + ], + "index": 28 + } + ], + "index": 28 + }, + { + "type": "text", + "bbox": [ + 106, + 574, + 180, + 585 + ], + "lines": [ + { + "bbox": [ + 105, + 572, + 181, + 587 + ], + "spans": [ + { + "bbox": [ + 105, + 572, + 181, + 587 + ], + "score": 1.0, + "content": "Returning to (35),", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 29 + }, + { + "type": "text", + "bbox": [ + 109, + 587, + 488, + 614 + ], + "lines": [ + { + "bbox": [ + 123, + 587, + 486, + 602 + ], + "spans": [ + { + "bbox": [ + 123, + 587, + 486, + 602 + ], + "score": 0.87, + "content": "\\begin{array} { r } { \\mathbb { E } [ \\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } | \\mathcal { F } _ { k } ] \\le ( 1 + C _ { 1 } \\alpha _ { k } ^ { 2 } ) \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - 2 \\alpha _ { k } \\mathbb { E } [ \\varphi _ { k } ( p ^ { k } ) - \\varphi _ { k } ( p ^ { * } ) | \\mathcal { F } _ { k } ] + C _ { 2 } \\alpha _ { k } ^ { 2 } , } \\end{array}", + "type": "inline_equation", + "image_path": "877b9d10207f6d0171702fd8bbaf328e500b5e2cc0265b281cb989dfce63b177.jpg" + } + ], + "index": 30 + }, + { + "bbox": [ + 106, + 601, + 440, + 615 + ], + "spans": [ + { + "bbox": [ + 106, + 601, + 440, + 615 + ], + "score": 1.0, + "content": "we may now 
substitute (46) for the expectation on the right-hand side. First, define", + "type": "text" + } + ], + "index": 31 + } + ], + "index": 30.5 + }, + { + "type": "interline_equation", + "bbox": [ + 144, + 615, + 466, + 648 + ], + "lines": [ + { + "bbox": [ + 144, + 615, + 466, + 648 + ], + "spans": [ + { + "bbox": [ + 144, + 615, + 466, + 648 + ], + "score": 0.92, + "content": "T _ { k } \\doteq \\frac { \\tau } { \\overline { { \\rho } } } \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\frac { 1 } { \\overline { { \\rho } } \\tau } \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + 2 ( 1 - \\overline { { \\rho } } L ) \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } ,", + "type": "interline_equation", + "image_path": "11aa682640a3403534593a006f13059473eb882f9691f5197ad2881d9c045666.jpg" + } + ] + } + ], + "index": 33, + "virtual_lines": [ + { + "bbox": [ + 144, + 615, + 466, + 626.0 + ], + "spans": [], + "index": 32 + }, + { + "bbox": [ + 144, + 626.0, + 466, + 637.0 + ], + "spans": [], + "index": 33 + }, + { + "bbox": [ + 144, + 637.0, + 466, + 648.0 + ], + "spans": [], + "index": 34 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 649, + 284, + 660 + ], + "lines": [ + { + "bbox": [ + 106, + 648, + 285, + 662 + ], + "spans": [ + { + "bbox": [ + 106, + 648, + 285, + 662 + ], + "score": 1.0, + "content": "after which we may use (46) in (35) to yield", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 35 + }, + { + "type": "interline_equation", + "bbox": [ + 114, + 660, + 480, + 676 + ], + "lines": [ + { + "bbox": [ + 114, + 660, + 480, + 676 + ], + "spans": [ + { + "bbox": [ + 114, + 660, + 480, + 676 + ], + "score": 0.88, + "content": "\\begin{array} { r } { \\mathbb { E } [ \\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } | \\mathcal { F } _ { k } ] \\le \\big ( 1 + C _ { 1 } \\alpha _ { k } ^ { 2 } + C _ { 3 } \\alpha _ { k } \\rho _ { k } ^ { 2 } \\big ) \\| p ^ { k } - p ^ { * } \\| ^ { 
2 } - \\alpha _ { k } \\rho _ { k } T _ { k } + C _ { 2 } \\alpha _ { k } ^ { 2 } + C _ { 4 } \\alpha _ { k } \\rho _ { k } ^ { 2 } } \\end{array}", + "type": "interline_equation", + "image_path": "51f6e4ea6827b0fab220fa09d144f61f5dc0d4e9dd17ff0cfa53eada7217961b.jpg" + } + ] + } + ], + "index": 36, + "virtual_lines": [ + { + "bbox": [ + 114, + 660, + 480, + 676 + ], + "spans": [], + "index": 36 + } + ] + }, + { + "type": "text", + "bbox": [ + 116, + 677, + 345, + 689 + ], + "lines": [ + { + "bbox": [ + 114, + 676, + 346, + 690 + ], + "spans": [ + { + "bbox": [ + 114, + 676, + 133, + 690 + ], + "score": 1.0, + "content": "here", + "type": "text" + }, + { + "bbox": [ + 133, + 678, + 145, + 688 + ], + "score": 0.88, + "content": "C _ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 146, + 676, + 164, + 690 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 164, + 677, + 177, + 688 + ], + "score": 0.88, + "content": "C _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 177, + 676, + 346, + 690 + ], + "score": 1.0, + "content": "are defined as before in (33) and (34) and", + "type": "text" + } + ], + "index": 37 + } + ], + "index": 37 + }, + { + "type": "interline_equation", + "bbox": [ + 245, + 689, + 366, + 720 + ], + "lines": [ + { + "bbox": [ + 245, + 689, + 366, + 720 + ], + "spans": [ + { + "bbox": [ + 245, + 689, + 366, + 720 + ], + "score": 0.89, + "content": "\\begin{array} { l } { C _ { 3 } = 4 N L ^ { 3 } } \\\\ { C _ { 4 } = 2 N L ( 1 + 2 \\| B ( z ^ { \\ast } ) \\| ^ { 2 } ) . 
} \\end{array}", + "type": "interline_equation", + "image_path": "8c1a47741607a1b552df795163176d43cc5e21fb70ac1f4a33110458fdcfdf26.jpg" + } + ] + } + ], + "index": 38.5, + "virtual_lines": [ + { + "bbox": [ + 245, + 689, + 366, + 704.5 + ], + "spans": [], + "index": 38 + }, + { + "bbox": [ + 245, + 704.5, + 366, + 720.0 + ], + "spans": [], + "index": 39 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 720, + 261, + 732 + ], + "lines": [ + { + "bbox": [ + 106, + 720, + 262, + 734 + ], + "spans": [ + { + "bbox": [ + 106, + 720, + 262, + 734 + ], + "score": 1.0, + "content": "This completes the proof of Lemma 3.", + "type": "text" + } + ], + "index": 40 + } + ], + "index": 40 + } + ], + "page_idx": 21, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 106, + 26, + 308, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 760 + ], + "lines": [ + { + "bbox": [ + 298, + 750, + 313, + 765 + ], + "spans": [ + { + "bbox": [ + 298, + 750, + 313, + 765 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 15, + "width": 15 + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 105, + 82, + 504, + 105 + ], + "lines": [ + { + "bbox": [ + 104, + 81, + 505, + 96 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 505, + 96 + ], + "score": 1.0, + "content": "where the second equality uses line 8 of the algorithm and the inequality follows from the monotonicity", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 92, + 131, + 105 + ], + "spans": [ + { + "bbox": [ + 104, + 92, + 117, + 105 + ], + "score": 1.0, + "content": "of", + "type": "text" + }, + { + "bbox": [ + 118, + 94, + 126, + 104 + ], + "score": 0.83, + 
"content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 127, + 92, + 131, + 105 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 0.5, + "bbox_fs": [ + 104, + 81, + 505, + 105 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 110, + 235, + 122 + ], + "lines": [ + { + "bbox": [ + 106, + 110, + 235, + 124 + ], + "spans": [ + { + "bbox": [ + 106, + 110, + 235, + 124 + ], + "score": 1.0, + "content": "Combining (39) and (40) yields", + "type": "text" + } + ], + "index": 2 + } + ], + "index": 2, + "bbox_fs": [ + 106, + 110, + 235, + 124 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 111, + 122, + 506, + 189 + ], + "lines": [ + { + "bbox": [ + 111, + 122, + 506, + 189 + ], + "spans": [ + { + "bbox": [ + 111, + 122, + 506, + 189 + ], + "score": 0.93, + "content": "\\begin{array} { r l } & { \\circ _ { n + 1 , k } ( p ^ { k } ) - \\varphi _ { n + 1 , k } ( p ^ { * } ) \\geq \\rho _ { k } ( 1 - \\rho _ { k } L ) \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } + \\rho _ { k } ( 1 - 2 \\rho _ { k } L ) \\langle \\epsilon ^ { k } , B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\rangle } \\\\ & { \\qquad + \\langle z ^ { k } - x _ { n + 1 } ^ { k } , e ^ { k } \\rangle + \\langle x _ { n + 1 } ^ { k } - z ^ { * } , e ^ { k } \\rangle - \\rho _ { k } ^ { 2 } L \\| \\epsilon ^ { k } \\| ^ { 2 } } \\\\ & { \\qquad = \\rho _ { k } ( 1 - \\rho _ { k } L ) \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } - \\rho _ { k } ^ { 2 } L \\| \\epsilon ^ { k } \\| ^ { 2 } } \\\\ & { \\qquad + \\rho _ { k } ( 1 - 2 \\rho _ { k } L ) \\langle \\epsilon ^ { k } , B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\rangle + \\langle z ^ { k } - z ^ { * } , e ^ { k } \\rangle . 
\\qquad ( 4 1 ) } \\end{array}", + "type": "interline_equation", + "image_path": "2cef8eedbcd0916382969d399b818e62dc0ebe3405e9ef9217ea55ea2991d83d.jpg" + } + ] + } + ], + "index": 4, + "virtual_lines": [ + { + "bbox": [ + 111, + 122, + 506, + 144.33333333333334 + ], + "spans": [], + "index": 3 + }, + { + "bbox": [ + 111, + 144.33333333333334, + 506, + 166.66666666666669 + ], + "spans": [], + "index": 4 + }, + { + "bbox": [ + 111, + 166.66666666666669, + 506, + 189.00000000000003 + ], + "spans": [], + "index": 5 + } + ] + }, + { + "type": "text", + "bbox": [ + 111, + 193, + 391, + 205 + ], + "lines": [ + { + "bbox": [ + 109, + 192, + 391, + 207 + ], + "spans": [ + { + "bbox": [ + 109, + 192, + 286, + 207 + ], + "score": 1.0, + "content": "Now, if we take expectations conditioned on", + "type": "text" + }, + { + "bbox": [ + 286, + 194, + 299, + 205 + ], + "score": 0.89, + "content": "\\mathcal { F } _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 299, + 192, + 391, + 207 + ], + "score": 1.0, + "content": "and use (9), we obtain", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 6, + "bbox_fs": [ + 109, + 192, + 391, + 207 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 199, + 206, + 410, + 222 + ], + "lines": [ + { + "bbox": [ + 199, + 206, + 410, + 222 + ], + "spans": [ + { + "bbox": [ + 199, + 206, + 410, + 222 + ], + "score": 0.86, + "content": "{ \\mathbb E } \\big [ \\langle z ^ { k } - z ^ { * } , e ^ { k } \\rangle \\bigm | \\mathcal F _ { k } \\big ] = \\langle z ^ { k } - z ^ { * } , { \\mathbb E } [ e ^ { k } | \\mathcal F _ { k } ] \\rangle = 0 .", + "type": "interline_equation", + "image_path": "a109b0dd57cd47be02010d6f26dcc8ed46492611dc1ba10ccc11675e82d12b9d.jpg" + } + ] + } + ], + "index": 7, + "virtual_lines": [ + { + "bbox": [ + 199, + 206, + 410, + 222 + ], + "spans": [], + "index": 7 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 222, + 207, + 234 + ], + "lines": [ + { + "bbox": [ + 106, + 221, + 207, + 236 
+ ], + "spans": [ + { + "bbox": [ + 106, + 221, + 207, + 236 + ], + "score": 1.0, + "content": "Similarly, (9) also yields", + "type": "text" + } + ], + "index": 8 + } + ], + "index": 8, + "bbox_fs": [ + 106, + 221, + 207, + 236 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 171, + 235, + 438, + 251 + ], + "lines": [ + { + "bbox": [ + 171, + 235, + 438, + 251 + ], + "spans": [ + { + "bbox": [ + 171, + 235, + 438, + 251 + ], + "score": 0.87, + "content": "\\begin{array} { r } { \\mathbb { E } \\big [ \\langle \\epsilon ^ { k } , B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\rangle \\big | \\mathcal { F } _ { k } \\big ] = \\langle \\mathbb { E } [ \\epsilon ^ { k } | \\mathcal { F } _ { k } ] , B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\rangle = 0 . } \\end{array}", + "type": "interline_equation", + "image_path": "a524547846e253ac644a2637a2aa66698e5bd1ed9aa153aa81a39c224fa4c1e5.jpg" + } + ] + } + ], + "index": 9, + "virtual_lines": [ + { + "bbox": [ + 171, + 235, + 438, + 251 + ], + "spans": [], + "index": 9 + } + ] + }, + { + "type": "text", + "bbox": [ + 113, + 251, + 360, + 263 + ], + "lines": [ + { + "bbox": [ + 111, + 249, + 362, + 265 + ], + "spans": [ + { + "bbox": [ + 111, + 249, + 362, + 265 + ], + "score": 1.0, + "content": "hus, using (42) and (43) and taking expectations of (41) yields", + "type": "text" + } + ], + "index": 10 + } + ], + "index": 10, + "bbox_fs": [ + 111, + 249, + 362, + 265 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 116, + 262, + 494, + 298 + ], + "lines": [ + { + "bbox": [ + 116, + 262, + 494, + 298 + ], + "spans": [ + { + "bbox": [ + 116, + 262, + 494, + 298 + ], + "score": 0.92, + "content": "\\begin{array} { r l } & { \\mathbb { E } [ \\varphi _ { n + 1 , k } ( p ^ { k } ) - \\varphi _ { n + 1 , k } ( p ^ { * } ) \\mid \\mathcal { F } _ { k } ] \\ge \\rho _ { k } ( 1 - \\rho _ { k } L ) \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } - \\rho _ { k } ^ { 2 } L \\mathbb { E } [ \\| \\epsilon ^ { k } \\| 
^ { 2 } \\vert \\mathcal { F } _ { k } ] } \\\\ & { \\qquad \\ge \\rho _ { k } ( 1 - \\bar { \\rho } L ) \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } - \\rho _ { k } ^ { 2 } N L ( 1 + \\| B ( z ^ { k } ) \\| ^ { 2 } ) , } \\end{array}", + "type": "interline_equation", + "image_path": "fee85ee1b4f440fe08548cae6b0960b74f1d34127c39931c49651b80ddc21aad.jpg" + } + ] + } + ], + "index": 12, + "virtual_lines": [ + { + "bbox": [ + 116, + 262, + 494, + 274.0 + ], + "spans": [], + "index": 11 + }, + { + "bbox": [ + 116, + 274.0, + 494, + 286.0 + ], + "spans": [], + "index": 12 + }, + { + "bbox": [ + 116, + 286.0, + 494, + 298.0 + ], + "spans": [], + "index": 13 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 308, + 505, + 330 + ], + "lines": [ + { + "bbox": [ + 105, + 307, + 506, + 321 + ], + "spans": [ + { + "bbox": [ + 105, + 307, + 506, + 321 + ], + "score": 1.0, + "content": "where in the second inequality we used (12) and the noise variance bound (10). Recall from (12) that", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 318, + 159, + 331 + ], + "spans": [ + { + "bbox": [ + 106, + 319, + 154, + 331 + ], + "score": 0.9, + "content": "1 - \\overline { { \\rho } } L > 0", + "type": "inline_equation" + }, + { + "bbox": [ + 155, + 318, + 159, + 331 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 14.5, + "bbox_fs": [ + 105, + 307, + 506, + 331 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 336, + 193, + 347 + ], + "lines": [ + { + "bbox": [ + 106, + 335, + 194, + 348 + ], + "spans": [ + { + "bbox": [ + 106, + 335, + 194, + 348 + ], + "score": 1.0, + "content": "Next, we remark that", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 16, + "bbox_fs": [ + 106, + 335, + 194, + 348 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 149, + 347, + 462, + 380 + ], + "lines": [ + { + "bbox": [ + 149, + 347, + 462, + 380 + ], + "spans": [ + { + "bbox": [ + 149, + 347, + 
462, + 380 + ], + "score": 0.88, + "content": "\\begin{array} { r l } & { \\| B ( z ^ { k } ) \\| ^ { 2 } = \\| B ( z ^ { k } ) - B ( z ^ { * } ) + B ( z ^ { * } ) \\| ^ { 2 } } \\\\ & { \\qquad \\leq 2 L ^ { 2 } \\| z ^ { k } - z ^ { * } \\| ^ { 2 } + 2 \\| B ( z ^ { * } ) \\| ^ { 2 } \\leq 2 L ^ { 2 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + 2 \\| B ( z ^ { * } ) \\| ^ { 2 } . } \\end{array}", + "type": "interline_equation", + "image_path": "908750830336ba4611314739dfeef10958b281d08ec14b4328ed49b40e2d4aa7.jpg" + } + ] + } + ], + "index": 18, + "virtual_lines": [ + { + "bbox": [ + 149, + 347, + 462, + 358.0 + ], + "spans": [], + "index": 17 + }, + { + "bbox": [ + 149, + 358.0, + 462, + 369.0 + ], + "spans": [], + "index": 18 + }, + { + "bbox": [ + 149, + 369.0, + 462, + 380.0 + ], + "spans": [], + "index": 19 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 380, + 279, + 392 + ], + "lines": [ + { + "bbox": [ + 106, + 379, + 279, + 393 + ], + "spans": [ + { + "bbox": [ + 106, + 379, + 279, + 393 + ], + "score": 1.0, + "content": "Substituting this inequality into (44) yields", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 20, + "bbox_fs": [ + 106, + 379, + 279, + 393 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 117, + 391, + 477, + 426 + ], + "lines": [ + { + "bbox": [ + 117, + 391, + 477, + 426 + ], + "spans": [ + { + "bbox": [ + 117, + 391, + 477, + 426 + ], + "score": 0.9, + "content": "\\begin{array} { r l } & { \\mathbb { E } [ \\varphi _ { n + 1 , k } ( p ^ { k } ) - \\varphi _ { n + 1 , k } ( p ^ { * } ) | \\mathcal { F } _ { k } ] \\geq \\rho _ { k } ( 1 - \\overline { { \\rho } } L ) \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } } \\\\ & { \\qquad - 2 \\rho _ { k } ^ { 2 } N L ^ { 3 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - \\rho _ { k } ^ { 2 } N L ( 1 + 2 \\| B ( z ^ { * } ) \\| ^ { 2 } ) . 
} \\end{array}", + "type": "interline_equation", + "image_path": "c185e6b0a8b9cd53f00cbcc89d0f4e975fa5fb7ce030633f594ce590b87d0e11.jpg" + } + ] + } + ], + "index": 22, + "virtual_lines": [ + { + "bbox": [ + 117, + 391, + 477, + 402.6666666666667 + ], + "spans": [], + "index": 21 + }, + { + "bbox": [ + 117, + 402.6666666666667, + 477, + 414.33333333333337 + ], + "spans": [], + "index": 22 + }, + { + "bbox": [ + 117, + 414.33333333333337, + 477, + 426.00000000000006 + ], + "spans": [], + "index": 23 + } + ] + }, + { + "type": "text", + "bbox": [ + 104, + 430, + 492, + 443 + ], + "lines": [ + { + "bbox": [ + 105, + 429, + 494, + 444 + ], + "spans": [ + { + "bbox": [ + 105, + 429, + 252, + 444 + ], + "score": 1.0, + "content": "Finalizing the lower bound on the", + "type": "text" + }, + { + "bbox": [ + 252, + 432, + 264, + 442 + ], + "score": 0.84, + "content": "\\varphi _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 264, + 429, + 372, + 444 + ], + "score": 1.0, + "content": "-gap Summing (37) over", + "type": "text" + }, + { + "bbox": [ + 372, + 431, + 406, + 441 + ], + "score": 0.9, + "content": "i \\in 1 . . 
n", + "type": "inline_equation" + }, + { + "bbox": [ + 406, + 429, + 494, + 444 + ], + "score": 1.0, + "content": "and using (45) yields", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 24, + "bbox_fs": [ + 105, + 429, + 494, + 444 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 126, + 443, + 484, + 545 + ], + "lines": [ + { + "bbox": [ + 126, + 443, + 484, + 545 + ], + "spans": [ + { + "bbox": [ + 126, + 443, + 484, + 545 + ], + "score": 0.95, + "content": "\\begin{array} { r l r } { { \\mathbb { E } [ \\varphi _ { k } ( p ^ { k } ) - \\varphi _ { k } ( p ^ { * } ) | \\mathcal { F } _ { k } ] = \\sum _ { i = 1 } ^ { n + 1 } \\mathbb { E } [ \\varphi _ { i , k } ( p ^ { k } ) - \\varphi _ { i , k } ( p ^ { * } ) | \\mathcal { F } _ { k } ] } } \\\\ & { } & { \\geq \\frac { 7 } { 2 } \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\frac { 1 } { 2 \\tau } \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } } \\\\ & { } & { + \\rho _ { k } ( 1 - \\overline { { \\rho } } L ) \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } - 2 \\rho _ { k } ^ { 2 } N L ^ { 3 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } } \\\\ & { } & { - \\rho _ { k } ^ { 2 } N L ( 1 + 2 \\| B ( z ^ { * } ) \\| ^ { 2 } ) . 
} \\end{array}", + "type": "interline_equation", + "image_path": "9b68b90678ff9366588df480aa47f8d6e3c70a5dbfcde6d18ba165fc27734677.jpg" + } + ] + } + ], + "index": 26, + "virtual_lines": [ + { + "bbox": [ + 126, + 443, + 484, + 477.0 + ], + "spans": [], + "index": 25 + }, + { + "bbox": [ + 126, + 477.0, + 484, + 511.0 + ], + "spans": [], + "index": 26 + }, + { + "bbox": [ + 126, + 511.0, + 484, + 545.0 + ], + "spans": [], + "index": 27 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 552, + 387, + 565 + ], + "lines": [ + { + "bbox": [ + 105, + 552, + 388, + 567 + ], + "spans": [ + { + "bbox": [ + 105, + 552, + 388, + 567 + ], + "score": 1.0, + "content": "C.7 ESTABLISHING STOCHASTIC QUASI-FEJER MONOTONICITY", + "type": "text" + } + ], + "index": 28 + } + ], + "index": 28, + "bbox_fs": [ + 105, + 552, + 388, + 567 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 574, + 180, + 585 + ], + "lines": [ + { + "bbox": [ + 105, + 572, + 181, + 587 + ], + "spans": [ + { + "bbox": [ + 105, + 572, + 181, + 587 + ], + "score": 1.0, + "content": "Returning to (35),", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 29, + "bbox_fs": [ + 105, + 572, + 181, + 587 + ] + }, + { + "type": "text", + "bbox": [ + 109, + 587, + 488, + 614 + ], + "lines": [ + { + "bbox": [ + 123, + 587, + 486, + 602 + ], + "spans": [ + { + "bbox": [ + 123, + 587, + 486, + 602 + ], + "score": 0.87, + "content": "\\begin{array} { r } { \\mathbb { E } [ \\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } | \\mathcal { F } _ { k } ] \\le ( 1 + C _ { 1 } \\alpha _ { k } ^ { 2 } ) \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - 2 \\alpha _ { k } \\mathbb { E } [ \\varphi _ { k } ( p ^ { k } ) - \\varphi _ { k } ( p ^ { * } ) | \\mathcal { F } _ { k } ] + C _ { 2 } \\alpha _ { k } ^ { 2 } , } \\end{array}", + "type": "inline_equation", + "image_path": "877b9d10207f6d0171702fd8bbaf328e500b5e2cc0265b281cb989dfce63b177.jpg" + } + ], + "index": 30 + }, + { + "bbox": [ + 106, + 601, + 440, + 615 + ], + "spans": [ + 
{ + "bbox": [ + 106, + 601, + 440, + 615 + ], + "score": 1.0, + "content": "we may now substitute (46) for the expectation on the right-hand side. First, define", + "type": "text" + } + ], + "index": 31 + } + ], + "index": 30.5, + "bbox_fs": [ + 106, + 587, + 486, + 615 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 144, + 615, + 466, + 648 + ], + "lines": [ + { + "bbox": [ + 144, + 615, + 466, + 648 + ], + "spans": [ + { + "bbox": [ + 144, + 615, + 466, + 648 + ], + "score": 0.92, + "content": "T _ { k } \\doteq \\frac { \\tau } { \\overline { { \\rho } } } \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\frac { 1 } { \\overline { { \\rho } } \\tau } \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + 2 ( 1 - \\overline { { \\rho } } L ) \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } ,", + "type": "interline_equation", + "image_path": "11aa682640a3403534593a006f13059473eb882f9691f5197ad2881d9c045666.jpg" + } + ] + } + ], + "index": 33, + "virtual_lines": [ + { + "bbox": [ + 144, + 615, + 466, + 626.0 + ], + "spans": [], + "index": 32 + }, + { + "bbox": [ + 144, + 626.0, + 466, + 637.0 + ], + "spans": [], + "index": 33 + }, + { + "bbox": [ + 144, + 637.0, + 466, + 648.0 + ], + "spans": [], + "index": 34 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 649, + 284, + 660 + ], + "lines": [ + { + "bbox": [ + 106, + 648, + 285, + 662 + ], + "spans": [ + { + "bbox": [ + 106, + 648, + 285, + 662 + ], + "score": 1.0, + "content": "after which we may use (46) in (35) to yield", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 35, + "bbox_fs": [ + 106, + 648, + 285, + 662 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 114, + 660, + 480, + 676 + ], + "lines": [ + { + "bbox": [ + 114, + 660, + 480, + 676 + ], + "spans": [ + { + "bbox": [ + 114, + 660, + 480, + 676 + ], + "score": 0.88, + "content": "\\begin{array} { r } { \\mathbb { E } [ \\| p ^ { k + 1 } - p ^ { * } 
\\| ^ { 2 } | \\mathcal { F } _ { k } ] \\le \\big ( 1 + C _ { 1 } \\alpha _ { k } ^ { 2 } + C _ { 3 } \\alpha _ { k } \\rho _ { k } ^ { 2 } \\big ) \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - \\alpha _ { k } \\rho _ { k } T _ { k } + C _ { 2 } \\alpha _ { k } ^ { 2 } + C _ { 4 } \\alpha _ { k } \\rho _ { k } ^ { 2 } } \\end{array}", + "type": "interline_equation", + "image_path": "51f6e4ea6827b0fab220fa09d144f61f5dc0d4e9dd17ff0cfa53eada7217961b.jpg" + } + ] + } + ], + "index": 36, + "virtual_lines": [ + { + "bbox": [ + 114, + 660, + 480, + 676 + ], + "spans": [], + "index": 36 + } + ] + }, + { + "type": "text", + "bbox": [ + 116, + 677, + 345, + 689 + ], + "lines": [ + { + "bbox": [ + 114, + 676, + 346, + 690 + ], + "spans": [ + { + "bbox": [ + 114, + 676, + 133, + 690 + ], + "score": 1.0, + "content": "here", + "type": "text" + }, + { + "bbox": [ + 133, + 678, + 145, + 688 + ], + "score": 0.88, + "content": "C _ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 146, + 676, + 164, + 690 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 164, + 677, + 177, + 688 + ], + "score": 0.88, + "content": "C _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 177, + 676, + 346, + 690 + ], + "score": 1.0, + "content": "are defined as before in (33) and (34) and", + "type": "text" + } + ], + "index": 37 + } + ], + "index": 37, + "bbox_fs": [ + 114, + 676, + 346, + 690 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 245, + 689, + 366, + 720 + ], + "lines": [ + { + "bbox": [ + 245, + 689, + 366, + 720 + ], + "spans": [ + { + "bbox": [ + 245, + 689, + 366, + 720 + ], + "score": 0.89, + "content": "\\begin{array} { l } { C _ { 3 } = 4 N L ^ { 3 } } \\\\ { C _ { 4 } = 2 N L ( 1 + 2 \\| B ( z ^ { \\ast } ) \\| ^ { 2 } ) . 
} \\end{array}", + "type": "interline_equation", + "image_path": "8c1a47741607a1b552df795163176d43cc5e21fb70ac1f4a33110458fdcfdf26.jpg" + } + ] + } + ], + "index": 38.5, + "virtual_lines": [ + { + "bbox": [ + 245, + 689, + 366, + 704.5 + ], + "spans": [], + "index": 38 + }, + { + "bbox": [ + 245, + 704.5, + 366, + 720.0 + ], + "spans": [], + "index": 39 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 720, + 261, + 732 + ], + "lines": [ + { + "bbox": [ + 106, + 720, + 262, + 734 + ], + "spans": [ + { + "bbox": [ + 106, + 720, + 262, + 734 + ], + "score": 1.0, + "content": "This completes the proof of Lemma 3.", + "type": "text" + } + ], + "index": 40 + } + ], + "index": 40, + "bbox_fs": [ + 106, + 720, + 262, + 734 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "title", + "bbox": [ + 108, + 82, + 246, + 94 + ], + "lines": [ + { + "bbox": [ + 105, + 81, + 247, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 247, + 95 + ], + "score": 1.0, + "content": "C.8 A CONVERGENCE LEMMA", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 107, + 103, + 504, + 137 + ], + "lines": [ + { + "bbox": [ + 105, + 102, + 506, + 117 + ], + "spans": [ + { + "bbox": [ + 105, + 102, + 506, + 117 + ], + "score": 1.0, + "content": "Before establishing almost-sure convergence, we need the following lemma to derive convergence", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 115, + 505, + 126 + ], + "spans": [ + { + "bbox": [ + 106, + 115, + 252, + 126 + ], + "score": 1.0, + "content": "of the iterates from convergence of", + "type": "text" + }, + { + "bbox": [ + 252, + 115, + 264, + 126 + ], + "score": 0.88, + "content": "T _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 264, + 115, + 505, + 126 + ], + "score": 1.0, + "content": "defined above. 
Note that a more elaborate result would be", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 125, + 274, + 138 + ], + "spans": [ + { + "bbox": [ + 105, + 125, + 274, + 138 + ], + "score": 1.0, + "content": "needed in an infinite-dimensional setting.", + "type": "text" + } + ], + "index": 3 + } + ], + "index": 2 + }, + { + "type": "text", + "bbox": [ + 105, + 140, + 503, + 168 + ], + "lines": [ + { + "bbox": [ + 104, + 136, + 505, + 158 + ], + "spans": [ + { + "bbox": [ + 104, + 136, + 275, + 158 + ], + "score": 1.0, + "content": "Lemma 4. For deterministic sequences", + "type": "text" + }, + { + "bbox": [ + 275, + 140, + 335, + 153 + ], + "score": 0.86, + "content": "z ^ { k } \\in \\mathbb { R } ^ { ( n + 1 ) d }", + "type": "inline_equation" + }, + { + "bbox": [ + 335, + 136, + 339, + 158 + ], + "score": 1.0, + "content": ",", + "type": "text" + }, + { + "bbox": [ + 339, + 140, + 408, + 154 + ], + "score": 0.9, + "content": "\\{ ( w _ { i } ^ { k } ) _ { i = 1 } ^ { n + 1 } \\} \\ \\in \\ { \\mathcal { P } }", + "type": "inline_equation" + }, + { + "bbox": [ + 409, + 136, + 433, + 158 + ], + "score": 1.0, + "content": ", and", + "type": "text" + }, + { + "bbox": [ + 433, + 140, + 505, + 155 + ], + "score": 0.91, + "content": "\\{ ( x _ { i } ^ { k } , y _ { i } ^ { k } ) _ { i = 1 } ^ { n + 1 } \\} \\in", + "type": "inline_equation" + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 146, + 374, + 175 + ], + "spans": [ + { + "bbox": [ + 107, + 153, + 144, + 165 + ], + "score": 0.88, + "content": "\\mathbb { R } ^ { 2 ( n + 1 ) d }", + "type": "inline_equation" + }, + { + "bbox": [ + 145, + 146, + 201, + 175 + ], + "score": 1.0, + "content": ", suppose that", + "type": "text" + }, + { + "bbox": [ + 201, + 154, + 253, + 167 + ], + "score": 0.96, + "content": "y _ { i } ^ { k } \\in A _ { i } ( x _ { i } ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 254, + 146, + 306, + 175 + ], + "score": 1.0, + "content": "for i ∈ 1..n,", + 
"type": "text" + }, + { + "bbox": [ + 307, + 154, + 366, + 168 + ], + "score": 0.67, + "content": "\\textstyle \\sum _ { i = 1 } ^ { n + 1 } w _ { i } ^ { k } = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 366, + 146, + 374, + 175 + ], + "score": 1.0, + "content": "i=,", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 4.5 + }, + { + "type": "interline_equation", + "bbox": [ + 164, + 172, + 446, + 206 + ], + "lines": [ + { + "bbox": [ + 164, + 172, + 446, + 206 + ], + "spans": [ + { + "bbox": [ + 164, + 172, + 446, + 206 + ], + "score": 0.92, + "content": "\\xi _ { 1 } \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\xi _ { 2 } \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + \\xi _ { 3 } \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } \\to 0", + "type": "interline_equation", + "image_path": "e15e1a65faa6eed30bf8fd1c364850ce6885980aea45d59c103767f4fadedcc1.jpg" + } + ] + } + ], + "index": 7, + "virtual_lines": [ + { + "bbox": [ + 164, + 172, + 446, + 183.33333333333334 + ], + "spans": [], + "index": 6 + }, + { + "bbox": [ + 164, + 183.33333333333334, + 446, + 194.66666666666669 + ], + "spans": [], + "index": 7 + }, + { + "bbox": [ + 164, + 194.66666666666669, + 446, + 206.00000000000003 + ], + "spans": [], + "index": 8 + } + ] + }, + { + "type": "text", + "bbox": [ + 104, + 212, + 493, + 227 + ], + "lines": [ + { + "bbox": [ + 103, + 210, + 491, + 228 + ], + "spans": [ + { + "bbox": [ + 103, + 210, + 152, + 228 + ], + "score": 1.0, + "content": "for scalars", + "type": "text" + }, + { + "bbox": [ + 152, + 213, + 207, + 225 + ], + "score": 0.92, + "content": "\\xi _ { 1 } , \\xi _ { 2 } , \\xi _ { 3 } > 0", + "type": "inline_equation" + }, + { + "bbox": [ + 207, + 210, + 227, + 228 + ], + "score": 1.0, + "content": ", and", + "type": "text" + }, + { + "bbox": [ + 227, + 212, + 438, + 226 + ], + "score": 0.87, + "content": "p ^ { k } \\doteq ( z ^ { k } , w _ { 1 } ^ { k } , 
\\ldots , w _ { n + 1 } ^ { k } ) \\to \\hat { p } \\doteq ( \\hat { z } , \\hat { w } _ { 1 } , \\ldots , \\hat { w } _ { n + 1 } )", + "type": "inline_equation" + }, + { + "bbox": [ + 439, + 210, + 466, + 228 + ], + "score": 1.0, + "content": ". Then", + "type": "text" + }, + { + "bbox": [ + 466, + 213, + 491, + 225 + ], + "score": 0.9, + "content": "\\hat { p } \\in \\mathcal S", + "type": "inline_equation" + } + ], + "index": 9 + } + ], + "index": 9 + }, + { + "type": "text", + "bbox": [ + 106, + 239, + 505, + 289 + ], + "lines": [ + { + "bbox": [ + 105, + 240, + 506, + 255 + ], + "spans": [ + { + "bbox": [ + 105, + 240, + 172, + 255 + ], + "score": 1.0, + "content": "Proof. Fix any", + "type": "text" + }, + { + "bbox": [ + 172, + 241, + 236, + 254 + ], + "score": 0.93, + "content": "i \\in \\{ 1 , \\ldots , n \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 236, + 240, + 271, + 255 + ], + "score": 1.0, + "content": ". Since", + "type": "text" + }, + { + "bbox": [ + 271, + 241, + 343, + 253 + ], + "score": 0.92, + "content": "\\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| \\to 0", + "type": "inline_equation" + }, + { + "bbox": [ + 344, + 240, + 398, + 255 + ], + "score": 1.0, + "content": "by (50) and", + "type": "text" + }, + { + "bbox": [ + 399, + 241, + 443, + 253 + ], + "score": 0.92, + "content": "w _ { i } ^ { k } \\hat { w } _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 443, + 240, + 506, + 255 + ], + "score": 1.0, + "content": ", we also have", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 251, + 503, + 268 + ], + "spans": [ + { + "bbox": [ + 107, + 253, + 144, + 266 + ], + "score": 0.92, + "content": "y _ { i } ^ { k } \\hat { w } _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 144, + 251, + 280, + 268 + ], + "score": 1.0, + "content": ". 
Similarly, (50) also implies that", + "type": "text" + }, + { + "bbox": [ + 280, + 254, + 344, + 266 + ], + "score": 0.91, + "content": "\\lVert z ^ { k } - x _ { i } ^ { k } \\rVert \\to 0", + "type": "inline_equation" + }, + { + "bbox": [ + 344, + 251, + 382, + 268 + ], + "score": 1.0, + "content": ", so from", + "type": "text" + }, + { + "bbox": [ + 382, + 253, + 414, + 264 + ], + "score": 0.91, + "content": "z ^ { k } \\hat { z }", + "type": "inline_equation" + }, + { + "bbox": [ + 415, + 251, + 470, + 268 + ], + "score": 1.0, + "content": "we also have", + "type": "text" + }, + { + "bbox": [ + 470, + 253, + 503, + 266 + ], + "score": 0.93, + "content": "x _ { i } ^ { k } \\hat { z }", + "type": "inline_equation" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 264, + 506, + 279 + ], + "spans": [ + { + "bbox": [ + 105, + 264, + 132, + 279 + ], + "score": 1.0, + "content": "Since", + "type": "text" + }, + { + "bbox": [ + 132, + 265, + 186, + 278 + ], + "score": 0.92, + "content": "y _ { i } ^ { k } \\in A _ { i } ( x _ { i } ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 186, + 264, + 205, + 279 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 206, + 265, + 283, + 278 + ], + "score": 0.92, + "content": "( x _ { i } ^ { k } , y _ { i } ^ { k } ) ( \\hat { z } , \\hat { w } _ { i } )", + "type": "inline_equation" + }, + { + "bbox": [ + 284, + 264, + 506, + 279 + ], + "score": 1.0, + "content": ", (Bauschke & Combettes, 2017, Prop. 20.37) implies", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 276, + 424, + 290 + ], + "spans": [ + { + "bbox": [ + 107, + 277, + 154, + 289 + ], + "score": 0.92, + "content": "\\hat { w } _ { i } \\in A _ { i } ( \\hat { z } )", + "type": "inline_equation" + }, + { + "bbox": [ + 154, + 276, + 183, + 290 + ], + "score": 1.0, + "content": ". 
Since", + "type": "text" + }, + { + "bbox": [ + 183, + 278, + 188, + 287 + ], + "score": 0.74, + "content": "i", + "type": "inline_equation" + }, + { + "bbox": [ + 188, + 276, + 385, + 290 + ], + "score": 1.0, + "content": "was arbitrary, the preceding conclusions hold for", + "type": "text" + }, + { + "bbox": [ + 386, + 277, + 419, + 288 + ], + "score": 0.88, + "content": "i \\in 1 . . n", + "type": "inline_equation" + }, + { + "bbox": [ + 419, + 276, + 424, + 290 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 13 + } + ], + "index": 11.5 + }, + { + "type": "text", + "bbox": [ + 106, + 293, + 506, + 329 + ], + "lines": [ + { + "bbox": [ + 103, + 287, + 508, + 310 + ], + "spans": [ + { + "bbox": [ + 103, + 287, + 219, + 310 + ], + "score": 1.0, + "content": "Now, (50) also implies that", + "type": "text" + }, + { + "bbox": [ + 219, + 293, + 312, + 306 + ], + "score": 0.93, + "content": "\\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| \\to 0", + "type": "inline_equation" + }, + { + "bbox": [ + 312, + 287, + 384, + 310 + ], + "score": 1.0, + "content": ". Therefore, since", + "type": "text" + }, + { + "bbox": [ + 385, + 293, + 446, + 306 + ], + "score": 0.93, + "content": "w _ { n + 1 } ^ { k } \\to \\hat { w } _ { n + 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 447, + 287, + 508, + 310 + ], + "score": 1.0, + "content": ", we also have", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 304, + 507, + 320 + ], + "spans": [ + { + "bbox": [ + 107, + 306, + 174, + 318 + ], + "score": 0.9, + "content": "B ( z ^ { k } ) \\to \\hat { w } _ { n + 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 174, + 304, + 276, + 320 + ], + "score": 1.0, + "content": ". 
Much as before, since", + "type": "text" + }, + { + "bbox": [ + 276, + 306, + 383, + 318 + ], + "score": 0.91, + "content": "( z ^ { k } , B ( z ^ { k } ) ) ( \\hat { z } , \\hat { w } _ { n + 1 } )", + "type": "inline_equation" + }, + { + "bbox": [ + 383, + 304, + 507, + 320 + ], + "score": 1.0, + "content": ", we may apply (Bauschke &", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 316, + 377, + 330 + ], + "spans": [ + { + "bbox": [ + 106, + 316, + 315, + 330 + ], + "score": 1.0, + "content": "Combettes, 2017, Prop. 20.37) to conclude that that", + "type": "text" + }, + { + "bbox": [ + 315, + 318, + 372, + 329 + ], + "score": 0.92, + "content": "\\hat { w } _ { n + 1 } = B ( \\hat { z } )", + "type": "inline_equation" + }, + { + "bbox": [ + 372, + 316, + 377, + 330 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 15 + }, + { + "type": "text", + "bbox": [ + 108, + 334, + 504, + 360 + ], + "lines": [ + { + "bbox": [ + 105, + 332, + 506, + 348 + ], + "spans": [ + { + "bbox": [ + 105, + 332, + 221, + 348 + ], + "score": 1.0, + "content": "Since the linear subspace", + "type": "text" + }, + { + "bbox": [ + 222, + 335, + 231, + 344 + ], + "score": 0.81, + "content": "\\mathcal { P }", + "type": "inline_equation" + }, + { + "bbox": [ + 231, + 332, + 415, + 348 + ], + "score": 1.0, + "content": "defined in (6) must be closed, the limit", + "type": "text" + }, + { + "bbox": [ + 416, + 334, + 490, + 347 + ], + "score": 0.91, + "content": "\\left( \\hat { z } , \\hat { w } _ { 1 } , \\dots , \\hat { w } _ { n + 1 } \\right)", + "type": "inline_equation" + }, + { + "bbox": [ + 491, + 332, + 506, + 348 + ], + "score": 1.0, + "content": "of", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 107, + 340, + 367, + 366 + ], + "spans": [ + { + "bbox": [ + 107, + 346, + 218, + 360 + ], + "score": 0.93, + "content": "\\{ ( z ^ { k } , w _ { 1 } ^ { k } , \\ldots , w _ { n + 1 } ^ { k } ) \\} 
\\subset \\mathcal { P }", + "type": "inline_equation" + }, + { + "bbox": [ + 218, + 340, + 263, + 366 + ], + "score": 1.0, + "content": "must be in", + "type": "text" + }, + { + "bbox": [ + 263, + 348, + 272, + 358 + ], + "score": 0.81, + "content": "\\mathcal { P }", + "type": "inline_equation" + }, + { + "bbox": [ + 272, + 340, + 302, + 366 + ], + "score": 1.0, + "content": ", hence", + "type": "text" + }, + { + "bbox": [ + 302, + 345, + 359, + 360 + ], + "score": 0.93, + "content": "\\textstyle \\sum _ { i = 1 } ^ { n + 1 } { \\hat { w } } _ { i } = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 360, + 340, + 367, + 366 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 17.5 + }, + { + "type": "text", + "bbox": [ + 106, + 363, + 506, + 390 + ], + "lines": [ + { + "bbox": [ + 165, + 363, + 506, + 398 + ], + "spans": [ + { + "bbox": [ + 165, + 363, + 173, + 398 + ], + "score": 1.0, + "content": "nt .", + "type": "text" + }, + { + "bbox": [ + 173, + 364, + 271, + 376 + ], + "score": 0.91, + "content": "\\hat { p } = ( \\hat { z } , \\hat { w } _ { 1 } , \\dots , \\hat { w } _ { n + 1 } )", + "type": "inline_equation" + }, + { + "bbox": [ + 271, + 363, + 308, + 398 + ], + "score": 1.0, + "content": "satisfies tions defi", + "type": "text" + }, + { + "bbox": [ + 309, + 364, + 360, + 376 + ], + "score": 0.93, + "content": "\\hat { w } _ { i } \\in A _ { i } ( \\hat { z } )", + "type": "inline_equation" + }, + { + "bbox": [ + 360, + 363, + 378, + 398 + ], + "score": 1.0, + "content": "for ship", + "type": "text" + }, + { + "bbox": [ + 378, + 365, + 415, + 375 + ], + "score": 0.71, + "content": "i \\in 1 . . 
n", + "type": "inline_equation" + }, + { + "bbox": [ + 416, + 363, + 421, + 398 + ], + "score": 1.0, + "content": ",", + "type": "text" + }, + { + "bbox": [ + 421, + 364, + 482, + 376 + ], + "score": 0.91, + "content": "\\hat { w } _ { n + 1 } = B ( \\hat { z } )", + "type": "inline_equation" + }, + { + "bbox": [ + 483, + 363, + 506, + 398 + ], + "score": 1.0, + "content": ", and", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 375, + 472, + 390 + ], + "spans": [ + { + "bbox": [ + 106, + 375, + 164, + 390 + ], + "score": 0.92, + "content": "\\textstyle \\sum _ { i = 1 } ^ { n + 1 } { \\hat { w } } _ { i } = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 388, + 378, + 396, + 388 + ], + "score": 0.8, + "content": "s", + "type": "inline_equation" + }, + { + "bbox": [ + 447, + 378, + 472, + 389 + ], + "score": 0.89, + "content": "\\hat { p } \\in \\mathcal S", + "type": "inline_equation" + } + ], + "index": 19 + } + ], + "index": 19.5 + }, + { + "type": "title", + "bbox": [ + 108, + 402, + 296, + 415 + ], + "lines": [ + { + "bbox": [ + 106, + 402, + 298, + 416 + ], + "spans": [ + { + "bbox": [ + 106, + 402, + 298, + 416 + ], + "score": 1.0, + "content": "C.9 FINISHING THE PROOF OF THEOREM 1", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 21 + }, + { + "type": "text", + "bbox": [ + 106, + 423, + 506, + 458 + ], + "lines": [ + { + "bbox": [ + 106, + 423, + 506, + 438 + ], + "spans": [ + { + "bbox": [ + 106, + 423, + 134, + 438 + ], + "score": 1.0, + "content": "Given", + "type": "text" + }, + { + "bbox": [ + 134, + 423, + 190, + 437 + ], + "score": 0.92, + "content": "\\textstyle \\sum _ { k } \\alpha _ { k } ^ { 2 } < \\infty", + "type": "inline_equation" + }, + { + "bbox": [ + 191, + 423, + 213, + 438 + ], + "score": 1.0, + "content": ", and", + "type": "text" + }, + { + "bbox": [ + 214, + 423, + 275, + 437 + ], + "score": 0.92, + "content": "\\sum \\alpha _ { k } \\rho _ { k } ^ { 2 } < \\infty", + "type": "inline_equation" 
+ }, + { + "bbox": [ + 275, + 423, + 506, + 438 + ], + "score": 1.0, + "content": ", (47) satisfies the conditions of Stochastic Quasi-Fejer", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 434, + 504, + 448 + ], + "spans": [ + { + "bbox": [ + 105, + 434, + 459, + 448 + ], + "score": 1.0, + "content": "Monotonicity as given in Lemma 2. By applying Lemma 2, we conclude that there exist", + "type": "text" + }, + { + "bbox": [ + 459, + 435, + 504, + 446 + ], + "score": 0.91, + "content": "\\Omega _ { 1 } , \\Omega _ { 2 } , \\Omega _ { 3 }", + "type": "inline_equation" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 446, + 263, + 458 + ], + "spans": [ + { + "bbox": [ + 106, + 446, + 145, + 458 + ], + "score": 1.0, + "content": "such that", + "type": "text" + }, + { + "bbox": [ + 145, + 446, + 188, + 458 + ], + "score": 0.93, + "content": "P [ \\Omega _ { i } ] = 1", + "type": "inline_equation" + }, + { + "bbox": [ + 189, + 446, + 203, + 458 + ], + "score": 1.0, + "content": "for", + "type": "text" + }, + { + "bbox": [ + 204, + 446, + 245, + 457 + ], + "score": 0.91, + "content": "i = { 1 , 2 , 3 }", + "type": "inline_equation" + }, + { + "bbox": [ + 246, + 446, + 263, + 458 + ], + "score": 1.0, + "content": "and", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 23 + }, + { + "type": "text", + "bbox": [ + 131, + 467, + 199, + 479 + ], + "lines": [ + { + "bbox": [ + 129, + 465, + 199, + 480 + ], + "spans": [ + { + "bbox": [ + 129, + 465, + 169, + 480 + ], + "score": 1.0, + "content": "1. 
for all", + "type": "text" + }, + { + "bbox": [ + 169, + 468, + 199, + 478 + ], + "score": 0.79, + "content": "v \\in \\Omega _ { 1 }", + "type": "inline_equation" + } + ], + "index": 25 + } + ], + "index": 25 + }, + { + "type": "interline_equation", + "bbox": [ + 277, + 484, + 368, + 517 + ], + "lines": [ + { + "bbox": [ + 277, + 484, + 368, + 517 + ], + "spans": [ + { + "bbox": [ + 277, + 484, + 368, + 517 + ], + "score": 0.93, + "content": "\\sum _ { k = 1 } ^ { \\infty } \\alpha _ { k } \\rho _ { k } T _ { k } ( v ) < \\infty ,", + "type": "interline_equation", + "image_path": "32b73f4fcaa1a2e08db69eefb664ad651527a0b201d7a4b8dac9d3b578f8d85d.jpg" + } + ] + } + ], + "index": 26.5, + "virtual_lines": [ + { + "bbox": [ + 277, + 484, + 368, + 500.5 + ], + "spans": [], + "index": 26 + }, + { + "bbox": [ + 277, + 500.5, + 368, + 517.0 + ], + "spans": [], + "index": 27 + } + ] + }, + { + "type": "text", + "bbox": [ + 130, + 525, + 505, + 539 + ], + "lines": [ + { + "bbox": [ + 128, + 525, + 507, + 540 + ], + "spans": [ + { + "bbox": [ + 128, + 525, + 168, + 540 + ], + "score": 1.0, + "content": "2. 
for all", + "type": "text" + }, + { + "bbox": [ + 168, + 527, + 198, + 538 + ], + "score": 0.92, + "content": "v \\in \\Omega _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 199, + 525, + 218, + 540 + ], + "score": 1.0, + "content": ", and", + "type": "text" + }, + { + "bbox": [ + 218, + 527, + 248, + 538 + ], + "score": 0.83, + "content": "p ^ { * } \\in { \\mathcal { S } }", + "type": "inline_equation" + }, + { + "bbox": [ + 249, + 525, + 252, + 540 + ], + "score": 1.0, + "content": ",", + "type": "text" + }, + { + "bbox": [ + 252, + 525, + 306, + 539 + ], + "score": 0.86, + "content": "\\| p ^ { k } ( v ) - p ^ { * } \\|", + "type": "inline_equation" + }, + { + "bbox": [ + 307, + 525, + 507, + 540 + ], + "score": 1.0, + "content": "converges to a finite nonnegative random-variable,", + "type": "text" + } + ], + "index": 28 + } + ], + "index": 28 + }, + { + "type": "text", + "bbox": [ + 130, + 542, + 301, + 555 + ], + "lines": [ + { + "bbox": [ + 129, + 541, + 301, + 555 + ], + "spans": [ + { + "bbox": [ + 129, + 541, + 169, + 555 + ], + "score": 1.0, + "content": "3. 
for all", + "type": "text" + }, + { + "bbox": [ + 169, + 541, + 227, + 555 + ], + "score": 0.65, + "content": "v \\in \\Omega _ { 3 } , p ^ { k } ( v )", + "type": "inline_equation" + }, + { + "bbox": [ + 227, + 541, + 301, + 555 + ], + "score": 1.0, + "content": "remains bounded.", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 29 + }, + { + "type": "text", + "bbox": [ + 105, + 563, + 504, + 576 + ], + "lines": [ + { + "bbox": [ + 104, + 559, + 507, + 580 + ], + "spans": [ + { + "bbox": [ + 104, + 559, + 131, + 580 + ], + "score": 1.0, + "content": "Since", + "type": "text" + }, + { + "bbox": [ + 132, + 563, + 204, + 576 + ], + "score": 0.92, + "content": "\\textstyle \\sum _ { k = 1 } ^ { \\infty } \\alpha _ { k } \\rho _ { k } = \\infty", + "type": "inline_equation" + }, + { + "bbox": [ + 205, + 559, + 303, + 580 + ], + "score": 1.0, + "content": ", (51) implies that for all", + "type": "text" + }, + { + "bbox": [ + 303, + 564, + 333, + 575 + ], + "score": 0.92, + "content": "v \\in \\Omega _ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 333, + 559, + 441, + 580 + ], + "score": 1.0, + "content": "there exists a subsequence", + "type": "text" + }, + { + "bbox": [ + 442, + 564, + 465, + 576 + ], + "score": 0.94, + "content": "q _ { k } ( v )", + "type": "inline_equation" + }, + { + "bbox": [ + 465, + 559, + 507, + 580 + ], + "score": 1.0, + "content": "such that", + "type": "text" + } + ], + "index": 30 + } + ], + "index": 30 + }, + { + "type": "interline_equation", + "bbox": [ + 280, + 582, + 331, + 596 + ], + "lines": [ + { + "bbox": [ + 280, + 582, + 331, + 596 + ], + "spans": [ + { + "bbox": [ + 280, + 582, + 331, + 596 + ], + "score": 0.91, + "content": "T _ { q _ { k } ( v ) } \\to 0 .", + "type": "interline_equation", + "image_path": "90c3ec9255f814d47099fbff42eb94da7804c7fca8841b75d2215bfbda6f3802.jpg" + } + ] + } + ], + "index": 31, + "virtual_lines": [ + { + "bbox": [ + 280, + 582, + 331, + 596 + ], + "spans": [], + "index": 31 
+ } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 608, + 506, + 657 + ], + "lines": [ + { + "bbox": [ + 105, + 608, + 506, + 622 + ], + "spans": [ + { + "bbox": [ + 105, + 608, + 122, + 622 + ], + "score": 1.0, + "content": "Let", + "type": "text" + }, + { + "bbox": [ + 122, + 609, + 204, + 621 + ], + "score": 0.92, + "content": "\\Omega ^ { \\prime } = \\Omega _ { 1 } \\cap \\Omega _ { 2 } \\cap \\Omega _ { 3 }", + "type": "inline_equation" + }, + { + "bbox": [ + 204, + 608, + 259, + 622 + ], + "score": 1.0, + "content": "and note that", + "type": "text" + }, + { + "bbox": [ + 259, + 609, + 302, + 621 + ], + "score": 0.93, + "content": "P [ \\Omega ^ { \\prime } ] = 1", + "type": "inline_equation" + }, + { + "bbox": [ + 302, + 608, + 338, + 622 + ], + "score": 1.0, + "content": ". Choose", + "type": "text" + }, + { + "bbox": [ + 338, + 609, + 366, + 620 + ], + "score": 0.91, + "content": "v \\in \\Omega ^ { \\prime }", + "type": "inline_equation" + }, + { + "bbox": [ + 367, + 608, + 395, + 622 + ], + "score": 1.0, + "content": ". 
Since", + "type": "text" + }, + { + "bbox": [ + 396, + 608, + 419, + 621 + ], + "score": 0.93, + "content": "p ^ { k } ( v )", + "type": "inline_equation" + }, + { + "bbox": [ + 420, + 608, + 506, + 622 + ], + "score": 1.0, + "content": "remains bounded, so", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 619, + 506, + 635 + ], + "spans": [ + { + "bbox": [ + 105, + 619, + 127, + 635 + ], + "score": 1.0, + "content": "does", + "type": "text" + }, + { + "bbox": [ + 128, + 621, + 165, + 633 + ], + "score": 0.9, + "content": "p ^ { q _ { k } ( v ) } ( v )", + "type": "inline_equation" + }, + { + "bbox": [ + 165, + 619, + 181, + 635 + ], + "score": 1.0, + "content": "for", + "type": "text" + }, + { + "bbox": [ + 181, + 622, + 204, + 634 + ], + "score": 0.91, + "content": "q _ { k } ( v )", + "type": "inline_equation" + }, + { + "bbox": [ + 205, + 619, + 428, + 635 + ], + "score": 1.0, + "content": "defined above in (52). Thus there exists a subsequence", + "type": "text" + }, + { + "bbox": [ + 428, + 621, + 487, + 634 + ], + "score": 0.93, + "content": "r _ { k } ( v ) \\subseteq q _ { k } ( v )", + "type": "inline_equation" + }, + { + "bbox": [ + 487, + 619, + 506, + 635 + ], + "score": 1.0, + "content": "and", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 107, + 630, + 508, + 649 + ], + "spans": [ + { + "bbox": [ + 107, + 633, + 171, + 646 + ], + "score": 0.9, + "content": "\\hat { p } ( v ) \\in \\mathbb { R } ^ { ( n + 2 ) d }", + "type": "inline_equation" + }, + { + "bbox": [ + 171, + 630, + 211, + 649 + ], + "score": 1.0, + "content": "such that", + "type": "text" + }, + { + "bbox": [ + 212, + 633, + 282, + 646 + ], + "score": 0.93, + "content": "p ^ { r _ { k } ( v ) } ( v ) \\hat { p } ( v )", + "type": "inline_equation" + }, + { + "bbox": [ + 283, + 630, + 328, + 649 + ], + "score": 1.0, + "content": ". 
But since", + "type": "text" + }, + { + "bbox": [ + 328, + 634, + 374, + 647 + ], + "score": 0.93, + "content": "T _ { q _ { k } ( v ) } \\to 0", + "type": "inline_equation" + }, + { + "bbox": [ + 375, + 630, + 456, + 649 + ], + "score": 1.0, + "content": ", it also follows that", + "type": "text" + }, + { + "bbox": [ + 456, + 635, + 502, + 647 + ], + "score": 0.92, + "content": "T _ { r _ { k } ( v ) } \\to 0", + "type": "inline_equation" + }, + { + "bbox": [ + 503, + 630, + 508, + 649 + ], + "score": 1.0, + "content": ",", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 104, + 643, + 137, + 658 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 137, + 658 + ], + "score": 1.0, + "content": "that is,", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 33.5 + }, + { + "type": "interline_equation", + "bbox": [ + 115, + 664, + 495, + 716 + ], + "lines": [ + { + "bbox": [ + 115, + 664, + 495, + 716 + ], + "spans": [ + { + "bbox": [ + 115, + 664, + 495, + 716 + ], + "score": 0.94, + "content": "\\begin{array} { r l r } { { \\frac { \\tau } { \\rho } \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { r _ { k } ( v ) } ( v ) - w _ { i } ^ { r _ { k } ( v ) } ( v ) \\| ^ { 2 } + \\frac { 1 } { \\rho \\tau } \\sum _ { i = 1 } ^ { n } \\| z ^ { r _ { k } ( v ) } ( v ) - x _ { i } ^ { r _ { k } ( v ) } ( v ) \\| ^ { 2 } } } \\\\ & { } & { \\qquad + 2 ( 1 - \\overline { { \\rho } } L ) \\| B ( z ^ { r _ { k } ( v ) } ( v ) ) - w _ { n + 1 } ^ { r _ { k } ( v ) } ( v ) \\| ^ { 2 } \\to 0 . 
} \\end{array}", + "type": "interline_equation", + "image_path": "7d02e07e324d8a9aeee14ca2095c1217ccd896ed9199f904d123a88f63dde927.jpg" + } + ] + } + ], + "index": 37, + "virtual_lines": [ + { + "bbox": [ + 115, + 664, + 495, + 681.3333333333334 + ], + "spans": [], + "index": 36 + }, + { + "bbox": [ + 115, + 681.3333333333334, + 495, + 698.6666666666667 + ], + "spans": [], + "index": 37 + }, + { + "bbox": [ + 115, + 698.6666666666667, + 495, + 716.0000000000001 + ], + "spans": [], + "index": 38 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 720, + 283, + 732 + ], + "lines": [ + { + "bbox": [ + 106, + 720, + 283, + 733 + ], + "spans": [ + { + "bbox": [ + 106, + 720, + 242, + 733 + ], + "score": 1.0, + "content": "We then have from Lemma 4 that", + "type": "text" + }, + { + "bbox": [ + 242, + 720, + 280, + 732 + ], + "score": 0.92, + "content": "\\hat { p } ( v ) \\in S", + "type": "inline_equation" + }, + { + "bbox": [ + 280, + 720, + 283, + 733 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 39 + } + ], + "index": 39 + } + ], + "page_idx": 22, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 106, + 26, + 308, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 760 + ], + "lines": [ + { + "bbox": [ + 298, + 750, + 313, + 764 + ], + "spans": [ + { + "bbox": [ + 298, + 750, + 313, + 764 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 14, + "width": 15 + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "title", + "bbox": [ + 108, + 82, + 246, + 94 + ], + "lines": [ + { + "bbox": [ + 105, + 81, + 247, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 247, + 95 + ], + "score": 1.0, + "content": "C.8 A 
CONVERGENCE LEMMA", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 107, + 103, + 504, + 137 + ], + "lines": [ + { + "bbox": [ + 105, + 102, + 506, + 117 + ], + "spans": [ + { + "bbox": [ + 105, + 102, + 506, + 117 + ], + "score": 1.0, + "content": "Before establishing almost-sure convergence, we need the following lemma to derive convergence", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 115, + 505, + 126 + ], + "spans": [ + { + "bbox": [ + 106, + 115, + 252, + 126 + ], + "score": 1.0, + "content": "of the iterates from convergence of", + "type": "text" + }, + { + "bbox": [ + 252, + 115, + 264, + 126 + ], + "score": 0.88, + "content": "T _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 264, + 115, + 505, + 126 + ], + "score": 1.0, + "content": "defined above. Note that a more elaborate result would be", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 125, + 274, + 138 + ], + "spans": [ + { + "bbox": [ + 105, + 125, + 274, + 138 + ], + "score": 1.0, + "content": "needed in an infinite-dimensional setting.", + "type": "text" + } + ], + "index": 3 + } + ], + "index": 2, + "bbox_fs": [ + 105, + 102, + 506, + 138 + ] + }, + { + "type": "text", + "bbox": [ + 105, + 140, + 503, + 168 + ], + "lines": [ + { + "bbox": [ + 104, + 136, + 505, + 158 + ], + "spans": [ + { + "bbox": [ + 104, + 136, + 275, + 158 + ], + "score": 1.0, + "content": "Lemma 4. 
For deterministic sequences", + "type": "text" + }, + { + "bbox": [ + 275, + 140, + 335, + 153 + ], + "score": 0.86, + "content": "z ^ { k } \\in \\mathbb { R } ^ { ( n + 1 ) d }", + "type": "inline_equation" + }, + { + "bbox": [ + 335, + 136, + 339, + 158 + ], + "score": 1.0, + "content": ",", + "type": "text" + }, + { + "bbox": [ + 339, + 140, + 408, + 154 + ], + "score": 0.9, + "content": "\\{ ( w _ { i } ^ { k } ) _ { i = 1 } ^ { n + 1 } \\} \\ \\in \\ { \\mathcal { P } }", + "type": "inline_equation" + }, + { + "bbox": [ + 409, + 136, + 433, + 158 + ], + "score": 1.0, + "content": ", and", + "type": "text" + }, + { + "bbox": [ + 433, + 140, + 505, + 155 + ], + "score": 0.91, + "content": "\\{ ( x _ { i } ^ { k } , y _ { i } ^ { k } ) _ { i = 1 } ^ { n + 1 } \\} \\in", + "type": "inline_equation" + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 146, + 374, + 175 + ], + "spans": [ + { + "bbox": [ + 107, + 153, + 144, + 165 + ], + "score": 0.88, + "content": "\\mathbb { R } ^ { 2 ( n + 1 ) d }", + "type": "inline_equation" + }, + { + "bbox": [ + 145, + 146, + 201, + 175 + ], + "score": 1.0, + "content": ", suppose that", + "type": "text" + }, + { + "bbox": [ + 201, + 154, + 253, + 167 + ], + "score": 0.96, + "content": "y _ { i } ^ { k } \\in A _ { i } ( x _ { i } ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 254, + 146, + 306, + 175 + ], + "score": 1.0, + "content": "for i ∈ 1..n,", + "type": "text" + }, + { + "bbox": [ + 307, + 154, + 366, + 168 + ], + "score": 0.67, + "content": "\\textstyle \\sum _ { i = 1 } ^ { n + 1 } w _ { i } ^ { k } = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 366, + 146, + 374, + 175 + ], + "score": 1.0, + "content": "i=,", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 4.5, + "bbox_fs": [ + 104, + 136, + 505, + 175 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 164, + 172, + 446, + 206 + ], + "lines": [ + { + "bbox": [ + 164, + 172, + 446, + 206 + ], + "spans": [ + { + "bbox": [ + 
164, + 172, + 446, + 206 + ], + "score": 0.92, + "content": "\\xi _ { 1 } \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\xi _ { 2 } \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + \\xi _ { 3 } \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } \\to 0", + "type": "interline_equation", + "image_path": "e15e1a65faa6eed30bf8fd1c364850ce6885980aea45d59c103767f4fadedcc1.jpg" + } + ] + } + ], + "index": 7, + "virtual_lines": [ + { + "bbox": [ + 164, + 172, + 446, + 183.33333333333334 + ], + "spans": [], + "index": 6 + }, + { + "bbox": [ + 164, + 183.33333333333334, + 446, + 194.66666666666669 + ], + "spans": [], + "index": 7 + }, + { + "bbox": [ + 164, + 194.66666666666669, + 446, + 206.00000000000003 + ], + "spans": [], + "index": 8 + } + ] + }, + { + "type": "text", + "bbox": [ + 104, + 212, + 493, + 227 + ], + "lines": [ + { + "bbox": [ + 103, + 210, + 491, + 228 + ], + "spans": [ + { + "bbox": [ + 103, + 210, + 152, + 228 + ], + "score": 1.0, + "content": "for scalars", + "type": "text" + }, + { + "bbox": [ + 152, + 213, + 207, + 225 + ], + "score": 0.92, + "content": "\\xi _ { 1 } , \\xi _ { 2 } , \\xi _ { 3 } > 0", + "type": "inline_equation" + }, + { + "bbox": [ + 207, + 210, + 227, + 228 + ], + "score": 1.0, + "content": ", and", + "type": "text" + }, + { + "bbox": [ + 227, + 212, + 438, + 226 + ], + "score": 0.87, + "content": "p ^ { k } \\doteq ( z ^ { k } , w _ { 1 } ^ { k } , \\ldots , w _ { n + 1 } ^ { k } ) \\to \\hat { p } \\doteq ( \\hat { z } , \\hat { w } _ { 1 } , \\ldots , \\hat { w } _ { n + 1 } )", + "type": "inline_equation" + }, + { + "bbox": [ + 439, + 210, + 466, + 228 + ], + "score": 1.0, + "content": ". 
Then", + "type": "text" + }, + { + "bbox": [ + 466, + 213, + 491, + 225 + ], + "score": 0.9, + "content": "\\hat { p } \\in \\mathcal S", + "type": "inline_equation" + } + ], + "index": 9 + } + ], + "index": 9, + "bbox_fs": [ + 103, + 210, + 491, + 228 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 239, + 505, + 289 + ], + "lines": [ + { + "bbox": [ + 105, + 240, + 506, + 255 + ], + "spans": [ + { + "bbox": [ + 105, + 240, + 172, + 255 + ], + "score": 1.0, + "content": "Proof. Fix any", + "type": "text" + }, + { + "bbox": [ + 172, + 241, + 236, + 254 + ], + "score": 0.93, + "content": "i \\in \\{ 1 , \\ldots , n \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 236, + 240, + 271, + 255 + ], + "score": 1.0, + "content": ". Since", + "type": "text" + }, + { + "bbox": [ + 271, + 241, + 343, + 253 + ], + "score": 0.92, + "content": "\\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| \\to 0", + "type": "inline_equation" + }, + { + "bbox": [ + 344, + 240, + 398, + 255 + ], + "score": 1.0, + "content": "by (50) and", + "type": "text" + }, + { + "bbox": [ + 399, + 241, + 443, + 253 + ], + "score": 0.92, + "content": "w _ { i } ^ { k } \\hat { w } _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 443, + 240, + 506, + 255 + ], + "score": 1.0, + "content": ", we also have", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 251, + 503, + 268 + ], + "spans": [ + { + "bbox": [ + 107, + 253, + 144, + 266 + ], + "score": 0.92, + "content": "y _ { i } ^ { k } \\hat { w } _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 144, + 251, + 280, + 268 + ], + "score": 1.0, + "content": ". 
Similarly, (50) also implies that", + "type": "text" + }, + { + "bbox": [ + 280, + 254, + 344, + 266 + ], + "score": 0.91, + "content": "\\lVert z ^ { k } - x _ { i } ^ { k } \\rVert \\to 0", + "type": "inline_equation" + }, + { + "bbox": [ + 344, + 251, + 382, + 268 + ], + "score": 1.0, + "content": ", so from", + "type": "text" + }, + { + "bbox": [ + 382, + 253, + 414, + 264 + ], + "score": 0.91, + "content": "z ^ { k } \\hat { z }", + "type": "inline_equation" + }, + { + "bbox": [ + 415, + 251, + 470, + 268 + ], + "score": 1.0, + "content": "we also have", + "type": "text" + }, + { + "bbox": [ + 470, + 253, + 503, + 266 + ], + "score": 0.93, + "content": "x _ { i } ^ { k } \\hat { z }", + "type": "inline_equation" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 264, + 506, + 279 + ], + "spans": [ + { + "bbox": [ + 105, + 264, + 132, + 279 + ], + "score": 1.0, + "content": "Since", + "type": "text" + }, + { + "bbox": [ + 132, + 265, + 186, + 278 + ], + "score": 0.92, + "content": "y _ { i } ^ { k } \\in A _ { i } ( x _ { i } ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 186, + 264, + 205, + 279 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 206, + 265, + 283, + 278 + ], + "score": 0.92, + "content": "( x _ { i } ^ { k } , y _ { i } ^ { k } ) ( \\hat { z } , \\hat { w } _ { i } )", + "type": "inline_equation" + }, + { + "bbox": [ + 284, + 264, + 506, + 279 + ], + "score": 1.0, + "content": ", (Bauschke & Combettes, 2017, Prop. 20.37) implies", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 276, + 424, + 290 + ], + "spans": [ + { + "bbox": [ + 107, + 277, + 154, + 289 + ], + "score": 0.92, + "content": "\\hat { w } _ { i } \\in A _ { i } ( \\hat { z } )", + "type": "inline_equation" + }, + { + "bbox": [ + 154, + 276, + 183, + 290 + ], + "score": 1.0, + "content": ". 
Since", + "type": "text" + }, + { + "bbox": [ + 183, + 278, + 188, + 287 + ], + "score": 0.74, + "content": "i", + "type": "inline_equation" + }, + { + "bbox": [ + 188, + 276, + 385, + 290 + ], + "score": 1.0, + "content": "was arbitrary, the preceding conclusions hold for", + "type": "text" + }, + { + "bbox": [ + 386, + 277, + 419, + 288 + ], + "score": 0.88, + "content": "i \\in 1 . . n", + "type": "inline_equation" + }, + { + "bbox": [ + 419, + 276, + 424, + 290 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 13 + } + ], + "index": 11.5, + "bbox_fs": [ + 105, + 240, + 506, + 290 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 293, + 506, + 329 + ], + "lines": [ + { + "bbox": [ + 103, + 287, + 508, + 310 + ], + "spans": [ + { + "bbox": [ + 103, + 287, + 219, + 310 + ], + "score": 1.0, + "content": "Now, (50) also implies that", + "type": "text" + }, + { + "bbox": [ + 219, + 293, + 312, + 306 + ], + "score": 0.93, + "content": "\\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| \\to 0", + "type": "inline_equation" + }, + { + "bbox": [ + 312, + 287, + 384, + 310 + ], + "score": 1.0, + "content": ". Therefore, since", + "type": "text" + }, + { + "bbox": [ + 385, + 293, + 446, + 306 + ], + "score": 0.93, + "content": "w _ { n + 1 } ^ { k } \\to \\hat { w } _ { n + 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 447, + 287, + 508, + 310 + ], + "score": 1.0, + "content": ", we also have", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 304, + 507, + 320 + ], + "spans": [ + { + "bbox": [ + 107, + 306, + 174, + 318 + ], + "score": 0.9, + "content": "B ( z ^ { k } ) \\to \\hat { w } _ { n + 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 174, + 304, + 276, + 320 + ], + "score": 1.0, + "content": ". 
Much as before, since", + "type": "text" + }, + { + "bbox": [ + 276, + 306, + 383, + 318 + ], + "score": 0.91, + "content": "( z ^ { k } , B ( z ^ { k } ) ) ( \\hat { z } , \\hat { w } _ { n + 1 } )", + "type": "inline_equation" + }, + { + "bbox": [ + 383, + 304, + 507, + 320 + ], + "score": 1.0, + "content": ", we may apply (Bauschke &", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 316, + 377, + 330 + ], + "spans": [ + { + "bbox": [ + 106, + 316, + 315, + 330 + ], + "score": 1.0, + "content": "Combettes, 2017, Prop. 20.37) to conclude that that", + "type": "text" + }, + { + "bbox": [ + 315, + 318, + 372, + 329 + ], + "score": 0.92, + "content": "\\hat { w } _ { n + 1 } = B ( \\hat { z } )", + "type": "inline_equation" + }, + { + "bbox": [ + 372, + 316, + 377, + 330 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 15, + "bbox_fs": [ + 103, + 287, + 508, + 330 + ] + }, + { + "type": "text", + "bbox": [ + 108, + 334, + 504, + 360 + ], + "lines": [ + { + "bbox": [ + 105, + 332, + 506, + 348 + ], + "spans": [ + { + "bbox": [ + 105, + 332, + 221, + 348 + ], + "score": 1.0, + "content": "Since the linear subspace", + "type": "text" + }, + { + "bbox": [ + 222, + 335, + 231, + 344 + ], + "score": 0.81, + "content": "\\mathcal { P }", + "type": "inline_equation" + }, + { + "bbox": [ + 231, + 332, + 415, + 348 + ], + "score": 1.0, + "content": "defined in (6) must be closed, the limit", + "type": "text" + }, + { + "bbox": [ + 416, + 334, + 490, + 347 + ], + "score": 0.91, + "content": "\\left( \\hat { z } , \\hat { w } _ { 1 } , \\dots , \\hat { w } _ { n + 1 } \\right)", + "type": "inline_equation" + }, + { + "bbox": [ + 491, + 332, + 506, + 348 + ], + "score": 1.0, + "content": "of", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 107, + 340, + 367, + 366 + ], + "spans": [ + { + "bbox": [ + 107, + 346, + 218, + 360 + ], + "score": 0.93, + "content": "\\{ ( z ^ { k } , w _ { 1 } ^ { k 
} , \\ldots , w _ { n + 1 } ^ { k } ) \\} \\subset \\mathcal { P }", + "type": "inline_equation" + }, + { + "bbox": [ + 218, + 340, + 263, + 366 + ], + "score": 1.0, + "content": "must be in", + "type": "text" + }, + { + "bbox": [ + 263, + 348, + 272, + 358 + ], + "score": 0.81, + "content": "\\mathcal { P }", + "type": "inline_equation" + }, + { + "bbox": [ + 272, + 340, + 302, + 366 + ], + "score": 1.0, + "content": ", hence", + "type": "text" + }, + { + "bbox": [ + 302, + 345, + 359, + 360 + ], + "score": 0.93, + "content": "\\textstyle \\sum _ { i = 1 } ^ { n + 1 } { \\hat { w } } _ { i } = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 360, + 340, + 367, + 366 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 17.5, + "bbox_fs": [ + 105, + 332, + 506, + 366 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 363, + 506, + 390 + ], + "lines": [ + { + "bbox": [ + 165, + 363, + 506, + 398 + ], + "spans": [ + { + "bbox": [ + 165, + 363, + 173, + 398 + ], + "score": 1.0, + "content": "nt .", + "type": "text" + }, + { + "bbox": [ + 173, + 364, + 271, + 376 + ], + "score": 0.91, + "content": "\\hat { p } = ( \\hat { z } , \\hat { w } _ { 1 } , \\dots , \\hat { w } _ { n + 1 } )", + "type": "inline_equation" + }, + { + "bbox": [ + 271, + 363, + 308, + 398 + ], + "score": 1.0, + "content": "satisfies tions defi", + "type": "text" + }, + { + "bbox": [ + 309, + 364, + 360, + 376 + ], + "score": 0.93, + "content": "\\hat { w } _ { i } \\in A _ { i } ( \\hat { z } )", + "type": "inline_equation" + }, + { + "bbox": [ + 360, + 363, + 378, + 398 + ], + "score": 1.0, + "content": "for ship", + "type": "text" + }, + { + "bbox": [ + 378, + 365, + 415, + 375 + ], + "score": 0.71, + "content": "i \\in 1 . . 
n", + "type": "inline_equation" + }, + { + "bbox": [ + 416, + 363, + 421, + 398 + ], + "score": 1.0, + "content": ",", + "type": "text" + }, + { + "bbox": [ + 421, + 364, + 482, + 376 + ], + "score": 0.91, + "content": "\\hat { w } _ { n + 1 } = B ( \\hat { z } )", + "type": "inline_equation" + }, + { + "bbox": [ + 483, + 363, + 506, + 398 + ], + "score": 1.0, + "content": ", and", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 375, + 472, + 390 + ], + "spans": [ + { + "bbox": [ + 106, + 375, + 164, + 390 + ], + "score": 0.92, + "content": "\\textstyle \\sum _ { i = 1 } ^ { n + 1 } { \\hat { w } } _ { i } = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 388, + 378, + 396, + 388 + ], + "score": 0.8, + "content": "s", + "type": "inline_equation" + }, + { + "bbox": [ + 447, + 378, + 472, + 389 + ], + "score": 0.89, + "content": "\\hat { p } \\in \\mathcal S", + "type": "inline_equation" + } + ], + "index": 19 + } + ], + "index": 19.5, + "bbox_fs": [ + 106, + 363, + 506, + 398 + ] + }, + { + "type": "title", + "bbox": [ + 108, + 402, + 296, + 415 + ], + "lines": [ + { + "bbox": [ + 106, + 402, + 298, + 416 + ], + "spans": [ + { + "bbox": [ + 106, + 402, + 298, + 416 + ], + "score": 1.0, + "content": "C.9 FINISHING THE PROOF OF THEOREM 1", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 21 + }, + { + "type": "text", + "bbox": [ + 106, + 423, + 506, + 458 + ], + "lines": [ + { + "bbox": [ + 106, + 423, + 506, + 438 + ], + "spans": [ + { + "bbox": [ + 106, + 423, + 134, + 438 + ], + "score": 1.0, + "content": "Given", + "type": "text" + }, + { + "bbox": [ + 134, + 423, + 190, + 437 + ], + "score": 0.92, + "content": "\\textstyle \\sum _ { k } \\alpha _ { k } ^ { 2 } < \\infty", + "type": "inline_equation" + }, + { + "bbox": [ + 191, + 423, + 213, + 438 + ], + "score": 1.0, + "content": ", and", + "type": "text" + }, + { + "bbox": [ + 214, + 423, + 275, + 437 + ], + "score": 0.92, + "content": "\\sum \\alpha _ { k } \\rho _ { k } 
^ { 2 } < \\infty", + "type": "inline_equation" + }, + { + "bbox": [ + 275, + 423, + 506, + 438 + ], + "score": 1.0, + "content": ", (47) satisfies the conditions of Stochastic Quasi-Fejer", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 434, + 504, + 448 + ], + "spans": [ + { + "bbox": [ + 105, + 434, + 459, + 448 + ], + "score": 1.0, + "content": "Monotonicity as given in Lemma 2. By applying Lemma 2, we conclude that there exist", + "type": "text" + }, + { + "bbox": [ + 459, + 435, + 504, + 446 + ], + "score": 0.91, + "content": "\\Omega _ { 1 } , \\Omega _ { 2 } , \\Omega _ { 3 }", + "type": "inline_equation" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 446, + 263, + 458 + ], + "spans": [ + { + "bbox": [ + 106, + 446, + 145, + 458 + ], + "score": 1.0, + "content": "such that", + "type": "text" + }, + { + "bbox": [ + 145, + 446, + 188, + 458 + ], + "score": 0.93, + "content": "P [ \\Omega _ { i } ] = 1", + "type": "inline_equation" + }, + { + "bbox": [ + 189, + 446, + 203, + 458 + ], + "score": 1.0, + "content": "for", + "type": "text" + }, + { + "bbox": [ + 204, + 446, + 245, + 457 + ], + "score": 0.91, + "content": "i = { 1 , 2 , 3 }", + "type": "inline_equation" + }, + { + "bbox": [ + 246, + 446, + 263, + 458 + ], + "score": 1.0, + "content": "and", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 23, + "bbox_fs": [ + 105, + 423, + 506, + 458 + ] + }, + { + "type": "text", + "bbox": [ + 131, + 467, + 199, + 479 + ], + "lines": [ + { + "bbox": [ + 129, + 465, + 199, + 480 + ], + "spans": [ + { + "bbox": [ + 129, + 465, + 169, + 480 + ], + "score": 1.0, + "content": "1. 
for all", + "type": "text" + }, + { + "bbox": [ + 169, + 468, + 199, + 478 + ], + "score": 0.79, + "content": "v \\in \\Omega _ { 1 }", + "type": "inline_equation" + } + ], + "index": 25 + } + ], + "index": 25, + "bbox_fs": [ + 129, + 465, + 199, + 480 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 277, + 484, + 368, + 517 + ], + "lines": [ + { + "bbox": [ + 277, + 484, + 368, + 517 + ], + "spans": [ + { + "bbox": [ + 277, + 484, + 368, + 517 + ], + "score": 0.93, + "content": "\\sum _ { k = 1 } ^ { \\infty } \\alpha _ { k } \\rho _ { k } T _ { k } ( v ) < \\infty ,", + "type": "interline_equation", + "image_path": "32b73f4fcaa1a2e08db69eefb664ad651527a0b201d7a4b8dac9d3b578f8d85d.jpg" + } + ] + } + ], + "index": 26.5, + "virtual_lines": [ + { + "bbox": [ + 277, + 484, + 368, + 500.5 + ], + "spans": [], + "index": 26 + }, + { + "bbox": [ + 277, + 500.5, + 368, + 517.0 + ], + "spans": [], + "index": 27 + } + ] + }, + { + "type": "text", + "bbox": [ + 130, + 525, + 505, + 539 + ], + "lines": [ + { + "bbox": [ + 128, + 525, + 507, + 540 + ], + "spans": [ + { + "bbox": [ + 128, + 525, + 168, + 540 + ], + "score": 1.0, + "content": "2. 
for all", + "type": "text" + }, + { + "bbox": [ + 168, + 527, + 198, + 538 + ], + "score": 0.92, + "content": "v \\in \\Omega _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 199, + 525, + 218, + 540 + ], + "score": 1.0, + "content": ", and", + "type": "text" + }, + { + "bbox": [ + 218, + 527, + 248, + 538 + ], + "score": 0.83, + "content": "p ^ { * } \\in { \\mathcal { S } }", + "type": "inline_equation" + }, + { + "bbox": [ + 249, + 525, + 252, + 540 + ], + "score": 1.0, + "content": ",", + "type": "text" + }, + { + "bbox": [ + 252, + 525, + 306, + 539 + ], + "score": 0.86, + "content": "\\| p ^ { k } ( v ) - p ^ { * } \\|", + "type": "inline_equation" + }, + { + "bbox": [ + 307, + 525, + 507, + 540 + ], + "score": 1.0, + "content": "converges to a finite nonnegative random-variable,", + "type": "text" + } + ], + "index": 28 + } + ], + "index": 28, + "bbox_fs": [ + 128, + 525, + 507, + 540 + ] + }, + { + "type": "text", + "bbox": [ + 130, + 542, + 301, + 555 + ], + "lines": [ + { + "bbox": [ + 129, + 541, + 301, + 555 + ], + "spans": [ + { + "bbox": [ + 129, + 541, + 169, + 555 + ], + "score": 1.0, + "content": "3. 
for all", + "type": "text" + }, + { + "bbox": [ + 169, + 541, + 227, + 555 + ], + "score": 0.65, + "content": "v \\in \\Omega _ { 3 } , p ^ { k } ( v )", + "type": "inline_equation" + }, + { + "bbox": [ + 227, + 541, + 301, + 555 + ], + "score": 1.0, + "content": "remains bounded.", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 29, + "bbox_fs": [ + 129, + 541, + 301, + 555 + ] + }, + { + "type": "text", + "bbox": [ + 105, + 563, + 504, + 576 + ], + "lines": [ + { + "bbox": [ + 104, + 559, + 507, + 580 + ], + "spans": [ + { + "bbox": [ + 104, + 559, + 131, + 580 + ], + "score": 1.0, + "content": "Since", + "type": "text" + }, + { + "bbox": [ + 132, + 563, + 204, + 576 + ], + "score": 0.92, + "content": "\\textstyle \\sum _ { k = 1 } ^ { \\infty } \\alpha _ { k } \\rho _ { k } = \\infty", + "type": "inline_equation" + }, + { + "bbox": [ + 205, + 559, + 303, + 580 + ], + "score": 1.0, + "content": ", (51) implies that for all", + "type": "text" + }, + { + "bbox": [ + 303, + 564, + 333, + 575 + ], + "score": 0.92, + "content": "v \\in \\Omega _ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 333, + 559, + 441, + 580 + ], + "score": 1.0, + "content": "there exists a subsequence", + "type": "text" + }, + { + "bbox": [ + 442, + 564, + 465, + 576 + ], + "score": 0.94, + "content": "q _ { k } ( v )", + "type": "inline_equation" + }, + { + "bbox": [ + 465, + 559, + 507, + 580 + ], + "score": 1.0, + "content": "such that", + "type": "text" + } + ], + "index": 30 + } + ], + "index": 30, + "bbox_fs": [ + 104, + 559, + 507, + 580 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 280, + 582, + 331, + 596 + ], + "lines": [ + { + "bbox": [ + 280, + 582, + 331, + 596 + ], + "spans": [ + { + "bbox": [ + 280, + 582, + 331, + 596 + ], + "score": 0.91, + "content": "T _ { q _ { k } ( v ) } \\to 0 .", + "type": "interline_equation", + "image_path": "90c3ec9255f814d47099fbff42eb94da7804c7fca8841b75d2215bfbda6f3802.jpg" + } + ] + } + ], + "index": 31, + 
"virtual_lines": [ + { + "bbox": [ + 280, + 582, + 331, + 596 + ], + "spans": [], + "index": 31 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 608, + 506, + 657 + ], + "lines": [ + { + "bbox": [ + 105, + 608, + 506, + 622 + ], + "spans": [ + { + "bbox": [ + 105, + 608, + 122, + 622 + ], + "score": 1.0, + "content": "Let", + "type": "text" + }, + { + "bbox": [ + 122, + 609, + 204, + 621 + ], + "score": 0.92, + "content": "\\Omega ^ { \\prime } = \\Omega _ { 1 } \\cap \\Omega _ { 2 } \\cap \\Omega _ { 3 }", + "type": "inline_equation" + }, + { + "bbox": [ + 204, + 608, + 259, + 622 + ], + "score": 1.0, + "content": "and note that", + "type": "text" + }, + { + "bbox": [ + 259, + 609, + 302, + 621 + ], + "score": 0.93, + "content": "P [ \\Omega ^ { \\prime } ] = 1", + "type": "inline_equation" + }, + { + "bbox": [ + 302, + 608, + 338, + 622 + ], + "score": 1.0, + "content": ". Choose", + "type": "text" + }, + { + "bbox": [ + 338, + 609, + 366, + 620 + ], + "score": 0.91, + "content": "v \\in \\Omega ^ { \\prime }", + "type": "inline_equation" + }, + { + "bbox": [ + 367, + 608, + 395, + 622 + ], + "score": 1.0, + "content": ". 
Since", + "type": "text" + }, + { + "bbox": [ + 396, + 608, + 419, + 621 + ], + "score": 0.93, + "content": "p ^ { k } ( v )", + "type": "inline_equation" + }, + { + "bbox": [ + 420, + 608, + 506, + 622 + ], + "score": 1.0, + "content": "remains bounded, so", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 619, + 506, + 635 + ], + "spans": [ + { + "bbox": [ + 105, + 619, + 127, + 635 + ], + "score": 1.0, + "content": "does", + "type": "text" + }, + { + "bbox": [ + 128, + 621, + 165, + 633 + ], + "score": 0.9, + "content": "p ^ { q _ { k } ( v ) } ( v )", + "type": "inline_equation" + }, + { + "bbox": [ + 165, + 619, + 181, + 635 + ], + "score": 1.0, + "content": "for", + "type": "text" + }, + { + "bbox": [ + 181, + 622, + 204, + 634 + ], + "score": 0.91, + "content": "q _ { k } ( v )", + "type": "inline_equation" + }, + { + "bbox": [ + 205, + 619, + 428, + 635 + ], + "score": 1.0, + "content": "defined above in (52). Thus there exists a subsequence", + "type": "text" + }, + { + "bbox": [ + 428, + 621, + 487, + 634 + ], + "score": 0.93, + "content": "r _ { k } ( v ) \\subseteq q _ { k } ( v )", + "type": "inline_equation" + }, + { + "bbox": [ + 487, + 619, + 506, + 635 + ], + "score": 1.0, + "content": "and", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 107, + 630, + 508, + 649 + ], + "spans": [ + { + "bbox": [ + 107, + 633, + 171, + 646 + ], + "score": 0.9, + "content": "\\hat { p } ( v ) \\in \\mathbb { R } ^ { ( n + 2 ) d }", + "type": "inline_equation" + }, + { + "bbox": [ + 171, + 630, + 211, + 649 + ], + "score": 1.0, + "content": "such that", + "type": "text" + }, + { + "bbox": [ + 212, + 633, + 282, + 646 + ], + "score": 0.93, + "content": "p ^ { r _ { k } ( v ) } ( v ) \\hat { p } ( v )", + "type": "inline_equation" + }, + { + "bbox": [ + 283, + 630, + 328, + 649 + ], + "score": 1.0, + "content": ". 
But since", + "type": "text" + }, + { + "bbox": [ + 328, + 634, + 374, + 647 + ], + "score": 0.93, + "content": "T _ { q _ { k } ( v ) } \\to 0", + "type": "inline_equation" + }, + { + "bbox": [ + 375, + 630, + 456, + 649 + ], + "score": 1.0, + "content": ", it also follows that", + "type": "text" + }, + { + "bbox": [ + 456, + 635, + 502, + 647 + ], + "score": 0.92, + "content": "T _ { r _ { k } ( v ) } \\to 0", + "type": "inline_equation" + }, + { + "bbox": [ + 503, + 630, + 508, + 649 + ], + "score": 1.0, + "content": ",", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 104, + 643, + 137, + 658 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 137, + 658 + ], + "score": 1.0, + "content": "that is,", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 33.5, + "bbox_fs": [ + 104, + 608, + 508, + 658 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 115, + 664, + 495, + 716 + ], + "lines": [ + { + "bbox": [ + 115, + 664, + 495, + 716 + ], + "spans": [ + { + "bbox": [ + 115, + 664, + 495, + 716 + ], + "score": 0.94, + "content": "\\begin{array} { r l r } { { \\frac { \\tau } { \\rho } \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { r _ { k } ( v ) } ( v ) - w _ { i } ^ { r _ { k } ( v ) } ( v ) \\| ^ { 2 } + \\frac { 1 } { \\rho \\tau } \\sum _ { i = 1 } ^ { n } \\| z ^ { r _ { k } ( v ) } ( v ) - x _ { i } ^ { r _ { k } ( v ) } ( v ) \\| ^ { 2 } } } \\\\ & { } & { \\qquad + 2 ( 1 - \\overline { { \\rho } } L ) \\| B ( z ^ { r _ { k } ( v ) } ( v ) ) - w _ { n + 1 } ^ { r _ { k } ( v ) } ( v ) \\| ^ { 2 } \\to 0 . 
} \\end{array}", + "type": "interline_equation", + "image_path": "7d02e07e324d8a9aeee14ca2095c1217ccd896ed9199f904d123a88f63dde927.jpg" + } + ] + } + ], + "index": 37, + "virtual_lines": [ + { + "bbox": [ + 115, + 664, + 495, + 681.3333333333334 + ], + "spans": [], + "index": 36 + }, + { + "bbox": [ + 115, + 681.3333333333334, + 495, + 698.6666666666667 + ], + "spans": [], + "index": 37 + }, + { + "bbox": [ + 115, + 698.6666666666667, + 495, + 716.0000000000001 + ], + "spans": [], + "index": 38 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 720, + 283, + 732 + ], + "lines": [ + { + "bbox": [ + 106, + 720, + 283, + 733 + ], + "spans": [ + { + "bbox": [ + 106, + 720, + 242, + 733 + ], + "score": 1.0, + "content": "We then have from Lemma 4 that", + "type": "text" + }, + { + "bbox": [ + 242, + 720, + 280, + 732 + ], + "score": 0.92, + "content": "\\hat { p } ( v ) \\in S", + "type": "inline_equation" + }, + { + "bbox": [ + 280, + 720, + 283, + 733 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 39 + } + ], + "index": 39, + "bbox_fs": [ + 106, + 720, + 283, + 733 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 81, + 504, + 105 + ], + "lines": [ + { + "bbox": [ + 105, + 79, + 504, + 97 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 130, + 97 + ], + "score": 1.0, + "content": "Since", + "type": "text" + }, + { + "bbox": [ + 131, + 81, + 202, + 95 + ], + "score": 0.93, + "content": "p ^ { r _ { k } ( v ) } ( v ) \\hat { p } ( v )", + "type": "inline_equation" + }, + { + "bbox": [ + 202, + 79, + 261, + 97 + ], + "score": 1.0, + "content": ", it follows that", + "type": "text" + }, + { + "bbox": [ + 262, + 81, + 358, + 95 + ], + "score": 0.93, + "content": "\\lVert p ^ { r _ { k } ( v ) } ( v ) - \\hat { p } ( v ) \\rVert \\to 0", + "type": "inline_equation" + }, + { + "bbox": [ + 358, + 79, + 400, + 97 + ], + "score": 1.0, + "content": ". 
But since", + "type": "text" + }, + { + "bbox": [ + 400, + 81, + 504, + 95 + ], + "score": 0.77, + "content": "\\hat { p } ( v ) \\in S , \\| p ^ { k } ( v ) - \\hat { p } ( v ) \\|", + "type": "inline_equation" + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 93, + 243, + 106 + ], + "spans": [ + { + "bbox": [ + 106, + 93, + 243, + 106 + ], + "score": 1.0, + "content": "converges by point 2 above. Thus", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 0.5 + }, + { + "type": "interline_equation", + "bbox": [ + 194, + 110, + 415, + 131 + ], + "lines": [ + { + "bbox": [ + 194, + 110, + 415, + 131 + ], + "spans": [ + { + "bbox": [ + 194, + 110, + 415, + 131 + ], + "score": 0.9, + "content": "\\operatorname* { l i m } _ { k \\to \\infty } \\| p ^ { k } ( v ) - \\hat { p } ( v ) \\| = \\operatorname* { l i m } _ { k \\to \\infty } \\| p ^ { r _ { k } ( v ) } ( v ) - \\hat { p } ( v ) \\| = 0 .", + "type": "interline_equation", + "image_path": "748c81bde216d95dc0a3a4f346d3ea01b0ea6d25d54f69de6fd2eba2aeace320.jpg" + } + ] + } + ], + "index": 2, + "virtual_lines": [ + { + "bbox": [ + 194, + 110, + 415, + 131 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 137, + 505, + 161 + ], + "lines": [ + { + "bbox": [ + 105, + 135, + 505, + 152 + ], + "spans": [ + { + "bbox": [ + 105, + 135, + 149, + 152 + ], + "score": 1.0, + "content": "Therefore", + "type": "text" + }, + { + "bbox": [ + 149, + 137, + 226, + 150 + ], + "score": 0.94, + "content": "p ^ { k } ( v ) \\hat { p } ( v ) \\in \\mathcal { S }", + "type": "inline_equation" + }, + { + "bbox": [ + 226, + 135, + 301, + 152 + ], + "score": 1.0, + "content": ". 
Thus there exists", + "type": "text" + }, + { + "bbox": [ + 302, + 138, + 327, + 150 + ], + "score": 0.92, + "content": "\\hat { p } \\in \\mathcal S", + "type": "inline_equation" + }, + { + "bbox": [ + 327, + 135, + 367, + 152 + ], + "score": 1.0, + "content": "such that", + "type": "text" + }, + { + "bbox": [ + 367, + 137, + 399, + 150 + ], + "score": 0.93, + "content": "p ^ { k } \\hat { p }", + "type": "inline_equation" + }, + { + "bbox": [ + 400, + 135, + 505, + 152 + ], + "score": 1.0, + "content": "a.s., which completes the", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 149, + 191, + 161 + ], + "spans": [ + { + "bbox": [ + 105, + 149, + 191, + 161 + ], + "score": 1.0, + "content": "proof of Theorem 1.", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 3.5 + }, + { + "type": "title", + "bbox": [ + 108, + 174, + 259, + 186 + ], + "lines": [ + { + "bbox": [ + 106, + 174, + 259, + 186 + ], + "spans": [ + { + "bbox": [ + 106, + 174, + 259, + 186 + ], + "score": 1.0, + "content": "C.10 TWO ADDITIONAL RESULTS", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 5 + }, + { + "type": "text", + "bbox": [ + 106, + 195, + 505, + 218 + ], + "lines": [ + { + "bbox": [ + 104, + 192, + 506, + 209 + ], + "spans": [ + { + "bbox": [ + 104, + 192, + 426, + 209 + ], + "score": 1.0, + "content": "In this section, we prove two additional useful results about SPS. First, that", + "type": "text" + }, + { + "bbox": [ + 426, + 195, + 462, + 208 + ], + "score": 0.92, + "content": "x _ { i } ^ { k } \\hat { z }", + "type": "inline_equation" + }, + { + "bbox": [ + 463, + 192, + 506, + 209 + ], + "score": 1.0, + "content": "(a.s.) 
for", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 206, + 274, + 219 + ], + "spans": [ + { + "bbox": [ + 107, + 207, + 158, + 218 + ], + "score": 0.91, + "content": "i = 1 , \\ldots , n", + "type": "inline_equation" + }, + { + "bbox": [ + 158, + 206, + 213, + 219 + ], + "score": 1.0, + "content": ". Second, that", + "type": "text" + }, + { + "bbox": [ + 214, + 207, + 248, + 218 + ], + "score": 0.91, + "content": "G _ { k } \\to 0", + "type": "inline_equation" + }, + { + "bbox": [ + 249, + 206, + 274, + 219 + ], + "score": 1.0, + "content": "(a.s.).", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 6.5 + }, + { + "type": "text", + "bbox": [ + 106, + 223, + 146, + 235 + ], + "lines": [ + { + "bbox": [ + 105, + 222, + 146, + 236 + ], + "spans": [ + { + "bbox": [ + 105, + 222, + 146, + 236 + ], + "score": 1.0, + "content": "Note that", + "type": "text" + } + ], + "index": 8 + } + ], + "index": 8 + }, + { + "type": "interline_equation", + "bbox": [ + 258, + 233, + 352, + 248 + ], + "lines": [ + { + "bbox": [ + 258, + 233, + 352, + 248 + ], + "spans": [ + { + "bbox": [ + 258, + 233, + 352, + 248 + ], + "score": 0.91, + "content": "x _ { i } ^ { k } = J _ { \\tau A _ { i } } ( z ^ { k } + \\tau w _ { i } ^ { k } )", + "type": "interline_equation", + "image_path": "0440627561d8ce777d9ee6f78e45ef19515f026b3cf865cdb8a6950b263bea96.jpg" + } + ] + } + ], + "index": 9, + "virtual_lines": [ + { + "bbox": [ + 258, + 233, + 352, + 248 + ], + "spans": [], + "index": 9 + } + ] + }, + { + "type": "text", + "bbox": [ + 104, + 251, + 505, + 272 + ], + "lines": [ + { + "bbox": [ + 105, + 249, + 506, + 265 + ], + "spans": [ + { + "bbox": [ + 105, + 249, + 147, + 265 + ], + "score": 1.0, + "content": "and since", + "type": "text" + }, + { + "bbox": [ + 147, + 250, + 158, + 261 + ], + "score": 0.87, + "content": "z ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 159, + 249, + 177, + 265 + ], + "score": 1.0, + "content": "and", + "type": 
"text" + }, + { + "bbox": [ + 177, + 250, + 191, + 263 + ], + "score": 0.91, + "content": "w _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 191, + 249, + 297, + 265 + ], + "score": 1.0, + "content": "convergence a.s., so does", + "type": "text" + }, + { + "bbox": [ + 298, + 251, + 309, + 263 + ], + "score": 0.9, + "content": "x _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 309, + 249, + 422, + 265 + ], + "score": 1.0, + "content": ". Consider the subsequence", + "type": "text" + }, + { + "bbox": [ + 423, + 251, + 446, + 263 + ], + "score": 0.92, + "content": "q _ { k } ( v )", + "type": "inline_equation" + }, + { + "bbox": [ + 446, + 249, + 506, + 265 + ], + "score": 1.0, + "content": "such that (52)", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 260, + 158, + 275 + ], + "spans": [ + { + "bbox": [ + 105, + 260, + 158, + 275 + ], + "score": 1.0, + "content": "holds. Then", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 10.5 + }, + { + "type": "interline_equation", + "bbox": [ + 263, + 270, + 348, + 287 + ], + "lines": [ + { + "bbox": [ + 263, + 270, + 348, + 287 + ], + "spans": [ + { + "bbox": [ + 263, + 270, + 348, + 287 + ], + "score": 0.9, + "content": "z ^ { q _ { k } ( v ) } - x _ { i } ^ { q _ { k } ( v ) } 0", + "type": "interline_equation", + "image_path": "d33e75965917693fec5a75456d8bbfda621de1046323b8908959411188fc4760.jpg" + } + ] + } + ], + "index": 12, + "virtual_lines": [ + { + "bbox": [ + 263, + 270, + 348, + 287 + ], + "spans": [], + "index": 12 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 290, + 126, + 301 + ], + "lines": [ + { + "bbox": [ + 105, + 289, + 126, + 302 + ], + "spans": [ + { + "bbox": [ + 105, + 289, + 126, + 302 + ], + "score": 1.0, + "content": "thus", + "type": "text" + } + ], + "index": 13 + } + ], + "index": 13 + }, + { + "type": "interline_equation", + "bbox": [ + 280, + 298, + 330, + 315 + ], + "lines": [ + { + "bbox": [ + 280, + 298, + 
330, + 315 + ], + "spans": [ + { + "bbox": [ + 280, + 298, + 330, + 315 + ], + "score": 0.9, + "content": "x _ { i } ^ { q _ { k } ( v ) } \\hat { z } .", + "type": "interline_equation", + "image_path": "2b2c5a46db20413fa548a746f333c068bdb7c878bb77383191d5cd983dfaa4c5.jpg" + } + ] + } + ], + "index": 14, + "virtual_lines": [ + { + "bbox": [ + 280, + 298, + 330, + 315 + ], + "spans": [], + "index": 14 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 318, + 348, + 331 + ], + "lines": [ + { + "bbox": [ + 106, + 318, + 349, + 332 + ], + "spans": [ + { + "bbox": [ + 106, + 318, + 131, + 332 + ], + "score": 1.0, + "content": "Since", + "type": "text" + }, + { + "bbox": [ + 131, + 318, + 143, + 331 + ], + "score": 0.9, + "content": "x _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 143, + 318, + 339, + 332 + ], + "score": 1.0, + "content": "converges to some limit (a.s.), that limit must be", + "type": "text" + }, + { + "bbox": [ + 339, + 320, + 345, + 329 + ], + "score": 0.81, + "content": "\\hat { z }", + "type": "inline_equation" + }, + { + "bbox": [ + 345, + 318, + 349, + 332 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 15 + }, + { + "type": "text", + "bbox": [ + 106, + 336, + 152, + 347 + ], + "lines": [ + { + "bbox": [ + 106, + 335, + 153, + 347 + ], + "spans": [ + { + "bbox": [ + 106, + 335, + 153, + 347 + ], + "score": 1.0, + "content": "Recall that", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 16 + }, + { + "type": "interline_equation", + "bbox": [ + 166, + 352, + 443, + 369 + ], + "lines": [ + { + "bbox": [ + 166, + 352, + 443, + 369 + ], + "spans": [ + { + "bbox": [ + 166, + 352, + 443, + 369 + ], + "score": 0.9, + "content": "\\begin{array} { r } { G _ { k } \\doteq \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } 
\\| ^ { 2 } . } \\end{array}", + "type": "interline_equation", + "image_path": "151c16ae8074e9e0055aa01a49c3f69a548055954baf36b93febd019dfff97d5.jpg" + } + ] + } + ], + "index": 17, + "virtual_lines": [ + { + "bbox": [ + 166, + 352, + 443, + 369 + ], + "spans": [], + "index": 17 + } + ] + }, + { + "type": "text", + "bbox": [ + 105, + 374, + 505, + 396 + ], + "lines": [ + { + "bbox": [ + 104, + 372, + 504, + 388 + ], + "spans": [ + { + "bbox": [ + 104, + 372, + 189, + 388 + ], + "score": 1.0, + "content": "We have shown that", + "type": "text" + }, + { + "bbox": [ + 189, + 374, + 200, + 385 + ], + "score": 0.88, + "content": "z ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 200, + 372, + 218, + 388 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 219, + 374, + 230, + 387 + ], + "score": 0.91, + "content": "x _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 231, + 372, + 329, + 388 + ], + "score": 1.0, + "content": "share the same limit for", + "type": "text" + }, + { + "bbox": [ + 330, + 375, + 380, + 386 + ], + "score": 0.92, + "content": "i = 1 , \\ldots , n", + "type": "inline_equation" + }, + { + "bbox": [ + 381, + 372, + 450, + 388 + ], + "score": 1.0, + "content": "(a.s.). Therefore", + "type": "text" + }, + { + "bbox": [ + 450, + 374, + 504, + 387 + ], + "score": 0.93, + "content": "z ^ { k } - x _ { i } ^ { k } \\to 0", + "type": "inline_equation" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 385, + 157, + 399 + ], + "spans": [ + { + "bbox": [ + 105, + 385, + 157, + 399 + ], + "score": 1.0, + "content": "(a.s.). 
Since", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 18.5 + }, + { + "type": "interline_equation", + "bbox": [ + 250, + 395, + 359, + 411 + ], + "lines": [ + { + "bbox": [ + 250, + 395, + 359, + 411 + ], + "spans": [ + { + "bbox": [ + 250, + 395, + 359, + 411 + ], + "score": 0.91, + "content": "y _ { i } ^ { k } - w _ { i } ^ { k } = \\tau ^ { - 1 } ( z ^ { k } - x _ { i } ^ { k } ) ,", + "type": "interline_equation", + "image_path": "4a835aa7c8343f26bfecb7e5e0d0dede023435d09bb032d4620b11b37dc014fc.jpg" + } + ] + } + ], + "index": 20, + "virtual_lines": [ + { + "bbox": [ + 250, + 395, + 359, + 411 + ], + "spans": [], + "index": 20 + } + ] + }, + { + "type": "text", + "bbox": [ + 105, + 412, + 356, + 426 + ], + "lines": [ + { + "bbox": [ + 106, + 412, + 356, + 427 + ], + "spans": [ + { + "bbox": [ + 106, + 412, + 164, + 427 + ], + "score": 1.0, + "content": "it follows that", + "type": "text" + }, + { + "bbox": [ + 164, + 413, + 221, + 426 + ], + "score": 0.93, + "content": "y _ { i } ^ { k } - w _ { i } ^ { k } \\to 0", + "type": "inline_equation" + }, + { + "bbox": [ + 221, + 412, + 258, + 427 + ], + "score": 1.0, + "content": "(a.s.) for", + "type": "text" + }, + { + "bbox": [ + 259, + 414, + 310, + 425 + ], + "score": 0.91, + "content": "i = 1 , \\ldots , n", + "type": "inline_equation" + }, + { + "bbox": [ + 311, + 412, + 356, + 427 + ], + "score": 1.0, + "content": ". 
Therefore", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 21 + }, + { + "type": "interline_equation", + "bbox": [ + 253, + 432, + 357, + 446 + ], + "lines": [ + { + "bbox": [ + 253, + 432, + 357, + 446 + ], + "spans": [ + { + "bbox": [ + 253, + 432, + 357, + 446 + ], + "score": 0.9, + "content": "G _ { k } \\to \\| B ( \\hat { z } ) - \\hat { w } _ { n + 1 } \\| ^ { 2 } .", + "type": "interline_equation", + "image_path": "40310841d9035ee07b18142bb91b309417da6f135b21758f3617359ab73da9c5.jpg" + } + ] + } + ], + "index": 22, + "virtual_lines": [ + { + "bbox": [ + 253, + 432, + 357, + 446 + ], + "spans": [], + "index": 22 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 451, + 419, + 465 + ], + "lines": [ + { + "bbox": [ + 105, + 450, + 420, + 466 + ], + "spans": [ + { + "bbox": [ + 105, + 450, + 146, + 466 + ], + "score": 1.0, + "content": "But since", + "type": "text" + }, + { + "bbox": [ + 147, + 452, + 240, + 464 + ], + "score": 0.91, + "content": "( z , \\hat { w } _ { 1 } , \\dots , \\hat { w } _ { n + 1 } ) \\in S", + "type": "inline_equation" + }, + { + "bbox": [ + 240, + 450, + 243, + 466 + ], + "score": 1.0, + "content": ",", + "type": "text" + }, + { + "bbox": [ + 244, + 452, + 302, + 464 + ], + "score": 0.91, + "content": "\\hat { w } _ { n + 1 } = B ( \\hat { z } )", + "type": "inline_equation" + }, + { + "bbox": [ + 302, + 450, + 359, + 466 + ], + "score": 1.0, + "content": "implying that", + "type": "text" + }, + { + "bbox": [ + 360, + 452, + 394, + 463 + ], + "score": 0.89, + "content": "G _ { k } \\to 0", + "type": "inline_equation" + }, + { + "bbox": [ + 394, + 450, + 420, + 466 + ], + "score": 1.0, + "content": "(a.s.).", + "type": "text" + } + ], + "index": 23 + } + ], + "index": 23 + }, + { + "type": "title", + "bbox": [ + 107, + 480, + 232, + 493 + ], + "lines": [ + { + "bbox": [ + 106, + 479, + 233, + 495 + ], + "spans": [ + { + "bbox": [ + 106, + 479, + 233, + 495 + ], + "score": 1.0, + "content": "D PROOF OF LEMMA 1", + 
"type": "text" + } + ], + "index": 24 + } + ], + "index": 24 + }, + { + "type": "text", + "bbox": [ + 106, + 505, + 171, + 518 + ], + "lines": [ + { + "bbox": [ + 105, + 504, + 172, + 519 + ], + "spans": [ + { + "bbox": [ + 105, + 504, + 115, + 519 + ], + "score": 1.0, + "content": "If", + "type": "text" + }, + { + "bbox": [ + 116, + 506, + 148, + 517 + ], + "score": 0.91, + "content": "G _ { k } = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 148, + 504, + 172, + 519 + ], + "score": 1.0, + "content": ", then", + "type": "text" + } + ], + "index": 25 + } + ], + "index": 25 + }, + { + "type": "interline_equation", + "bbox": [ + 220, + 522, + 390, + 537 + ], + "lines": [ + { + "bbox": [ + 220, + 522, + 390, + 537 + ], + "spans": [ + { + "bbox": [ + 220, + 522, + 390, + 537 + ], + "score": 0.87, + "content": "\\forall i = 1 , \\ldots , n : \\quad y _ { i } ^ { k } = w _ { i } ^ { k } \\mathrm { ~ a n d ~ } z ^ { k } = x _ { i } ^ { k } .", + "type": "interline_equation", + "image_path": "b2792ea2f4f3698192fa19e4d41bb503dd15d5640a490afd3ecb253be78cefa8.jpg" + } + ] + } + ], + "index": 26, + "virtual_lines": [ + { + "bbox": [ + 220, + 522, + 390, + 537 + ], + "spans": [], + "index": 26 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 543, + 341, + 557 + ], + "lines": [ + { + "bbox": [ + 106, + 542, + 341, + 558 + ], + "spans": [ + { + "bbox": [ + 106, + 542, + 131, + 558 + ], + "score": 1.0, + "content": "Since", + "type": "text" + }, + { + "bbox": [ + 132, + 544, + 183, + 557 + ], + "score": 0.94, + "content": "y _ { i } ^ { k } \\in A _ { i } ( x _ { i } ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 184, + 542, + 199, + 558 + ], + "score": 1.0, + "content": "for", + "type": "text" + }, + { + "bbox": [ + 200, + 545, + 250, + 556 + ], + "score": 0.88, + "content": "i = 1 , \\ldots , n", + "type": "inline_equation" + }, + { + "bbox": [ + 251, + 542, + 341, + 558 + ], + "score": 1.0, + "content": ", (53) implies that that", + "type": 
"text" + } + ], + "index": 27 + } + ], + "index": 27 + }, + { + "type": "interline_equation", + "bbox": [ + 247, + 562, + 363, + 578 + ], + "lines": [ + { + "bbox": [ + 247, + 562, + 363, + 578 + ], + "spans": [ + { + "bbox": [ + 247, + 562, + 363, + 578 + ], + "score": 0.9, + "content": "\\forall i \\in 1 . . n : \\quad w _ { i } ^ { k } \\in A _ { i } ( z ^ { k } ) .", + "type": "interline_equation", + "image_path": "49e56291aaa31ae64d937bbc229390022d58efc3bf9fce2fc3a74829efd0bc8b.jpg" + } + ] + } + ], + "index": 28, + "virtual_lines": [ + { + "bbox": [ + 247, + 562, + 363, + 578 + ], + "spans": [], + "index": 28 + } + ] + }, + { + "type": "text", + "bbox": [ + 108, + 584, + 498, + 599 + ], + "lines": [ + { + "bbox": [ + 103, + 580, + 502, + 603 + ], + "spans": [ + { + "bbox": [ + 103, + 580, + 159, + 603 + ], + "score": 1.0, + "content": "Furthermore", + "type": "text" + }, + { + "bbox": [ + 159, + 586, + 191, + 597 + ], + "score": 0.92, + "content": "G _ { k } = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 191, + 580, + 260, + 603 + ], + "score": 1.0, + "content": "also implies that", + "type": "text" + }, + { + "bbox": [ + 261, + 585, + 323, + 599 + ], + "score": 0.94, + "content": "w _ { n + 1 } ^ { k } = B ( z ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 323, + 580, + 383, + 603 + ], + "score": 1.0, + "content": ". 
Finally, since", + "type": "text" + }, + { + "bbox": [ + 383, + 584, + 442, + 599 + ], + "score": 0.93, + "content": "\\textstyle \\sum _ { i = 1 } ^ { n + 1 } w _ { i } ^ { k } = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 443, + 580, + 502, + 603 + ], + "score": 1.0, + "content": ", we have that", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 29 + }, + { + "type": "interline_equation", + "bbox": [ + 253, + 604, + 357, + 621 + ], + "lines": [ + { + "bbox": [ + 253, + 604, + 357, + 621 + ], + "spans": [ + { + "bbox": [ + 253, + 604, + 357, + 621 + ], + "score": 0.89, + "content": "( z ^ { k } , w _ { 1 } ^ { k } , \\ldots , w _ { n + 1 } ^ { k } ) \\in { \\mathcal { S } } .", + "type": "interline_equation", + "image_path": "0df7136549ee64cdfc42bacb3918667d0334a6a45cdbf16f6dbb05c58de79d6d.jpg" + } + ] + } + ], + "index": 30, + "virtual_lines": [ + { + "bbox": [ + 253, + 604, + 357, + 621 + ], + "spans": [], + "index": 30 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 631, + 506, + 692 + ], + "lines": [ + { + "bbox": [ + 105, + 631, + 507, + 647 + ], + "spans": [ + { + "bbox": [ + 105, + 631, + 191, + 647 + ], + "score": 1.0, + "content": "Conversely, suppose", + "type": "text" + }, + { + "bbox": [ + 192, + 632, + 291, + 646 + ], + "score": 0.92, + "content": "( z ^ { k } , w _ { 1 } ^ { k } , \\ldots , w _ { n + 1 } ^ { k } ) \\in { \\mathcal { S } }", + "type": "inline_equation" + }, + { + "bbox": [ + 291, + 631, + 365, + 647 + ], + "score": 1.0, + "content": ". 
The definition of", + "type": "text" + }, + { + "bbox": [ + 365, + 634, + 373, + 643 + ], + "score": 0.81, + "content": "s", + "type": "inline_equation" + }, + { + "bbox": [ + 373, + 631, + 424, + 647 + ], + "score": 1.0, + "content": "implies that", + "type": "text" + }, + { + "bbox": [ + 424, + 632, + 487, + 646 + ], + "score": 0.94, + "content": "B ( z ^ { k } ) = w _ { n + 1 } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 487, + 631, + 507, + 647 + ], + "score": 1.0, + "content": "and", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 106, + 645, + 507, + 658 + ], + "spans": [ + { + "bbox": [ + 106, + 645, + 176, + 658 + ], + "score": 1.0, + "content": "furthermore that", + "type": "text" + }, + { + "bbox": [ + 177, + 646, + 232, + 658 + ], + "score": 0.91, + "content": "w _ { i } ^ { k } \\in A _ { i } ( z ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 232, + 645, + 248, + 658 + ], + "score": 1.0, + "content": "for", + "type": "text" + }, + { + "bbox": [ + 249, + 647, + 283, + 657 + ], + "score": 0.86, + "content": "i \\in 1 . . n", + "type": "inline_equation" + }, + { + "bbox": [ + 284, + 645, + 323, + 658 + ], + "score": 1.0, + "content": ". For any", + "type": "text" + }, + { + "bbox": [ + 324, + 646, + 358, + 657 + ], + "score": 0.89, + "content": "i \\in 1 . . n", + "type": "inline_equation" + }, + { + "bbox": [ + 359, + 645, + 507, + 658 + ], + "score": 1.0, + "content": ", considering line 3 of Algorithm 1,", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 657, + 506, + 671 + ], + "spans": [ + { + "bbox": [ + 105, + 657, + 165, + 671 + ], + "score": 1.0, + "content": "we may write", + "type": "text" + }, + { + "bbox": [ + 165, + 658, + 304, + 670 + ], + "score": 0.86, + "content": "t _ { i } ^ { k } = z ^ { k } + \\tau w _ { i . 
} ^ { k } \\in ( I + \\tau A _ { i } ) ( z ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 304, + 657, + 348, + 671 + ], + "score": 1.0, + "content": ", implying", + "type": "text" + }, + { + "bbox": [ + 349, + 657, + 443, + 670 + ], + "score": 0.9, + "content": "z ^ { k } \\in ( I + \\tau A _ { i } ) ^ { - 1 } ( t _ { i } ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 443, + 657, + 506, + 671 + ], + "score": 1.0, + "content": ". But since the", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 667, + 506, + 682 + ], + "spans": [ + { + "bbox": [ + 105, + 667, + 145, + 682 + ], + "score": 1.0, + "content": "resolvent", + "type": "text" + }, + { + "bbox": [ + 145, + 669, + 230, + 681 + ], + "score": 0.85, + "content": "J _ { \\tau A _ { i } } = ( I + \\tau A _ { i } ) ^ { - 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 231, + 667, + 506, + 682 + ], + "score": 1.0, + "content": "is single-valued (Bauschke & Combettes, 2017, Prop. 23.8), we must", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 679, + 506, + 693 + ], + "spans": [ + { + "bbox": [ + 105, + 679, + 127, + 693 + ], + "score": 1.0, + "content": "have", + "type": "text" + }, + { + "bbox": [ + 128, + 680, + 219, + 692 + ], + "score": 0.9, + "content": "z ^ { k } = ( I + \\tau A _ { i } ) ^ { - 1 } ( t _ { i } ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 220, + 679, + 322, + 693 + ], + "score": 1.0, + "content": ". Thus, by line 4, we have", + "type": "text" + }, + { + "bbox": [ + 323, + 680, + 357, + 692 + ], + "score": 0.93, + "content": "x _ { i } ^ { k } = z ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 358, + 679, + 506, + 693 + ], + "score": 1.0, + "content": ". 
We may also derive from line 5 that", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 33 + }, + { + "type": "interline_equation", + "bbox": [ + 201, + 698, + 409, + 713 + ], + "lines": [ + { + "bbox": [ + 201, + 698, + 409, + 713 + ], + "spans": [ + { + "bbox": [ + 201, + 698, + 409, + 713 + ], + "score": 0.89, + "content": "y _ { i } ^ { k } = \\tau ^ { - 1 } ( t _ { i } ^ { k } - x _ { i } ^ { k } ) = \\tau ^ { - 1 } ( z ^ { k } + \\tau w _ { i } ^ { k } - z ^ { k } ) = w _ { i } ^ { k } .", + "type": "interline_equation", + "image_path": "2917366b2617db8f1010409ffffdd31cc441d40e0b08c7a4f40f7f9002464c34.jpg" + } + ] + } + ], + "index": 36, + "virtual_lines": [ + { + "bbox": [ + 201, + 698, + 409, + 713 + ], + "spans": [], + "index": 36 + } + ] + }, + { + "type": "text", + "bbox": [ + 105, + 719, + 482, + 734 + ], + "lines": [ + { + "bbox": [ + 104, + 717, + 480, + 736 + ], + "spans": [ + { + "bbox": [ + 104, + 717, + 154, + 736 + ], + "score": 1.0, + "content": "Thus, since", + "type": "text" + }, + { + "bbox": [ + 154, + 720, + 189, + 733 + ], + "score": 0.93, + "content": "x _ { i } ^ { k } = z ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 189, + 717, + 207, + 736 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 208, + 720, + 244, + 733 + ], + "score": 0.93, + "content": "y _ { i } ^ { k } = w _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 244, + 717, + 260, + 736 + ], + "score": 1.0, + "content": "for", + "type": "text" + }, + { + "bbox": [ + 260, + 721, + 311, + 732 + ], + "score": 0.91, + "content": "i = 1 , \\ldots , n", + "type": "inline_equation" + }, + { + "bbox": [ + 311, + 717, + 329, + 736 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 329, + 720, + 392, + 733 + ], + "score": 0.93, + "content": "w _ { n + 1 } ^ { k } = B ( z ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 392, + 717, + 448, + 736 + ], + "score": 1.0, + 
"content": ", we have that", + "type": "text" + }, + { + "bbox": [ + 448, + 721, + 480, + 732 + ], + "score": 0.92, + "content": "G _ { k } = 0", + "type": "inline_equation" + } + ], + "index": 37 + } + ], + "index": 37 + } + ], + "page_idx": 23, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 106, + 26, + 308, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 308, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 308, + 39 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 760 + ], + "lines": [ + { + "bbox": [ + 298, + 749, + 313, + 765 + ], + "spans": [ + { + "bbox": [ + 298, + 749, + 313, + 765 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 16, + "width": 15 + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 81, + 504, + 105 + ], + "lines": [ + { + "bbox": [ + 105, + 79, + 504, + 97 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 130, + 97 + ], + "score": 1.0, + "content": "Since", + "type": "text" + }, + { + "bbox": [ + 131, + 81, + 202, + 95 + ], + "score": 0.93, + "content": "p ^ { r _ { k } ( v ) } ( v ) \\hat { p } ( v )", + "type": "inline_equation" + }, + { + "bbox": [ + 202, + 79, + 261, + 97 + ], + "score": 1.0, + "content": ", it follows that", + "type": "text" + }, + { + "bbox": [ + 262, + 81, + 358, + 95 + ], + "score": 0.93, + "content": "\\lVert p ^ { r _ { k } ( v ) } ( v ) - \\hat { p } ( v ) \\rVert \\to 0", + "type": "inline_equation" + }, + { + "bbox": [ + 358, + 79, + 400, + 97 + ], + "score": 1.0, + "content": ". 
But since", + "type": "text" + }, + { + "bbox": [ + 400, + 81, + 504, + 95 + ], + "score": 0.77, + "content": "\\hat { p } ( v ) \\in S , \\| p ^ { k } ( v ) - \\hat { p } ( v ) \\|", + "type": "inline_equation" + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 93, + 243, + 106 + ], + "spans": [ + { + "bbox": [ + 106, + 93, + 243, + 106 + ], + "score": 1.0, + "content": "converges by point 2 above. Thus", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 0.5, + "bbox_fs": [ + 105, + 79, + 504, + 106 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 194, + 110, + 415, + 131 + ], + "lines": [ + { + "bbox": [ + 194, + 110, + 415, + 131 + ], + "spans": [ + { + "bbox": [ + 194, + 110, + 415, + 131 + ], + "score": 0.9, + "content": "\\operatorname* { l i m } _ { k \\to \\infty } \\| p ^ { k } ( v ) - \\hat { p } ( v ) \\| = \\operatorname* { l i m } _ { k \\to \\infty } \\| p ^ { r _ { k } ( v ) } ( v ) - \\hat { p } ( v ) \\| = 0 .", + "type": "interline_equation", + "image_path": "748c81bde216d95dc0a3a4f346d3ea01b0ea6d25d54f69de6fd2eba2aeace320.jpg" + } + ] + } + ], + "index": 2, + "virtual_lines": [ + { + "bbox": [ + 194, + 110, + 415, + 131 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 137, + 505, + 161 + ], + "lines": [ + { + "bbox": [ + 105, + 135, + 505, + 152 + ], + "spans": [ + { + "bbox": [ + 105, + 135, + 149, + 152 + ], + "score": 1.0, + "content": "Therefore", + "type": "text" + }, + { + "bbox": [ + 149, + 137, + 226, + 150 + ], + "score": 0.94, + "content": "p ^ { k } ( v ) \\hat { p } ( v ) \\in \\mathcal { S }", + "type": "inline_equation" + }, + { + "bbox": [ + 226, + 135, + 301, + 152 + ], + "score": 1.0, + "content": ". 
Thus there exists", + "type": "text" + }, + { + "bbox": [ + 302, + 138, + 327, + 150 + ], + "score": 0.92, + "content": "\\hat { p } \\in \\mathcal S", + "type": "inline_equation" + }, + { + "bbox": [ + 327, + 135, + 367, + 152 + ], + "score": 1.0, + "content": "such that", + "type": "text" + }, + { + "bbox": [ + 367, + 137, + 399, + 150 + ], + "score": 0.93, + "content": "p ^ { k } \\hat { p }", + "type": "inline_equation" + }, + { + "bbox": [ + 400, + 135, + 505, + 152 + ], + "score": 1.0, + "content": "a.s., which completes the", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 149, + 191, + 161 + ], + "spans": [ + { + "bbox": [ + 105, + 149, + 191, + 161 + ], + "score": 1.0, + "content": "proof of Theorem 1.", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 3.5, + "bbox_fs": [ + 105, + 135, + 505, + 161 + ] + }, + { + "type": "title", + "bbox": [ + 108, + 174, + 259, + 186 + ], + "lines": [ + { + "bbox": [ + 106, + 174, + 259, + 186 + ], + "spans": [ + { + "bbox": [ + 106, + 174, + 259, + 186 + ], + "score": 1.0, + "content": "C.10 TWO ADDITIONAL RESULTS", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 5 + }, + { + "type": "text", + "bbox": [ + 106, + 195, + 505, + 218 + ], + "lines": [ + { + "bbox": [ + 104, + 192, + 506, + 209 + ], + "spans": [ + { + "bbox": [ + 104, + 192, + 426, + 209 + ], + "score": 1.0, + "content": "In this section, we prove two additional useful results about SPS. First, that", + "type": "text" + }, + { + "bbox": [ + 426, + 195, + 462, + 208 + ], + "score": 0.92, + "content": "x _ { i } ^ { k } \\hat { z }", + "type": "inline_equation" + }, + { + "bbox": [ + 463, + 192, + 506, + 209 + ], + "score": 1.0, + "content": "(a.s.) 
for", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 206, + 274, + 219 + ], + "spans": [ + { + "bbox": [ + 107, + 207, + 158, + 218 + ], + "score": 0.91, + "content": "i = 1 , \\ldots , n", + "type": "inline_equation" + }, + { + "bbox": [ + 158, + 206, + 213, + 219 + ], + "score": 1.0, + "content": ". Second, that", + "type": "text" + }, + { + "bbox": [ + 214, + 207, + 248, + 218 + ], + "score": 0.91, + "content": "G _ { k } \\to 0", + "type": "inline_equation" + }, + { + "bbox": [ + 249, + 206, + 274, + 219 + ], + "score": 1.0, + "content": "(a.s.).", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 6.5, + "bbox_fs": [ + 104, + 192, + 506, + 219 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 223, + 146, + 235 + ], + "lines": [ + { + "bbox": [ + 105, + 222, + 146, + 236 + ], + "spans": [ + { + "bbox": [ + 105, + 222, + 146, + 236 + ], + "score": 1.0, + "content": "Note that", + "type": "text" + } + ], + "index": 8 + } + ], + "index": 8, + "bbox_fs": [ + 105, + 222, + 146, + 236 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 258, + 233, + 352, + 248 + ], + "lines": [ + { + "bbox": [ + 258, + 233, + 352, + 248 + ], + "spans": [ + { + "bbox": [ + 258, + 233, + 352, + 248 + ], + "score": 0.91, + "content": "x _ { i } ^ { k } = J _ { \\tau A _ { i } } ( z ^ { k } + \\tau w _ { i } ^ { k } )", + "type": "interline_equation", + "image_path": "0440627561d8ce777d9ee6f78e45ef19515f026b3cf865cdb8a6950b263bea96.jpg" + } + ] + } + ], + "index": 9, + "virtual_lines": [ + { + "bbox": [ + 258, + 233, + 352, + 248 + ], + "spans": [], + "index": 9 + } + ] + }, + { + "type": "text", + "bbox": [ + 104, + 251, + 505, + 272 + ], + "lines": [ + { + "bbox": [ + 105, + 249, + 506, + 265 + ], + "spans": [ + { + "bbox": [ + 105, + 249, + 147, + 265 + ], + "score": 1.0, + "content": "and since", + "type": "text" + }, + { + "bbox": [ + 147, + 250, + 158, + 261 + ], + "score": 0.87, + "content": "z ^ { k }", + "type": "inline_equation" + }, + { + 
"bbox": [ + 159, + 249, + 177, + 265 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 177, + 250, + 191, + 263 + ], + "score": 0.91, + "content": "w _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 191, + 249, + 297, + 265 + ], + "score": 1.0, + "content": "convergence a.s., so does", + "type": "text" + }, + { + "bbox": [ + 298, + 251, + 309, + 263 + ], + "score": 0.9, + "content": "x _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 309, + 249, + 422, + 265 + ], + "score": 1.0, + "content": ". Consider the subsequence", + "type": "text" + }, + { + "bbox": [ + 423, + 251, + 446, + 263 + ], + "score": 0.92, + "content": "q _ { k } ( v )", + "type": "inline_equation" + }, + { + "bbox": [ + 446, + 249, + 506, + 265 + ], + "score": 1.0, + "content": "such that (52)", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 260, + 158, + 275 + ], + "spans": [ + { + "bbox": [ + 105, + 260, + 158, + 275 + ], + "score": 1.0, + "content": "holds. 
Then", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 10.5, + "bbox_fs": [ + 105, + 249, + 506, + 275 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 263, + 270, + 348, + 287 + ], + "lines": [ + { + "bbox": [ + 263, + 270, + 348, + 287 + ], + "spans": [ + { + "bbox": [ + 263, + 270, + 348, + 287 + ], + "score": 0.9, + "content": "z ^ { q _ { k } ( v ) } - x _ { i } ^ { q _ { k } ( v ) } 0", + "type": "interline_equation", + "image_path": "d33e75965917693fec5a75456d8bbfda621de1046323b8908959411188fc4760.jpg" + } + ] + } + ], + "index": 12, + "virtual_lines": [ + { + "bbox": [ + 263, + 270, + 348, + 287 + ], + "spans": [], + "index": 12 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 290, + 126, + 301 + ], + "lines": [ + { + "bbox": [ + 105, + 289, + 126, + 302 + ], + "spans": [ + { + "bbox": [ + 105, + 289, + 126, + 302 + ], + "score": 1.0, + "content": "thus", + "type": "text" + } + ], + "index": 13 + } + ], + "index": 13, + "bbox_fs": [ + 105, + 289, + 126, + 302 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 280, + 298, + 330, + 315 + ], + "lines": [ + { + "bbox": [ + 280, + 298, + 330, + 315 + ], + "spans": [ + { + "bbox": [ + 280, + 298, + 330, + 315 + ], + "score": 0.9, + "content": "x _ { i } ^ { q _ { k } ( v ) } \\hat { z } .", + "type": "interline_equation", + "image_path": "2b2c5a46db20413fa548a746f333c068bdb7c878bb77383191d5cd983dfaa4c5.jpg" + } + ] + } + ], + "index": 14, + "virtual_lines": [ + { + "bbox": [ + 280, + 298, + 330, + 315 + ], + "spans": [], + "index": 14 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 318, + 348, + 331 + ], + "lines": [ + { + "bbox": [ + 106, + 318, + 349, + 332 + ], + "spans": [ + { + "bbox": [ + 106, + 318, + 131, + 332 + ], + "score": 1.0, + "content": "Since", + "type": "text" + }, + { + "bbox": [ + 131, + 318, + 143, + 331 + ], + "score": 0.9, + "content": "x _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 143, + 318, + 339, + 332 + ], + "score": 
1.0, + "content": "converges to some limit (a.s.), that limit must be", + "type": "text" + }, + { + "bbox": [ + 339, + 320, + 345, + 329 + ], + "score": 0.81, + "content": "\\hat { z }", + "type": "inline_equation" + }, + { + "bbox": [ + 345, + 318, + 349, + 332 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 15, + "bbox_fs": [ + 106, + 318, + 349, + 332 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 336, + 152, + 347 + ], + "lines": [ + { + "bbox": [ + 106, + 335, + 153, + 347 + ], + "spans": [ + { + "bbox": [ + 106, + 335, + 153, + 347 + ], + "score": 1.0, + "content": "Recall that", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 16, + "bbox_fs": [ + 106, + 335, + 153, + 347 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 166, + 352, + 443, + 369 + ], + "lines": [ + { + "bbox": [ + 166, + 352, + 443, + 369 + ], + "spans": [ + { + "bbox": [ + 166, + 352, + 443, + 369 + ], + "score": 0.9, + "content": "\\begin{array} { r } { G _ { k } \\doteq \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } . 
} \\end{array}", + "type": "interline_equation", + "image_path": "151c16ae8074e9e0055aa01a49c3f69a548055954baf36b93febd019dfff97d5.jpg" + } + ] + } + ], + "index": 17, + "virtual_lines": [ + { + "bbox": [ + 166, + 352, + 443, + 369 + ], + "spans": [], + "index": 17 + } + ] + }, + { + "type": "text", + "bbox": [ + 105, + 374, + 505, + 396 + ], + "lines": [ + { + "bbox": [ + 104, + 372, + 504, + 388 + ], + "spans": [ + { + "bbox": [ + 104, + 372, + 189, + 388 + ], + "score": 1.0, + "content": "We have shown that", + "type": "text" + }, + { + "bbox": [ + 189, + 374, + 200, + 385 + ], + "score": 0.88, + "content": "z ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 200, + 372, + 218, + 388 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 219, + 374, + 230, + 387 + ], + "score": 0.91, + "content": "x _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 231, + 372, + 329, + 388 + ], + "score": 1.0, + "content": "share the same limit for", + "type": "text" + }, + { + "bbox": [ + 330, + 375, + 380, + 386 + ], + "score": 0.92, + "content": "i = 1 , \\ldots , n", + "type": "inline_equation" + }, + { + "bbox": [ + 381, + 372, + 450, + 388 + ], + "score": 1.0, + "content": "(a.s.). Therefore", + "type": "text" + }, + { + "bbox": [ + 450, + 374, + 504, + 387 + ], + "score": 0.93, + "content": "z ^ { k } - x _ { i } ^ { k } \\to 0", + "type": "inline_equation" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 385, + 157, + 399 + ], + "spans": [ + { + "bbox": [ + 105, + 385, + 157, + 399 + ], + "score": 1.0, + "content": "(a.s.). 
Since", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 18.5, + "bbox_fs": [ + 104, + 372, + 504, + 399 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 250, + 395, + 359, + 411 + ], + "lines": [ + { + "bbox": [ + 250, + 395, + 359, + 411 + ], + "spans": [ + { + "bbox": [ + 250, + 395, + 359, + 411 + ], + "score": 0.91, + "content": "y _ { i } ^ { k } - w _ { i } ^ { k } = \\tau ^ { - 1 } ( z ^ { k } - x _ { i } ^ { k } ) ,", + "type": "interline_equation", + "image_path": "4a835aa7c8343f26bfecb7e5e0d0dede023435d09bb032d4620b11b37dc014fc.jpg" + } + ] + } + ], + "index": 20, + "virtual_lines": [ + { + "bbox": [ + 250, + 395, + 359, + 411 + ], + "spans": [], + "index": 20 + } + ] + }, + { + "type": "text", + "bbox": [ + 105, + 412, + 356, + 426 + ], + "lines": [ + { + "bbox": [ + 106, + 412, + 356, + 427 + ], + "spans": [ + { + "bbox": [ + 106, + 412, + 164, + 427 + ], + "score": 1.0, + "content": "it follows that", + "type": "text" + }, + { + "bbox": [ + 164, + 413, + 221, + 426 + ], + "score": 0.93, + "content": "y _ { i } ^ { k } - w _ { i } ^ { k } \\to 0", + "type": "inline_equation" + }, + { + "bbox": [ + 221, + 412, + 258, + 427 + ], + "score": 1.0, + "content": "(a.s.) for", + "type": "text" + }, + { + "bbox": [ + 259, + 414, + 310, + 425 + ], + "score": 0.91, + "content": "i = 1 , \\ldots , n", + "type": "inline_equation" + }, + { + "bbox": [ + 311, + 412, + 356, + 427 + ], + "score": 1.0, + "content": ". 
Therefore", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 21, + "bbox_fs": [ + 106, + 412, + 356, + 427 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 253, + 432, + 357, + 446 + ], + "lines": [ + { + "bbox": [ + 253, + 432, + 357, + 446 + ], + "spans": [ + { + "bbox": [ + 253, + 432, + 357, + 446 + ], + "score": 0.9, + "content": "G _ { k } \\to \\| B ( \\hat { z } ) - \\hat { w } _ { n + 1 } \\| ^ { 2 } .", + "type": "interline_equation", + "image_path": "40310841d9035ee07b18142bb91b309417da6f135b21758f3617359ab73da9c5.jpg" + } + ] + } + ], + "index": 22, + "virtual_lines": [ + { + "bbox": [ + 253, + 432, + 357, + 446 + ], + "spans": [], + "index": 22 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 451, + 419, + 465 + ], + "lines": [ + { + "bbox": [ + 105, + 450, + 420, + 466 + ], + "spans": [ + { + "bbox": [ + 105, + 450, + 146, + 466 + ], + "score": 1.0, + "content": "But since", + "type": "text" + }, + { + "bbox": [ + 147, + 452, + 240, + 464 + ], + "score": 0.91, + "content": "( z , \\hat { w } _ { 1 } , \\dots , \\hat { w } _ { n + 1 } ) \\in S", + "type": "inline_equation" + }, + { + "bbox": [ + 240, + 450, + 243, + 466 + ], + "score": 1.0, + "content": ",", + "type": "text" + }, + { + "bbox": [ + 244, + 452, + 302, + 464 + ], + "score": 0.91, + "content": "\\hat { w } _ { n + 1 } = B ( \\hat { z } )", + "type": "inline_equation" + }, + { + "bbox": [ + 302, + 450, + 359, + 466 + ], + "score": 1.0, + "content": "implying that", + "type": "text" + }, + { + "bbox": [ + 360, + 452, + 394, + 463 + ], + "score": 0.89, + "content": "G _ { k } \\to 0", + "type": "inline_equation" + }, + { + "bbox": [ + 394, + 450, + 420, + 466 + ], + "score": 1.0, + "content": "(a.s.).", + "type": "text" + } + ], + "index": 23 + } + ], + "index": 23, + "bbox_fs": [ + 105, + 450, + 420, + 466 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 480, + 232, + 493 + ], + "lines": [ + { + "bbox": [ + 106, + 479, + 233, + 495 + ], + "spans": [ + { + 
"bbox": [ + 106, + 479, + 233, + 495 + ], + "score": 1.0, + "content": "D PROOF OF LEMMA 1", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 24 + }, + { + "type": "text", + "bbox": [ + 106, + 505, + 171, + 518 + ], + "lines": [ + { + "bbox": [ + 105, + 504, + 172, + 519 + ], + "spans": [ + { + "bbox": [ + 105, + 504, + 115, + 519 + ], + "score": 1.0, + "content": "If", + "type": "text" + }, + { + "bbox": [ + 116, + 506, + 148, + 517 + ], + "score": 0.91, + "content": "G _ { k } = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 148, + 504, + 172, + 519 + ], + "score": 1.0, + "content": ", then", + "type": "text" + } + ], + "index": 25 + } + ], + "index": 25, + "bbox_fs": [ + 105, + 504, + 172, + 519 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 220, + 522, + 390, + 537 + ], + "lines": [ + { + "bbox": [ + 220, + 522, + 390, + 537 + ], + "spans": [ + { + "bbox": [ + 220, + 522, + 390, + 537 + ], + "score": 0.87, + "content": "\\forall i = 1 , \\ldots , n : \\quad y _ { i } ^ { k } = w _ { i } ^ { k } \\mathrm { ~ a n d ~ } z ^ { k } = x _ { i } ^ { k } .", + "type": "interline_equation", + "image_path": "b2792ea2f4f3698192fa19e4d41bb503dd15d5640a490afd3ecb253be78cefa8.jpg" + } + ] + } + ], + "index": 26, + "virtual_lines": [ + { + "bbox": [ + 220, + 522, + 390, + 537 + ], + "spans": [], + "index": 26 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 543, + 341, + 557 + ], + "lines": [ + { + "bbox": [ + 106, + 542, + 341, + 558 + ], + "spans": [ + { + "bbox": [ + 106, + 542, + 131, + 558 + ], + "score": 1.0, + "content": "Since", + "type": "text" + }, + { + "bbox": [ + 132, + 544, + 183, + 557 + ], + "score": 0.94, + "content": "y _ { i } ^ { k } \\in A _ { i } ( x _ { i } ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 184, + 542, + 199, + 558 + ], + "score": 1.0, + "content": "for", + "type": "text" + }, + { + "bbox": [ + 200, + 545, + 250, + 556 + ], + "score": 0.88, + "content": "i = 1 , \\ldots , n", + "type": 
"inline_equation" + }, + { + "bbox": [ + 251, + 542, + 341, + 558 + ], + "score": 1.0, + "content": ", (53) implies that that", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 27, + "bbox_fs": [ + 106, + 542, + 341, + 558 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 247, + 562, + 363, + 578 + ], + "lines": [ + { + "bbox": [ + 247, + 562, + 363, + 578 + ], + "spans": [ + { + "bbox": [ + 247, + 562, + 363, + 578 + ], + "score": 0.9, + "content": "\\forall i \\in 1 . . n : \\quad w _ { i } ^ { k } \\in A _ { i } ( z ^ { k } ) .", + "type": "interline_equation", + "image_path": "49e56291aaa31ae64d937bbc229390022d58efc3bf9fce2fc3a74829efd0bc8b.jpg" + } + ] + } + ], + "index": 28, + "virtual_lines": [ + { + "bbox": [ + 247, + 562, + 363, + 578 + ], + "spans": [], + "index": 28 + } + ] + }, + { + "type": "text", + "bbox": [ + 108, + 584, + 498, + 599 + ], + "lines": [ + { + "bbox": [ + 103, + 580, + 502, + 603 + ], + "spans": [ + { + "bbox": [ + 103, + 580, + 159, + 603 + ], + "score": 1.0, + "content": "Furthermore", + "type": "text" + }, + { + "bbox": [ + 159, + 586, + 191, + 597 + ], + "score": 0.92, + "content": "G _ { k } = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 191, + 580, + 260, + 603 + ], + "score": 1.0, + "content": "also implies that", + "type": "text" + }, + { + "bbox": [ + 261, + 585, + 323, + 599 + ], + "score": 0.94, + "content": "w _ { n + 1 } ^ { k } = B ( z ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 323, + 580, + 383, + 603 + ], + "score": 1.0, + "content": ". 
Finally, since", + "type": "text" + }, + { + "bbox": [ + 383, + 584, + 442, + 599 + ], + "score": 0.93, + "content": "\\textstyle \\sum _ { i = 1 } ^ { n + 1 } w _ { i } ^ { k } = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 443, + 580, + 502, + 603 + ], + "score": 1.0, + "content": ", we have that", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 29, + "bbox_fs": [ + 103, + 580, + 502, + 603 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 253, + 604, + 357, + 621 + ], + "lines": [ + { + "bbox": [ + 253, + 604, + 357, + 621 + ], + "spans": [ + { + "bbox": [ + 253, + 604, + 357, + 621 + ], + "score": 0.89, + "content": "( z ^ { k } , w _ { 1 } ^ { k } , \\ldots , w _ { n + 1 } ^ { k } ) \\in { \\mathcal { S } } .", + "type": "interline_equation", + "image_path": "0df7136549ee64cdfc42bacb3918667d0334a6a45cdbf16f6dbb05c58de79d6d.jpg" + } + ] + } + ], + "index": 30, + "virtual_lines": [ + { + "bbox": [ + 253, + 604, + 357, + 621 + ], + "spans": [], + "index": 30 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 631, + 506, + 692 + ], + "lines": [ + { + "bbox": [ + 105, + 631, + 507, + 647 + ], + "spans": [ + { + "bbox": [ + 105, + 631, + 191, + 647 + ], + "score": 1.0, + "content": "Conversely, suppose", + "type": "text" + }, + { + "bbox": [ + 192, + 632, + 291, + 646 + ], + "score": 0.92, + "content": "( z ^ { k } , w _ { 1 } ^ { k } , \\ldots , w _ { n + 1 } ^ { k } ) \\in { \\mathcal { S } }", + "type": "inline_equation" + }, + { + "bbox": [ + 291, + 631, + 365, + 647 + ], + "score": 1.0, + "content": ". 
The definition of", + "type": "text" + }, + { + "bbox": [ + 365, + 634, + 373, + 643 + ], + "score": 0.81, + "content": "s", + "type": "inline_equation" + }, + { + "bbox": [ + 373, + 631, + 424, + 647 + ], + "score": 1.0, + "content": "implies that", + "type": "text" + }, + { + "bbox": [ + 424, + 632, + 487, + 646 + ], + "score": 0.94, + "content": "B ( z ^ { k } ) = w _ { n + 1 } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 487, + 631, + 507, + 647 + ], + "score": 1.0, + "content": "and", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 106, + 645, + 507, + 658 + ], + "spans": [ + { + "bbox": [ + 106, + 645, + 176, + 658 + ], + "score": 1.0, + "content": "furthermore that", + "type": "text" + }, + { + "bbox": [ + 177, + 646, + 232, + 658 + ], + "score": 0.91, + "content": "w _ { i } ^ { k } \\in A _ { i } ( z ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 232, + 645, + 248, + 658 + ], + "score": 1.0, + "content": "for", + "type": "text" + }, + { + "bbox": [ + 249, + 647, + 283, + 657 + ], + "score": 0.86, + "content": "i \\in 1 . . n", + "type": "inline_equation" + }, + { + "bbox": [ + 284, + 645, + 323, + 658 + ], + "score": 1.0, + "content": ". For any", + "type": "text" + }, + { + "bbox": [ + 324, + 646, + 358, + 657 + ], + "score": 0.89, + "content": "i \\in 1 . . n", + "type": "inline_equation" + }, + { + "bbox": [ + 359, + 645, + 507, + 658 + ], + "score": 1.0, + "content": ", considering line 3 of Algorithm 1,", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 657, + 506, + 671 + ], + "spans": [ + { + "bbox": [ + 105, + 657, + 165, + 671 + ], + "score": 1.0, + "content": "we may write", + "type": "text" + }, + { + "bbox": [ + 165, + 658, + 304, + 670 + ], + "score": 0.86, + "content": "t _ { i } ^ { k } = z ^ { k } + \\tau w _ { i . 
} ^ { k } \\in ( I + \\tau A _ { i } ) ( z ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 304, + 657, + 348, + 671 + ], + "score": 1.0, + "content": ", implying", + "type": "text" + }, + { + "bbox": [ + 349, + 657, + 443, + 670 + ], + "score": 0.9, + "content": "z ^ { k } \\in ( I + \\tau A _ { i } ) ^ { - 1 } ( t _ { i } ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 443, + 657, + 506, + 671 + ], + "score": 1.0, + "content": ". But since the", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 667, + 506, + 682 + ], + "spans": [ + { + "bbox": [ + 105, + 667, + 145, + 682 + ], + "score": 1.0, + "content": "resolvent", + "type": "text" + }, + { + "bbox": [ + 145, + 669, + 230, + 681 + ], + "score": 0.85, + "content": "J _ { \\tau A _ { i } } = ( I + \\tau A _ { i } ) ^ { - 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 231, + 667, + 506, + 682 + ], + "score": 1.0, + "content": "is single-valued (Bauschke & Combettes, 2017, Prop. 23.8), we must", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 679, + 506, + 693 + ], + "spans": [ + { + "bbox": [ + 105, + 679, + 127, + 693 + ], + "score": 1.0, + "content": "have", + "type": "text" + }, + { + "bbox": [ + 128, + 680, + 219, + 692 + ], + "score": 0.9, + "content": "z ^ { k } = ( I + \\tau A _ { i } ) ^ { - 1 } ( t _ { i } ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 220, + 679, + 322, + 693 + ], + "score": 1.0, + "content": ". Thus, by line 4, we have", + "type": "text" + }, + { + "bbox": [ + 323, + 680, + 357, + 692 + ], + "score": 0.93, + "content": "x _ { i } ^ { k } = z ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 358, + 679, + 506, + 693 + ], + "score": 1.0, + "content": ". 
We may also derive from line 5 that", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 33, + "bbox_fs": [ + 105, + 631, + 507, + 693 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 201, + 698, + 409, + 713 + ], + "lines": [ + { + "bbox": [ + 201, + 698, + 409, + 713 + ], + "spans": [ + { + "bbox": [ + 201, + 698, + 409, + 713 + ], + "score": 0.89, + "content": "y _ { i } ^ { k } = \\tau ^ { - 1 } ( t _ { i } ^ { k } - x _ { i } ^ { k } ) = \\tau ^ { - 1 } ( z ^ { k } + \\tau w _ { i } ^ { k } - z ^ { k } ) = w _ { i } ^ { k } .", + "type": "interline_equation", + "image_path": "2917366b2617db8f1010409ffffdd31cc441d40e0b08c7a4f40f7f9002464c34.jpg" + } + ] + } + ], + "index": 36, + "virtual_lines": [ + { + "bbox": [ + 201, + 698, + 409, + 713 + ], + "spans": [], + "index": 36 + } + ] + }, + { + "type": "text", + "bbox": [ + 105, + 719, + 482, + 734 + ], + "lines": [ + { + "bbox": [ + 104, + 717, + 480, + 736 + ], + "spans": [ + { + "bbox": [ + 104, + 717, + 154, + 736 + ], + "score": 1.0, + "content": "Thus, since", + "type": "text" + }, + { + "bbox": [ + 154, + 720, + 189, + 733 + ], + "score": 0.93, + "content": "x _ { i } ^ { k } = z ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 189, + 717, + 207, + 736 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 208, + 720, + 244, + 733 + ], + "score": 0.93, + "content": "y _ { i } ^ { k } = w _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 244, + 717, + 260, + 736 + ], + "score": 1.0, + "content": "for", + "type": "text" + }, + { + "bbox": [ + 260, + 721, + 311, + 732 + ], + "score": 0.91, + "content": "i = 1 , \\ldots , n", + "type": "inline_equation" + }, + { + "bbox": [ + 311, + 717, + 329, + 736 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 329, + 720, + 392, + 733 + ], + "score": 0.93, + "content": "w _ { n + 1 } ^ { k } = B ( z ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 392, 
+ 717, + 448, + 736 + ], + "score": 1.0, + "content": ", we have that", + "type": "text" + }, + { + "bbox": [ + 448, + 721, + 480, + 732 + ], + "score": 0.92, + "content": "G _ { k } = 0", + "type": "inline_equation" + } + ], + "index": 37 + } + ], + "index": 37, + "bbox_fs": [ + 104, + 717, + 480, + 736 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "title", + "bbox": [ + 107, + 80, + 244, + 94 + ], + "lines": [ + { + "bbox": [ + 105, + 79, + 245, + 96 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 245, + 96 + ], + "score": 1.0, + "content": "E PROOF OF THEOREM 2", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 106, + 105, + 417, + 118 + ], + "lines": [ + { + "bbox": [ + 105, + 105, + 419, + 119 + ], + "spans": [ + { + "bbox": [ + 105, + 105, + 419, + 119 + ], + "score": 1.0, + "content": "In addition to the proof, we provide a more detailed statement of the theorem:", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 1 + }, + { + "type": "text", + "bbox": [ + 106, + 120, + 371, + 133 + ], + "lines": [ + { + "bbox": [ + 106, + 120, + 373, + 133 + ], + "spans": [ + { + "bbox": [ + 106, + 120, + 251, + 133 + ], + "score": 1.0, + "content": "Theorem 3. 
Fix the total iterations", + "type": "text" + }, + { + "bbox": [ + 251, + 120, + 280, + 132 + ], + "score": 0.9, + "content": "K \\geq 1", + "type": "inline_equation" + }, + { + "bbox": [ + 280, + 120, + 373, + 133 + ], + "score": 1.0, + "content": "of Algorithm 1 and set", + "type": "text" + } + ], + "index": 2 + } + ], + "index": 2 + }, + { + "type": "interline_equation", + "bbox": [ + 173, + 136, + 436, + 180 + ], + "lines": [ + { + "bbox": [ + 173, + 136, + 436, + 180 + ], + "spans": [ + { + "bbox": [ + 173, + 136, + 436, + 180 + ], + "score": 0.93, + "content": "\\begin{array} { l l } { { \\forall k = 1 , \\dots , K : } } & { { \\qquad \\rho _ { k } = \\rho \\doteq \\operatorname* { m i n } \\left\\{ K ^ { - 1 / 4 } , \\frac { 1 } { 2 L } \\right\\} } } \\\\ { { \\forall k = 1 , \\dots , K : } } & { { \\qquad \\alpha _ { k } = \\alpha \\doteq C _ { f } \\rho ^ { 2 } } } \\end{array}", + "type": "interline_equation", + "image_path": "936cde96fcc58bc50ba91cae51c795cbfb5482cabf41e73a300f6a1d380110b9.jpg" + } + ] + } + ], + "index": 4, + "virtual_lines": [ + { + "bbox": [ + 173, + 136, + 436, + 150.66666666666666 + ], + "spans": [], + "index": 3 + }, + { + "bbox": [ + 173, + 150.66666666666666, + 436, + 165.33333333333331 + ], + "spans": [], + "index": 4 + }, + { + "bbox": [ + 173, + 165.33333333333331, + 436, + 179.99999999999997 + ], + "spans": [], + "index": 5 + } + ] + }, + { + "type": "text", + "bbox": [ + 105, + 182, + 359, + 195 + ], + "lines": [ + { + "bbox": [ + 104, + 181, + 360, + 197 + ], + "spans": [ + { + "bbox": [ + 104, + 181, + 144, + 197 + ], + "score": 1.0, + "content": "for some", + "type": "text" + }, + { + "bbox": [ + 144, + 183, + 175, + 195 + ], + "score": 0.91, + "content": "C _ { f } > 0", + "type": "inline_equation" + }, + { + "bbox": [ + 176, + 181, + 326, + 197 + ], + "score": 1.0, + "content": ". Suppose (9)-(11) hold. 
Then for any", + "type": "text" + }, + { + "bbox": [ + 326, + 183, + 356, + 195 + ], + "score": 0.91, + "content": "p ^ { * } \\in { \\mathcal { S } }", + "type": "inline_equation" + }, + { + "bbox": [ + 356, + 181, + 360, + 197 + ], + "score": 1.0, + "content": ",", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 6 + }, + { + "type": "interline_equation", + "bbox": [ + 114, + 200, + 482, + 272 + ], + "lines": [ + { + "bbox": [ + 114, + 200, + 482, + 272 + ], + "spans": [ + { + "bbox": [ + 114, + 200, + 482, + 272 + ], + "score": 0.92, + "content": "\\begin{array} { l } { \\displaystyle \\frac { 1 } { K } \\sum _ { j = 1 } ^ { K } \\mathbb { E } [ G _ { j } ] \\leq \\frac { 8 L ^ { 3 } \\exp \\left( C _ { f } ( C _ { 1 } + C _ { 3 } ) \\right) } { C _ { f } \\operatorname* { m i n } \\{ \\tau , \\tau ^ { - 1 } \\} K } \\left( \\| p ^ { 1 } - p ^ { * } \\| ^ { 2 } + \\frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } \\right) \\mathrm { ~ } f o r ~ K < ( 2 L ) ^ { 4 } } \\\\ { \\displaystyle \\frac { 1 } { K } \\sum _ { j = 1 } ^ { K } \\mathbb { E } [ G _ { j } ] \\leq \\frac { \\exp \\left( C _ { f } ( C _ { 1 } + C _ { 3 } ) \\right) } { C _ { f } \\operatorname* { m i n } \\{ \\tau , \\tau ^ { - 1 } \\} K ^ { 1 / 4 } } \\left( \\| p ^ { 1 } - p ^ { * } \\| ^ { 2 } + \\frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } \\right) \\mathrm { ~ } f o r ~ K \\geq ( 2 L ) ^ { 4 } . 
} \\end{array}", + "type": "interline_equation", + "image_path": "c9fa1d04d7da23ebdc1b4bef62f8dc703baf5af7613ef6df359b99fda6b816f6.jpg" + } + ] + } + ], + "index": 8, + "virtual_lines": [ + { + "bbox": [ + 114, + 200, + 482, + 224.0 + ], + "spans": [], + "index": 7 + }, + { + "bbox": [ + 114, + 224.0, + 482, + 248.0 + ], + "spans": [], + "index": 8 + }, + { + "bbox": [ + 114, + 248.0, + 482, + 272.0 + ], + "spans": [], + "index": 9 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 274, + 505, + 298 + ], + "lines": [ + { + "bbox": [ + 105, + 273, + 505, + 289 + ], + "spans": [ + { + "bbox": [ + 105, + 273, + 134, + 289 + ], + "score": 1.0, + "content": "where", + "type": "text" + }, + { + "bbox": [ + 135, + 275, + 148, + 286 + ], + "score": 0.89, + "content": "G _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 149, + 273, + 362, + 289 + ], + "score": 1.0, + "content": "is the approximation residual defined in (14), and", + "type": "text" + }, + { + "bbox": [ + 363, + 275, + 423, + 287 + ], + "score": 0.92, + "content": "C _ { 1 } , C _ { 2 } , C _ { 3 } , C _ { 4 }", + "type": "inline_equation" + }, + { + "bbox": [ + 424, + 273, + 505, + 289 + ], + "score": 1.0, + "content": "are the nonegative", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 285, + 388, + 300 + ], + "spans": [ + { + "bbox": [ + 105, + 285, + 388, + 300 + ], + "score": 1.0, + "content": "constants defined in (33), (34), (48), and (49), respectively. 
Therefore,", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 10.5 + }, + { + "type": "interline_equation", + "bbox": [ + 247, + 302, + 363, + 337 + ], + "lines": [ + { + "bbox": [ + 247, + 302, + 363, + 337 + ], + "spans": [ + { + "bbox": [ + 247, + 302, + 363, + 337 + ], + "score": 0.94, + "content": "\\frac { 1 } { K } \\sum _ { j = 1 } ^ { K } \\mathbb { E } [ G _ { j } ] = \\mathcal { O } ( K ^ { - 1 / 4 } ) .", + "type": "interline_equation", + "image_path": "e986e6e1960ab0f3a833a9e77a76ef9d2f01388c506cb00bb90f663f549d2789.jpg" + } + ] + } + ], + "index": 12.5, + "virtual_lines": [ + { + "bbox": [ + 247, + 302, + 363, + 319.5 + ], + "spans": [], + "index": 12 + }, + { + "bbox": [ + 247, + 319.5, + 363, + 337.0 + ], + "spans": [], + "index": 13 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 348, + 506, + 371 + ], + "lines": [ + { + "bbox": [ + 106, + 347, + 506, + 360 + ], + "spans": [ + { + "bbox": [ + 106, + 347, + 153, + 360 + ], + "score": 1.0, + "content": "Proof. 
Fix", + "type": "text" + }, + { + "bbox": [ + 153, + 350, + 188, + 359 + ], + "score": 0.87, + "content": "\\alpha _ { k } = \\alpha", + "type": "inline_equation" + }, + { + "bbox": [ + 188, + 347, + 208, + 360 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 208, + 351, + 240, + 360 + ], + "score": 0.89, + "content": "\\rho _ { k } = \\rho", + "type": "inline_equation" + }, + { + "bbox": [ + 240, + 347, + 273, + 360 + ], + "score": 1.0, + "content": ", where", + "type": "text" + }, + { + "bbox": [ + 273, + 351, + 281, + 358 + ], + "score": 0.81, + "content": "\\alpha", + "type": "inline_equation" + }, + { + "bbox": [ + 281, + 347, + 300, + 360 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 300, + 351, + 307, + 360 + ], + "score": 0.83, + "content": "\\rho", + "type": "inline_equation" + }, + { + "bbox": [ + 308, + 347, + 506, + 360 + ], + "score": 1.0, + "content": "are the respective right-hand sides of (55)-(56).", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 357, + 486, + 372 + ], + "spans": [ + { + "bbox": [ + 105, + 357, + 377, + 372 + ], + "score": 1.0, + "content": "Lemma 3 implies that (21) so long as (9)-(11) hold and the stepsize", + "type": "text" + }, + { + "bbox": [ + 378, + 361, + 384, + 370 + ], + "score": 0.82, + "content": "\\rho", + "type": "inline_equation" + }, + { + "bbox": [ + 384, + 357, + 419, + 372 + ], + "score": 1.0, + "content": "satisfies", + "type": "text" + }, + { + "bbox": [ + 419, + 358, + 456, + 370 + ], + "score": 0.93, + "content": "\\rho < L ^ { - 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 456, + 357, + 486, + 372 + ], + "score": 1.0, + "content": ". 
Since", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 14.5 + }, + { + "type": "interline_equation", + "bbox": [ + 240, + 374, + 370, + 402 + ], + "lines": [ + { + "bbox": [ + 240, + 374, + 370, + 402 + ], + "spans": [ + { + "bbox": [ + 240, + 374, + 370, + 402 + ], + "score": 0.94, + "content": "\\rho = \\operatorname* { m i n } \\left\\{ K ^ { - 1 / 4 } , \\frac { 1 } { 2 L } \\right\\} \\leq \\frac { 1 } { 2 L } ,", + "type": "interline_equation", + "image_path": "8bcf7b82a6af0b050ba47f614a56f1e1812efd3f74d3a27177cc9bd0f241b69b.jpg" + } + ] + } + ], + "index": 16, + "virtual_lines": [ + { + "bbox": [ + 240, + 374, + 370, + 402 + ], + "spans": [], + "index": 16 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 406, + 227, + 417 + ], + "lines": [ + { + "bbox": [ + 106, + 405, + 228, + 419 + ], + "spans": [ + { + "bbox": [ + 106, + 405, + 228, + 419 + ], + "score": 1.0, + "content": "we conclude that (21) applies.", + "type": "text" + } + ], + "index": 17 + } + ], + "index": 17 + }, + { + "type": "text", + "bbox": [ + 106, + 422, + 307, + 435 + ], + "lines": [ + { + "bbox": [ + 106, + 422, + 307, + 436 + ], + "spans": [ + { + "bbox": [ + 106, + 422, + 187, + 436 + ], + "score": 1.0, + "content": "Rewriting (21) with", + "type": "text" + }, + { + "bbox": [ + 188, + 425, + 220, + 434 + ], + "score": 0.89, + "content": "\\alpha _ { k } = \\alpha", + "type": "inline_equation" + }, + { + "bbox": [ + 221, + 422, + 238, + 436 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 238, + 425, + 268, + 434 + ], + "score": 0.93, + "content": "\\rho _ { k } = \\rho", + "type": "inline_equation" + }, + { + "bbox": [ + 268, + 422, + 307, + 436 + ], + "score": 1.0, + "content": ", we have", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 18 + }, + { + "type": "interline_equation", + "bbox": [ + 131, + 438, + 479, + 453 + ], + "lines": [ + { + "bbox": [ + 131, + 438, + 479, + 453 + ], + "spans": [ + { + "bbox": [ + 131, + 
438, + 479, + 453 + ], + "score": 0.89, + "content": "\\begin{array} { r } { \\mathbb { E } [ \\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } | \\mathcal { F } _ { k } ] \\le ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - \\alpha \\rho T _ { k } + C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } . } \\end{array}", + "type": "interline_equation", + "image_path": "f3ceb59174b248b72d8d2ac6ae444e5be98b9ef6963dd7f8cfaf30b57ea1d74c.jpg" + } + ] + } + ], + "index": 19, + "virtual_lines": [ + { + "bbox": [ + 131, + 438, + 479, + 453 + ], + "spans": [], + "index": 19 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 457, + 301, + 469 + ], + "lines": [ + { + "bbox": [ + 105, + 456, + 302, + 471 + ], + "spans": [ + { + "bbox": [ + 105, + 456, + 250, + 471 + ], + "score": 1.0, + "content": "Therefore, taking expectations over", + "type": "text" + }, + { + "bbox": [ + 250, + 458, + 263, + 469 + ], + "score": 0.89, + "content": "\\mathcal { F } _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 263, + 456, + 302, + 471 + ], + "score": 1.0, + "content": ", we have", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 20 + }, + { + "type": "interline_equation", + "bbox": [ + 128, + 473, + 467, + 488 + ], + "lines": [ + { + "bbox": [ + 128, + 473, + 467, + 488 + ], + "spans": [ + { + "bbox": [ + 128, + 473, + 467, + 488 + ], + "score": 0.87, + "content": "\\begin{array} { r } { \\mathbb { E } \\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } \\leq ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) \\mathbb { E } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - \\alpha \\rho \\mathbb { E } T _ { k } + C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } . 
} \\end{array}", + "type": "interline_equation", + "image_path": "be002811de5c0ba4afb39e57c2ef241e8e0e6a2e321c863bf375349c9c4c0b6f.jpg" + } + ] + } + ], + "index": 21, + "virtual_lines": [ + { + "bbox": [ + 128, + 473, + 467, + 488 + ], + "spans": [], + "index": 21 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 492, + 152, + 504 + ], + "lines": [ + { + "bbox": [ + 106, + 492, + 153, + 504 + ], + "spans": [ + { + "bbox": [ + 106, + 492, + 153, + 504 + ], + "score": 1.0, + "content": "Recall that", + "type": "text" + } + ], + "index": 22 + } + ], + "index": 22 + }, + { + "type": "interline_equation", + "bbox": [ + 143, + 506, + 466, + 539 + ], + "lines": [ + { + "bbox": [ + 143, + 506, + 466, + 539 + ], + "spans": [ + { + "bbox": [ + 143, + 506, + 466, + 539 + ], + "score": 0.91, + "content": "T _ { k } \\doteq \\frac { \\tau } { \\rho } \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\frac { 1 } { \\rho \\tau } \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + 2 ( 1 - \\overline { { \\rho } } L ) \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } ,", + "type": "interline_equation", + "image_path": "c0c041abdc366d835ab20547eb97d52e9bd343c6e14371ccf0b8062a0abe9419.jpg" + } + ] + } + ], + "index": 24, + "virtual_lines": [ + { + "bbox": [ + 143, + 506, + 466, + 517.0 + ], + "spans": [], + "index": 23 + }, + { + "bbox": [ + 143, + 517.0, + 466, + 528.0 + ], + "spans": [], + "index": 24 + }, + { + "bbox": [ + 143, + 528.0, + 466, + 539.0 + ], + "spans": [], + "index": 25 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 542, + 504, + 577 + ], + "lines": [ + { + "bbox": [ + 105, + 541, + 506, + 556 + ], + "spans": [ + { + "bbox": [ + 105, + 541, + 304, + 556 + ], + "score": 1.0, + "content": "where for the first two terms we have simply set", + "type": "text" + }, + { + "bbox": [ + 304, + 543, + 330, + 554 + ], + "score": 0.91, + "content": "\\rho = \\overline { { \\rho } }", + "type": 
"inline_equation" + }, + { + "bbox": [ + 330, + 541, + 506, + 556 + ], + "score": 1.0, + "content": "because the stepsize is constant. However,", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 553, + 505, + 566 + ], + "spans": [ + { + "bbox": [ + 105, + 553, + 320, + 566 + ], + "score": 1.0, + "content": "for the final term, we will still use an upper bound,", + "type": "text" + }, + { + "bbox": [ + 320, + 555, + 326, + 565 + ], + "score": 0.76, + "content": "\\overline { { \\rho } }", + "type": "inline_equation" + }, + { + "bbox": [ + 327, + 553, + 343, + 566 + ], + "score": 1.0, + "content": ", on", + "type": "text" + }, + { + "bbox": [ + 344, + 556, + 350, + 565 + ], + "score": 0.79, + "content": "\\rho", + "type": "inline_equation" + }, + { + "bbox": [ + 351, + 553, + 505, + 566 + ], + "score": 1.0, + "content": ". In the current setting, we know that", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 107, + 564, + 462, + 578 + ], + "spans": [ + { + "bbox": [ + 107, + 564, + 166, + 577 + ], + "score": 0.93, + "content": "\\rho \\leq ( 1 / 2 ) L ^ { - 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 166, + 564, + 270, + 578 + ], + "score": 1.0, + "content": "and therefore we may set", + "type": "text" + }, + { + "bbox": [ + 271, + 564, + 330, + 577 + ], + "score": 0.95, + "content": "\\overline { { \\rho } } = ( 1 / 2 ) L ^ { - 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 330, + 564, + 357, + 578 + ], + "score": 1.0, + "content": ". 
Thus", + "type": "text" + }, + { + "bbox": [ + 357, + 564, + 415, + 577 + ], + "score": 0.92, + "content": "1 - \\overline { { \\rho } } L = 1 / 2", + "type": "inline_equation" + }, + { + "bbox": [ + 416, + 564, + 462, + 578 + ], + "score": 1.0, + "content": ", leading to", + "type": "text" + } + ], + "index": 28 + } + ], + "index": 27 + }, + { + "type": "interline_equation", + "bbox": [ + 145, + 580, + 465, + 613 + ], + "lines": [ + { + "bbox": [ + 145, + 580, + 465, + 613 + ], + "spans": [ + { + "bbox": [ + 145, + 580, + 465, + 613 + ], + "score": 0.92, + "content": "\\rho \\mathbb { E } T _ { k } = \\tau \\sum _ { i = 1 } ^ { n } \\mathbb { E } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\tau ^ { - 1 } \\sum _ { i = 1 } ^ { n } \\mathbb { E } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + \\rho \\mathbb { E } \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } .", + "type": "interline_equation", + "image_path": "7d17604a57c4da09806d10a8726be4a9262bf28ad5f53bcf71543543be7d2831.jpg" + } + ] + } + ], + "index": 30, + "virtual_lines": [ + { + "bbox": [ + 145, + 580, + 465, + 591.0 + ], + "spans": [], + "index": 29 + }, + { + "bbox": [ + 145, + 591.0, + 465, + 602.0 + ], + "spans": [], + "index": 30 + }, + { + "bbox": [ + 145, + 602.0, + 465, + 613.0 + ], + "spans": [], + "index": 31 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 617, + 122, + 628 + ], + "lines": [ + { + "bbox": [ + 105, + 616, + 123, + 630 + ], + "spans": [ + { + "bbox": [ + 105, + 616, + 123, + 630 + ], + "score": 1.0, + "content": "Let", + "type": "text" + } + ], + "index": 32 + } + ], + "index": 32 + }, + { + "type": "interline_equation", + "bbox": [ + 132, + 630, + 478, + 663 + ], + "lines": [ + { + "bbox": [ + 132, + 630, + 478, + 663 + ], + "spans": [ + { + "bbox": [ + 132, + 630, + 478, + 663 + ], + "score": 0.92, + "content": "U _ { k } \\doteq \\mathbb { E } \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } \\qquad W _ { k } \\doteq \\tau \\sum _ { i = 1 } ^ 
{ n } \\mathbb { E } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\tau ^ { - 1 } \\sum _ { i = 1 } ^ { n } \\mathbb { E } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } ,", + "type": "interline_equation", + "image_path": "3542b23bd6ba76ee1a5024fd546bbc9ce81c8724379783e8cc3e1024c5f22f77.jpg" + } + ] + } + ], + "index": 34, + "virtual_lines": [ + { + "bbox": [ + 132, + 630, + 478, + 641.0 + ], + "spans": [], + "index": 33 + }, + { + "bbox": [ + 132, + 641.0, + 478, + 652.0 + ], + "spans": [], + "index": 34 + }, + { + "bbox": [ + 132, + 652.0, + 478, + 663.0 + ], + "spans": [], + "index": 35 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 667, + 135, + 677 + ], + "lines": [ + { + "bbox": [ + 105, + 665, + 136, + 678 + ], + "spans": [ + { + "bbox": [ + 105, + 665, + 136, + 678 + ], + "score": 1.0, + "content": "so that", + "type": "text" + } + ], + "index": 36 + } + ], + "index": 36 + }, + { + "type": "interline_equation", + "bbox": [ + 264, + 685, + 346, + 695 + ], + "lines": [ + { + "bbox": [ + 264, + 685, + 346, + 695 + ], + "spans": [ + { + "bbox": [ + 264, + 685, + 346, + 695 + ], + "score": 0.88, + "content": "\\rho \\mathbb { E } T _ { k } = \\rho U _ { k } + W _ { k } ,", + "type": "interline_equation", + "image_path": "2da8171408a871e6f550dde6e27402a69dbce0d53f091c1466a274077a810e82.jpg" + } + ] + } + ], + "index": 37, + "virtual_lines": [ + { + "bbox": [ + 264, + 685, + 346, + 695 + ], + "spans": [], + "index": 37 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 700, + 154, + 712 + ], + "lines": [ + { + "bbox": [ + 105, + 699, + 156, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 699, + 156, + 712 + ], + "score": 1.0, + "content": "and also let", + "type": "text" + } + ], + "index": 38 + } + ], + "index": 38 + }, + { + "type": "interline_equation", + "bbox": [ + 265, + 717, + 345, + 730 + ], + "lines": [ + { + "bbox": [ + 265, + 717, + 345, + 730 + ], + "spans": [ + { + "bbox": [ + 265, + 717, + 345, + 730 + ], + "score": 0.9, + 
"content": "V _ { k } \\doteq \\mathbb { E } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } .", + "type": "interline_equation", + "image_path": "ca3de38cff2c10b5a84076d4198e8173a2cbe40638114fda907a44f2be6dac48.jpg" + } + ] + } + ], + "index": 39, + "virtual_lines": [ + { + "bbox": [ + 265, + 717, + 345, + 730 + ], + "spans": [], + "index": 39 + } + ] + } + ], + "page_idx": 24, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 106, + 26, + 308, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 308, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 308, + 39 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 760 + ], + "lines": [ + { + "bbox": [ + 298, + 749, + 313, + 765 + ], + "spans": [ + { + "bbox": [ + 298, + 749, + 313, + 765 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 16, + "width": 15 + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "title", + "bbox": [ + 107, + 80, + 244, + 94 + ], + "lines": [ + { + "bbox": [ + 105, + 79, + 245, + 96 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 245, + 96 + ], + "score": 1.0, + "content": "E PROOF OF THEOREM 2", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 106, + 105, + 417, + 118 + ], + "lines": [ + { + "bbox": [ + 105, + 105, + 419, + 119 + ], + "spans": [ + { + "bbox": [ + 105, + 105, + 419, + 119 + ], + "score": 1.0, + "content": "In addition to the proof, we provide a more detailed statement of the theorem:", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 1, + "bbox_fs": [ + 105, + 105, + 419, + 119 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 120, + 371, + 133 + ], + "lines": [ + { + "bbox": [ + 106, + 120, + 373, + 133 + ], + "spans": [ + { + "bbox": [ + 106, + 120, + 251, + 133 + ], + "score": 1.0, + "content": "Theorem 3. 
Fix the total iterations", + "type": "text" + }, + { + "bbox": [ + 251, + 120, + 280, + 132 + ], + "score": 0.9, + "content": "K \\geq 1", + "type": "inline_equation" + }, + { + "bbox": [ + 280, + 120, + 373, + 133 + ], + "score": 1.0, + "content": "of Algorithm 1 and set", + "type": "text" + } + ], + "index": 2 + } + ], + "index": 2, + "bbox_fs": [ + 106, + 120, + 373, + 133 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 173, + 136, + 436, + 180 + ], + "lines": [ + { + "bbox": [ + 173, + 136, + 436, + 180 + ], + "spans": [ + { + "bbox": [ + 173, + 136, + 436, + 180 + ], + "score": 0.93, + "content": "\\begin{array} { l l } { { \\forall k = 1 , \\dots , K : } } & { { \\qquad \\rho _ { k } = \\rho \\doteq \\operatorname* { m i n } \\left\\{ K ^ { - 1 / 4 } , \\frac { 1 } { 2 L } \\right\\} } } \\\\ { { \\forall k = 1 , \\dots , K : } } & { { \\qquad \\alpha _ { k } = \\alpha \\doteq C _ { f } \\rho ^ { 2 } } } \\end{array}", + "type": "interline_equation", + "image_path": "936cde96fcc58bc50ba91cae51c795cbfb5482cabf41e73a300f6a1d380110b9.jpg" + } + ] + } + ], + "index": 4, + "virtual_lines": [ + { + "bbox": [ + 173, + 136, + 436, + 150.66666666666666 + ], + "spans": [], + "index": 3 + }, + { + "bbox": [ + 173, + 150.66666666666666, + 436, + 165.33333333333331 + ], + "spans": [], + "index": 4 + }, + { + "bbox": [ + 173, + 165.33333333333331, + 436, + 179.99999999999997 + ], + "spans": [], + "index": 5 + } + ] + }, + { + "type": "text", + "bbox": [ + 105, + 182, + 359, + 195 + ], + "lines": [ + { + "bbox": [ + 104, + 181, + 360, + 197 + ], + "spans": [ + { + "bbox": [ + 104, + 181, + 144, + 197 + ], + "score": 1.0, + "content": "for some", + "type": "text" + }, + { + "bbox": [ + 144, + 183, + 175, + 195 + ], + "score": 0.91, + "content": "C _ { f } > 0", + "type": "inline_equation" + }, + { + "bbox": [ + 176, + 181, + 326, + 197 + ], + "score": 1.0, + "content": ". Suppose (9)-(11) hold. 
Then for any", + "type": "text" + }, + { + "bbox": [ + 326, + 183, + 356, + 195 + ], + "score": 0.91, + "content": "p ^ { * } \\in { \\mathcal { S } }", + "type": "inline_equation" + }, + { + "bbox": [ + 356, + 181, + 360, + 197 + ], + "score": 1.0, + "content": ",", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 6, + "bbox_fs": [ + 104, + 181, + 360, + 197 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 114, + 200, + 482, + 272 + ], + "lines": [ + { + "bbox": [ + 114, + 200, + 482, + 272 + ], + "spans": [ + { + "bbox": [ + 114, + 200, + 482, + 272 + ], + "score": 0.92, + "content": "\\begin{array} { l } { \\displaystyle \\frac { 1 } { K } \\sum _ { j = 1 } ^ { K } \\mathbb { E } [ G _ { j } ] \\leq \\frac { 8 L ^ { 3 } \\exp \\left( C _ { f } ( C _ { 1 } + C _ { 3 } ) \\right) } { C _ { f } \\operatorname* { m i n } \\{ \\tau , \\tau ^ { - 1 } \\} K } \\left( \\| p ^ { 1 } - p ^ { * } \\| ^ { 2 } + \\frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } \\right) \\mathrm { ~ } f o r ~ K < ( 2 L ) ^ { 4 } } \\\\ { \\displaystyle \\frac { 1 } { K } \\sum _ { j = 1 } ^ { K } \\mathbb { E } [ G _ { j } ] \\leq \\frac { \\exp \\left( C _ { f } ( C _ { 1 } + C _ { 3 } ) \\right) } { C _ { f } \\operatorname* { m i n } \\{ \\tau , \\tau ^ { - 1 } \\} K ^ { 1 / 4 } } \\left( \\| p ^ { 1 } - p ^ { * } \\| ^ { 2 } + \\frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } \\right) \\mathrm { ~ } f o r ~ K \\geq ( 2 L ) ^ { 4 } . 
} \\end{array}", + "type": "interline_equation", + "image_path": "c9fa1d04d7da23ebdc1b4bef62f8dc703baf5af7613ef6df359b99fda6b816f6.jpg" + } + ] + } + ], + "index": 8, + "virtual_lines": [ + { + "bbox": [ + 114, + 200, + 482, + 224.0 + ], + "spans": [], + "index": 7 + }, + { + "bbox": [ + 114, + 224.0, + 482, + 248.0 + ], + "spans": [], + "index": 8 + }, + { + "bbox": [ + 114, + 248.0, + 482, + 272.0 + ], + "spans": [], + "index": 9 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 274, + 505, + 298 + ], + "lines": [ + { + "bbox": [ + 105, + 273, + 505, + 289 + ], + "spans": [ + { + "bbox": [ + 105, + 273, + 134, + 289 + ], + "score": 1.0, + "content": "where", + "type": "text" + }, + { + "bbox": [ + 135, + 275, + 148, + 286 + ], + "score": 0.89, + "content": "G _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 149, + 273, + 362, + 289 + ], + "score": 1.0, + "content": "is the approximation residual defined in (14), and", + "type": "text" + }, + { + "bbox": [ + 363, + 275, + 423, + 287 + ], + "score": 0.92, + "content": "C _ { 1 } , C _ { 2 } , C _ { 3 } , C _ { 4 }", + "type": "inline_equation" + }, + { + "bbox": [ + 424, + 273, + 505, + 289 + ], + "score": 1.0, + "content": "are the nonegative", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 285, + 388, + 300 + ], + "spans": [ + { + "bbox": [ + 105, + 285, + 388, + 300 + ], + "score": 1.0, + "content": "constants defined in (33), (34), (48), and (49), respectively. 
Therefore,", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 10.5, + "bbox_fs": [ + 105, + 273, + 505, + 300 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 247, + 302, + 363, + 337 + ], + "lines": [ + { + "bbox": [ + 247, + 302, + 363, + 337 + ], + "spans": [ + { + "bbox": [ + 247, + 302, + 363, + 337 + ], + "score": 0.94, + "content": "\\frac { 1 } { K } \\sum _ { j = 1 } ^ { K } \\mathbb { E } [ G _ { j } ] = \\mathcal { O } ( K ^ { - 1 / 4 } ) .", + "type": "interline_equation", + "image_path": "e986e6e1960ab0f3a833a9e77a76ef9d2f01388c506cb00bb90f663f549d2789.jpg" + } + ] + } + ], + "index": 12.5, + "virtual_lines": [ + { + "bbox": [ + 247, + 302, + 363, + 319.5 + ], + "spans": [], + "index": 12 + }, + { + "bbox": [ + 247, + 319.5, + 363, + 337.0 + ], + "spans": [], + "index": 13 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 348, + 506, + 371 + ], + "lines": [ + { + "bbox": [ + 106, + 347, + 506, + 360 + ], + "spans": [ + { + "bbox": [ + 106, + 347, + 153, + 360 + ], + "score": 1.0, + "content": "Proof. 
Fix", + "type": "text" + }, + { + "bbox": [ + 153, + 350, + 188, + 359 + ], + "score": 0.87, + "content": "\\alpha _ { k } = \\alpha", + "type": "inline_equation" + }, + { + "bbox": [ + 188, + 347, + 208, + 360 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 208, + 351, + 240, + 360 + ], + "score": 0.89, + "content": "\\rho _ { k } = \\rho", + "type": "inline_equation" + }, + { + "bbox": [ + 240, + 347, + 273, + 360 + ], + "score": 1.0, + "content": ", where", + "type": "text" + }, + { + "bbox": [ + 273, + 351, + 281, + 358 + ], + "score": 0.81, + "content": "\\alpha", + "type": "inline_equation" + }, + { + "bbox": [ + 281, + 347, + 300, + 360 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 300, + 351, + 307, + 360 + ], + "score": 0.83, + "content": "\\rho", + "type": "inline_equation" + }, + { + "bbox": [ + 308, + 347, + 506, + 360 + ], + "score": 1.0, + "content": "are the respective right-hand sides of (55)-(56).", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 357, + 486, + 372 + ], + "spans": [ + { + "bbox": [ + 105, + 357, + 377, + 372 + ], + "score": 1.0, + "content": "Lemma 3 implies that (21) so long as (9)-(11) hold and the stepsize", + "type": "text" + }, + { + "bbox": [ + 378, + 361, + 384, + 370 + ], + "score": 0.82, + "content": "\\rho", + "type": "inline_equation" + }, + { + "bbox": [ + 384, + 357, + 419, + 372 + ], + "score": 1.0, + "content": "satisfies", + "type": "text" + }, + { + "bbox": [ + 419, + 358, + 456, + 370 + ], + "score": 0.93, + "content": "\\rho < L ^ { - 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 456, + 357, + 486, + 372 + ], + "score": 1.0, + "content": ". 
Since", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 14.5, + "bbox_fs": [ + 105, + 347, + 506, + 372 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 240, + 374, + 370, + 402 + ], + "lines": [ + { + "bbox": [ + 240, + 374, + 370, + 402 + ], + "spans": [ + { + "bbox": [ + 240, + 374, + 370, + 402 + ], + "score": 0.94, + "content": "\\rho = \\operatorname* { m i n } \\left\\{ K ^ { - 1 / 4 } , \\frac { 1 } { 2 L } \\right\\} \\leq \\frac { 1 } { 2 L } ,", + "type": "interline_equation", + "image_path": "8bcf7b82a6af0b050ba47f614a56f1e1812efd3f74d3a27177cc9bd0f241b69b.jpg" + } + ] + } + ], + "index": 16, + "virtual_lines": [ + { + "bbox": [ + 240, + 374, + 370, + 402 + ], + "spans": [], + "index": 16 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 406, + 227, + 417 + ], + "lines": [ + { + "bbox": [ + 106, + 405, + 228, + 419 + ], + "spans": [ + { + "bbox": [ + 106, + 405, + 228, + 419 + ], + "score": 1.0, + "content": "we conclude that (21) applies.", + "type": "text" + } + ], + "index": 17 + } + ], + "index": 17, + "bbox_fs": [ + 106, + 405, + 228, + 419 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 422, + 307, + 435 + ], + "lines": [ + { + "bbox": [ + 106, + 422, + 307, + 436 + ], + "spans": [ + { + "bbox": [ + 106, + 422, + 187, + 436 + ], + "score": 1.0, + "content": "Rewriting (21) with", + "type": "text" + }, + { + "bbox": [ + 188, + 425, + 220, + 434 + ], + "score": 0.89, + "content": "\\alpha _ { k } = \\alpha", + "type": "inline_equation" + }, + { + "bbox": [ + 221, + 422, + 238, + 436 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 238, + 425, + 268, + 434 + ], + "score": 0.93, + "content": "\\rho _ { k } = \\rho", + "type": "inline_equation" + }, + { + "bbox": [ + 268, + 422, + 307, + 436 + ], + "score": 1.0, + "content": ", we have", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 18, + "bbox_fs": [ + 106, + 422, + 307, + 436 + ] + }, + { + "type": "interline_equation", + 
"bbox": [ + 131, + 438, + 479, + 453 + ], + "lines": [ + { + "bbox": [ + 131, + 438, + 479, + 453 + ], + "spans": [ + { + "bbox": [ + 131, + 438, + 479, + 453 + ], + "score": 0.89, + "content": "\\begin{array} { r } { \\mathbb { E } [ \\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } | \\mathcal { F } _ { k } ] \\le ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - \\alpha \\rho T _ { k } + C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } . } \\end{array}", + "type": "interline_equation", + "image_path": "f3ceb59174b248b72d8d2ac6ae444e5be98b9ef6963dd7f8cfaf30b57ea1d74c.jpg" + } + ] + } + ], + "index": 19, + "virtual_lines": [ + { + "bbox": [ + 131, + 438, + 479, + 453 + ], + "spans": [], + "index": 19 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 457, + 301, + 469 + ], + "lines": [ + { + "bbox": [ + 105, + 456, + 302, + 471 + ], + "spans": [ + { + "bbox": [ + 105, + 456, + 250, + 471 + ], + "score": 1.0, + "content": "Therefore, taking expectations over", + "type": "text" + }, + { + "bbox": [ + 250, + 458, + 263, + 469 + ], + "score": 0.89, + "content": "\\mathcal { F } _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 263, + 456, + 302, + 471 + ], + "score": 1.0, + "content": ", we have", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 20, + "bbox_fs": [ + 105, + 456, + 302, + 471 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 128, + 473, + 467, + 488 + ], + "lines": [ + { + "bbox": [ + 128, + 473, + 467, + 488 + ], + "spans": [ + { + "bbox": [ + 128, + 473, + 467, + 488 + ], + "score": 0.87, + "content": "\\begin{array} { r } { \\mathbb { E } \\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } \\leq ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) \\mathbb { E } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - \\alpha \\rho \\mathbb { E } T _ { k } + C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } . 
} \\end{array}", + "type": "interline_equation", + "image_path": "be002811de5c0ba4afb39e57c2ef241e8e0e6a2e321c863bf375349c9c4c0b6f.jpg" + } + ] + } + ], + "index": 21, + "virtual_lines": [ + { + "bbox": [ + 128, + 473, + 467, + 488 + ], + "spans": [], + "index": 21 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 492, + 152, + 504 + ], + "lines": [ + { + "bbox": [ + 106, + 492, + 153, + 504 + ], + "spans": [ + { + "bbox": [ + 106, + 492, + 153, + 504 + ], + "score": 1.0, + "content": "Recall that", + "type": "text" + } + ], + "index": 22 + } + ], + "index": 22, + "bbox_fs": [ + 106, + 492, + 153, + 504 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 143, + 506, + 466, + 539 + ], + "lines": [ + { + "bbox": [ + 143, + 506, + 466, + 539 + ], + "spans": [ + { + "bbox": [ + 143, + 506, + 466, + 539 + ], + "score": 0.91, + "content": "T _ { k } \\doteq \\frac { \\tau } { \\rho } \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\frac { 1 } { \\rho \\tau } \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + 2 ( 1 - \\overline { { \\rho } } L ) \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } ,", + "type": "interline_equation", + "image_path": "c0c041abdc366d835ab20547eb97d52e9bd343c6e14371ccf0b8062a0abe9419.jpg" + } + ] + } + ], + "index": 24, + "virtual_lines": [ + { + "bbox": [ + 143, + 506, + 466, + 517.0 + ], + "spans": [], + "index": 23 + }, + { + "bbox": [ + 143, + 517.0, + 466, + 528.0 + ], + "spans": [], + "index": 24 + }, + { + "bbox": [ + 143, + 528.0, + 466, + 539.0 + ], + "spans": [], + "index": 25 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 542, + 504, + 577 + ], + "lines": [ + { + "bbox": [ + 105, + 541, + 506, + 556 + ], + "spans": [ + { + "bbox": [ + 105, + 541, + 304, + 556 + ], + "score": 1.0, + "content": "where for the first two terms we have simply set", + "type": "text" + }, + { + "bbox": [ + 304, + 543, + 330, + 554 + ], + "score": 0.91, + "content": "\\rho = 
\\overline { { \\rho } }", + "type": "inline_equation" + }, + { + "bbox": [ + 330, + 541, + 506, + 556 + ], + "score": 1.0, + "content": "because the stepsize is constant. However,", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 553, + 505, + 566 + ], + "spans": [ + { + "bbox": [ + 105, + 553, + 320, + 566 + ], + "score": 1.0, + "content": "for the final term, we will still use an upper bound,", + "type": "text" + }, + { + "bbox": [ + 320, + 555, + 326, + 565 + ], + "score": 0.76, + "content": "\\overline { { \\rho } }", + "type": "inline_equation" + }, + { + "bbox": [ + 327, + 553, + 343, + 566 + ], + "score": 1.0, + "content": ", on", + "type": "text" + }, + { + "bbox": [ + 344, + 556, + 350, + 565 + ], + "score": 0.79, + "content": "\\rho", + "type": "inline_equation" + }, + { + "bbox": [ + 351, + 553, + 505, + 566 + ], + "score": 1.0, + "content": ". In the current setting, we know that", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 107, + 564, + 462, + 578 + ], + "spans": [ + { + "bbox": [ + 107, + 564, + 166, + 577 + ], + "score": 0.93, + "content": "\\rho \\leq ( 1 / 2 ) L ^ { - 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 166, + 564, + 270, + 578 + ], + "score": 1.0, + "content": "and therefore we may set", + "type": "text" + }, + { + "bbox": [ + 271, + 564, + 330, + 577 + ], + "score": 0.95, + "content": "\\overline { { \\rho } } = ( 1 / 2 ) L ^ { - 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 330, + 564, + 357, + 578 + ], + "score": 1.0, + "content": ". 
Thus", + "type": "text" + }, + { + "bbox": [ + 357, + 564, + 415, + 577 + ], + "score": 0.92, + "content": "1 - \\overline { { \\rho } } L = 1 / 2", + "type": "inline_equation" + }, + { + "bbox": [ + 416, + 564, + 462, + 578 + ], + "score": 1.0, + "content": ", leading to", + "type": "text" + } + ], + "index": 28 + } + ], + "index": 27, + "bbox_fs": [ + 105, + 541, + 506, + 578 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 145, + 580, + 465, + 613 + ], + "lines": [ + { + "bbox": [ + 145, + 580, + 465, + 613 + ], + "spans": [ + { + "bbox": [ + 145, + 580, + 465, + 613 + ], + "score": 0.92, + "content": "\\rho \\mathbb { E } T _ { k } = \\tau \\sum _ { i = 1 } ^ { n } \\mathbb { E } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\tau ^ { - 1 } \\sum _ { i = 1 } ^ { n } \\mathbb { E } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + \\rho \\mathbb { E } \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } .", + "type": "interline_equation", + "image_path": "7d17604a57c4da09806d10a8726be4a9262bf28ad5f53bcf71543543be7d2831.jpg" + } + ] + } + ], + "index": 30, + "virtual_lines": [ + { + "bbox": [ + 145, + 580, + 465, + 591.0 + ], + "spans": [], + "index": 29 + }, + { + "bbox": [ + 145, + 591.0, + 465, + 602.0 + ], + "spans": [], + "index": 30 + }, + { + "bbox": [ + 145, + 602.0, + 465, + 613.0 + ], + "spans": [], + "index": 31 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 617, + 122, + 628 + ], + "lines": [ + { + "bbox": [ + 105, + 616, + 123, + 630 + ], + "spans": [ + { + "bbox": [ + 105, + 616, + 123, + 630 + ], + "score": 1.0, + "content": "Let", + "type": "text" + } + ], + "index": 32 + } + ], + "index": 32, + "bbox_fs": [ + 105, + 616, + 123, + 630 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 132, + 630, + 478, + 663 + ], + "lines": [ + { + "bbox": [ + 132, + 630, + 478, + 663 + ], + "spans": [ + { + "bbox": [ + 132, + 630, + 478, + 663 + ], + "score": 0.92, + "content": "U _ { k } \\doteq \\mathbb { E } \\| B ( z ^ { 
k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } \\qquad W _ { k } \\doteq \\tau \\sum _ { i = 1 } ^ { n } \\mathbb { E } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\tau ^ { - 1 } \\sum _ { i = 1 } ^ { n } \\mathbb { E } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } ,", + "type": "interline_equation", + "image_path": "3542b23bd6ba76ee1a5024fd546bbc9ce81c8724379783e8cc3e1024c5f22f77.jpg" + } + ] + } + ], + "index": 34, + "virtual_lines": [ + { + "bbox": [ + 132, + 630, + 478, + 641.0 + ], + "spans": [], + "index": 33 + }, + { + "bbox": [ + 132, + 641.0, + 478, + 652.0 + ], + "spans": [], + "index": 34 + }, + { + "bbox": [ + 132, + 652.0, + 478, + 663.0 + ], + "spans": [], + "index": 35 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 667, + 135, + 677 + ], + "lines": [ + { + "bbox": [ + 105, + 665, + 136, + 678 + ], + "spans": [ + { + "bbox": [ + 105, + 665, + 136, + 678 + ], + "score": 1.0, + "content": "so that", + "type": "text" + } + ], + "index": 36 + } + ], + "index": 36, + "bbox_fs": [ + 105, + 665, + 136, + 678 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 264, + 685, + 346, + 695 + ], + "lines": [ + { + "bbox": [ + 264, + 685, + 346, + 695 + ], + "spans": [ + { + "bbox": [ + 264, + 685, + 346, + 695 + ], + "score": 0.88, + "content": "\\rho \\mathbb { E } T _ { k } = \\rho U _ { k } + W _ { k } ,", + "type": "interline_equation", + "image_path": "2da8171408a871e6f550dde6e27402a69dbce0d53f091c1466a274077a810e82.jpg" + } + ] + } + ], + "index": 37, + "virtual_lines": [ + { + "bbox": [ + 264, + 685, + 346, + 695 + ], + "spans": [], + "index": 37 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 700, + 154, + 712 + ], + "lines": [ + { + "bbox": [ + 105, + 699, + 156, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 699, + 156, + 712 + ], + "score": 1.0, + "content": "and also let", + "type": "text" + } + ], + "index": 38 + } + ], + "index": 38, + "bbox_fs": [ + 105, + 699, + 156, + 712 + ] + }, + { + "type": 
"interline_equation", + "bbox": [ + 265, + 717, + 345, + 730 + ], + "lines": [ + { + "bbox": [ + 265, + 717, + 345, + 730 + ], + "spans": [ + { + "bbox": [ + 265, + 717, + 345, + 730 + ], + "score": 0.9, + "content": "V _ { k } \\doteq \\mathbb { E } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } .", + "type": "interline_equation", + "image_path": "ca3de38cff2c10b5a84076d4198e8173a2cbe40638114fda907a44f2be6dac48.jpg" + } + ] + } + ], + "index": 39, + "virtual_lines": [ + { + "bbox": [ + 265, + 717, + 345, + 730 + ], + "spans": [], + "index": 39 + } + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 107, + 82, + 266, + 94 + ], + "lines": [ + { + "bbox": [ + 106, + 82, + 266, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 82, + 266, + 95 + ], + "score": 1.0, + "content": "Using these definitions in (59) we write", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "interline_equation", + "bbox": [ + 165, + 97, + 444, + 112 + ], + "lines": [ + { + "bbox": [ + 165, + 97, + 444, + 112 + ], + "spans": [ + { + "bbox": [ + 165, + 97, + 444, + 112 + ], + "score": 0.89, + "content": "\\begin{array} { r } { V _ { k + 1 } \\leq \\big ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } \\big ) V _ { k } - \\alpha \\rho U _ { k } - \\alpha W _ { k } + C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } . 
} \\end{array}", + "type": "interline_equation", + "image_path": "9afa166c3d95c3b6ee88ee851af061eaf8dd58fe08791b319530bb21e55da5f0.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 165, + 97, + 444, + 112 + ], + "spans": [], + "index": 1 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 116, + 150, + 127 + ], + "lines": [ + { + "bbox": [ + 105, + 114, + 151, + 129 + ], + "spans": [ + { + "bbox": [ + 105, + 114, + 151, + 129 + ], + "score": 1.0, + "content": "Therefore,", + "type": "text" + } + ], + "index": 2 + } + ], + "index": 2 + }, + { + "type": "interline_equation", + "bbox": [ + 118, + 130, + 496, + 254 + ], + "lines": [ + { + "bbox": [ + 118, + 130, + 496, + 254 + ], + "spans": [ + { + "bbox": [ + 118, + 130, + 496, + 254 + ], + "score": 0.94, + "content": "\\begin{array} { c } { { V _ { k + 1 } + \\alpha \\rho U _ { k } + \\alpha W _ { k } \\leq ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) V _ { k } + C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } } } \\\\ { { \\Longleftrightarrow V _ { k + 1 } + \\alpha \\rho \\displaystyle \\sum _ { j = 1 } ^ { k } U _ { j } + \\alpha \\displaystyle \\sum _ { j = 1 } ^ { k } W _ { j } \\leq ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) V _ { k } + \\alpha \\rho \\displaystyle \\sum _ { j = 1 } ^ { k - 1 } U _ { j } + \\alpha \\displaystyle \\sum _ { j = 1 } ^ { k - 1 } W _ { j } } } \\\\ { { \\qquad + C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } } } \\\\ { { \\leq ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) \\left[ V _ { k } + \\alpha \\rho \\displaystyle \\sum _ { j = 1 } ^ { k - 1 } U _ { j } + \\alpha \\displaystyle \\sum _ { j = 1 } ^ { k - 1 } W _ { j } \\right] } } \\\\ { { \\qquad + C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } , } } \\end{array}", + "type": "interline_equation", + "image_path": "3679110dfca62716db36bd7e924798be4200825212368be40ad2d6882e90ea31.jpg" + } + ] + 
} + ], + "index": 4, + "virtual_lines": [ + { + "bbox": [ + 118, + 130, + 496, + 171.33333333333334 + ], + "spans": [], + "index": 3 + }, + { + "bbox": [ + 118, + 171.33333333333334, + 496, + 212.66666666666669 + ], + "spans": [], + "index": 4 + }, + { + "bbox": [ + 118, + 212.66666666666669, + 496, + 254.00000000000003 + ], + "spans": [], + "index": 5 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 257, + 292, + 269 + ], + "lines": [ + { + "bbox": [ + 105, + 255, + 292, + 271 + ], + "spans": [ + { + "bbox": [ + 105, + 255, + 206, + 271 + ], + "score": 1.0, + "content": "where we have used that", + "type": "text" + }, + { + "bbox": [ + 207, + 258, + 257, + 269 + ], + "score": 0.91, + "content": "U _ { k } , W _ { k } \\ge 0", + "type": "inline_equation" + }, + { + "bbox": [ + 257, + 255, + 292, + 271 + ], + "score": 1.0, + "content": ". Letting", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 6 + }, + { + "type": "interline_equation", + "bbox": [ + 233, + 272, + 377, + 308 + ], + "lines": [ + { + "bbox": [ + 233, + 272, + 377, + 308 + ], + "spans": [ + { + "bbox": [ + 233, + 272, + 377, + 308 + ], + "score": 0.94, + "content": "R _ { k } = V _ { k } + \\alpha \\rho \\sum _ { j = 1 } ^ { k - 1 } U _ { j } + \\alpha \\sum _ { j = 1 } ^ { k - 1 } W _ { j } ,", + "type": "interline_equation", + "image_path": "cbce57afca0ba498d20517c0a2d49c2de2dab0ea31e87287717f3c7d21c6a7d6.jpg" + } + ] + } + ], + "index": 7.5, + "virtual_lines": [ + { + "bbox": [ + 233, + 272, + 377, + 290.0 + ], + "spans": [], + "index": 7 + }, + { + "bbox": [ + 233, + 290.0, + 377, + 308.0 + ], + "spans": [], + "index": 8 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 313, + 160, + 324 + ], + "lines": [ + { + "bbox": [ + 105, + 312, + 162, + 325 + ], + "spans": [ + { + "bbox": [ + 105, + 312, + 162, + 325 + ], + "score": 1.0, + "content": "we then have", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 9 + }, + { + "type": "interline_equation", + "bbox": [ + 
198, + 327, + 412, + 342 + ], + "lines": [ + { + "bbox": [ + 198, + 327, + 412, + 342 + ], + "spans": [ + { + "bbox": [ + 198, + 327, + 412, + 342 + ], + "score": 0.88, + "content": "R _ { k + 1 } \\leq { \\left( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } \\right) } R _ { k } + C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } ,", + "type": "interline_equation", + "image_path": "6cf8ab205320c887d3eb63c8c846fb87a8084875f11e81261b3ded0ee69822c3.jpg" + } + ] + } + ], + "index": 10, + "virtual_lines": [ + { + "bbox": [ + 198, + 327, + 412, + 342 + ], + "spans": [], + "index": 10 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 346, + 164, + 357 + ], + "lines": [ + { + "bbox": [ + 106, + 345, + 165, + 359 + ], + "spans": [ + { + "bbox": [ + 106, + 345, + 165, + 359 + ], + "score": 1.0, + "content": "which implies", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 11 + }, + { + "type": "interline_equation", + "bbox": [ + 133, + 362, + 478, + 397 + ], + "lines": [ + { + "bbox": [ + 133, + 362, + 478, + 397 + ], + "spans": [ + { + "bbox": [ + 133, + 362, + 478, + 397 + ], + "score": 0.92, + "content": "R _ { k + 1 } \\leq ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { k } R _ { 1 } + ( C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } ) \\sum _ { j = 1 } ^ { k } ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { k - j } .", + "type": "interline_equation", + "image_path": "54c85a90e8c290da50d2cd556d249e83a42680e60b3f29761524aa14ba3d1fd3.jpg" + } + ] + } + ], + "index": 13, + "virtual_lines": [ + { + "bbox": [ + 133, + 362, + 478, + 373.6666666666667 + ], + "spans": [], + "index": 12 + }, + { + "bbox": [ + 133, + 373.6666666666667, + 478, + 385.33333333333337 + ], + "spans": [], + "index": 13 + }, + { + "bbox": [ + 133, + 385.33333333333337, + 478, + 397.00000000000006 + ], + "spans": [], + "index": 14 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 400, + 129, + 
411 + ], + "lines": [ + { + "bbox": [ + 105, + 399, + 131, + 414 + ], + "spans": [ + { + "bbox": [ + 105, + 399, + 131, + 414 + ], + "score": 1.0, + "content": "Now,", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 15 + }, + { + "type": "interline_equation", + "bbox": [ + 183, + 416, + 426, + 538 + ], + "lines": [ + { + "bbox": [ + 183, + 416, + 426, + 538 + ], + "spans": [ + { + "bbox": [ + 183, + 416, + 426, + 538 + ], + "score": 0.94, + "content": "\\begin{array} { r l r } { { \\sum _ { j = 1 } ^ { k } ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { k - j } = \\sum _ { j = 0 } ^ { k - 1 } ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { j } } } \\\\ & { } & { = \\frac { ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { k } - 1 } { ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) - 1 } } \\\\ & { } & { = \\frac { ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { k } - 1 } { C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } } } \\\\ & { } & { \\leq \\frac { ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { k } } { C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } } . 
} \\end{array}", + "type": "interline_equation", + "image_path": "dd6f672dcc12cab7f51f57b8003c9e9869f8dde6472f70b41820ac2b9f412335.jpg" + } + ] + } + ], + "index": 19.5, + "virtual_lines": [ + { + "bbox": [ + 183, + 416, + 426, + 431.25 + ], + "spans": [], + "index": 16 + }, + { + "bbox": [ + 183, + 431.25, + 426, + 446.5 + ], + "spans": [], + "index": 17 + }, + { + "bbox": [ + 183, + 446.5, + 426, + 461.75 + ], + "spans": [], + "index": 18 + }, + { + "bbox": [ + 183, + 461.75, + 426, + 477.0 + ], + "spans": [], + "index": 19 + }, + { + "bbox": [ + 183, + 477.0, + 426, + 492.25 + ], + "spans": [], + "index": 20 + }, + { + "bbox": [ + 183, + 492.25, + 426, + 507.5 + ], + "spans": [], + "index": 21 + }, + { + "bbox": [ + 183, + 507.5, + 426, + 522.75 + ], + "spans": [], + "index": 22 + }, + { + "bbox": [ + 183, + 522.75, + 426, + 538.0 + ], + "spans": [], + "index": 23 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 538, + 150, + 550 + ], + "lines": [ + { + "bbox": [ + 105, + 537, + 151, + 552 + ], + "spans": [ + { + "bbox": [ + 105, + 537, + 151, + 552 + ], + "score": 1.0, + "content": "Therefore,", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 24 + }, + { + "type": "interline_equation", + "bbox": [ + 185, + 555, + 425, + 581 + ], + "lines": [ + { + "bbox": [ + 185, + 555, + 425, + 581 + ], + "spans": [ + { + "bbox": [ + 185, + 555, + 425, + 581 + ], + "score": 0.93, + "content": "R _ { k + 1 } \\leq ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { k } \\left( R _ { 1 } + { \\frac { C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } } { C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } } } \\right) .", + "type": "interline_equation", + "image_path": "dde5937c8b7f4dbfdccf262dd488ec99feb4ba7b68b4c155e3e24b70e83c2458.jpg" + } + ] + } + ], + "index": 25, + "virtual_lines": [ + { + "bbox": [ + 185, + 555, + 425, + 581 + ], + "spans": [], + "index": 25 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 
591, + 275, + 603 + ], + "lines": [ + { + "bbox": [ + 106, + 591, + 275, + 603 + ], + "spans": [ + { + "bbox": [ + 106, + 591, + 220, + 603 + ], + "score": 1.0, + "content": "Fix the number of iterations", + "type": "text" + }, + { + "bbox": [ + 221, + 591, + 249, + 602 + ], + "score": 0.93, + "content": "K \\geq 1", + "type": "inline_equation" + }, + { + "bbox": [ + 249, + 591, + 275, + 603 + ], + "score": 1.0, + "content": ". Now", + "type": "text" + } + ], + "index": 26 + } + ], + "index": 26 + }, + { + "type": "interline_equation", + "bbox": [ + 226, + 606, + 384, + 633 + ], + "lines": [ + { + "bbox": [ + 226, + 606, + 384, + 633 + ], + "spans": [ + { + "bbox": [ + 226, + 606, + 384, + 633 + ], + "score": 0.94, + "content": "\\rho = \\operatorname* { m i n } \\left\\{ K ^ { - 1 / 4 } , \\frac { 1 } { 2 L } \\right\\} \\leq \\frac { 1 } { K ^ { 1 / 4 } } \\leq 1 .", + "type": "interline_equation", + "image_path": "f8ecdc1cdf63419fb980b006038e2f23f4047ebd1c48a84fbf075fac7cafd0db.jpg" + } + ] + } + ], + "index": 27, + "virtual_lines": [ + { + "bbox": [ + 226, + 606, + 384, + 633 + ], + "spans": [], + "index": 27 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 637, + 150, + 648 + ], + "lines": [ + { + "bbox": [ + 105, + 635, + 151, + 650 + ], + "spans": [ + { + "bbox": [ + 105, + 635, + 151, + 650 + ], + "score": 1.0, + "content": "Therefore,", + "type": "text" + } + ], + "index": 28 + } + ], + "index": 28 + }, + { + "type": "interline_equation", + "bbox": [ + 159, + 651, + 452, + 732 + ], + "lines": [ + { + "bbox": [ + 159, + 651, + 452, + 732 + ], + "spans": [ + { + "bbox": [ + 159, + 651, + 452, + 732 + ], + "score": 0.94, + "content": "\\begin{array} { l } { \\displaystyle \\alpha \\rho \\sum _ { j = 1 } ^ { K } ( U _ { j } + W _ { j } ) \\leq \\alpha \\rho \\sum _ { j = 1 } ^ { K } U _ { j } + \\alpha \\sum _ { j = 1 } ^ { K } W _ { j } } \\\\ { \\leq R _ { K + 1 } } \\\\ { \\leq ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { K 
} \\left( R _ { 1 } + \\frac { C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } } { C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } } \\right) . } \\end{array}", + "type": "interline_equation", + "image_path": "b9411a474832c7af5eee5bbf4f39ba58ca4a29c2f493647cf7e9b8e0f82cd857.jpg" + } + ] + } + ], + "index": 30, + "virtual_lines": [ + { + "bbox": [ + 159, + 651, + 452, + 678.0 + ], + "spans": [], + "index": 29 + }, + { + "bbox": [ + 159, + 678.0, + 452, + 705.0 + ], + "spans": [], + "index": 30 + }, + { + "bbox": [ + 159, + 705.0, + 452, + 732.0 + ], + "spans": [], + "index": 31 + } + ] + } + ], + "page_idx": 25, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 106, + 26, + 308, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 761 + ], + "lines": [ + { + "bbox": [ + 298, + 749, + 313, + 764 + ], + "spans": [ + { + "bbox": [ + 298, + 749, + 313, + 764 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 15, + "width": 15 + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 107, + 82, + 266, + 94 + ], + "lines": [ + { + "bbox": [ + 106, + 82, + 266, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 82, + 266, + 95 + ], + "score": 1.0, + "content": "Using these definitions in (59) we write", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0, + "bbox_fs": [ + 106, + 82, + 266, + 95 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 165, + 97, + 444, + 112 + ], + "lines": [ + { + "bbox": [ + 165, + 97, + 444, + 112 + ], + "spans": [ + { + "bbox": [ + 165, + 97, + 444, + 112 + ], + "score": 0.89, + "content": "\\begin{array} { r } { V _ { k + 1 } \\leq \\big ( 1 + C _ { 1 } \\alpha ^ { 2 
} + C _ { 3 } \\alpha \\rho ^ { 2 } \\big ) V _ { k } - \\alpha \\rho U _ { k } - \\alpha W _ { k } + C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } . } \\end{array}", + "type": "interline_equation", + "image_path": "9afa166c3d95c3b6ee88ee851af061eaf8dd58fe08791b319530bb21e55da5f0.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 165, + 97, + 444, + 112 + ], + "spans": [], + "index": 1 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 116, + 150, + 127 + ], + "lines": [ + { + "bbox": [ + 105, + 114, + 151, + 129 + ], + "spans": [ + { + "bbox": [ + 105, + 114, + 151, + 129 + ], + "score": 1.0, + "content": "Therefore,", + "type": "text" + } + ], + "index": 2 + } + ], + "index": 2, + "bbox_fs": [ + 105, + 114, + 151, + 129 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 118, + 130, + 496, + 254 + ], + "lines": [ + { + "bbox": [ + 118, + 130, + 496, + 254 + ], + "spans": [ + { + "bbox": [ + 118, + 130, + 496, + 254 + ], + "score": 0.94, + "content": "\\begin{array} { c } { { V _ { k + 1 } + \\alpha \\rho U _ { k } + \\alpha W _ { k } \\leq ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) V _ { k } + C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } } } \\\\ { { \\Longleftrightarrow V _ { k + 1 } + \\alpha \\rho \\displaystyle \\sum _ { j = 1 } ^ { k } U _ { j } + \\alpha \\displaystyle \\sum _ { j = 1 } ^ { k } W _ { j } \\leq ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) V _ { k } + \\alpha \\rho \\displaystyle \\sum _ { j = 1 } ^ { k - 1 } U _ { j } + \\alpha \\displaystyle \\sum _ { j = 1 } ^ { k - 1 } W _ { j } } } \\\\ { { \\qquad + C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } } } \\\\ { { \\leq ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) \\left[ V _ { k } + \\alpha \\rho \\displaystyle \\sum _ { j = 1 } ^ { k - 1 } U _ { j } + \\alpha \\displaystyle \\sum _ { j = 1 } ^ { k - 1 } W _ { j } \\right] } } \\\\ { { \\qquad + 
C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } , } } \\end{array}", + "type": "interline_equation", + "image_path": "3679110dfca62716db36bd7e924798be4200825212368be40ad2d6882e90ea31.jpg" + } + ] + } + ], + "index": 4, + "virtual_lines": [ + { + "bbox": [ + 118, + 130, + 496, + 171.33333333333334 + ], + "spans": [], + "index": 3 + }, + { + "bbox": [ + 118, + 171.33333333333334, + 496, + 212.66666666666669 + ], + "spans": [], + "index": 4 + }, + { + "bbox": [ + 118, + 212.66666666666669, + 496, + 254.00000000000003 + ], + "spans": [], + "index": 5 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 257, + 292, + 269 + ], + "lines": [ + { + "bbox": [ + 105, + 255, + 292, + 271 + ], + "spans": [ + { + "bbox": [ + 105, + 255, + 206, + 271 + ], + "score": 1.0, + "content": "where we have used that", + "type": "text" + }, + { + "bbox": [ + 207, + 258, + 257, + 269 + ], + "score": 0.91, + "content": "U _ { k } , W _ { k } \\ge 0", + "type": "inline_equation" + }, + { + "bbox": [ + 257, + 255, + 292, + 271 + ], + "score": 1.0, + "content": ". 
Letting", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 6, + "bbox_fs": [ + 105, + 255, + 292, + 271 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 233, + 272, + 377, + 308 + ], + "lines": [ + { + "bbox": [ + 233, + 272, + 377, + 308 + ], + "spans": [ + { + "bbox": [ + 233, + 272, + 377, + 308 + ], + "score": 0.94, + "content": "R _ { k } = V _ { k } + \\alpha \\rho \\sum _ { j = 1 } ^ { k - 1 } U _ { j } + \\alpha \\sum _ { j = 1 } ^ { k - 1 } W _ { j } ,", + "type": "interline_equation", + "image_path": "cbce57afca0ba498d20517c0a2d49c2de2dab0ea31e87287717f3c7d21c6a7d6.jpg" + } + ] + } + ], + "index": 7.5, + "virtual_lines": [ + { + "bbox": [ + 233, + 272, + 377, + 290.0 + ], + "spans": [], + "index": 7 + }, + { + "bbox": [ + 233, + 290.0, + 377, + 308.0 + ], + "spans": [], + "index": 8 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 313, + 160, + 324 + ], + "lines": [ + { + "bbox": [ + 105, + 312, + 162, + 325 + ], + "spans": [ + { + "bbox": [ + 105, + 312, + 162, + 325 + ], + "score": 1.0, + "content": "we then have", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 9, + "bbox_fs": [ + 105, + 312, + 162, + 325 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 198, + 327, + 412, + 342 + ], + "lines": [ + { + "bbox": [ + 198, + 327, + 412, + 342 + ], + "spans": [ + { + "bbox": [ + 198, + 327, + 412, + 342 + ], + "score": 0.88, + "content": "R _ { k + 1 } \\leq { \\left( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } \\right) } R _ { k } + C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } ,", + "type": "interline_equation", + "image_path": "6cf8ab205320c887d3eb63c8c846fb87a8084875f11e81261b3ded0ee69822c3.jpg" + } + ] + } + ], + "index": 10, + "virtual_lines": [ + { + "bbox": [ + 198, + 327, + 412, + 342 + ], + "spans": [], + "index": 10 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 346, + 164, + 357 + ], + "lines": [ + { + "bbox": [ + 106, + 345, + 165, + 359 + ], + "spans": [ 
+ { + "bbox": [ + 106, + 345, + 165, + 359 + ], + "score": 1.0, + "content": "which implies", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 11, + "bbox_fs": [ + 106, + 345, + 165, + 359 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 133, + 362, + 478, + 397 + ], + "lines": [ + { + "bbox": [ + 133, + 362, + 478, + 397 + ], + "spans": [ + { + "bbox": [ + 133, + 362, + 478, + 397 + ], + "score": 0.92, + "content": "R _ { k + 1 } \\leq ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { k } R _ { 1 } + ( C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } ) \\sum _ { j = 1 } ^ { k } ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { k - j } .", + "type": "interline_equation", + "image_path": "54c85a90e8c290da50d2cd556d249e83a42680e60b3f29761524aa14ba3d1fd3.jpg" + } + ] + } + ], + "index": 13, + "virtual_lines": [ + { + "bbox": [ + 133, + 362, + 478, + 373.6666666666667 + ], + "spans": [], + "index": 12 + }, + { + "bbox": [ + 133, + 373.6666666666667, + 478, + 385.33333333333337 + ], + "spans": [], + "index": 13 + }, + { + "bbox": [ + 133, + 385.33333333333337, + 478, + 397.00000000000006 + ], + "spans": [], + "index": 14 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 400, + 129, + 411 + ], + "lines": [ + { + "bbox": [ + 105, + 399, + 131, + 414 + ], + "spans": [ + { + "bbox": [ + 105, + 399, + 131, + 414 + ], + "score": 1.0, + "content": "Now,", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 15, + "bbox_fs": [ + 105, + 399, + 131, + 414 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 183, + 416, + 426, + 538 + ], + "lines": [ + { + "bbox": [ + 183, + 416, + 426, + 538 + ], + "spans": [ + { + "bbox": [ + 183, + 416, + 426, + 538 + ], + "score": 0.94, + "content": "\\begin{array} { r l r } { { \\sum _ { j = 1 } ^ { k } ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { k - j } = \\sum _ { j = 0 } ^ { k - 1 } ( 1 + C _ { 1 } \\alpha ^ { 2 } + C 
_ { 3 } \\alpha \\rho ^ { 2 } ) ^ { j } } } \\\\ & { } & { = \\frac { ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { k } - 1 } { ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) - 1 } } \\\\ & { } & { = \\frac { ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { k } - 1 } { C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } } } \\\\ & { } & { \\leq \\frac { ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { k } } { C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } } . } \\end{array}", + "type": "interline_equation", + "image_path": "dd6f672dcc12cab7f51f57b8003c9e9869f8dde6472f70b41820ac2b9f412335.jpg" + } + ] + } + ], + "index": 19.5, + "virtual_lines": [ + { + "bbox": [ + 183, + 416, + 426, + 431.25 + ], + "spans": [], + "index": 16 + }, + { + "bbox": [ + 183, + 431.25, + 426, + 446.5 + ], + "spans": [], + "index": 17 + }, + { + "bbox": [ + 183, + 446.5, + 426, + 461.75 + ], + "spans": [], + "index": 18 + }, + { + "bbox": [ + 183, + 461.75, + 426, + 477.0 + ], + "spans": [], + "index": 19 + }, + { + "bbox": [ + 183, + 477.0, + 426, + 492.25 + ], + "spans": [], + "index": 20 + }, + { + "bbox": [ + 183, + 492.25, + 426, + 507.5 + ], + "spans": [], + "index": 21 + }, + { + "bbox": [ + 183, + 507.5, + 426, + 522.75 + ], + "spans": [], + "index": 22 + }, + { + "bbox": [ + 183, + 522.75, + 426, + 538.0 + ], + "spans": [], + "index": 23 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 538, + 150, + 550 + ], + "lines": [ + { + "bbox": [ + 105, + 537, + 151, + 552 + ], + "spans": [ + { + "bbox": [ + 105, + 537, + 151, + 552 + ], + "score": 1.0, + "content": "Therefore,", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 24, + "bbox_fs": [ + 105, + 537, + 151, + 552 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 185, + 555, + 425, + 581 + ], + "lines": [ + { + "bbox": [ + 185, + 555, + 425, + 581 + ], + "spans": [ + { + "bbox": [ + 185, + 
555, + 425, + 581 + ], + "score": 0.93, + "content": "R _ { k + 1 } \\leq ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { k } \\left( R _ { 1 } + { \\frac { C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } } { C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } } } \\right) .", + "type": "interline_equation", + "image_path": "dde5937c8b7f4dbfdccf262dd488ec99feb4ba7b68b4c155e3e24b70e83c2458.jpg" + } + ] + } + ], + "index": 25, + "virtual_lines": [ + { + "bbox": [ + 185, + 555, + 425, + 581 + ], + "spans": [], + "index": 25 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 591, + 275, + 603 + ], + "lines": [ + { + "bbox": [ + 106, + 591, + 275, + 603 + ], + "spans": [ + { + "bbox": [ + 106, + 591, + 220, + 603 + ], + "score": 1.0, + "content": "Fix the number of iterations", + "type": "text" + }, + { + "bbox": [ + 221, + 591, + 249, + 602 + ], + "score": 0.93, + "content": "K \\geq 1", + "type": "inline_equation" + }, + { + "bbox": [ + 249, + 591, + 275, + 603 + ], + "score": 1.0, + "content": ". 
Now", + "type": "text" + } + ], + "index": 26 + } + ], + "index": 26, + "bbox_fs": [ + 106, + 591, + 275, + 603 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 226, + 606, + 384, + 633 + ], + "lines": [ + { + "bbox": [ + 226, + 606, + 384, + 633 + ], + "spans": [ + { + "bbox": [ + 226, + 606, + 384, + 633 + ], + "score": 0.94, + "content": "\\rho = \\operatorname* { m i n } \\left\\{ K ^ { - 1 / 4 } , \\frac { 1 } { 2 L } \\right\\} \\leq \\frac { 1 } { K ^ { 1 / 4 } } \\leq 1 .", + "type": "interline_equation", + "image_path": "f8ecdc1cdf63419fb980b006038e2f23f4047ebd1c48a84fbf075fac7cafd0db.jpg" + } + ] + } + ], + "index": 27, + "virtual_lines": [ + { + "bbox": [ + 226, + 606, + 384, + 633 + ], + "spans": [], + "index": 27 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 637, + 150, + 648 + ], + "lines": [ + { + "bbox": [ + 105, + 635, + 151, + 650 + ], + "spans": [ + { + "bbox": [ + 105, + 635, + 151, + 650 + ], + "score": 1.0, + "content": "Therefore,", + "type": "text" + } + ], + "index": 28 + } + ], + "index": 28, + "bbox_fs": [ + 105, + 635, + 151, + 650 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 159, + 651, + 452, + 732 + ], + "lines": [ + { + "bbox": [ + 159, + 651, + 452, + 732 + ], + "spans": [ + { + "bbox": [ + 159, + 651, + 452, + 732 + ], + "score": 0.94, + "content": "\\begin{array} { l } { \\displaystyle \\alpha \\rho \\sum _ { j = 1 } ^ { K } ( U _ { j } + W _ { j } ) \\leq \\alpha \\rho \\sum _ { j = 1 } ^ { K } U _ { j } + \\alpha \\sum _ { j = 1 } ^ { K } W _ { j } } \\\\ { \\leq R _ { K + 1 } } \\\\ { \\leq ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { K } \\left( R _ { 1 } + \\frac { C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } } { C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } } \\right) . 
} \\end{array}", + "type": "interline_equation", + "image_path": "b9411a474832c7af5eee5bbf4f39ba58ca4a29c2f493647cf7e9b8e0f82cd857.jpg" + } + ] + } + ], + "index": 30, + "virtual_lines": [ + { + "bbox": [ + 159, + 651, + 452, + 678.0 + ], + "spans": [], + "index": 29 + }, + { + "bbox": [ + 159, + 678.0, + 452, + 705.0 + ], + "spans": [], + "index": 30 + }, + { + "bbox": [ + 159, + 705.0, + 452, + 732.0 + ], + "spans": [], + "index": 31 + } + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 107, + 82, + 257, + 94 + ], + "lines": [ + { + "bbox": [ + 105, + 82, + 257, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 190, + 95 + ], + "score": 1.0, + "content": "Dividing through by", + "type": "text" + }, + { + "bbox": [ + 190, + 83, + 212, + 94 + ], + "score": 0.8, + "content": "\\alpha \\rho K", + "type": "inline_equation" + }, + { + "bbox": [ + 212, + 82, + 257, + 95 + ], + "score": 1.0, + "content": ", we obtain", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "interline_equation", + "bbox": [ + 158, + 98, + 452, + 133 + ], + "lines": [ + { + "bbox": [ + 158, + 98, + 452, + 133 + ], + "spans": [ + { + "bbox": [ + 158, + 98, + 452, + 133 + ], + "score": 0.94, + "content": "\\frac { 1 } { K } \\sum _ { j = 1 } ^ { K } ( U _ { j } + W _ { j } ) \\leq \\frac { ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { K } } { \\alpha \\rho K } \\left( R _ { 1 } + \\frac { C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } } { C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } } \\right) ,", + "type": "interline_equation", + "image_path": "709fbff6f7f9fc78233cab42c32bb1b3525562556338f382da851e69e5f67ba0.jpg" + } + ] + } + ], + "index": 2, + "virtual_lines": [ + { + "bbox": [ + 158, + 98, + 452, + 109.66666666666667 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 158, + 109.66666666666667, + 452, + 121.33333333333334 + ], + "spans": [], + "index": 2 + }, + { + "bbox": 
[ + 158, + 121.33333333333334, + 452, + 133.0 + ], + "spans": [], + "index": 3 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 145, + 246, + 157 + ], + "lines": [ + { + "bbox": [ + 105, + 144, + 248, + 159 + ], + "spans": [ + { + "bbox": [ + 105, + 144, + 146, + 159 + ], + "score": 1.0, + "content": "and since", + "type": "text" + }, + { + "bbox": [ + 146, + 145, + 189, + 158 + ], + "score": 0.93, + "content": "\\alpha = C _ { f } \\rho ^ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 189, + 144, + 248, + 159 + ], + "score": 1.0, + "content": ", we also have", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 4 + }, + { + "type": "interline_equation", + "bbox": [ + 239, + 162, + 372, + 190 + ], + "lines": [ + { + "bbox": [ + 239, + 162, + 372, + 190 + ], + "spans": [ + { + "bbox": [ + 239, + 162, + 372, + 190 + ], + "score": 0.94, + "content": "\\frac { C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } } { C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } } = \\frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } .", + "type": "interline_equation", + "image_path": "2a924310d03b5ee557f28fb34d78a936893e8f7cf3ff9134c8d76d1600906ee3.jpg" + } + ] + } + ], + "index": 5, + "virtual_lines": [ + { + "bbox": [ + 239, + 162, + 372, + 190 + ], + "spans": [], + "index": 5 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 194, + 160, + 205 + ], + "lines": [ + { + "bbox": [ + 106, + 193, + 162, + 207 + ], + "spans": [ + { + "bbox": [ + 106, + 193, + 162, + 207 + ], + "score": 1.0, + "content": "Furthermore,", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 6 + }, + { + "type": "interline_equation", + "bbox": [ + 242, + 209, + 368, + 226 + ], + "lines": [ + { + "bbox": [ + 242, + 209, + 368, + 226 + ], + "spans": [ + { + "bbox": [ + 242, + 209, + 368, + 226 + ], + "score": 0.91, + "content": "\\rho \\leq K ^ { - \\frac { 1 } { 4 } } \\implies \\alpha \\leq C _ { f } K ^ { - \\frac { 1 } { 2 } 
} .", + "type": "interline_equation", + "image_path": "b69e4375e97113c3ae9c1241a18fc51e9b50dc039c28a0839addd7cb241b9721.jpg" + } + ] + } + ], + "index": 7, + "virtual_lines": [ + { + "bbox": [ + 242, + 209, + 368, + 226 + ], + "spans": [], + "index": 7 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 230, + 243, + 242 + ], + "lines": [ + { + "bbox": [ + 106, + 230, + 243, + 243 + ], + "spans": [ + { + "bbox": [ + 106, + 230, + 243, + 243 + ], + "score": 1.0, + "content": "Substituting these into (60) yields", + "type": "text" + } + ], + "index": 8 + } + ], + "index": 8 + }, + { + "type": "interline_equation", + "bbox": [ + 168, + 245, + 443, + 317 + ], + "lines": [ + { + "bbox": [ + 168, + 245, + 443, + 317 + ], + "spans": [ + { + "bbox": [ + 168, + 245, + 443, + 317 + ], + "score": 0.94, + "content": "\\begin{array} { r } { \\displaystyle \\frac { 1 } { K } \\sum _ { j = 1 } ^ { K } ( U _ { j } + W _ { j } ) \\leq \\frac { \\left( 1 + \\frac { C _ { f } ( C _ { f } C _ { 1 } + C _ { 3 } ) } { K } \\right) ^ { K } } { \\alpha \\rho K } \\left( R _ { 1 } + \\frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } \\right) } \\\\ { \\leq \\frac { \\exp ( C _ { f } ( C _ { f } C _ { 1 } + C _ { 3 } ) ) } { \\alpha \\rho K } \\left( R _ { 1 } + \\frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } \\right) , } \\end{array}", + "type": "interline_equation", + "image_path": "0e2124502d00a7615dcadf05fc1ed22e68a79cacf3e5071ac1736f2399dff60d.jpg" + } + ] + } + ], + "index": 10, + "virtual_lines": [ + { + "bbox": [ + 168, + 245, + 443, + 269.0 + ], + "spans": [], + "index": 9 + }, + { + "bbox": [ + 168, + 269.0, + 443, + 293.0 + ], + "spans": [], + "index": 10 + }, + { + "bbox": [ + 168, + 293.0, + 443, + 317.0 + ], + "spans": [], + "index": 11 + } + ] + }, + { + "type": "text", + "bbox": [ + 105, + 321, + 463, + 334 + ], + "lines": [ + { + "bbox": [ + 105, + 319, + 464, + 336 + ], + "spans": [ + { + "bbox": [ + 105, + 
319, + 237, + 336 + ], + "score": 1.0, + "content": "where we have used that for any", + "type": "text" + }, + { + "bbox": [ + 237, + 321, + 333, + 334 + ], + "score": 0.93, + "content": "t \\ge 0 , 1 + t / K \\le e ^ { t / K }", + "type": "inline_equation" + }, + { + "bbox": [ + 333, + 319, + 387, + 336 + ], + "score": 1.0, + "content": ", so therefore", + "type": "text" + }, + { + "bbox": [ + 387, + 321, + 459, + 334 + ], + "score": 0.94, + "content": "( 1 + t / K ) ^ { K } \\leq e ^ { t }", + "type": "inline_equation" + }, + { + "bbox": [ + 460, + 319, + 464, + 336 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 12 + } + ], + "index": 12 + }, + { + "type": "text", + "bbox": [ + 106, + 339, + 505, + 364 + ], + "lines": [ + { + "bbox": [ + 104, + 337, + 506, + 354 + ], + "spans": [ + { + "bbox": [ + 104, + 337, + 240, + 354 + ], + "score": 1.0, + "content": "The worst-case rates in terms of", + "type": "text" + }, + { + "bbox": [ + 241, + 341, + 251, + 350 + ], + "score": 0.84, + "content": "K", + "type": "inline_equation" + }, + { + "bbox": [ + 252, + 337, + 303, + 354 + ], + "score": 1.0, + "content": "occur when", + "type": "text" + }, + { + "bbox": [ + 303, + 339, + 352, + 352 + ], + "score": 0.93, + "content": "\\rho = K ^ { - 1 / 4 }", + "type": "inline_equation" + }, + { + "bbox": [ + 352, + 337, + 371, + 354 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 371, + 339, + 433, + 352 + ], + "score": 0.91, + "content": "\\alpha = C _ { f } K ^ { - 1 / 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 434, + 337, + 506, + 354 + ], + "score": 1.0, + "content": ". 
This is the case", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 351, + 454, + 365 + ], + "spans": [ + { + "bbox": [ + 105, + 351, + 130, + 365 + ], + "score": 1.0, + "content": "when", + "type": "text" + }, + { + "bbox": [ + 131, + 352, + 177, + 364 + ], + "score": 0.93, + "content": "K \\geq ( 2 L ) ^ { 4 }", + "type": "inline_equation" + }, + { + "bbox": [ + 178, + 351, + 384, + 365 + ], + "score": 1.0, + "content": ". Substituting these into the denominator yields, for", + "type": "text" + }, + { + "bbox": [ + 385, + 352, + 432, + 364 + ], + "score": 0.92, + "content": "K \\geq ( 2 L ) ^ { 4 }", + "type": "inline_equation" + }, + { + "bbox": [ + 432, + 351, + 454, + 365 + ], + "score": 1.0, + "content": ", that", + "type": "text" + } + ], + "index": 14 + } + ], + "index": 13.5 + }, + { + "type": "interline_equation", + "bbox": [ + 167, + 368, + 443, + 403 + ], + "lines": [ + { + "bbox": [ + 167, + 368, + 443, + 403 + ], + "spans": [ + { + "bbox": [ + 167, + 368, + 443, + 403 + ], + "score": 0.93, + "content": "\\frac { 1 } { K } \\sum _ { j = 1 } ^ { K } ( U _ { j } + W _ { j } ) \\leq \\frac { \\exp ( C _ { f } ( C _ { f } C _ { 1 } + C _ { 3 } ) ) } { C _ { f } K ^ { 1 / 4 } } \\left( R _ { 1 } + \\frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } \\right) .", + "type": "interline_equation", + "image_path": "a8c50d5cddeb344ec50671b2a66248b1a9062849bd17f1164cc995fe7923f6f9.jpg" + } + ] + } + ], + "index": 16, + "virtual_lines": [ + { + "bbox": [ + 167, + 368, + 443, + 379.6666666666667 + ], + "spans": [], + "index": 15 + }, + { + "bbox": [ + 167, + 379.6666666666667, + 443, + 391.33333333333337 + ], + "spans": [], + "index": 16 + }, + { + "bbox": [ + 167, + 391.33333333333337, + 443, + 403.00000000000006 + ], + "spans": [], + "index": 17 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 408, + 330, + 422 + ], + "lines": [ + { + "bbox": [ + 106, + 408, + 330, + 423 + ], + "spans": [ + { + "bbox": [ + 106, 
+ 408, + 154, + 423 + ], + "score": 1.0, + "content": "Thus, since", + "type": "text" + }, + { + "bbox": [ + 154, + 408, + 285, + 422 + ], + "score": 0.92, + "content": "G _ { k } \\leq \\operatorname* { m a x } \\{ \\tau , \\tau ^ { - 1 } \\} \\left( U _ { k } + W _ { k } \\right)", + "type": "inline_equation" + }, + { + "bbox": [ + 285, + 408, + 330, + 423 + ], + "score": 1.0, + "content": ", we obtain", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 18 + }, + { + "type": "interline_equation", + "bbox": [ + 131, + 426, + 479, + 461 + ], + "lines": [ + { + "bbox": [ + 131, + 426, + 479, + 461 + ], + "spans": [ + { + "bbox": [ + 131, + 426, + 479, + 461 + ], + "score": 0.9, + "content": "\\frac { 1 } { K } \\sum _ { j = 1 } ^ { K } \\mathbb { E } [ G _ { j } ] \\leq \\frac { \\operatorname* { m a x } \\{ \\tau , \\tau ^ { - 1 } \\} \\exp { ( C _ { f } ( C _ { f } C _ { 1 } + C _ { 3 } ) ) } } { C _ { f } K ^ { 1 / 4 } } \\left( \\| p ^ { 1 } - p ^ { * } \\| ^ { 2 } + \\frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } \\right) ,", + "type": "interline_equation", + "image_path": "97ff30d691a0554097db475829ee6914e88295e18e02f95e3f6207493be4aa19.jpg" + } + ] + } + ], + "index": 20, + "virtual_lines": [ + { + "bbox": [ + 131, + 426, + 479, + 437.6666666666667 + ], + "spans": [], + "index": 19 + }, + { + "bbox": [ + 131, + 437.6666666666667, + 479, + 449.33333333333337 + ], + "spans": [], + "index": 20 + }, + { + "bbox": [ + 131, + 449.33333333333337, + 479, + 461.00000000000006 + ], + "spans": [], + "index": 21 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 465, + 163, + 477 + ], + "lines": [ + { + "bbox": [ + 105, + 464, + 164, + 478 + ], + "spans": [ + { + "bbox": [ + 105, + 464, + 164, + 478 + ], + "score": 1.0, + "content": "which is (58).", + "type": "text" + } + ], + "index": 22 + } + ], + "index": 22 + }, + { + "type": "text", + "bbox": [ + 107, + 482, + 504, + 505 + ], + "lines": [ + { + "bbox": [ + 104, + 479, + 
504, + 496 + ], + "spans": [ + { + "bbox": [ + 104, + 479, + 133, + 496 + ], + "score": 1.0, + "content": "When", + "type": "text" + }, + { + "bbox": [ + 133, + 482, + 180, + 495 + ], + "score": 0.92, + "content": "K < ( 2 L ) ^ { 4 }", + "type": "inline_equation" + }, + { + "bbox": [ + 181, + 479, + 371, + 496 + ], + "score": 1.0, + "content": ", (57) can similarly be obtained by substituting", + "type": "text" + }, + { + "bbox": [ + 372, + 482, + 421, + 495 + ], + "score": 0.94, + "content": "\\rho = ( 2 L ) ^ { - 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 422, + 479, + 440, + 496 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 441, + 482, + 504, + 495 + ], + "score": 0.87, + "content": "\\alpha = C _ { f } ( 2 L ) ^ { - 2 }", + "type": "inline_equation" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 492, + 506, + 506 + ], + "spans": [ + { + "bbox": [ + 105, + 492, + 146, + 506 + ], + "score": 1.0, + "content": "into (61).", + "type": "text" + }, + { + "bbox": [ + 494, + 495, + 506, + 505 + ], + "score": 0.997, + "content": "β–‘", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 23.5 + }, + { + "type": "title", + "bbox": [ + 108, + 521, + 275, + 534 + ], + "lines": [ + { + "bbox": [ + 104, + 519, + 277, + 536 + ], + "spans": [ + { + "bbox": [ + 104, + 519, + 277, + 536 + ], + "score": 1.0, + "content": "F APPROXIMATION RESIDUALS", + "type": "text" + } + ], + "index": 25 + } + ], + "index": 25 + }, + { + "type": "text", + "bbox": [ + 106, + 545, + 505, + 580 + ], + "lines": [ + { + "bbox": [ + 105, + 545, + 505, + 559 + ], + "spans": [ + { + "bbox": [ + 105, + 545, + 505, + 559 + ], + "score": 1.0, + "content": "In this section we derive the approximation residual used to assess the performance of the algorithms", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 556, + 505, + 570 + ], + "spans": [ + { + "bbox": [ + 105, + 556, + 505, + 570 + ], + "score": 1.0, + "content": "in the 
numerical experiments. This residual relies on the following product-space reformulation", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 566, + 135, + 581 + ], + "spans": [ + { + "bbox": [ + 105, + 566, + 135, + 581 + ], + "score": 1.0, + "content": "of (1).", + "type": "text" + } + ], + "index": 28 + } + ], + "index": 27 + }, + { + "type": "text", + "bbox": [ + 107, + 592, + 398, + 604 + ], + "lines": [ + { + "bbox": [ + 105, + 592, + 398, + 605 + ], + "spans": [ + { + "bbox": [ + 105, + 592, + 398, + 605 + ], + "score": 1.0, + "content": "F.1 PRODUCT-SPACE REFORMULATION AND RESIDUAL PRINCIPLE", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 29 + }, + { + "type": "text", + "bbox": [ + 106, + 613, + 310, + 624 + ], + "lines": [ + { + "bbox": [ + 105, + 610, + 311, + 628 + ], + "spans": [ + { + "bbox": [ + 105, + 610, + 311, + 628 + ], + "score": 1.0, + "content": "Recall (1), the monotone inclusion we are solving:", + "type": "text" + } + ], + "index": 30 + } + ], + "index": 30 + }, + { + "type": "interline_equation", + "bbox": [ + 228, + 629, + 382, + 662 + ], + "lines": [ + { + "bbox": [ + 228, + 629, + 382, + 662 + ], + "spans": [ + { + "bbox": [ + 228, + 629, + 382, + 662 + ], + "score": 0.94, + "content": "{ \\mathrm { F i n d ~ } } z \\in \\mathbb { R } ^ { d } : 0 \\in \\sum _ { i = 1 } ^ { n } A _ { i } ( z ) + B ( z ) .", + "type": "interline_equation", + "image_path": "3cda2b34e62a90a27633b8f271f0dd9396d59e4d546b2003413a53f23c6f3e9b.jpg" + } + ] + } + ], + "index": 31.5, + "virtual_lines": [ + { + "bbox": [ + 228, + 629, + 382, + 645.5 + ], + "spans": [], + "index": 31 + }, + { + "bbox": [ + 228, + 645.5, + 382, + 662.0 + ], + "spans": [], + "index": 32 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 665, + 506, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 665, + 506, + 678 + ], + "spans": [ + { + "bbox": [ + 105, + 665, + 506, + 678 + ], + "score": 1.0, + "content": "In this section we demonstrate a 
β€œproduct-space\" reformulation of (1) which allows us to rewrite", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 106, + 677, + 505, + 689 + ], + "spans": [ + { + "bbox": [ + 106, + 677, + 505, + 689 + ], + "score": 1.0, + "content": "it in a standard form involving just two operators, one maximal monotone and the other monotone", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 687, + 506, + 700 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 364, + 700 + ], + "score": 1.0, + "content": "and Lipschitz. This approach was pioneered in (BriceΓ±o-Arias", + "type": "text" + }, + { + "bbox": [ + 365, + 689, + 373, + 698 + ], + "score": 0.27, + "content": "\\&", + "type": "inline_equation" + }, + { + "bbox": [ + 374, + 687, + 506, + 700 + ], + "score": 1.0, + "content": "Combettes, 2011; Combettes &", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 106, + 699, + 505, + 711 + ], + "spans": [ + { + "bbox": [ + 106, + 699, + 505, + 711 + ], + "score": 1.0, + "content": "Pesquet, 2012). 
Along with allowing for a simple definition of an approximation residual as a measure", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 709, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 505, + 722 + ], + "score": 1.0, + "content": "of approximation error in solving (1), it allows one to apply operator splitting methods originally", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 106, + 721, + 384, + 732 + ], + "spans": [ + { + "bbox": [ + 106, + 721, + 373, + 732 + ], + "score": 1.0, + "content": "formulated for two operators to problems such as (1) for any finite", + "type": "text" + }, + { + "bbox": [ + 373, + 724, + 380, + 730 + ], + "score": 0.75, + "content": "n", + "type": "inline_equation" + }, + { + "bbox": [ + 380, + 721, + 384, + 732 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 38 + } + ], + "index": 35.5 + } + ], + "page_idx": 26, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 106, + 26, + 308, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 760 + ], + "lines": [ + { + "bbox": [ + 298, + 750, + 312, + 764 + ], + "spans": [ + { + "bbox": [ + 298, + 750, + 312, + 764 + ], + "score": 1.0, + "content": "27", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 107, + 82, + 257, + 94 + ], + "lines": [ + { + "bbox": [ + 105, + 82, + 257, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 190, + 95 + ], + "score": 1.0, + "content": "Dividing through by", + "type": "text" + }, + { + "bbox": [ + 190, + 83, + 212, + 94 + ], + "score": 0.8, + "content": "\\alpha \\rho K", + "type": "inline_equation" + }, + { + "bbox": [ + 212, 
+ 82, + 257, + 95 + ], + "score": 1.0, + "content": ", we obtain", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0, + "bbox_fs": [ + 105, + 82, + 257, + 95 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 158, + 98, + 452, + 133 + ], + "lines": [ + { + "bbox": [ + 158, + 98, + 452, + 133 + ], + "spans": [ + { + "bbox": [ + 158, + 98, + 452, + 133 + ], + "score": 0.94, + "content": "\\frac { 1 } { K } \\sum _ { j = 1 } ^ { K } ( U _ { j } + W _ { j } ) \\leq \\frac { ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { K } } { \\alpha \\rho K } \\left( R _ { 1 } + \\frac { C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } } { C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } } \\right) ,", + "type": "interline_equation", + "image_path": "709fbff6f7f9fc78233cab42c32bb1b3525562556338f382da851e69e5f67ba0.jpg" + } + ] + } + ], + "index": 2, + "virtual_lines": [ + { + "bbox": [ + 158, + 98, + 452, + 109.66666666666667 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 158, + 109.66666666666667, + 452, + 121.33333333333334 + ], + "spans": [], + "index": 2 + }, + { + "bbox": [ + 158, + 121.33333333333334, + 452, + 133.0 + ], + "spans": [], + "index": 3 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 145, + 246, + 157 + ], + "lines": [ + { + "bbox": [ + 105, + 144, + 248, + 159 + ], + "spans": [ + { + "bbox": [ + 105, + 144, + 146, + 159 + ], + "score": 1.0, + "content": "and since", + "type": "text" + }, + { + "bbox": [ + 146, + 145, + 189, + 158 + ], + "score": 0.93, + "content": "\\alpha = C _ { f } \\rho ^ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 189, + 144, + 248, + 159 + ], + "score": 1.0, + "content": ", we also have", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 4, + "bbox_fs": [ + 105, + 144, + 248, + 159 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 239, + 162, + 372, + 190 + ], + "lines": [ + { + "bbox": [ + 239, + 162, + 372, + 190 + ], + 
"spans": [ + { + "bbox": [ + 239, + 162, + 372, + 190 + ], + "score": 0.94, + "content": "\\frac { C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } } { C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } } = \\frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } .", + "type": "interline_equation", + "image_path": "2a924310d03b5ee557f28fb34d78a936893e8f7cf3ff9134c8d76d1600906ee3.jpg" + } + ] + } + ], + "index": 5, + "virtual_lines": [ + { + "bbox": [ + 239, + 162, + 372, + 190 + ], + "spans": [], + "index": 5 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 194, + 160, + 205 + ], + "lines": [ + { + "bbox": [ + 106, + 193, + 162, + 207 + ], + "spans": [ + { + "bbox": [ + 106, + 193, + 162, + 207 + ], + "score": 1.0, + "content": "Furthermore,", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 6, + "bbox_fs": [ + 106, + 193, + 162, + 207 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 242, + 209, + 368, + 226 + ], + "lines": [ + { + "bbox": [ + 242, + 209, + 368, + 226 + ], + "spans": [ + { + "bbox": [ + 242, + 209, + 368, + 226 + ], + "score": 0.91, + "content": "\\rho \\leq K ^ { - \\frac { 1 } { 4 } } \\implies \\alpha \\leq C _ { f } K ^ { - \\frac { 1 } { 2 } } .", + "type": "interline_equation", + "image_path": "b69e4375e97113c3ae9c1241a18fc51e9b50dc039c28a0839addd7cb241b9721.jpg" + } + ] + } + ], + "index": 7, + "virtual_lines": [ + { + "bbox": [ + 242, + 209, + 368, + 226 + ], + "spans": [], + "index": 7 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 230, + 243, + 242 + ], + "lines": [ + { + "bbox": [ + 106, + 230, + 243, + 243 + ], + "spans": [ + { + "bbox": [ + 106, + 230, + 243, + 243 + ], + "score": 1.0, + "content": "Substituting these into (60) yields", + "type": "text" + } + ], + "index": 8 + } + ], + "index": 8, + "bbox_fs": [ + 106, + 230, + 243, + 243 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 168, + 245, + 443, + 317 + ], + "lines": [ + { + "bbox": [ + 
168, + 245, + 443, + 317 + ], + "spans": [ + { + "bbox": [ + 168, + 245, + 443, + 317 + ], + "score": 0.94, + "content": "\\begin{array} { r } { \\displaystyle \\frac { 1 } { K } \\sum _ { j = 1 } ^ { K } ( U _ { j } + W _ { j } ) \\leq \\frac { \\left( 1 + \\frac { C _ { f } ( C _ { f } C _ { 1 } + C _ { 3 } ) } { K } \\right) ^ { K } } { \\alpha \\rho K } \\left( R _ { 1 } + \\frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } \\right) } \\\\ { \\leq \\frac { \\exp ( C _ { f } ( C _ { f } C _ { 1 } + C _ { 3 } ) ) } { \\alpha \\rho K } \\left( R _ { 1 } + \\frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } \\right) , } \\end{array}", + "type": "interline_equation", + "image_path": "0e2124502d00a7615dcadf05fc1ed22e68a79cacf3e5071ac1736f2399dff60d.jpg" + } + ] + } + ], + "index": 10, + "virtual_lines": [ + { + "bbox": [ + 168, + 245, + 443, + 269.0 + ], + "spans": [], + "index": 9 + }, + { + "bbox": [ + 168, + 269.0, + 443, + 293.0 + ], + "spans": [], + "index": 10 + }, + { + "bbox": [ + 168, + 293.0, + 443, + 317.0 + ], + "spans": [], + "index": 11 + } + ] + }, + { + "type": "text", + "bbox": [ + 105, + 321, + 463, + 334 + ], + "lines": [ + { + "bbox": [ + 105, + 319, + 464, + 336 + ], + "spans": [ + { + "bbox": [ + 105, + 319, + 237, + 336 + ], + "score": 1.0, + "content": "where we have used that for any", + "type": "text" + }, + { + "bbox": [ + 237, + 321, + 333, + 334 + ], + "score": 0.93, + "content": "t \\ge 0 , 1 + t / K \\le e ^ { t / K }", + "type": "inline_equation" + }, + { + "bbox": [ + 333, + 319, + 387, + 336 + ], + "score": 1.0, + "content": ", so therefore", + "type": "text" + }, + { + "bbox": [ + 387, + 321, + 459, + 334 + ], + "score": 0.94, + "content": "( 1 + t / K ) ^ { K } \\leq e ^ { t }", + "type": "inline_equation" + }, + { + "bbox": [ + 460, + 319, + 464, + 336 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 12 + } + ], + "index": 12, + "bbox_fs": [ + 105, + 
319, + 464, + 336 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 339, + 505, + 364 + ], + "lines": [ + { + "bbox": [ + 104, + 337, + 506, + 354 + ], + "spans": [ + { + "bbox": [ + 104, + 337, + 240, + 354 + ], + "score": 1.0, + "content": "The worst-case rates in terms of", + "type": "text" + }, + { + "bbox": [ + 241, + 341, + 251, + 350 + ], + "score": 0.84, + "content": "K", + "type": "inline_equation" + }, + { + "bbox": [ + 252, + 337, + 303, + 354 + ], + "score": 1.0, + "content": "occur when", + "type": "text" + }, + { + "bbox": [ + 303, + 339, + 352, + 352 + ], + "score": 0.93, + "content": "\\rho = K ^ { - 1 / 4 }", + "type": "inline_equation" + }, + { + "bbox": [ + 352, + 337, + 371, + 354 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 371, + 339, + 433, + 352 + ], + "score": 0.91, + "content": "\\alpha = C _ { f } K ^ { - 1 / 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 434, + 337, + 506, + 354 + ], + "score": 1.0, + "content": ". This is the case", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 351, + 454, + 365 + ], + "spans": [ + { + "bbox": [ + 105, + 351, + 130, + 365 + ], + "score": 1.0, + "content": "when", + "type": "text" + }, + { + "bbox": [ + 131, + 352, + 177, + 364 + ], + "score": 0.93, + "content": "K \\geq ( 2 L ) ^ { 4 }", + "type": "inline_equation" + }, + { + "bbox": [ + 178, + 351, + 384, + 365 + ], + "score": 1.0, + "content": ". 
Substituting these into the denominator yields, for", + "type": "text" + }, + { + "bbox": [ + 385, + 352, + 432, + 364 + ], + "score": 0.92, + "content": "K \\geq ( 2 L ) ^ { 4 }", + "type": "inline_equation" + }, + { + "bbox": [ + 432, + 351, + 454, + 365 + ], + "score": 1.0, + "content": ", that", + "type": "text" + } + ], + "index": 14 + } + ], + "index": 13.5, + "bbox_fs": [ + 104, + 337, + 506, + 365 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 167, + 368, + 443, + 403 + ], + "lines": [ + { + "bbox": [ + 167, + 368, + 443, + 403 + ], + "spans": [ + { + "bbox": [ + 167, + 368, + 443, + 403 + ], + "score": 0.93, + "content": "\\frac { 1 } { K } \\sum _ { j = 1 } ^ { K } ( U _ { j } + W _ { j } ) \\leq \\frac { \\exp ( C _ { f } ( C _ { f } C _ { 1 } + C _ { 3 } ) ) } { C _ { f } K ^ { 1 / 4 } } \\left( R _ { 1 } + \\frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } \\right) .", + "type": "interline_equation", + "image_path": "a8c50d5cddeb344ec50671b2a66248b1a9062849bd17f1164cc995fe7923f6f9.jpg" + } + ] + } + ], + "index": 16, + "virtual_lines": [ + { + "bbox": [ + 167, + 368, + 443, + 379.6666666666667 + ], + "spans": [], + "index": 15 + }, + { + "bbox": [ + 167, + 379.6666666666667, + 443, + 391.33333333333337 + ], + "spans": [], + "index": 16 + }, + { + "bbox": [ + 167, + 391.33333333333337, + 443, + 403.00000000000006 + ], + "spans": [], + "index": 17 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 408, + 330, + 422 + ], + "lines": [ + { + "bbox": [ + 106, + 408, + 330, + 423 + ], + "spans": [ + { + "bbox": [ + 106, + 408, + 154, + 423 + ], + "score": 1.0, + "content": "Thus, since", + "type": "text" + }, + { + "bbox": [ + 154, + 408, + 285, + 422 + ], + "score": 0.92, + "content": "G _ { k } \\leq \\operatorname* { m a x } \\{ \\tau , \\tau ^ { - 1 } \\} \\left( U _ { k } + W _ { k } \\right)", + "type": "inline_equation" + }, + { + "bbox": [ + 285, + 408, + 330, + 423 + ], + "score": 1.0, + "content": ", we 
obtain", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 18, + "bbox_fs": [ + 106, + 408, + 330, + 423 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 131, + 426, + 479, + 461 + ], + "lines": [ + { + "bbox": [ + 131, + 426, + 479, + 461 + ], + "spans": [ + { + "bbox": [ + 131, + 426, + 479, + 461 + ], + "score": 0.9, + "content": "\\frac { 1 } { K } \\sum _ { j = 1 } ^ { K } \\mathbb { E } [ G _ { j } ] \\leq \\frac { \\operatorname* { m a x } \\{ \\tau , \\tau ^ { - 1 } \\} \\exp { ( C _ { f } ( C _ { f } C _ { 1 } + C _ { 3 } ) ) } } { C _ { f } K ^ { 1 / 4 } } \\left( \\| p ^ { 1 } - p ^ { * } \\| ^ { 2 } + \\frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } \\right) ,", + "type": "interline_equation", + "image_path": "97ff30d691a0554097db475829ee6914e88295e18e02f95e3f6207493be4aa19.jpg" + } + ] + } + ], + "index": 20, + "virtual_lines": [ + { + "bbox": [ + 131, + 426, + 479, + 437.6666666666667 + ], + "spans": [], + "index": 19 + }, + { + "bbox": [ + 131, + 437.6666666666667, + 479, + 449.33333333333337 + ], + "spans": [], + "index": 20 + }, + { + "bbox": [ + 131, + 449.33333333333337, + 479, + 461.00000000000006 + ], + "spans": [], + "index": 21 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 465, + 163, + 477 + ], + "lines": [ + { + "bbox": [ + 105, + 464, + 164, + 478 + ], + "spans": [ + { + "bbox": [ + 105, + 464, + 164, + 478 + ], + "score": 1.0, + "content": "which is (58).", + "type": "text" + } + ], + "index": 22 + } + ], + "index": 22, + "bbox_fs": [ + 105, + 464, + 164, + 478 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 482, + 504, + 505 + ], + "lines": [ + { + "bbox": [ + 104, + 479, + 504, + 496 + ], + "spans": [ + { + "bbox": [ + 104, + 479, + 133, + 496 + ], + "score": 1.0, + "content": "When", + "type": "text" + }, + { + "bbox": [ + 133, + 482, + 180, + 495 + ], + "score": 0.92, + "content": "K < ( 2 L ) ^ { 4 }", + "type": "inline_equation" + }, + { + "bbox": [ + 181, + 479, + 371, 
+ 496 + ], + "score": 1.0, + "content": ", (57) can similarly be obtained by substituting", + "type": "text" + }, + { + "bbox": [ + 372, + 482, + 421, + 495 + ], + "score": 0.94, + "content": "\\rho = ( 2 L ) ^ { - 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 422, + 479, + 440, + 496 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 441, + 482, + 504, + 495 + ], + "score": 0.87, + "content": "\\alpha = C _ { f } ( 2 L ) ^ { - 2 }", + "type": "inline_equation" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 492, + 506, + 506 + ], + "spans": [ + { + "bbox": [ + 105, + 492, + 146, + 506 + ], + "score": 1.0, + "content": "into (61).", + "type": "text" + }, + { + "bbox": [ + 494, + 495, + 506, + 505 + ], + "score": 0.997, + "content": "β–‘", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 23.5, + "bbox_fs": [ + 104, + 479, + 506, + 506 + ] + }, + { + "type": "title", + "bbox": [ + 108, + 521, + 275, + 534 + ], + "lines": [ + { + "bbox": [ + 104, + 519, + 277, + 536 + ], + "spans": [ + { + "bbox": [ + 104, + 519, + 277, + 536 + ], + "score": 1.0, + "content": "F APPROXIMATION RESIDUALS", + "type": "text" + } + ], + "index": 25 + } + ], + "index": 25 + }, + { + "type": "text", + "bbox": [ + 106, + 545, + 505, + 580 + ], + "lines": [ + { + "bbox": [ + 105, + 545, + 505, + 559 + ], + "spans": [ + { + "bbox": [ + 105, + 545, + 505, + 559 + ], + "score": 1.0, + "content": "In this section we derive the approximation residual used to assess the performance of the algorithms", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 556, + 505, + 570 + ], + "spans": [ + { + "bbox": [ + 105, + 556, + 505, + 570 + ], + "score": 1.0, + "content": "in the numerical experiments. 
This residual relies on the following product-space reformulation", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 566, + 135, + 581 + ], + "spans": [ + { + "bbox": [ + 105, + 566, + 135, + 581 + ], + "score": 1.0, + "content": "of (1).", + "type": "text" + } + ], + "index": 28 + } + ], + "index": 27, + "bbox_fs": [ + 105, + 545, + 505, + 581 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 592, + 398, + 604 + ], + "lines": [ + { + "bbox": [ + 105, + 592, + 398, + 605 + ], + "spans": [ + { + "bbox": [ + 105, + 592, + 398, + 605 + ], + "score": 1.0, + "content": "F.1 PRODUCT-SPACE REFORMULATION AND RESIDUAL PRINCIPLE", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 29, + "bbox_fs": [ + 105, + 592, + 398, + 605 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 613, + 310, + 624 + ], + "lines": [ + { + "bbox": [ + 105, + 610, + 311, + 628 + ], + "spans": [ + { + "bbox": [ + 105, + 610, + 311, + 628 + ], + "score": 1.0, + "content": "Recall (1), the monotone inclusion we are solving:", + "type": "text" + } + ], + "index": 30 + } + ], + "index": 30, + "bbox_fs": [ + 105, + 610, + 311, + 628 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 228, + 629, + 382, + 662 + ], + "lines": [ + { + "bbox": [ + 228, + 629, + 382, + 662 + ], + "spans": [ + { + "bbox": [ + 228, + 629, + 382, + 662 + ], + "score": 0.94, + "content": "{ \\mathrm { F i n d ~ } } z \\in \\mathbb { R } ^ { d } : 0 \\in \\sum _ { i = 1 } ^ { n } A _ { i } ( z ) + B ( z ) .", + "type": "interline_equation", + "image_path": "3cda2b34e62a90a27633b8f271f0dd9396d59e4d546b2003413a53f23c6f3e9b.jpg" + } + ] + } + ], + "index": 31.5, + "virtual_lines": [ + { + "bbox": [ + 228, + 629, + 382, + 645.5 + ], + "spans": [], + "index": 31 + }, + { + "bbox": [ + 228, + 645.5, + 382, + 662.0 + ], + "spans": [], + "index": 32 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 665, + 506, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 665, + 506, + 678 + ], + "spans": [ 
+ { + "bbox": [ + 105, + 665, + 506, + 678 + ], + "score": 1.0, + "content": "In this section we demonstrate a β€œproduct-space\" reformulation of (1) which allows us to rewrite", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 106, + 677, + 505, + 689 + ], + "spans": [ + { + "bbox": [ + 106, + 677, + 505, + 689 + ], + "score": 1.0, + "content": "it in a standard form involving just two operators, one maximal monotone and the other monotone", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 687, + 506, + 700 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 364, + 700 + ], + "score": 1.0, + "content": "and Lipschitz. This approach was pioneered in (BriceΓ±o-Arias", + "type": "text" + }, + { + "bbox": [ + 365, + 689, + 373, + 698 + ], + "score": 0.27, + "content": "\\&", + "type": "inline_equation" + }, + { + "bbox": [ + 374, + 687, + 506, + 700 + ], + "score": 1.0, + "content": "Combettes, 2011; Combettes &", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 106, + 699, + 505, + 711 + ], + "spans": [ + { + "bbox": [ + 106, + 699, + 505, + 711 + ], + "score": 1.0, + "content": "Pesquet, 2012). 
Along with allowing for a simple definition of an approximation residual as a measure", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 709, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 505, + 722 + ], + "score": 1.0, + "content": "of approximation error in solving (1), it allows one to apply operator splitting methods originally", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 106, + 721, + 384, + 732 + ], + "spans": [ + { + "bbox": [ + 106, + 721, + 373, + 732 + ], + "score": 1.0, + "content": "formulated for two operators to problems such as (1) for any finite", + "type": "text" + }, + { + "bbox": [ + 373, + 724, + 380, + 730 + ], + "score": 0.75, + "content": "n", + "type": "inline_equation" + }, + { + "bbox": [ + 380, + 721, + 384, + 732 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 38 + } + ], + "index": 35.5, + "bbox_fs": [ + 105, + 665, + 506, + 732 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 107, + 82, + 268, + 94 + ], + "lines": [ + { + "bbox": [ + 106, + 81, + 268, + 96 + ], + "spans": [ + { + "bbox": [ + 106, + 81, + 268, + 96 + ], + "score": 1.0, + "content": "Observe that solving (1) is equivalent to", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "interline_equation", + "bbox": [ + 182, + 97, + 427, + 149 + ], + "lines": [ + { + "bbox": [ + 182, + 97, + 427, + 149 + ], + "spans": [ + { + "bbox": [ + 182, + 97, + 427, + 149 + ], + "score": 0.9, + "content": "\\begin{array} { l l } { \\mathrm { F i n d } \\left( w _ { 1 } , \\ldots , w _ { n } , z \\right) \\in \\mathbb { R } ^ { \\left( n + 1 \\right) d } : } & { w _ { i } \\in A _ { i } ( z ) , \\quad i \\in { 1 . . n } } \\\\ & { \\quad \\displaystyle 0 \\in \\sum _ { i = 1 } ^ { n } w _ { i } + B ( z ) . 
} \\end{array}", + "type": "interline_equation", + "image_path": "7a56b1051c11c41e1ca53600e95cbd8143c44db6433bee45e39fe04bc10e8844.jpg" + } + ] + } + ], + "index": 2, + "virtual_lines": [ + { + "bbox": [ + 182, + 97, + 427, + 114.33333333333333 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 182, + 114.33333333333333, + 427, + 131.66666666666666 + ], + "spans": [], + "index": 2 + }, + { + "bbox": [ + 182, + 131.66666666666666, + 427, + 149.0 + ], + "spans": [], + "index": 3 + } + ] + }, + { + "type": "text", + "bbox": [ + 109, + 150, + 501, + 196 + ], + "lines": [ + { + "bbox": [ + 106, + 149, + 504, + 164 + ], + "spans": [ + { + "bbox": [ + 106, + 149, + 351, + 164 + ], + "score": 1.0, + "content": "This formulation resembles that of the extended solution set", + "type": "text" + }, + { + "bbox": [ + 351, + 152, + 359, + 162 + ], + "score": 0.83, + "content": "s", + "type": "inline_equation" + }, + { + "bbox": [ + 360, + 149, + 504, + 164 + ], + "score": 1.0, + "content": "used in projective spitting, as given", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 108, + 163, + 504, + 174 + ], + "spans": [ + { + "bbox": [ + 108, + 163, + 400, + 174 + ], + "score": 1.0, + "content": "in (5), except that it combines the final two conditions in the definition of", + "type": "text" + }, + { + "bbox": [ + 400, + 163, + 408, + 172 + ], + "score": 0.84, + "content": "s", + "type": "inline_equation" + }, + { + "bbox": [ + 408, + 163, + 504, + 174 + ], + "score": 1.0, + "content": ", and thus does not need", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 108, + 173, + 503, + 185 + ], + "spans": [ + { + "bbox": [ + 108, + 173, + 194, + 185 + ], + "score": 1.0, + "content": "the final dual variable", + "type": "text" + }, + { + "bbox": [ + 194, + 174, + 218, + 185 + ], + "score": 0.89, + "content": "w _ { n + 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 218, + 173, + 503, + 185 + ], + "score": 1.0, + "content": ". 
From the definition of the inverse of an operator, the above formulation", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 185, + 170, + 197 + ], + "spans": [ + { + "bbox": [ + 107, + 185, + 170, + 197 + ], + "score": 1.0, + "content": "is equivalent to", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 5.5 + }, + { + "type": "interline_equation", + "bbox": [ + 171, + 199, + 440, + 250 + ], + "lines": [ + { + "bbox": [ + 171, + 199, + 440, + 250 + ], + "spans": [ + { + "bbox": [ + 171, + 199, + 440, + 250 + ], + "score": 0.91, + "content": "\\begin{array} { r l } { \\mathrm { F i n d ~ } ( w _ { 1 } , \\dots , w _ { n } , z ) \\in \\mathbb { R } ^ { ( n + 1 ) d } : } & { 0 \\in A _ { i } ^ { - 1 } ( w _ { i } ) - z , \\quad i \\in 1 . . n } \\\\ & { 0 \\in \\displaystyle \\sum _ { i = 1 } ^ { n } w _ { i } + B ( z ) . } \\end{array}", + "type": "interline_equation", + "image_path": "a5e366b1daec78e36a75c0d0685354e977bf371f6228267db8c5657d622d206b.jpg" + } + ] + } + ], + "index": 9, + "virtual_lines": [ + { + "bbox": [ + 171, + 199, + 440, + 216.0 + ], + "spans": [], + "index": 8 + }, + { + "bbox": [ + 171, + 216.0, + 440, + 233.0 + ], + "spans": [], + "index": 9 + }, + { + "bbox": [ + 171, + 233.0, + 440, + 250.0 + ], + "spans": [], + "index": 10 + } + ] + }, + { + "type": "text", + "bbox": [ + 104, + 254, + 453, + 267 + ], + "lines": [ + { + "bbox": [ + 104, + 253, + 455, + 268 + ], + "spans": [ + { + "bbox": [ + 104, + 253, + 304, + 268 + ], + "score": 1.0, + "content": "These conditions are in turn equivalent to finding", + "type": "text" + }, + { + "bbox": [ + 305, + 254, + 414, + 267 + ], + "score": 0.89, + "content": "( w _ { 1 } , \\ldots , w _ { n } , z ) \\in \\mathbb { R } ^ { ( n + 1 ) d }", + "type": "inline_equation" + }, + { + "bbox": [ + 415, + 253, + 455, + 268 + ], + "score": 1.0, + "content": "such that", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 11 + }, + { + "type": "interline_equation", + "bbox": 
[ + 214, + 271, + 395, + 284 + ], + "lines": [ + { + "bbox": [ + 214, + 271, + 395, + 284 + ], + "spans": [ + { + "bbox": [ + 214, + 271, + 395, + 284 + ], + "score": 0.89, + "content": "0 \\in \\mathcal { A } ( w _ { 1 } , \\ldots , w _ { n } , z ) + \\mathcal { B } ( w _ { 1 } , \\ldots , w _ { n } , z ) ,", + "type": "interline_equation", + "image_path": "3f2baa06ae4751b44507f1aec5ab9cad1cab25df653740d7a66eff10132dcc1d.jpg" + } + ] + } + ], + "index": 12, + "virtual_lines": [ + { + "bbox": [ + 214, + 271, + 395, + 284 + ], + "spans": [], + "index": 12 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 289, + 232, + 300 + ], + "lines": [ + { + "bbox": [ + 105, + 287, + 233, + 303 + ], + "spans": [ + { + "bbox": [ + 105, + 287, + 133, + 303 + ], + "score": 1.0, + "content": "where", + "type": "text" + }, + { + "bbox": [ + 133, + 290, + 145, + 299 + ], + "score": 0.85, + "content": "\\mathcal { A }", + "type": "inline_equation" + }, + { + "bbox": [ + 145, + 287, + 233, + 303 + ], + "score": 1.0, + "content": "is the set-valued map", + "type": "text" + } + ], + "index": 13 + } + ], + "index": 13 + }, + { + "type": "interline_equation", + "bbox": [ + 165, + 304, + 446, + 319 + ], + "lines": [ + { + "bbox": [ + 165, + 304, + 446, + 319 + ], + "spans": [ + { + "bbox": [ + 165, + 304, + 446, + 319 + ], + "score": 0.88, + "content": "\\mathcal { A } ( w _ { 1 } , \\dots , w _ { n } , z ) \\mapsto A _ { 1 } ^ { - 1 } ( w _ { 1 } ) \\times A _ { 2 } ^ { - 1 } ( w _ { 2 } ) \\times \\dots \\times A _ { n } ^ { - 1 } ( w _ { n } ) \\times \\{ 0 \\}", + "type": "interline_equation", + "image_path": "977d8a427bdf60035473ef5b916a73acdd9ad16bff24eb09b967888aa0743027.jpg" + } + ] + } + ], + "index": 14, + "virtual_lines": [ + { + "bbox": [ + 165, + 304, + 446, + 319 + ], + "spans": [], + "index": 14 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 323, + 251, + 334 + ], + "lines": [ + { + "bbox": [ + 106, + 323, + 250, + 336 + ], + "spans": [ + { + "bbox": [ + 106, + 
323, + 123, + 336 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 123, + 324, + 134, + 333 + ], + "score": 0.85, + "content": "\\mathcal { B }", + "type": "inline_equation" + }, + { + "bbox": [ + 135, + 323, + 250, + 336 + ], + "score": 1.0, + "content": "is the single-valued operator", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 15 + }, + { + "type": "interline_equation", + "bbox": [ + 165, + 339, + 444, + 393 + ], + "lines": [ + { + "bbox": [ + 165, + 339, + 444, + 393 + ], + "spans": [ + { + "bbox": [ + 165, + 339, + 444, + 393 + ], + "score": 0.94, + "content": "\\mathcal { B } ( w _ { 1 } , \\dots , w _ { n } , z ) \\mapsto \\left[ \\begin{array} { c c c c } { 0 } & { \\cdots } & { 0 } & { - I } \\\\ { \\vdots } & { \\ddots } & { \\vdots } & { \\vdots } \\\\ { 0 } & { \\cdots } & { 0 } & { - I } \\\\ { I } & { \\cdots } & { I } & { 0 } \\end{array} \\right] \\left[ \\begin{array} { c } { w _ { 1 } } \\\\ { \\vdots } \\\\ { w _ { n } } \\\\ { z } \\end{array} \\right] + \\left[ \\begin{array} { c } { 0 } \\\\ { \\vdots } \\\\ { 0 } \\\\ { B ( z ) } \\end{array} \\right] .", + "type": "interline_equation", + "image_path": "0ff8bced27f5d7b5ccb6d4d33cc7dda36dfb975caf3ecd6390b3e0d9fd11b075.jpg" + } + ] + } + ], + "index": 17, + "virtual_lines": [ + { + "bbox": [ + 165, + 339, + 444, + 357.0 + ], + "spans": [], + "index": 16 + }, + { + "bbox": [ + 165, + 357.0, + 444, + 375.0 + ], + "spans": [], + "index": 17 + }, + { + "bbox": [ + 165, + 375.0, + 444, + 393.0 + ], + "spans": [], + "index": 18 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 397, + 504, + 453 + ], + "lines": [ + { + "bbox": [ + 106, + 397, + 505, + 409 + ], + "spans": [ + { + "bbox": [ + 106, + 397, + 214, + 409 + ], + "score": 1.0, + "content": "It is easily established that", + "type": "text" + }, + { + "bbox": [ + 214, + 398, + 225, + 407 + ], + "score": 0.86, + "content": "\\mathcal { B }", + "type": "inline_equation" + }, + { + "bbox": [ + 
226, + 397, + 445, + 409 + ], + "score": 1.0, + "content": "is maximal monotone and Lipschitz continuous, while", + "type": "text" + }, + { + "bbox": [ + 446, + 398, + 457, + 407 + ], + "score": 0.83, + "content": "\\mathcal { A }", + "type": "inline_equation" + }, + { + "bbox": [ + 457, + 397, + 505, + 409 + ], + "score": 1.0, + "content": "is maximal", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 408, + 505, + 420 + ], + "spans": [ + { + "bbox": [ + 105, + 408, + 189, + 420 + ], + "score": 1.0, + "content": "monotone. Letting", + "type": "text" + }, + { + "bbox": [ + 189, + 408, + 251, + 419 + ], + "score": 0.91, + "content": "\\mathcal { T } \\doteq \\mathcal { A } + \\mathcal { B }", + "type": "inline_equation" + }, + { + "bbox": [ + 251, + 408, + 505, + 420 + ], + "score": 1.0, + "content": ", it follows from (Bauschke & Combettes, 2017, Proposition", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 419, + 505, + 431 + ], + "spans": [ + { + "bbox": [ + 106, + 419, + 154, + 431 + ], + "score": 1.0, + "content": "20.23) that", + "type": "text" + }, + { + "bbox": [ + 154, + 419, + 165, + 429 + ], + "score": 0.86, + "content": "\\mathcal { T }", + "type": "inline_equation" + }, + { + "bbox": [ + 165, + 419, + 505, + 431 + ], + "score": 1.0, + "content": "is maximal monotone. 
Thus, we have reformulated (1) as the monotone inclusion", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 107, + 426, + 507, + 444 + ], + "spans": [ + { + "bbox": [ + 107, + 430, + 147, + 442 + ], + "score": 0.89, + "content": "0 \\in \\mathcal { T } ( q )", + "type": "inline_equation" + }, + { + "bbox": [ + 148, + 426, + 163, + 444 + ], + "score": 1.0, + "content": "for", + "type": "text" + }, + { + "bbox": [ + 163, + 432, + 169, + 441 + ], + "score": 0.78, + "content": "q", + "type": "inline_equation" + }, + { + "bbox": [ + 169, + 426, + 253, + 444 + ], + "score": 1.0, + "content": "in the product space", + "type": "text" + }, + { + "bbox": [ + 254, + 429, + 288, + 440 + ], + "score": 0.91, + "content": "\\mathbb { R } ^ { ( n + 1 ) \\bar { d } }", + "type": "inline_equation" + }, + { + "bbox": [ + 288, + 426, + 329, + 444 + ], + "score": 1.0, + "content": ". A vector", + "type": "text" + }, + { + "bbox": [ + 330, + 429, + 360, + 440 + ], + "score": 0.91, + "content": "z \\in \\mathbb { R } ^ { d }", + "type": "inline_equation" + }, + { + "bbox": [ + 360, + 426, + 507, + 444 + ], + "score": 1.0, + "content": "solves (1) if and only if there exists", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 106, + 438, + 389, + 456 + ], + "spans": [ + { + "bbox": [ + 106, + 442, + 189, + 453 + ], + "score": 0.92, + "content": "( w _ { 1 } , \\dots , w _ { n } ) \\in \\mathbb { R } ^ { n d }", + "type": "inline_equation" + }, + { + "bbox": [ + 190, + 438, + 229, + 456 + ], + "score": 1.0, + "content": "such that", + "type": "text" + }, + { + "bbox": [ + 229, + 441, + 270, + 453 + ], + "score": 0.93, + "content": "\\bar { 0 } \\in \\mathcal { T } ( q )", + "type": "inline_equation" + }, + { + "bbox": [ + 270, + 438, + 301, + 456 + ], + "score": 1.0, + "content": ", where", + "type": "text" + }, + { + "bbox": [ + 301, + 442, + 383, + 453 + ], + "score": 0.91, + "content": "q = ( w _ { 1 } , \\dots , w _ { n } , z )", + "type": "inline_equation" + 
}, + { + "bbox": [ + 384, + 438, + 389, + 456 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 23 + } + ], + "index": 21 + }, + { + "type": "text", + "bbox": [ + 106, + 457, + 505, + 560 + ], + "lines": [ + { + "bbox": [ + 105, + 457, + 506, + 472 + ], + "spans": [ + { + "bbox": [ + 105, + 457, + 157, + 472 + ], + "score": 1.0, + "content": "For any pair", + "type": "text" + }, + { + "bbox": [ + 158, + 458, + 180, + 470 + ], + "score": 0.92, + "content": "( q , v )", + "type": "inline_equation" + }, + { + "bbox": [ + 181, + 457, + 219, + 472 + ], + "score": 1.0, + "content": "such that", + "type": "text" + }, + { + "bbox": [ + 219, + 457, + 261, + 470 + ], + "score": 0.85, + "content": "v \\in \\mathcal { T } ( q )", + "type": "inline_equation" + }, + { + "bbox": [ + 261, + 457, + 265, + 472 + ], + "score": 1.0, + "content": ",", + "type": "text" + }, + { + "bbox": [ + 265, + 457, + 285, + 470 + ], + "score": 0.87, + "content": "\\| v \\| ^ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 286, + 457, + 449, + 472 + ], + "score": 1.0, + "content": "represents an approximation residual for", + "type": "text" + }, + { + "bbox": [ + 449, + 460, + 456, + 469 + ], + "score": 0.78, + "content": "q", + "type": "inline_equation" + }, + { + "bbox": [ + 456, + 457, + 506, + 472 + ], + "score": 1.0, + "content": "in the sense", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 468, + 506, + 482 + ], + "spans": [ + { + "bbox": [ + 105, + 468, + 124, + 482 + ], + "score": 1.0, + "content": "that", + "type": "text" + }, + { + "bbox": [ + 124, + 470, + 150, + 479 + ], + "score": 0.92, + "content": "v = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 150, + 468, + 184, + 482 + ], + "score": 1.0, + "content": "implies", + "type": "text" + }, + { + "bbox": [ + 184, + 471, + 190, + 480 + ], + "score": 0.81, + "content": "q", + "type": "inline_equation" + }, + { + "bbox": [ + 191, + 468, + 337, + 482 + ], + "score": 1.0, + 
"content": "is a solution to (62). One may take", + "type": "text" + }, + { + "bbox": [ + 337, + 469, + 358, + 481 + ], + "score": 0.92, + "content": "\\| \\bar { v } \\| ^ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 358, + 468, + 474, + 482 + ], + "score": 1.0, + "content": "as a measure of the error of", + "type": "text" + }, + { + "bbox": [ + 474, + 471, + 480, + 480 + ], + "score": 0.78, + "content": "q", + "type": "inline_equation" + }, + { + "bbox": [ + 481, + 468, + 506, + 482 + ], + "score": 1.0, + "content": "as an", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 480, + 506, + 493 + ], + "spans": [ + { + "bbox": [ + 105, + 480, + 312, + 493 + ], + "score": 1.0, + "content": "approximate solution to (62), and it can only be 0 if", + "type": "text" + }, + { + "bbox": [ + 312, + 482, + 318, + 491 + ], + "score": 0.81, + "content": "q", + "type": "inline_equation" + }, + { + "bbox": [ + 318, + 480, + 506, + 493 + ], + "score": 1.0, + "content": "is a solution. 
Given two approximate solutions", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 107, + 490, + 506, + 504 + ], + "spans": [ + { + "bbox": [ + 107, + 493, + 117, + 502 + ], + "score": 0.83, + "content": "q _ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 117, + 490, + 135, + 504 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 135, + 492, + 145, + 502 + ], + "score": 0.85, + "content": "q _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 146, + 490, + 214, + 504 + ], + "score": 1.0, + "content": "with certificates", + "type": "text" + }, + { + "bbox": [ + 214, + 491, + 261, + 502 + ], + "score": 0.91, + "content": "v _ { 1 } \\in T ( q _ { 1 } )", + "type": "inline_equation" + }, + { + "bbox": [ + 261, + 490, + 279, + 504 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 280, + 491, + 329, + 503 + ], + "score": 0.94, + "content": "v _ { 2 } \\in \\mathcal { T } ( q _ { 2 } )", + "type": "inline_equation" + }, + { + "bbox": [ + 330, + 490, + 387, + 504 + ], + "score": 1.0, + "content": ", we will treat", + "type": "text" + }, + { + "bbox": [ + 387, + 492, + 397, + 502 + ], + "score": 0.85, + "content": "q _ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 398, + 490, + 506, + 504 + ], + "score": 1.0, + "content": "as a β€œbetter” approximate", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 501, + 506, + 515 + ], + "spans": [ + { + "bbox": [ + 105, + 501, + 162, + 515 + ], + "score": 1.0, + "content": "solution than", + "type": "text" + }, + { + "bbox": [ + 163, + 503, + 173, + 514 + ], + "score": 0.84, + "content": "q _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 173, + 501, + 184, + 515 + ], + "score": 1.0, + "content": "if", + "type": "text" + }, + { + "bbox": [ + 185, + 502, + 247, + 514 + ], + "score": 0.92, + "content": "\\| v _ { 1 } \\| ^ { 2 } < \\| v _ { 2 } \\| ^ { 2 }", + "type": "inline_equation" + }, + { + 
"bbox": [ + 248, + 501, + 506, + 515 + ], + "score": 1.0, + "content": ". Doing so is somewhat analogous to the practice, common in", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 512, + 506, + 526 + ], + "spans": [ + { + "bbox": [ + 105, + 512, + 247, + 526 + ], + "score": 1.0, + "content": "optimization, of using the gradient", + "type": "text" + }, + { + "bbox": [ + 248, + 513, + 290, + 525 + ], + "score": 0.94, + "content": "\\| \\nabla f ( x ) \\| ^ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 290, + 512, + 506, + 526 + ], + "score": 1.0, + "content": "as a measure of quality of an approximate minimizer", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 104, + 523, + 506, + 537 + ], + "spans": [ + { + "bbox": [ + 104, + 523, + 231, + 537 + ], + "score": 1.0, + "content": "of some differentiable function", + "type": "text" + }, + { + "bbox": [ + 232, + 524, + 239, + 535 + ], + "score": 0.83, + "content": "f", + "type": "inline_equation" + }, + { + "bbox": [ + 239, + 523, + 343, + 537 + ], + "score": 1.0, + "content": ". 
However, note that since", + "type": "text" + }, + { + "bbox": [ + 344, + 524, + 371, + 536 + ], + "score": 0.92, + "content": "\\mathcal { T } ( q _ { 1 } )", + "type": "inline_equation" + }, + { + "bbox": [ + 371, + 523, + 506, + 537 + ], + "score": 1.0, + "content": "is a set, there may exist elements", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 534, + 506, + 549 + ], + "spans": [ + { + "bbox": [ + 105, + 534, + 118, + 549 + ], + "score": 1.0, + "content": "of", + "type": "text" + }, + { + "bbox": [ + 118, + 534, + 146, + 547 + ], + "score": 0.93, + "content": "\\mathcal { T } ( q _ { 1 } )", + "type": "inline_equation" + }, + { + "bbox": [ + 146, + 534, + 246, + 549 + ], + "score": 1.0, + "content": "with smaller norm than", + "type": "text" + }, + { + "bbox": [ + 246, + 536, + 257, + 546 + ], + "score": 0.85, + "content": "v _ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 257, + 534, + 371, + 549 + ], + "score": 1.0, + "content": ". Thus any given certificate", + "type": "text" + }, + { + "bbox": [ + 371, + 537, + 381, + 546 + ], + "score": 0.86, + "content": "v _ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 382, + 534, + 506, + 549 + ], + "score": 1.0, + "content": "only corresponds to an upper", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 545, + 216, + 560 + ], + "spans": [ + { + "bbox": [ + 105, + 545, + 146, + 560 + ], + "score": 1.0, + "content": "bound on", + "type": "text" + }, + { + "bbox": [ + 147, + 546, + 212, + 559 + ], + "score": 0.93, + "content": "\\mathrm { d i s t } ^ { 2 } ( 0 , \\mathcal { T } ( q _ { 1 } ) )", + "type": "inline_equation" + }, + { + "bbox": [ + 212, + 545, + 216, + 560 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 32 + } + ], + "index": 28 + }, + { + "type": "title", + "bbox": [ + 107, + 571, + 374, + 584 + ], + "lines": [ + { + "bbox": [ + 105, + 572, + 375, + 584 + ], + "spans": [ + { + "bbox": [ + 105, + 572, + 
375, + 584 + ], + "score": 1.0, + "content": "F.2 APPROXIMATION RESIDUAL FOR PROJECTIVE SPLITTING", + "type": "text" + } + ], + "index": 33 + } + ], + "index": 33 + }, + { + "type": "text", + "bbox": [ + 107, + 591, + 505, + 617 + ], + "lines": [ + { + "bbox": [ + 105, + 591, + 505, + 606 + ], + "spans": [ + { + "bbox": [ + 105, + 591, + 213, + 606 + ], + "score": 1.0, + "content": "In SPS (Algorithm 1), for", + "type": "text" + }, + { + "bbox": [ + 213, + 593, + 247, + 603 + ], + "score": 0.9, + "content": "i \\in 1 . . n", + "type": "inline_equation" + }, + { + "bbox": [ + 247, + 591, + 288, + 606 + ], + "score": 1.0, + "content": ", the pairs", + "type": "text" + }, + { + "bbox": [ + 288, + 592, + 322, + 605 + ], + "score": 0.92, + "content": "( x _ { i } ^ { k } , y _ { i } ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 322, + 591, + 398, + 606 + ], + "score": 1.0, + "content": "are chosen so that", + "type": "text" + }, + { + "bbox": [ + 399, + 592, + 451, + 605 + ], + "score": 0.93, + "content": "y _ { i } ^ { k } \\in A _ { i } ( x _ { i } ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 451, + 591, + 505, + 606 + ], + "score": 1.0, + "content": ". This can be", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 603, + 412, + 618 + ], + "spans": [ + { + "bbox": [ + 105, + 603, + 294, + 618 + ], + "score": 1.0, + "content": "seen from the definition of the resolvent. Thus", + "type": "text" + }, + { + "bbox": [ + 294, + 604, + 353, + 617 + ], + "score": 0.93, + "content": "\\hat { x _ { i } ^ { k } } \\in A _ { i } ^ { - 1 } ( y _ { i } ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 354, + 603, + 412, + 618 + ], + "score": 1.0, + "content": ". 
Observe that", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 34.5 + }, + { + "type": "interline_equation", + "bbox": [ + 198, + 620, + 412, + 680 + ], + "lines": [ + { + "bbox": [ + 198, + 620, + 412, + 680 + ], + "spans": [ + { + "bbox": [ + 198, + 620, + 412, + 680 + ], + "score": 0.94, + "content": "\\begin{array} { r } { v ^ { k } \\doteq \\left[ \\begin{array} { c } { x _ { 1 } ^ { k } - z ^ { k } } \\\\ { \\vdots } \\\\ { x _ { n } ^ { k } - z ^ { k } } \\\\ { B ( z ^ { k } ) + \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } } \\end{array} \\right] \\in \\mathcal { T } ( y _ { 1 } ^ { k } , \\dotsc , y _ { n } ^ { k } , z ^ { k } ) . } \\end{array}", + "type": "interline_equation", + "image_path": "34c2b8bc590a85d01c47ca4535edfab24349cbca1ec1369ce3179fc21f731ae6.jpg" + } + ] + } + ], + "index": 37.5, + "virtual_lines": [ + { + "bbox": [ + 198, + 620, + 412, + 635.0 + ], + "spans": [], + "index": 36 + }, + { + "bbox": [ + 198, + 635.0, + 412, + 650.0 + ], + "spans": [], + "index": 37 + }, + { + "bbox": [ + 198, + 650.0, + 412, + 665.0 + ], + "spans": [], + "index": 38 + }, + { + "bbox": [ + 198, + 665.0, + 412, + 680.0 + ], + "spans": [], + "index": 39 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 682, + 280, + 694 + ], + "lines": [ + { + "bbox": [ + 105, + 681, + 281, + 695 + ], + "spans": [ + { + "bbox": [ + 105, + 681, + 281, + 695 + ], + "score": 1.0, + "content": "The approximation residual for SPS is thus", + "type": "text" + } + ], + "index": 40 + } + ], + "index": 40 + }, + { + "type": "interline_equation", + "bbox": [ + 195, + 698, + 416, + 731 + ], + "lines": [ + { + "bbox": [ + 195, + 698, + 416, + 731 + ], + "spans": [ + { + "bbox": [ + 195, + 698, + 416, + 731 + ], + "score": 0.93, + "content": "R _ { k } \\dot { = } \\| v ^ { k } \\| ^ { 2 } = \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + \\left\\| B ( z ^ { k } ) + \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\right\\| ^ { 2 }", + "type": 
"interline_equation", + "image_path": "f7278fc1183759e6f4ed367df5b00f786a09c9df101097ac6b06ec1e247e4d9a.jpg" + } + ] + } + ], + "index": 41.5, + "virtual_lines": [ + { + "bbox": [ + 195, + 698, + 416, + 714.5 + ], + "spans": [], + "index": 41 + }, + { + "bbox": [ + 195, + 714.5, + 416, + 731.0 + ], + "spans": [], + "index": 42 + } + ] + } + ], + "page_idx": 27, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 106, + 26, + 308, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 760 + ], + "lines": [ + { + "bbox": [ + 298, + 750, + 313, + 763 + ], + "spans": [ + { + "bbox": [ + 298, + 750, + 313, + 763 + ], + "score": 1.0, + "content": "28", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 107, + 82, + 268, + 94 + ], + "lines": [ + { + "bbox": [ + 106, + 81, + 268, + 96 + ], + "spans": [ + { + "bbox": [ + 106, + 81, + 268, + 96 + ], + "score": 1.0, + "content": "Observe that solving (1) is equivalent to", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0, + "bbox_fs": [ + 106, + 81, + 268, + 96 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 182, + 97, + 427, + 149 + ], + "lines": [ + { + "bbox": [ + 182, + 97, + 427, + 149 + ], + "spans": [ + { + "bbox": [ + 182, + 97, + 427, + 149 + ], + "score": 0.9, + "content": "\\begin{array} { l l } { \\mathrm { F i n d } \\left( w _ { 1 } , \\ldots , w _ { n } , z \\right) \\in \\mathbb { R } ^ { \\left( n + 1 \\right) d } : } & { w _ { i } \\in A _ { i } ( z ) , \\quad i \\in { 1 . . n } } \\\\ & { \\quad \\displaystyle 0 \\in \\sum _ { i = 1 } ^ { n } w _ { i } + B ( z ) . 
} \\end{array}", + "type": "interline_equation", + "image_path": "7a56b1051c11c41e1ca53600e95cbd8143c44db6433bee45e39fe04bc10e8844.jpg" + } + ] + } + ], + "index": 2, + "virtual_lines": [ + { + "bbox": [ + 182, + 97, + 427, + 114.33333333333333 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 182, + 114.33333333333333, + 427, + 131.66666666666666 + ], + "spans": [], + "index": 2 + }, + { + "bbox": [ + 182, + 131.66666666666666, + 427, + 149.0 + ], + "spans": [], + "index": 3 + } + ] + }, + { + "type": "text", + "bbox": [ + 109, + 150, + 501, + 196 + ], + "lines": [ + { + "bbox": [ + 106, + 149, + 504, + 164 + ], + "spans": [ + { + "bbox": [ + 106, + 149, + 351, + 164 + ], + "score": 1.0, + "content": "This formulation resembles that of the extended solution set", + "type": "text" + }, + { + "bbox": [ + 351, + 152, + 359, + 162 + ], + "score": 0.83, + "content": "s", + "type": "inline_equation" + }, + { + "bbox": [ + 360, + 149, + 504, + 164 + ], + "score": 1.0, + "content": "used in projective spitting, as given", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 108, + 163, + 504, + 174 + ], + "spans": [ + { + "bbox": [ + 108, + 163, + 400, + 174 + ], + "score": 1.0, + "content": "in (5), except that it combines the final two conditions in the definition of", + "type": "text" + }, + { + "bbox": [ + 400, + 163, + 408, + 172 + ], + "score": 0.84, + "content": "s", + "type": "inline_equation" + }, + { + "bbox": [ + 408, + 163, + 504, + 174 + ], + "score": 1.0, + "content": ", and thus does not need", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 108, + 173, + 503, + 185 + ], + "spans": [ + { + "bbox": [ + 108, + 173, + 194, + 185 + ], + "score": 1.0, + "content": "the final dual variable", + "type": "text" + }, + { + "bbox": [ + 194, + 174, + 218, + 185 + ], + "score": 0.89, + "content": "w _ { n + 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 218, + 173, + 503, + 185 + ], + "score": 1.0, + "content": ". 
From the definition of the inverse of an operator, the above formulation", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 185, + 170, + 197 + ], + "spans": [ + { + "bbox": [ + 107, + 185, + 170, + 197 + ], + "score": 1.0, + "content": "is equivalent to", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 5.5, + "bbox_fs": [ + 106, + 149, + 504, + 197 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 171, + 199, + 440, + 250 + ], + "lines": [ + { + "bbox": [ + 171, + 199, + 440, + 250 + ], + "spans": [ + { + "bbox": [ + 171, + 199, + 440, + 250 + ], + "score": 0.91, + "content": "\\begin{array} { r l } { \\mathrm { F i n d ~ } ( w _ { 1 } , \\dots , w _ { n } , z ) \\in \\mathbb { R } ^ { ( n + 1 ) d } : } & { 0 \\in A _ { i } ^ { - 1 } ( w _ { i } ) - z , \\quad i \\in 1 . . n } \\\\ & { 0 \\in \\displaystyle \\sum _ { i = 1 } ^ { n } w _ { i } + B ( z ) . } \\end{array}", + "type": "interline_equation", + "image_path": "a5e366b1daec78e36a75c0d0685354e977bf371f6228267db8c5657d622d206b.jpg" + } + ] + } + ], + "index": 9, + "virtual_lines": [ + { + "bbox": [ + 171, + 199, + 440, + 216.0 + ], + "spans": [], + "index": 8 + }, + { + "bbox": [ + 171, + 216.0, + 440, + 233.0 + ], + "spans": [], + "index": 9 + }, + { + "bbox": [ + 171, + 233.0, + 440, + 250.0 + ], + "spans": [], + "index": 10 + } + ] + }, + { + "type": "text", + "bbox": [ + 104, + 254, + 453, + 267 + ], + "lines": [ + { + "bbox": [ + 104, + 253, + 455, + 268 + ], + "spans": [ + { + "bbox": [ + 104, + 253, + 304, + 268 + ], + "score": 1.0, + "content": "These conditions are in turn equivalent to finding", + "type": "text" + }, + { + "bbox": [ + 305, + 254, + 414, + 267 + ], + "score": 0.89, + "content": "( w _ { 1 } , \\ldots , w _ { n } , z ) \\in \\mathbb { R } ^ { ( n + 1 ) d }", + "type": "inline_equation" + }, + { + "bbox": [ + 415, + 253, + 455, + 268 + ], + "score": 1.0, + "content": "such that", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 11, + 
"bbox_fs": [ + 104, + 253, + 455, + 268 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 214, + 271, + 395, + 284 + ], + "lines": [ + { + "bbox": [ + 214, + 271, + 395, + 284 + ], + "spans": [ + { + "bbox": [ + 214, + 271, + 395, + 284 + ], + "score": 0.89, + "content": "0 \\in \\mathcal { A } ( w _ { 1 } , \\ldots , w _ { n } , z ) + \\mathcal { B } ( w _ { 1 } , \\ldots , w _ { n } , z ) ,", + "type": "interline_equation", + "image_path": "3f2baa06ae4751b44507f1aec5ab9cad1cab25df653740d7a66eff10132dcc1d.jpg" + } + ] + } + ], + "index": 12, + "virtual_lines": [ + { + "bbox": [ + 214, + 271, + 395, + 284 + ], + "spans": [], + "index": 12 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 289, + 232, + 300 + ], + "lines": [ + { + "bbox": [ + 105, + 287, + 233, + 303 + ], + "spans": [ + { + "bbox": [ + 105, + 287, + 133, + 303 + ], + "score": 1.0, + "content": "where", + "type": "text" + }, + { + "bbox": [ + 133, + 290, + 145, + 299 + ], + "score": 0.85, + "content": "\\mathcal { A }", + "type": "inline_equation" + }, + { + "bbox": [ + 145, + 287, + 233, + 303 + ], + "score": 1.0, + "content": "is the set-valued map", + "type": "text" + } + ], + "index": 13 + } + ], + "index": 13, + "bbox_fs": [ + 105, + 287, + 233, + 303 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 165, + 304, + 446, + 319 + ], + "lines": [ + { + "bbox": [ + 165, + 304, + 446, + 319 + ], + "spans": [ + { + "bbox": [ + 165, + 304, + 446, + 319 + ], + "score": 0.88, + "content": "\\mathcal { A } ( w _ { 1 } , \\dots , w _ { n } , z ) \\mapsto A _ { 1 } ^ { - 1 } ( w _ { 1 } ) \\times A _ { 2 } ^ { - 1 } ( w _ { 2 } ) \\times \\dots \\times A _ { n } ^ { - 1 } ( w _ { n } ) \\times \\{ 0 \\}", + "type": "interline_equation", + "image_path": "977d8a427bdf60035473ef5b916a73acdd9ad16bff24eb09b967888aa0743027.jpg" + } + ] + } + ], + "index": 14, + "virtual_lines": [ + { + "bbox": [ + 165, + 304, + 446, + 319 + ], + "spans": [], + "index": 14 + } + ] + }, + { + "type": "text", + 
"bbox": [ + 106, + 323, + 251, + 334 + ], + "lines": [ + { + "bbox": [ + 106, + 323, + 250, + 336 + ], + "spans": [ + { + "bbox": [ + 106, + 323, + 123, + 336 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 123, + 324, + 134, + 333 + ], + "score": 0.85, + "content": "\\mathcal { B }", + "type": "inline_equation" + }, + { + "bbox": [ + 135, + 323, + 250, + 336 + ], + "score": 1.0, + "content": "is the single-valued operator", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 15, + "bbox_fs": [ + 106, + 323, + 250, + 336 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 165, + 339, + 444, + 393 + ], + "lines": [ + { + "bbox": [ + 165, + 339, + 444, + 393 + ], + "spans": [ + { + "bbox": [ + 165, + 339, + 444, + 393 + ], + "score": 0.94, + "content": "\\mathcal { B } ( w _ { 1 } , \\dots , w _ { n } , z ) \\mapsto \\left[ \\begin{array} { c c c c } { 0 } & { \\cdots } & { 0 } & { - I } \\\\ { \\vdots } & { \\ddots } & { \\vdots } & { \\vdots } \\\\ { 0 } & { \\cdots } & { 0 } & { - I } \\\\ { I } & { \\cdots } & { I } & { 0 } \\end{array} \\right] \\left[ \\begin{array} { c } { w _ { 1 } } \\\\ { \\vdots } \\\\ { w _ { n } } \\\\ { z } \\end{array} \\right] + \\left[ \\begin{array} { c } { 0 } \\\\ { \\vdots } \\\\ { 0 } \\\\ { B ( z ) } \\end{array} \\right] .", + "type": "interline_equation", + "image_path": "0ff8bced27f5d7b5ccb6d4d33cc7dda36dfb975caf3ecd6390b3e0d9fd11b075.jpg" + } + ] + } + ], + "index": 17, + "virtual_lines": [ + { + "bbox": [ + 165, + 339, + 444, + 357.0 + ], + "spans": [], + "index": 16 + }, + { + "bbox": [ + 165, + 357.0, + 444, + 375.0 + ], + "spans": [], + "index": 17 + }, + { + "bbox": [ + 165, + 375.0, + 444, + 393.0 + ], + "spans": [], + "index": 18 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 397, + 504, + 453 + ], + "lines": [ + { + "bbox": [ + 106, + 397, + 505, + 409 + ], + "spans": [ + { + "bbox": [ + 106, + 397, + 214, + 409 + ], + "score": 1.0, + "content": "It is easily 
established that", + "type": "text" + }, + { + "bbox": [ + 214, + 398, + 225, + 407 + ], + "score": 0.86, + "content": "\\mathcal { B }", + "type": "inline_equation" + }, + { + "bbox": [ + 226, + 397, + 445, + 409 + ], + "score": 1.0, + "content": "is maximal monotone and Lipschitz continuous, while", + "type": "text" + }, + { + "bbox": [ + 446, + 398, + 457, + 407 + ], + "score": 0.83, + "content": "\\mathcal { A }", + "type": "inline_equation" + }, + { + "bbox": [ + 457, + 397, + 505, + 409 + ], + "score": 1.0, + "content": "is maximal", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 408, + 505, + 420 + ], + "spans": [ + { + "bbox": [ + 105, + 408, + 189, + 420 + ], + "score": 1.0, + "content": "monotone. Letting", + "type": "text" + }, + { + "bbox": [ + 189, + 408, + 251, + 419 + ], + "score": 0.91, + "content": "\\mathcal { T } \\doteq \\mathcal { A } + \\mathcal { B }", + "type": "inline_equation" + }, + { + "bbox": [ + 251, + 408, + 505, + 420 + ], + "score": 1.0, + "content": ", it follows from (Bauschke & Combettes, 2017, Proposition", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 419, + 505, + 431 + ], + "spans": [ + { + "bbox": [ + 106, + 419, + 154, + 431 + ], + "score": 1.0, + "content": "20.23) that", + "type": "text" + }, + { + "bbox": [ + 154, + 419, + 165, + 429 + ], + "score": 0.86, + "content": "\\mathcal { T }", + "type": "inline_equation" + }, + { + "bbox": [ + 165, + 419, + 505, + 431 + ], + "score": 1.0, + "content": "is maximal monotone. 
Thus, we have reformulated (1) as the monotone inclusion", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 107, + 426, + 507, + 444 + ], + "spans": [ + { + "bbox": [ + 107, + 430, + 147, + 442 + ], + "score": 0.89, + "content": "0 \\in \\mathcal { T } ( q )", + "type": "inline_equation" + }, + { + "bbox": [ + 148, + 426, + 163, + 444 + ], + "score": 1.0, + "content": "for", + "type": "text" + }, + { + "bbox": [ + 163, + 432, + 169, + 441 + ], + "score": 0.78, + "content": "q", + "type": "inline_equation" + }, + { + "bbox": [ + 169, + 426, + 253, + 444 + ], + "score": 1.0, + "content": "in the product space", + "type": "text" + }, + { + "bbox": [ + 254, + 429, + 288, + 440 + ], + "score": 0.91, + "content": "\\mathbb { R } ^ { ( n + 1 ) \\bar { d } }", + "type": "inline_equation" + }, + { + "bbox": [ + 288, + 426, + 329, + 444 + ], + "score": 1.0, + "content": ". A vector", + "type": "text" + }, + { + "bbox": [ + 330, + 429, + 360, + 440 + ], + "score": 0.91, + "content": "z \\in \\mathbb { R } ^ { d }", + "type": "inline_equation" + }, + { + "bbox": [ + 360, + 426, + 507, + 444 + ], + "score": 1.0, + "content": "solves (1) if and only if there exists", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 106, + 438, + 389, + 456 + ], + "spans": [ + { + "bbox": [ + 106, + 442, + 189, + 453 + ], + "score": 0.92, + "content": "( w _ { 1 } , \\dots , w _ { n } ) \\in \\mathbb { R } ^ { n d }", + "type": "inline_equation" + }, + { + "bbox": [ + 190, + 438, + 229, + 456 + ], + "score": 1.0, + "content": "such that", + "type": "text" + }, + { + "bbox": [ + 229, + 441, + 270, + 453 + ], + "score": 0.93, + "content": "\\bar { 0 } \\in \\mathcal { T } ( q )", + "type": "inline_equation" + }, + { + "bbox": [ + 270, + 438, + 301, + 456 + ], + "score": 1.0, + "content": ", where", + "type": "text" + }, + { + "bbox": [ + 301, + 442, + 383, + 453 + ], + "score": 0.91, + "content": "q = ( w _ { 1 } , \\dots , w _ { n } , z )", + "type": "inline_equation" + 
}, + { + "bbox": [ + 384, + 438, + 389, + 456 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 23 + } + ], + "index": 21, + "bbox_fs": [ + 105, + 397, + 507, + 456 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 457, + 505, + 560 + ], + "lines": [ + { + "bbox": [ + 105, + 457, + 506, + 472 + ], + "spans": [ + { + "bbox": [ + 105, + 457, + 157, + 472 + ], + "score": 1.0, + "content": "For any pair", + "type": "text" + }, + { + "bbox": [ + 158, + 458, + 180, + 470 + ], + "score": 0.92, + "content": "( q , v )", + "type": "inline_equation" + }, + { + "bbox": [ + 181, + 457, + 219, + 472 + ], + "score": 1.0, + "content": "such that", + "type": "text" + }, + { + "bbox": [ + 219, + 457, + 261, + 470 + ], + "score": 0.85, + "content": "v \\in \\mathcal { T } ( q )", + "type": "inline_equation" + }, + { + "bbox": [ + 261, + 457, + 265, + 472 + ], + "score": 1.0, + "content": ",", + "type": "text" + }, + { + "bbox": [ + 265, + 457, + 285, + 470 + ], + "score": 0.87, + "content": "\\| v \\| ^ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 286, + 457, + 449, + 472 + ], + "score": 1.0, + "content": "represents an approximation residual for", + "type": "text" + }, + { + "bbox": [ + 449, + 460, + 456, + 469 + ], + "score": 0.78, + "content": "q", + "type": "inline_equation" + }, + { + "bbox": [ + 456, + 457, + 506, + 472 + ], + "score": 1.0, + "content": "in the sense", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 468, + 506, + 482 + ], + "spans": [ + { + "bbox": [ + 105, + 468, + 124, + 482 + ], + "score": 1.0, + "content": "that", + "type": "text" + }, + { + "bbox": [ + 124, + 470, + 150, + 479 + ], + "score": 0.92, + "content": "v = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 150, + 468, + 184, + 482 + ], + "score": 1.0, + "content": "implies", + "type": "text" + }, + { + "bbox": [ + 184, + 471, + 190, + 480 + ], + "score": 0.81, + "content": "q", + "type": "inline_equation" + }, + { + "bbox": [ + 
191, + 468, + 337, + 482 + ], + "score": 1.0, + "content": "is a solution to (62). One may take", + "type": "text" + }, + { + "bbox": [ + 337, + 469, + 358, + 481 + ], + "score": 0.92, + "content": "\\| \\bar { v } \\| ^ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 358, + 468, + 474, + 482 + ], + "score": 1.0, + "content": "as a measure of the error of", + "type": "text" + }, + { + "bbox": [ + 474, + 471, + 480, + 480 + ], + "score": 0.78, + "content": "q", + "type": "inline_equation" + }, + { + "bbox": [ + 481, + 468, + 506, + 482 + ], + "score": 1.0, + "content": "as an", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 480, + 506, + 493 + ], + "spans": [ + { + "bbox": [ + 105, + 480, + 312, + 493 + ], + "score": 1.0, + "content": "approximate solution to (62), and it can only be 0 if", + "type": "text" + }, + { + "bbox": [ + 312, + 482, + 318, + 491 + ], + "score": 0.81, + "content": "q", + "type": "inline_equation" + }, + { + "bbox": [ + 318, + 480, + 506, + 493 + ], + "score": 1.0, + "content": "is a solution. 
Given two approximate solutions", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 107, + 490, + 506, + 504 + ], + "spans": [ + { + "bbox": [ + 107, + 493, + 117, + 502 + ], + "score": 0.83, + "content": "q _ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 117, + 490, + 135, + 504 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 135, + 492, + 145, + 502 + ], + "score": 0.85, + "content": "q _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 146, + 490, + 214, + 504 + ], + "score": 1.0, + "content": "with certificates", + "type": "text" + }, + { + "bbox": [ + 214, + 491, + 261, + 502 + ], + "score": 0.91, + "content": "v _ { 1 } \\in T ( q _ { 1 } )", + "type": "inline_equation" + }, + { + "bbox": [ + 261, + 490, + 279, + 504 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 280, + 491, + 329, + 503 + ], + "score": 0.94, + "content": "v _ { 2 } \\in \\mathcal { T } ( q _ { 2 } )", + "type": "inline_equation" + }, + { + "bbox": [ + 330, + 490, + 387, + 504 + ], + "score": 1.0, + "content": ", we will treat", + "type": "text" + }, + { + "bbox": [ + 387, + 492, + 397, + 502 + ], + "score": 0.85, + "content": "q _ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 398, + 490, + 506, + 504 + ], + "score": 1.0, + "content": "as a β€œbetter” approximate", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 501, + 506, + 515 + ], + "spans": [ + { + "bbox": [ + 105, + 501, + 162, + 515 + ], + "score": 1.0, + "content": "solution than", + "type": "text" + }, + { + "bbox": [ + 163, + 503, + 173, + 514 + ], + "score": 0.84, + "content": "q _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 173, + 501, + 184, + 515 + ], + "score": 1.0, + "content": "if", + "type": "text" + }, + { + "bbox": [ + 185, + 502, + 247, + 514 + ], + "score": 0.92, + "content": "\\| v _ { 1 } \\| ^ { 2 } < \\| v _ { 2 } \\| ^ { 2 }", + "type": "inline_equation" + }, + { + 
"bbox": [ + 248, + 501, + 506, + 515 + ], + "score": 1.0, + "content": ". Doing so is somewhat analogous to the practice, common in", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 512, + 506, + 526 + ], + "spans": [ + { + "bbox": [ + 105, + 512, + 247, + 526 + ], + "score": 1.0, + "content": "optimization, of using the gradient", + "type": "text" + }, + { + "bbox": [ + 248, + 513, + 290, + 525 + ], + "score": 0.94, + "content": "\\| \\nabla f ( x ) \\| ^ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 290, + 512, + 506, + 526 + ], + "score": 1.0, + "content": "as a measure of quality of an approximate minimizer", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 104, + 523, + 506, + 537 + ], + "spans": [ + { + "bbox": [ + 104, + 523, + 231, + 537 + ], + "score": 1.0, + "content": "of some differentiable function", + "type": "text" + }, + { + "bbox": [ + 232, + 524, + 239, + 535 + ], + "score": 0.83, + "content": "f", + "type": "inline_equation" + }, + { + "bbox": [ + 239, + 523, + 343, + 537 + ], + "score": 1.0, + "content": ". 
However, note that since", + "type": "text" + }, + { + "bbox": [ + 344, + 524, + 371, + 536 + ], + "score": 0.92, + "content": "\\mathcal { T } ( q _ { 1 } )", + "type": "inline_equation" + }, + { + "bbox": [ + 371, + 523, + 506, + 537 + ], + "score": 1.0, + "content": "is a set, there may exist elements", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 534, + 506, + 549 + ], + "spans": [ + { + "bbox": [ + 105, + 534, + 118, + 549 + ], + "score": 1.0, + "content": "of", + "type": "text" + }, + { + "bbox": [ + 118, + 534, + 146, + 547 + ], + "score": 0.93, + "content": "\\mathcal { T } ( q _ { 1 } )", + "type": "inline_equation" + }, + { + "bbox": [ + 146, + 534, + 246, + 549 + ], + "score": 1.0, + "content": "with smaller norm than", + "type": "text" + }, + { + "bbox": [ + 246, + 536, + 257, + 546 + ], + "score": 0.85, + "content": "v _ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 257, + 534, + 371, + 549 + ], + "score": 1.0, + "content": ". Thus any given certificate", + "type": "text" + }, + { + "bbox": [ + 371, + 537, + 381, + 546 + ], + "score": 0.86, + "content": "v _ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 382, + 534, + 506, + 549 + ], + "score": 1.0, + "content": "only corresponds to an upper", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 545, + 216, + 560 + ], + "spans": [ + { + "bbox": [ + 105, + 545, + 146, + 560 + ], + "score": 1.0, + "content": "bound on", + "type": "text" + }, + { + "bbox": [ + 147, + 546, + 212, + 559 + ], + "score": 0.93, + "content": "\\mathrm { d i s t } ^ { 2 } ( 0 , \\mathcal { T } ( q _ { 1 } ) )", + "type": "inline_equation" + }, + { + "bbox": [ + 212, + 545, + 216, + 560 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 32 + } + ], + "index": 28, + "bbox_fs": [ + 104, + 457, + 506, + 560 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 571, + 374, + 584 + ], + "lines": [ + { + "bbox": [ + 105, + 572, + 375, + 584 + ], 
+ "spans": [ + { + "bbox": [ + 105, + 572, + 375, + 584 + ], + "score": 1.0, + "content": "F.2 APPROXIMATION RESIDUAL FOR PROJECTIVE SPLITTING", + "type": "text" + } + ], + "index": 33 + } + ], + "index": 33 + }, + { + "type": "text", + "bbox": [ + 107, + 591, + 505, + 617 + ], + "lines": [ + { + "bbox": [ + 105, + 591, + 505, + 606 + ], + "spans": [ + { + "bbox": [ + 105, + 591, + 213, + 606 + ], + "score": 1.0, + "content": "In SPS (Algorithm 1), for", + "type": "text" + }, + { + "bbox": [ + 213, + 593, + 247, + 603 + ], + "score": 0.9, + "content": "i \\in 1 . . n", + "type": "inline_equation" + }, + { + "bbox": [ + 247, + 591, + 288, + 606 + ], + "score": 1.0, + "content": ", the pairs", + "type": "text" + }, + { + "bbox": [ + 288, + 592, + 322, + 605 + ], + "score": 0.92, + "content": "( x _ { i } ^ { k } , y _ { i } ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 322, + 591, + 398, + 606 + ], + "score": 1.0, + "content": "are chosen so that", + "type": "text" + }, + { + "bbox": [ + 399, + 592, + 451, + 605 + ], + "score": 0.93, + "content": "y _ { i } ^ { k } \\in A _ { i } ( x _ { i } ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 451, + 591, + 505, + 606 + ], + "score": 1.0, + "content": ". This can be", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 603, + 412, + 618 + ], + "spans": [ + { + "bbox": [ + 105, + 603, + 294, + 618 + ], + "score": 1.0, + "content": "seen from the definition of the resolvent. Thus", + "type": "text" + }, + { + "bbox": [ + 294, + 604, + 353, + 617 + ], + "score": 0.93, + "content": "\\hat { x _ { i } ^ { k } } \\in A _ { i } ^ { - 1 } ( y _ { i } ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 354, + 603, + 412, + 618 + ], + "score": 1.0, + "content": ". 
Observe that", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 34.5, + "bbox_fs": [ + 105, + 591, + 505, + 618 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 198, + 620, + 412, + 680 + ], + "lines": [ + { + "bbox": [ + 198, + 620, + 412, + 680 + ], + "spans": [ + { + "bbox": [ + 198, + 620, + 412, + 680 + ], + "score": 0.94, + "content": "\\begin{array} { r } { v ^ { k } \\doteq \\left[ \\begin{array} { c } { x _ { 1 } ^ { k } - z ^ { k } } \\\\ { \\vdots } \\\\ { x _ { n } ^ { k } - z ^ { k } } \\\\ { B ( z ^ { k } ) + \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } } \\end{array} \\right] \\in \\mathcal { T } ( y _ { 1 } ^ { k } , \\dotsc , y _ { n } ^ { k } , z ^ { k } ) . } \\end{array}", + "type": "interline_equation", + "image_path": "34c2b8bc590a85d01c47ca4535edfab24349cbca1ec1369ce3179fc21f731ae6.jpg" + } + ] + } + ], + "index": 37.5, + "virtual_lines": [ + { + "bbox": [ + 198, + 620, + 412, + 635.0 + ], + "spans": [], + "index": 36 + }, + { + "bbox": [ + 198, + 635.0, + 412, + 650.0 + ], + "spans": [], + "index": 37 + }, + { + "bbox": [ + 198, + 650.0, + 412, + 665.0 + ], + "spans": [], + "index": 38 + }, + { + "bbox": [ + 198, + 665.0, + 412, + 680.0 + ], + "spans": [], + "index": 39 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 682, + 280, + 694 + ], + "lines": [ + { + "bbox": [ + 105, + 681, + 281, + 695 + ], + "spans": [ + { + "bbox": [ + 105, + 681, + 281, + 695 + ], + "score": 1.0, + "content": "The approximation residual for SPS is thus", + "type": "text" + } + ], + "index": 40 + } + ], + "index": 40, + "bbox_fs": [ + 105, + 681, + 281, + 695 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 195, + 698, + 416, + 731 + ], + "lines": [ + { + "bbox": [ + 195, + 698, + 416, + 731 + ], + "spans": [ + { + "bbox": [ + 195, + 698, + 416, + 731 + ], + "score": 0.93, + "content": "R _ { k } \\dot { = } \\| v ^ { k } \\| ^ { 2 } = \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + \\left\\| B ( z ^ 
{ k } ) + \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\right\\| ^ { 2 }", + "type": "interline_equation", + "image_path": "f7278fc1183759e6f4ed367df5b00f786a09c9df101097ac6b06ec1e247e4d9a.jpg" + } + ] + } + ], + "index": 41.5, + "virtual_lines": [ + { + "bbox": [ + 195, + 698, + 416, + 714.5 + ], + "spans": [], + "index": 41 + }, + { + "bbox": [ + 195, + 714.5, + 416, + 731.0 + ], + "spans": [], + "index": 42 + } + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 105, + 81, + 504, + 105 + ], + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 96 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 266, + 96 + ], + "score": 1.0, + "content": "which is an approximation residual for", + "type": "text" + }, + { + "bbox": [ + 266, + 82, + 331, + 95 + ], + "score": 0.92, + "content": "( y _ { 1 } ^ { k } , \\dots , y _ { n } ^ { k } , z ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 331, + 81, + 505, + 96 + ], + "score": 1.0, + "content": "in the sense defined above. 
We may relate", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 107, + 93, + 397, + 106 + ], + "spans": [ + { + "bbox": [ + 107, + 94, + 120, + 105 + ], + "score": 0.88, + "content": "R _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 120, + 93, + 241, + 106 + ], + "score": 1.0, + "content": "to the approximation residual", + "type": "text" + }, + { + "bbox": [ + 241, + 94, + 255, + 105 + ], + "score": 0.89, + "content": "G _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 255, + 93, + 397, + 106 + ], + "score": 1.0, + "content": "for SPS from Section 5 as follows:", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 0.5 + }, + { + "type": "interline_equation", + "bbox": [ + 168, + 108, + 444, + 259 + ], + "lines": [ + { + "bbox": [ + 168, + 108, + 444, + 259 + ], + "spans": [ + { + "bbox": [ + 168, + 108, + 444, + 259 + ], + "score": 0.95, + "content": "\\begin{array} { r l } & { H _ { k } = \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + \\left\\| B ( z ^ { k } ) + \\displaystyle \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\right\\| ^ { 2 } } \\\\ & { \\quad = \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + \\left\\| B ( z ^ { k } ) + \\displaystyle \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } - \\displaystyle \\sum _ { i = 1 } ^ { n + 1 } w _ { i } ^ { k } \\right\\| ^ { 2 } } \\\\ & { \\quad \\leq \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + 2 \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } + 2 \\left\\| \\displaystyle \\sum _ { i = 1 } ^ { n } ( y _ { i } ^ { k } - w _ { i } ^ { k } ) \\right\\| ^ { 2 } } \\\\ & { \\quad \\leq \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + 2 \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } + 2 n \\displaystyle \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } } \\\\ & { 
\\quad < \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + 2 \\| B ( z ^ { k } ) + \\displaystyle \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } } \\\\ & { \\quad < \\rho _ { n } \\alpha , } \\end{array}", + "type": "interline_equation", + "image_path": "8c330fe1de9e966ef91dbdae30060c1e446bcc0e0d0a4af3dfce1d7733b22253.jpg" + } + ] + } + ], + "index": 3, + "virtual_lines": [ + { + "bbox": [ + 168, + 108, + 444, + 158.33333333333334 + ], + "spans": [], + "index": 2 + }, + { + "bbox": [ + 168, + 158.33333333333334, + 444, + 208.66666666666669 + ], + "spans": [], + "index": 3 + }, + { + "bbox": [ + 168, + 208.66666666666669, + 444, + 259.0 + ], + "spans": [], + "index": 4 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 268, + 505, + 293 + ], + "lines": [ + { + "bbox": [ + 104, + 264, + 505, + 288 + ], + "spans": [ + { + "bbox": [ + 104, + 264, + 339, + 288 + ], + "score": 1.0, + "content": "where in the second equality we have used the fact that", + "type": "text" + }, + { + "bbox": [ + 339, + 268, + 401, + 283 + ], + "score": 0.93, + "content": "\\begin{array} { r } { \\sum _ { i = 1 } ^ { n + 1 } w _ { i } ^ { k } = 0 } \\end{array}", + "type": "inline_equation" + }, + { + "bbox": [ + 402, + 264, + 433, + 288 + ], + "score": 1.0, + "content": ". 
Thus,", + "type": "text" + }, + { + "bbox": [ + 434, + 271, + 447, + 281 + ], + "score": 0.9, + "content": "R _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 448, + 264, + 505, + 288 + ], + "score": 1.0, + "content": "has the same", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 282, + 285, + 293 + ], + "spans": [ + { + "bbox": [ + 105, + 282, + 187, + 293 + ], + "score": 1.0, + "content": "convergence rate as", + "type": "text" + }, + { + "bbox": [ + 187, + 282, + 201, + 292 + ], + "score": 0.88, + "content": "G _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 201, + 282, + 285, + 293 + ], + "score": 1.0, + "content": "given in Theorem 2.", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 5.5 + }, + { + "type": "text", + "bbox": [ + 107, + 297, + 504, + 321 + ], + "lines": [ + { + "bbox": [ + 105, + 295, + 506, + 312 + ], + "spans": [ + { + "bbox": [ + 105, + 295, + 402, + 312 + ], + "score": 1.0, + "content": "Note that while the certificate given in (65) focuses on the primal iterate", + "type": "text" + }, + { + "bbox": [ + 402, + 297, + 413, + 308 + ], + "score": 0.87, + "content": "z ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 414, + 295, + 506, + 312 + ], + "score": 1.0, + "content": ", it may be changed to", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 306, + 279, + 324 + ], + "spans": [ + { + "bbox": [ + 105, + 306, + 159, + 324 + ], + "score": 1.0, + "content": "focus on any", + "type": "text" + }, + { + "bbox": [ + 160, + 308, + 171, + 321 + ], + "score": 0.9, + "content": "x _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 172, + 306, + 187, + 324 + ], + "score": 1.0, + "content": "for", + "type": "text" + }, + { + "bbox": [ + 187, + 309, + 238, + 321 + ], + "score": 0.92, + "content": "i = 1 , \\ldots , n", + "type": "inline_equation" + }, + { + "bbox": [ + 238, + 306, + 279, + 324 + ], + "score": 1.0, + "content": ", by using", + 
"type": "text" + } + ], + "index": 8 + } + ], + "index": 7.5 + }, + { + "type": "interline_equation", + "bbox": [ + 198, + 323, + 413, + 382 + ], + "lines": [ + { + "bbox": [ + 198, + 323, + 413, + 382 + ], + "spans": [ + { + "bbox": [ + 198, + 323, + 413, + 382 + ], + "score": 0.94, + "content": "\\boldsymbol { v } _ { i } ^ { k } \\doteq \\left[ \\begin{array} { c } { x _ { 1 } ^ { k } - x _ { i } ^ { k } } \\\\ { \\vdots } \\\\ { x _ { n } ^ { k } - x _ { i } ^ { k } } \\\\ { B ( x _ { i } ^ { k } ) + \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } } \\end{array} \\right] \\in \\mathscr { T } ( y _ { 1 } ^ { k } , \\ldots , y _ { n } ^ { k } , x _ { i } ^ { k } ) .", + "type": "interline_equation", + "image_path": "8204a56b968d2a373a697b9c3bb802847ba995a3a004d55c90e1cbc1f39ce696.jpg" + } + ] + } + ], + "index": 10.5, + "virtual_lines": [ + { + "bbox": [ + 198, + 323, + 413, + 337.75 + ], + "spans": [], + "index": 9 + }, + { + "bbox": [ + 198, + 337.75, + 413, + 352.5 + ], + "spans": [], + "index": 10 + }, + { + "bbox": [ + 198, + 352.5, + 413, + 367.25 + ], + "spans": [], + "index": 11 + }, + { + "bbox": [ + 198, + 367.25, + 413, + 382.0 + ], + "spans": [], + "index": 12 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 385, + 505, + 408 + ], + "lines": [ + { + "bbox": [ + 105, + 384, + 505, + 399 + ], + "spans": [ + { + "bbox": [ + 105, + 384, + 223, + 399 + ], + "score": 1.0, + "content": "The approximation residual", + "type": "text" + }, + { + "bbox": [ + 223, + 385, + 248, + 398 + ], + "score": 0.92, + "content": "\\| v _ { i } ^ { k } \\| ^ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 249, + 384, + 434, + 399 + ], + "score": 1.0, + "content": "may also be shown to have the same rate as", + "type": "text" + }, + { + "bbox": [ + 434, + 387, + 448, + 397 + ], + "score": 0.9, + "content": "G _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 448, + 384, + 505, + 399 + ], + "score": 1.0, + "content": "by following", + "type": "text" + 
} + ], + "index": 13 + }, + { + "bbox": [ + 105, + 396, + 274, + 409 + ], + "spans": [ + { + "bbox": [ + 105, + 396, + 257, + 409 + ], + "score": 1.0, + "content": "similar derivations to those above for", + "type": "text" + }, + { + "bbox": [ + 257, + 397, + 270, + 408 + ], + "score": 0.89, + "content": "R _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 271, + 396, + 274, + 409 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 14 + } + ], + "index": 13.5 + }, + { + "type": "title", + "bbox": [ + 106, + 421, + 210, + 433 + ], + "lines": [ + { + "bbox": [ + 105, + 420, + 210, + 434 + ], + "spans": [ + { + "bbox": [ + 105, + 420, + 210, + 434 + ], + "score": 1.0, + "content": "F.3 TSENG’S METHOD", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 15 + }, + { + "type": "text", + "bbox": [ + 107, + 441, + 504, + 465 + ], + "lines": [ + { + "bbox": [ + 105, + 440, + 505, + 455 + ], + "spans": [ + { + "bbox": [ + 105, + 440, + 505, + 455 + ], + "score": 1.0, + "content": "Tseng’s method (Tseng, 2000) can be applied to (62), resulting in the following recursion with iterates", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 451, + 181, + 466 + ], + "spans": [ + { + "bbox": [ + 107, + 453, + 176, + 466 + ], + "score": 0.92, + "content": "q ^ { k } , \\bar { q } ^ { \\bar { k } } \\in \\mathbb { R } ^ { ( n + 1 ) d }", + "type": "inline_equation" + }, + { + "bbox": [ + 177, + 451, + 181, + 466 + ], + "score": 1.0, + "content": ":", + "type": "text" + } + ], + "index": 17 + } + ], + "index": 16.5 + }, + { + "type": "interline_equation", + "bbox": [ + 234, + 466, + 376, + 505 + ], + "lines": [ + { + "bbox": [ + 234, + 466, + 376, + 505 + ], + "spans": [ + { + "bbox": [ + 234, + 466, + 376, + 505 + ], + "score": 0.9, + "content": "\\begin{array} { c } { \\bar { q } ^ { k } = J _ { \\alpha \\mathcal { A } } ( q ^ { k } - \\alpha \\mathcal { B } ( q ^ { k } ) ) } \\\\ { q ^ { k + 1 } = \\bar { q } ^ { k } + 
\\alpha \\big ( \\mathcal { B } ( q ^ { k } ) - \\mathcal { B } ( \\bar { q } ^ { k } ) \\big ) , } \\end{array}", + "type": "interline_equation", + "image_path": "89d314a14597b152cec2b1449bae122026196d7ab0fe3ab9a5d6fe622328c894.jpg" + } + ] + } + ], + "index": 18.5, + "virtual_lines": [ + { + "bbox": [ + 234, + 466, + 376, + 485.5 + ], + "spans": [], + "index": 18 + }, + { + "bbox": [ + 234, + 485.5, + 376, + 505.0 + ], + "spans": [], + "index": 19 + } + ] + }, + { + "type": "text", + "bbox": [ + 105, + 507, + 505, + 531 + ], + "lines": [ + { + "bbox": [ + 106, + 507, + 505, + 520 + ], + "spans": [ + { + "bbox": [ + 106, + 507, + 132, + 520 + ], + "score": 1.0, + "content": "where", + "type": "text" + }, + { + "bbox": [ + 133, + 509, + 144, + 518 + ], + "score": 0.83, + "content": "\\mathcal { A }", + "type": "inline_equation" + }, + { + "bbox": [ + 145, + 507, + 162, + 520 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 162, + 509, + 173, + 518 + ], + "score": 0.86, + "content": "\\mathcal { B }", + "type": "inline_equation" + }, + { + "bbox": [ + 173, + 507, + 354, + 520 + ], + "score": 1.0, + "content": "are defined in (63) and (64). 
The resolvent of", + "type": "text" + }, + { + "bbox": [ + 354, + 509, + 366, + 518 + ], + "score": 0.85, + "content": "\\mathcal { A }", + "type": "inline_equation" + }, + { + "bbox": [ + 366, + 507, + 505, + 520 + ], + "score": 1.0, + "content": "may be readily computed from the", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 519, + 491, + 532 + ], + "spans": [ + { + "bbox": [ + 106, + 519, + 174, + 532 + ], + "score": 1.0, + "content": "resolvents of the", + "type": "text" + }, + { + "bbox": [ + 175, + 519, + 186, + 530 + ], + "score": 0.89, + "content": "A _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 187, + 519, + 491, + 532 + ], + "score": 1.0, + "content": "using Moreau’s identity (Bauschke & Combettes, 2017, Proposition 23.20).", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 20.5 + }, + { + "type": "text", + "bbox": [ + 105, + 535, + 504, + 559 + ], + "lines": [ + { + "bbox": [ + 106, + 536, + 505, + 547 + ], + "spans": [ + { + "bbox": [ + 106, + 536, + 505, + 547 + ], + "score": 1.0, + "content": "Analogous to SPS, Tseng’s method has an approximation residual, which in this case is an element of", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 107, + 545, + 506, + 560 + ], + "spans": [ + { + "bbox": [ + 107, + 546, + 135, + 559 + ], + "score": 0.91, + "content": "\\mathcal { T } ( \\bar { q } ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 135, + 545, + 446, + 560 + ], + "score": 1.0, + "content": ". 
In particular, using the general properties of resolvent operators as applied to", + "type": "text" + }, + { + "bbox": [ + 446, + 547, + 466, + 558 + ], + "score": 0.9, + "content": "J _ { \\alpha \\mathcal { A } }", + "type": "inline_equation" + }, + { + "bbox": [ + 466, + 545, + 506, + 560 + ], + "score": 1.0, + "content": ", we have", + "type": "text" + } + ], + "index": 23 + } + ], + "index": 22.5 + }, + { + "type": "interline_equation", + "bbox": [ + 239, + 561, + 371, + 585 + ], + "lines": [ + { + "bbox": [ + 239, + 561, + 371, + 585 + ], + "spans": [ + { + "bbox": [ + 239, + 561, + 371, + 585 + ], + "score": 0.94, + "content": "\\frac { 1 } { \\alpha } ( q ^ { k } - \\bar { q } ^ { k } ) - \\mathcal { B } ( q ^ { k } ) \\in \\mathcal { A } ( \\bar { q } ^ { k } ) .", + "type": "interline_equation", + "image_path": "3ad651c74ae9cc276826b94d05a927814ef5af68d350de03d3fdebef58f09adf.jpg" + } + ] + } + ], + "index": 24, + "virtual_lines": [ + { + "bbox": [ + 239, + 561, + 371, + 585 + ], + "spans": [], + "index": 24 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 588, + 236, + 600 + ], + "lines": [ + { + "bbox": [ + 106, + 588, + 236, + 601 + ], + "spans": [ + { + "bbox": [ + 106, + 588, + 236, + 601 + ], + "score": 1.0, + "content": "Also, rearranging (68) produces", + "type": "text" + } + ], + "index": 25 + } + ], + "index": 25 + }, + { + "type": "interline_equation", + "bbox": [ + 234, + 603, + 377, + 626 + ], + "lines": [ + { + "bbox": [ + 234, + 603, + 377, + 626 + ], + "spans": [ + { + "bbox": [ + 234, + 603, + 377, + 626 + ], + "score": 0.93, + "content": "\\frac { 1 } { \\alpha } ( \\bar { q } ^ { k } - q ^ { k + 1 } ) + \\mathcal { B } ( q ^ { k } ) = \\mathcal { B } ( \\bar { q } ^ { k } ) .", + "type": "interline_equation", + "image_path": "43c918c62fec061b1e0fbc610629a312f5c06c11e5b4ee40a38fe9612a78331d.jpg" + } + ] + } + ], + "index": 26, + "virtual_lines": [ + { + "bbox": [ + 234, + 603, + 377, + 626 + ], + "spans": [], + "index": 26 + } + ] 
+ }, + { + "type": "text", + "bbox": [ + 107, + 628, + 254, + 640 + ], + "lines": [ + { + "bbox": [ + 106, + 628, + 254, + 641 + ], + "spans": [ + { + "bbox": [ + 106, + 628, + 254, + 641 + ], + "score": 1.0, + "content": "Adding these two relations produces", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 27 + }, + { + "type": "interline_equation", + "bbox": [ + 215, + 643, + 396, + 667 + ], + "lines": [ + { + "bbox": [ + 215, + 643, + 396, + 667 + ], + "spans": [ + { + "bbox": [ + 215, + 643, + 396, + 667 + ], + "score": 0.92, + "content": "\\frac { 1 } { \\alpha } ( q ^ { k } - q ^ { k + 1 } ) \\in \\mathcal { A } ( \\bar { q } ^ { k } ) + \\mathcal { B } ( \\bar { q } ^ { k } ) = \\mathcal { T } ( \\bar { q } ^ { k } )", + "type": "interline_equation", + "image_path": "83171b6910d48e4bfa177ccd43caea81d1d347085c2a164be95154adc4fb3099.jpg" + } + ] + } + ], + "index": 28, + "virtual_lines": [ + { + "bbox": [ + 215, + 643, + 396, + 667 + ], + "spans": [], + "index": 28 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 669, + 149, + 681 + ], + "lines": [ + { + "bbox": [ + 105, + 668, + 151, + 682 + ], + "spans": [ + { + "bbox": [ + 105, + 668, + 151, + 682 + ], + "score": 1.0, + "content": "Therefore,", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 29 + }, + { + "type": "interline_equation", + "bbox": [ + 250, + 683, + 360, + 707 + ], + "lines": [ + { + "bbox": [ + 250, + 683, + 360, + 707 + ], + "spans": [ + { + "bbox": [ + 250, + 683, + 360, + 707 + ], + "score": 0.94, + "content": "R _ { k } ^ { \\mathrm { { T s e n g } } } \\doteq \\frac { 1 } { \\alpha ^ { 2 } } \\| q ^ { k } - q ^ { k + 1 } \\| ^ { 2 }", + "type": "interline_equation", + "image_path": "22729217f5cf585083c76e9bf3744e87097890be4fd87e4b64c60b0296861963.jpg" + } + ] + } + ], + "index": 30, + "virtual_lines": [ + { + "bbox": [ + 250, + 683, + 360, + 707 + ], + "spans": [], + "index": 30 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 709, + 505, + 731 + ], + 
"lines": [ + { + "bbox": [ + 105, + 709, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 430, + 723 + ], + "score": 1.0, + "content": "represents a measure of the approximation error for Tseng’s method equivalent to", + "type": "text" + }, + { + "bbox": [ + 430, + 710, + 443, + 721 + ], + "score": 0.9, + "content": "R _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 444, + 709, + 506, + 723 + ], + "score": 1.0, + "content": "defined in (66)", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 720, + 142, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 720, + 142, + 732 + ], + "score": 1.0, + "content": "for SPS.", + "type": "text" + } + ], + "index": 32 + } + ], + "index": 31.5 + } + ], + "page_idx": 28, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 106, + 26, + 308, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 760 + ], + "lines": [ + { + "bbox": [ + 298, + 750, + 313, + 764 + ], + "spans": [ + { + "bbox": [ + 298, + 750, + 313, + 764 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 14, + "width": 15 + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 105, + 81, + 504, + 105 + ], + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 96 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 266, + 96 + ], + "score": 1.0, + "content": "which is an approximation residual for", + "type": "text" + }, + { + "bbox": [ + 266, + 82, + 331, + 95 + ], + "score": 0.92, + "content": "( y _ { 1 } ^ { k } , \\dots , y _ { n } ^ { k } , z ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 331, + 81, + 505, + 96 + ], + "score": 1.0, + "content": "in the sense defined above. 
We may relate", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 107, + 93, + 397, + 106 + ], + "spans": [ + { + "bbox": [ + 107, + 94, + 120, + 105 + ], + "score": 0.88, + "content": "R _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 120, + 93, + 241, + 106 + ], + "score": 1.0, + "content": "to the approximation residual", + "type": "text" + }, + { + "bbox": [ + 241, + 94, + 255, + 105 + ], + "score": 0.89, + "content": "G _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 255, + 93, + 397, + 106 + ], + "score": 1.0, + "content": "for SPS from Section 5 as follows:", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 0.5, + "bbox_fs": [ + 105, + 81, + 505, + 106 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 168, + 108, + 444, + 259 + ], + "lines": [ + { + "bbox": [ + 168, + 108, + 444, + 259 + ], + "spans": [ + { + "bbox": [ + 168, + 108, + 444, + 259 + ], + "score": 0.95, + "content": "\\begin{array} { r l } & { H _ { k } = \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + \\left\\| B ( z ^ { k } ) + \\displaystyle \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\right\\| ^ { 2 } } \\\\ & { \\quad = \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + \\left\\| B ( z ^ { k } ) + \\displaystyle \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } - \\displaystyle \\sum _ { i = 1 } ^ { n + 1 } w _ { i } ^ { k } \\right\\| ^ { 2 } } \\\\ & { \\quad \\leq \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + 2 \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } + 2 \\left\\| \\displaystyle \\sum _ { i = 1 } ^ { n } ( y _ { i } ^ { k } - w _ { i } ^ { k } ) \\right\\| ^ { 2 } } \\\\ & { \\quad \\leq \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + 2 \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } + 2 n \\displaystyle \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } 
- w _ { i } ^ { k } \\| ^ { 2 } } \\\\ & { \\quad < \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + 2 \\| B ( z ^ { k } ) + \\displaystyle \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } } \\\\ & { \\quad < \\rho _ { n } \\alpha , } \\end{array}", + "type": "interline_equation", + "image_path": "8c330fe1de9e966ef91dbdae30060c1e446bcc0e0d0a4af3dfce1d7733b22253.jpg" + } + ] + } + ], + "index": 3, + "virtual_lines": [ + { + "bbox": [ + 168, + 108, + 444, + 158.33333333333334 + ], + "spans": [], + "index": 2 + }, + { + "bbox": [ + 168, + 158.33333333333334, + 444, + 208.66666666666669 + ], + "spans": [], + "index": 3 + }, + { + "bbox": [ + 168, + 208.66666666666669, + 444, + 259.0 + ], + "spans": [], + "index": 4 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 268, + 505, + 293 + ], + "lines": [ + { + "bbox": [ + 104, + 264, + 505, + 288 + ], + "spans": [ + { + "bbox": [ + 104, + 264, + 339, + 288 + ], + "score": 1.0, + "content": "where in the second equality we have used the fact that", + "type": "text" + }, + { + "bbox": [ + 339, + 268, + 401, + 283 + ], + "score": 0.93, + "content": "\\begin{array} { r } { \\sum _ { i = 1 } ^ { n + 1 } w _ { i } ^ { k } = 0 } \\end{array}", + "type": "inline_equation" + }, + { + "bbox": [ + 402, + 264, + 433, + 288 + ], + "score": 1.0, + "content": ". 
Thus,", + "type": "text" + }, + { + "bbox": [ + 434, + 271, + 447, + 281 + ], + "score": 0.9, + "content": "R _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 448, + 264, + 505, + 288 + ], + "score": 1.0, + "content": "has the same", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 282, + 285, + 293 + ], + "spans": [ + { + "bbox": [ + 105, + 282, + 187, + 293 + ], + "score": 1.0, + "content": "convergence rate as", + "type": "text" + }, + { + "bbox": [ + 187, + 282, + 201, + 292 + ], + "score": 0.88, + "content": "G _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 201, + 282, + 285, + 293 + ], + "score": 1.0, + "content": "given in Theorem 2.", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 5.5, + "bbox_fs": [ + 104, + 264, + 505, + 293 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 297, + 504, + 321 + ], + "lines": [ + { + "bbox": [ + 105, + 295, + 506, + 312 + ], + "spans": [ + { + "bbox": [ + 105, + 295, + 402, + 312 + ], + "score": 1.0, + "content": "Note that while the certificate given in (65) focuses on the primal iterate", + "type": "text" + }, + { + "bbox": [ + 402, + 297, + 413, + 308 + ], + "score": 0.87, + "content": "z ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 414, + 295, + 506, + 312 + ], + "score": 1.0, + "content": ", it may be changed to", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 306, + 279, + 324 + ], + "spans": [ + { + "bbox": [ + 105, + 306, + 159, + 324 + ], + "score": 1.0, + "content": "focus on any", + "type": "text" + }, + { + "bbox": [ + 160, + 308, + 171, + 321 + ], + "score": 0.9, + "content": "x _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 172, + 306, + 187, + 324 + ], + "score": 1.0, + "content": "for", + "type": "text" + }, + { + "bbox": [ + 187, + 309, + 238, + 321 + ], + "score": 0.92, + "content": "i = 1 , \\ldots , n", + "type": "inline_equation" + }, + { + "bbox": [ + 238, + 306, + 279, + 324 + ], + 
"score": 1.0, + "content": ", by using", + "type": "text" + } + ], + "index": 8 + } + ], + "index": 7.5, + "bbox_fs": [ + 105, + 295, + 506, + 324 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 198, + 323, + 413, + 382 + ], + "lines": [ + { + "bbox": [ + 198, + 323, + 413, + 382 + ], + "spans": [ + { + "bbox": [ + 198, + 323, + 413, + 382 + ], + "score": 0.94, + "content": "\\boldsymbol { v } _ { i } ^ { k } \\doteq \\left[ \\begin{array} { c } { x _ { 1 } ^ { k } - x _ { i } ^ { k } } \\\\ { \\vdots } \\\\ { x _ { n } ^ { k } - x _ { i } ^ { k } } \\\\ { B ( x _ { i } ^ { k } ) + \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } } \\end{array} \\right] \\in \\mathscr { T } ( y _ { 1 } ^ { k } , \\ldots , y _ { n } ^ { k } , x _ { i } ^ { k } ) .", + "type": "interline_equation", + "image_path": "8204a56b968d2a373a697b9c3bb802847ba995a3a004d55c90e1cbc1f39ce696.jpg" + } + ] + } + ], + "index": 10.5, + "virtual_lines": [ + { + "bbox": [ + 198, + 323, + 413, + 337.75 + ], + "spans": [], + "index": 9 + }, + { + "bbox": [ + 198, + 337.75, + 413, + 352.5 + ], + "spans": [], + "index": 10 + }, + { + "bbox": [ + 198, + 352.5, + 413, + 367.25 + ], + "spans": [], + "index": 11 + }, + { + "bbox": [ + 198, + 367.25, + 413, + 382.0 + ], + "spans": [], + "index": 12 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 385, + 505, + 408 + ], + "lines": [ + { + "bbox": [ + 105, + 384, + 505, + 399 + ], + "spans": [ + { + "bbox": [ + 105, + 384, + 223, + 399 + ], + "score": 1.0, + "content": "The approximation residual", + "type": "text" + }, + { + "bbox": [ + 223, + 385, + 248, + 398 + ], + "score": 0.92, + "content": "\\| v _ { i } ^ { k } \\| ^ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 249, + 384, + 434, + 399 + ], + "score": 1.0, + "content": "may also be shown to have the same rate as", + "type": "text" + }, + { + "bbox": [ + 434, + 387, + 448, + 397 + ], + "score": 0.9, + "content": "G _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 448, 
+ 384, + 505, + 399 + ], + "score": 1.0, + "content": "by following", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 396, + 274, + 409 + ], + "spans": [ + { + "bbox": [ + 105, + 396, + 257, + 409 + ], + "score": 1.0, + "content": "similar derivations to those above for", + "type": "text" + }, + { + "bbox": [ + 257, + 397, + 270, + 408 + ], + "score": 0.89, + "content": "R _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 271, + 396, + 274, + 409 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 14 + } + ], + "index": 13.5, + "bbox_fs": [ + 105, + 384, + 505, + 409 + ] + }, + { + "type": "title", + "bbox": [ + 106, + 421, + 210, + 433 + ], + "lines": [ + { + "bbox": [ + 105, + 420, + 210, + 434 + ], + "spans": [ + { + "bbox": [ + 105, + 420, + 210, + 434 + ], + "score": 1.0, + "content": "F.3 TSENG’S METHOD", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 15 + }, + { + "type": "text", + "bbox": [ + 107, + 441, + 504, + 465 + ], + "lines": [ + { + "bbox": [ + 105, + 440, + 505, + 455 + ], + "spans": [ + { + "bbox": [ + 105, + 440, + 505, + 455 + ], + "score": 1.0, + "content": "Tseng’s method (Tseng, 2000) can be applied to (62), resulting in the following recursion with iterates", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 451, + 181, + 466 + ], + "spans": [ + { + "bbox": [ + 107, + 453, + 176, + 466 + ], + "score": 0.92, + "content": "q ^ { k } , \\bar { q } ^ { \\bar { k } } \\in \\mathbb { R } ^ { ( n + 1 ) d }", + "type": "inline_equation" + }, + { + "bbox": [ + 177, + 451, + 181, + 466 + ], + "score": 1.0, + "content": ":", + "type": "text" + } + ], + "index": 17 + } + ], + "index": 16.5, + "bbox_fs": [ + 105, + 440, + 505, + 466 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 234, + 466, + 376, + 505 + ], + "lines": [ + { + "bbox": [ + 234, + 466, + 376, + 505 + ], + "spans": [ + { + "bbox": [ + 234, + 466, + 376, + 505 + ], + "score": 0.9, + 
"content": "\\begin{array} { c } { \\bar { q } ^ { k } = J _ { \\alpha \\mathcal { A } } ( q ^ { k } - \\alpha \\mathcal { B } ( q ^ { k } ) ) } \\\\ { q ^ { k + 1 } = \\bar { q } ^ { k } + \\alpha \\big ( \\mathcal { B } ( q ^ { k } ) - \\mathcal { B } ( \\bar { q } ^ { k } ) \\big ) , } \\end{array}", + "type": "interline_equation", + "image_path": "89d314a14597b152cec2b1449bae122026196d7ab0fe3ab9a5d6fe622328c894.jpg" + } + ] + } + ], + "index": 18.5, + "virtual_lines": [ + { + "bbox": [ + 234, + 466, + 376, + 485.5 + ], + "spans": [], + "index": 18 + }, + { + "bbox": [ + 234, + 485.5, + 376, + 505.0 + ], + "spans": [], + "index": 19 + } + ] + }, + { + "type": "text", + "bbox": [ + 105, + 507, + 505, + 531 + ], + "lines": [ + { + "bbox": [ + 106, + 507, + 505, + 520 + ], + "spans": [ + { + "bbox": [ + 106, + 507, + 132, + 520 + ], + "score": 1.0, + "content": "where", + "type": "text" + }, + { + "bbox": [ + 133, + 509, + 144, + 518 + ], + "score": 0.83, + "content": "\\mathcal { A }", + "type": "inline_equation" + }, + { + "bbox": [ + 145, + 507, + 162, + 520 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 162, + 509, + 173, + 518 + ], + "score": 0.86, + "content": "\\mathcal { B }", + "type": "inline_equation" + }, + { + "bbox": [ + 173, + 507, + 354, + 520 + ], + "score": 1.0, + "content": "are defined in (63) and (64). 
The resolvent of", + "type": "text" + }, + { + "bbox": [ + 354, + 509, + 366, + 518 + ], + "score": 0.85, + "content": "\\mathcal { A }", + "type": "inline_equation" + }, + { + "bbox": [ + 366, + 507, + 505, + 520 + ], + "score": 1.0, + "content": "may be readily computed from the", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 519, + 491, + 532 + ], + "spans": [ + { + "bbox": [ + 106, + 519, + 174, + 532 + ], + "score": 1.0, + "content": "resolvents of the", + "type": "text" + }, + { + "bbox": [ + 175, + 519, + 186, + 530 + ], + "score": 0.89, + "content": "A _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 187, + 519, + 491, + 532 + ], + "score": 1.0, + "content": "using Moreau’s identity (Bauschke & Combettes, 2017, Proposition 23.20).", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 20.5, + "bbox_fs": [ + 106, + 507, + 505, + 532 + ] + }, + { + "type": "text", + "bbox": [ + 105, + 535, + 504, + 559 + ], + "lines": [ + { + "bbox": [ + 106, + 536, + 505, + 547 + ], + "spans": [ + { + "bbox": [ + 106, + 536, + 505, + 547 + ], + "score": 1.0, + "content": "Analogous to SPS, Tseng’s method has an approximation residual, which in this case is an element of", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 107, + 545, + 506, + 560 + ], + "spans": [ + { + "bbox": [ + 107, + 546, + 135, + 559 + ], + "score": 0.91, + "content": "\\mathcal { T } ( \\bar { q } ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 135, + 545, + 446, + 560 + ], + "score": 1.0, + "content": ". 
In particular, using the general properties of resolvent operators as applied to", + "type": "text" + }, + { + "bbox": [ + 446, + 547, + 466, + 558 + ], + "score": 0.9, + "content": "J _ { \\alpha \\mathcal { A } }", + "type": "inline_equation" + }, + { + "bbox": [ + 466, + 545, + 506, + 560 + ], + "score": 1.0, + "content": ", we have", + "type": "text" + } + ], + "index": 23 + } + ], + "index": 22.5, + "bbox_fs": [ + 106, + 536, + 506, + 560 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 239, + 561, + 371, + 585 + ], + "lines": [ + { + "bbox": [ + 239, + 561, + 371, + 585 + ], + "spans": [ + { + "bbox": [ + 239, + 561, + 371, + 585 + ], + "score": 0.94, + "content": "\\frac { 1 } { \\alpha } ( q ^ { k } - \\bar { q } ^ { k } ) - \\mathcal { B } ( q ^ { k } ) \\in \\mathcal { A } ( \\bar { q } ^ { k } ) .", + "type": "interline_equation", + "image_path": "3ad651c74ae9cc276826b94d05a927814ef5af68d350de03d3fdebef58f09adf.jpg" + } + ] + } + ], + "index": 24, + "virtual_lines": [ + { + "bbox": [ + 239, + 561, + 371, + 585 + ], + "spans": [], + "index": 24 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 588, + 236, + 600 + ], + "lines": [ + { + "bbox": [ + 106, + 588, + 236, + 601 + ], + "spans": [ + { + "bbox": [ + 106, + 588, + 236, + 601 + ], + "score": 1.0, + "content": "Also, rearranging (68) produces", + "type": "text" + } + ], + "index": 25 + } + ], + "index": 25, + "bbox_fs": [ + 106, + 588, + 236, + 601 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 234, + 603, + 377, + 626 + ], + "lines": [ + { + "bbox": [ + 234, + 603, + 377, + 626 + ], + "spans": [ + { + "bbox": [ + 234, + 603, + 377, + 626 + ], + "score": 0.93, + "content": "\\frac { 1 } { \\alpha } ( \\bar { q } ^ { k } - q ^ { k + 1 } ) + \\mathcal { B } ( q ^ { k } ) = \\mathcal { B } ( \\bar { q } ^ { k } ) .", + "type": "interline_equation", + "image_path": "43c918c62fec061b1e0fbc610629a312f5c06c11e5b4ee40a38fe9612a78331d.jpg" + } + ] + } + ], + "index": 26, + 
"virtual_lines": [ + { + "bbox": [ + 234, + 603, + 377, + 626 + ], + "spans": [], + "index": 26 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 628, + 254, + 640 + ], + "lines": [ + { + "bbox": [ + 106, + 628, + 254, + 641 + ], + "spans": [ + { + "bbox": [ + 106, + 628, + 254, + 641 + ], + "score": 1.0, + "content": "Adding these two relations produces", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 27, + "bbox_fs": [ + 106, + 628, + 254, + 641 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 215, + 643, + 396, + 667 + ], + "lines": [ + { + "bbox": [ + 215, + 643, + 396, + 667 + ], + "spans": [ + { + "bbox": [ + 215, + 643, + 396, + 667 + ], + "score": 0.92, + "content": "\\frac { 1 } { \\alpha } ( q ^ { k } - q ^ { k + 1 } ) \\in \\mathcal { A } ( \\bar { q } ^ { k } ) + \\mathcal { B } ( \\bar { q } ^ { k } ) = \\mathcal { T } ( \\bar { q } ^ { k } )", + "type": "interline_equation", + "image_path": "83171b6910d48e4bfa177ccd43caea81d1d347085c2a164be95154adc4fb3099.jpg" + } + ] + } + ], + "index": 28, + "virtual_lines": [ + { + "bbox": [ + 215, + 643, + 396, + 667 + ], + "spans": [], + "index": 28 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 669, + 149, + 681 + ], + "lines": [ + { + "bbox": [ + 105, + 668, + 151, + 682 + ], + "spans": [ + { + "bbox": [ + 105, + 668, + 151, + 682 + ], + "score": 1.0, + "content": "Therefore,", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 29, + "bbox_fs": [ + 105, + 668, + 151, + 682 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 250, + 683, + 360, + 707 + ], + "lines": [ + { + "bbox": [ + 250, + 683, + 360, + 707 + ], + "spans": [ + { + "bbox": [ + 250, + 683, + 360, + 707 + ], + "score": 0.94, + "content": "R _ { k } ^ { \\mathrm { { T s e n g } } } \\doteq \\frac { 1 } { \\alpha ^ { 2 } } \\| q ^ { k } - q ^ { k + 1 } \\| ^ { 2 }", + "type": "interline_equation", + "image_path": "22729217f5cf585083c76e9bf3744e87097890be4fd87e4b64c60b0296861963.jpg" + } + ] + } 
+ ], + "index": 30, + "virtual_lines": [ + { + "bbox": [ + 250, + 683, + 360, + 707 + ], + "spans": [], + "index": 30 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 709, + 505, + 731 + ], + "lines": [ + { + "bbox": [ + 105, + 709, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 430, + 723 + ], + "score": 1.0, + "content": "represents a measure of the approximation error for Tseng’s method equivalent to", + "type": "text" + }, + { + "bbox": [ + 430, + 710, + 443, + 721 + ], + "score": 0.9, + "content": "R _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 444, + 709, + 506, + 723 + ], + "score": 1.0, + "content": "defined in (66)", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 720, + 142, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 720, + 142, + 732 + ], + "score": 1.0, + "content": "for SPS.", + "type": "text" + } + ], + "index": 32 + } + ], + "index": 31.5, + "bbox_fs": [ + 105, + 709, + 506, + 732 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "title", + "bbox": [ + 106, + 82, + 152, + 93 + ], + "lines": [ + { + "bbox": [ + 105, + 81, + 153, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 153, + 94 + ], + "score": 1.0, + "content": "F.4 FRB", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 107, + 102, + 505, + 136 + ], + "lines": [ + { + "bbox": [ + 105, + 101, + 505, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 101, + 505, + 116 + ], + "score": 1.0, + "content": "The forward-reflected-backward method (FRB) (Malitsky & Tam, 2020) is another method that may", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 113, + 505, + 127 + ], + "spans": [ + { + "bbox": [ + 105, + 113, + 212, + 127 + ], + "score": 1.0, + "content": "be applied to the splitting", + "type": "text" + }, + { + "bbox": [ + 213, + 114, + 269, + 124 + ], + "score": 0.92, + "content": "\\mathcal { T } = \\mathcal { A } + \\mathcal { B }", + "type": 
"inline_equation" + }, + { + "bbox": [ + 270, + 113, + 284, + 127 + ], + "score": 1.0, + "content": "for", + "type": "text" + }, + { + "bbox": [ + 285, + 115, + 296, + 124 + ], + "score": 0.86, + "content": "\\mathcal { A }", + "type": "inline_equation" + }, + { + "bbox": [ + 297, + 113, + 314, + 127 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 315, + 115, + 326, + 124 + ], + "score": 0.85, + "content": "\\mathcal { B }", + "type": "inline_equation" + }, + { + "bbox": [ + 326, + 113, + 505, + 127 + ], + "score": 1.0, + "content": "as defined in (63) and (64). Doing so yields", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 126, + 146, + 136 + ], + "spans": [ + { + "bbox": [ + 104, + 126, + 146, + 136 + ], + "score": 1.0, + "content": "recursion", + "type": "text" + } + ], + "index": 3 + } + ], + "index": 2 + }, + { + "type": "interline_equation", + "bbox": [ + 212, + 136, + 398, + 158 + ], + "lines": [ + { + "bbox": [ + 212, + 136, + 398, + 158 + ], + "spans": [ + { + "bbox": [ + 212, + 136, + 398, + 158 + ], + "score": 0.93, + "content": "q ^ { k + 1 } = J _ { \\alpha \\mathcal { A } } \\Big ( q ^ { k } - \\alpha \\big ( 2 \\mathcal { B } ( q ^ { k } ) - \\mathcal { B } ( q ^ { k - 1 } ) \\big ) \\Big ) .", + "type": "interline_equation", + "image_path": "655dd6f05410a89facdc5d6f19324b42be393018991f46d1f42a5e2ff9d18e83.jpg" + } + ] + } + ], + "index": 4, + "virtual_lines": [ + { + "bbox": [ + 212, + 136, + 398, + 158 + ], + "spans": [], + "index": 4 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 159, + 421, + 171 + ], + "lines": [ + { + "bbox": [ + 106, + 158, + 422, + 172 + ], + "spans": [ + { + "bbox": [ + 106, + 158, + 422, + 172 + ], + "score": 1.0, + "content": "Following similar arguments to those for Tseng’s method, it can be shown that", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 5 + }, + { + "type": "interline_equation", + "bbox": [ + 164, + 172, + 446, + 196 + ], + 
"lines": [ + { + "bbox": [ + 164, + 172, + 446, + 196 + ], + "spans": [ + { + "bbox": [ + 164, + 172, + 446, + 196 + ], + "score": 0.9, + "content": "v _ { \\mathrm { F R B } } ^ { k } \\doteq \\frac { 1 } { \\alpha } \\left( q ^ { k - 1 } - q ^ { k } \\right) + \\mathcal { B } ( q ^ { k } ) + \\mathcal { B } ( q ^ { k - 2 } ) - 2 \\mathcal { B } ( q ^ { k - 1 } ) \\in \\mathcal { T } ( q ^ { k } ) .", + "type": "interline_equation", + "image_path": "6b07573d670ff5416c2619a9e8110309fa6f1a414ccd82adbc8eba4d5e7c0478.jpg" + } + ] + } + ], + "index": 6, + "virtual_lines": [ + { + "bbox": [ + 164, + 172, + 446, + 196 + ], + "spans": [], + "index": 6 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 197, + 436, + 209 + ], + "lines": [ + { + "bbox": [ + 105, + 196, + 436, + 210 + ], + "spans": [ + { + "bbox": [ + 105, + 196, + 385, + 210 + ], + "score": 1.0, + "content": "Thus, FRB admits the following approximation residual equivalent to", + "type": "text" + }, + { + "bbox": [ + 386, + 198, + 399, + 209 + ], + "score": 0.88, + "content": "R _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 399, + 196, + 436, + 210 + ], + "score": 1.0, + "content": "for SPS:", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 7 + }, + { + "type": "interline_equation", + "bbox": [ + 269, + 211, + 341, + 226 + ], + "lines": [ + { + "bbox": [ + 269, + 211, + 341, + 226 + ], + "spans": [ + { + "bbox": [ + 269, + 211, + 341, + 226 + ], + "score": 0.89, + "content": "R _ { k } ^ { \\mathrm { F R B } } \\doteq \\| v _ { \\mathrm { F R B } } ^ { k } \\| ^ { 2 } .", + "type": "interline_equation", + "image_path": "0c5fbce05b820ef6ec87537c6abb8acba48a214be84135a5d060c536a9cb9685.jpg" + } + ] + } + ], + "index": 8, + "virtual_lines": [ + { + "bbox": [ + 269, + 211, + 341, + 226 + ], + "spans": [], + "index": 8 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 234, + 504, + 257 + ], + "lines": [ + { + "bbox": [ + 105, + 233, + 506, + 247 + ], + "spans": [ + { + "bbox": 
[ + 105, + 233, + 506, + 247 + ], + "score": 1.0, + "content": "Finally, we remark that the stepsizes used in both the Tseng and FRB methods can be chosen via a", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 246, + 297, + 257 + ], + "spans": [ + { + "bbox": [ + 106, + 246, + 297, + 257 + ], + "score": 1.0, + "content": "linesearch procedure that we do not detail here.", + "type": "text" + } + ], + "index": 10 + } + ], + "index": 9.5 + }, + { + "type": "title", + "bbox": [ + 108, + 269, + 260, + 281 + ], + "lines": [ + { + "bbox": [ + 105, + 268, + 261, + 282 + ], + "spans": [ + { + "bbox": [ + 105, + 268, + 261, + 282 + ], + "score": 1.0, + "content": "F.5 STOCHASTIC TSENG METHOD", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 11 + }, + { + "type": "text", + "bbox": [ + 106, + 289, + 506, + 349 + ], + "lines": [ + { + "bbox": [ + 105, + 290, + 505, + 303 + ], + "spans": [ + { + "bbox": [ + 105, + 290, + 505, + 303 + ], + "score": 1.0, + "content": "The stochastic version of Tseng’s method of (BΓΆhm et al., 2020) (S-Tseng) may be applied to the", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 300, + 507, + 315 + ], + "spans": [ + { + "bbox": [ + 105, + 300, + 146, + 315 + ], + "score": 1.0, + "content": "inclusion", + "type": "text" + }, + { + "bbox": [ + 147, + 301, + 223, + 313 + ], + "score": 0.92, + "content": "0 \\in \\mathcal { A } ( q ) + \\mathcal { B } ( q )", + "type": "inline_equation" + }, + { + "bbox": [ + 224, + 300, + 303, + 315 + ], + "score": 1.0, + "content": ", since the operator", + "type": "text" + }, + { + "bbox": [ + 304, + 302, + 315, + 312 + ], + "score": 0.84, + "content": "\\mathcal { A }", + "type": "inline_equation" + }, + { + "bbox": [ + 316, + 300, + 507, + 315 + ], + "score": 1.0, + "content": "may be written as a subdifferential. 
However,", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 311, + 506, + 326 + ], + "spans": [ + { + "bbox": [ + 105, + 311, + 506, + 326 + ], + "score": 1.0, + "content": "unlike the deterministic Tseng method, it does not produce a valid residual. Note also that S-Tseng", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 320, + 506, + 339 + ], + "spans": [ + { + "bbox": [ + 105, + 320, + 224, + 339 + ], + "score": 1.0, + "content": "outputs an ergodic sequence", + "type": "text" + }, + { + "bbox": [ + 225, + 322, + 239, + 336 + ], + "score": 0.89, + "content": "\\mathbf { \\bar { \\boldsymbol { q } } } _ { \\mathrm { e r g } } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 240, + 320, + 506, + 339 + ], + "score": 1.0, + "content": ". To construct a residual for the ergodic sequence, we compute a", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 103, + 330, + 475, + 354 + ], + "spans": [ + { + "bbox": [ + 103, + 330, + 392, + 354 + ], + "score": 1.0, + "content": "deterministic step of Tseng’s method according to (67)-(68), starting at", + "type": "text" + }, + { + "bbox": [ + 392, + 335, + 407, + 350 + ], + "score": 0.91, + "content": "q _ { \\mathrm { e r g } } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 407, + 330, + 475, + 354 + ], + "score": 1.0, + "content": ". 
That is, letting", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 14 + }, + { + "type": "interline_equation", + "bbox": [ + 234, + 351, + 378, + 385 + ], + "lines": [ + { + "bbox": [ + 234, + 351, + 378, + 385 + ], + "spans": [ + { + "bbox": [ + 234, + 351, + 378, + 385 + ], + "score": 0.78, + "content": "\\begin{array} { r l } & { \\bar { q } ^ { k } = J _ { \\alpha \\mathcal { A } } ( q _ { \\mathrm { e r g } } ^ { k } - \\mathcal { B } ( q _ { \\mathrm { e r g } } ^ { k } ) ) } \\\\ & { q ^ { k + 1 } = \\bar { q } ^ { k } + \\alpha ( \\mathcal { B } ( q _ { \\mathrm { e r g } } ^ { k } ) - \\mathcal { B } ( \\bar { q } ^ { k } ) ) , } \\end{array}", + "type": "interline_equation", + "image_path": "2344c4c5c1c54a1f6fe3ed0f7c91678f883d06deedd00452876444567f27a69a.jpg" + } + ] + } + ], + "index": 17.5, + "virtual_lines": [ + { + "bbox": [ + 234, + 351, + 378, + 368.0 + ], + "spans": [], + "index": 17 + }, + { + "bbox": [ + 234, + 368.0, + 378, + 385.0 + ], + "spans": [], + "index": 18 + } + ] + }, + { + "type": "text", + "bbox": [ + 105, + 386, + 379, + 397 + ], + "lines": [ + { + "bbox": [ + 105, + 384, + 380, + 399 + ], + "spans": [ + { + "bbox": [ + 105, + 384, + 380, + 399 + ], + "score": 1.0, + "content": "we can then compute essentially the same residual as in Section F.3,", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 19 + }, + { + "type": "interline_equation", + "bbox": [ + 244, + 399, + 366, + 423 + ], + "lines": [ + { + "bbox": [ + 244, + 399, + 366, + 423 + ], + "spans": [ + { + "bbox": [ + 244, + 399, + 366, + 423 + ], + "score": 0.93, + "content": "R _ { k } ^ { \\mathrm { { S - T s e n g } } } \\doteq \\frac { 1 } { \\alpha ^ { 2 } } \\| q _ { \\mathrm { { e r g } } } ^ { k } - q ^ { k + 1 } \\| ^ { 2 } .", + "type": "interline_equation", + "image_path": "e620c909214f65b396b3a0722afcf1e8bd8c008ab42e6d865ac16215300f24cb.jpg" + } + ] + } + ], + "index": 20, + "virtual_lines": [ + { + "bbox": [ + 244, + 399, + 366, + 423 + ], 
+ "spans": [], + "index": 20 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 430, + 503, + 445 + ], + "lines": [ + { + "bbox": [ + 104, + 428, + 504, + 448 + ], + "spans": [ + { + "bbox": [ + 104, + 428, + 344, + 448 + ], + "score": 1.0, + "content": "To construct the stochastic oracle for S-Tseng, we assumed", + "type": "text" + }, + { + "bbox": [ + 345, + 431, + 440, + 445 + ], + "score": 0.92, + "content": "\\begin{array} { r } { B ( z ) = \\frac { 1 } { m } \\sum _ { i = 1 } ^ { m } B _ { i } ( z ) } \\end{array}", + "type": "inline_equation" + }, + { + "bbox": [ + 441, + 428, + 504, + 448 + ], + "score": 1.0, + "content": ". Then we used", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 21 + }, + { + "type": "interline_equation", + "bbox": [ + 142, + 447, + 468, + 504 + ], + "lines": [ + { + "bbox": [ + 142, + 447, + 468, + 504 + ], + "spans": [ + { + "bbox": [ + 142, + 447, + 468, + 504 + ], + "score": 0.95, + "content": "\\tilde { \\mathcal { B } } ( w _ { 1 } , \\dots , w _ { n } , z ) \\mapsto \\left[ \\begin{array} { c c c c } { 0 } & { \\cdots } & { 0 } & { - I } \\\\ { \\vdots } & { \\ddots } & { \\vdots } & { \\vdots } \\\\ { 0 } & { \\cdots } & { 0 } & { - I } \\\\ { I } & { \\cdots } & { I } & { 0 } \\end{array} \\right] \\left[ \\begin{array} { c } { w _ { 1 } } \\\\ { \\vdots } \\\\ { w _ { n } } \\\\ { z } \\end{array} \\right] + \\left[ \\begin{array} { c } { 0 } \\\\ { \\vdots } \\\\ { 0 } \\\\ { \\frac { 1 } { \\vert \\mathbf { B } \\vert } \\sum _ { j \\in \\mathbf { B } } B _ { j } ( z ) } \\end{array} \\right] .", + "type": "interline_equation", + "image_path": "249f4f681bd045146bf6bfa6273c06c2cbb4fcfed7052a0b32bc60ffaba59552.jpg" + } + ] + } + ], + "index": 23, + "virtual_lines": [ + { + "bbox": [ + 142, + 447, + 468, + 466.0 + ], + "spans": [], + "index": 22 + }, + { + "bbox": [ + 142, + 466.0, + 468, + 485.0 + ], + "spans": [], + "index": 23 + }, + { + "bbox": [ + 142, + 485.0, + 468, + 504.0 + ], + "spans": [], + 
"index": 24 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 506, + 257, + 518 + ], + "lines": [ + { + "bbox": [ + 105, + 505, + 257, + 519 + ], + "spans": [ + { + "bbox": [ + 105, + 505, + 186, + 519 + ], + "score": 1.0, + "content": "for some minibatch", + "type": "text" + }, + { + "bbox": [ + 187, + 506, + 254, + 518 + ], + "score": 0.93, + "content": "\\mathbf { B } \\in \\{ 1 , \\dots , m \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 254, + 505, + 257, + 519 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 25 + } + ], + "index": 25 + }, + { + "type": "title", + "bbox": [ + 107, + 530, + 246, + 542 + ], + "lines": [ + { + "bbox": [ + 106, + 530, + 246, + 542 + ], + "spans": [ + { + "bbox": [ + 106, + 530, + 246, + 542 + ], + "score": 1.0, + "content": "F.6 VARIANCE-REDUCED FRB", + "type": "text" + } + ], + "index": 26 + } + ], + "index": 26 + }, + { + "type": "text", + "bbox": [ + 107, + 551, + 505, + 586 + ], + "lines": [ + { + "bbox": [ + 105, + 551, + 505, + 564 + ], + "spans": [ + { + "bbox": [ + 105, + 551, + 387, + 564 + ], + "score": 1.0, + "content": "The FRB-VR method of Alacaoglu et al. (2021) can also be applied to", + "type": "text" + }, + { + "bbox": [ + 388, + 551, + 462, + 563 + ], + "score": 0.91, + "content": "0 \\in \\mathcal { A } ( q ) + \\mathcal { B } ( q )", + "type": "inline_equation" + }, + { + "bbox": [ + 463, + 551, + 505, + 564 + ], + "score": 1.0, + "content": ", using the", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 562, + 506, + 576 + ], + "spans": [ + { + "bbox": [ + 105, + 563, + 197, + 576 + ], + "score": 1.0, + "content": "same stochastic oracle", + "type": "text" + }, + { + "bbox": [ + 197, + 562, + 208, + 574 + ], + "score": 0.86, + "content": "\\tilde { \\mathcal { B } }", + "type": "inline_equation" + }, + { + "bbox": [ + 209, + 563, + 414, + 576 + ], + "score": 1.0, + "content": "defined in (69). 
if we let the iterates of FRB-VR be", + "type": "text" + }, + { + "bbox": [ + 414, + 563, + 446, + 576 + ], + "score": 0.92, + "content": "( q ^ { k } , p ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 447, + 563, + 506, + 576 + ], + "score": 1.0, + "content": ", then line 4 of", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 575, + 335, + 587 + ], + "spans": [ + { + "bbox": [ + 106, + 575, + 335, + 587 + ], + "score": 1.0, + "content": "Algorithm 1 of Alacaoglu et al. (2021) can be written as", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 28 + }, + { + "type": "interline_equation", + "bbox": [ + 217, + 587, + 394, + 622 + ], + "lines": [ + { + "bbox": [ + 217, + 587, + 394, + 622 + ], + "spans": [ + { + "bbox": [ + 217, + 587, + 394, + 622 + ], + "score": 0.92, + "content": "\\begin{array} { c } { \\hat { q } ^ { k } = q ^ { k } - \\tau ( \\mathcal { B } ( p ^ { k } ) + \\tilde { \\mathcal { B } } ( q ^ { k } ) - \\tilde { \\mathcal { B } } ( p ^ { k } ) ) } \\\\ { q ^ { k + 1 } = J _ { \\tau \\mathcal { A } } ( \\hat { q } ^ { k } ) . 
} \\end{array}", + "type": "interline_equation", + "image_path": "2aca7939639a60ed396ae7fe36c40b6c6475db65d7a4e41f99e21d78db27d23a.jpg" + } + ] + } + ], + "index": 30.5, + "virtual_lines": [ + { + "bbox": [ + 217, + 587, + 394, + 604.5 + ], + "spans": [], + "index": 30 + }, + { + "bbox": [ + 217, + 604.5, + 394, + 622.0 + ], + "spans": [], + "index": 31 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 628, + 504, + 651 + ], + "lines": [ + { + "bbox": [ + 106, + 628, + 505, + 640 + ], + "spans": [ + { + "bbox": [ + 106, + 628, + 505, + 640 + ], + "score": 1.0, + "content": "Once again, the method does not directly produce a residual, but one can be developed from the", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 636, + 442, + 653 + ], + "spans": [ + { + "bbox": [ + 105, + 636, + 280, + 653 + ], + "score": 1.0, + "content": "algorithm definition as follows: (71) yields", + "type": "text" + }, + { + "bbox": [ + 280, + 639, + 396, + 651 + ], + "score": 0.89, + "content": "\\dot { \\tau } ^ { - 1 } ( \\hat { q } ^ { k } - q ^ { k + 1 } ) \\in \\mathcal { A } ( q ^ { k + 1 } )", + "type": "inline_equation" + }, + { + "bbox": [ + 397, + 636, + 442, + 653 + ], + "score": 1.0, + "content": "and hence", + "type": "text" + } + ], + "index": 33 + } + ], + "index": 32.5 + }, + { + "type": "interline_equation", + "bbox": [ + 205, + 653, + 402, + 668 + ], + "lines": [ + { + "bbox": [ + 205, + 653, + 402, + 668 + ], + "spans": [ + { + "bbox": [ + 205, + 653, + 402, + 668 + ], + "score": 0.84, + "content": "\\tau ^ { - 1 } ( \\hat { q } ^ { k } - q ^ { k + 1 } ) + \\mathcal { B } ( q ^ { k + 1 } ) \\in ( \\mathcal { A } + \\mathcal { B } ) ( q ^ { k + 1 } ) .", + "type": "interline_equation", + "image_path": "246fff05c45ed5e58bd7f2cb080f07240417c731e17fc74bab71fc73ebb16ee9.jpg" + } + ] + } + ], + "index": 34, + "virtual_lines": [ + { + "bbox": [ + 205, + 653, + 402, + 668 + ], + "spans": [], + "index": 34 + } + ] + }, + { + "type": "text", + "bbox": 
[ + 107, + 670, + 226, + 681 + ], + "lines": [ + { + "bbox": [ + 106, + 669, + 226, + 682 + ], + "spans": [ + { + "bbox": [ + 106, + 669, + 226, + 682 + ], + "score": 1.0, + "content": "Therefore we use the residual", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 35 + }, + { + "type": "interline_equation", + "bbox": [ + 214, + 682, + 395, + 698 + ], + "lines": [ + { + "bbox": [ + 214, + 682, + 395, + 698 + ], + "spans": [ + { + "bbox": [ + 214, + 682, + 395, + 698 + ], + "score": 0.9, + "content": "R _ { k } ^ { \\mathrm { F R B - V R } } = \\lVert \\tau ^ { - 1 } ( \\hat { q } ^ { k } - q ^ { k + 1 } ) + \\mathcal { B } ( q ^ { k + 1 } ) \\rVert ^ { 2 } .", + "type": "interline_equation", + "image_path": "1244d3a1edda7049a11dcc5a30602f2df84095e8473a81632532e57fbd0af7c5.jpg" + } + ] + } + ], + "index": 36, + "virtual_lines": [ + { + "bbox": [ + 214, + 682, + 395, + 698 + ], + "spans": [], + "index": 36 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 705, + 505, + 732 + ], + "lines": [ + { + "bbox": [ + 141, + 699, + 510, + 735 + ], + "spans": [ + { + "bbox": [ + 141, + 699, + 167, + 735 + ], + "score": 1.0, + "content": "plots for FR", + "type": "text" + }, + { + "bbox": [ + 167, + 709, + 181, + 720 + ], + "score": 0.9, + "content": "R _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 181, + 699, + 220, + 735 + ], + "score": 1.0, + "content": "for SPS, VR.", + "type": "text" + }, + { + "bbox": [ + 221, + 707, + 246, + 721 + ], + "score": 0.92, + "content": "R _ { k } ^ { \\mathrm { T s e n g } }", + "type": "inline_equation" + }, + { + "bbox": [ + 247, + 699, + 334, + 735 + ], + "score": 1.0, + "content": "for Tseng’s method,", + "type": "text" + }, + { + "bbox": [ + 335, + 708, + 357, + 721 + ], + "score": 0.92, + "content": "R _ { k } ^ { \\mathrm { F R B } }", + "type": "inline_equation" + }, + { + "bbox": [ + 357, + 699, + 398, + 735 + ], + "score": 1.0, + "content": "for FRB,", + "type": "text" + }, + { + "bbox": [ + 399, + 
707, + 431, + 721 + ], + "score": 0.85, + "content": "R _ { k } ^ { \\mathrm { S - T s e n g } }", + "type": "inline_equation" + }, + { + "bbox": [ + 432, + 699, + 510, + 735 + ], + "score": 1.0, + "content": "for S-Tseng, and", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 107, + 720, + 141, + 733 + ], + "spans": [ + { + "bbox": [ + 107, + 720, + 141, + 733 + ], + "score": 0.84, + "content": "R _ { k } ^ { \\mathrm { F R B - V R } }", + "type": "inline_equation" + } + ], + "index": 37 + } + ], + "index": 37.5 + } + ], + "page_idx": 29, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 106, + 26, + 308, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 760 + ], + "lines": [ + { + "bbox": [ + 298, + 750, + 313, + 763 + ], + "spans": [ + { + "bbox": [ + 298, + 750, + 313, + 763 + ], + "score": 1.0, + "content": "30", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "title", + "bbox": [ + 106, + 82, + 152, + 93 + ], + "lines": [ + { + "bbox": [ + 105, + 81, + 153, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 153, + 94 + ], + "score": 1.0, + "content": "F.4 FRB", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 107, + 102, + 505, + 136 + ], + "lines": [ + { + "bbox": [ + 105, + 101, + 505, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 101, + 505, + 116 + ], + "score": 1.0, + "content": "The forward-reflected-backward method (FRB) (Malitsky & Tam, 2020) is another method that may", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 113, + 505, + 127 + ], + "spans": [ + { + "bbox": [ + 105, + 113, + 212, + 127 + ], + "score": 1.0, + "content": 
"be applied to the splitting", + "type": "text" + }, + { + "bbox": [ + 213, + 114, + 269, + 124 + ], + "score": 0.92, + "content": "\\mathcal { T } = \\mathcal { A } + \\mathcal { B }", + "type": "inline_equation" + }, + { + "bbox": [ + 270, + 113, + 284, + 127 + ], + "score": 1.0, + "content": "for", + "type": "text" + }, + { + "bbox": [ + 285, + 115, + 296, + 124 + ], + "score": 0.86, + "content": "\\mathcal { A }", + "type": "inline_equation" + }, + { + "bbox": [ + 297, + 113, + 314, + 127 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 315, + 115, + 326, + 124 + ], + "score": 0.85, + "content": "\\mathcal { B }", + "type": "inline_equation" + }, + { + "bbox": [ + 326, + 113, + 505, + 127 + ], + "score": 1.0, + "content": "as defined in (63) and (64). Doing so yields", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 126, + 146, + 136 + ], + "spans": [ + { + "bbox": [ + 104, + 126, + 146, + 136 + ], + "score": 1.0, + "content": "recursion", + "type": "text" + } + ], + "index": 3 + } + ], + "index": 2, + "bbox_fs": [ + 104, + 101, + 505, + 136 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 212, + 136, + 398, + 158 + ], + "lines": [ + { + "bbox": [ + 212, + 136, + 398, + 158 + ], + "spans": [ + { + "bbox": [ + 212, + 136, + 398, + 158 + ], + "score": 0.93, + "content": "q ^ { k + 1 } = J _ { \\alpha \\mathcal { A } } \\Big ( q ^ { k } - \\alpha \\big ( 2 \\mathcal { B } ( q ^ { k } ) - \\mathcal { B } ( q ^ { k - 1 } ) \\big ) \\Big ) .", + "type": "interline_equation", + "image_path": "655dd6f05410a89facdc5d6f19324b42be393018991f46d1f42a5e2ff9d18e83.jpg" + } + ] + } + ], + "index": 4, + "virtual_lines": [ + { + "bbox": [ + 212, + 136, + 398, + 158 + ], + "spans": [], + "index": 4 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 159, + 421, + 171 + ], + "lines": [ + { + "bbox": [ + 106, + 158, + 422, + 172 + ], + "spans": [ + { + "bbox": [ + 106, + 158, + 422, + 172 + ], + "score": 1.0, + 
"content": "Following similar arguments to those for Tseng’s method, it can be shown that", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 5, + "bbox_fs": [ + 106, + 158, + 422, + 172 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 164, + 172, + 446, + 196 + ], + "lines": [ + { + "bbox": [ + 164, + 172, + 446, + 196 + ], + "spans": [ + { + "bbox": [ + 164, + 172, + 446, + 196 + ], + "score": 0.9, + "content": "v _ { \\mathrm { F R B } } ^ { k } \\doteq \\frac { 1 } { \\alpha } \\left( q ^ { k - 1 } - q ^ { k } \\right) + \\mathcal { B } ( q ^ { k } ) + \\mathcal { B } ( q ^ { k - 2 } ) - 2 \\mathcal { B } ( q ^ { k - 1 } ) \\in \\mathcal { T } ( q ^ { k } ) .", + "type": "interline_equation", + "image_path": "6b07573d670ff5416c2619a9e8110309fa6f1a414ccd82adbc8eba4d5e7c0478.jpg" + } + ] + } + ], + "index": 6, + "virtual_lines": [ + { + "bbox": [ + 164, + 172, + 446, + 196 + ], + "spans": [], + "index": 6 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 197, + 436, + 209 + ], + "lines": [ + { + "bbox": [ + 105, + 196, + 436, + 210 + ], + "spans": [ + { + "bbox": [ + 105, + 196, + 385, + 210 + ], + "score": 1.0, + "content": "Thus, FRB admits the following approximation residual equivalent to", + "type": "text" + }, + { + "bbox": [ + 386, + 198, + 399, + 209 + ], + "score": 0.88, + "content": "R _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 399, + 196, + 436, + 210 + ], + "score": 1.0, + "content": "for SPS:", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 7, + "bbox_fs": [ + 105, + 196, + 436, + 210 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 269, + 211, + 341, + 226 + ], + "lines": [ + { + "bbox": [ + 269, + 211, + 341, + 226 + ], + "spans": [ + { + "bbox": [ + 269, + 211, + 341, + 226 + ], + "score": 0.89, + "content": "R _ { k } ^ { \\mathrm { F R B } } \\doteq \\| v _ { \\mathrm { F R B } } ^ { k } \\| ^ { 2 } .", + "type": "interline_equation", + "image_path": 
"0c5fbce05b820ef6ec87537c6abb8acba48a214be84135a5d060c536a9cb9685.jpg" + } + ] + } + ], + "index": 8, + "virtual_lines": [ + { + "bbox": [ + 269, + 211, + 341, + 226 + ], + "spans": [], + "index": 8 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 234, + 504, + 257 + ], + "lines": [ + { + "bbox": [ + 105, + 233, + 506, + 247 + ], + "spans": [ + { + "bbox": [ + 105, + 233, + 506, + 247 + ], + "score": 1.0, + "content": "Finally, we remark that the stepsizes used in both the Tseng and FRB methods can be chosen via a", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 246, + 297, + 257 + ], + "spans": [ + { + "bbox": [ + 106, + 246, + 297, + 257 + ], + "score": 1.0, + "content": "linesearch procedure that we do not detail here.", + "type": "text" + } + ], + "index": 10 + } + ], + "index": 9.5, + "bbox_fs": [ + 105, + 233, + 506, + 257 + ] + }, + { + "type": "title", + "bbox": [ + 108, + 269, + 260, + 281 + ], + "lines": [ + { + "bbox": [ + 105, + 268, + 261, + 282 + ], + "spans": [ + { + "bbox": [ + 105, + 268, + 261, + 282 + ], + "score": 1.0, + "content": "F.5 STOCHASTIC TSENG METHOD", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 11 + }, + { + "type": "text", + "bbox": [ + 106, + 289, + 506, + 349 + ], + "lines": [ + { + "bbox": [ + 105, + 290, + 505, + 303 + ], + "spans": [ + { + "bbox": [ + 105, + 290, + 505, + 303 + ], + "score": 1.0, + "content": "The stochastic version of Tseng’s method of (BΓΆhm et al., 2020) (S-Tseng) may be applied to the", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 300, + 507, + 315 + ], + "spans": [ + { + "bbox": [ + 105, + 300, + 146, + 315 + ], + "score": 1.0, + "content": "inclusion", + "type": "text" + }, + { + "bbox": [ + 147, + 301, + 223, + 313 + ], + "score": 0.92, + "content": "0 \\in \\mathcal { A } ( q ) + \\mathcal { B } ( q )", + "type": "inline_equation" + }, + { + "bbox": [ + 224, + 300, + 303, + 315 + ], + "score": 1.0, + "content": ", since the operator", 
+ "type": "text" + }, + { + "bbox": [ + 304, + 302, + 315, + 312 + ], + "score": 0.84, + "content": "\\mathcal { A }", + "type": "inline_equation" + }, + { + "bbox": [ + 316, + 300, + 507, + 315 + ], + "score": 1.0, + "content": "may be written as a subdifferential. However,", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 311, + 506, + 326 + ], + "spans": [ + { + "bbox": [ + 105, + 311, + 506, + 326 + ], + "score": 1.0, + "content": "unlike the deterministic Tseng method, it does not produce a valid residual. Note also that S-Tseng", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 320, + 506, + 339 + ], + "spans": [ + { + "bbox": [ + 105, + 320, + 224, + 339 + ], + "score": 1.0, + "content": "outputs an ergodic sequence", + "type": "text" + }, + { + "bbox": [ + 225, + 322, + 239, + 336 + ], + "score": 0.89, + "content": "\\mathbf { \\bar { \\boldsymbol { q } } } _ { \\mathrm { e r g } } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 240, + 320, + 506, + 339 + ], + "score": 1.0, + "content": ". To construct a residual for the ergodic sequence, we compute a", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 103, + 330, + 475, + 354 + ], + "spans": [ + { + "bbox": [ + 103, + 330, + 392, + 354 + ], + "score": 1.0, + "content": "deterministic step of Tseng’s method according to (67)-(68), starting at", + "type": "text" + }, + { + "bbox": [ + 392, + 335, + 407, + 350 + ], + "score": 0.91, + "content": "q _ { \\mathrm { e r g } } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 407, + 330, + 475, + 354 + ], + "score": 1.0, + "content": ". 
That is, letting", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 14, + "bbox_fs": [ + 103, + 290, + 507, + 354 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 234, + 351, + 378, + 385 + ], + "lines": [ + { + "bbox": [ + 234, + 351, + 378, + 385 + ], + "spans": [ + { + "bbox": [ + 234, + 351, + 378, + 385 + ], + "score": 0.78, + "content": "\\begin{array} { r l } & { \\bar { q } ^ { k } = J _ { \\alpha \\mathcal { A } } ( q _ { \\mathrm { e r g } } ^ { k } - \\mathcal { B } ( q _ { \\mathrm { e r g } } ^ { k } ) ) } \\\\ & { q ^ { k + 1 } = \\bar { q } ^ { k } + \\alpha ( \\mathcal { B } ( q _ { \\mathrm { e r g } } ^ { k } ) - \\mathcal { B } ( \\bar { q } ^ { k } ) ) , } \\end{array}", + "type": "interline_equation", + "image_path": "2344c4c5c1c54a1f6fe3ed0f7c91678f883d06deedd00452876444567f27a69a.jpg" + } + ] + } + ], + "index": 17.5, + "virtual_lines": [ + { + "bbox": [ + 234, + 351, + 378, + 368.0 + ], + "spans": [], + "index": 17 + }, + { + "bbox": [ + 234, + 368.0, + 378, + 385.0 + ], + "spans": [], + "index": 18 + } + ] + }, + { + "type": "text", + "bbox": [ + 105, + 386, + 379, + 397 + ], + "lines": [ + { + "bbox": [ + 105, + 384, + 380, + 399 + ], + "spans": [ + { + "bbox": [ + 105, + 384, + 380, + 399 + ], + "score": 1.0, + "content": "we can then compute essentially the same residual as in Section F.3,", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 19, + "bbox_fs": [ + 105, + 384, + 380, + 399 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 244, + 399, + 366, + 423 + ], + "lines": [ + { + "bbox": [ + 244, + 399, + 366, + 423 + ], + "spans": [ + { + "bbox": [ + 244, + 399, + 366, + 423 + ], + "score": 0.93, + "content": "R _ { k } ^ { \\mathrm { { S - T s e n g } } } \\doteq \\frac { 1 } { \\alpha ^ { 2 } } \\| q _ { \\mathrm { { e r g } } } ^ { k } - q ^ { k + 1 } \\| ^ { 2 } .", + "type": "interline_equation", + "image_path": "e620c909214f65b396b3a0722afcf1e8bd8c008ab42e6d865ac16215300f24cb.jpg" + } + ] 
+ } + ], + "index": 20, + "virtual_lines": [ + { + "bbox": [ + 244, + 399, + 366, + 423 + ], + "spans": [], + "index": 20 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 430, + 503, + 445 + ], + "lines": [ + { + "bbox": [ + 104, + 428, + 504, + 448 + ], + "spans": [ + { + "bbox": [ + 104, + 428, + 344, + 448 + ], + "score": 1.0, + "content": "To construct the stochastic oracle for S-Tseng, we assumed", + "type": "text" + }, + { + "bbox": [ + 345, + 431, + 440, + 445 + ], + "score": 0.92, + "content": "\\begin{array} { r } { B ( z ) = \\frac { 1 } { m } \\sum _ { i = 1 } ^ { m } B _ { i } ( z ) } \\end{array}", + "type": "inline_equation" + }, + { + "bbox": [ + 441, + 428, + 504, + 448 + ], + "score": 1.0, + "content": ". Then we used", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 21, + "bbox_fs": [ + 104, + 428, + 504, + 448 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 142, + 447, + 468, + 504 + ], + "lines": [ + { + "bbox": [ + 142, + 447, + 468, + 504 + ], + "spans": [ + { + "bbox": [ + 142, + 447, + 468, + 504 + ], + "score": 0.95, + "content": "\\tilde { \\mathcal { B } } ( w _ { 1 } , \\dots , w _ { n } , z ) \\mapsto \\left[ \\begin{array} { c c c c } { 0 } & { \\cdots } & { 0 } & { - I } \\\\ { \\vdots } & { \\ddots } & { \\vdots } & { \\vdots } \\\\ { 0 } & { \\cdots } & { 0 } & { - I } \\\\ { I } & { \\cdots } & { I } & { 0 } \\end{array} \\right] \\left[ \\begin{array} { c } { w _ { 1 } } \\\\ { \\vdots } \\\\ { w _ { n } } \\\\ { z } \\end{array} \\right] + \\left[ \\begin{array} { c } { 0 } \\\\ { \\vdots } \\\\ { 0 } \\\\ { \\frac { 1 } { \\vert \\mathbf { B } \\vert } \\sum _ { j \\in \\mathbf { B } } B _ { j } ( z ) } \\end{array} \\right] .", + "type": "interline_equation", + "image_path": "249f4f681bd045146bf6bfa6273c06c2cbb4fcfed7052a0b32bc60ffaba59552.jpg" + } + ] + } + ], + "index": 23, + "virtual_lines": [ + { + "bbox": [ + 142, + 447, + 468, + 466.0 + ], + "spans": [], + "index": 22 + }, + { + "bbox": [ + 
142, + 466.0, + 468, + 485.0 + ], + "spans": [], + "index": 23 + }, + { + "bbox": [ + 142, + 485.0, + 468, + 504.0 + ], + "spans": [], + "index": 24 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 506, + 257, + 518 + ], + "lines": [ + { + "bbox": [ + 105, + 505, + 257, + 519 + ], + "spans": [ + { + "bbox": [ + 105, + 505, + 186, + 519 + ], + "score": 1.0, + "content": "for some minibatch", + "type": "text" + }, + { + "bbox": [ + 187, + 506, + 254, + 518 + ], + "score": 0.93, + "content": "\\mathbf { B } \\in \\{ 1 , \\dots , m \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 254, + 505, + 257, + 519 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 25 + } + ], + "index": 25, + "bbox_fs": [ + 105, + 505, + 257, + 519 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 530, + 246, + 542 + ], + "lines": [ + { + "bbox": [ + 106, + 530, + 246, + 542 + ], + "spans": [ + { + "bbox": [ + 106, + 530, + 246, + 542 + ], + "score": 1.0, + "content": "F.6 VARIANCE-REDUCED FRB", + "type": "text" + } + ], + "index": 26 + } + ], + "index": 26 + }, + { + "type": "text", + "bbox": [ + 107, + 551, + 505, + 586 + ], + "lines": [ + { + "bbox": [ + 105, + 551, + 505, + 564 + ], + "spans": [ + { + "bbox": [ + 105, + 551, + 387, + 564 + ], + "score": 1.0, + "content": "The FRB-VR method of Alacaoglu et al. 
(2021) can also be applied to", + "type": "text" + }, + { + "bbox": [ + 388, + 551, + 462, + 563 + ], + "score": 0.91, + "content": "0 \\in \\mathcal { A } ( q ) + \\mathcal { B } ( q )", + "type": "inline_equation" + }, + { + "bbox": [ + 463, + 551, + 505, + 564 + ], + "score": 1.0, + "content": ", using the", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 562, + 506, + 576 + ], + "spans": [ + { + "bbox": [ + 105, + 563, + 197, + 576 + ], + "score": 1.0, + "content": "same stochastic oracle", + "type": "text" + }, + { + "bbox": [ + 197, + 562, + 208, + 574 + ], + "score": 0.86, + "content": "\\tilde { \\mathcal { B } }", + "type": "inline_equation" + }, + { + "bbox": [ + 209, + 563, + 414, + 576 + ], + "score": 1.0, + "content": "defined in (69). if we let the iterates of FRB-VR be", + "type": "text" + }, + { + "bbox": [ + 414, + 563, + 446, + 576 + ], + "score": 0.92, + "content": "( q ^ { k } , p ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 447, + 563, + 506, + 576 + ], + "score": 1.0, + "content": ", then line 4 of", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 575, + 335, + 587 + ], + "spans": [ + { + "bbox": [ + 106, + 575, + 335, + 587 + ], + "score": 1.0, + "content": "Algorithm 1 of Alacaoglu et al. (2021) can be written as", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 28, + "bbox_fs": [ + 105, + 551, + 506, + 587 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 217, + 587, + 394, + 622 + ], + "lines": [ + { + "bbox": [ + 217, + 587, + 394, + 622 + ], + "spans": [ + { + "bbox": [ + 217, + 587, + 394, + 622 + ], + "score": 0.92, + "content": "\\begin{array} { c } { \\hat { q } ^ { k } = q ^ { k } - \\tau ( \\mathcal { B } ( p ^ { k } ) + \\tilde { \\mathcal { B } } ( q ^ { k } ) - \\tilde { \\mathcal { B } } ( p ^ { k } ) ) } \\\\ { q ^ { k + 1 } = J _ { \\tau \\mathcal { A } } ( \\hat { q } ^ { k } ) . 
} \\end{array}", + "type": "interline_equation", + "image_path": "2aca7939639a60ed396ae7fe36c40b6c6475db65d7a4e41f99e21d78db27d23a.jpg" + } + ] + } + ], + "index": 30.5, + "virtual_lines": [ + { + "bbox": [ + 217, + 587, + 394, + 604.5 + ], + "spans": [], + "index": 30 + }, + { + "bbox": [ + 217, + 604.5, + 394, + 622.0 + ], + "spans": [], + "index": 31 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 628, + 504, + 651 + ], + "lines": [ + { + "bbox": [ + 106, + 628, + 505, + 640 + ], + "spans": [ + { + "bbox": [ + 106, + 628, + 505, + 640 + ], + "score": 1.0, + "content": "Once again, the method does not directly produce a residual, but one can be developed from the", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 636, + 442, + 653 + ], + "spans": [ + { + "bbox": [ + 105, + 636, + 280, + 653 + ], + "score": 1.0, + "content": "algorithm definition as follows: (71) yields", + "type": "text" + }, + { + "bbox": [ + 280, + 639, + 396, + 651 + ], + "score": 0.89, + "content": "\\dot { \\tau } ^ { - 1 } ( \\hat { q } ^ { k } - q ^ { k + 1 } ) \\in \\mathcal { A } ( q ^ { k + 1 } )", + "type": "inline_equation" + }, + { + "bbox": [ + 397, + 636, + 442, + 653 + ], + "score": 1.0, + "content": "and hence", + "type": "text" + } + ], + "index": 33 + } + ], + "index": 32.5, + "bbox_fs": [ + 105, + 628, + 505, + 653 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 205, + 653, + 402, + 668 + ], + "lines": [ + { + "bbox": [ + 205, + 653, + 402, + 668 + ], + "spans": [ + { + "bbox": [ + 205, + 653, + 402, + 668 + ], + "score": 0.84, + "content": "\\tau ^ { - 1 } ( \\hat { q } ^ { k } - q ^ { k + 1 } ) + \\mathcal { B } ( q ^ { k + 1 } ) \\in ( \\mathcal { A } + \\mathcal { B } ) ( q ^ { k + 1 } ) .", + "type": "interline_equation", + "image_path": "246fff05c45ed5e58bd7f2cb080f07240417c731e17fc74bab71fc73ebb16ee9.jpg" + } + ] + } + ], + "index": 34, + "virtual_lines": [ + { + "bbox": [ + 205, + 653, + 402, + 668 + ], + "spans": [], + "index": 
34 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 670, + 226, + 681 + ], + "lines": [ + { + "bbox": [ + 106, + 669, + 226, + 682 + ], + "spans": [ + { + "bbox": [ + 106, + 669, + 226, + 682 + ], + "score": 1.0, + "content": "Therefore we use the residual", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 35, + "bbox_fs": [ + 106, + 669, + 226, + 682 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 214, + 682, + 395, + 698 + ], + "lines": [ + { + "bbox": [ + 214, + 682, + 395, + 698 + ], + "spans": [ + { + "bbox": [ + 214, + 682, + 395, + 698 + ], + "score": 0.9, + "content": "R _ { k } ^ { \\mathrm { F R B - V R } } = \\lVert \\tau ^ { - 1 } ( \\hat { q } ^ { k } - q ^ { k + 1 } ) + \\mathcal { B } ( q ^ { k + 1 } ) \\rVert ^ { 2 } .", + "type": "interline_equation", + "image_path": "1244d3a1edda7049a11dcc5a30602f2df84095e8473a81632532e57fbd0af7c5.jpg" + } + ] + } + ], + "index": 36, + "virtual_lines": [ + { + "bbox": [ + 214, + 682, + 395, + 698 + ], + "spans": [], + "index": 36 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 705, + 505, + 732 + ], + "lines": [ + { + "bbox": [ + 141, + 699, + 510, + 735 + ], + "spans": [ + { + "bbox": [ + 141, + 699, + 167, + 735 + ], + "score": 1.0, + "content": "plots for FR", + "type": "text" + }, + { + "bbox": [ + 167, + 709, + 181, + 720 + ], + "score": 0.9, + "content": "R _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 181, + 699, + 220, + 735 + ], + "score": 1.0, + "content": "for SPS, VR.", + "type": "text" + }, + { + "bbox": [ + 221, + 707, + 246, + 721 + ], + "score": 0.92, + "content": "R _ { k } ^ { \\mathrm { T s e n g } }", + "type": "inline_equation" + }, + { + "bbox": [ + 247, + 699, + 334, + 735 + ], + "score": 1.0, + "content": "for Tseng’s method,", + "type": "text" + }, + { + "bbox": [ + 335, + 708, + 357, + 721 + ], + "score": 0.92, + "content": "R _ { k } ^ { \\mathrm { F R B } }", + "type": "inline_equation" + }, + { + "bbox": [ + 357, + 699, + 398, + 735 + 
], + "score": 1.0, + "content": "for FRB,", + "type": "text" + }, + { + "bbox": [ + 399, + 707, + 431, + 721 + ], + "score": 0.85, + "content": "R _ { k } ^ { \\mathrm { S - T s e n g } }", + "type": "inline_equation" + }, + { + "bbox": [ + 432, + 699, + 510, + 735 + ], + "score": 1.0, + "content": "for S-Tseng, and", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 107, + 720, + 141, + 733 + ], + "spans": [ + { + "bbox": [ + 107, + 720, + 141, + 733 + ], + "score": 0.84, + "content": "R _ { k } ^ { \\mathrm { F R B - V R } }", + "type": "inline_equation" + } + ], + "index": 37 + } + ], + "index": 37.5, + "bbox_fs": [ + 107, + 699, + 510, + 735 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "title", + "bbox": [ + 107, + 82, + 437, + 94 + ], + "lines": [ + { + "bbox": [ + 105, + 82, + 439, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 439, + 95 + ], + "score": 1.0, + "content": "F.7 BENEFITS AND DRAWBACKS OF THE PRODUCT SPACE REFORMULATION", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 106, + 102, + 505, + 224 + ], + "lines": [ + { + "bbox": [ + 105, + 102, + 506, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 102, + 506, + 116 + ], + "score": 1.0, + "content": "The main benefit of the product space reformulation (PSR) is that it allows one to use familiar", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 114, + 505, + 127 + ], + "spans": [ + { + "bbox": [ + 105, + 114, + 267, + 127 + ], + "score": 1.0, + "content": "2-operator splitting schemes for solving", + "type": "text" + }, + { + "bbox": [ + 267, + 114, + 342, + 126 + ], + "score": 0.93, + "content": "0 \\in \\mathcal { A } ( q ) + \\mathcal { B } ( q )", + "type": "inline_equation" + }, + { + "bbox": [ + 343, + 114, + 505, + 127 + ], + "score": 1.0, + "content": "to solve the more complicated recursion", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 125, + 506, + 137 + ], + 
"spans": [ + { + "bbox": [ + 105, + 125, + 375, + 137 + ], + "score": 1.0, + "content": "(1). However, one drawback of this approach is that the operator", + "type": "text" + }, + { + "bbox": [ + 375, + 126, + 386, + 135 + ], + "score": 0.83, + "content": "\\mathcal { B }", + "type": "inline_equation" + }, + { + "bbox": [ + 387, + 125, + 506, + 137 + ], + "score": 1.0, + "content": ", defined in (64), combines a", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 135, + 506, + 149 + ], + "spans": [ + { + "bbox": [ + 105, + 135, + 361, + 149 + ], + "score": 1.0, + "content": "skew-symmetric consensus matrix with the Lipschitz operator", + "type": "text" + }, + { + "bbox": [ + 361, + 136, + 370, + 146 + ], + "score": 0.78, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 371, + 135, + 411, + 149 + ], + "score": 1.0, + "content": ". Treating", + "type": "text" + }, + { + "bbox": [ + 412, + 136, + 423, + 146 + ], + "score": 0.84, + "content": "\\mathcal { B }", + "type": "inline_equation" + }, + { + "bbox": [ + 423, + 135, + 506, + 149 + ], + "score": 1.0, + "content": "as a single operator", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 147, + 506, + 159 + ], + "spans": [ + { + "bbox": [ + 105, + 147, + 428, + 159 + ], + "score": 1.0, + "content": "necessitates using a single stepsize for both of its constituent operators, but the", + "type": "text" + }, + { + "bbox": [ + 428, + 147, + 438, + 157 + ], + "score": 0.8, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 438, + 147, + 506, + 159 + ], + "score": 1.0, + "content": "component will", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 158, + 506, + 172 + ], + "spans": [ + { + "bbox": [ + 105, + 158, + 506, + 172 + ], + "score": 1.0, + "content": "generally have a much larger Lipschitz constant than the skew part, necessitating a smaller stepsize", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, 
+ 168, + 505, + 181 + ], + "spans": [ + { + "bbox": [ + 105, + 168, + 505, + 181 + ], + "score": 1.0, + "content": "than is ideal for the skew operator. This difficulty can be countered by using different stepsizes", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 179, + 505, + 192 + ], + "spans": [ + { + "bbox": [ + 105, + 179, + 505, + 192 + ], + "score": 1.0, + "content": "for the primal and dual components, but that strategy introduces additional tuning parameters. In", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 190, + 505, + 203 + ], + "spans": [ + { + "bbox": [ + 106, + 190, + 505, + 203 + ], + "score": 1.0, + "content": "other works, methods based on PSR have exhibited slower convergence than deterministic projective", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 202, + 506, + 214 + ], + "spans": [ + { + "bbox": [ + 105, + 202, + 506, + 214 + ], + "score": 1.0, + "content": "splitting methods (Johnstone & Eckstein, 2021; 2020b). 
However, in our experiments in Section 7,", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 212, + 234, + 225 + ], + "spans": [ + { + "bbox": [ + 106, + 212, + 234, + 225 + ], + "score": 1.0, + "content": "the performance is comparable.", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 6 + }, + { + "type": "title", + "bbox": [ + 108, + 239, + 276, + 252 + ], + "lines": [ + { + "bbox": [ + 105, + 238, + 278, + 255 + ], + "spans": [ + { + "bbox": [ + 105, + 238, + 278, + 255 + ], + "score": 1.0, + "content": "G VARIATIONAL INEQUALITIES", + "type": "text" + } + ], + "index": 12 + } + ], + "index": 12 + }, + { + "type": "text", + "bbox": [ + 108, + 263, + 503, + 286 + ], + "lines": [ + { + "bbox": [ + 105, + 262, + 505, + 277 + ], + "spans": [ + { + "bbox": [ + 105, + 262, + 170, + 277 + ], + "score": 1.0, + "content": "For a mapping", + "type": "text" + }, + { + "bbox": [ + 170, + 263, + 231, + 273 + ], + "score": 0.92, + "content": "B : \\mathbb { R } ^ { d } \\mathbb { R } ^ { d }", + "type": "inline_equation" + }, + { + "bbox": [ + 231, + 262, + 351, + 277 + ], + "score": 1.0, + "content": "and a closed and convex set", + "type": "text" + }, + { + "bbox": [ + 352, + 264, + 358, + 273 + ], + "score": 0.8, + "content": "\\mathcal { C }", + "type": "inline_equation" + }, + { + "bbox": [ + 359, + 262, + 505, + 277 + ], + "score": 1.0, + "content": ", the variational inequality problem", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 275, + 307, + 286 + ], + "spans": [ + { + "bbox": [ + 105, + 275, + 237, + 286 + ], + "score": 1.0, + "content": "(Harker & Pang, 1990) is to find", + "type": "text" + }, + { + "bbox": [ + 238, + 275, + 266, + 285 + ], + "score": 0.91, + "content": "z ^ { \\ast } \\in \\mathcal { C }", + "type": "inline_equation" + }, + { + "bbox": [ + 267, + 275, + 307, + 286 + ], + "score": 1.0, + "content": "such that", + "type": "text" + } + ], + "index": 14 + } + ], + "index": 13.5 + }, + { + 
"type": "interline_equation", + "bbox": [ + 244, + 287, + 367, + 302 + ], + "lines": [ + { + "bbox": [ + 244, + 287, + 367, + 302 + ], + "spans": [ + { + "bbox": [ + 244, + 287, + 367, + 302 + ], + "score": 0.91, + "content": "B ( z ^ { * } ) ^ { \\top } ( z - z ^ { * } ) \\geq 0 , \\forall z \\in { \\mathcal { C } } .", + "type": "interline_equation", + "image_path": "7363e137bd9d3e57b042782850801a76b88095c5d51c497025360deb38801bea.jpg" + } + ] + } + ], + "index": 15, + "virtual_lines": [ + { + "bbox": [ + 244, + 287, + 367, + 302 + ], + "spans": [], + "index": 15 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 303, + 399, + 314 + ], + "lines": [ + { + "bbox": [ + 106, + 302, + 399, + 316 + ], + "spans": [ + { + "bbox": [ + 106, + 302, + 399, + 316 + ], + "score": 1.0, + "content": "Consider the normal cone mapping discussed in Section 2 and defined as", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 16 + }, + { + "type": "interline_equation", + "bbox": [ + 226, + 315, + 385, + 330 + ], + "lines": [ + { + "bbox": [ + 226, + 315, + 385, + 330 + ], + "spans": [ + { + "bbox": [ + 226, + 315, + 385, + 330 + ], + "score": 0.91, + "content": "N _ { { \\mathcal { C } } } ( x ) \\doteq \\{ g : g ^ { \\top } ( y - x ) \\le 0 \\ \\forall y \\in { \\mathcal { C } } \\}", + "type": "interline_equation", + "image_path": "a3ba3311c6ed414d58176f577330a1ad43554d1ea0c2294617992ad703954d54.jpg" + } + ] + } + ], + "index": 17, + "virtual_lines": [ + { + "bbox": [ + 226, + 315, + 385, + 330 + ], + "spans": [], + "index": 17 + } + ] + }, + { + "type": "text", + "bbox": [ + 104, + 331, + 504, + 353 + ], + "lines": [ + { + "bbox": [ + 106, + 331, + 505, + 344 + ], + "spans": [ + { + "bbox": [ + 106, + 331, + 307, + 344 + ], + "score": 1.0, + "content": "It is easily seen that (72) is equivalent to finding", + "type": "text" + }, + { + "bbox": [ + 307, + 332, + 318, + 342 + ], + "score": 0.86, + "content": "z ^ { * }", + "type": "inline_equation" + }, + { + "bbox": [ 
+ 319, + 331, + 360, + 344 + ], + "score": 1.0, + "content": "such that", + "type": "text" + }, + { + "bbox": [ + 360, + 331, + 438, + 343 + ], + "score": 0.93, + "content": "- B ( z ^ { * } ) \\in N _ { \\mathcal { C } } ( z ^ { * } )", + "type": "inline_equation" + }, + { + "bbox": [ + 439, + 331, + 484, + 344 + ], + "score": 1.0, + "content": ". Hence, if", + "type": "text" + }, + { + "bbox": [ + 485, + 332, + 494, + 341 + ], + "score": 0.83, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 495, + 331, + 505, + 344 + ], + "score": 1.0, + "content": "is", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 343, + 329, + 354 + ], + "spans": [ + { + "bbox": [ + 105, + 343, + 329, + 354 + ], + "score": 1.0, + "content": "monotone, (72) is equivalent to the monotone inclusion", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 18.5 + }, + { + "type": "interline_equation", + "bbox": [ + 260, + 354, + 351, + 368 + ], + "lines": [ + { + "bbox": [ + 260, + 354, + 351, + 368 + ], + "spans": [ + { + "bbox": [ + 260, + 354, + 351, + 368 + ], + "score": 0.91, + "content": "0 \\in B ( z ^ { * } ) + N _ { \\cal { C } } ( z ^ { * } ) .", + "type": "interline_equation", + "image_path": "3c03e1f04f0304a1368e3e3b7053ce68dfc5a0e546ad31d95dd9c2efb339f212.jpg" + } + ] + } + ], + "index": 20, + "virtual_lines": [ + { + "bbox": [ + 260, + 354, + 351, + 368 + ], + "spans": [], + "index": 20 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 369, + 505, + 458 + ], + "lines": [ + { + "bbox": [ + 105, + 369, + 506, + 382 + ], + "spans": [ + { + "bbox": [ + 105, + 369, + 506, + 382 + ], + "score": 1.0, + "content": "Thus, monotone variational inequalities are a special case of monotone inclusions with two operators,", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 106, + 381, + 506, + 393 + ], + "spans": [ + { + "bbox": [ + 106, + 381, + 470, + 393 + ], + "score": 1.0, + "content": "one of which is single-valued and the 
other is the normal cone map of the constraint set", + "type": "text" + }, + { + "bbox": [ + 471, + 381, + 478, + 390 + ], + "score": 0.73, + "content": "\\mathcal { C }", + "type": "inline_equation" + }, + { + "bbox": [ + 478, + 381, + 506, + 393 + ], + "score": 1.0, + "content": ". As a", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 106, + 392, + 505, + 404 + ], + "spans": [ + { + "bbox": [ + 106, + 392, + 505, + 404 + ], + "score": 1.0, + "content": "consequence, methods for monotone inclusions can be used to solve monotone variational inequality", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 403, + 505, + 414 + ], + "spans": [ + { + "bbox": [ + 105, + 403, + 505, + 414 + ], + "score": 1.0, + "content": "problems. The reverse, however, may not be true. For example, the analysis of the extragradient", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 413, + 505, + 426 + ], + "spans": [ + { + "bbox": [ + 105, + 413, + 332, + 426 + ], + "score": 1.0, + "content": "method (Korpelevich, 1977) relies on the second operator", + "type": "text" + }, + { + "bbox": [ + 333, + 414, + 346, + 424 + ], + "score": 0.89, + "content": "N _ { \\mathcal { C } }", + "type": "inline_equation" + }, + { + "bbox": [ + 347, + 413, + 505, + 426 + ], + "score": 1.0, + "content": "in (73) being a normal cone, as opposed", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 424, + 506, + 437 + ], + "spans": [ + { + "bbox": [ + 105, + 424, + 506, + 437 + ], + "score": 1.0, + "content": "to a more general monotone operator. 
We are not aware of any direct extension of the extragradient", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 434, + 505, + 449 + ], + "spans": [ + { + "bbox": [ + 105, + 434, + 505, + 449 + ], + "score": 1.0, + "content": "method’s analysis allowing a more general resolvent to be used in place of the projection map", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 106, + 446, + 194, + 459 + ], + "spans": [ + { + "bbox": [ + 106, + 447, + 176, + 459 + ], + "score": 1.0, + "content": "corresponding to", + "type": "text" + }, + { + "bbox": [ + 177, + 446, + 190, + 457 + ], + "score": 0.89, + "content": "N _ { \\mathcal { C } }", + "type": "inline_equation" + }, + { + "bbox": [ + 191, + 447, + 194, + 459 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 28 + } + ], + "index": 24.5 + }, + { + "type": "text", + "bbox": [ + 106, + 469, + 504, + 502 + ], + "lines": [ + { + "bbox": [ + 105, + 468, + 505, + 483 + ], + "spans": [ + { + "bbox": [ + 105, + 468, + 505, + 483 + ], + "score": 1.0, + "content": "The Restricted Gap Function There is a disadvantage to pursuing convergence rates based on", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 106, + 480, + 505, + 492 + ], + "spans": [ + { + "bbox": [ + 106, + 480, + 505, + 492 + ], + "score": 1.0, + "content": "variational inequalities (as in BΓΆhm et al. (2020) and Alacaoglu et al. (2021)) rather than monotone", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 106, + 492, + 474, + 504 + ], + "spans": [ + { + "bbox": [ + 106, + 492, + 474, + 504 + ], + "score": 1.0, + "content": "inclusions. 
Convergence rate analyses for variational inequalities focus on the gap function:", + "type": "text" + } + ], + "index": 31 + } + ], + "index": 30 + }, + { + "type": "interline_equation", + "bbox": [ + 242, + 503, + 369, + 525 + ], + "lines": [ + { + "bbox": [ + 242, + 503, + 369, + 525 + ], + "spans": [ + { + "bbox": [ + 242, + 503, + 369, + 525 + ], + "score": 0.94, + "content": "G _ { { \\mathcal C } } ( z ) \\doteq \\operatorname* { s u p } _ { z ^ { \\prime } \\in { \\mathcal C } } B ( z ^ { \\prime } ) ^ { \\top } ( z - z ^ { \\prime } ) .", + "type": "interline_equation", + "image_path": "45c8199cfc83f22b1c6ed9f1ade0c07c7b8cfbba9cfc235961eb41d81d8b2734.jpg" + } + ] + } + ], + "index": 32, + "virtual_lines": [ + { + "bbox": [ + 242, + 503, + 369, + 525 + ], + "spans": [], + "index": 32 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 526, + 505, + 570 + ], + "lines": [ + { + "bbox": [ + 105, + 526, + 506, + 540 + ], + "spans": [ + { + "bbox": [ + 105, + 526, + 194, + 540 + ], + "score": 1.0, + "content": "It can be shown that", + "type": "text" + }, + { + "bbox": [ + 195, + 527, + 243, + 539 + ], + "score": 0.93, + "content": "G _ { \\mathcal { C } } ( z ) \\geq 0", + "type": "inline_equation" + }, + { + "bbox": [ + 244, + 526, + 263, + 540 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 263, + 527, + 312, + 539 + ], + "score": 0.91, + "content": "G _ { \\mathcal { C } } ( z ) = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 312, + 526, + 372, + 540 + ], + "score": 1.0, + "content": "if and only if", + "type": "text" + }, + { + "bbox": [ + 372, + 529, + 378, + 537 + ], + "score": 0.76, + "content": "z", + "type": "inline_equation" + }, + { + "bbox": [ + 379, + 526, + 506, + 540 + ], + "score": 1.0, + "content": "solves (72). 
However, (74) is", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 104, + 536, + 506, + 551 + ], + "spans": [ + { + "bbox": [ + 104, + 536, + 288, + 551 + ], + "score": 1.0, + "content": "meaningless for most problems, since unless", + "type": "text" + }, + { + "bbox": [ + 288, + 539, + 295, + 547 + ], + "score": 0.83, + "content": "\\mathcal { C }", + "type": "inline_equation" + }, + { + "bbox": [ + 295, + 536, + 344, + 551 + ], + "score": 1.0, + "content": "is compact,", + "type": "text" + }, + { + "bbox": [ + 345, + 537, + 371, + 550 + ], + "score": 0.93, + "content": "G \\overset { \\cdot } { c } ( z )", + "type": "inline_equation" + }, + { + "bbox": [ + 372, + 536, + 454, + 551 + ], + "score": 1.0, + "content": "is typically equal to", + "type": "text" + }, + { + "bbox": [ + 454, + 539, + 473, + 548 + ], + "score": 0.88, + "content": "+ \\infty", + "type": "inline_equation" + }, + { + "bbox": [ + 474, + 536, + 506, + 551 + ], + "score": 1.0, + "content": "for any", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 549, + 505, + 561 + ], + "spans": [ + { + "bbox": [ + 105, + 549, + 505, + 561 + ], + "score": 1.0, + "content": "nonsolution (Diakonikolas, 2020). 
Thus researchers instead focus on the restricted gap function", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 559, + 177, + 572 + ], + "spans": [ + { + "bbox": [ + 105, + 559, + 177, + 572 + ], + "score": 1.0, + "content": "(Nesterov, 2007)", + "type": "text" + } + ], + "index": 36 + } + ], + "index": 34.5 + }, + { + "type": "interline_equation", + "bbox": [ + 239, + 570, + 372, + 593 + ], + "lines": [ + { + "bbox": [ + 239, + 570, + 372, + 593 + ], + "spans": [ + { + "bbox": [ + 239, + 570, + 372, + 593 + ], + "score": 0.94, + "content": "G _ { { \\mathcal C } _ { 2 } } ( z ) \\doteq \\operatorname* { s u p } _ { z ^ { \\prime } \\in { \\mathcal C } _ { 2 } } B ( z ^ { \\prime } ) ^ { \\top } ( z - z ^ { \\prime } ) .", + "type": "interline_equation", + "image_path": "b3c7ac50ac762d88a20ea79a90e45f2f3a4a225fd3302e905011d704dce56117.jpg" + } + ] + } + ], + "index": 37, + "virtual_lines": [ + { + "bbox": [ + 239, + 570, + 372, + 593 + ], + "spans": [], + "index": 37 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 594, + 505, + 682 + ], + "lines": [ + { + "bbox": [ + 106, + 594, + 506, + 606 + ], + "spans": [ + { + "bbox": [ + 106, + 594, + 133, + 606 + ], + "score": 1.0, + "content": "where", + "type": "text" + }, + { + "bbox": [ + 133, + 595, + 144, + 605 + ], + "score": 0.88, + "content": "\\mathcal { C } _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 144, + 594, + 492, + 606 + ], + "score": 1.0, + "content": "is an arbitrary compact set. 
However, now the results are only meaningful over the set", + "type": "text" + }, + { + "bbox": [ + 492, + 594, + 503, + 605 + ], + "score": 0.87, + "content": "\\mathcal { C } _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 503, + 594, + 506, + 606 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 104, + 604, + 506, + 618 + ], + "spans": [ + { + "bbox": [ + 104, + 604, + 131, + 618 + ], + "score": 1.0, + "content": "Thus,", + "type": "text" + }, + { + "bbox": [ + 131, + 605, + 142, + 616 + ], + "score": 0.87, + "content": "\\mathcal { C } _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 142, + 604, + 506, + 618 + ], + "score": 1.0, + "content": "must be chosen large enough so that the iterates of the algorithm remain in the interior of", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 106, + 615, + 506, + 629 + ], + "spans": [ + { + "bbox": [ + 106, + 616, + 117, + 627 + ], + "score": 0.84, + "content": "\\mathcal { C } _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 118, + 615, + 471, + 629 + ], + "score": 1.0, + "content": "(BΓΆhm et al., 2020). Further, the convergence rate bound depends on the diameter of", + "type": "text" + }, + { + "bbox": [ + 472, + 616, + 483, + 627 + ], + "score": 0.87, + "content": "\\mathcal { C } _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 483, + 615, + 506, + 629 + ], + "score": 1.0, + "content": ". For", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 626, + 506, + 640 + ], + "spans": [ + { + "bbox": [ + 105, + 626, + 506, + 640 + ], + "score": 1.0, + "content": "some algorithms (Mokhtari et al., 2020) a valid set is provided which bounds the iterates. However", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 637, + 505, + 650 + ], + "spans": [ + { + "bbox": [ + 105, + 637, + 505, + 650 + ], + "score": 1.0, + "content": "BΓΆhm et al. (2020) and Alacaoglu et al. 
(2021) do not provide one, although in principle it could be", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 648, + 506, + 662 + ], + "spans": [ + { + "bbox": [ + 105, + 648, + 506, + 662 + ], + "score": 1.0, + "content": "done so long as the ergodic sequence can be bounded almost-surely. Thus, the convergence rates", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 106, + 660, + 506, + 672 + ], + "spans": [ + { + "bbox": [ + 106, + 660, + 506, + 672 + ], + "score": 1.0, + "content": "depending on (75) in BΓΆhm et al. (2020) and Alacaoglu et al. (2021) are somewhat incomplete in that", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 106, + 671, + 251, + 683 + ], + "spans": [ + { + "bbox": [ + 106, + 671, + 251, + 683 + ], + "score": 1.0, + "content": "they depend on unknown constants.", + "type": "text" + } + ], + "index": 45 + } + ], + "index": 41.5 + }, + { + "type": "text", + "bbox": [ + 107, + 687, + 504, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 687, + 506, + 701 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 506, + 701 + ], + "score": 1.0, + "content": "In contrast, rates based on the approximation residual in the monotone inclusion setting, including", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 105, + 699, + 506, + 711 + ], + "spans": [ + { + "bbox": [ + 105, + 699, + 506, + 711 + ], + "score": 1.0, + "content": "ours given in (57)–(58), completely avoid this pitfall. 
There is no need to select a compact set", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 106, + 710, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 106, + 710, + 505, + 722 + ], + "score": 1.0, + "content": "containing the algorithm’s iterates and the constants in our rates are all explicit or depend on standard", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 106, + 721, + 309, + 732 + ], + "spans": [ + { + "bbox": [ + 106, + 721, + 309, + 732 + ], + "score": 1.0, + "content": "quantities such as the initial distance to a solution.", + "type": "text" + } + ], + "index": 49 + } + ], + "index": 47.5 + } + ], + "page_idx": 30, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 27, + 308, + 37 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 308, + 38 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 310, + 760 + ], + "lines": [ + { + "bbox": [ + 298, + 750, + 312, + 765 + ], + "spans": [ + { + "bbox": [ + 298, + 750, + 312, + 765 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 15, + "width": 14 + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "title", + "bbox": [ + 107, + 82, + 437, + 94 + ], + "lines": [ + { + "bbox": [ + 105, + 82, + 439, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 439, + 95 + ], + "score": 1.0, + "content": "F.7 BENEFITS AND DRAWBACKS OF THE PRODUCT SPACE REFORMULATION", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 106, + 102, + 505, + 224 + ], + "lines": [ + { + "bbox": [ + 105, + 102, + 506, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 102, + 506, + 116 + ], + "score": 1.0, + "content": "The main benefit of the product space reformulation (PSR) is that it allows one to use 
familiar", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 114, + 505, + 127 + ], + "spans": [ + { + "bbox": [ + 105, + 114, + 267, + 127 + ], + "score": 1.0, + "content": "2-operator splitting schemes for solving", + "type": "text" + }, + { + "bbox": [ + 267, + 114, + 342, + 126 + ], + "score": 0.93, + "content": "0 \\in \\mathcal { A } ( q ) + \\mathcal { B } ( q )", + "type": "inline_equation" + }, + { + "bbox": [ + 343, + 114, + 505, + 127 + ], + "score": 1.0, + "content": "to solve the more complicated recursion", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 125, + 506, + 137 + ], + "spans": [ + { + "bbox": [ + 105, + 125, + 375, + 137 + ], + "score": 1.0, + "content": "(1). However, one drawback of this approach is that the operator", + "type": "text" + }, + { + "bbox": [ + 375, + 126, + 386, + 135 + ], + "score": 0.83, + "content": "\\mathcal { B }", + "type": "inline_equation" + }, + { + "bbox": [ + 387, + 125, + 506, + 137 + ], + "score": 1.0, + "content": ", defined in (64), combines a", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 135, + 506, + 149 + ], + "spans": [ + { + "bbox": [ + 105, + 135, + 361, + 149 + ], + "score": 1.0, + "content": "skew-symmetric consensus matrix with the Lipschitz operator", + "type": "text" + }, + { + "bbox": [ + 361, + 136, + 370, + 146 + ], + "score": 0.78, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 371, + 135, + 411, + 149 + ], + "score": 1.0, + "content": ". 
Treating", + "type": "text" + }, + { + "bbox": [ + 412, + 136, + 423, + 146 + ], + "score": 0.84, + "content": "\\mathcal { B }", + "type": "inline_equation" + }, + { + "bbox": [ + 423, + 135, + 506, + 149 + ], + "score": 1.0, + "content": "as a single operator", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 147, + 506, + 159 + ], + "spans": [ + { + "bbox": [ + 105, + 147, + 428, + 159 + ], + "score": 1.0, + "content": "necessitates using a single stepsize for both of its constituent operators, but the", + "type": "text" + }, + { + "bbox": [ + 428, + 147, + 438, + 157 + ], + "score": 0.8, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 438, + 147, + 506, + 159 + ], + "score": 1.0, + "content": "component will", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 158, + 506, + 172 + ], + "spans": [ + { + "bbox": [ + 105, + 158, + 506, + 172 + ], + "score": 1.0, + "content": "generally have a much larger Lipschitz constant than the skew part, necessitating a smaller stepsize", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 168, + 505, + 181 + ], + "spans": [ + { + "bbox": [ + 105, + 168, + 505, + 181 + ], + "score": 1.0, + "content": "than is ideal for the skew operator. This difficulty can be countered by using different stepsizes", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 179, + 505, + 192 + ], + "spans": [ + { + "bbox": [ + 105, + 179, + 505, + 192 + ], + "score": 1.0, + "content": "for the primal and dual components, but that strategy introduces additional tuning parameters. 
In", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 190, + 505, + 203 + ], + "spans": [ + { + "bbox": [ + 106, + 190, + 505, + 203 + ], + "score": 1.0, + "content": "other works, methods based on PSR have exhibited slower convergence than deterministic projective", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 202, + 506, + 214 + ], + "spans": [ + { + "bbox": [ + 105, + 202, + 506, + 214 + ], + "score": 1.0, + "content": "splitting methods (Johnstone & Eckstein, 2021; 2020b). However, in our experiments in Section 7,", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 212, + 234, + 225 + ], + "spans": [ + { + "bbox": [ + 106, + 212, + 234, + 225 + ], + "score": 1.0, + "content": "the performance is comparable.", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 6, + "bbox_fs": [ + 105, + 102, + 506, + 225 + ] + }, + { + "type": "title", + "bbox": [ + 108, + 239, + 276, + 252 + ], + "lines": [ + { + "bbox": [ + 105, + 238, + 278, + 255 + ], + "spans": [ + { + "bbox": [ + 105, + 238, + 278, + 255 + ], + "score": 1.0, + "content": "G VARIATIONAL INEQUALITIES", + "type": "text" + } + ], + "index": 12 + } + ], + "index": 12 + }, + { + "type": "text", + "bbox": [ + 108, + 263, + 503, + 286 + ], + "lines": [ + { + "bbox": [ + 105, + 262, + 505, + 277 + ], + "spans": [ + { + "bbox": [ + 105, + 262, + 170, + 277 + ], + "score": 1.0, + "content": "For a mapping", + "type": "text" + }, + { + "bbox": [ + 170, + 263, + 231, + 273 + ], + "score": 0.92, + "content": "B : \\mathbb { R } ^ { d } \\mathbb { R } ^ { d }", + "type": "inline_equation" + }, + { + "bbox": [ + 231, + 262, + 351, + 277 + ], + "score": 1.0, + "content": "and a closed and convex set", + "type": "text" + }, + { + "bbox": [ + 352, + 264, + 358, + 273 + ], + "score": 0.8, + "content": "\\mathcal { C }", + "type": "inline_equation" + }, + { + "bbox": [ + 359, + 262, + 505, + 277 + ], + "score": 1.0, + "content": ", the variational 
inequality problem", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 275, + 307, + 286 + ], + "spans": [ + { + "bbox": [ + 105, + 275, + 237, + 286 + ], + "score": 1.0, + "content": "(Harker & Pang, 1990) is to find", + "type": "text" + }, + { + "bbox": [ + 238, + 275, + 266, + 285 + ], + "score": 0.91, + "content": "z ^ { \\ast } \\in \\mathcal { C }", + "type": "inline_equation" + }, + { + "bbox": [ + 267, + 275, + 307, + 286 + ], + "score": 1.0, + "content": "such that", + "type": "text" + } + ], + "index": 14 + } + ], + "index": 13.5, + "bbox_fs": [ + 105, + 262, + 505, + 286 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 244, + 287, + 367, + 302 + ], + "lines": [ + { + "bbox": [ + 244, + 287, + 367, + 302 + ], + "spans": [ + { + "bbox": [ + 244, + 287, + 367, + 302 + ], + "score": 0.91, + "content": "B ( z ^ { * } ) ^ { \\top } ( z - z ^ { * } ) \\geq 0 , \\forall z \\in { \\mathcal { C } } .", + "type": "interline_equation", + "image_path": "7363e137bd9d3e57b042782850801a76b88095c5d51c497025360deb38801bea.jpg" + } + ] + } + ], + "index": 15, + "virtual_lines": [ + { + "bbox": [ + 244, + 287, + 367, + 302 + ], + "spans": [], + "index": 15 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 303, + 399, + 314 + ], + "lines": [ + { + "bbox": [ + 106, + 302, + 399, + 316 + ], + "spans": [ + { + "bbox": [ + 106, + 302, + 399, + 316 + ], + "score": 1.0, + "content": "Consider the normal cone mapping discussed in Section 2 and defined as", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 16, + "bbox_fs": [ + 106, + 302, + 399, + 316 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 226, + 315, + 385, + 330 + ], + "lines": [ + { + "bbox": [ + 226, + 315, + 385, + 330 + ], + "spans": [ + { + "bbox": [ + 226, + 315, + 385, + 330 + ], + "score": 0.91, + "content": "N _ { { \\mathcal { C } } } ( x ) \\doteq \\{ g : g ^ { \\top } ( y - x ) \\le 0 \\ \\forall y \\in { \\mathcal { C } } \\}", + "type": 
"interline_equation", + "image_path": "a3ba3311c6ed414d58176f577330a1ad43554d1ea0c2294617992ad703954d54.jpg" + } + ] + } + ], + "index": 17, + "virtual_lines": [ + { + "bbox": [ + 226, + 315, + 385, + 330 + ], + "spans": [], + "index": 17 + } + ] + }, + { + "type": "text", + "bbox": [ + 104, + 331, + 504, + 353 + ], + "lines": [ + { + "bbox": [ + 106, + 331, + 505, + 344 + ], + "spans": [ + { + "bbox": [ + 106, + 331, + 307, + 344 + ], + "score": 1.0, + "content": "It is easily seen that (72) is equivalent to finding", + "type": "text" + }, + { + "bbox": [ + 307, + 332, + 318, + 342 + ], + "score": 0.86, + "content": "z ^ { * }", + "type": "inline_equation" + }, + { + "bbox": [ + 319, + 331, + 360, + 344 + ], + "score": 1.0, + "content": "such that", + "type": "text" + }, + { + "bbox": [ + 360, + 331, + 438, + 343 + ], + "score": 0.93, + "content": "- B ( z ^ { * } ) \\in N _ { \\mathcal { C } } ( z ^ { * } )", + "type": "inline_equation" + }, + { + "bbox": [ + 439, + 331, + 484, + 344 + ], + "score": 1.0, + "content": ". 
Hence, if", + "type": "text" + }, + { + "bbox": [ + 485, + 332, + 494, + 341 + ], + "score": 0.83, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 495, + 331, + 505, + 344 + ], + "score": 1.0, + "content": "is", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 343, + 329, + 354 + ], + "spans": [ + { + "bbox": [ + 105, + 343, + 329, + 354 + ], + "score": 1.0, + "content": "monotone, (72) is equivalent to the monotone inclusion", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 18.5, + "bbox_fs": [ + 105, + 331, + 505, + 354 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 260, + 354, + 351, + 368 + ], + "lines": [ + { + "bbox": [ + 260, + 354, + 351, + 368 + ], + "spans": [ + { + "bbox": [ + 260, + 354, + 351, + 368 + ], + "score": 0.91, + "content": "0 \\in B ( z ^ { * } ) + N _ { \\cal { C } } ( z ^ { * } ) .", + "type": "interline_equation", + "image_path": "3c03e1f04f0304a1368e3e3b7053ce68dfc5a0e546ad31d95dd9c2efb339f212.jpg" + } + ] + } + ], + "index": 20, + "virtual_lines": [ + { + "bbox": [ + 260, + 354, + 351, + 368 + ], + "spans": [], + "index": 20 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 369, + 505, + 458 + ], + "lines": [ + { + "bbox": [ + 105, + 369, + 506, + 382 + ], + "spans": [ + { + "bbox": [ + 105, + 369, + 506, + 382 + ], + "score": 1.0, + "content": "Thus, monotone variational inequalities are a special case of monotone inclusions with two operators,", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 106, + 381, + 506, + 393 + ], + "spans": [ + { + "bbox": [ + 106, + 381, + 470, + 393 + ], + "score": 1.0, + "content": "one of which is single-valued and the other is the normal cone map of the constraint set", + "type": "text" + }, + { + "bbox": [ + 471, + 381, + 478, + 390 + ], + "score": 0.73, + "content": "\\mathcal { C }", + "type": "inline_equation" + }, + { + "bbox": [ + 478, + 381, + 506, + 393 + ], + "score": 1.0, + "content": ". 
As a", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 106, + 392, + 505, + 404 + ], + "spans": [ + { + "bbox": [ + 106, + 392, + 505, + 404 + ], + "score": 1.0, + "content": "consequence, methods for monotone inclusions can be used to solve monotone variational inequality", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 403, + 505, + 414 + ], + "spans": [ + { + "bbox": [ + 105, + 403, + 505, + 414 + ], + "score": 1.0, + "content": "problems. The reverse, however, may not be true. For example, the analysis of the extragradient", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 413, + 505, + 426 + ], + "spans": [ + { + "bbox": [ + 105, + 413, + 332, + 426 + ], + "score": 1.0, + "content": "method (Korpelevich, 1977) relies on the second operator", + "type": "text" + }, + { + "bbox": [ + 333, + 414, + 346, + 424 + ], + "score": 0.89, + "content": "N _ { \\mathcal { C } }", + "type": "inline_equation" + }, + { + "bbox": [ + 347, + 413, + 505, + 426 + ], + "score": 1.0, + "content": "in (73) being a normal cone, as opposed", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 424, + 506, + 437 + ], + "spans": [ + { + "bbox": [ + 105, + 424, + 506, + 437 + ], + "score": 1.0, + "content": "to a more general monotone operator. 
We are not aware of any direct extension of the extragradient", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 434, + 505, + 449 + ], + "spans": [ + { + "bbox": [ + 105, + 434, + 505, + 449 + ], + "score": 1.0, + "content": "method’s analysis allowing a more general resolvent to be used in place of the projection map", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 106, + 446, + 194, + 459 + ], + "spans": [ + { + "bbox": [ + 106, + 447, + 176, + 459 + ], + "score": 1.0, + "content": "corresponding to", + "type": "text" + }, + { + "bbox": [ + 177, + 446, + 190, + 457 + ], + "score": 0.89, + "content": "N _ { \\mathcal { C } }", + "type": "inline_equation" + }, + { + "bbox": [ + 191, + 447, + 194, + 459 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 28 + } + ], + "index": 24.5, + "bbox_fs": [ + 105, + 369, + 506, + 459 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 469, + 504, + 502 + ], + "lines": [ + { + "bbox": [ + 105, + 468, + 505, + 483 + ], + "spans": [ + { + "bbox": [ + 105, + 468, + 505, + 483 + ], + "score": 1.0, + "content": "The Restricted Gap Function There is a disadvantage to pursuing convergence rates based on", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 106, + 480, + 505, + 492 + ], + "spans": [ + { + "bbox": [ + 106, + 480, + 505, + 492 + ], + "score": 1.0, + "content": "variational inequalities (as in BΓΆhm et al. (2020) and Alacaoglu et al. (2021)) rather than monotone", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 106, + 492, + 474, + 504 + ], + "spans": [ + { + "bbox": [ + 106, + 492, + 474, + 504 + ], + "score": 1.0, + "content": "inclusions. 
Convergence rate analyses for variational inequalities focus on the gap function:", + "type": "text" + } + ], + "index": 31 + } + ], + "index": 30, + "bbox_fs": [ + 105, + 468, + 505, + 504 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 242, + 503, + 369, + 525 + ], + "lines": [ + { + "bbox": [ + 242, + 503, + 369, + 525 + ], + "spans": [ + { + "bbox": [ + 242, + 503, + 369, + 525 + ], + "score": 0.94, + "content": "G _ { { \\mathcal C } } ( z ) \\doteq \\operatorname* { s u p } _ { z ^ { \\prime } \\in { \\mathcal C } } B ( z ^ { \\prime } ) ^ { \\top } ( z - z ^ { \\prime } ) .", + "type": "interline_equation", + "image_path": "45c8199cfc83f22b1c6ed9f1ade0c07c7b8cfbba9cfc235961eb41d81d8b2734.jpg" + } + ] + } + ], + "index": 32, + "virtual_lines": [ + { + "bbox": [ + 242, + 503, + 369, + 525 + ], + "spans": [], + "index": 32 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 526, + 505, + 570 + ], + "lines": [ + { + "bbox": [ + 105, + 526, + 506, + 540 + ], + "spans": [ + { + "bbox": [ + 105, + 526, + 194, + 540 + ], + "score": 1.0, + "content": "It can be shown that", + "type": "text" + }, + { + "bbox": [ + 195, + 527, + 243, + 539 + ], + "score": 0.93, + "content": "G _ { \\mathcal { C } } ( z ) \\geq 0", + "type": "inline_equation" + }, + { + "bbox": [ + 244, + 526, + 263, + 540 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 263, + 527, + 312, + 539 + ], + "score": 0.91, + "content": "G _ { \\mathcal { C } } ( z ) = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 312, + 526, + 372, + 540 + ], + "score": 1.0, + "content": "if and only if", + "type": "text" + }, + { + "bbox": [ + 372, + 529, + 378, + 537 + ], + "score": 0.76, + "content": "z", + "type": "inline_equation" + }, + { + "bbox": [ + 379, + 526, + 506, + 540 + ], + "score": 1.0, + "content": "solves (72). 
However, (74) is", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 104, + 536, + 506, + 551 + ], + "spans": [ + { + "bbox": [ + 104, + 536, + 288, + 551 + ], + "score": 1.0, + "content": "meaningless for most problems, since unless", + "type": "text" + }, + { + "bbox": [ + 288, + 539, + 295, + 547 + ], + "score": 0.83, + "content": "\\mathcal { C }", + "type": "inline_equation" + }, + { + "bbox": [ + 295, + 536, + 344, + 551 + ], + "score": 1.0, + "content": "is compact,", + "type": "text" + }, + { + "bbox": [ + 345, + 537, + 371, + 550 + ], + "score": 0.93, + "content": "G \\overset { \\cdot } { c } ( z )", + "type": "inline_equation" + }, + { + "bbox": [ + 372, + 536, + 454, + 551 + ], + "score": 1.0, + "content": "is typically equal to", + "type": "text" + }, + { + "bbox": [ + 454, + 539, + 473, + 548 + ], + "score": 0.88, + "content": "+ \\infty", + "type": "inline_equation" + }, + { + "bbox": [ + 474, + 536, + 506, + 551 + ], + "score": 1.0, + "content": "for any", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 549, + 505, + 561 + ], + "spans": [ + { + "bbox": [ + 105, + 549, + 505, + 561 + ], + "score": 1.0, + "content": "nonsolution (Diakonikolas, 2020). 
Thus researchers instead focus on the restricted gap function", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 559, + 177, + 572 + ], + "spans": [ + { + "bbox": [ + 105, + 559, + 177, + 572 + ], + "score": 1.0, + "content": "(Nesterov, 2007)", + "type": "text" + } + ], + "index": 36 + } + ], + "index": 34.5, + "bbox_fs": [ + 104, + 526, + 506, + 572 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 239, + 570, + 372, + 593 + ], + "lines": [ + { + "bbox": [ + 239, + 570, + 372, + 593 + ], + "spans": [ + { + "bbox": [ + 239, + 570, + 372, + 593 + ], + "score": 0.94, + "content": "G _ { { \\mathcal C } _ { 2 } } ( z ) \\doteq \\operatorname* { s u p } _ { z ^ { \\prime } \\in { \\mathcal C } _ { 2 } } B ( z ^ { \\prime } ) ^ { \\top } ( z - z ^ { \\prime } ) .", + "type": "interline_equation", + "image_path": "b3c7ac50ac762d88a20ea79a90e45f2f3a4a225fd3302e905011d704dce56117.jpg" + } + ] + } + ], + "index": 37, + "virtual_lines": [ + { + "bbox": [ + 239, + 570, + 372, + 593 + ], + "spans": [], + "index": 37 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 594, + 505, + 682 + ], + "lines": [ + { + "bbox": [ + 106, + 594, + 506, + 606 + ], + "spans": [ + { + "bbox": [ + 106, + 594, + 133, + 606 + ], + "score": 1.0, + "content": "where", + "type": "text" + }, + { + "bbox": [ + 133, + 595, + 144, + 605 + ], + "score": 0.88, + "content": "\\mathcal { C } _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 144, + 594, + 492, + 606 + ], + "score": 1.0, + "content": "is an arbitrary compact set. 
However, now the results are only meaningful over the set", + "type": "text" + }, + { + "bbox": [ + 492, + 594, + 503, + 605 + ], + "score": 0.87, + "content": "\\mathcal { C } _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 503, + 594, + 506, + 606 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 104, + 604, + 506, + 618 + ], + "spans": [ + { + "bbox": [ + 104, + 604, + 131, + 618 + ], + "score": 1.0, + "content": "Thus,", + "type": "text" + }, + { + "bbox": [ + 131, + 605, + 142, + 616 + ], + "score": 0.87, + "content": "\\mathcal { C } _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 142, + 604, + 506, + 618 + ], + "score": 1.0, + "content": "must be chosen large enough so that the iterates of the algorithm remain in the interior of", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 106, + 615, + 506, + 629 + ], + "spans": [ + { + "bbox": [ + 106, + 616, + 117, + 627 + ], + "score": 0.84, + "content": "\\mathcal { C } _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 118, + 615, + 471, + 629 + ], + "score": 1.0, + "content": "(BΓΆhm et al., 2020). Further, the convergence rate bound depends on the diameter of", + "type": "text" + }, + { + "bbox": [ + 472, + 616, + 483, + 627 + ], + "score": 0.87, + "content": "\\mathcal { C } _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 483, + 615, + 506, + 629 + ], + "score": 1.0, + "content": ". For", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 626, + 506, + 640 + ], + "spans": [ + { + "bbox": [ + 105, + 626, + 506, + 640 + ], + "score": 1.0, + "content": "some algorithms (Mokhtari et al., 2020) a valid set is provided which bounds the iterates. However", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 637, + 505, + 650 + ], + "spans": [ + { + "bbox": [ + 105, + 637, + 505, + 650 + ], + "score": 1.0, + "content": "BΓΆhm et al. (2020) and Alacaoglu et al. 
(2021) do not provide one, although in principle it could be", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 648, + 506, + 662 + ], + "spans": [ + { + "bbox": [ + 105, + 648, + 506, + 662 + ], + "score": 1.0, + "content": "done so long as the ergodic sequence can be bounded almost-surely. Thus, the convergence rates", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 106, + 660, + 506, + 672 + ], + "spans": [ + { + "bbox": [ + 106, + 660, + 506, + 672 + ], + "score": 1.0, + "content": "depending on (75) in BΓΆhm et al. (2020) and Alacaoglu et al. (2021) are somewhat incomplete in that", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 106, + 671, + 251, + 683 + ], + "spans": [ + { + "bbox": [ + 106, + 671, + 251, + 683 + ], + "score": 1.0, + "content": "they depend on unknown constants.", + "type": "text" + } + ], + "index": 45 + } + ], + "index": 41.5, + "bbox_fs": [ + 104, + 594, + 506, + 683 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 687, + 504, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 687, + 506, + 701 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 506, + 701 + ], + "score": 1.0, + "content": "In contrast, rates based on the approximation residual in the monotone inclusion setting, including", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 105, + 699, + 506, + 711 + ], + "spans": [ + { + "bbox": [ + 105, + 699, + 506, + 711 + ], + "score": 1.0, + "content": "ours given in (57)–(58), completely avoid this pitfall. 
There is no need to select a compact set", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 106, + 710, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 106, + 710, + 505, + 722 + ], + "score": 1.0, + "content": "containing the algorithm’s iterates and the constants in our rates are all explicit or depend on standard", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 106, + 721, + 309, + 732 + ], + "spans": [ + { + "bbox": [ + 106, + 721, + 309, + 732 + ], + "score": 1.0, + "content": "quantities such as the initial distance to a solution.", + "type": "text" + } + ], + "index": 49 + } + ], + "index": 47.5, + "bbox_fs": [ + 105, + 687, + 506, + 732 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "title", + "bbox": [ + 107, + 80, + 333, + 94 + ], + "lines": [ + { + "bbox": [ + 105, + 79, + 334, + 97 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 334, + 97 + ], + "score": 1.0, + "content": "H MEMORY-SAVING TECHNIQUE FOR SPS", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 106, + 105, + 505, + 198 + ], + "lines": [ + { + "bbox": [ + 105, + 105, + 506, + 119 + ], + "spans": [ + { + "bbox": [ + 105, + 105, + 164, + 119 + ], + "score": 1.0, + "content": "The variables", + "type": "text" + }, + { + "bbox": [ + 164, + 105, + 190, + 118 + ], + "score": 0.34, + "content": "t _ { i } ^ { k } , x _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 191, + 105, + 213, + 119 + ], + "score": 1.0, + "content": ", and", + "type": "text" + }, + { + "bbox": [ + 213, + 105, + 225, + 118 + ], + "score": 0.9, + "content": "y _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 225, + 105, + 403, + 119 + ], + "score": 1.0, + "content": "on lines 3-5 of SPS are stored in variables", + "type": "text" + }, + { + "bbox": [ + 419, + 105, + 438, + 119 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 445, + 105, + 506, + 119 + ], + "score": 
1.0, + "content": ". Another two", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 102, + 111, + 510, + 136 + ], + "spans": [ + { + "bbox": [ + 102, + 111, + 146, + 136 + ], + "score": 1.0, + "content": "variables", + "type": "text" + }, + { + "bbox": [ + 146, + 119, + 153, + 128 + ], + "score": 0.76, + "content": "\\bar { x }", + "type": "inline_equation" + }, + { + "bbox": [ + 154, + 111, + 173, + 136 + ], + "score": 1.0, + "content": "iand", + "type": "text" + }, + { + "bbox": [ + 173, + 119, + 180, + 130 + ], + "score": 0.81, + "content": "\\bar { y }", + "type": "inline_equation" + }, + { + "bbox": [ + 180, + 111, + 239, + 136 + ], + "score": 1.0, + "content": "i i keep track of", + "type": "text" + }, + { + "bbox": [ + 240, + 117, + 277, + 131 + ], + "score": 0.93, + "content": "\\textstyle \\sum _ { i = 1 } ^ { n } x _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 277, + 111, + 297, + 136 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 297, + 117, + 334, + 130 + ], + "score": 0.92, + "content": "\\textstyle \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 334, + 111, + 477, + 136 + ], + "score": 1.0, + "content": ". The dual variables are stored as", + "type": "text" + }, + { + "bbox": [ + 477, + 120, + 489, + 129 + ], + "score": 0.85, + "content": "w _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 489, + 111, + 510, + 136 + ], + "score": 1.0, + "content": "for", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 129, + 506, + 144 + ], + "spans": [ + { + "bbox": [ + 106, + 131, + 142, + 141 + ], + "score": 0.89, + "content": "i \\in 1 . . 
n", + "type": "inline_equation" + }, + { + "bbox": [ + 142, + 129, + 252, + 144 + ], + "score": 1.0, + "content": "and the primal variable as", + "type": "text" + }, + { + "bbox": [ + 253, + 133, + 259, + 141 + ], + "score": 0.74, + "content": "z", + "type": "inline_equation" + }, + { + "bbox": [ + 259, + 129, + 289, + 144 + ], + "score": 1.0, + "content": ". Once", + "type": "text" + }, + { + "bbox": [ + 289, + 131, + 321, + 143 + ], + "score": 0.92, + "content": "x = x _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 322, + 129, + 394, + 144 + ], + "score": 1.0, + "content": "is computed, the", + "type": "text" + }, + { + "bbox": [ + 394, + 130, + 405, + 141 + ], + "score": 0.86, + "content": "i ^ { \\mathrm { { t h } } }", + "type": "inline_equation" + }, + { + "bbox": [ + 406, + 129, + 462, + 144 + ], + "score": 1.0, + "content": "dual variable", + "type": "text" + }, + { + "bbox": [ + 462, + 132, + 474, + 142 + ], + "score": 0.87, + "content": "w _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 475, + 129, + 506, + 144 + ], + "score": 1.0, + "content": "can be", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 141, + 505, + 154 + ], + "spans": [ + { + "bbox": [ + 105, + 141, + 185, + 154 + ], + "score": 1.0, + "content": "partially updated as", + "type": "text" + }, + { + "bbox": [ + 185, + 143, + 252, + 153 + ], + "score": 0.91, + "content": "w _ { i } w _ { i } - \\alpha _ { k } x", + "type": "inline_equation" + }, + { + "bbox": [ + 252, + 141, + 505, + 154 + ], + "score": 1.0, + "content": ". 
Once all the operators have been processed, the update for each", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 152, + 506, + 165 + ], + "spans": [ + { + "bbox": [ + 105, + 152, + 248, + 165 + ], + "score": 1.0, + "content": "dual variable may be completed via", + "type": "text" + }, + { + "bbox": [ + 248, + 152, + 356, + 164 + ], + "score": 0.9, + "content": "w _ { i } w _ { i } + \\alpha _ { k } \\bar { ( n + 1 ) } _ { . } ^ { - 1 } \\bar { x }", + "type": "inline_equation" + }, + { + "bbox": [ + 357, + 152, + 506, + 165 + ], + "score": 1.0, + "content": ". Also, the primal update is computed", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 160, + 504, + 177 + ], + "spans": [ + { + "bbox": [ + 104, + 160, + 117, + 177 + ], + "score": 1.0, + "content": "as", + "type": "text" + }, + { + "bbox": [ + 118, + 164, + 173, + 175 + ], + "score": 0.93, + "content": "z z - \\alpha _ { k } \\bar { y }", + "type": "inline_equation" + }, + { + "bbox": [ + 173, + 160, + 317, + 177 + ], + "score": 1.0, + "content": ". During the calculation loop for the", + "type": "text" + }, + { + "bbox": [ + 317, + 163, + 343, + 176 + ], + "score": 0.91, + "content": "x _ { i } ^ { k } , y _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 343, + 160, + 490, + 177 + ], + "score": 1.0, + "content": ", the terms in approximation residual", + "type": "text" + }, + { + "bbox": [ + 490, + 164, + 504, + 174 + ], + "score": 0.89, + "content": "R _ { k }", + "type": "inline_equation" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 175, + 505, + 186 + ], + "spans": [ + { + "bbox": [ + 106, + 175, + 505, + 186 + ], + "score": 1.0, + "content": "may also be accumulated one by one. 
The total total number of vector elements that must be stored is", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 185, + 147, + 198 + ], + "spans": [ + { + "bbox": [ + 106, + 185, + 142, + 198 + ], + "score": 0.91, + "content": "( n + 7 ) d", + "type": "inline_equation" + }, + { + "bbox": [ + 143, + 185, + 147, + 198 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 8 + } + ], + "index": 4.5 + }, + { + "type": "title", + "bbox": [ + 106, + 212, + 474, + 226 + ], + "lines": [ + { + "bbox": [ + 104, + 213, + 477, + 227 + ], + "spans": [ + { + "bbox": [ + 104, + 213, + 477, + 227 + ], + "score": 1.0, + "content": "I ADDITIONAL INFORMATION ABOUT THE NUMERICAL EXPERIMENTS", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 9 + }, + { + "type": "text", + "bbox": [ + 106, + 237, + 343, + 250 + ], + "lines": [ + { + "bbox": [ + 105, + 237, + 344, + 252 + ], + "spans": [ + { + "bbox": [ + 105, + 237, + 344, + 252 + ], + "score": 1.0, + "content": "We solve the following convex-concave min-max problem:", + "type": "text" + } + ], + "index": 10 + } + ], + "index": 10 + }, + { + "type": "interline_equation", + "bbox": [ + 121, + 254, + 471, + 307 + ], + "lines": [ + { + "bbox": [ + 121, + 254, + 471, + 307 + ], + "spans": [ + { + "bbox": [ + 121, + 254, + 471, + 307 + ], + "score": 0.91, + "content": "\\begin{array} { r l } { \\underset { \\beta \\in \\mathbb { R } ^ { d } } { \\operatorname* { m i n } } \\quad \\underset { \\gamma \\in \\mathbb { R } ^ { m } } { \\operatorname* { m a x } } } & { \\left\\{ \\lambda ( \\delta - \\kappa ) + \\displaystyle \\frac { 1 } { m } \\sum _ { i = 1 } ^ { m } \\Psi ( \\langle \\hat { x } _ { i } , \\beta \\rangle ) + \\displaystyle \\frac { 1 } { m } \\sum _ { i = 1 } ^ { m } \\gamma _ { i } ( \\hat { y } _ { i } \\langle \\hat { x } _ { i } , \\beta \\rangle - \\lambda \\kappa ) + c \\| \\beta \\| _ { 1 } \\right\\} } \\\\ { \\mathrm { s . t . 
} \\quad } & { \\| \\beta \\| _ { 2 } \\leq \\lambda / ( L _ { \\Psi } + 1 ) \\qquad \\| \\gamma \\| _ { \\infty } \\leq 1 . } \\end{array}", + "type": "interline_equation", + "image_path": "18cb44d386da099e52ffe5a9435b5ba27f93d0ab884a89cf537d101f2f54b20f.jpg" + } + ] + } + ], + "index": 12, + "virtual_lines": [ + { + "bbox": [ + 121, + 254, + 471, + 271.6666666666667 + ], + "spans": [], + "index": 11 + }, + { + "bbox": [ + 121, + 271.6666666666667, + 471, + 289.33333333333337 + ], + "spans": [], + "index": 12 + }, + { + "bbox": [ + 121, + 289.33333333333337, + 471, + 307.00000000000006 + ], + "spans": [], + "index": 13 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 311, + 506, + 412 + ], + "lines": [ + { + "bbox": [ + 106, + 312, + 504, + 324 + ], + "spans": [ + { + "bbox": [ + 106, + 312, + 494, + 324 + ], + "score": 1.0, + "content": "This model is identical to that of (Yu et al., 2021, Thm. 4.3) except for the addition of the", + "type": "text" + }, + { + "bbox": [ + 495, + 312, + 504, + 323 + ], + "score": 0.88, + "content": "\\ell _ { 1 }", + "type": "inline_equation" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 323, + 504, + 336 + ], + "spans": [ + { + "bbox": [ + 105, + 323, + 184, + 336 + ], + "score": 1.0, + "content": "regularization term", + "type": "text" + }, + { + "bbox": [ + 184, + 323, + 210, + 335 + ], + "score": 0.92, + "content": "c \\| \\beta \\| _ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 210, + 323, + 240, + 336 + ], + "score": 1.0, + "content": ", where", + "type": "text" + }, + { + "bbox": [ + 240, + 324, + 264, + 334 + ], + "score": 0.91, + "content": "c \\geq 0", + "type": "inline_equation" + }, + { + "bbox": [ + 264, + 323, + 496, + 336 + ], + "score": 1.0, + "content": "is a given constant. 
The goal is to learn the model weights", + "type": "text" + }, + { + "bbox": [ + 497, + 324, + 504, + 335 + ], + "score": 0.84, + "content": "\\beta", + "type": "inline_equation" + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 332, + 506, + 349 + ], + "spans": [ + { + "bbox": [ + 104, + 332, + 210, + 349 + ], + "score": 1.0, + "content": "from a training dataset of", + "type": "text" + }, + { + "bbox": [ + 210, + 336, + 220, + 344 + ], + "score": 0.78, + "content": "m", + "type": "inline_equation" + }, + { + "bbox": [ + 221, + 332, + 282, + 349 + ], + "score": 1.0, + "content": "feature vectors", + "type": "text" + }, + { + "bbox": [ + 283, + 335, + 293, + 345 + ], + "score": 0.88, + "content": "{ \\hat { x } } _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 294, + 332, + 397, + 349 + ], + "score": 1.0, + "content": "and corresponding labels", + "type": "text" + }, + { + "bbox": [ + 397, + 335, + 406, + 345 + ], + "score": 0.87, + "content": "\\hat { y } _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 406, + 332, + 506, + 349 + ], + "score": 1.0, + "content": ". 
Rather than computing", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 344, + 506, + 358 + ], + "spans": [ + { + "bbox": [ + 105, + 344, + 374, + 358 + ], + "score": 1.0, + "content": "the expected loss over the training set, the formulation uses, for each", + "type": "text" + }, + { + "bbox": [ + 375, + 345, + 382, + 356 + ], + "score": 0.83, + "content": "\\beta", + "type": "inline_equation" + }, + { + "bbox": [ + 382, + 344, + 506, + 358 + ], + "score": 1.0, + "content": ", the worst possible distribution", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 354, + 507, + 370 + ], + "spans": [ + { + "bbox": [ + 104, + 354, + 387, + 370 + ], + "score": 1.0, + "content": "within a Wasserstein-metric ball around the empirical distribution of the", + "type": "text" + }, + { + "bbox": [ + 387, + 356, + 426, + 368 + ], + "score": 0.92, + "content": "\\{ ( \\hat { x } _ { i } , \\hat { y } _ { i } ) \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 427, + 354, + 507, + 370 + ], + "score": 1.0, + "content": ", with the parameter", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 107, + 366, + 506, + 380 + ], + "spans": [ + { + "bbox": [ + 107, + 367, + 131, + 378 + ], + "score": 0.9, + "content": "\\delta \\geq 0", + "type": "inline_equation" + }, + { + "bbox": [ + 131, + 366, + 332, + 380 + ], + "score": 1.0, + "content": "giving the diameter of the ball and the parameter", + "type": "text" + }, + { + "bbox": [ + 332, + 367, + 357, + 378 + ], + "score": 0.91, + "content": "\\kappa \\geq 0", + "type": "inline_equation" + }, + { + "bbox": [ + 358, + 366, + 506, + 380 + ], + "score": 1.0, + "content": "specifying the relative weighting of", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 378, + 505, + 390 + ], + "spans": [ + { + "bbox": [ + 105, + 378, + 239, + 390 + ], + "score": 1.0, + "content": "features and labels. 
The variables", + "type": "text" + }, + { + "bbox": [ + 239, + 380, + 247, + 390 + ], + "score": 0.83, + "content": "\\gamma", + "type": "inline_equation" + }, + { + "bbox": [ + 247, + 378, + 264, + 390 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 264, + 379, + 271, + 388 + ], + "score": 0.81, + "content": "\\lambda", + "type": "inline_equation" + }, + { + "bbox": [ + 272, + 378, + 505, + 390 + ], + "score": 1.0, + "content": "parameterize the selection of this worst-case distribution in", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 104, + 388, + 505, + 402 + ], + "spans": [ + { + "bbox": [ + 104, + 388, + 227, + 402 + ], + "score": 1.0, + "content": "response to the model weights", + "type": "text" + }, + { + "bbox": [ + 227, + 390, + 234, + 401 + ], + "score": 0.84, + "content": "\\beta", + "type": "inline_equation" + }, + { + "bbox": [ + 235, + 388, + 270, + 402 + ], + "score": 1.0, + "content": ". Finally,", + "type": "text" + }, + { + "bbox": [ + 271, + 389, + 279, + 399 + ], + "score": 0.83, + "content": "\\Psi", + "type": "inline_equation" + }, + { + "bbox": [ + 280, + 388, + 379, + 402 + ], + "score": 1.0, + "content": "is the logistic loss kernel", + "type": "text" + }, + { + "bbox": [ + 380, + 389, + 454, + 400 + ], + "score": 0.89, + "content": "t \\mapsto \\log ( e ^ { t } + e ^ { - t } )", + "type": "inline_equation" + }, + { + "bbox": [ + 455, + 388, + 472, + 402 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 472, + 389, + 505, + 399 + ], + "score": 0.88, + "content": "L _ { \\Psi } = 1", + "type": "inline_equation" + } + ], + "index": 21 + }, + { + "bbox": [ + 104, + 399, + 491, + 412 + ], + "spans": [ + { + "bbox": [ + 104, + 399, + 387, + 412 + ], + "score": 1.0, + "content": "is the corresponding Lipschitz constant. 
In all the experiments, we set", + "type": "text" + }, + { + "bbox": [ + 388, + 401, + 430, + 410 + ], + "score": 0.9, + "content": "\\delta = \\kappa = 1", + "type": "inline_equation" + }, + { + "bbox": [ + 431, + 399, + 449, + 412 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 449, + 400, + 488, + 410 + ], + "score": 0.9, + "content": "c = 1 0 ^ { - 3 }", + "type": "inline_equation" + }, + { + "bbox": [ + 488, + 399, + 491, + 412 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 22 + } + ], + "index": 18 + }, + { + "type": "text", + "bbox": [ + 107, + 416, + 504, + 440 + ], + "lines": [ + { + "bbox": [ + 105, + 415, + 506, + 430 + ], + "spans": [ + { + "bbox": [ + 105, + 415, + 476, + 430 + ], + "score": 1.0, + "content": "We now show how we converted this problem to the form (1) for our experiments. Let", + "type": "text" + }, + { + "bbox": [ + 476, + 419, + 483, + 427 + ], + "score": 0.73, + "content": "z", + "type": "inline_equation" + }, + { + "bbox": [ + 483, + 415, + 506, + 430 + ], + "score": 1.0, + "content": "be a", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 427, + 246, + 440 + ], + "spans": [ + { + "bbox": [ + 106, + 427, + 163, + 440 + ], + "score": 1.0, + "content": "shorthand for", + "type": "text" + }, + { + "bbox": [ + 163, + 428, + 198, + 440 + ], + "score": 0.93, + "content": "( \\lambda , \\beta , \\gamma )", + "type": "inline_equation" + }, + { + "bbox": [ + 198, + 427, + 246, + 440 + ], + "score": 1.0, + "content": ", and define", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 23.5 + }, + { + "type": "interline_equation", + "bbox": [ + 168, + 444, + 442, + 477 + ], + "lines": [ + { + "bbox": [ + 168, + 444, + 442, + 477 + ], + "spans": [ + { + "bbox": [ + 168, + 444, + 442, + 477 + ], + "score": 0.96, + "content": "\\mathcal { L } ( z ) \\doteq \\lambda ( \\delta - \\kappa ) + \\frac { 1 } { m } \\sum _ { i = 1 } ^ { m } \\Psi ( \\langle { \\hat 
{ x } _ { i } } , \\beta \\rangle ) + \\frac { 1 } { m } \\sum _ { i = 1 } ^ { m } \\gamma _ { i } ( \\hat { y } _ { i } \\langle { \\hat { x } _ { i } } , \\beta \\rangle - \\lambda \\kappa ) .", + "type": "interline_equation", + "image_path": "aa3614c6b6306691620346b018a88bd125aca6d20f519ffa82859a5be919d1e7.jpg" + } + ] + } + ], + "index": 26, + "virtual_lines": [ + { + "bbox": [ + 168, + 444, + 442, + 455.0 + ], + "spans": [], + "index": 25 + }, + { + "bbox": [ + 168, + 455.0, + 442, + 466.0 + ], + "spans": [], + "index": 26 + }, + { + "bbox": [ + 168, + 466.0, + 442, + 477.0 + ], + "spans": [], + "index": 27 + } + ] + }, + { + "type": "text", + "bbox": [ + 108, + 481, + 501, + 504 + ], + "lines": [ + { + "bbox": [ + 105, + 480, + 503, + 494 + ], + "spans": [ + { + "bbox": [ + 105, + 480, + 503, + 494 + ], + "score": 1.0, + "content": "The first-order necessary and sufficient conditions for the convex-concave saddlepoint problem in", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 104, + 491, + 141, + 505 + ], + "spans": [ + { + "bbox": [ + 104, + 491, + 141, + 505 + ], + "score": 1.0, + "content": "(76) are", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 28.5 + }, + { + "type": "interline_equation", + "bbox": [ + 248, + 509, + 363, + 523 + ], + "lines": [ + { + "bbox": [ + 248, + 509, + 363, + 523 + ], + "spans": [ + { + "bbox": [ + 248, + 509, + 363, + 523 + ], + "score": 0.9, + "content": "0 \\in B ( z ) + A _ { 1 } ( z ) + A _ { 2 } ( z )", + "type": "interline_equation", + "image_path": "ccd93350e661533a4eb2fd3e28c8ef4fe823d80b6afb68632ffba1d7a532e259.jpg" + } + ] + } + ], + "index": 30, + "virtual_lines": [ + { + "bbox": [ + 248, + 509, + 363, + 523 + ], + "spans": [], + "index": 30 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 528, + 270, + 540 + ], + "lines": [ + { + "bbox": [ + 106, + 527, + 271, + 541 + ], + "spans": [ + { + "bbox": [ + 106, + 527, + 195, + 541 + ], + "score": 1.0, + "content": "where the vector 
field", + "type": "text" + }, + { + "bbox": [ + 195, + 528, + 217, + 540 + ], + "score": 0.92, + "content": "B ( z )", + "type": "inline_equation" + }, + { + "bbox": [ + 217, + 527, + 271, + 541 + ], + "score": 1.0, + "content": "is defined as", + "type": "text" + } + ], + "index": 31 + } + ], + "index": 31 + }, + { + "type": "interline_equation", + "bbox": [ + 253, + 545, + 356, + 579 + ], + "lines": [ + { + "bbox": [ + 253, + 545, + 356, + 579 + ], + "spans": [ + { + "bbox": [ + 253, + 545, + 356, + 579 + ], + "score": 0.94, + "content": "\\boldsymbol { B } ( z ) \\doteq \\left[ \\begin{array} { l } { \\nabla _ { \\boldsymbol { \\lambda } , \\beta } \\mathcal { L } ( z ) } \\\\ { - \\nabla _ { \\boldsymbol { \\gamma } } \\mathcal { L } ( z ) } \\end{array} \\right] ,", + "type": "interline_equation", + "image_path": "9cca8f05961d129210e381b2e73d7cdad0e42dd8c0f644cad0af15ae26b2145a.jpg" + } + ] + } + ], + "index": 32.5, + "virtual_lines": [ + { + "bbox": [ + 253, + 545, + 356, + 562.0 + ], + "spans": [], + "index": 32 + }, + { + "bbox": [ + 253, + 562.0, + 356, + 579.0 + ], + "spans": [], + "index": 33 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 584, + 126, + 595 + ], + "lines": [ + { + "bbox": [ + 105, + 583, + 126, + 595 + ], + "spans": [ + { + "bbox": [ + 105, + 583, + 126, + 595 + ], + "score": 1.0, + "content": "with", + "type": "text" + } + ], + "index": 34 + } + ], + "index": 34 + }, + { + "type": "interline_equation", + "bbox": [ + 183, + 598, + 426, + 633 + ], + "lines": [ + { + "bbox": [ + 183, + 598, + 426, + 633 + ], + "spans": [ + { + "bbox": [ + 183, + 598, + 426, + 633 + ], + "score": 0.94, + "content": "\\begin{array} { r } { \\nabla _ { \\lambda , \\beta } \\mathcal { L } ( z ) = \\left[ \\begin{array} { c } { \\delta - \\kappa ( 1 + \\frac { 1 } { m } \\sum _ { i = 1 } ^ { m } \\gamma _ { i } ) } \\\\ { \\frac { 1 } { m } \\sum _ { i = 1 } ^ { m } \\Psi ^ { \\prime } ( \\langle \\hat { x } _ { i } , \\beta \\rangle ) \\hat { x } _ { i 
} + \\frac { 1 } { m } \\sum _ { i = 1 } ^ { m } \\gamma _ { i } \\hat { y } _ { i } \\hat { x } _ { i } } \\end{array} \\right] } \\end{array}", + "type": "interline_equation", + "image_path": "85f1bf2cee31fddc6e9593a9a10edca9b6f09cf13548e4dd23fd4c2f73466f27.jpg" + } + ] + } + ], + "index": 35.5, + "virtual_lines": [ + { + "bbox": [ + 183, + 598, + 426, + 615.5 + ], + "spans": [], + "index": 35 + }, + { + "bbox": [ + 183, + 615.5, + 426, + 633.0 + ], + "spans": [], + "index": 36 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 637, + 123, + 648 + ], + "lines": [ + { + "bbox": [ + 105, + 637, + 123, + 648 + ], + "spans": [ + { + "bbox": [ + 105, + 637, + 123, + 648 + ], + "score": 1.0, + "content": "and", + "type": "text" + } + ], + "index": 37 + } + ], + "index": 37 + }, + { + "type": "interline_equation", + "bbox": [ + 225, + 651, + 386, + 705 + ], + "lines": [ + { + "bbox": [ + 225, + 651, + 386, + 705 + ], + "spans": [ + { + "bbox": [ + 225, + 651, + 386, + 705 + ], + "score": 0.93, + "content": "\\nabla _ { \\boldsymbol { \\gamma } } \\mathcal { L } ( z ) = \\left[ \\begin{array} { c } { \\frac { 1 } { m } ( \\hat { y } _ { 1 } \\langle \\hat { x } _ { 1 } , \\beta \\rangle - \\lambda \\kappa ) } \\\\ { \\vdots } \\\\ { \\frac { 1 } { m } ( \\hat { y } _ { m } \\langle \\hat { x } _ { m } , \\beta \\rangle - \\lambda \\kappa ) } \\end{array} \\right] .", + "type": "interline_equation", + "image_path": "914e4a4b20d9d95dbd3bbb4bc5dbaf285ce50d0c8096409b0c932be2678ec031.jpg" + } + ] + } + ], + "index": 39, + "virtual_lines": [ + { + "bbox": [ + 225, + 651, + 386, + 669.0 + ], + "spans": [], + "index": 38 + }, + { + "bbox": [ + 225, + 669.0, + 386, + 687.0 + ], + "spans": [], + "index": 39 + }, + { + "bbox": [ + 225, + 687.0, + 386, + 705.0 + ], + "spans": [], + "index": 40 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 709, + 506, + 732 + ], + "lines": [ + { + "bbox": [ + 106, + 709, + 506, + 722 + ], + "spans": [ + { + "bbox": [ + 106, + 709, + 
217, + 722 + ], + "score": 1.0, + "content": "It is readily confirmed that", + "type": "text" + }, + { + "bbox": [ + 218, + 710, + 227, + 720 + ], + "score": 0.83, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 227, + 709, + 461, + 722 + ], + "score": 1.0, + "content": "defined in this manner is Lipschitz. The monotonicity of", + "type": "text" + }, + { + "bbox": [ + 462, + 710, + 471, + 720 + ], + "score": 0.81, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 471, + 709, + 506, + 722 + ], + "score": 1.0, + "content": "follows", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 106, + 720, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 106, + 720, + 506, + 733 + ], + "score": 1.0, + "content": "from its being the generalized gradient of a convex-concave saddle function (Rockafellar, 1970).", + "type": "text" + } + ], + "index": 42 + } + ], + "index": 41.5 + } + ], + "page_idx": 31, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 106, + 26, + 308, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 309, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 309, + 39 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 760 + ], + "lines": [ + { + "bbox": [ + 298, + 749, + 313, + 765 + ], + "spans": [ + { + "bbox": [ + 298, + 749, + 313, + 765 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 16, + "width": 15 + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "title", + "bbox": [ + 107, + 80, + 333, + 94 + ], + "lines": [ + { + "bbox": [ + 105, + 79, + 334, + 97 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 334, + 97 + ], + "score": 1.0, + "content": "H MEMORY-SAVING TECHNIQUE FOR SPS", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 106, + 105, + 
505, + 198 + ], + "lines": [ + { + "bbox": [ + 105, + 105, + 506, + 119 + ], + "spans": [ + { + "bbox": [ + 105, + 105, + 164, + 119 + ], + "score": 1.0, + "content": "The variables", + "type": "text" + }, + { + "bbox": [ + 164, + 105, + 190, + 118 + ], + "score": 0.34, + "content": "t _ { i } ^ { k } , x _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 191, + 105, + 213, + 119 + ], + "score": 1.0, + "content": ", and", + "type": "text" + }, + { + "bbox": [ + 213, + 105, + 225, + 118 + ], + "score": 0.9, + "content": "y _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 225, + 105, + 403, + 119 + ], + "score": 1.0, + "content": "on lines 3-5 of SPS are stored in variables", + "type": "text" + }, + { + "bbox": [ + 419, + 105, + 438, + 119 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 445, + 105, + 506, + 119 + ], + "score": 1.0, + "content": ". Another two", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 102, + 111, + 510, + 136 + ], + "spans": [ + { + "bbox": [ + 102, + 111, + 146, + 136 + ], + "score": 1.0, + "content": "variables", + "type": "text" + }, + { + "bbox": [ + 146, + 119, + 153, + 128 + ], + "score": 0.76, + "content": "\\bar { x }", + "type": "inline_equation" + }, + { + "bbox": [ + 154, + 111, + 173, + 136 + ], + "score": 1.0, + "content": "iand", + "type": "text" + }, + { + "bbox": [ + 173, + 119, + 180, + 130 + ], + "score": 0.81, + "content": "\\bar { y }", + "type": "inline_equation" + }, + { + "bbox": [ + 180, + 111, + 239, + 136 + ], + "score": 1.0, + "content": "i i keep track of", + "type": "text" + }, + { + "bbox": [ + 240, + 117, + 277, + 131 + ], + "score": 0.93, + "content": "\\textstyle \\sum _ { i = 1 } ^ { n } x _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 277, + 111, + 297, + 136 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 297, + 117, + 334, + 130 + ], + "score": 0.92, + "content": 
"\\textstyle \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 334, + 111, + 477, + 136 + ], + "score": 1.0, + "content": ". The dual variables are stored as", + "type": "text" + }, + { + "bbox": [ + 477, + 120, + 489, + 129 + ], + "score": 0.85, + "content": "w _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 489, + 111, + 510, + 136 + ], + "score": 1.0, + "content": "for", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 129, + 506, + 144 + ], + "spans": [ + { + "bbox": [ + 106, + 131, + 142, + 141 + ], + "score": 0.89, + "content": "i \\in 1 . . n", + "type": "inline_equation" + }, + { + "bbox": [ + 142, + 129, + 252, + 144 + ], + "score": 1.0, + "content": "and the primal variable as", + "type": "text" + }, + { + "bbox": [ + 253, + 133, + 259, + 141 + ], + "score": 0.74, + "content": "z", + "type": "inline_equation" + }, + { + "bbox": [ + 259, + 129, + 289, + 144 + ], + "score": 1.0, + "content": ". Once", + "type": "text" + }, + { + "bbox": [ + 289, + 131, + 321, + 143 + ], + "score": 0.92, + "content": "x = x _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 322, + 129, + 394, + 144 + ], + "score": 1.0, + "content": "is computed, the", + "type": "text" + }, + { + "bbox": [ + 394, + 130, + 405, + 141 + ], + "score": 0.86, + "content": "i ^ { \\mathrm { { t h } } }", + "type": "inline_equation" + }, + { + "bbox": [ + 406, + 129, + 462, + 144 + ], + "score": 1.0, + "content": "dual variable", + "type": "text" + }, + { + "bbox": [ + 462, + 132, + 474, + 142 + ], + "score": 0.87, + "content": "w _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 475, + 129, + 506, + 144 + ], + "score": 1.0, + "content": "can be", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 141, + 505, + 154 + ], + "spans": [ + { + "bbox": [ + 105, + 141, + 185, + 154 + ], + "score": 1.0, + "content": "partially updated as", + "type": "text" + }, + { + "bbox": [ + 
185, + 143, + 252, + 153 + ], + "score": 0.91, + "content": "w _ { i } w _ { i } - \\alpha _ { k } x", + "type": "inline_equation" + }, + { + "bbox": [ + 252, + 141, + 505, + 154 + ], + "score": 1.0, + "content": ". Once all the operators have been processed, the update for each", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 152, + 506, + 165 + ], + "spans": [ + { + "bbox": [ + 105, + 152, + 248, + 165 + ], + "score": 1.0, + "content": "dual variable may be completed via", + "type": "text" + }, + { + "bbox": [ + 248, + 152, + 356, + 164 + ], + "score": 0.9, + "content": "w _ { i } w _ { i } + \\alpha _ { k } \\bar { ( n + 1 ) } _ { . } ^ { - 1 } \\bar { x }", + "type": "inline_equation" + }, + { + "bbox": [ + 357, + 152, + 506, + 165 + ], + "score": 1.0, + "content": ". Also, the primal update is computed", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 160, + 504, + 177 + ], + "spans": [ + { + "bbox": [ + 104, + 160, + 117, + 177 + ], + "score": 1.0, + "content": "as", + "type": "text" + }, + { + "bbox": [ + 118, + 164, + 173, + 175 + ], + "score": 0.93, + "content": "z z - \\alpha _ { k } \\bar { y }", + "type": "inline_equation" + }, + { + "bbox": [ + 173, + 160, + 317, + 177 + ], + "score": 1.0, + "content": ". During the calculation loop for the", + "type": "text" + }, + { + "bbox": [ + 317, + 163, + 343, + 176 + ], + "score": 0.91, + "content": "x _ { i } ^ { k } , y _ { i } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 343, + 160, + 490, + 177 + ], + "score": 1.0, + "content": ", the terms in approximation residual", + "type": "text" + }, + { + "bbox": [ + 490, + 164, + 504, + 174 + ], + "score": 0.89, + "content": "R _ { k }", + "type": "inline_equation" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 175, + 505, + 186 + ], + "spans": [ + { + "bbox": [ + 106, + 175, + 505, + 186 + ], + "score": 1.0, + "content": "may also be accumulated one by one. 
The total total number of vector elements that must be stored is", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 185, + 147, + 198 + ], + "spans": [ + { + "bbox": [ + 106, + 185, + 142, + 198 + ], + "score": 0.91, + "content": "( n + 7 ) d", + "type": "inline_equation" + }, + { + "bbox": [ + 143, + 185, + 147, + 198 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 8 + } + ], + "index": 4.5, + "bbox_fs": [ + 102, + 105, + 510, + 198 + ] + }, + { + "type": "title", + "bbox": [ + 106, + 212, + 474, + 226 + ], + "lines": [ + { + "bbox": [ + 104, + 213, + 477, + 227 + ], + "spans": [ + { + "bbox": [ + 104, + 213, + 477, + 227 + ], + "score": 1.0, + "content": "I ADDITIONAL INFORMATION ABOUT THE NUMERICAL EXPERIMENTS", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 9 + }, + { + "type": "text", + "bbox": [ + 106, + 237, + 343, + 250 + ], + "lines": [ + { + "bbox": [ + 105, + 237, + 344, + 252 + ], + "spans": [ + { + "bbox": [ + 105, + 237, + 344, + 252 + ], + "score": 1.0, + "content": "We solve the following convex-concave min-max problem:", + "type": "text" + } + ], + "index": 10 + } + ], + "index": 10, + "bbox_fs": [ + 105, + 237, + 344, + 252 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 121, + 254, + 471, + 307 + ], + "lines": [ + { + "bbox": [ + 121, + 254, + 471, + 307 + ], + "spans": [ + { + "bbox": [ + 121, + 254, + 471, + 307 + ], + "score": 0.91, + "content": "\\begin{array} { r l } { \\underset { \\beta \\in \\mathbb { R } ^ { d } } { \\operatorname* { m i n } } \\quad \\underset { \\gamma \\in \\mathbb { R } ^ { m } } { \\operatorname* { m a x } } } & { \\left\\{ \\lambda ( \\delta - \\kappa ) + \\displaystyle \\frac { 1 } { m } \\sum _ { i = 1 } ^ { m } \\Psi ( \\langle \\hat { x } _ { i } , \\beta \\rangle ) + \\displaystyle \\frac { 1 } { m } \\sum _ { i = 1 } ^ { m } \\gamma _ { i } ( \\hat { y } _ { i } \\langle \\hat { x } _ { i } , \\beta \\rangle - \\lambda \\kappa ) + c \\| 
\\beta \\| _ { 1 } \\right\\} } \\\\ { \\mathrm { s . t . } \\quad } & { \\| \\beta \\| _ { 2 } \\leq \\lambda / ( L _ { \\Psi } + 1 ) \\qquad \\| \\gamma \\| _ { \\infty } \\leq 1 . } \\end{array}", + "type": "interline_equation", + "image_path": "18cb44d386da099e52ffe5a9435b5ba27f93d0ab884a89cf537d101f2f54b20f.jpg" + } + ] + } + ], + "index": 12, + "virtual_lines": [ + { + "bbox": [ + 121, + 254, + 471, + 271.6666666666667 + ], + "spans": [], + "index": 11 + }, + { + "bbox": [ + 121, + 271.6666666666667, + 471, + 289.33333333333337 + ], + "spans": [], + "index": 12 + }, + { + "bbox": [ + 121, + 289.33333333333337, + 471, + 307.00000000000006 + ], + "spans": [], + "index": 13 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 311, + 506, + 412 + ], + "lines": [ + { + "bbox": [ + 106, + 312, + 504, + 324 + ], + "spans": [ + { + "bbox": [ + 106, + 312, + 494, + 324 + ], + "score": 1.0, + "content": "This model is identical to that of (Yu et al., 2021, Thm. 4.3) except for the addition of the", + "type": "text" + }, + { + "bbox": [ + 495, + 312, + 504, + 323 + ], + "score": 0.88, + "content": "\\ell _ { 1 }", + "type": "inline_equation" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 323, + 504, + 336 + ], + "spans": [ + { + "bbox": [ + 105, + 323, + 184, + 336 + ], + "score": 1.0, + "content": "regularization term", + "type": "text" + }, + { + "bbox": [ + 184, + 323, + 210, + 335 + ], + "score": 0.92, + "content": "c \\| \\beta \\| _ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 210, + 323, + 240, + 336 + ], + "score": 1.0, + "content": ", where", + "type": "text" + }, + { + "bbox": [ + 240, + 324, + 264, + 334 + ], + "score": 0.91, + "content": "c \\geq 0", + "type": "inline_equation" + }, + { + "bbox": [ + 264, + 323, + 496, + 336 + ], + "score": 1.0, + "content": "is a given constant. 
The goal is to learn the model weights", + "type": "text" + }, + { + "bbox": [ + 497, + 324, + 504, + 335 + ], + "score": 0.84, + "content": "\\beta", + "type": "inline_equation" + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 332, + 506, + 349 + ], + "spans": [ + { + "bbox": [ + 104, + 332, + 210, + 349 + ], + "score": 1.0, + "content": "from a training dataset of", + "type": "text" + }, + { + "bbox": [ + 210, + 336, + 220, + 344 + ], + "score": 0.78, + "content": "m", + "type": "inline_equation" + }, + { + "bbox": [ + 221, + 332, + 282, + 349 + ], + "score": 1.0, + "content": "feature vectors", + "type": "text" + }, + { + "bbox": [ + 283, + 335, + 293, + 345 + ], + "score": 0.88, + "content": "{ \\hat { x } } _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 294, + 332, + 397, + 349 + ], + "score": 1.0, + "content": "and corresponding labels", + "type": "text" + }, + { + "bbox": [ + 397, + 335, + 406, + 345 + ], + "score": 0.87, + "content": "\\hat { y } _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 406, + 332, + 506, + 349 + ], + "score": 1.0, + "content": ". 
Rather than computing", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 344, + 506, + 358 + ], + "spans": [ + { + "bbox": [ + 105, + 344, + 374, + 358 + ], + "score": 1.0, + "content": "the expected loss over the training set, the formulation uses, for each", + "type": "text" + }, + { + "bbox": [ + 375, + 345, + 382, + 356 + ], + "score": 0.83, + "content": "\\beta", + "type": "inline_equation" + }, + { + "bbox": [ + 382, + 344, + 506, + 358 + ], + "score": 1.0, + "content": ", the worst possible distribution", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 354, + 507, + 370 + ], + "spans": [ + { + "bbox": [ + 104, + 354, + 387, + 370 + ], + "score": 1.0, + "content": "within a Wasserstein-metric ball around the empirical distribution of the", + "type": "text" + }, + { + "bbox": [ + 387, + 356, + 426, + 368 + ], + "score": 0.92, + "content": "\\{ ( \\hat { x } _ { i } , \\hat { y } _ { i } ) \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 427, + 354, + 507, + 370 + ], + "score": 1.0, + "content": ", with the parameter", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 107, + 366, + 506, + 380 + ], + "spans": [ + { + "bbox": [ + 107, + 367, + 131, + 378 + ], + "score": 0.9, + "content": "\\delta \\geq 0", + "type": "inline_equation" + }, + { + "bbox": [ + 131, + 366, + 332, + 380 + ], + "score": 1.0, + "content": "giving the diameter of the ball and the parameter", + "type": "text" + }, + { + "bbox": [ + 332, + 367, + 357, + 378 + ], + "score": 0.91, + "content": "\\kappa \\geq 0", + "type": "inline_equation" + }, + { + "bbox": [ + 358, + 366, + 506, + 380 + ], + "score": 1.0, + "content": "specifying the relative weighting of", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 378, + 505, + 390 + ], + "spans": [ + { + "bbox": [ + 105, + 378, + 239, + 390 + ], + "score": 1.0, + "content": "features and labels. 
The variables", + "type": "text" + }, + { + "bbox": [ + 239, + 380, + 247, + 390 + ], + "score": 0.83, + "content": "\\gamma", + "type": "inline_equation" + }, + { + "bbox": [ + 247, + 378, + 264, + 390 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 264, + 379, + 271, + 388 + ], + "score": 0.81, + "content": "\\lambda", + "type": "inline_equation" + }, + { + "bbox": [ + 272, + 378, + 505, + 390 + ], + "score": 1.0, + "content": "parameterize the selection of this worst-case distribution in", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 104, + 388, + 505, + 402 + ], + "spans": [ + { + "bbox": [ + 104, + 388, + 227, + 402 + ], + "score": 1.0, + "content": "response to the model weights", + "type": "text" + }, + { + "bbox": [ + 227, + 390, + 234, + 401 + ], + "score": 0.84, + "content": "\\beta", + "type": "inline_equation" + }, + { + "bbox": [ + 235, + 388, + 270, + 402 + ], + "score": 1.0, + "content": ". Finally,", + "type": "text" + }, + { + "bbox": [ + 271, + 389, + 279, + 399 + ], + "score": 0.83, + "content": "\\Psi", + "type": "inline_equation" + }, + { + "bbox": [ + 280, + 388, + 379, + 402 + ], + "score": 1.0, + "content": "is the logistic loss kernel", + "type": "text" + }, + { + "bbox": [ + 380, + 389, + 454, + 400 + ], + "score": 0.89, + "content": "t \\mapsto \\log ( e ^ { t } + e ^ { - t } )", + "type": "inline_equation" + }, + { + "bbox": [ + 455, + 388, + 472, + 402 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 472, + 389, + 505, + 399 + ], + "score": 0.88, + "content": "L _ { \\Psi } = 1", + "type": "inline_equation" + } + ], + "index": 21 + }, + { + "bbox": [ + 104, + 399, + 491, + 412 + ], + "spans": [ + { + "bbox": [ + 104, + 399, + 387, + 412 + ], + "score": 1.0, + "content": "is the corresponding Lipschitz constant. 
In all the experiments, we set", + "type": "text" + }, + { + "bbox": [ + 388, + 401, + 430, + 410 + ], + "score": 0.9, + "content": "\\delta = \\kappa = 1", + "type": "inline_equation" + }, + { + "bbox": [ + 431, + 399, + 449, + 412 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 449, + 400, + 488, + 410 + ], + "score": 0.9, + "content": "c = 1 0 ^ { - 3 }", + "type": "inline_equation" + }, + { + "bbox": [ + 488, + 399, + 491, + 412 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 22 + } + ], + "index": 18, + "bbox_fs": [ + 104, + 312, + 507, + 412 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 416, + 504, + 440 + ], + "lines": [ + { + "bbox": [ + 105, + 415, + 506, + 430 + ], + "spans": [ + { + "bbox": [ + 105, + 415, + 476, + 430 + ], + "score": 1.0, + "content": "We now show how we converted this problem to the form (1) for our experiments. Let", + "type": "text" + }, + { + "bbox": [ + 476, + 419, + 483, + 427 + ], + "score": 0.73, + "content": "z", + "type": "inline_equation" + }, + { + "bbox": [ + 483, + 415, + 506, + 430 + ], + "score": 1.0, + "content": "be a", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 427, + 246, + 440 + ], + "spans": [ + { + "bbox": [ + 106, + 427, + 163, + 440 + ], + "score": 1.0, + "content": "shorthand for", + "type": "text" + }, + { + "bbox": [ + 163, + 428, + 198, + 440 + ], + "score": 0.93, + "content": "( \\lambda , \\beta , \\gamma )", + "type": "inline_equation" + }, + { + "bbox": [ + 198, + 427, + 246, + 440 + ], + "score": 1.0, + "content": ", and define", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 23.5, + "bbox_fs": [ + 105, + 415, + 506, + 440 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 168, + 444, + 442, + 477 + ], + "lines": [ + { + "bbox": [ + 168, + 444, + 442, + 477 + ], + "spans": [ + { + "bbox": [ + 168, + 444, + 442, + 477 + ], + "score": 0.96, + "content": "\\mathcal { L } ( z ) \\doteq \\lambda 
( \\delta - \\kappa ) + \\frac { 1 } { m } \\sum _ { i = 1 } ^ { m } \\Psi ( \\langle { \\hat { x } _ { i } } , \\beta \\rangle ) + \\frac { 1 } { m } \\sum _ { i = 1 } ^ { m } \\gamma _ { i } ( \\hat { y } _ { i } \\langle { \\hat { x } _ { i } } , \\beta \\rangle - \\lambda \\kappa ) .", + "type": "interline_equation", + "image_path": "aa3614c6b6306691620346b018a88bd125aca6d20f519ffa82859a5be919d1e7.jpg" + } + ] + } + ], + "index": 26, + "virtual_lines": [ + { + "bbox": [ + 168, + 444, + 442, + 455.0 + ], + "spans": [], + "index": 25 + }, + { + "bbox": [ + 168, + 455.0, + 442, + 466.0 + ], + "spans": [], + "index": 26 + }, + { + "bbox": [ + 168, + 466.0, + 442, + 477.0 + ], + "spans": [], + "index": 27 + } + ] + }, + { + "type": "text", + "bbox": [ + 108, + 481, + 501, + 504 + ], + "lines": [ + { + "bbox": [ + 105, + 480, + 503, + 494 + ], + "spans": [ + { + "bbox": [ + 105, + 480, + 503, + 494 + ], + "score": 1.0, + "content": "The first-order necessary and sufficient conditions for the convex-concave saddlepoint problem in", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 104, + 491, + 141, + 505 + ], + "spans": [ + { + "bbox": [ + 104, + 491, + 141, + 505 + ], + "score": 1.0, + "content": "(76) are", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 28.5, + "bbox_fs": [ + 104, + 480, + 503, + 505 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 248, + 509, + 363, + 523 + ], + "lines": [ + { + "bbox": [ + 248, + 509, + 363, + 523 + ], + "spans": [ + { + "bbox": [ + 248, + 509, + 363, + 523 + ], + "score": 0.9, + "content": "0 \\in B ( z ) + A _ { 1 } ( z ) + A _ { 2 } ( z )", + "type": "interline_equation", + "image_path": "ccd93350e661533a4eb2fd3e28c8ef4fe823d80b6afb68632ffba1d7a532e259.jpg" + } + ] + } + ], + "index": 30, + "virtual_lines": [ + { + "bbox": [ + 248, + 509, + 363, + 523 + ], + "spans": [], + "index": 30 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 528, + 270, + 540 + ], + "lines": [ + { + "bbox": [ 
+ 106, + 527, + 271, + 541 + ], + "spans": [ + { + "bbox": [ + 106, + 527, + 195, + 541 + ], + "score": 1.0, + "content": "where the vector field", + "type": "text" + }, + { + "bbox": [ + 195, + 528, + 217, + 540 + ], + "score": 0.92, + "content": "B ( z )", + "type": "inline_equation" + }, + { + "bbox": [ + 217, + 527, + 271, + 541 + ], + "score": 1.0, + "content": "is defined as", + "type": "text" + } + ], + "index": 31 + } + ], + "index": 31, + "bbox_fs": [ + 106, + 527, + 271, + 541 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 253, + 545, + 356, + 579 + ], + "lines": [ + { + "bbox": [ + 253, + 545, + 356, + 579 + ], + "spans": [ + { + "bbox": [ + 253, + 545, + 356, + 579 + ], + "score": 0.94, + "content": "\\boldsymbol { B } ( z ) \\doteq \\left[ \\begin{array} { l } { \\nabla _ { \\boldsymbol { \\lambda } , \\beta } \\mathcal { L } ( z ) } \\\\ { - \\nabla _ { \\boldsymbol { \\gamma } } \\mathcal { L } ( z ) } \\end{array} \\right] ,", + "type": "interline_equation", + "image_path": "9cca8f05961d129210e381b2e73d7cdad0e42dd8c0f644cad0af15ae26b2145a.jpg" + } + ] + } + ], + "index": 32.5, + "virtual_lines": [ + { + "bbox": [ + 253, + 545, + 356, + 562.0 + ], + "spans": [], + "index": 32 + }, + { + "bbox": [ + 253, + 562.0, + 356, + 579.0 + ], + "spans": [], + "index": 33 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 584, + 126, + 595 + ], + "lines": [ + { + "bbox": [ + 105, + 583, + 126, + 595 + ], + "spans": [ + { + "bbox": [ + 105, + 583, + 126, + 595 + ], + "score": 1.0, + "content": "with", + "type": "text" + } + ], + "index": 34 + } + ], + "index": 34, + "bbox_fs": [ + 105, + 583, + 126, + 595 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 183, + 598, + 426, + 633 + ], + "lines": [ + { + "bbox": [ + 183, + 598, + 426, + 633 + ], + "spans": [ + { + "bbox": [ + 183, + 598, + 426, + 633 + ], + "score": 0.94, + "content": "\\begin{array} { r } { \\nabla _ { \\lambda , \\beta } \\mathcal { L } ( z ) = \\left[ \\begin{array} { c 
} { \\delta - \\kappa ( 1 + \\frac { 1 } { m } \\sum _ { i = 1 } ^ { m } \\gamma _ { i } ) } \\\\ { \\frac { 1 } { m } \\sum _ { i = 1 } ^ { m } \\Psi ^ { \\prime } ( \\langle \\hat { x } _ { i } , \\beta \\rangle ) \\hat { x } _ { i } + \\frac { 1 } { m } \\sum _ { i = 1 } ^ { m } \\gamma _ { i } \\hat { y } _ { i } \\hat { x } _ { i } } \\end{array} \\right] } \\end{array}", + "type": "interline_equation", + "image_path": "85f1bf2cee31fddc6e9593a9a10edca9b6f09cf13548e4dd23fd4c2f73466f27.jpg" + } + ] + } + ], + "index": 35.5, + "virtual_lines": [ + { + "bbox": [ + 183, + 598, + 426, + 615.5 + ], + "spans": [], + "index": 35 + }, + { + "bbox": [ + 183, + 615.5, + 426, + 633.0 + ], + "spans": [], + "index": 36 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 637, + 123, + 648 + ], + "lines": [ + { + "bbox": [ + 105, + 637, + 123, + 648 + ], + "spans": [ + { + "bbox": [ + 105, + 637, + 123, + 648 + ], + "score": 1.0, + "content": "and", + "type": "text" + } + ], + "index": 37 + } + ], + "index": 37, + "bbox_fs": [ + 105, + 637, + 123, + 648 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 225, + 651, + 386, + 705 + ], + "lines": [ + { + "bbox": [ + 225, + 651, + 386, + 705 + ], + "spans": [ + { + "bbox": [ + 225, + 651, + 386, + 705 + ], + "score": 0.93, + "content": "\\nabla _ { \\boldsymbol { \\gamma } } \\mathcal { L } ( z ) = \\left[ \\begin{array} { c } { \\frac { 1 } { m } ( \\hat { y } _ { 1 } \\langle \\hat { x } _ { 1 } , \\beta \\rangle - \\lambda \\kappa ) } \\\\ { \\vdots } \\\\ { \\frac { 1 } { m } ( \\hat { y } _ { m } \\langle \\hat { x } _ { m } , \\beta \\rangle - \\lambda \\kappa ) } \\end{array} \\right] .", + "type": "interline_equation", + "image_path": "914e4a4b20d9d95dbd3bbb4bc5dbaf285ce50d0c8096409b0c932be2678ec031.jpg" + } + ] + } + ], + "index": 39, + "virtual_lines": [ + { + "bbox": [ + 225, + 651, + 386, + 669.0 + ], + "spans": [], + "index": 38 + }, + { + "bbox": [ + 225, + 669.0, + 386, + 687.0 + ], + "spans": [], + 
"index": 39 + }, + { + "bbox": [ + 225, + 687.0, + 386, + 705.0 + ], + "spans": [], + "index": 40 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 709, + 506, + 732 + ], + "lines": [ + { + "bbox": [ + 106, + 709, + 506, + 722 + ], + "spans": [ + { + "bbox": [ + 106, + 709, + 217, + 722 + ], + "score": 1.0, + "content": "It is readily confirmed that", + "type": "text" + }, + { + "bbox": [ + 218, + 710, + 227, + 720 + ], + "score": 0.83, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 227, + 709, + 461, + 722 + ], + "score": 1.0, + "content": "defined in this manner is Lipschitz. The monotonicity of", + "type": "text" + }, + { + "bbox": [ + 462, + 710, + 471, + 720 + ], + "score": 0.81, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 471, + 709, + 506, + 722 + ], + "score": 1.0, + "content": "follows", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 106, + 720, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 106, + 720, + 506, + 733 + ], + "score": 1.0, + "content": "from its being the generalized gradient of a convex-concave saddle function (Rockafellar, 1970).", + "type": "text" + } + ], + "index": 42 + } + ], + "index": 41.5, + "bbox_fs": [ + 106, + 709, + 506, + 733 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 82, + 504, + 105 + ], + "lines": [ + { + "bbox": [ + 105, + 81, + 504, + 96 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 225, + 96 + ], + "score": 1.0, + "content": "For the set-valued operators,", + "type": "text" + }, + { + "bbox": [ + 225, + 82, + 251, + 95 + ], + "score": 0.93, + "content": "A _ { 1 } ( z )", + "type": "inline_equation" + }, + { + "bbox": [ + 252, + 81, + 394, + 96 + ], + "score": 1.0, + "content": "corresponds to the constraints and", + "type": "text" + }, + { + "bbox": [ + 394, + 83, + 420, + 95 + ], + "score": 0.93, + "content": "A _ { 2 } ( z )", + "type": "inline_equation" + }, + { + "bbox": [ + 420, + 81, + 494, + 96 + ], 
+ "score": 1.0, + "content": "to the nonsmooth", + "type": "text" + }, + { + "bbox": [ + 495, + 83, + 504, + 93 + ], + "score": 0.85, + "content": "\\ell _ { 1 }", + "type": "inline_equation" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 93, + 229, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 93, + 229, + 106 + ], + "score": 1.0, + "content": "regularizer, and are defined as", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 0.5 + }, + { + "type": "interline_equation", + "bbox": [ + 242, + 110, + 367, + 124 + ], + "lines": [ + { + "bbox": [ + 242, + 110, + 367, + 124 + ], + "spans": [ + { + "bbox": [ + 242, + 110, + 367, + 124 + ], + "score": 0.92, + "content": "A _ { 1 } ( z ) \\doteq N _ { \\mathcal { C } _ { 1 } } ( \\lambda , \\beta ) \\times N _ { \\mathcal { C } _ { 2 } } ( \\gamma ) ,", + "type": "interline_equation", + "image_path": "a9322f8f88dfd4faa2d5690a44116c0abe283d3f1db5b52383fa6be335f04da9.jpg" + } + ] + } + ], + "index": 2, + "virtual_lines": [ + { + "bbox": [ + 242, + 110, + 367, + 124 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 129, + 132, + 140 + ], + "lines": [ + { + "bbox": [ + 105, + 128, + 134, + 142 + ], + "spans": [ + { + "bbox": [ + 105, + 128, + 134, + 142 + ], + "score": 1.0, + "content": "where", + "type": "text" + } + ], + "index": 3 + } + ], + "index": 3 + }, + { + "type": "interline_equation", + "bbox": [ + 163, + 145, + 447, + 161 + ], + "lines": [ + { + "bbox": [ + 163, + 145, + 447, + 161 + ], + "spans": [ + { + "bbox": [ + 163, + 145, + 447, + 161 + ], + "score": 0.88, + "content": "\\begin{array} { r } { \\mathcal { C } _ { 1 } \\doteq \\bigl \\{ ( \\lambda , \\beta ) : \\| \\beta \\| _ { 2 } \\le \\lambda / ( L _ { \\Psi } + 1 ) \\bigr \\} \\quad \\mathrm { ~ a n d ~ } \\quad \\mathcal { C } _ { 2 } \\doteq \\{ \\gamma : \\| \\gamma \\| _ { \\infty } \\le 1 \\} , } \\end{array}", + "type": "interline_equation", + "image_path": 
"a3f401bfc4e2d70d79c685ea6e10fe54076e2df77299cec2dd1b2cff859d932c.jpg" + } + ] + } + ], + "index": 4, + "virtual_lines": [ + { + "bbox": [ + 163, + 145, + 447, + 161 + ], + "spans": [], + "index": 4 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 165, + 123, + 176 + ], + "lines": [ + { + "bbox": [ + 105, + 165, + 123, + 176 + ], + "spans": [ + { + "bbox": [ + 105, + 165, + 123, + 176 + ], + "score": 1.0, + "content": "and", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 5 + }, + { + "type": "interline_equation", + "bbox": [ + 224, + 182, + 386, + 196 + ], + "lines": [ + { + "bbox": [ + 224, + 182, + 386, + 196 + ], + "spans": [ + { + "bbox": [ + 224, + 182, + 386, + 196 + ], + "score": 0.92, + "content": "A _ { 2 } ( z ) \\doteq \\{ \\mathbf { 0 } _ { 1 \\times 1 } \\} \\times c \\partial \\| \\beta \\| _ { 1 } \\times \\{ \\mathbf { 0 } _ { m \\times 1 } \\} .", + "type": "interline_equation", + "image_path": "5f142bbd37a6d13571c5c73e38796564f96a9147885f69e0454ea746e642b5cb.jpg" + } + ] + } + ], + "index": 6, + "virtual_lines": [ + { + "bbox": [ + 224, + 182, + 386, + 196 + ], + "spans": [], + "index": 6 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 200, + 505, + 257 + ], + "lines": [ + { + "bbox": [ + 106, + 201, + 506, + 214 + ], + "spans": [ + { + "bbox": [ + 106, + 201, + 184, + 213 + ], + "score": 1.0, + "content": "Here, the notation", + "type": "text" + }, + { + "bbox": [ + 185, + 202, + 206, + 214 + ], + "score": 0.91, + "content": "{ \\bf 0 } _ { p \\times 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 207, + 201, + 257, + 213 + ], + "score": 1.0, + "content": "denotes the", + "type": "text" + }, + { + "bbox": [ + 258, + 203, + 264, + 213 + ], + "score": 0.82, + "content": "p", + "type": "inline_equation" + }, + { + "bbox": [ + 264, + 201, + 401, + 213 + ], + "score": 1.0, + "content": "-dimensional vector of all zeros.", + "type": "text" + }, + { + "bbox": [ + 402, + 202, + 413, + 212 + ], + "score": 0.87, + 
"content": "\\mathcal { C } _ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 413, + 201, + 506, + 213 + ], + "score": 1.0, + "content": "is a scaled version of", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 210, + 504, + 225 + ], + "spans": [ + { + "bbox": [ + 105, + 210, + 388, + 225 + ], + "score": 1.0, + "content": "the second-order cone, well known to be a closed convex set, while", + "type": "text" + }, + { + "bbox": [ + 388, + 213, + 399, + 223 + ], + "score": 0.88, + "content": "\\mathcal { C } _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 399, + 210, + 489, + 225 + ], + "score": 1.0, + "content": "is the unit ball of the", + "type": "text" + }, + { + "bbox": [ + 490, + 212, + 504, + 223 + ], + "score": 0.88, + "content": "\\ell _ { \\infty }", + "type": "inline_equation" + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 223, + 505, + 235 + ], + "spans": [ + { + "bbox": [ + 106, + 223, + 255, + 235 + ], + "score": 1.0, + "content": "norm, also closed and convex. 
Since", + "type": "text" + }, + { + "bbox": [ + 256, + 224, + 269, + 234 + ], + "score": 0.89, + "content": "A _ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 269, + 223, + 466, + 235 + ], + "score": 1.0, + "content": "is a normal cone map of a closed convex set and", + "type": "text" + }, + { + "bbox": [ + 467, + 223, + 480, + 234 + ], + "score": 0.89, + "content": "A _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 480, + 223, + 505, + 235 + ], + "score": 1.0, + "content": "is the", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 233, + 506, + 248 + ], + "spans": [ + { + "bbox": [ + 105, + 233, + 506, + 248 + ], + "score": 1.0, + "content": "subgradient map of a closed proper convex function (the scaled 1-norm), both of these operators are", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 245, + 392, + 257 + ], + "spans": [ + { + "bbox": [ + 106, + 245, + 362, + 257 + ], + "score": 1.0, + "content": "maximal monotone and problem (77) is a special case of (1) for", + "type": "text" + }, + { + "bbox": [ + 363, + 245, + 388, + 255 + ], + "score": 0.88, + "content": "n = 2", + "type": "inline_equation" + }, + { + "bbox": [ + 389, + 245, + 392, + 257 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 9 + }, + { + "type": "text", + "bbox": [ + 106, + 267, + 504, + 291 + ], + "lines": [ + { + "bbox": [ + 104, + 263, + 507, + 284 + ], + "spans": [ + { + "bbox": [ + 104, + 263, + 313, + 284 + ], + "score": 1.0, + "content": "Stochastic oracle implementation The operator", + "type": "text" + }, + { + "bbox": [ + 313, + 268, + 419, + 279 + ], + "score": 0.92, + "content": "B : \\mathbb { R } ^ { m + d + 1 } \\mapsto \\mathbb { R } ^ { m + d + 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 419, + 263, + 507, + 284 + ], + "score": 1.0, + "content": ", defined in (78), can", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 279, + 
161, + 292 + ], + "spans": [ + { + "bbox": [ + 105, + 279, + 161, + 292 + ], + "score": 1.0, + "content": "be written as", + "type": "text" + } + ], + "index": 13 + } + ], + "index": 12.5 + }, + { + "type": "interline_equation", + "bbox": [ + 261, + 294, + 350, + 327 + ], + "lines": [ + { + "bbox": [ + 261, + 294, + 350, + 327 + ], + "spans": [ + { + "bbox": [ + 261, + 294, + 350, + 327 + ], + "score": 0.93, + "content": "B ( z ) = \\frac { 1 } { m } \\sum _ { i = 1 } ^ { m } B _ { i } ( z )", + "type": "interline_equation", + "image_path": "8f96bdf635a021974b7492a256c1ebf04f386ff90e14544720b8c68faafa5170.jpg" + } + ] + } + ], + "index": 14.5, + "virtual_lines": [ + { + "bbox": [ + 261, + 294, + 350, + 310.5 + ], + "spans": [], + "index": 14 + }, + { + "bbox": [ + 261, + 310.5, + 350, + 327.0 + ], + "spans": [], + "index": 15 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 332, + 132, + 343 + ], + "lines": [ + { + "bbox": [ + 105, + 330, + 134, + 344 + ], + "spans": [ + { + "bbox": [ + 105, + 330, + 134, + 344 + ], + "score": 1.0, + "content": "where", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 16 + }, + { + "type": "interline_equation", + "bbox": [ + 225, + 348, + 384, + 426 + ], + "lines": [ + { + "bbox": [ + 225, + 348, + 384, + 426 + ], + "spans": [ + { + "bbox": [ + 225, + 348, + 384, + 426 + ], + "score": 0.95, + "content": "B _ { i } ( z ) \\doteq \\left[ \\begin{array} { c } { \\delta - \\kappa ( 1 + \\gamma _ { i } ) } \\\\ { \\Psi ^ { \\prime } ( \\langle \\hat { x } _ { i } , \\beta \\rangle ) \\hat { x } _ { i } + \\gamma _ { i } \\hat { y } _ { i } \\hat { x } _ { i } } \\\\ { \\mathbf { 0 } _ { ( i - 1 ) \\times 1 } } \\\\ { - ( \\hat { y } _ { i } \\langle \\hat { x } _ { i } , \\beta \\rangle - \\lambda \\kappa ) } \\\\ { \\mathbf { 0 } _ { ( m - i ) \\times 1 } } \\end{array} \\right] .", + "type": "interline_equation", + "image_path": "96a27dbf9aabec2390f8cc0626bed5c130859ad8934cc845adcc6d97bec8f517.jpg" + } + ] + } + ], + 
"index": 19, + "virtual_lines": [ + { + "bbox": [ + 225, + 348, + 384, + 363.6 + ], + "spans": [], + "index": 17 + }, + { + "bbox": [ + 225, + 363.6, + 384, + 379.20000000000005 + ], + "spans": [], + "index": 18 + }, + { + "bbox": [ + 225, + 379.20000000000005, + 384, + 394.80000000000007 + ], + "spans": [], + "index": 19 + }, + { + "bbox": [ + 225, + 394.80000000000007, + 384, + 410.4000000000001 + ], + "spans": [], + "index": 20 + }, + { + "bbox": [ + 225, + 410.4000000000001, + 384, + 426.0000000000001 + ], + "spans": [], + "index": 21 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 432, + 504, + 459 + ], + "lines": [ + { + "bbox": [ + 104, + 430, + 506, + 448 + ], + "spans": [ + { + "bbox": [ + 104, + 430, + 310, + 448 + ], + "score": 1.0, + "content": "In our SPS experiments, the stochastic oracle for", + "type": "text" + }, + { + "bbox": [ + 311, + 434, + 320, + 443 + ], + "score": 0.85, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 320, + 430, + 361, + 448 + ], + "score": 1.0, + "content": "is simply", + "type": "text" + }, + { + "bbox": [ + 362, + 432, + 464, + 448 + ], + "score": 0.94, + "content": "\\begin{array} { r } { \\tilde { B } ( z ) = \\frac { 1 } { | \\mathbf { B } | } \\sum _ { i \\in \\mathbf { B } } B _ { i } ( z ) } \\end{array}", + "type": "inline_equation" + }, + { + "bbox": [ + 465, + 430, + 506, + 448 + ], + "score": 1.0, + "content": "for some", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 106, + 446, + 335, + 460 + ], + "spans": [ + { + "bbox": [ + 106, + 446, + 149, + 460 + ], + "score": 1.0, + "content": "minibatch", + "type": "text" + }, + { + "bbox": [ + 149, + 447, + 217, + 459 + ], + "score": 0.95, + "content": "\\mathbf { B } \\subseteq \\{ 1 , \\dots , m \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 217, + 446, + 335, + 460 + ], + "score": 1.0, + "content": ". 
We used a batchsize of 100.", + "type": "text" + } + ], + "index": 23 + } + ], + "index": 22.5 + }, + { + "type": "text", + "bbox": [ + 108, + 470, + 506, + 505 + ], + "lines": [ + { + "bbox": [ + 106, + 470, + 505, + 483 + ], + "spans": [ + { + "bbox": [ + 106, + 470, + 286, + 483 + ], + "score": 1.0, + "content": "Resolvent computations The resolvent of", + "type": "text" + }, + { + "bbox": [ + 287, + 471, + 299, + 482 + ], + "score": 0.89, + "content": "A _ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 300, + 470, + 505, + 483 + ], + "score": 1.0, + "content": "is readily constructed from the projection maps of", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 481, + 506, + 495 + ], + "spans": [ + { + "bbox": [ + 105, + 481, + 170, + 495 + ], + "score": 1.0, + "content": "the simple sets", + "type": "text" + }, + { + "bbox": [ + 170, + 483, + 181, + 493 + ], + "score": 0.87, + "content": "\\mathcal { C } _ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 182, + 481, + 200, + 495 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 201, + 482, + 212, + 493 + ], + "score": 0.87, + "content": "\\mathcal { C } _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 212, + 481, + 298, + 495 + ], + "score": 1.0, + "content": ", while the resolvent", + "type": "text" + }, + { + "bbox": [ + 298, + 483, + 311, + 493 + ], + "score": 0.88, + "content": "A _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 311, + 481, + 468, + 495 + ], + "score": 1.0, + "content": "involves the proximal operator of the", + "type": "text" + }, + { + "bbox": [ + 468, + 482, + 478, + 493 + ], + "score": 0.87, + "content": "\\ell _ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 479, + 481, + 506, + 495 + ], + "score": 1.0, + "content": "norm.", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 492, + 159, + 507 + ], + "spans": [ + { + "bbox": [ + 105, + 492, + 159, + 507 + ], 
+ "score": 1.0, + "content": "Specifically,", + "type": "text" + } + ], + "index": 26 + } + ], + "index": 25 + }, + { + "type": "interline_equation", + "bbox": [ + 163, + 511, + 447, + 558 + ], + "lines": [ + { + "bbox": [ + 163, + 511, + 447, + 558 + ], + "spans": [ + { + "bbox": [ + 163, + 511, + 447, + 558 + ], + "score": 0.94, + "content": "J _ { \\rho A _ { 1 } } ( z ) = \\left[ \\begin{array} { c } { \\mathrm { p r o j } _ { \\mathcal { C } _ { 1 } } ( \\lambda , \\beta ) } \\\\ { \\mathrm { p r o j } _ { \\mathcal { C } _ { 2 } } ( \\gamma ) } \\end{array} \\right] \\quad \\mathrm { a n d } \\quad J _ { \\rho A _ { 2 } } ( z ) = \\left[ \\begin{array} { c } { \\mathbf { 0 } _ { 1 \\times 1 } } \\\\ { \\mathrm { p r o x } _ { \\rho c \\| \\cdot \\| _ { 1 } } ( \\beta ) } \\\\ { \\mathbf { 0 } _ { m \\times 1 } } \\end{array} \\right] .", + "type": "interline_equation", + "image_path": "49a3b38017b4b6b3fb6ec9fa19ad7b9373e672d849628227aee85bf3d001b5c7.jpg" + } + ] + } + ], + "index": 28, + "virtual_lines": [ + { + "bbox": [ + 163, + 511, + 447, + 526.6666666666666 + ], + "spans": [], + "index": 27 + }, + { + "bbox": [ + 163, + 526.6666666666666, + 447, + 542.3333333333333 + ], + "spans": [], + "index": 28 + }, + { + "bbox": [ + 163, + 542.3333333333333, + 447, + 557.9999999999999 + ], + "spans": [], + "index": 29 + } + ] + }, + { + "type": "text", + "bbox": [ + 108, + 563, + 505, + 609 + ], + "lines": [ + { + "bbox": [ + 106, + 564, + 505, + 576 + ], + "spans": [ + { + "bbox": [ + 106, + 564, + 164, + 576 + ], + "score": 1.0, + "content": "The constraint", + "type": "text" + }, + { + "bbox": [ + 164, + 564, + 175, + 575 + ], + "score": 0.89, + "content": "\\mathcal { C } _ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 176, + 564, + 309, + 576 + ], + "score": 1.0, + "content": "is a scaled second-order cone and", + "type": "text" + }, + { + "bbox": [ + 309, + 564, + 320, + 575 + ], + "score": 0.89, + "content": "\\mathcal { C } _ { 2 }", + "type": 
"inline_equation" + }, + { + "bbox": [ + 320, + 564, + 344, + 576 + ], + "score": 1.0, + "content": "is the", + "type": "text" + }, + { + "bbox": [ + 344, + 564, + 358, + 575 + ], + "score": 0.9, + "content": "\\ell _ { \\infty }", + "type": "inline_equation" + }, + { + "bbox": [ + 358, + 564, + 505, + 576 + ], + "score": 1.0, + "content": "ball, both of which have closed-form", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 575, + 506, + 587 + ], + "spans": [ + { + "bbox": [ + 105, + 575, + 282, + 587 + ], + "score": 1.0, + "content": "projections. The proximal operator of the", + "type": "text" + }, + { + "bbox": [ + 282, + 575, + 293, + 586 + ], + "score": 0.88, + "content": "\\ell _ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 293, + 575, + 506, + 587 + ], + "score": 1.0, + "content": "norm is the well-known soft-thresholding operator", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 585, + 505, + 599 + ], + "spans": [ + { + "bbox": [ + 105, + 585, + 505, + 599 + ], + "score": 1.0, + "content": "(Parikh & Boyd, 2013, Section 6.5.2). 
Therefore all resolvents in the formulation may be computed", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 597, + 201, + 610 + ], + "spans": [ + { + "bbox": [ + 105, + 597, + 201, + 610 + ], + "score": 1.0, + "content": "quickly and accurately.", + "type": "text" + } + ], + "index": 33 + } + ], + "index": 31.5 + }, + { + "type": "text", + "bbox": [ + 106, + 619, + 506, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 619, + 506, + 633 + ], + "spans": [ + { + "bbox": [ + 105, + 619, + 387, + 633 + ], + "score": 1.0, + "content": "SPS stepsize choices For the stepsize in SPS, we ordinarily require", + "type": "text" + }, + { + "bbox": [ + 387, + 621, + 447, + 632 + ], + "score": 0.93, + "content": "\\rho _ { k } \\le \\overline { { \\rho } } < 1 / L", + "type": "inline_equation" + }, + { + "bbox": [ + 447, + 619, + 506, + 633 + ], + "score": 1.0, + "content": "for the global", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 631, + 506, + 644 + ], + "spans": [ + { + "bbox": [ + 105, + 631, + 182, + 644 + ], + "score": 1.0, + "content": "Lipschitz constant", + "type": "text" + }, + { + "bbox": [ + 182, + 632, + 191, + 641 + ], + "score": 0.78, + "content": "L", + "type": "inline_equation" + }, + { + "bbox": [ + 191, + 631, + 202, + 644 + ], + "score": 1.0, + "content": "of", + "type": "text" + }, + { + "bbox": [ + 203, + 632, + 212, + 641 + ], + "score": 0.79, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 212, + 631, + 506, + 644 + ], + "score": 1.0, + "content": ". However, since the global Lipschitz constant may be pessimistic, better", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 642, + 506, + 655 + ], + "spans": [ + { + "bbox": [ + 105, + 642, + 506, + 655 + ], + "score": 1.0, + "content": "performance can often be achieved by experimenting with larger stepsizes. 
If divergence is observed,", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 653, + 506, + 667 + ], + "spans": [ + { + "bbox": [ + 105, + 653, + 506, + 667 + ], + "score": 1.0, + "content": "then the stepsize can be decreased. This type of strategy is common for SGD and similar stochastic", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 104, + 661, + 507, + 678 + ], + "spans": [ + { + "bbox": [ + 104, + 661, + 263, + 678 + ], + "score": 1.0, + "content": "methods. Thus, for SPS-decay we set", + "type": "text" + }, + { + "bbox": [ + 264, + 663, + 329, + 675 + ], + "score": 0.92, + "content": "\\alpha _ { k } ^ { - \\pm } = C _ { d } k ^ { - 0 . 5 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 329, + 661, + 348, + 678 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 348, + 664, + 412, + 676 + ], + "score": 0.92, + "content": "\\rho _ { k } = C _ { d } k ^ { - 0 . 2 5 }", + "type": "inline_equation" + }, + { + "bbox": [ + 412, + 661, + 507, + 678 + ], + "score": 1.0, + "content": ", and performed a grid", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 675, + 506, + 688 + ], + "spans": [ + { + "bbox": [ + 105, + 675, + 204, + 688 + ], + "score": 1.0, + "content": "search to select the best", + "type": "text" + }, + { + "bbox": [ + 204, + 676, + 217, + 686 + ], + "score": 0.89, + "content": "C _ { d }", + "type": "inline_equation" + }, + { + "bbox": [ + 218, + 675, + 241, + 688 + ], + "score": 1.0, + "content": "from", + "type": "text" + }, + { + "bbox": [ + 241, + 676, + 315, + 687 + ], + "score": 0.9, + "content": "\\{ 0 . 1 , 0 . 
5 , 1 , 5 , 1 0 \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 315, + 675, + 363, + 688 + ], + "score": 1.0, + "content": ", arriving at", + "type": "text" + }, + { + "bbox": [ + 363, + 676, + 394, + 686 + ], + "score": 0.9, + "content": "C _ { d } = 1", + "type": "inline_equation" + }, + { + "bbox": [ + 394, + 675, + 506, + 688 + ], + "score": 1.0, + "content": "for epsilon and SUSY, and", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 106, + 686, + 506, + 701 + ], + "spans": [ + { + "bbox": [ + 106, + 687, + 147, + 699 + ], + "score": 0.91, + "content": "C _ { d } = 0 . 5", + "type": "inline_equation" + }, + { + "bbox": [ + 147, + 686, + 299, + 701 + ], + "score": 1.0, + "content": "for real-sim. For SPS-fixed we used", + "type": "text" + }, + { + "bbox": [ + 300, + 687, + 348, + 699 + ], + "score": 0.92, + "content": "\\rho = K ^ { - 1 / 4 }", + "type": "inline_equation" + }, + { + "bbox": [ + 349, + 686, + 368, + 701 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 368, + 687, + 411, + 700 + ], + "score": 0.93, + "content": "\\alpha = C _ { f } \\rho ^ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 412, + 686, + 506, + 701 + ], + "score": 1.0, + "content": ", and performed a grid", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 698, + 504, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 169, + 712 + ], + "score": 1.0, + "content": "search to select", + "type": "text" + }, + { + "bbox": [ + 169, + 699, + 182, + 711 + ], + "score": 0.89, + "content": "C _ { f }", + "type": "inline_equation" + }, + { + "bbox": [ + 183, + 698, + 204, + 712 + ], + "score": 1.0, + "content": "over", + "type": "text" + }, + { + "bbox": [ + 204, + 699, + 277, + 711 + ], + "score": 0.91, + "content": "\\{ 0 . 1 , 0 . 
5 , 1 , 5 , 1 0 \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 278, + 698, + 324, + 712 + ], + "score": 1.0, + "content": ", arriving at", + "type": "text" + }, + { + "bbox": [ + 324, + 699, + 356, + 711 + ], + "score": 0.9, + "content": "C _ { f } = 1", + "type": "inline_equation" + }, + { + "bbox": [ + 356, + 698, + 472, + 712 + ], + "score": 1.0, + "content": "for epsilon and real-sim, and", + "type": "text" + }, + { + "bbox": [ + 473, + 699, + 504, + 711 + ], + "score": 0.91, + "content": "C _ { f } = 5", + "type": "inline_equation" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 709, + 507, + 722 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 507, + 722 + ], + "score": 1.0, + "content": "for SUSY. The total number of iterations for SPS-fixed was chosen as follows: For the epsilon dataset,", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 720, + 456, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 720, + 141, + 732 + ], + "score": 1.0, + "content": "we used", + "type": "text" + }, + { + "bbox": [ + 141, + 721, + 185, + 731 + ], + "score": 0.9, + "content": "K = 5 0 0 0", + "type": "inline_equation" + }, + { + "bbox": [ + 185, + 720, + 265, + 732 + ], + "score": 1.0, + "content": ", for SUSY we used", + "type": "text" + }, + { + "bbox": [ + 266, + 721, + 304, + 731 + ], + "score": 0.9, + "content": "K = 2 0 0", + "type": "inline_equation" + }, + { + "bbox": [ + 304, + 720, + 408, + 732 + ], + "score": 1.0, + "content": ", and for real-sim we used", + "type": "text" + }, + { + "bbox": [ + 409, + 721, + 452, + 731 + ], + "score": 0.91, + "content": "K = 1 0 0 0", + "type": "inline_equation" + }, + { + "bbox": [ + 452, + 720, + 456, + 732 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 43 + } + ], + "index": 38.5 + } + ], + "page_idx": 32, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 106, + 26, + 308, + 38 + ], + "lines": [ + { + "bbox": [ + 
106, + 25, + 308, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 308, + 39 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 760 + ], + "lines": [ + { + "bbox": [ + 298, + 750, + 312, + 763 + ], + "spans": [ + { + "bbox": [ + 298, + 750, + 312, + 763 + ], + "score": 1.0, + "content": "33", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 82, + 504, + 105 + ], + "lines": [ + { + "bbox": [ + 105, + 81, + 504, + 96 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 225, + 96 + ], + "score": 1.0, + "content": "For the set-valued operators,", + "type": "text" + }, + { + "bbox": [ + 225, + 82, + 251, + 95 + ], + "score": 0.93, + "content": "A _ { 1 } ( z )", + "type": "inline_equation" + }, + { + "bbox": [ + 252, + 81, + 394, + 96 + ], + "score": 1.0, + "content": "corresponds to the constraints and", + "type": "text" + }, + { + "bbox": [ + 394, + 83, + 420, + 95 + ], + "score": 0.93, + "content": "A _ { 2 } ( z )", + "type": "inline_equation" + }, + { + "bbox": [ + 420, + 81, + 494, + 96 + ], + "score": 1.0, + "content": "to the nonsmooth", + "type": "text" + }, + { + "bbox": [ + 495, + 83, + 504, + 93 + ], + "score": 0.85, + "content": "\\ell _ { 1 }", + "type": "inline_equation" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 93, + 229, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 93, + 229, + 106 + ], + "score": 1.0, + "content": "regularizer, and are defined as", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 0.5, + "bbox_fs": [ + 105, + 81, + 504, + 106 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 242, + 110, + 367, + 124 + ], + "lines": [ + { + "bbox": [ + 242, + 110, + 367, + 124 + ], + "spans": [ + { + "bbox": [ + 242, + 110, + 367, + 124 + ], + "score": 0.92, + "content": "A _ { 1 } ( z ) \\doteq N _ { \\mathcal { C } _ { 1 } } ( 
\\lambda , \\beta ) \\times N _ { \\mathcal { C } _ { 2 } } ( \\gamma ) ,", + "type": "interline_equation", + "image_path": "a9322f8f88dfd4faa2d5690a44116c0abe283d3f1db5b52383fa6be335f04da9.jpg" + } + ] + } + ], + "index": 2, + "virtual_lines": [ + { + "bbox": [ + 242, + 110, + 367, + 124 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 129, + 132, + 140 + ], + "lines": [ + { + "bbox": [ + 105, + 128, + 134, + 142 + ], + "spans": [ + { + "bbox": [ + 105, + 128, + 134, + 142 + ], + "score": 1.0, + "content": "where", + "type": "text" + } + ], + "index": 3 + } + ], + "index": 3, + "bbox_fs": [ + 105, + 128, + 134, + 142 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 163, + 145, + 447, + 161 + ], + "lines": [ + { + "bbox": [ + 163, + 145, + 447, + 161 + ], + "spans": [ + { + "bbox": [ + 163, + 145, + 447, + 161 + ], + "score": 0.88, + "content": "\\begin{array} { r } { \\mathcal { C } _ { 1 } \\doteq \\bigl \\{ ( \\lambda , \\beta ) : \\| \\beta \\| _ { 2 } \\le \\lambda / ( L _ { \\Psi } + 1 ) \\bigr \\} \\quad \\mathrm { ~ a n d ~ } \\quad \\mathcal { C } _ { 2 } \\doteq \\{ \\gamma : \\| \\gamma \\| _ { \\infty } \\le 1 \\} , } \\end{array}", + "type": "interline_equation", + "image_path": "a3f401bfc4e2d70d79c685ea6e10fe54076e2df77299cec2dd1b2cff859d932c.jpg" + } + ] + } + ], + "index": 4, + "virtual_lines": [ + { + "bbox": [ + 163, + 145, + 447, + 161 + ], + "spans": [], + "index": 4 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 165, + 123, + 176 + ], + "lines": [ + { + "bbox": [ + 105, + 165, + 123, + 176 + ], + "spans": [ + { + "bbox": [ + 105, + 165, + 123, + 176 + ], + "score": 1.0, + "content": "and", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 5, + "bbox_fs": [ + 105, + 165, + 123, + 176 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 224, + 182, + 386, + 196 + ], + "lines": [ + { + "bbox": [ + 224, + 182, + 386, + 196 + ], + "spans": [ + { + "bbox": [ + 224, + 182, + 
386, + 196 + ], + "score": 0.92, + "content": "A _ { 2 } ( z ) \\doteq \\{ \\mathbf { 0 } _ { 1 \\times 1 } \\} \\times c \\partial \\| \\beta \\| _ { 1 } \\times \\{ \\mathbf { 0 } _ { m \\times 1 } \\} .", + "type": "interline_equation", + "image_path": "5f142bbd37a6d13571c5c73e38796564f96a9147885f69e0454ea746e642b5cb.jpg" + } + ] + } + ], + "index": 6, + "virtual_lines": [ + { + "bbox": [ + 224, + 182, + 386, + 196 + ], + "spans": [], + "index": 6 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 200, + 505, + 257 + ], + "lines": [ + { + "bbox": [ + 106, + 201, + 506, + 214 + ], + "spans": [ + { + "bbox": [ + 106, + 201, + 184, + 213 + ], + "score": 1.0, + "content": "Here, the notation", + "type": "text" + }, + { + "bbox": [ + 185, + 202, + 206, + 214 + ], + "score": 0.91, + "content": "{ \\bf 0 } _ { p \\times 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 207, + 201, + 257, + 213 + ], + "score": 1.0, + "content": "denotes the", + "type": "text" + }, + { + "bbox": [ + 258, + 203, + 264, + 213 + ], + "score": 0.82, + "content": "p", + "type": "inline_equation" + }, + { + "bbox": [ + 264, + 201, + 401, + 213 + ], + "score": 1.0, + "content": "-dimensional vector of all zeros.", + "type": "text" + }, + { + "bbox": [ + 402, + 202, + 413, + 212 + ], + "score": 0.87, + "content": "\\mathcal { C } _ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 413, + 201, + 506, + 213 + ], + "score": 1.0, + "content": "is a scaled version of", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 210, + 504, + 225 + ], + "spans": [ + { + "bbox": [ + 105, + 210, + 388, + 225 + ], + "score": 1.0, + "content": "the second-order cone, well known to be a closed convex set, while", + "type": "text" + }, + { + "bbox": [ + 388, + 213, + 399, + 223 + ], + "score": 0.88, + "content": "\\mathcal { C } _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 399, + 210, + 489, + 225 + ], + "score": 1.0, + "content": "is the unit ball of the", + 
"type": "text" + }, + { + "bbox": [ + 490, + 212, + 504, + 223 + ], + "score": 0.88, + "content": "\\ell _ { \\infty }", + "type": "inline_equation" + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 223, + 505, + 235 + ], + "spans": [ + { + "bbox": [ + 106, + 223, + 255, + 235 + ], + "score": 1.0, + "content": "norm, also closed and convex. Since", + "type": "text" + }, + { + "bbox": [ + 256, + 224, + 269, + 234 + ], + "score": 0.89, + "content": "A _ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 269, + 223, + 466, + 235 + ], + "score": 1.0, + "content": "is a normal cone map of a closed convex set and", + "type": "text" + }, + { + "bbox": [ + 467, + 223, + 480, + 234 + ], + "score": 0.89, + "content": "A _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 480, + 223, + 505, + 235 + ], + "score": 1.0, + "content": "is the", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 233, + 506, + 248 + ], + "spans": [ + { + "bbox": [ + 105, + 233, + 506, + 248 + ], + "score": 1.0, + "content": "subgradient map of a closed proper convex function (the scaled 1-norm), both of these operators are", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 245, + 392, + 257 + ], + "spans": [ + { + "bbox": [ + 106, + 245, + 362, + 257 + ], + "score": 1.0, + "content": "maximal monotone and problem (77) is a special case of (1) for", + "type": "text" + }, + { + "bbox": [ + 363, + 245, + 388, + 255 + ], + "score": 0.88, + "content": "n = 2", + "type": "inline_equation" + }, + { + "bbox": [ + 389, + 245, + 392, + 257 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 9, + "bbox_fs": [ + 105, + 201, + 506, + 257 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 267, + 504, + 291 + ], + "lines": [ + { + "bbox": [ + 104, + 263, + 507, + 284 + ], + "spans": [ + { + "bbox": [ + 104, + 263, + 313, + 284 + ], + "score": 1.0, + "content": "Stochastic oracle implementation The operator", + 
"type": "text" + }, + { + "bbox": [ + 313, + 268, + 419, + 279 + ], + "score": 0.92, + "content": "B : \\mathbb { R } ^ { m + d + 1 } \\mapsto \\mathbb { R } ^ { m + d + 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 419, + 263, + 507, + 284 + ], + "score": 1.0, + "content": ", defined in (78), can", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 279, + 161, + 292 + ], + "spans": [ + { + "bbox": [ + 105, + 279, + 161, + 292 + ], + "score": 1.0, + "content": "be written as", + "type": "text" + } + ], + "index": 13 + } + ], + "index": 12.5, + "bbox_fs": [ + 104, + 263, + 507, + 292 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 261, + 294, + 350, + 327 + ], + "lines": [ + { + "bbox": [ + 261, + 294, + 350, + 327 + ], + "spans": [ + { + "bbox": [ + 261, + 294, + 350, + 327 + ], + "score": 0.93, + "content": "B ( z ) = \\frac { 1 } { m } \\sum _ { i = 1 } ^ { m } B _ { i } ( z )", + "type": "interline_equation", + "image_path": "8f96bdf635a021974b7492a256c1ebf04f386ff90e14544720b8c68faafa5170.jpg" + } + ] + } + ], + "index": 14.5, + "virtual_lines": [ + { + "bbox": [ + 261, + 294, + 350, + 310.5 + ], + "spans": [], + "index": 14 + }, + { + "bbox": [ + 261, + 310.5, + 350, + 327.0 + ], + "spans": [], + "index": 15 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 332, + 132, + 343 + ], + "lines": [ + { + "bbox": [ + 105, + 330, + 134, + 344 + ], + "spans": [ + { + "bbox": [ + 105, + 330, + 134, + 344 + ], + "score": 1.0, + "content": "where", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 16, + "bbox_fs": [ + 105, + 330, + 134, + 344 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 225, + 348, + 384, + 426 + ], + "lines": [ + { + "bbox": [ + 225, + 348, + 384, + 426 + ], + "spans": [ + { + "bbox": [ + 225, + 348, + 384, + 426 + ], + "score": 0.95, + "content": "B _ { i } ( z ) \\doteq \\left[ \\begin{array} { c } { \\delta - \\kappa ( 1 + \\gamma _ { i } ) } \\\\ { \\Psi ^ { \\prime } ( \\langle 
\\hat { x } _ { i } , \\beta \\rangle ) \\hat { x } _ { i } + \\gamma _ { i } \\hat { y } _ { i } \\hat { x } _ { i } } \\\\ { \\mathbf { 0 } _ { ( i - 1 ) \\times 1 } } \\\\ { - ( \\hat { y } _ { i } \\langle \\hat { x } _ { i } , \\beta \\rangle - \\lambda \\kappa ) } \\\\ { \\mathbf { 0 } _ { ( m - i ) \\times 1 } } \\end{array} \\right] .", + "type": "interline_equation", + "image_path": "96a27dbf9aabec2390f8cc0626bed5c130859ad8934cc845adcc6d97bec8f517.jpg" + } + ] + } + ], + "index": 19, + "virtual_lines": [ + { + "bbox": [ + 225, + 348, + 384, + 363.6 + ], + "spans": [], + "index": 17 + }, + { + "bbox": [ + 225, + 363.6, + 384, + 379.20000000000005 + ], + "spans": [], + "index": 18 + }, + { + "bbox": [ + 225, + 379.20000000000005, + 384, + 394.80000000000007 + ], + "spans": [], + "index": 19 + }, + { + "bbox": [ + 225, + 394.80000000000007, + 384, + 410.4000000000001 + ], + "spans": [], + "index": 20 + }, + { + "bbox": [ + 225, + 410.4000000000001, + 384, + 426.0000000000001 + ], + "spans": [], + "index": 21 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 432, + 504, + 459 + ], + "lines": [ + { + "bbox": [ + 104, + 430, + 506, + 448 + ], + "spans": [ + { + "bbox": [ + 104, + 430, + 310, + 448 + ], + "score": 1.0, + "content": "In our SPS experiments, the stochastic oracle for", + "type": "text" + }, + { + "bbox": [ + 311, + 434, + 320, + 443 + ], + "score": 0.85, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 320, + 430, + 361, + 448 + ], + "score": 1.0, + "content": "is simply", + "type": "text" + }, + { + "bbox": [ + 362, + 432, + 464, + 448 + ], + "score": 0.94, + "content": "\\begin{array} { r } { \\tilde { B } ( z ) = \\frac { 1 } { | \\mathbf { B } | } \\sum _ { i \\in \\mathbf { B } } B _ { i } ( z ) } \\end{array}", + "type": "inline_equation" + }, + { + "bbox": [ + 465, + 430, + 506, + 448 + ], + "score": 1.0, + "content": "for some", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 106, + 446, + 335, + 460 
+ ], + "spans": [ + { + "bbox": [ + 106, + 446, + 149, + 460 + ], + "score": 1.0, + "content": "minibatch", + "type": "text" + }, + { + "bbox": [ + 149, + 447, + 217, + 459 + ], + "score": 0.95, + "content": "\\mathbf { B } \\subseteq \\{ 1 , \\dots , m \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 217, + 446, + 335, + 460 + ], + "score": 1.0, + "content": ". We used a batchsize of 100.", + "type": "text" + } + ], + "index": 23 + } + ], + "index": 22.5, + "bbox_fs": [ + 104, + 430, + 506, + 460 + ] + }, + { + "type": "text", + "bbox": [ + 108, + 470, + 506, + 505 + ], + "lines": [ + { + "bbox": [ + 106, + 470, + 505, + 483 + ], + "spans": [ + { + "bbox": [ + 106, + 470, + 286, + 483 + ], + "score": 1.0, + "content": "Resolvent computations The resolvent of", + "type": "text" + }, + { + "bbox": [ + 287, + 471, + 299, + 482 + ], + "score": 0.89, + "content": "A _ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 300, + 470, + 505, + 483 + ], + "score": 1.0, + "content": "is readily constructed from the projection maps of", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 481, + 506, + 495 + ], + "spans": [ + { + "bbox": [ + 105, + 481, + 170, + 495 + ], + "score": 1.0, + "content": "the simple sets", + "type": "text" + }, + { + "bbox": [ + 170, + 483, + 181, + 493 + ], + "score": 0.87, + "content": "\\mathcal { C } _ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 182, + 481, + 200, + 495 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 201, + 482, + 212, + 493 + ], + "score": 0.87, + "content": "\\mathcal { C } _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 212, + 481, + 298, + 495 + ], + "score": 1.0, + "content": ", while the resolvent", + "type": "text" + }, + { + "bbox": [ + 298, + 483, + 311, + 493 + ], + "score": 0.88, + "content": "A _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 311, + 481, + 468, + 495 + ], + "score": 1.0, + "content": "involves 
the proximal operator of the", + "type": "text" + }, + { + "bbox": [ + 468, + 482, + 478, + 493 + ], + "score": 0.87, + "content": "\\ell _ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 479, + 481, + 506, + 495 + ], + "score": 1.0, + "content": "norm.", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 492, + 159, + 507 + ], + "spans": [ + { + "bbox": [ + 105, + 492, + 159, + 507 + ], + "score": 1.0, + "content": "Specifically,", + "type": "text" + } + ], + "index": 26 + } + ], + "index": 25, + "bbox_fs": [ + 105, + 470, + 506, + 507 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 163, + 511, + 447, + 558 + ], + "lines": [ + { + "bbox": [ + 163, + 511, + 447, + 558 + ], + "spans": [ + { + "bbox": [ + 163, + 511, + 447, + 558 + ], + "score": 0.94, + "content": "J _ { \\rho A _ { 1 } } ( z ) = \\left[ \\begin{array} { c } { \\mathrm { p r o j } _ { \\mathcal { C } _ { 1 } } ( \\lambda , \\beta ) } \\\\ { \\mathrm { p r o j } _ { \\mathcal { C } _ { 2 } } ( \\gamma ) } \\end{array} \\right] \\quad \\mathrm { a n d } \\quad J _ { \\rho A _ { 2 } } ( z ) = \\left[ \\begin{array} { c } { \\mathbf { 0 } _ { 1 \\times 1 } } \\\\ { \\mathrm { p r o x } _ { \\rho c \\| \\cdot \\| _ { 1 } } ( \\beta ) } \\\\ { \\mathbf { 0 } _ { m \\times 1 } } \\end{array} \\right] .", + "type": "interline_equation", + "image_path": "49a3b38017b4b6b3fb6ec9fa19ad7b9373e672d849628227aee85bf3d001b5c7.jpg" + } + ] + } + ], + "index": 28, + "virtual_lines": [ + { + "bbox": [ + 163, + 511, + 447, + 526.6666666666666 + ], + "spans": [], + "index": 27 + }, + { + "bbox": [ + 163, + 526.6666666666666, + 447, + 542.3333333333333 + ], + "spans": [], + "index": 28 + }, + { + "bbox": [ + 163, + 542.3333333333333, + 447, + 557.9999999999999 + ], + "spans": [], + "index": 29 + } + ] + }, + { + "type": "text", + "bbox": [ + 108, + 563, + 505, + 609 + ], + "lines": [ + { + "bbox": [ + 106, + 564, + 505, + 576 + ], + "spans": [ + { + "bbox": [ + 106, + 564, + 164, + 
576 + ], + "score": 1.0, + "content": "The constraint", + "type": "text" + }, + { + "bbox": [ + 164, + 564, + 175, + 575 + ], + "score": 0.89, + "content": "\\mathcal { C } _ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 176, + 564, + 309, + 576 + ], + "score": 1.0, + "content": "is a scaled second-order cone and", + "type": "text" + }, + { + "bbox": [ + 309, + 564, + 320, + 575 + ], + "score": 0.89, + "content": "\\mathcal { C } _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 320, + 564, + 344, + 576 + ], + "score": 1.0, + "content": "is the", + "type": "text" + }, + { + "bbox": [ + 344, + 564, + 358, + 575 + ], + "score": 0.9, + "content": "\\ell _ { \\infty }", + "type": "inline_equation" + }, + { + "bbox": [ + 358, + 564, + 505, + 576 + ], + "score": 1.0, + "content": "ball, both of which have closed-form", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 575, + 506, + 587 + ], + "spans": [ + { + "bbox": [ + 105, + 575, + 282, + 587 + ], + "score": 1.0, + "content": "projections. The proximal operator of the", + "type": "text" + }, + { + "bbox": [ + 282, + 575, + 293, + 586 + ], + "score": 0.88, + "content": "\\ell _ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 293, + 575, + 506, + 587 + ], + "score": 1.0, + "content": "norm is the well-known soft-thresholding operator", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 585, + 505, + 599 + ], + "spans": [ + { + "bbox": [ + 105, + 585, + 505, + 599 + ], + "score": 1.0, + "content": "(Parikh & Boyd, 2013, Section 6.5.2). 
Therefore all resolvents in the formulation may be computed", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 597, + 201, + 610 + ], + "spans": [ + { + "bbox": [ + 105, + 597, + 201, + 610 + ], + "score": 1.0, + "content": "quickly and accurately.", + "type": "text" + } + ], + "index": 33 + } + ], + "index": 31.5, + "bbox_fs": [ + 105, + 564, + 506, + 610 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 619, + 506, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 619, + 506, + 633 + ], + "spans": [ + { + "bbox": [ + 105, + 619, + 387, + 633 + ], + "score": 1.0, + "content": "SPS stepsize choices For the stepsize in SPS, we ordinarily require", + "type": "text" + }, + { + "bbox": [ + 387, + 621, + 447, + 632 + ], + "score": 0.93, + "content": "\\rho _ { k } \\le \\overline { { \\rho } } < 1 / L", + "type": "inline_equation" + }, + { + "bbox": [ + 447, + 619, + 506, + 633 + ], + "score": 1.0, + "content": "for the global", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 631, + 506, + 644 + ], + "spans": [ + { + "bbox": [ + 105, + 631, + 182, + 644 + ], + "score": 1.0, + "content": "Lipschitz constant", + "type": "text" + }, + { + "bbox": [ + 182, + 632, + 191, + 641 + ], + "score": 0.78, + "content": "L", + "type": "inline_equation" + }, + { + "bbox": [ + 191, + 631, + 202, + 644 + ], + "score": 1.0, + "content": "of", + "type": "text" + }, + { + "bbox": [ + 203, + 632, + 212, + 641 + ], + "score": 0.79, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 212, + 631, + 506, + 644 + ], + "score": 1.0, + "content": ". However, since the global Lipschitz constant may be pessimistic, better", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 642, + 506, + 655 + ], + "spans": [ + { + "bbox": [ + 105, + 642, + 506, + 655 + ], + "score": 1.0, + "content": "performance can often be achieved by experimenting with larger stepsizes. 
If divergence is observed,", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 653, + 506, + 667 + ], + "spans": [ + { + "bbox": [ + 105, + 653, + 506, + 667 + ], + "score": 1.0, + "content": "then the stepsize can be decreased. This type of strategy is common for SGD and similar stochastic", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 104, + 661, + 507, + 678 + ], + "spans": [ + { + "bbox": [ + 104, + 661, + 263, + 678 + ], + "score": 1.0, + "content": "methods. Thus, for SPS-decay we set", + "type": "text" + }, + { + "bbox": [ + 264, + 663, + 329, + 675 + ], + "score": 0.92, + "content": "\\alpha _ { k } ^ { - \\pm } = C _ { d } k ^ { - 0 . 5 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 329, + 661, + 348, + 678 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 348, + 664, + 412, + 676 + ], + "score": 0.92, + "content": "\\rho _ { k } = C _ { d } k ^ { - 0 . 2 5 }", + "type": "inline_equation" + }, + { + "bbox": [ + 412, + 661, + 507, + 678 + ], + "score": 1.0, + "content": ", and performed a grid", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 675, + 506, + 688 + ], + "spans": [ + { + "bbox": [ + 105, + 675, + 204, + 688 + ], + "score": 1.0, + "content": "search to select the best", + "type": "text" + }, + { + "bbox": [ + 204, + 676, + 217, + 686 + ], + "score": 0.89, + "content": "C _ { d }", + "type": "inline_equation" + }, + { + "bbox": [ + 218, + 675, + 241, + 688 + ], + "score": 1.0, + "content": "from", + "type": "text" + }, + { + "bbox": [ + 241, + 676, + 315, + 687 + ], + "score": 0.9, + "content": "\\{ 0 . 1 , 0 . 
5 , 1 , 5 , 1 0 \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 315, + 675, + 363, + 688 + ], + "score": 1.0, + "content": ", arriving at", + "type": "text" + }, + { + "bbox": [ + 363, + 676, + 394, + 686 + ], + "score": 0.9, + "content": "C _ { d } = 1", + "type": "inline_equation" + }, + { + "bbox": [ + 394, + 675, + 506, + 688 + ], + "score": 1.0, + "content": "for epsilon and SUSY, and", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 106, + 686, + 506, + 701 + ], + "spans": [ + { + "bbox": [ + 106, + 687, + 147, + 699 + ], + "score": 0.91, + "content": "C _ { d } = 0 . 5", + "type": "inline_equation" + }, + { + "bbox": [ + 147, + 686, + 299, + 701 + ], + "score": 1.0, + "content": "for real-sim. For SPS-fixed we used", + "type": "text" + }, + { + "bbox": [ + 300, + 687, + 348, + 699 + ], + "score": 0.92, + "content": "\\rho = K ^ { - 1 / 4 }", + "type": "inline_equation" + }, + { + "bbox": [ + 349, + 686, + 368, + 701 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 368, + 687, + 411, + 700 + ], + "score": 0.93, + "content": "\\alpha = C _ { f } \\rho ^ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 412, + 686, + 506, + 701 + ], + "score": 1.0, + "content": ", and performed a grid", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 698, + 504, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 169, + 712 + ], + "score": 1.0, + "content": "search to select", + "type": "text" + }, + { + "bbox": [ + 169, + 699, + 182, + 711 + ], + "score": 0.89, + "content": "C _ { f }", + "type": "inline_equation" + }, + { + "bbox": [ + 183, + 698, + 204, + 712 + ], + "score": 1.0, + "content": "over", + "type": "text" + }, + { + "bbox": [ + 204, + 699, + 277, + 711 + ], + "score": 0.91, + "content": "\\{ 0 . 1 , 0 . 
5 , 1 , 5 , 1 0 \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 278, + 698, + 324, + 712 + ], + "score": 1.0, + "content": ", arriving at", + "type": "text" + }, + { + "bbox": [ + 324, + 699, + 356, + 711 + ], + "score": 0.9, + "content": "C _ { f } = 1", + "type": "inline_equation" + }, + { + "bbox": [ + 356, + 698, + 472, + 712 + ], + "score": 1.0, + "content": "for epsilon and real-sim, and", + "type": "text" + }, + { + "bbox": [ + 473, + 699, + 504, + 711 + ], + "score": 0.91, + "content": "C _ { f } = 5", + "type": "inline_equation" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 709, + 507, + 722 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 507, + 722 + ], + "score": 1.0, + "content": "for SUSY. The total number of iterations for SPS-fixed was chosen as follows: For the epsilon dataset,", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 720, + 456, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 720, + 141, + 732 + ], + "score": 1.0, + "content": "we used", + "type": "text" + }, + { + "bbox": [ + 141, + 721, + 185, + 731 + ], + "score": 0.9, + "content": "K = 5 0 0 0", + "type": "inline_equation" + }, + { + "bbox": [ + 185, + 720, + 265, + 732 + ], + "score": 1.0, + "content": ", for SUSY we used", + "type": "text" + }, + { + "bbox": [ + 266, + 721, + 304, + 731 + ], + "score": 0.9, + "content": "K = 2 0 0", + "type": "inline_equation" + }, + { + "bbox": [ + 304, + 720, + 408, + 732 + ], + "score": 1.0, + "content": ", and for real-sim we used", + "type": "text" + }, + { + "bbox": [ + 409, + 721, + 452, + 731 + ], + "score": 0.91, + "content": "K = 1 0 0 0", + "type": "inline_equation" + }, + { + "bbox": [ + 452, + 720, + 456, + 732 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 43 + } + ], + "index": 38.5, + "bbox_fs": [ + 104, + 619, + 507, + 732 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 90, + 495, + 180 + ], + "blocks": [ + { + "type": 
"image_body", + "bbox": [ + 108, + 90, + 495, + 180 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 108, + 90, + 495, + 180 + ], + "spans": [ + { + "bbox": [ + 108, + 90, + 495, + 180 + ], + "score": 0.964, + "type": "image", + "image_path": "c94c000fa07b86ebc660cc1004df85b2b9b8c4f0b3338fc20eb74e1d9a379c4e.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 108, + 90, + 495, + 120.0 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 108, + 120.0, + 495, + 150.0 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 108, + 150.0, + 495, + 180.0 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 186, + 503, + 209 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 106, + 185, + 505, + 200 + ], + "spans": [ + { + "bbox": [ + 106, + 185, + 505, + 200 + ], + "score": 1.0, + "content": "Figure 2: Approximation residual versus epoch for three LIBSVM benchmark datasets. Left: epsilon,", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 197, + 231, + 210 + ], + "spans": [ + { + "bbox": [ + 106, + 197, + 231, + 210 + ], + "score": 1.0, + "content": "middle: SUSY, right: real-sim.", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 3.5 + } + ], + "index": 2.25 + }, + { + "type": "text", + "bbox": [ + 106, + 235, + 506, + 327 + ], + "lines": [ + { + "bbox": [ + 105, + 236, + 506, + 248 + ], + "spans": [ + { + "bbox": [ + 105, + 236, + 506, + 248 + ], + "score": 1.0, + "content": "Parameter choices for the other algorithms All methods are initialized at the same random point.", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 246, + 506, + 260 + ], + "spans": [ + { + "bbox": [ + 105, + 246, + 470, + 260 + ], + "score": 1.0, + "content": "For Tseng’s method, we used the backtracking linesearch variant with an initial stepsize of 1,", + "type": "text" + }, + { + "bbox": [ + 470, + 248, + 503, + 258 + ], + "score": 0.88, + "content": "\\theta = 0 . 
8", + "type": "inline_equation" + }, + { + "bbox": [ + 503, + 246, + 506, + 260 + ], + "score": 1.0, + "content": ",", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 258, + 505, + 271 + ], + "spans": [ + { + "bbox": [ + 105, + 258, + 505, + 271 + ], + "score": 1.0, + "content": "and a stepsize reduction factor of 0.7. For FRB, we used the backtracking linesearch variant with the", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 269, + 505, + 282 + ], + "spans": [ + { + "bbox": [ + 105, + 269, + 459, + 282 + ], + "score": 1.0, + "content": "same settings as for Tseng’s method. For deterministic PS, we used a fixed stepsize of", + "type": "text" + }, + { + "bbox": [ + 459, + 269, + 484, + 280 + ], + "score": 0.84, + "content": "0 . 9 / L", + "type": "inline_equation" + }, + { + "bbox": [ + 484, + 269, + 505, + 282 + ], + "score": 1.0, + "content": ". For", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 277, + 504, + 295 + ], + "spans": [ + { + "bbox": [ + 104, + 277, + 372, + 295 + ], + "score": 1.0, + "content": "the stochastic Tseng’s method of BΓΆhm et al. 
(2020), the stepsize", + "type": "text" + }, + { + "bbox": [ + 372, + 282, + 385, + 291 + ], + "score": 0.86, + "content": "\\alpha _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 385, + 277, + 441, + 295 + ], + "score": 1.0, + "content": "must satisfy:", + "type": "text" + }, + { + "bbox": [ + 441, + 280, + 504, + 293 + ], + "score": 0.92, + "content": "\\textstyle \\sum _ { k = 1 } ^ { \\infty } { \\dot { \\alpha } } _ { k } = \\infty", + "type": "inline_equation" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 286, + 509, + 311 + ], + "spans": [ + { + "bbox": [ + 105, + 291, + 155, + 307 + ], + "score": 1.0, + "content": "and P∞k=1", + "type": "text" + }, + { + "bbox": [ + 124, + 291, + 189, + 305 + ], + "score": 0.92, + "content": "\\textstyle \\sum _ { k = 1 } ^ { \\infty } \\alpha _ { k } ^ { 2 } < \\infty", + "type": "inline_equation" + }, + { + "bbox": [ + 190, + 286, + 239, + 311 + ], + "score": 1.0, + "content": ". So we set", + "type": "text" + }, + { + "bbox": [ + 239, + 291, + 291, + 304 + ], + "score": 0.93, + "content": "\\alpha _ { k } = C k ^ { - d }", + "type": "inline_equation" + }, + { + "bbox": [ + 291, + 286, + 423, + 311 + ], + "score": 1.0, + "content": "and perform a grid search over", + "type": "text" + }, + { + "bbox": [ + 424, + 293, + 452, + 305 + ], + "score": 0.93, + "content": "\\{ C , d \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 452, + 286, + 509, + 311 + ], + "score": 1.0, + "content": "k=1 in the range", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 303, + 507, + 318 + ], + "spans": [ + { + "bbox": [ + 107, + 305, + 192, + 317 + ], + "score": 0.86, + "content": "[ 1 0 ^ { - 4 } , 1 0 ] \\times [ 0 . 
5 1 , 1 ]", + "type": "inline_equation" + }, + { + "bbox": [ + 193, + 303, + 235, + 318 + ], + "score": 1.0, + "content": ", checking", + "type": "text" + }, + { + "bbox": [ + 236, + 305, + 259, + 315 + ], + "score": 0.87, + "content": "5 \\times 5", + "type": "inline_equation" + }, + { + "bbox": [ + 260, + 303, + 507, + 318 + ], + "score": 1.0, + "content": "values to find the best setting for each of the three problems.", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 316, + 245, + 327 + ], + "spans": [ + { + "bbox": [ + 106, + 316, + 245, + 327 + ], + "score": 1.0, + "content": "The selected values are in Table 1.", + "type": "text" + } + ], + "index": 12 + } + ], + "index": 8.5 + }, + { + "type": "table", + "bbox": [ + 231, + 340, + 376, + 391 + ], + "blocks": [ + { + "type": "table_body", + "bbox": [ + 231, + 340, + 376, + 391 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 231, + 340, + 376, + 391 + ], + "spans": [ + { + "bbox": [ + 231, + 340, + 376, + 391 + ], + "score": 0.973, + "html": "
epsilonSUSYreal-sim
C0.560.560.77
d0.60.60.55
", + "type": "table", + "image_path": "dd0e452b28fcdce907b3cc14b3e92d306bb1210d5ecdd036a064d82a6b7c8165.jpg" + } + ] + } + ], + "index": 13.5, + "virtual_lines": [ + { + "bbox": [ + 231, + 340, + 376, + 365.5 + ], + "spans": [], + "index": 13 + }, + { + "bbox": [ + 231, + 365.5, + 376, + 391.0 + ], + "spans": [], + "index": 14 + } + ] + }, + { + "type": "table_caption", + "bbox": [ + 227, + 407, + 383, + 419 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 226, + 404, + 385, + 420 + ], + "spans": [ + { + "bbox": [ + 226, + 404, + 385, + 420 + ], + "score": 1.0, + "content": "Table 1: Parameter Values for S-Tseng", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 15 + } + ], + "index": 14.25 + }, + { + "type": "text", + "bbox": [ + 107, + 444, + 505, + 489 + ], + "lines": [ + { + "bbox": [ + 105, + 445, + 505, + 457 + ], + "spans": [ + { + "bbox": [ + 105, + 445, + 300, + 457 + ], + "score": 1.0, + "content": "The work of BΓΆhm et al. (2020) also introduced", + "type": "text" + }, + { + "bbox": [ + 301, + 445, + 325, + 456 + ], + "score": 0.38, + "content": "\\mathrm { F B F p }", + "type": "inline_equation" + }, + { + "bbox": [ + 325, + 445, + 505, + 457 + ], + "score": 1.0, + "content": ", a stochastic version of Tseng’s method that", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 455, + 505, + 468 + ], + "spans": [ + { + "bbox": [ + 105, + 455, + 505, + 468 + ], + "score": 1.0, + "content": "reuses a previously-computed gradient and therefore only needs one additional gradient calculation", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 467, + 505, + 479 + ], + "spans": [ + { + "bbox": [ + 105, + 467, + 505, + 479 + ], + "score": 1.0, + "content": "per iteration. 
In our experiments, the performance of the two methods was about the same, so we", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 478, + 325, + 489 + ], + "spans": [ + { + "bbox": [ + 105, + 478, + 325, + 489 + ], + "score": 1.0, + "content": "only report the performance of stoch. Tseng’s method.", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 17.5 + }, + { + "type": "text", + "bbox": [ + 106, + 494, + 505, + 517 + ], + "lines": [ + { + "bbox": [ + 105, + 493, + 506, + 507 + ], + "spans": [ + { + "bbox": [ + 105, + 493, + 375, + 507 + ], + "score": 1.0, + "content": "For variance-reduced FRB, the main parameter is the probability", + "type": "text" + }, + { + "bbox": [ + 376, + 496, + 382, + 506 + ], + "score": 0.79, + "content": "p", + "type": "inline_equation" + }, + { + "bbox": [ + 383, + 493, + 452, + 507 + ], + "score": 1.0, + "content": ". We hand-tuned", + "type": "text" + }, + { + "bbox": [ + 453, + 496, + 459, + 506 + ], + "score": 0.79, + "content": "p", + "type": "inline_equation" + }, + { + "bbox": [ + 459, + 493, + 506, + 507 + ], + "score": 1.0, + "content": ",arriving at", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 505, + 427, + 518 + ], + "spans": [ + { + "bbox": [ + 106, + 506, + 144, + 517 + ], + "score": 0.89, + "content": "p = 0 . 0 1", + "type": "inline_equation" + }, + { + "bbox": [ + 144, + 505, + 427, + 518 + ], + "score": 1.0, + "content": "for all problems. 
We set the stepsize to its maximum allowed value of", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 20.5 + }, + { + "type": "interline_equation", + "bbox": [ + 268, + 527, + 343, + 553 + ], + "lines": [ + { + "bbox": [ + 268, + 527, + 343, + 553 + ], + "spans": [ + { + "bbox": [ + 268, + 527, + 343, + 553 + ], + "score": 0.95, + "content": "\\tau = { \\frac { 1 - \\sqrt { 1 - p } } { 2 L } } .", + "type": "interline_equation", + "image_path": "1d2d594ab5b0a4bd7eba2426ebd3b854f23aa6cc165fa808793d71d980fa5604.jpg" + } + ] + } + ], + "index": 22, + "virtual_lines": [ + { + "bbox": [ + 268, + 527, + 343, + 553 + ], + "spans": [], + "index": 22 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 568, + 506, + 646 + ], + "lines": [ + { + "bbox": [ + 106, + 569, + 506, + 581 + ], + "spans": [ + { + "bbox": [ + 106, + 569, + 506, + 581 + ], + "score": 1.0, + "content": "Plots versus Epoch Figure 2 plots the performance of each method versus epoch (i.e. data pass).", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 580, + 506, + 592 + ], + "spans": [ + { + "bbox": [ + 106, + 580, + 506, + 592 + ], + "score": 1.0, + "content": "This shows an even more dramatic benefit for the stochastic methods than the plots versus time,", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 106, + 590, + 505, + 603 + ], + "spans": [ + { + "bbox": [ + 106, + 590, + 505, + 603 + ], + "score": 1.0, + "content": "since at each iteration the stochastic methods only need to process small amounts of data, whereas", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 106, + 602, + 505, + 613 + ], + "spans": [ + { + "bbox": [ + 106, + 602, + 505, + 613 + ], + "score": 1.0, + "content": "deterministic methods must process all of it. 
We believe these benefits do not fully manifest", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 106, + 613, + 505, + 624 + ], + "spans": [ + { + "bbox": [ + 106, + 613, + 505, + 624 + ], + "score": 1.0, + "content": "themselves in the plots versus time due to overheads in each iteration of the stochastic methods,", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 106, + 624, + 505, + 635 + ], + "spans": [ + { + "bbox": [ + 106, + 624, + 505, + 635 + ], + "score": 1.0, + "content": "multithreading providing a boost for the deterministic methods, memory access patterns, and other", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 635, + 206, + 646 + ], + "spans": [ + { + "bbox": [ + 106, + 635, + 206, + 646 + ], + "score": 1.0, + "content": "practical considerations.", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 26 + }, + { + "type": "text", + "bbox": [ + 106, + 664, + 505, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 663, + 505, + 677 + ], + "spans": [ + { + "bbox": [ + 105, + 663, + 505, + 677 + ], + "score": 1.0, + "content": "Fraction of Nonzero Entries versus Running time Figure 3 plots the fraction of nonzero entries", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 103, + 673, + 507, + 691 + ], + "spans": [ + { + "bbox": [ + 103, + 673, + 507, + 691 + ], + "score": 1.0, + "content": "in the iterates of each method versus running time. For each method, we used output of proxckΒ·k1.", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 106, + 687, + 504, + 700 + ], + "spans": [ + { + "bbox": [ + 106, + 687, + 504, + 700 + ], + "score": 1.0, + "content": "We observe that our methods produce sparse intermediate iterates for two of the three problems. 
This", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 698, + 505, + 711 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 505, + 711 + ], + "score": 1.0, + "content": "is one of the benefits of proximal splitting algorithms in general, including our method. For the other", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 709, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 381, + 722 + ], + "score": 1.0, + "content": "problem, SUSY, no method produces sparse iterates, suggesting that", + "type": "text" + }, + { + "bbox": [ + 381, + 712, + 387, + 720 + ], + "score": 0.7, + "content": "c", + "type": "inline_equation" + }, + { + "bbox": [ + 387, + 709, + 505, + 722 + ], + "score": 1.0, + "content": "should be increased if sparse", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 106, + 721, + 192, + 732 + ], + "spans": [ + { + "bbox": [ + 106, + 721, + 192, + 732 + ], + "score": 1.0, + "content": "solutions are desired.", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 32.5 + } + ], + "page_idx": 33, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 106, + 27, + 308, + 37 + ], + "lines": [ + { + "bbox": [ + 106, + 26, + 309, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 309, + 38 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 760 + ], + "lines": [ + { + "bbox": [ + 298, + 749, + 313, + 764 + ], + "spans": [ + { + "bbox": [ + 298, + 749, + 313, + 764 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 15, + "width": 15 + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 90, + 495, + 180 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 108, + 90, + 495, + 180 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 108, + 90, + 495, + 
180 + ], + "spans": [ + { + "bbox": [ + 108, + 90, + 495, + 180 + ], + "score": 0.964, + "type": "image", + "image_path": "c94c000fa07b86ebc660cc1004df85b2b9b8c4f0b3338fc20eb74e1d9a379c4e.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 108, + 90, + 495, + 120.0 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 108, + 120.0, + 495, + 150.0 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 108, + 150.0, + 495, + 180.0 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 186, + 503, + 209 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 106, + 185, + 505, + 200 + ], + "spans": [ + { + "bbox": [ + 106, + 185, + 505, + 200 + ], + "score": 1.0, + "content": "Figure 2: Approximation residual versus epoch for three LIBSVM benchmark datasets. Left: epsilon,", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 197, + 231, + 210 + ], + "spans": [ + { + "bbox": [ + 106, + 197, + 231, + 210 + ], + "score": 1.0, + "content": "middle: SUSY, right: real-sim.", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 3.5 + } + ], + "index": 2.25 + }, + { + "type": "text", + "bbox": [ + 106, + 235, + 506, + 327 + ], + "lines": [ + { + "bbox": [ + 105, + 236, + 506, + 248 + ], + "spans": [ + { + "bbox": [ + 105, + 236, + 506, + 248 + ], + "score": 1.0, + "content": "Parameter choices for the other algorithms All methods are initialized at the same random point.", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 246, + 506, + 260 + ], + "spans": [ + { + "bbox": [ + 105, + 246, + 470, + 260 + ], + "score": 1.0, + "content": "For Tseng’s method, we used the backtracking linesearch variant with an initial stepsize of 1,", + "type": "text" + }, + { + "bbox": [ + 470, + 248, + 503, + 258 + ], + "score": 0.88, + "content": "\\theta = 0 . 
8", + "type": "inline_equation" + }, + { + "bbox": [ + 503, + 246, + 506, + 260 + ], + "score": 1.0, + "content": ",", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 258, + 505, + 271 + ], + "spans": [ + { + "bbox": [ + 105, + 258, + 505, + 271 + ], + "score": 1.0, + "content": "and a stepsize reduction factor of 0.7. For FRB, we used the backtracking linesearch variant with the", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 269, + 505, + 282 + ], + "spans": [ + { + "bbox": [ + 105, + 269, + 459, + 282 + ], + "score": 1.0, + "content": "same settings as for Tseng’s method. For deterministic PS, we used a fixed stepsize of", + "type": "text" + }, + { + "bbox": [ + 459, + 269, + 484, + 280 + ], + "score": 0.84, + "content": "0 . 9 / L", + "type": "inline_equation" + }, + { + "bbox": [ + 484, + 269, + 505, + 282 + ], + "score": 1.0, + "content": ". For", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 277, + 504, + 295 + ], + "spans": [ + { + "bbox": [ + 104, + 277, + 372, + 295 + ], + "score": 1.0, + "content": "the stochastic Tseng’s method of BΓΆhm et al. 
(2020), the stepsize", + "type": "text" + }, + { + "bbox": [ + 372, + 282, + 385, + 291 + ], + "score": 0.86, + "content": "\\alpha _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 385, + 277, + 441, + 295 + ], + "score": 1.0, + "content": "must satisfy:", + "type": "text" + }, + { + "bbox": [ + 441, + 280, + 504, + 293 + ], + "score": 0.92, + "content": "\\textstyle \\sum _ { k = 1 } ^ { \\infty } { \\dot { \\alpha } } _ { k } = \\infty", + "type": "inline_equation" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 286, + 509, + 311 + ], + "spans": [ + { + "bbox": [ + 105, + 291, + 155, + 307 + ], + "score": 1.0, + "content": "and P∞k=1", + "type": "text" + }, + { + "bbox": [ + 124, + 291, + 189, + 305 + ], + "score": 0.92, + "content": "\\textstyle \\sum _ { k = 1 } ^ { \\infty } \\alpha _ { k } ^ { 2 } < \\infty", + "type": "inline_equation" + }, + { + "bbox": [ + 190, + 286, + 239, + 311 + ], + "score": 1.0, + "content": ". So we set", + "type": "text" + }, + { + "bbox": [ + 239, + 291, + 291, + 304 + ], + "score": 0.93, + "content": "\\alpha _ { k } = C k ^ { - d }", + "type": "inline_equation" + }, + { + "bbox": [ + 291, + 286, + 423, + 311 + ], + "score": 1.0, + "content": "and perform a grid search over", + "type": "text" + }, + { + "bbox": [ + 424, + 293, + 452, + 305 + ], + "score": 0.93, + "content": "\\{ C , d \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 452, + 286, + 509, + 311 + ], + "score": 1.0, + "content": "k=1 in the range", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 303, + 507, + 318 + ], + "spans": [ + { + "bbox": [ + 107, + 305, + 192, + 317 + ], + "score": 0.86, + "content": "[ 1 0 ^ { - 4 } , 1 0 ] \\times [ 0 . 
5 1 , 1 ]", + "type": "inline_equation" + }, + { + "bbox": [ + 193, + 303, + 235, + 318 + ], + "score": 1.0, + "content": ", checking", + "type": "text" + }, + { + "bbox": [ + 236, + 305, + 259, + 315 + ], + "score": 0.87, + "content": "5 \\times 5", + "type": "inline_equation" + }, + { + "bbox": [ + 260, + 303, + 507, + 318 + ], + "score": 1.0, + "content": "values to find the best setting for each of the three problems.", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 316, + 245, + 327 + ], + "spans": [ + { + "bbox": [ + 106, + 316, + 245, + 327 + ], + "score": 1.0, + "content": "The selected values are in Table 1.", + "type": "text" + } + ], + "index": 12 + } + ], + "index": 8.5, + "bbox_fs": [ + 104, + 236, + 509, + 327 + ] + }, + { + "type": "table", + "bbox": [ + 231, + 340, + 376, + 391 + ], + "blocks": [ + { + "type": "table_body", + "bbox": [ + 231, + 340, + 376, + 391 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 231, + 340, + 376, + 391 + ], + "spans": [ + { + "bbox": [ + 231, + 340, + 376, + 391 + ], + "score": 0.973, + "html": "
epsilonSUSYreal-sim
C0.560.560.77
d0.60.60.55
", + "type": "table", + "image_path": "dd0e452b28fcdce907b3cc14b3e92d306bb1210d5ecdd036a064d82a6b7c8165.jpg" + } + ] + } + ], + "index": 13.5, + "virtual_lines": [ + { + "bbox": [ + 231, + 340, + 376, + 365.5 + ], + "spans": [], + "index": 13 + }, + { + "bbox": [ + 231, + 365.5, + 376, + 391.0 + ], + "spans": [], + "index": 14 + } + ] + }, + { + "type": "table_caption", + "bbox": [ + 227, + 407, + 383, + 419 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 226, + 404, + 385, + 420 + ], + "spans": [ + { + "bbox": [ + 226, + 404, + 385, + 420 + ], + "score": 1.0, + "content": "Table 1: Parameter Values for S-Tseng", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 15 + } + ], + "index": 14.25 + }, + { + "type": "text", + "bbox": [ + 107, + 444, + 505, + 489 + ], + "lines": [ + { + "bbox": [ + 105, + 445, + 505, + 457 + ], + "spans": [ + { + "bbox": [ + 105, + 445, + 300, + 457 + ], + "score": 1.0, + "content": "The work of BΓΆhm et al. (2020) also introduced", + "type": "text" + }, + { + "bbox": [ + 301, + 445, + 325, + 456 + ], + "score": 0.38, + "content": "\\mathrm { F B F p }", + "type": "inline_equation" + }, + { + "bbox": [ + 325, + 445, + 505, + 457 + ], + "score": 1.0, + "content": ", a stochastic version of Tseng’s method that", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 455, + 505, + 468 + ], + "spans": [ + { + "bbox": [ + 105, + 455, + 505, + 468 + ], + "score": 1.0, + "content": "reuses a previously-computed gradient and therefore only needs one additional gradient calculation", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 467, + 505, + 479 + ], + "spans": [ + { + "bbox": [ + 105, + 467, + 505, + 479 + ], + "score": 1.0, + "content": "per iteration. 
In our experiments, the performance of the two methods was about the same, so we", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 478, + 325, + 489 + ], + "spans": [ + { + "bbox": [ + 105, + 478, + 325, + 489 + ], + "score": 1.0, + "content": "only report the performance of stoch. Tseng’s method.", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 17.5, + "bbox_fs": [ + 105, + 445, + 505, + 489 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 494, + 505, + 517 + ], + "lines": [ + { + "bbox": [ + 105, + 493, + 506, + 507 + ], + "spans": [ + { + "bbox": [ + 105, + 493, + 375, + 507 + ], + "score": 1.0, + "content": "For variance-reduced FRB, the main parameter is the probability", + "type": "text" + }, + { + "bbox": [ + 376, + 496, + 382, + 506 + ], + "score": 0.79, + "content": "p", + "type": "inline_equation" + }, + { + "bbox": [ + 383, + 493, + 452, + 507 + ], + "score": 1.0, + "content": ". We hand-tuned", + "type": "text" + }, + { + "bbox": [ + 453, + 496, + 459, + 506 + ], + "score": 0.79, + "content": "p", + "type": "inline_equation" + }, + { + "bbox": [ + 459, + 493, + 506, + 507 + ], + "score": 1.0, + "content": ",arriving at", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 505, + 427, + 518 + ], + "spans": [ + { + "bbox": [ + 106, + 506, + 144, + 517 + ], + "score": 0.89, + "content": "p = 0 . 0 1", + "type": "inline_equation" + }, + { + "bbox": [ + 144, + 505, + 427, + 518 + ], + "score": 1.0, + "content": "for all problems. 
We set the stepsize to its maximum allowed value of", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 20.5, + "bbox_fs": [ + 105, + 493, + 506, + 518 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 268, + 527, + 343, + 553 + ], + "lines": [ + { + "bbox": [ + 268, + 527, + 343, + 553 + ], + "spans": [ + { + "bbox": [ + 268, + 527, + 343, + 553 + ], + "score": 0.95, + "content": "\\tau = { \\frac { 1 - \\sqrt { 1 - p } } { 2 L } } .", + "type": "interline_equation", + "image_path": "1d2d594ab5b0a4bd7eba2426ebd3b854f23aa6cc165fa808793d71d980fa5604.jpg" + } + ] + } + ], + "index": 22, + "virtual_lines": [ + { + "bbox": [ + 268, + 527, + 343, + 553 + ], + "spans": [], + "index": 22 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 568, + 506, + 646 + ], + "lines": [ + { + "bbox": [ + 106, + 569, + 506, + 581 + ], + "spans": [ + { + "bbox": [ + 106, + 569, + 506, + 581 + ], + "score": 1.0, + "content": "Plots versus Epoch Figure 2 plots the performance of each method versus epoch (i.e. data pass).", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 580, + 506, + 592 + ], + "spans": [ + { + "bbox": [ + 106, + 580, + 506, + 592 + ], + "score": 1.0, + "content": "This shows an even more dramatic benefit for the stochastic methods than the plots versus time,", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 106, + 590, + 505, + 603 + ], + "spans": [ + { + "bbox": [ + 106, + 590, + 505, + 603 + ], + "score": 1.0, + "content": "since at each iteration the stochastic methods only need to process small amounts of data, whereas", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 106, + 602, + 505, + 613 + ], + "spans": [ + { + "bbox": [ + 106, + 602, + 505, + 613 + ], + "score": 1.0, + "content": "deterministic methods must process all of it. 
We believe these benefits do not fully manifest", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 106, + 613, + 505, + 624 + ], + "spans": [ + { + "bbox": [ + 106, + 613, + 505, + 624 + ], + "score": 1.0, + "content": "themselves in the plots versus time due to overheads in each iteration of the stochastic methods,", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 106, + 624, + 505, + 635 + ], + "spans": [ + { + "bbox": [ + 106, + 624, + 505, + 635 + ], + "score": 1.0, + "content": "multithreading providing a boost for the deterministic methods, memory access patterns, and other", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 635, + 206, + 646 + ], + "spans": [ + { + "bbox": [ + 106, + 635, + 206, + 646 + ], + "score": 1.0, + "content": "practical considerations.", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 26, + "bbox_fs": [ + 106, + 569, + 506, + 646 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 664, + 505, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 663, + 505, + 677 + ], + "spans": [ + { + "bbox": [ + 105, + 663, + 505, + 677 + ], + "score": 1.0, + "content": "Fraction of Nonzero Entries versus Running time Figure 3 plots the fraction of nonzero entries", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 103, + 673, + 507, + 691 + ], + "spans": [ + { + "bbox": [ + 103, + 673, + 507, + 691 + ], + "score": 1.0, + "content": "in the iterates of each method versus running time. For each method, we used output of proxckΒ·k1.", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 106, + 687, + 504, + 700 + ], + "spans": [ + { + "bbox": [ + 106, + 687, + 504, + 700 + ], + "score": 1.0, + "content": "We observe that our methods produce sparse intermediate iterates for two of the three problems. 
This", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 698, + 505, + 711 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 505, + 711 + ], + "score": 1.0, + "content": "is one of the benefits of proximal splitting algorithms in general, including our method. For the other", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 709, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 381, + 722 + ], + "score": 1.0, + "content": "problem, SUSY, no method produces sparse iterates, suggesting that", + "type": "text" + }, + { + "bbox": [ + 381, + 712, + 387, + 720 + ], + "score": 0.7, + "content": "c", + "type": "inline_equation" + }, + { + "bbox": [ + 387, + 709, + 505, + 722 + ], + "score": 1.0, + "content": "should be increased if sparse", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 106, + 721, + 192, + 732 + ], + "spans": [ + { + "bbox": [ + 106, + 721, + 192, + 732 + ], + "score": 1.0, + "content": "solutions are desired.", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 32.5, + "bbox_fs": [ + 103, + 663, + 507, + 732 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "image", + "bbox": [ + 110, + 90, + 493, + 181 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 110, + 90, + 493, + 181 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 110, + 90, + 493, + 181 + ], + "spans": [ + { + "bbox": [ + 110, + 90, + 493, + 181 + ], + "score": 0.965, + "type": "image", + "image_path": "7829a04c81fd4e0356905a55be1d4c62aec1a4bb1bef565b9369bc3a3ec4170b.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 110, + 90, + 493, + 120.33333333333333 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 110, + 120.33333333333333, + 493, + 150.66666666666666 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 110, + 150.66666666666666, + 493, + 181.0 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 186, + 505, + 
209 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 185, + 506, + 199 + ], + "spans": [ + { + "bbox": [ + 105, + 185, + 506, + 199 + ], + "score": 1.0, + "content": "Figure 3: Fraction of nonzero entries versus running time for the three datasets. Left: epsilon, middle:", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 197, + 198, + 210 + ], + "spans": [ + { + "bbox": [ + 106, + 197, + 198, + 210 + ], + "score": 1.0, + "content": "SUSY, right: real-sim.", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 3.5 + } + ], + "index": 2.25 + }, + { + "type": "title", + "bbox": [ + 107, + 229, + 413, + 242 + ], + "lines": [ + { + "bbox": [ + 105, + 228, + 415, + 244 + ], + "spans": [ + { + "bbox": [ + 105, + 228, + 415, + 244 + ], + "score": 1.0, + "content": "J LOCAL CONVERGENCE ON NON-MONOTONE PROBLEMS", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 5 + }, + { + "type": "text", + "bbox": [ + 107, + 254, + 506, + 309 + ], + "lines": [ + { + "bbox": [ + 105, + 253, + 506, + 267 + ], + "spans": [ + { + "bbox": [ + 105, + 253, + 506, + 267 + ], + "score": 1.0, + "content": "The work by Hsieh et al. (2020) provides a local convergence analysis for DSEG applied to locally", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 265, + 506, + 277 + ], + "spans": [ + { + "bbox": [ + 106, + 265, + 476, + 277 + ], + "score": 1.0, + "content": "monotone problems. 
Recall that DSEG is equivalent to the special case of SPS for which", + "type": "text" + }, + { + "bbox": [ + 476, + 266, + 503, + 275 + ], + "score": 0.89, + "content": "n = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 503, + 265, + 506, + 277 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 275, + 507, + 289 + ], + "spans": [ + { + "bbox": [ + 105, + 275, + 507, + 289 + ], + "score": 1.0, + "content": "While extending this result to the more general setting of SPS is beyond the scope of this manuscript,", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 287, + 506, + 300 + ], + "spans": [ + { + "bbox": [ + 106, + 287, + 506, + 300 + ], + "score": 1.0, + "content": "we next provide a preliminary sketch of how the analysis of Hsieh et al. (2020) might be generalized", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 298, + 325, + 311 + ], + "spans": [ + { + "bbox": [ + 105, + 298, + 325, + 311 + ], + "score": 1.0, + "content": "to our setting. 
We leave a formal proof to future work.", + "type": "text" + } + ], + "index": 10 + } + ], + "index": 8 + }, + { + "type": "text", + "bbox": [ + 107, + 321, + 505, + 377 + ], + "lines": [ + { + "bbox": [ + 106, + 321, + 505, + 333 + ], + "spans": [ + { + "bbox": [ + 106, + 321, + 505, + 333 + ], + "score": 1.0, + "content": "Sketch of assumptions and main result The first assumption needed is the existence of an isolated", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 331, + 507, + 347 + ], + "spans": [ + { + "bbox": [ + 105, + 331, + 141, + 347 + ], + "score": 1.0, + "content": "solution", + "type": "text" + }, + { + "bbox": [ + 142, + 333, + 263, + 345 + ], + "score": 0.92, + "content": "p ^ { * } = ( z ^ { * } , w _ { 1 } ^ { * } , \\ldots , w _ { n + 1 } ^ { * } ) \\in \\mathcal { S }", + "type": "inline_equation" + }, + { + "bbox": [ + 264, + 331, + 425, + 347 + ], + "score": 1.0, + "content": ". We then require that there exists a ball", + "type": "text" + }, + { + "bbox": [ + 425, + 332, + 454, + 344 + ], + "score": 0.92, + "content": "\\mathbb { B } _ { r } \\big ( z ^ { * } \\big )", + "type": "inline_equation" + }, + { + "bbox": [ + 455, + 331, + 507, + 347 + ], + "score": 1.0, + "content": ", centered at", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 342, + 505, + 356 + ], + "spans": [ + { + "bbox": [ + 106, + 344, + 117, + 354 + ], + "score": 0.84, + "content": "z ^ { * }", + "type": "inline_equation" + }, + { + "bbox": [ + 118, + 342, + 246, + 356 + ], + "score": 1.0, + "content": ", throughout which the operator", + "type": "text" + }, + { + "bbox": [ + 246, + 345, + 255, + 353 + ], + "score": 0.81, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 256, + 342, + 505, + 356 + ], + "score": 1.0, + "content": "is β€œwell-behaved”, meaning that it satisfies monotonicity and", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 354, + 505, + 366 + ], + "spans": [ + { + 
"bbox": [ + 106, + 354, + 293, + 366 + ], + "score": 1.0, + "content": "Lipschitz continuity. In addition, we need each", + "type": "text" + }, + { + "bbox": [ + 294, + 354, + 305, + 365 + ], + "score": 0.88, + "content": "A _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 306, + 354, + 323, + 366 + ], + "score": 1.0, + "content": ", for", + "type": "text" + }, + { + "bbox": [ + 323, + 355, + 357, + 365 + ], + "score": 0.9, + "content": "i \\in 1 . . n", + "type": "inline_equation" + }, + { + "bbox": [ + 357, + 354, + 505, + 366 + ], + "score": 1.0, + "content": ", to be maximal monotone within this", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 365, + 426, + 378 + ], + "spans": [ + { + "bbox": [ + 106, + 365, + 426, + 378 + ], + "score": 1.0, + "content": "ball. Outside of the ball, the operators do not need to be monotone or Lipschitz.", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 13 + }, + { + "type": "text", + "bbox": [ + 107, + 382, + 505, + 428 + ], + "lines": [ + { + "bbox": [ + 105, + 381, + 505, + 396 + ], + "spans": [ + { + "bbox": [ + 105, + 381, + 279, + 396 + ], + "score": 1.0, + "content": "Following (Hsieh et al., 2020, Assumption", + "type": "text" + }, + { + "bbox": [ + 280, + 383, + 289, + 393 + ], + "score": 0.71, + "content": "2 ^ { \\prime }", + "type": "inline_equation" + }, + { + "bbox": [ + 289, + 381, + 505, + 396 + ], + "score": 1.0, + "content": "), the noise variance assumptions are slightly stronger", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 392, + 504, + 406 + ], + "spans": [ + { + "bbox": [ + 105, + 392, + 329, + 406 + ], + "score": 1.0, + "content": "than in the monotone case. 
In particular, we require that", + "type": "text" + }, + { + "bbox": [ + 329, + 393, + 407, + 405 + ], + "score": 0.92, + "content": "\\mathbb { E } [ \\| \\epsilon ^ { k } \\| ^ { q } | \\mathcal { F } _ { k } ] \\le \\dot { N } ^ { q }", + "type": "inline_equation" + }, + { + "bbox": [ + 407, + 392, + 425, + 406 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 425, + 393, + 504, + 405 + ], + "score": 0.92, + "content": "\\mathbb { E } [ \\| e ^ { k } \\| ^ { q } | \\dot { \\mathcal { F } } _ { k } ] \\le \\mathsf { \\bar { N } } ^ { q }", + "type": "inline_equation" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 404, + 505, + 417 + ], + "spans": [ + { + "bbox": [ + 105, + 404, + 144, + 417 + ], + "score": 1.0, + "content": "for some", + "type": "text" + }, + { + "bbox": [ + 144, + 405, + 168, + 416 + ], + "score": 0.89, + "content": "q > 2", + "type": "inline_equation" + }, + { + "bbox": [ + 169, + 404, + 505, + 417 + ], + "score": 1.0, + "content": ". As before, the noise must be zero-mean. 
Finally, the stepsize requirements are also", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 414, + 417, + 429 + ], + "spans": [ + { + "bbox": [ + 105, + 414, + 351, + 429 + ], + "score": 1.0, + "content": "slightly stronger than (12), having the added assumption that", + "type": "text" + }, + { + "bbox": [ + 351, + 415, + 412, + 428 + ], + "score": 0.93, + "content": "\\textstyle \\sum _ { k = 1 } ^ { \\infty } \\rho _ { k } ^ { q } < \\infty", + "type": "inline_equation" + }, + { + "bbox": [ + 413, + 414, + 417, + 429 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 17.5 + }, + { + "type": "text", + "bbox": [ + 106, + 433, + 503, + 456 + ], + "lines": [ + { + "bbox": [ + 105, + 432, + 505, + 446 + ], + "spans": [ + { + "bbox": [ + 105, + 432, + 404, + 446 + ], + "score": 1.0, + "content": "With these assumptions, the goal is to show that, so long as the initial point", + "type": "text" + }, + { + "bbox": [ + 405, + 433, + 415, + 444 + ], + "score": 0.88, + "content": "p ^ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 415, + 432, + 505, + 446 + ], + "score": 1.0, + "content": "is sufficiently close to", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 443, + 307, + 458 + ], + "spans": [ + { + "bbox": [ + 106, + 445, + 117, + 456 + ], + "score": 0.86, + "content": "p ^ { * }", + "type": "inline_equation" + }, + { + "bbox": [ + 117, + 443, + 227, + 458 + ], + "score": 1.0, + "content": ", then with high probability", + "type": "text" + }, + { + "bbox": [ + 227, + 444, + 239, + 456 + ], + "score": 0.89, + "content": "p ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 239, + 443, + 292, + 458 + ], + "score": 1.0, + "content": "converges to", + "type": "text" + }, + { + "bbox": [ + 292, + 445, + 302, + 456 + ], + "score": 0.88, + "content": "p ^ { * }", + "type": "inline_equation" + }, + { + "bbox": [ + 303, + 443, + 307, + 458 + ], + "score": 1.0, + 
"content": ".", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 20.5 + }, + { + "type": "text", + "bbox": [ + 104, + 467, + 504, + 490 + ], + "lines": [ + { + "bbox": [ + 105, + 466, + 506, + 481 + ], + "spans": [ + { + "bbox": [ + 105, + 466, + 506, + 481 + ], + "score": 1.0, + "content": "Proof strategy The initial strategy is to develop the following recursion, satisfied by SPS, that does", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 478, + 343, + 492 + ], + "spans": [ + { + "bbox": [ + 105, + 478, + 343, + 492 + ], + "score": 1.0, + "content": "not (yet) utilize local monotonicity or Lipschitz continuity:", + "type": "text" + } + ], + "index": 23 + } + ], + "index": 22.5 + }, + { + "type": "interline_equation", + "bbox": [ + 137, + 494, + 474, + 529 + ], + "lines": [ + { + "bbox": [ + 137, + 494, + 474, + 529 + ], + "spans": [ + { + "bbox": [ + 137, + 494, + 474, + 529 + ], + "score": 0.92, + "content": "\\begin{array} { r l } & { \\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } \\leq ( 1 + c _ { 1 } \\alpha _ { k } ^ { 2 } ) \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - c _ { 2 } \\alpha _ { k } \\rho _ { k } ( T _ { k } ^ { \\prime } + l _ { k } + r _ { k } ) - c _ { 3 } \\alpha _ { k } ( r _ { k } ^ { \\prime } + q _ { k } ) } \\\\ & { \\qquad + c _ { 1 } \\alpha _ { k } ^ { 2 } \\big ( \\| e ^ { k } \\| ^ { 2 } + \\| \\epsilon ^ { k } \\| ^ { 2 } + c _ { 4 } \\big ) + c _ { 5 } \\alpha _ { k } q _ { k } ^ { \\prime } } \\end{array}", + "type": "interline_equation", + "image_path": "7bf4da9c6f11162f41c376cf0be6ea4f7ad92bfa09374aa8db24c9a79394cf79.jpg" + } + ] + } + ], + "index": 25, + "virtual_lines": [ + { + "bbox": [ + 137, + 494, + 474, + 505.6666666666667 + ], + "spans": [], + "index": 24 + }, + { + "bbox": [ + 137, + 505.6666666666667, + 474, + 517.3333333333334 + ], + "spans": [], + "index": 25 + }, + { + "bbox": [ + 137, + 517.3333333333334, + 474, + 529.0 + ], + "spans": [], + "index": 26 + } + ] + }, + { + "type": "text", + 
"bbox": [ + 106, + 531, + 366, + 543 + ], + "lines": [ + { + "bbox": [ + 104, + 529, + 367, + 546 + ], + "spans": [ + { + "bbox": [ + 104, + 529, + 208, + 546 + ], + "score": 1.0, + "content": "for appropriate constants", + "type": "text" + }, + { + "bbox": [ + 208, + 532, + 260, + 542 + ], + "score": 0.91, + "content": "c _ { 1 } \\ldots c _ { 5 } \\geq 0", + "type": "inline_equation" + }, + { + "bbox": [ + 261, + 529, + 367, + 546 + ], + "score": 1.0, + "content": ". In this inequality, we use", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 27 + }, + { + "type": "interline_equation", + "bbox": [ + 178, + 547, + 432, + 689 + ], + "lines": [ + { + "bbox": [ + 178, + 547, + 432, + 689 + ], + "spans": [ + { + "bbox": [ + 178, + 547, + 432, + 689 + ], + "score": 0.94, + "content": "\\begin{array} { l } { \\displaystyle T _ { k } ^ { \\prime } \\stackrel { \\prime } { = } \\frac { \\tau } { \\overline { { \\rho } } } \\displaystyle \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\frac { 1 } { \\overline { { \\rho } } \\tau } \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } , } \\\\ { \\displaystyle l _ { k } \\stackrel { \\prime } { = } \\displaystyle \\sum _ { i = 1 } ^ { n } \\langle z ^ { * } - x _ { i } ^ { k } , w _ { i } ^ { * } - y _ { i } ^ { k } \\rangle + \\big \\langle z ^ { * } - x _ { n + 1 } ^ { k } , w _ { i } ^ { * } - B ( x _ { n + 1 } ^ { k } ) \\big \\rangle , } \\\\ { \\displaystyle r _ { k } \\stackrel { \\prime } { = } \\big \\langle k ^ { \\ell } , B ( \\tilde { x } ^ { k } ) - w _ { n + 1 } ^ { k } \\big \\rangle , } \\\\ { \\displaystyle r _ { k } ^ { \\prime } \\stackrel { \\prime } { = } \\big \\langle z ^ { k } - z ^ { * } , e ^ { k } \\big \\rangle , } \\\\ { \\displaystyle q _ { k } \\triangleq \\big ( \\rho _ { k } ^ { - 1 } - d / 2 \\big ) \\| \\tilde { x } ^ { k } - z ^ { k } \\| ^ { 2 } - \\| \\tilde { x } ^ { k } - z ^ { k } \\| \\| B ( \\tilde { x 
} ^ { k } ) - B ( z ^ { k } ) \\| } \\\\ { \\displaystyle q _ { k } ^ { \\prime } \\stackrel { \\prime } { = } \\rho _ { k } \\| \\epsilon ^ { k } \\| \\| B x _ { n + 1 } ^ { k } - B \\tilde { x } ^ { k } \\| + \\frac { 1 } { 2 d } \\| B \\tilde { x } _ { n + 1 } ^ { k } - B x _ { n + 1 } ^ { k } \\| ^ { 2 } , } \\end{array}", + "type": "interline_equation", + "image_path": "f3f22522325ad822559e198d45d55b9d852139e6bd903ed78054b5cb1e6a4087.jpg" + } + ] + } + ], + "index": 29, + "virtual_lines": [ + { + "bbox": [ + 178, + 547, + 432, + 594.3333333333334 + ], + "spans": [], + "index": 28 + }, + { + "bbox": [ + 178, + 594.3333333333334, + 432, + 641.6666666666667 + ], + "spans": [], + "index": 29 + }, + { + "bbox": [ + 178, + 641.6666666666667, + 432, + 689.0000000000001 + ], + "spans": [], + "index": 30 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 690, + 133, + 701 + ], + "lines": [ + { + "bbox": [ + 105, + 689, + 135, + 703 + ], + "spans": [ + { + "bbox": [ + 105, + 689, + 135, + 703 + ], + "score": 1.0, + "content": "where", + "type": "text" + } + ], + "index": 31 + } + ], + "index": 31 + }, + { + "type": "interline_equation", + "bbox": [ + 178, + 705, + 433, + 730 + ], + "lines": [ + { + "bbox": [ + 178, + 705, + 433, + 730 + ], + "spans": [ + { + "bbox": [ + 178, + 705, + 433, + 730 + ], + "score": 0.88, + "content": "\\tilde { x } ^ { k } \\doteq z ^ { k } - \\rho _ { k } \\bigl ( B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\bigr ) \\qquad d \\doteq \\frac { 1 - \\overline { { \\rho } } L } { 1 + \\overline { { \\rho } } / 2 } ,", + "type": "interline_equation", + "image_path": "ed91628b31c8533c4865e193ba80f7b606977d0725547c8ef5a119d74de7a711.jpg" + } + ] + } + ], + "index": 32, + "virtual_lines": [ + { + "bbox": [ + 178, + 705, + 433, + 730 + ], + "spans": [], + "index": 32 + } + ] + } + ], + "page_idx": 34, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 106, + 27, + 308, + 37 + ], + "lines": [ + { + 
"bbox": [ + 106, + 26, + 308, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 308, + 38 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 760 + ], + "lines": [ + { + "bbox": [ + 298, + 750, + 313, + 763 + ], + "spans": [ + { + "bbox": [ + 298, + 750, + 313, + 763 + ], + "score": 1.0, + "content": "35", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "image", + "bbox": [ + 110, + 90, + 493, + 181 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 110, + 90, + 493, + 181 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 110, + 90, + 493, + 181 + ], + "spans": [ + { + "bbox": [ + 110, + 90, + 493, + 181 + ], + "score": 0.965, + "type": "image", + "image_path": "7829a04c81fd4e0356905a55be1d4c62aec1a4bb1bef565b9369bc3a3ec4170b.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 110, + 90, + 493, + 120.33333333333333 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 110, + 120.33333333333333, + 493, + 150.66666666666666 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 110, + 150.66666666666666, + 493, + 181.0 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 186, + 505, + 209 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 185, + 506, + 199 + ], + "spans": [ + { + "bbox": [ + 105, + 185, + 506, + 199 + ], + "score": 1.0, + "content": "Figure 3: Fraction of nonzero entries versus running time for the three datasets. 
Left: epsilon, middle:", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 197, + 198, + 210 + ], + "spans": [ + { + "bbox": [ + 106, + 197, + 198, + 210 + ], + "score": 1.0, + "content": "SUSY, right: real-sim.", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 3.5 + } + ], + "index": 2.25 + }, + { + "type": "title", + "bbox": [ + 107, + 229, + 413, + 242 + ], + "lines": [ + { + "bbox": [ + 105, + 228, + 415, + 244 + ], + "spans": [ + { + "bbox": [ + 105, + 228, + 415, + 244 + ], + "score": 1.0, + "content": "J LOCAL CONVERGENCE ON NON-MONOTONE PROBLEMS", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 5 + }, + { + "type": "text", + "bbox": [ + 107, + 254, + 506, + 309 + ], + "lines": [ + { + "bbox": [ + 105, + 253, + 506, + 267 + ], + "spans": [ + { + "bbox": [ + 105, + 253, + 506, + 267 + ], + "score": 1.0, + "content": "The work by Hsieh et al. (2020) provides a local convergence analysis for DSEG applied to locally", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 265, + 506, + 277 + ], + "spans": [ + { + "bbox": [ + 106, + 265, + 476, + 277 + ], + "score": 1.0, + "content": "monotone problems. 
Recall that DSEG is equivalent to the special case of SPS for which", + "type": "text" + }, + { + "bbox": [ + 476, + 266, + 503, + 275 + ], + "score": 0.89, + "content": "n = 0", + "type": "inline_equation" + }, + { + "bbox": [ + 503, + 265, + 506, + 277 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 275, + 507, + 289 + ], + "spans": [ + { + "bbox": [ + 105, + 275, + 507, + 289 + ], + "score": 1.0, + "content": "While extending this result to the more general setting of SPS is beyond the scope of this manuscript,", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 287, + 506, + 300 + ], + "spans": [ + { + "bbox": [ + 106, + 287, + 506, + 300 + ], + "score": 1.0, + "content": "we next provide a preliminary sketch of how the analysis of Hsieh et al. (2020) might be generalized", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 298, + 325, + 311 + ], + "spans": [ + { + "bbox": [ + 105, + 298, + 325, + 311 + ], + "score": 1.0, + "content": "to our setting. 
We leave a formal proof to future work.", + "type": "text" + } + ], + "index": 10 + } + ], + "index": 8, + "bbox_fs": [ + 105, + 253, + 507, + 311 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 321, + 505, + 377 + ], + "lines": [ + { + "bbox": [ + 106, + 321, + 505, + 333 + ], + "spans": [ + { + "bbox": [ + 106, + 321, + 505, + 333 + ], + "score": 1.0, + "content": "Sketch of assumptions and main result The first assumption needed is the existence of an isolated", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 331, + 507, + 347 + ], + "spans": [ + { + "bbox": [ + 105, + 331, + 141, + 347 + ], + "score": 1.0, + "content": "solution", + "type": "text" + }, + { + "bbox": [ + 142, + 333, + 263, + 345 + ], + "score": 0.92, + "content": "p ^ { * } = ( z ^ { * } , w _ { 1 } ^ { * } , \\ldots , w _ { n + 1 } ^ { * } ) \\in \\mathcal { S }", + "type": "inline_equation" + }, + { + "bbox": [ + 264, + 331, + 425, + 347 + ], + "score": 1.0, + "content": ". We then require that there exists a ball", + "type": "text" + }, + { + "bbox": [ + 425, + 332, + 454, + 344 + ], + "score": 0.92, + "content": "\\mathbb { B } _ { r } \\big ( z ^ { * } \\big )", + "type": "inline_equation" + }, + { + "bbox": [ + 455, + 331, + 507, + 347 + ], + "score": 1.0, + "content": ", centered at", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 342, + 505, + 356 + ], + "spans": [ + { + "bbox": [ + 106, + 344, + 117, + 354 + ], + "score": 0.84, + "content": "z ^ { * }", + "type": "inline_equation" + }, + { + "bbox": [ + 118, + 342, + 246, + 356 + ], + "score": 1.0, + "content": ", throughout which the operator", + "type": "text" + }, + { + "bbox": [ + 246, + 345, + 255, + 353 + ], + "score": 0.81, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 256, + 342, + 505, + 356 + ], + "score": 1.0, + "content": "is β€œwell-behaved”, meaning that it satisfies monotonicity and", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 106, 
+ 354, + 505, + 366 + ], + "spans": [ + { + "bbox": [ + 106, + 354, + 293, + 366 + ], + "score": 1.0, + "content": "Lipschitz continuity. In addition, we need each", + "type": "text" + }, + { + "bbox": [ + 294, + 354, + 305, + 365 + ], + "score": 0.88, + "content": "A _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 306, + 354, + 323, + 366 + ], + "score": 1.0, + "content": ", for", + "type": "text" + }, + { + "bbox": [ + 323, + 355, + 357, + 365 + ], + "score": 0.9, + "content": "i \\in 1 . . n", + "type": "inline_equation" + }, + { + "bbox": [ + 357, + 354, + 505, + 366 + ], + "score": 1.0, + "content": ", to be maximal monotone within this", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 365, + 426, + 378 + ], + "spans": [ + { + "bbox": [ + 106, + 365, + 426, + 378 + ], + "score": 1.0, + "content": "ball. Outside of the ball, the operators do not need to be monotone or Lipschitz.", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 13, + "bbox_fs": [ + 105, + 321, + 507, + 378 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 382, + 505, + 428 + ], + "lines": [ + { + "bbox": [ + 105, + 381, + 505, + 396 + ], + "spans": [ + { + "bbox": [ + 105, + 381, + 279, + 396 + ], + "score": 1.0, + "content": "Following (Hsieh et al., 2020, Assumption", + "type": "text" + }, + { + "bbox": [ + 280, + 383, + 289, + 393 + ], + "score": 0.71, + "content": "2 ^ { \\prime }", + "type": "inline_equation" + }, + { + "bbox": [ + 289, + 381, + 505, + 396 + ], + "score": 1.0, + "content": "), the noise variance assumptions are slightly stronger", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 392, + 504, + 406 + ], + "spans": [ + { + "bbox": [ + 105, + 392, + 329, + 406 + ], + "score": 1.0, + "content": "than in the monotone case. 
In particular, we require that", + "type": "text" + }, + { + "bbox": [ + 329, + 393, + 407, + 405 + ], + "score": 0.92, + "content": "\\mathbb { E } [ \\| \\epsilon ^ { k } \\| ^ { q } | \\mathcal { F } _ { k } ] \\le \\dot { N } ^ { q }", + "type": "inline_equation" + }, + { + "bbox": [ + 407, + 392, + 425, + 406 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 425, + 393, + 504, + 405 + ], + "score": 0.92, + "content": "\\mathbb { E } [ \\| e ^ { k } \\| ^ { q } | \\dot { \\mathcal { F } } _ { k } ] \\le \\mathsf { \\bar { N } } ^ { q }", + "type": "inline_equation" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 404, + 505, + 417 + ], + "spans": [ + { + "bbox": [ + 105, + 404, + 144, + 417 + ], + "score": 1.0, + "content": "for some", + "type": "text" + }, + { + "bbox": [ + 144, + 405, + 168, + 416 + ], + "score": 0.89, + "content": "q > 2", + "type": "inline_equation" + }, + { + "bbox": [ + 169, + 404, + 505, + 417 + ], + "score": 1.0, + "content": ". As before, the noise must be zero-mean. 
Finally, the stepsize requirements are also", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 414, + 417, + 429 + ], + "spans": [ + { + "bbox": [ + 105, + 414, + 351, + 429 + ], + "score": 1.0, + "content": "slightly stronger than (12), having the added assumption that", + "type": "text" + }, + { + "bbox": [ + 351, + 415, + 412, + 428 + ], + "score": 0.93, + "content": "\\textstyle \\sum _ { k = 1 } ^ { \\infty } \\rho _ { k } ^ { q } < \\infty", + "type": "inline_equation" + }, + { + "bbox": [ + 413, + 414, + 417, + 429 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 17.5, + "bbox_fs": [ + 105, + 381, + 505, + 429 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 433, + 503, + 456 + ], + "lines": [ + { + "bbox": [ + 105, + 432, + 505, + 446 + ], + "spans": [ + { + "bbox": [ + 105, + 432, + 404, + 446 + ], + "score": 1.0, + "content": "With these assumptions, the goal is to show that, so long as the initial point", + "type": "text" + }, + { + "bbox": [ + 405, + 433, + 415, + 444 + ], + "score": 0.88, + "content": "p ^ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 415, + 432, + 505, + 446 + ], + "score": 1.0, + "content": "is sufficiently close to", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 443, + 307, + 458 + ], + "spans": [ + { + "bbox": [ + 106, + 445, + 117, + 456 + ], + "score": 0.86, + "content": "p ^ { * }", + "type": "inline_equation" + }, + { + "bbox": [ + 117, + 443, + 227, + 458 + ], + "score": 1.0, + "content": ", then with high probability", + "type": "text" + }, + { + "bbox": [ + 227, + 444, + 239, + 456 + ], + "score": 0.89, + "content": "p ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 239, + 443, + 292, + 458 + ], + "score": 1.0, + "content": "converges to", + "type": "text" + }, + { + "bbox": [ + 292, + 445, + 302, + 456 + ], + "score": 0.88, + "content": "p ^ { * }", + "type": "inline_equation" + }, + { + "bbox": [ + 
303, + 443, + 307, + 458 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 20.5, + "bbox_fs": [ + 105, + 432, + 505, + 458 + ] + }, + { + "type": "text", + "bbox": [ + 104, + 467, + 504, + 490 + ], + "lines": [ + { + "bbox": [ + 105, + 466, + 506, + 481 + ], + "spans": [ + { + "bbox": [ + 105, + 466, + 506, + 481 + ], + "score": 1.0, + "content": "Proof strategy The initial strategy is to develop the following recursion, satisfied by SPS, that does", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 478, + 343, + 492 + ], + "spans": [ + { + "bbox": [ + 105, + 478, + 343, + 492 + ], + "score": 1.0, + "content": "not (yet) utilize local monotonicity or Lipschitz continuity:", + "type": "text" + } + ], + "index": 23 + } + ], + "index": 22.5, + "bbox_fs": [ + 105, + 466, + 506, + 492 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 137, + 494, + 474, + 529 + ], + "lines": [ + { + "bbox": [ + 137, + 494, + 474, + 529 + ], + "spans": [ + { + "bbox": [ + 137, + 494, + 474, + 529 + ], + "score": 0.92, + "content": "\\begin{array} { r l } & { \\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } \\leq ( 1 + c _ { 1 } \\alpha _ { k } ^ { 2 } ) \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - c _ { 2 } \\alpha _ { k } \\rho _ { k } ( T _ { k } ^ { \\prime } + l _ { k } + r _ { k } ) - c _ { 3 } \\alpha _ { k } ( r _ { k } ^ { \\prime } + q _ { k } ) } \\\\ & { \\qquad + c _ { 1 } \\alpha _ { k } ^ { 2 } \\big ( \\| e ^ { k } \\| ^ { 2 } + \\| \\epsilon ^ { k } \\| ^ { 2 } + c _ { 4 } \\big ) + c _ { 5 } \\alpha _ { k } q _ { k } ^ { \\prime } } \\end{array}", + "type": "interline_equation", + "image_path": "7bf4da9c6f11162f41c376cf0be6ea4f7ad92bfa09374aa8db24c9a79394cf79.jpg" + } + ] + } + ], + "index": 25, + "virtual_lines": [ + { + "bbox": [ + 137, + 494, + 474, + 505.6666666666667 + ], + "spans": [], + "index": 24 + }, + { + "bbox": [ + 137, + 505.6666666666667, + 474, + 517.3333333333334 + ], + "spans": [], + 
"index": 25 + }, + { + "bbox": [ + 137, + 517.3333333333334, + 474, + 529.0 + ], + "spans": [], + "index": 26 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 531, + 366, + 543 + ], + "lines": [ + { + "bbox": [ + 104, + 529, + 367, + 546 + ], + "spans": [ + { + "bbox": [ + 104, + 529, + 208, + 546 + ], + "score": 1.0, + "content": "for appropriate constants", + "type": "text" + }, + { + "bbox": [ + 208, + 532, + 260, + 542 + ], + "score": 0.91, + "content": "c _ { 1 } \\ldots c _ { 5 } \\geq 0", + "type": "inline_equation" + }, + { + "bbox": [ + 261, + 529, + 367, + 546 + ], + "score": 1.0, + "content": ". In this inequality, we use", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 27, + "bbox_fs": [ + 104, + 529, + 367, + 546 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 178, + 547, + 432, + 689 + ], + "lines": [ + { + "bbox": [ + 178, + 547, + 432, + 689 + ], + "spans": [ + { + "bbox": [ + 178, + 547, + 432, + 689 + ], + "score": 0.94, + "content": "\\begin{array} { l } { \\displaystyle T _ { k } ^ { \\prime } \\stackrel { \\prime } { = } \\frac { \\tau } { \\overline { { \\rho } } } \\displaystyle \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\frac { 1 } { \\overline { { \\rho } } \\tau } \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } , } \\\\ { \\displaystyle l _ { k } \\stackrel { \\prime } { = } \\displaystyle \\sum _ { i = 1 } ^ { n } \\langle z ^ { * } - x _ { i } ^ { k } , w _ { i } ^ { * } - y _ { i } ^ { k } \\rangle + \\big \\langle z ^ { * } - x _ { n + 1 } ^ { k } , w _ { i } ^ { * } - B ( x _ { n + 1 } ^ { k } ) \\big \\rangle , } \\\\ { \\displaystyle r _ { k } \\stackrel { \\prime } { = } \\big \\langle k ^ { \\ell } , B ( \\tilde { x } ^ { k } ) - w _ { n + 1 } ^ { k } \\big \\rangle , } \\\\ { \\displaystyle r _ { k } ^ { \\prime } \\stackrel { \\prime } { = } \\big \\langle z ^ { k } - z ^ { * } , e ^ { k } \\big \\rangle , } \\\\ { 
\\displaystyle q _ { k } \\triangleq \\big ( \\rho _ { k } ^ { - 1 } - d / 2 \\big ) \\| \\tilde { x } ^ { k } - z ^ { k } \\| ^ { 2 } - \\| \\tilde { x } ^ { k } - z ^ { k } \\| \\| B ( \\tilde { x } ^ { k } ) - B ( z ^ { k } ) \\| } \\\\ { \\displaystyle q _ { k } ^ { \\prime } \\stackrel { \\prime } { = } \\rho _ { k } \\| \\epsilon ^ { k } \\| \\| B x _ { n + 1 } ^ { k } - B \\tilde { x } ^ { k } \\| + \\frac { 1 } { 2 d } \\| B \\tilde { x } _ { n + 1 } ^ { k } - B x _ { n + 1 } ^ { k } \\| ^ { 2 } , } \\end{array}", + "type": "interline_equation", + "image_path": "f3f22522325ad822559e198d45d55b9d852139e6bd903ed78054b5cb1e6a4087.jpg" + } + ] + } + ], + "index": 29, + "virtual_lines": [ + { + "bbox": [ + 178, + 547, + 432, + 594.3333333333334 + ], + "spans": [], + "index": 28 + }, + { + "bbox": [ + 178, + 594.3333333333334, + 432, + 641.6666666666667 + ], + "spans": [], + "index": 29 + }, + { + "bbox": [ + 178, + 641.6666666666667, + 432, + 689.0000000000001 + ], + "spans": [], + "index": 30 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 690, + 133, + 701 + ], + "lines": [ + { + "bbox": [ + 105, + 689, + 135, + 703 + ], + "spans": [ + { + "bbox": [ + 105, + 689, + 135, + 703 + ], + "score": 1.0, + "content": "where", + "type": "text" + } + ], + "index": 31 + } + ], + "index": 31, + "bbox_fs": [ + 105, + 689, + 135, + 703 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 178, + 705, + 433, + 730 + ], + "lines": [ + { + "bbox": [ + 178, + 705, + 433, + 730 + ], + "spans": [ + { + "bbox": [ + 178, + 705, + 433, + 730 + ], + "score": 0.88, + "content": "\\tilde { x } ^ { k } \\doteq z ^ { k } - \\rho _ { k } \\bigl ( B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\bigr ) \\qquad d \\doteq \\frac { 1 - \\overline { { \\rho } } L } { 1 + \\overline { { \\rho } } / 2 } ,", + "type": "interline_equation", + "image_path": "ed91628b31c8533c4865e193ba80f7b606977d0725547c8ef5a119d74de7a711.jpg" + } + ] + } + ], + "index": 32, + "virtual_lines": [ + { + "bbox": 
[ + 178, + 705, + 433, + 730 + ], + "spans": [], + "index": 32 + } + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 81, + 505, + 107 + ], + "lines": [ + { + "bbox": [ + 105, + 80, + 505, + 96 + ], + "spans": [ + { + "bbox": [ + 105, + 80, + 126, + 96 + ], + "score": 1.0, + "content": "with", + "type": "text" + }, + { + "bbox": [ + 127, + 83, + 135, + 92 + ], + "score": 0.79, + "content": "L", + "type": "inline_equation" + }, + { + "bbox": [ + 135, + 80, + 283, + 96 + ], + "score": 1.0, + "content": "being the local Lipschitz constant of", + "type": "text" + }, + { + "bbox": [ + 284, + 83, + 293, + 92 + ], + "score": 0.82, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 293, + 80, + 306, + 96 + ], + "score": 1.0, + "content": "on", + "type": "text" + }, + { + "bbox": [ + 306, + 82, + 336, + 95 + ], + "score": 0.92, + "content": "\\mathbb { B } _ { r } \\big ( z ^ { * } \\big )", + "type": "inline_equation" + }, + { + "bbox": [ + 336, + 80, + 386, + 96 + ], + "score": 1.0, + "content": ". The iterate", + "type": "text" + }, + { + "bbox": [ + 386, + 82, + 397, + 92 + ], + "score": 0.89, + "content": "\\tilde { x } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 398, + 80, + 505, + 96 + ], + "score": 1.0, + "content": "is the analog of the iterate", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 107, + 93, + 249, + 108 + ], + "spans": [ + { + "bbox": [ + 107, + 93, + 137, + 108 + ], + "score": 0.92, + "content": "\\tilde { X } _ { t + 1 / 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 138, + 93, + 249, + 108 + ], + "score": 1.0, + "content": "used in Hsieh et al. 
(2020).", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 0.5 + }, + { + "type": "text", + "bbox": [ + 106, + 111, + 506, + 183 + ], + "lines": [ + { + "bbox": [ + 105, + 110, + 506, + 125 + ], + "spans": [ + { + "bbox": [ + 105, + 110, + 506, + 125 + ], + "score": 1.0, + "content": "The recursion (79) is derived by once again starting from (13) and following the arguments leading to", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 121, + 504, + 137 + ], + "spans": [ + { + "bbox": [ + 104, + 121, + 464, + 137 + ], + "score": 1.0, + "content": "(35), but this time not taking conditional expectations. In particular, the upper bounds on", + "type": "text" + }, + { + "bbox": [ + 464, + 123, + 504, + 135 + ], + "score": 0.92, + "content": "\\| \\nabla _ { z } \\varphi _ { k } \\| ^ { 2 }", + "type": "inline_equation" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 132, + 507, + 149 + ], + "spans": [ + { + "bbox": [ + 104, + 132, + 124, + 149 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 124, + 135, + 168, + 147 + ], + "score": 0.93, + "content": "\\| \\nabla _ { w _ { i } } \\varphi _ { k } \\| ^ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 168, + 132, + 253, + 149 + ], + "score": 1.0, + "content": "contribute the terms", + "type": "text" + }, + { + "bbox": [ + 253, + 134, + 362, + 148 + ], + "score": 0.92, + "content": "c _ { 1 } \\alpha _ { k } ^ { 2 } ( \\| \\bar { e } ^ { k } \\| ^ { 2 } + \\| \\epsilon ^ { k } \\| ^ { 2 } + c _ { 4 } )", + "type": "inline_equation" + }, + { + "bbox": [ + 363, + 132, + 381, + 149 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 381, + 134, + 448, + 147 + ], + "score": 0.92, + "content": "c _ { 1 } \\alpha _ { k } ^ { 2 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 448, + 132, + 469, + 149 + ], + "score": 1.0, + "content": ". 
For", + "type": "text" + }, + { + "bbox": [ + 469, + 136, + 502, + 146 + ], + "score": 0.85, + "content": "i \\in 1 . . n", + "type": "inline_equation" + }, + { + "bbox": [ + 503, + 132, + 507, + 149 + ], + "score": 1.0, + "content": ",", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 147, + 506, + 162 + ], + "spans": [ + { + "bbox": [ + 105, + 147, + 122, + 162 + ], + "score": 1.0, + "content": "the", + "type": "text" + }, + { + "bbox": [ + 122, + 149, + 144, + 160 + ], + "score": 0.89, + "content": "{ } ^ { \\mathfrak { e } } \\varphi _ { i , k }", + "type": "inline_equation" + }, + { + "bbox": [ + 144, + 147, + 191, + 162 + ], + "score": 1.0, + "content": "-gap\" term,", + "type": "text" + }, + { + "bbox": [ + 191, + 147, + 272, + 160 + ], + "score": 0.91, + "content": "\\varphi _ { i , k } ( p ^ { k } ) - \\varphi _ { i , k } ( p ^ { * } )", + "type": "inline_equation" + }, + { + "bbox": [ + 273, + 147, + 506, + 162 + ], + "score": 1.0, + "content": ", is dealt with in a similar manner to Section C.5, but this", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 158, + 506, + 172 + ], + "spans": [ + { + "bbox": [ + 105, + 158, + 336, + 172 + ], + "score": 1.0, + "content": "time not using monotonicity as in (36). This contributes", + "type": "text" + }, + { + "bbox": [ + 336, + 159, + 348, + 171 + ], + "score": 0.89, + "content": "T _ { k } ^ { \\prime }", + "type": "inline_equation" + }, + { + "bbox": [ + 349, + 158, + 432, + 172 + ], + "score": 1.0, + "content": "and the first term in", + "type": "text" + }, + { + "bbox": [ + 432, + 159, + 441, + 170 + ], + "score": 0.85, + "content": "l _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 442, + 158, + 506, + 172 + ], + "score": 1.0, + "content": ". 
Finally, as we", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 168, + 452, + 184 + ], + "spans": [ + { + "bbox": [ + 104, + 168, + 177, + 184 + ], + "score": 1.0, + "content": "sketch below, the", + "type": "text" + }, + { + "bbox": [ + 178, + 171, + 212, + 182 + ], + "score": 0.89, + "content": "{ ^ { \\circ } } \\varphi _ { n + 1 , k }", + "type": "inline_equation" + }, + { + "bbox": [ + 212, + 168, + 302, + 184 + ], + "score": 1.0, + "content": "-gap\" term contributes", + "type": "text" + }, + { + "bbox": [ + 303, + 171, + 356, + 183 + ], + "score": 0.28, + "content": "r _ { k } , r _ { k } ^ { \\prime } , q _ { k } , q _ { k } ^ { \\prime }", + "type": "inline_equation" + }, + { + "bbox": [ + 356, + 168, + 438, + 184 + ], + "score": 1.0, + "content": ", and the last term in", + "type": "text" + }, + { + "bbox": [ + 439, + 171, + 447, + 181 + ], + "score": 0.86, + "content": "l _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 448, + 168, + 452, + 184 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 4.5 + }, + { + "type": "text", + "bbox": [ + 107, + 187, + 505, + 234 + ], + "lines": [ + { + "bbox": [ + 105, + 186, + 506, + 203 + ], + "spans": [ + { + "bbox": [ + 105, + 186, + 141, + 203 + ], + "score": 1.0, + "content": "For the", + "type": "text" + }, + { + "bbox": [ + 142, + 189, + 196, + 201 + ], + "score": 0.57, + "content": "\\cdot \\circ _ { n + 1 , k } \\cdot \\mathbf { g } \\mathbf { a p } ^ { , , }", + "type": "inline_equation" + }, + { + "bbox": [ + 196, + 186, + 232, + 203 + ], + "score": 1.0, + "content": ", that is,", + "type": "text" + }, + { + "bbox": [ + 232, + 187, + 339, + 201 + ], + "score": 0.93, + "content": "\\varphi _ { n + 1 , k } ( p ^ { k } ) - \\varphi _ { n + 1 , k } ( p ^ { * } )", + "type": "inline_equation" + }, + { + "bbox": [ + 339, + 186, + 506, + 203 + ], + "score": 1.0, + "content": ", we have to depart from the analysis in", + "type": "text" 
+ } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 199, + 506, + 213 + ], + "spans": [ + { + "bbox": [ + 105, + 199, + 333, + 213 + ], + "score": 1.0, + "content": "Section C.6 and use an alternative argument involving", + "type": "text" + }, + { + "bbox": [ + 333, + 200, + 345, + 210 + ], + "score": 0.86, + "content": "\\tilde { x } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 345, + 199, + 506, + 213 + ], + "score": 1.0, + "content": ". We now provide some details of this", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 210, + 506, + 224 + ], + "spans": [ + { + "bbox": [ + 104, + 210, + 250, + 224 + ], + "score": 1.0, + "content": "argument: in the following, we use", + "type": "text" + }, + { + "bbox": [ + 250, + 212, + 264, + 221 + ], + "score": 0.87, + "content": "B z", + "type": "inline_equation" + }, + { + "bbox": [ + 264, + 210, + 333, + 224 + ], + "score": 1.0, + "content": "as shorthand for", + "type": "text" + }, + { + "bbox": [ + 333, + 212, + 355, + 223 + ], + "score": 0.92, + "content": "B ( z )", + "type": "inline_equation" + }, + { + "bbox": [ + 355, + 210, + 415, + 224 + ], + "score": 1.0, + "content": "for any vector", + "type": "text" + }, + { + "bbox": [ + 415, + 211, + 445, + 222 + ], + "score": 0.91, + "content": "z \\in \\mathbb { R } ^ { d }", + "type": "inline_equation" + }, + { + "bbox": [ + 446, + 210, + 506, + 224 + ], + "score": 1.0, + "content": ". 
We begin the", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 222, + 162, + 235 + ], + "spans": [ + { + "bbox": [ + 105, + 222, + 162, + 235 + ], + "score": 1.0, + "content": "analysis with", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 9.5 + }, + { + "type": "interline_equation", + "bbox": [ + 173, + 239, + 438, + 286 + ], + "lines": [ + { + "bbox": [ + 173, + 239, + 438, + 286 + ], + "spans": [ + { + "bbox": [ + 173, + 239, + 438, + 286 + ], + "score": 0.75, + "content": "\\begin{array} { r l } & { \\varphi _ { n + 1 , k } ( p ^ { k } ) = \\langle z ^ { k } - x _ { n + 1 } ^ { k } , y _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\rangle } \\\\ & { \\qquad = \\langle z ^ { k } - x _ { n + 1 } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\rangle + \\underbrace { \\langle z ^ { k } - x _ { n + 1 } ^ { k } , e ^ { k } \\rangle } _ { \\mathrm { p a r t } \\mathrm { o f } r _ { k } ^ { \\prime } } . } \\end{array}", + "type": "interline_equation", + "image_path": "7a41c8543df977baa533186e1906ea9f82ac0387eb9048b6fbfc355353936664.jpg" + } + ] + } + ], + "index": 13, + "virtual_lines": [ + { + "bbox": [ + 173, + 239, + 438, + 254.66666666666666 + ], + "spans": [], + "index": 12 + }, + { + "bbox": [ + 173, + 254.66666666666666, + 438, + 270.3333333333333 + ], + "spans": [], + "index": 13 + }, + { + "bbox": [ + 173, + 270.3333333333333, + 438, + 286.0 + ], + "spans": [], + "index": 14 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 295, + 397, + 309 + ], + "lines": [ + { + "bbox": [ + 104, + 292, + 398, + 312 + ], + "spans": [ + { + "bbox": [ + 104, + 292, + 276, + 312 + ], + "score": 1.0, + "content": "The final term will combine with the term", + "type": "text" + }, + { + "bbox": [ + 276, + 295, + 342, + 309 + ], + "score": 0.92, + "content": "\\langle x _ { n + 1 } ^ { k } - z ^ { * } , e ^ { k } \\rangle", + "type": "inline_equation" + }, + { + "bbox": [ + 342, + 292, + 398, + 312 + ], + "score": 1.0, + 
"content": "coming from", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 15 + }, + { + "type": "interline_equation", + "bbox": [ + 165, + 313, + 444, + 348 + ], + "lines": [ + { + "bbox": [ + 165, + 313, + 444, + 348 + ], + "spans": [ + { + "bbox": [ + 165, + 313, + 444, + 348 + ], + "score": 0.92, + "content": "\\begin{array} { r l } & { - \\varphi _ { n + 1 , k } ( p ^ { * } ) = \\langle z ^ { * } - x _ { n + 1 } ^ { k } , w _ { n + 1 } ^ { * } - y _ { n + 1 } ^ { k } \\rangle } \\\\ & { \\qquad = \\langle z ^ { * } - x _ { n + 1 } ^ { k } , w _ { n + 1 } ^ { * } - B x _ { n + 1 } ^ { k } \\rangle + \\langle x _ { n + 1 } ^ { k } - z ^ { * } , e _ { n + 1 } ^ { k } \\rangle } \\end{array}", + "type": "interline_equation", + "image_path": "c578f3123900feba8fdaeb554b4a6137548b55fb56b425257c0fcd79e2a1e4e3.jpg" + } + ] + } + ], + "index": 17, + "virtual_lines": [ + { + "bbox": [ + 165, + 313, + 444, + 324.6666666666667 + ], + "spans": [], + "index": 16 + }, + { + "bbox": [ + 165, + 324.6666666666667, + 444, + 336.33333333333337 + ], + "spans": [], + "index": 17 + }, + { + "bbox": [ + 165, + 336.33333333333337, + 444, + 348.00000000000006 + ], + "spans": [], + "index": 18 + } + ] + }, + { + "type": "text", + "bbox": [ + 109, + 352, + 504, + 376 + ], + "lines": [ + { + "bbox": [ + 103, + 345, + 509, + 371 + ], + "spans": [ + { + "bbox": [ + 103, + 345, + 138, + 371 + ], + "score": 1.0, + "content": "to yield", + "type": "text" + }, + { + "bbox": [ + 138, + 354, + 149, + 366 + ], + "score": 0.89, + "content": "r _ { k } ^ { \\prime }", + "type": "inline_equation" + }, + { + "bbox": [ + 149, + 345, + 354, + 371 + ], + "score": 1.0, + "content": "above. Equation (82) also yields the second term in", + "type": "text" + }, + { + "bbox": [ + 354, + 354, + 363, + 365 + ], + "score": 0.86, + "content": "l _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 363, + 345, + 410, + 371 + ], + "score": 1.0, + "content": ". 
Using that", + "type": "text" + }, + { + "bbox": [ + 411, + 352, + 488, + 366 + ], + "score": 0.92, + "content": "\\tilde { x } ^ { k } - x _ { n + 1 } ^ { k } = \\rho _ { k } \\epsilon _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 488, + 345, + 509, + 371 + ], + "score": 1.0, + "content": ", we", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 363, + 231, + 377 + ], + "spans": [ + { + "bbox": [ + 105, + 363, + 231, + 377 + ], + "score": 1.0, + "content": "rewrite the first term in (81) as", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 19.5 + }, + { + "type": "interline_equation", + "bbox": [ + 116, + 379, + 494, + 461 + ], + "lines": [ + { + "bbox": [ + 116, + 379, + 494, + 461 + ], + "spans": [ + { + "bbox": [ + 116, + 379, + 494, + 461 + ], + "score": 0.94, + "content": "\\begin{array} { r l } & { \\bigl \\langle z ^ { k } - x _ { n + 1 } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\bigr \\rangle = \\bigl \\langle z ^ { k } - \\tilde { x } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\bigr \\rangle + \\bigl \\langle \\tilde { x } ^ { k } - x _ { n + 1 } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\bigr \\rangle } \\\\ & { \\qquad = \\bigl \\langle z ^ { k } - \\tilde { x } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\bigr \\rangle + \\rho _ { k } \\bigl \\langle \\epsilon ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\bigr \\rangle \\qquad } \\\\ & { \\qquad = \\bigl \\langle z ^ { k } - \\tilde { x } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\bigr \\rangle + \\rho _ { k } \\bigl \\langle \\epsilon ^ { k } , B x _ { n + 1 } ^ { k } - B \\tilde { x } ^ { k } \\bigr \\rangle \\qquad ( 8 } \\\\ & { \\qquad + \\rho _ { k } \\underbrace { \\bigl \\langle \\epsilon ^ { k } , B \\tilde { x } ^ { k } - w _ { n + 1 } ^ { k } \\bigr \\rangle } _ { r _ { k } } . 
} \\end{array}", + "type": "interline_equation", + "image_path": "cc88216d34b3fd4bf4c4a1541ff95dbf9379765edad8df4b947cdd641883ac92.jpg" + } + ] + } + ], + "index": 22, + "virtual_lines": [ + { + "bbox": [ + 116, + 379, + 494, + 406.3333333333333 + ], + "spans": [], + "index": 21 + }, + { + "bbox": [ + 116, + 406.3333333333333, + 494, + 433.66666666666663 + ], + "spans": [], + "index": 22 + }, + { + "bbox": [ + 116, + 433.66666666666663, + 494, + 460.99999999999994 + ], + "spans": [], + "index": 23 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 465, + 291, + 477 + ], + "lines": [ + { + "bbox": [ + 106, + 465, + 291, + 478 + ], + "spans": [ + { + "bbox": [ + 106, + 465, + 291, + 478 + ], + "score": 1.0, + "content": "Next, the terms in (83) admit the lower bound", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 24 + }, + { + "type": "interline_equation", + "bbox": [ + 116, + 483, + 495, + 521 + ], + "lines": [ + { + "bbox": [ + 116, + 483, + 495, + 521 + ], + "spans": [ + { + "bbox": [ + 116, + 483, + 495, + 521 + ], + "score": 0.83, + "content": "\\begin{array} { r l } & { \\langle z ^ { k } - { \\tilde { x } } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\rangle + \\rho _ { k } \\langle \\epsilon ^ { k } , B x _ { n + 1 } ^ { k } - B { \\tilde { x } } ^ { k } \\rangle } \\\\ & { \\qquad \\geq \\langle z ^ { k } - { \\tilde { x } } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\rangle - \\underbrace { \\rho _ { k } \\| \\epsilon ^ { k } \\| \\| B x _ { n + 1 } ^ { k } - B { \\tilde { x } } ^ { k } \\| } _ { \\mathrm { ~ } } . 
} \\end{array}", + "type": "interline_equation", + "image_path": "24c5b535d5e1260db5d97d0bc677844097fec4c9d993e421704c387d232c8f1b.jpg" + } + ] + } + ], + "index": 26, + "virtual_lines": [ + { + "bbox": [ + 116, + 483, + 495, + 495.6666666666667 + ], + "spans": [], + "index": 25 + }, + { + "bbox": [ + 116, + 495.6666666666667, + 495, + 508.33333333333337 + ], + "spans": [], + "index": 26 + }, + { + "bbox": [ + 116, + 508.33333333333337, + 495, + 521.0 + ], + "spans": [], + "index": 27 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 538, + 397, + 551 + ], + "lines": [ + { + "bbox": [ + 106, + 538, + 398, + 552 + ], + "spans": [ + { + "bbox": [ + 106, + 538, + 398, + 552 + ], + "score": 1.0, + "content": "Considering the first term on right-hand side of this bound, we also have", + "type": "text" + } + ], + "index": 28 + } + ], + "index": 28 + }, + { + "type": "interline_equation", + "bbox": [ + 111, + 555, + 502, + 603 + ], + "lines": [ + { + "bbox": [ + 111, + 555, + 502, + 603 + ], + "spans": [ + { + "bbox": [ + 111, + 555, + 502, + 603 + ], + "score": 0.91, + "content": "\\begin{array} { r l } { { \\langle z ^ { k } - \\tilde { x } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\rangle = \\langle z ^ { k } - \\tilde { x } ^ { k } , B \\tilde { x } ^ { k } - w _ { n + 1 } ^ { k } \\rangle + \\langle z ^ { k } - \\tilde { x } ^ { k } , B x _ { n + 1 } ^ { k } - B \\tilde { x } ^ { k } \\rangle } } \\\\ & { \\geq \\langle z ^ { k } - \\tilde { x } ^ { k } , B \\tilde { x } ^ { k } - w _ { n + 1 } ^ { k } \\rangle - \\displaystyle \\frac { d } { 2 } \\| z ^ { k } - \\tilde { x } ^ { k } \\| ^ { 2 } - \\displaystyle \\frac { 1 } { \\underline { { 2 d } } } \\| B \\tilde { x } ^ { k } - B x _ { n + 1 } ^ { k } \\| ^ { 2 } } \\end{array}", + "type": "interline_equation", + "image_path": "e2ed6a3f0f6da8fe7c33a178128929042bd9ce72ad8007fbde38da35bfdfb164.jpg" + } + ] + } + ], + "index": 30, + "virtual_lines": [ + { + "bbox": [ + 111, + 555, + 502, + 
571.0 + ], + "spans": [], + "index": 29 + }, + { + "bbox": [ + 111, + 571.0, + 502, + 587.0 + ], + "spans": [], + "index": 30 + }, + { + "bbox": [ + 111, + 587.0, + 502, + 603.0 + ], + "spans": [], + "index": 31 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 619, + 502, + 642 + ], + "lines": [ + { + "bbox": [ + 105, + 619, + 505, + 633 + ], + "spans": [ + { + "bbox": [ + 105, + 619, + 137, + 633 + ], + "score": 1.0, + "content": "for any", + "type": "text" + }, + { + "bbox": [ + 137, + 620, + 162, + 630 + ], + "score": 0.89, + "content": "d > 0", + "type": "inline_equation" + }, + { + "bbox": [ + 162, + 619, + 505, + 633 + ], + "score": 1.0, + "content": ", using Young’s inequality. Finally, for the first two terms of the right-hand side of the", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 106, + 630, + 225, + 642 + ], + "spans": [ + { + "bbox": [ + 106, + 630, + 225, + 642 + ], + "score": 1.0, + "content": "above relation, we may write", + "type": "text" + } + ], + "index": 33 + } + ], + "index": 32.5 + }, + { + "type": "interline_equation", + "bbox": [ + 116, + 649, + 496, + 732 + ], + "lines": [ + { + "bbox": [ + 116, + 649, + 496, + 732 + ], + "spans": [ + { + "bbox": [ + 116, + 649, + 496, + 732 + ], + "score": 0.94, + "content": "\\begin{array} { r l } { { \\langle z ^ { k } - \\tilde { x } ^ { k } , B \\tilde { x } ^ { k } - w _ { n + 1 } ^ { k } \\rangle - \\frac { d } { 2 } \\| z ^ { k } - \\tilde { x } ^ { k } \\| ^ { 2 } } } \\\\ & { = \\langle z ^ { k } - \\tilde { x } ^ { k } , B z ^ { k } - w _ { n + 1 } ^ { k } \\rangle + \\langle z ^ { k } - \\tilde { x } ^ { k } , B \\tilde { x } ^ { k } - B z ^ { k } \\rangle - \\frac { d } { 2 } \\| z ^ { k } - \\tilde { x } ^ { k } \\| ^ { 2 } } \\\\ & { \\quad \\quad \\geq \\underbrace { ( \\rho _ { k } ^ { - 1 } - d / 2 ) \\| z ^ { k } - \\tilde { x } ^ { k } \\| ^ { 2 } - \\| z ^ { k } - \\tilde { x } ^ { k } \\| \\| B \\tilde { x } ^ { k } - B z ^ { k } \\| } _ { q _ { k } } , } 
\\end{array}", + "type": "interline_equation", + "image_path": "74fa6f6938f7b925da332c981fd67157ffb313cbd333d8882c0638d5c2605496.jpg" + } + ] + } + ], + "index": 35, + "virtual_lines": [ + { + "bbox": [ + 116, + 649, + 496, + 676.6666666666666 + ], + "spans": [], + "index": 34 + }, + { + "bbox": [ + 116, + 676.6666666666666, + 496, + 704.3333333333333 + ], + "spans": [], + "index": 35 + }, + { + "bbox": [ + 116, + 704.3333333333333, + 496, + 731.9999999999999 + ], + "spans": [], + "index": 36 + } + ] + } + ], + "page_idx": 35, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 106, + 26, + 308, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 309, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 309, + 39 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 760 + ], + "lines": [ + { + "bbox": [ + 298, + 749, + 313, + 763 + ], + "spans": [ + { + "bbox": [ + 298, + 749, + 313, + 763 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 14, + "width": 15 + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 380, + 723, + 389, + 730 + ], + "lines": [] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 81, + 505, + 107 + ], + "lines": [ + { + "bbox": [ + 105, + 80, + 505, + 96 + ], + "spans": [ + { + "bbox": [ + 105, + 80, + 126, + 96 + ], + "score": 1.0, + "content": "with", + "type": "text" + }, + { + "bbox": [ + 127, + 83, + 135, + 92 + ], + "score": 0.79, + "content": "L", + "type": "inline_equation" + }, + { + "bbox": [ + 135, + 80, + 283, + 96 + ], + "score": 1.0, + "content": "being the local Lipschitz constant of", + "type": "text" + }, + { + "bbox": [ + 284, + 83, + 293, + 92 + ], + "score": 0.82, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 293, + 80, + 306, + 96 + ], + "score": 1.0, + "content": "on", + 
"type": "text" + }, + { + "bbox": [ + 306, + 82, + 336, + 95 + ], + "score": 0.92, + "content": "\\mathbb { B } _ { r } \\big ( z ^ { * } \\big )", + "type": "inline_equation" + }, + { + "bbox": [ + 336, + 80, + 386, + 96 + ], + "score": 1.0, + "content": ". The iterate", + "type": "text" + }, + { + "bbox": [ + 386, + 82, + 397, + 92 + ], + "score": 0.89, + "content": "\\tilde { x } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 398, + 80, + 505, + 96 + ], + "score": 1.0, + "content": "is the analog of the iterate", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 107, + 93, + 249, + 108 + ], + "spans": [ + { + "bbox": [ + 107, + 93, + 137, + 108 + ], + "score": 0.92, + "content": "\\tilde { X } _ { t + 1 / 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 138, + 93, + 249, + 108 + ], + "score": 1.0, + "content": "used in Hsieh et al. (2020).", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 0.5, + "bbox_fs": [ + 105, + 80, + 505, + 108 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 111, + 506, + 183 + ], + "lines": [ + { + "bbox": [ + 105, + 110, + 506, + 125 + ], + "spans": [ + { + "bbox": [ + 105, + 110, + 506, + 125 + ], + "score": 1.0, + "content": "The recursion (79) is derived by once again starting from (13) and following the arguments leading to", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 121, + 504, + 137 + ], + "spans": [ + { + "bbox": [ + 104, + 121, + 464, + 137 + ], + "score": 1.0, + "content": "(35), but this time not taking conditional expectations. 
In particular, the upper bounds on", + "type": "text" + }, + { + "bbox": [ + 464, + 123, + 504, + 135 + ], + "score": 0.92, + "content": "\\| \\nabla _ { z } \\varphi _ { k } \\| ^ { 2 }", + "type": "inline_equation" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 132, + 507, + 149 + ], + "spans": [ + { + "bbox": [ + 104, + 132, + 124, + 149 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 124, + 135, + 168, + 147 + ], + "score": 0.93, + "content": "\\| \\nabla _ { w _ { i } } \\varphi _ { k } \\| ^ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 168, + 132, + 253, + 149 + ], + "score": 1.0, + "content": "contribute the terms", + "type": "text" + }, + { + "bbox": [ + 253, + 134, + 362, + 148 + ], + "score": 0.92, + "content": "c _ { 1 } \\alpha _ { k } ^ { 2 } ( \\| \\bar { e } ^ { k } \\| ^ { 2 } + \\| \\epsilon ^ { k } \\| ^ { 2 } + c _ { 4 } )", + "type": "inline_equation" + }, + { + "bbox": [ + 363, + 132, + 381, + 149 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 381, + 134, + 448, + 147 + ], + "score": 0.92, + "content": "c _ { 1 } \\alpha _ { k } ^ { 2 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 448, + 132, + 469, + 149 + ], + "score": 1.0, + "content": ". For", + "type": "text" + }, + { + "bbox": [ + 469, + 136, + 502, + 146 + ], + "score": 0.85, + "content": "i \\in 1 . . 
n", + "type": "inline_equation" + }, + { + "bbox": [ + 503, + 132, + 507, + 149 + ], + "score": 1.0, + "content": ",", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 147, + 506, + 162 + ], + "spans": [ + { + "bbox": [ + 105, + 147, + 122, + 162 + ], + "score": 1.0, + "content": "the", + "type": "text" + }, + { + "bbox": [ + 122, + 149, + 144, + 160 + ], + "score": 0.89, + "content": "{ } ^ { \\mathfrak { e } } \\varphi _ { i , k }", + "type": "inline_equation" + }, + { + "bbox": [ + 144, + 147, + 191, + 162 + ], + "score": 1.0, + "content": "-gap\" term,", + "type": "text" + }, + { + "bbox": [ + 191, + 147, + 272, + 160 + ], + "score": 0.91, + "content": "\\varphi _ { i , k } ( p ^ { k } ) - \\varphi _ { i , k } ( p ^ { * } )", + "type": "inline_equation" + }, + { + "bbox": [ + 273, + 147, + 506, + 162 + ], + "score": 1.0, + "content": ", is dealt with in a similar manner to Section C.5, but this", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 158, + 506, + 172 + ], + "spans": [ + { + "bbox": [ + 105, + 158, + 336, + 172 + ], + "score": 1.0, + "content": "time not using monotonicity as in (36). This contributes", + "type": "text" + }, + { + "bbox": [ + 336, + 159, + 348, + 171 + ], + "score": 0.89, + "content": "T _ { k } ^ { \\prime }", + "type": "inline_equation" + }, + { + "bbox": [ + 349, + 158, + 432, + 172 + ], + "score": 1.0, + "content": "and the first term in", + "type": "text" + }, + { + "bbox": [ + 432, + 159, + 441, + 170 + ], + "score": 0.85, + "content": "l _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 442, + 158, + 506, + 172 + ], + "score": 1.0, + "content": ". 
Finally, as we", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 168, + 452, + 184 + ], + "spans": [ + { + "bbox": [ + 104, + 168, + 177, + 184 + ], + "score": 1.0, + "content": "sketch below, the", + "type": "text" + }, + { + "bbox": [ + 178, + 171, + 212, + 182 + ], + "score": 0.89, + "content": "{ ^ { \\circ } } \\varphi _ { n + 1 , k }", + "type": "inline_equation" + }, + { + "bbox": [ + 212, + 168, + 302, + 184 + ], + "score": 1.0, + "content": "-gap\" term contributes", + "type": "text" + }, + { + "bbox": [ + 303, + 171, + 356, + 183 + ], + "score": 0.28, + "content": "r _ { k } , r _ { k } ^ { \\prime } , q _ { k } , q _ { k } ^ { \\prime }", + "type": "inline_equation" + }, + { + "bbox": [ + 356, + 168, + 438, + 184 + ], + "score": 1.0, + "content": ", and the last term in", + "type": "text" + }, + { + "bbox": [ + 439, + 171, + 447, + 181 + ], + "score": 0.86, + "content": "l _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 448, + 168, + 452, + 184 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 4.5, + "bbox_fs": [ + 104, + 110, + 507, + 184 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 187, + 505, + 234 + ], + "lines": [ + { + "bbox": [ + 105, + 186, + 506, + 203 + ], + "spans": [ + { + "bbox": [ + 105, + 186, + 141, + 203 + ], + "score": 1.0, + "content": "For the", + "type": "text" + }, + { + "bbox": [ + 142, + 189, + 196, + 201 + ], + "score": 0.57, + "content": "\\cdot \\circ _ { n + 1 , k } \\cdot \\mathbf { g } \\mathbf { a p } ^ { , , }", + "type": "inline_equation" + }, + { + "bbox": [ + 196, + 186, + 232, + 203 + ], + "score": 1.0, + "content": ", that is,", + "type": "text" + }, + { + "bbox": [ + 232, + 187, + 339, + 201 + ], + "score": 0.93, + "content": "\\varphi _ { n + 1 , k } ( p ^ { k } ) - \\varphi _ { n + 1 , k } ( p ^ { * } )", + "type": "inline_equation" + }, + { + "bbox": [ + 339, + 186, + 506, + 203 + ], + "score": 1.0, + "content": ", we have to 
depart from the analysis in", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 199, + 506, + 213 + ], + "spans": [ + { + "bbox": [ + 105, + 199, + 333, + 213 + ], + "score": 1.0, + "content": "Section C.6 and use an alternative argument involving", + "type": "text" + }, + { + "bbox": [ + 333, + 200, + 345, + 210 + ], + "score": 0.86, + "content": "\\tilde { x } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 345, + 199, + 506, + 213 + ], + "score": 1.0, + "content": ". We now provide some details of this", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 210, + 506, + 224 + ], + "spans": [ + { + "bbox": [ + 104, + 210, + 250, + 224 + ], + "score": 1.0, + "content": "argument: in the following, we use", + "type": "text" + }, + { + "bbox": [ + 250, + 212, + 264, + 221 + ], + "score": 0.87, + "content": "B z", + "type": "inline_equation" + }, + { + "bbox": [ + 264, + 210, + 333, + 224 + ], + "score": 1.0, + "content": "as shorthand for", + "type": "text" + }, + { + "bbox": [ + 333, + 212, + 355, + 223 + ], + "score": 0.92, + "content": "B ( z )", + "type": "inline_equation" + }, + { + "bbox": [ + 355, + 210, + 415, + 224 + ], + "score": 1.0, + "content": "for any vector", + "type": "text" + }, + { + "bbox": [ + 415, + 211, + 445, + 222 + ], + "score": 0.91, + "content": "z \\in \\mathbb { R } ^ { d }", + "type": "inline_equation" + }, + { + "bbox": [ + 446, + 210, + 506, + 224 + ], + "score": 1.0, + "content": ". 
We begin the", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 222, + 162, + 235 + ], + "spans": [ + { + "bbox": [ + 105, + 222, + 162, + 235 + ], + "score": 1.0, + "content": "analysis with", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 9.5, + "bbox_fs": [ + 104, + 186, + 506, + 235 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 173, + 239, + 438, + 286 + ], + "lines": [ + { + "bbox": [ + 173, + 239, + 438, + 286 + ], + "spans": [ + { + "bbox": [ + 173, + 239, + 438, + 286 + ], + "score": 0.75, + "content": "\\begin{array} { r l } & { \\varphi _ { n + 1 , k } ( p ^ { k } ) = \\langle z ^ { k } - x _ { n + 1 } ^ { k } , y _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\rangle } \\\\ & { \\qquad = \\langle z ^ { k } - x _ { n + 1 } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\rangle + \\underbrace { \\langle z ^ { k } - x _ { n + 1 } ^ { k } , e ^ { k } \\rangle } _ { \\mathrm { p a r t } \\mathrm { o f } r _ { k } ^ { \\prime } } . 
} \\end{array}", + "type": "interline_equation", + "image_path": "7a41c8543df977baa533186e1906ea9f82ac0387eb9048b6fbfc355353936664.jpg" + } + ] + } + ], + "index": 13, + "virtual_lines": [ + { + "bbox": [ + 173, + 239, + 438, + 254.66666666666666 + ], + "spans": [], + "index": 12 + }, + { + "bbox": [ + 173, + 254.66666666666666, + 438, + 270.3333333333333 + ], + "spans": [], + "index": 13 + }, + { + "bbox": [ + 173, + 270.3333333333333, + 438, + 286.0 + ], + "spans": [], + "index": 14 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 295, + 397, + 309 + ], + "lines": [ + { + "bbox": [ + 104, + 292, + 398, + 312 + ], + "spans": [ + { + "bbox": [ + 104, + 292, + 276, + 312 + ], + "score": 1.0, + "content": "The final term will combine with the term", + "type": "text" + }, + { + "bbox": [ + 276, + 295, + 342, + 309 + ], + "score": 0.92, + "content": "\\langle x _ { n + 1 } ^ { k } - z ^ { * } , e ^ { k } \\rangle", + "type": "inline_equation" + }, + { + "bbox": [ + 342, + 292, + 398, + 312 + ], + "score": 1.0, + "content": "coming from", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 15, + "bbox_fs": [ + 104, + 292, + 398, + 312 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 165, + 313, + 444, + 348 + ], + "lines": [ + { + "bbox": [ + 165, + 313, + 444, + 348 + ], + "spans": [ + { + "bbox": [ + 165, + 313, + 444, + 348 + ], + "score": 0.92, + "content": "\\begin{array} { r l } & { - \\varphi _ { n + 1 , k } ( p ^ { * } ) = \\langle z ^ { * } - x _ { n + 1 } ^ { k } , w _ { n + 1 } ^ { * } - y _ { n + 1 } ^ { k } \\rangle } \\\\ & { \\qquad = \\langle z ^ { * } - x _ { n + 1 } ^ { k } , w _ { n + 1 } ^ { * } - B x _ { n + 1 } ^ { k } \\rangle + \\langle x _ { n + 1 } ^ { k } - z ^ { * } , e _ { n + 1 } ^ { k } \\rangle } \\end{array}", + "type": "interline_equation", + "image_path": "c578f3123900feba8fdaeb554b4a6137548b55fb56b425257c0fcd79e2a1e4e3.jpg" + } + ] + } + ], + "index": 17, + "virtual_lines": [ + { + "bbox": [ + 165, + 313, + 
444, + 324.6666666666667 + ], + "spans": [], + "index": 16 + }, + { + "bbox": [ + 165, + 324.6666666666667, + 444, + 336.33333333333337 + ], + "spans": [], + "index": 17 + }, + { + "bbox": [ + 165, + 336.33333333333337, + 444, + 348.00000000000006 + ], + "spans": [], + "index": 18 + } + ] + }, + { + "type": "text", + "bbox": [ + 109, + 352, + 504, + 376 + ], + "lines": [ + { + "bbox": [ + 103, + 345, + 509, + 371 + ], + "spans": [ + { + "bbox": [ + 103, + 345, + 138, + 371 + ], + "score": 1.0, + "content": "to yield", + "type": "text" + }, + { + "bbox": [ + 138, + 354, + 149, + 366 + ], + "score": 0.89, + "content": "r _ { k } ^ { \\prime }", + "type": "inline_equation" + }, + { + "bbox": [ + 149, + 345, + 354, + 371 + ], + "score": 1.0, + "content": "above. Equation (82) also yields the second term in", + "type": "text" + }, + { + "bbox": [ + 354, + 354, + 363, + 365 + ], + "score": 0.86, + "content": "l _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 363, + 345, + 410, + 371 + ], + "score": 1.0, + "content": ". 
Using that", + "type": "text" + }, + { + "bbox": [ + 411, + 352, + 488, + 366 + ], + "score": 0.92, + "content": "\\tilde { x } ^ { k } - x _ { n + 1 } ^ { k } = \\rho _ { k } \\epsilon _ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 488, + 345, + 509, + 371 + ], + "score": 1.0, + "content": ", we", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 363, + 231, + 377 + ], + "spans": [ + { + "bbox": [ + 105, + 363, + 231, + 377 + ], + "score": 1.0, + "content": "rewrite the first term in (81) as", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 19.5, + "bbox_fs": [ + 103, + 345, + 509, + 377 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 116, + 379, + 494, + 461 + ], + "lines": [ + { + "bbox": [ + 116, + 379, + 494, + 461 + ], + "spans": [ + { + "bbox": [ + 116, + 379, + 494, + 461 + ], + "score": 0.94, + "content": "\\begin{array} { r l } & { \\bigl \\langle z ^ { k } - x _ { n + 1 } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\bigr \\rangle = \\bigl \\langle z ^ { k } - \\tilde { x } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\bigr \\rangle + \\bigl \\langle \\tilde { x } ^ { k } - x _ { n + 1 } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\bigr \\rangle } \\\\ & { \\qquad = \\bigl \\langle z ^ { k } - \\tilde { x } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\bigr \\rangle + \\rho _ { k } \\bigl \\langle \\epsilon ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\bigr \\rangle \\qquad } \\\\ & { \\qquad = \\bigl \\langle z ^ { k } - \\tilde { x } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\bigr \\rangle + \\rho _ { k } \\bigl \\langle \\epsilon ^ { k } , B x _ { n + 1 } ^ { k } - B \\tilde { x } ^ { k } \\bigr \\rangle \\qquad ( 8 } \\\\ & { \\qquad + \\rho _ { k } \\underbrace { \\bigl \\langle \\epsilon ^ { k } , B \\tilde { x } ^ { k } - w _ { n + 1 } ^ { k } \\bigr \\rangle } _ { r _ { k } } . 
} \\end{array}", + "type": "interline_equation", + "image_path": "cc88216d34b3fd4bf4c4a1541ff95dbf9379765edad8df4b947cdd641883ac92.jpg" + } + ] + } + ], + "index": 22, + "virtual_lines": [ + { + "bbox": [ + 116, + 379, + 494, + 406.3333333333333 + ], + "spans": [], + "index": 21 + }, + { + "bbox": [ + 116, + 406.3333333333333, + 494, + 433.66666666666663 + ], + "spans": [], + "index": 22 + }, + { + "bbox": [ + 116, + 433.66666666666663, + 494, + 460.99999999999994 + ], + "spans": [], + "index": 23 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 465, + 291, + 477 + ], + "lines": [ + { + "bbox": [ + 106, + 465, + 291, + 478 + ], + "spans": [ + { + "bbox": [ + 106, + 465, + 291, + 478 + ], + "score": 1.0, + "content": "Next, the terms in (83) admit the lower bound", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 24, + "bbox_fs": [ + 106, + 465, + 291, + 478 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 116, + 483, + 495, + 521 + ], + "lines": [ + { + "bbox": [ + 116, + 483, + 495, + 521 + ], + "spans": [ + { + "bbox": [ + 116, + 483, + 495, + 521 + ], + "score": 0.83, + "content": "\\begin{array} { r l } & { \\langle z ^ { k } - { \\tilde { x } } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\rangle + \\rho _ { k } \\langle \\epsilon ^ { k } , B x _ { n + 1 } ^ { k } - B { \\tilde { x } } ^ { k } \\rangle } \\\\ & { \\qquad \\geq \\langle z ^ { k } - { \\tilde { x } } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\rangle - \\underbrace { \\rho _ { k } \\| \\epsilon ^ { k } \\| \\| B x _ { n + 1 } ^ { k } - B { \\tilde { x } } ^ { k } \\| } _ { \\mathrm { ~ } } . 
} \\end{array}", + "type": "interline_equation", + "image_path": "24c5b535d5e1260db5d97d0bc677844097fec4c9d993e421704c387d232c8f1b.jpg" + } + ] + } + ], + "index": 26, + "virtual_lines": [ + { + "bbox": [ + 116, + 483, + 495, + 495.6666666666667 + ], + "spans": [], + "index": 25 + }, + { + "bbox": [ + 116, + 495.6666666666667, + 495, + 508.33333333333337 + ], + "spans": [], + "index": 26 + }, + { + "bbox": [ + 116, + 508.33333333333337, + 495, + 521.0 + ], + "spans": [], + "index": 27 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 538, + 397, + 551 + ], + "lines": [ + { + "bbox": [ + 106, + 538, + 398, + 552 + ], + "spans": [ + { + "bbox": [ + 106, + 538, + 398, + 552 + ], + "score": 1.0, + "content": "Considering the first term on right-hand side of this bound, we also have", + "type": "text" + } + ], + "index": 28 + } + ], + "index": 28, + "bbox_fs": [ + 106, + 538, + 398, + 552 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 111, + 555, + 502, + 603 + ], + "lines": [ + { + "bbox": [ + 111, + 555, + 502, + 603 + ], + "spans": [ + { + "bbox": [ + 111, + 555, + 502, + 603 + ], + "score": 0.91, + "content": "\\begin{array} { r l } { { \\langle z ^ { k } - \\tilde { x } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\rangle = \\langle z ^ { k } - \\tilde { x } ^ { k } , B \\tilde { x } ^ { k } - w _ { n + 1 } ^ { k } \\rangle + \\langle z ^ { k } - \\tilde { x } ^ { k } , B x _ { n + 1 } ^ { k } - B \\tilde { x } ^ { k } \\rangle } } \\\\ & { \\geq \\langle z ^ { k } - \\tilde { x } ^ { k } , B \\tilde { x } ^ { k } - w _ { n + 1 } ^ { k } \\rangle - \\displaystyle \\frac { d } { 2 } \\| z ^ { k } - \\tilde { x } ^ { k } \\| ^ { 2 } - \\displaystyle \\frac { 1 } { \\underline { { 2 d } } } \\| B \\tilde { x } ^ { k } - B x _ { n + 1 } ^ { k } \\| ^ { 2 } } \\end{array}", + "type": "interline_equation", + "image_path": "e2ed6a3f0f6da8fe7c33a178128929042bd9ce72ad8007fbde38da35bfdfb164.jpg" + } + ] + } + ], + "index": 30, + 
"virtual_lines": [ + { + "bbox": [ + 111, + 555, + 502, + 571.0 + ], + "spans": [], + "index": 29 + }, + { + "bbox": [ + 111, + 571.0, + 502, + 587.0 + ], + "spans": [], + "index": 30 + }, + { + "bbox": [ + 111, + 587.0, + 502, + 603.0 + ], + "spans": [], + "index": 31 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 619, + 502, + 642 + ], + "lines": [ + { + "bbox": [ + 105, + 619, + 505, + 633 + ], + "spans": [ + { + "bbox": [ + 105, + 619, + 137, + 633 + ], + "score": 1.0, + "content": "for any", + "type": "text" + }, + { + "bbox": [ + 137, + 620, + 162, + 630 + ], + "score": 0.89, + "content": "d > 0", + "type": "inline_equation" + }, + { + "bbox": [ + 162, + 619, + 505, + 633 + ], + "score": 1.0, + "content": ", using Young’s inequality. Finally, for the first two terms of the right-hand side of the", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 106, + 630, + 225, + 642 + ], + "spans": [ + { + "bbox": [ + 106, + 630, + 225, + 642 + ], + "score": 1.0, + "content": "above relation, we may write", + "type": "text" + } + ], + "index": 33 + } + ], + "index": 32.5, + "bbox_fs": [ + 105, + 619, + 505, + 642 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 116, + 649, + 496, + 732 + ], + "lines": [ + { + "bbox": [ + 116, + 649, + 496, + 732 + ], + "spans": [ + { + "bbox": [ + 116, + 649, + 496, + 732 + ], + "score": 0.94, + "content": "\\begin{array} { r l } { { \\langle z ^ { k } - \\tilde { x } ^ { k } , B \\tilde { x } ^ { k } - w _ { n + 1 } ^ { k } \\rangle - \\frac { d } { 2 } \\| z ^ { k } - \\tilde { x } ^ { k } \\| ^ { 2 } } } \\\\ & { = \\langle z ^ { k } - \\tilde { x } ^ { k } , B z ^ { k } - w _ { n + 1 } ^ { k } \\rangle + \\langle z ^ { k } - \\tilde { x } ^ { k } , B \\tilde { x } ^ { k } - B z ^ { k } \\rangle - \\frac { d } { 2 } \\| z ^ { k } - \\tilde { x } ^ { k } \\| ^ { 2 } } \\\\ & { \\quad \\quad \\geq \\underbrace { ( \\rho _ { k } ^ { - 1 } - d / 2 ) \\| z ^ { k } - \\tilde { x } ^ { k } \\| ^ { 2 } - \\| z 
^ { k } - \\tilde { x } ^ { k } \\| \\| B \\tilde { x } ^ { k } - B z ^ { k } \\| } _ { q _ { k } } , } \\end{array}", + "type": "interline_equation", + "image_path": "74fa6f6938f7b925da332c981fd67157ffb313cbd333d8882c0638d5c2605496.jpg" + } + ] + } + ], + "index": 35, + "virtual_lines": [ + { + "bbox": [ + 116, + 649, + 496, + 676.6666666666666 + ], + "spans": [], + "index": 34 + }, + { + "bbox": [ + 116, + 676.6666666666666, + 496, + 704.3333333333333 + ], + "spans": [], + "index": 35 + }, + { + "bbox": [ + 116, + 704.3333333333333, + 496, + 731.9999999999999 + ], + "spans": [], + "index": 36 + } + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 81, + 505, + 118 + ], + "lines": [ + { + "bbox": [ + 102, + 75, + 506, + 99 + ], + "spans": [ + { + "bbox": [ + 102, + 75, + 439, + 99 + ], + "score": 1.0, + "content": "where in the final inequality we use the Cauchy-Schwartz inequality and substitute", + "type": "text" + }, + { + "bbox": [ + 439, + 81, + 506, + 96 + ], + "score": 0.92, + "content": "B z ^ { k } - w _ { n + 1 } ^ { k } =", + "type": "inline_equation" + } + ], + "index": 0 + }, + { + "bbox": [ + 107, + 91, + 509, + 111 + ], + "spans": [ + { + "bbox": [ + 107, + 93, + 164, + 108 + ], + "score": 0.91, + "content": "\\rho _ { k } ^ { - 1 } ( z ^ { k } - \\tilde { x } ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 164, + 91, + 255, + 111 + ], + "score": 1.0, + "content": ", from the definition of", + "type": "text" + }, + { + "bbox": [ + 255, + 95, + 267, + 105 + ], + "score": 0.88, + "content": "\\tilde { x } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 267, + 91, + 509, + 111 + ], + "score": 1.0, + "content": "in (80). 
We have now accounted for all the terms appearing", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 105, + 139, + 119 + ], + "spans": [ + { + "bbox": [ + 104, + 105, + 139, + 119 + ], + "score": 1.0, + "content": "in (79).", + "type": "text" + } + ], + "index": 2 + } + ], + "index": 1 + }, + { + "type": "text", + "bbox": [ + 106, + 123, + 505, + 168 + ], + "lines": [ + { + "bbox": [ + 105, + 123, + 506, + 136 + ], + "spans": [ + { + "bbox": [ + 105, + 123, + 506, + 136 + ], + "score": 1.0, + "content": "The recursion (79) is analogous to equation (F.7) on page 24 of Hsieh et al. (2020) and provides", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 135, + 506, + 146 + ], + "spans": [ + { + "bbox": [ + 106, + 135, + 506, + 146 + ], + "score": 1.0, + "content": "the starting point for the local convergence analysis. The next step would be to derive an analog of", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 145, + 505, + 158 + ], + "spans": [ + { + "bbox": [ + 105, + 145, + 505, + 158 + ], + "score": 1.0, + "content": "Theorem F.1. of Hsieh et al. (2020) using (79). The following translation to the notation of Theorem", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 156, + 412, + 168 + ], + "spans": [ + { + "bbox": [ + 105, + 156, + 317, + 168 + ], + "score": 1.0, + "content": "F.1. could be used (note that Hsieh et al. 
(2020) uses", + "type": "text" + }, + { + "bbox": [ + 318, + 158, + 323, + 166 + ], + "score": 0.77, + "content": "t", + "type": "inline_equation" + }, + { + "bbox": [ + 323, + 156, + 412, + 168 + ], + "score": 1.0, + "content": "for iteration counter):", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 4.5 + }, + { + "type": "interline_equation", + "bbox": [ + 184, + 171, + 426, + 236 + ], + "lines": [ + { + "bbox": [ + 184, + 171, + 426, + 236 + ], + "spans": [ + { + "bbox": [ + 184, + 171, + 426, + 236 + ], + "score": 0.93, + "content": "\\begin{array} { r l } & { D _ { k } = \\| p ^ { k } - p ^ { * } \\| ^ { 2 } , } \\\\ & { \\zeta _ { k } = c _ { 2 } \\alpha _ { k } \\rho _ { k } ( T _ { k } ^ { \\prime } + l _ { k } ) + c _ { 3 } \\alpha _ { k } q _ { k } , } \\\\ & { \\xi _ { k } = - c _ { 2 } \\alpha _ { k } \\rho _ { k } r _ { k } - c _ { 3 } \\alpha _ { k } r _ { k } ^ { \\prime } , } \\\\ & { \\chi _ { k } = c _ { 1 } \\alpha _ { k } ^ { 2 } \\big ( \\| e ^ { k } \\| ^ { 2 } + \\| \\epsilon ^ { k } \\| ^ { 2 } + \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + c _ { 4 } \\big ) + c _ { 5 } \\alpha _ { k } q _ { k } ^ { \\prime } , } \\end{array}", + "type": "interline_equation", + "image_path": "f2736fc8a856d56c68e3ef766dd4fd1aec18e41e3f04b682737e69e5727fa78a.jpg" + } + ] + } + ], + "index": 9, + "virtual_lines": [ + { + "bbox": [ + 184, + 171, + 426, + 184.0 + ], + "spans": [], + "index": 7 + }, + { + "bbox": [ + 184, + 184.0, + 426, + 197.0 + ], + "spans": [], + "index": 8 + }, + { + "bbox": [ + 184, + 197.0, + 426, + 210.0 + ], + "spans": [], + "index": 9 + }, + { + "bbox": [ + 184, + 210.0, + 426, + 223.0 + ], + "spans": [], + "index": 10 + }, + { + "bbox": [ + 184, + 223.0, + 426, + 236.0 + ], + "spans": [], + "index": 11 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 239, + 240, + 251 + ], + "lines": [ + { + "bbox": [ + 106, + 239, + 240, + 252 + ], + "spans": [ + { + "bbox": [ + 106, + 239, + 162, + 252 + ], + "score": 1.0, + "content": 
"and the event", + "type": "text" + }, + { + "bbox": [ + 162, + 240, + 178, + 251 + ], + "score": 0.91, + "content": "E _ { \\infty } ^ { \\rho }", + "type": "inline_equation" + }, + { + "bbox": [ + 179, + 239, + 240, + 252 + ], + "score": 1.0, + "content": "is translated to", + "type": "text" + } + ], + "index": 12 + } + ], + "index": 12 + }, + { + "type": "interline_equation", + "bbox": [ + 152, + 256, + 457, + 272 + ], + "lines": [ + { + "bbox": [ + 152, + 256, + 457, + 272 + ], + "spans": [ + { + "bbox": [ + 152, + 256, + 457, + 272 + ], + "score": 0.86, + "content": "\\begin{array} { r } { E _ { \\infty } ^ { \\rho } = \\left\\{ x _ { n + 1 } ^ { k } \\in \\mathbb { B } _ { r } ( z ^ { * } ) , \\tilde { x } ^ { k } \\in \\mathbb { B } _ { \\rho r } ( z ^ { * } ) , p ^ { k } \\in \\mathbb { B } _ { \\rho r } ( p ^ { * } ) \\mathrm { ~ f o r ~ a l l ~ } k = 1 , 2 , \\ldots \\right\\} . } \\end{array}", + "type": "interline_equation", + "image_path": "03ca7d5826d37d3dfa079241797f56a314e74e8ce752b65bfea63722f09d3976.jpg" + } + ] + } + ], + "index": 13, + "virtual_lines": [ + { + "bbox": [ + 152, + 256, + 457, + 272 + ], + "spans": [], + "index": 13 + } + ] + }, + { + "type": "text", + "bbox": [ + 103, + 276, + 474, + 288 + ], + "lines": [ + { + "bbox": [ + 105, + 275, + 474, + 289 + ], + "spans": [ + { + "bbox": [ + 105, + 275, + 474, + 289 + ], + "score": 1.0, + "content": "An analog of Theorem 2 of Hsieh et al. 
(2020) could then be developed based on this result.", + "type": "text" + } + ], + "index": 14 + } + ], + "index": 14 + } + ], + "page_idx": 36, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 106, + 26, + 308, + 37 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 309, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 309, + 38 + ], + "score": 1.0, + "content": "Under review as a conference paper at ICLR 2022", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 760 + ], + "lines": [ + { + "bbox": [ + 298, + 750, + 313, + 764 + ], + "spans": [ + { + "bbox": [ + 298, + 750, + 313, + 764 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 14, + "width": 15 + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 81, + 505, + 118 + ], + "lines": [ + { + "bbox": [ + 102, + 75, + 506, + 99 + ], + "spans": [ + { + "bbox": [ + 102, + 75, + 439, + 99 + ], + "score": 1.0, + "content": "where in the final inequality we use the Cauchy-Schwartz inequality and substitute", + "type": "text" + }, + { + "bbox": [ + 439, + 81, + 506, + 96 + ], + "score": 0.92, + "content": "B z ^ { k } - w _ { n + 1 } ^ { k } =", + "type": "inline_equation" + } + ], + "index": 0 + }, + { + "bbox": [ + 107, + 91, + 509, + 111 + ], + "spans": [ + { + "bbox": [ + 107, + 93, + 164, + 108 + ], + "score": 0.91, + "content": "\\rho _ { k } ^ { - 1 } ( z ^ { k } - \\tilde { x } ^ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 164, + 91, + 255, + 111 + ], + "score": 1.0, + "content": ", from the definition of", + "type": "text" + }, + { + "bbox": [ + 255, + 95, + 267, + 105 + ], + "score": 0.88, + "content": "\\tilde { x } ^ { k }", + "type": "inline_equation" + }, + { + "bbox": [ + 267, + 91, + 509, + 111 + ], + "score": 1.0, + "content": "in (80). 
We have now accounted for all the terms appearing", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 105, + 139, + 119 + ], + "spans": [ + { + "bbox": [ + 104, + 105, + 139, + 119 + ], + "score": 1.0, + "content": "in (79).", + "type": "text" + } + ], + "index": 2 + } + ], + "index": 1, + "bbox_fs": [ + 102, + 75, + 509, + 119 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 123, + 505, + 168 + ], + "lines": [ + { + "bbox": [ + 105, + 123, + 506, + 136 + ], + "spans": [ + { + "bbox": [ + 105, + 123, + 506, + 136 + ], + "score": 1.0, + "content": "The recursion (79) is analogous to equation (F.7) on page 24 of Hsieh et al. (2020) and provides", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 135, + 506, + 146 + ], + "spans": [ + { + "bbox": [ + 106, + 135, + 506, + 146 + ], + "score": 1.0, + "content": "the starting point for the local convergence analysis. The next step would be to derive an analog of", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 145, + 505, + 158 + ], + "spans": [ + { + "bbox": [ + 105, + 145, + 505, + 158 + ], + "score": 1.0, + "content": "Theorem F.1. of Hsieh et al. (2020) using (79). The following translation to the notation of Theorem", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 156, + 412, + 168 + ], + "spans": [ + { + "bbox": [ + 105, + 156, + 317, + 168 + ], + "score": 1.0, + "content": "F.1. could be used (note that Hsieh et al. 
(2020) uses", + "type": "text" + }, + { + "bbox": [ + 318, + 158, + 323, + 166 + ], + "score": 0.77, + "content": "t", + "type": "inline_equation" + }, + { + "bbox": [ + 323, + 156, + 412, + 168 + ], + "score": 1.0, + "content": "for iteration counter):", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 4.5, + "bbox_fs": [ + 105, + 123, + 506, + 168 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 184, + 171, + 426, + 236 + ], + "lines": [ + { + "bbox": [ + 184, + 171, + 426, + 236 + ], + "spans": [ + { + "bbox": [ + 184, + 171, + 426, + 236 + ], + "score": 0.93, + "content": "\\begin{array} { r l } & { D _ { k } = \\| p ^ { k } - p ^ { * } \\| ^ { 2 } , } \\\\ & { \\zeta _ { k } = c _ { 2 } \\alpha _ { k } \\rho _ { k } ( T _ { k } ^ { \\prime } + l _ { k } ) + c _ { 3 } \\alpha _ { k } q _ { k } , } \\\\ & { \\xi _ { k } = - c _ { 2 } \\alpha _ { k } \\rho _ { k } r _ { k } - c _ { 3 } \\alpha _ { k } r _ { k } ^ { \\prime } , } \\\\ & { \\chi _ { k } = c _ { 1 } \\alpha _ { k } ^ { 2 } \\big ( \\| e ^ { k } \\| ^ { 2 } + \\| \\epsilon ^ { k } \\| ^ { 2 } + \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + c _ { 4 } \\big ) + c _ { 5 } \\alpha _ { k } q _ { k } ^ { \\prime } , } \\end{array}", + "type": "interline_equation", + "image_path": "f2736fc8a856d56c68e3ef766dd4fd1aec18e41e3f04b682737e69e5727fa78a.jpg" + } + ] + } + ], + "index": 9, + "virtual_lines": [ + { + "bbox": [ + 184, + 171, + 426, + 184.0 + ], + "spans": [], + "index": 7 + }, + { + "bbox": [ + 184, + 184.0, + 426, + 197.0 + ], + "spans": [], + "index": 8 + }, + { + "bbox": [ + 184, + 197.0, + 426, + 210.0 + ], + "spans": [], + "index": 9 + }, + { + "bbox": [ + 184, + 210.0, + 426, + 223.0 + ], + "spans": [], + "index": 10 + }, + { + "bbox": [ + 184, + 223.0, + 426, + 236.0 + ], + "spans": [], + "index": 11 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 239, + 240, + 251 + ], + "lines": [ + { + "bbox": [ + 106, + 239, + 240, + 252 + ], + "spans": [ + { + "bbox": [ + 106, + 239, 
+ 162, + 252 + ], + "score": 1.0, + "content": "and the event", + "type": "text" + }, + { + "bbox": [ + 162, + 240, + 178, + 251 + ], + "score": 0.91, + "content": "E _ { \\infty } ^ { \\rho }", + "type": "inline_equation" + }, + { + "bbox": [ + 179, + 239, + 240, + 252 + ], + "score": 1.0, + "content": "is translated to", + "type": "text" + } + ], + "index": 12 + } + ], + "index": 12, + "bbox_fs": [ + 106, + 239, + 240, + 252 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 152, + 256, + 457, + 272 + ], + "lines": [ + { + "bbox": [ + 152, + 256, + 457, + 272 + ], + "spans": [ + { + "bbox": [ + 152, + 256, + 457, + 272 + ], + "score": 0.86, + "content": "\\begin{array} { r } { E _ { \\infty } ^ { \\rho } = \\left\\{ x _ { n + 1 } ^ { k } \\in \\mathbb { B } _ { r } ( z ^ { * } ) , \\tilde { x } ^ { k } \\in \\mathbb { B } _ { \\rho r } ( z ^ { * } ) , p ^ { k } \\in \\mathbb { B } _ { \\rho r } ( p ^ { * } ) \\mathrm { ~ f o r ~ a l l ~ } k = 1 , 2 , \\ldots \\right\\} . } \\end{array}", + "type": "interline_equation", + "image_path": "03ca7d5826d37d3dfa079241797f56a314e74e8ce752b65bfea63722f09d3976.jpg" + } + ] + } + ], + "index": 13, + "virtual_lines": [ + { + "bbox": [ + 152, + 256, + 457, + 272 + ], + "spans": [], + "index": 13 + } + ] + }, + { + "type": "text", + "bbox": [ + 103, + 276, + 474, + 288 + ], + "lines": [ + { + "bbox": [ + 105, + 275, + 474, + 289 + ], + "spans": [ + { + "bbox": [ + 105, + 275, + 474, + 289 + ], + "score": 1.0, + "content": "An analog of Theorem 2 of Hsieh et al. 
(2020) could then be developed based on this result.", + "type": "text" + } + ], + "index": 14 + } + ], + "index": 14, + "bbox_fs": [ + 105, + 275, + 474, + 289 + ] + } + ] + } + ], + "_backend": "pipeline", + "_version_name": "2.2.2" +} \ No newline at end of file diff --git a/parse/dev/a0SRWViFYW/a0SRWViFYW_model.json b/parse/dev/a0SRWViFYW/a0SRWViFYW_model.json new file mode 100644 index 0000000000000000000000000000000000000000..124712d6e954943d0cad54dac4ea9e0a74c1c7d0 --- /dev/null +++ b/parse/dev/a0SRWViFYW/a0SRWViFYW_model.json @@ -0,0 +1,59535 @@ +[ + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 398, + 652, + 1305, + 652, + 1305, + 1080, + 398, + 1080 + ], + "score": 0.982 + }, + { + "category_id": 1, + "poly": [ + 298, + 1238, + 1405, + 1238, + 1405, + 1513, + 298, + 1513 + ], + "score": 0.98 + }, + { + "category_id": 1, + "poly": [ + 298, + 1529, + 1404, + 1529, + 1404, + 1774, + 298, + 1774 + ], + "score": 0.98 + }, + { + "category_id": 1, + "poly": [ + 298, + 1788, + 1404, + 1788, + 1404, + 2034, + 298, + 2034 + ], + "score": 0.979 + }, + { + "category_id": 0, + "poly": [ + 300, + 219, + 1400, + 219, + 1400, + 378, + 300, + 378 + ], + "score": 0.96 + }, + { + "category_id": 1, + "poly": [ + 313, + 432, + 680, + 432, + 680, + 493, + 313, + 493 + ], + "score": 0.932 + }, + { + "category_id": 0, + "poly": [ + 302, + 1161, + 573, + 1161, + 573, + 1196, + 302, + 1196 + ], + "score": 0.894 + }, + { + "category_id": 2, + "poly": [ + 298, + 75, + 857, + 75, + 857, + 104, + 298, + 104 + ], + "score": 0.893 + }, + { + "category_id": 0, + "poly": [ + 773, + 575, + 926, + 575, + 926, + 608, + 773, + 608 + ], + "score": 0.872 + }, + { + "category_id": 2, + "poly": [ + 841, + 2088, + 856, + 2088, + 856, + 2112, + 841, + 2112 + ], + "score": 0.723 + }, + { + "category_id": 15, + "poly": [ + 294.0, + 217.0, + 1081.0, + 217.0, + 1081.0, + 273.0, + 294.0, + 273.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 273.0, + 
1405.0, + 273.0, + 1405.0, + 327.0, + 294.0, + 327.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 331.0, + 625.0, + 331.0, + 625.0, + 381.0, + 295.0, + 381.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1158.0, + 579.0, + 1158.0, + 579.0, + 1205.0, + 294.0, + 1205.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 72.0, + 858.0, + 72.0, + 858.0, + 108.0, + 297.0, + 108.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 769.0, + 574.0, + 932.0, + 574.0, + 932.0, + 611.0, + 769.0, + 611.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 840.0, + 2088.0, + 857.0, + 2088.0, + 857.0, + 2116.0, + 840.0, + 2116.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 396.0, + 654.0, + 1307.0, + 654.0, + 1307.0, + 689.0, + 396.0, + 689.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 682.0, + 1310.0, + 682.0, + 1310.0, + 721.0, + 393.0, + 721.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 394.0, + 714.0, + 1305.0, + 714.0, + 1305.0, + 747.0, + 394.0, + 747.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 745.0, + 1306.0, + 745.0, + 1306.0, + 778.0, + 393.0, + 778.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 775.0, + 1306.0, + 775.0, + 1306.0, + 807.0, + 393.0, + 807.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 806.0, + 1306.0, + 806.0, + 1306.0, + 841.0, + 393.0, + 841.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 835.0, + 1309.0, + 835.0, + 1309.0, + 872.0, + 393.0, + 872.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 867.0, + 1309.0, + 867.0, + 1309.0, + 901.0, + 393.0, + 901.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 392.0, + 897.0, + 1309.0, + 897.0, + 1309.0, + 932.0, + 392.0, + 932.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 394.0, + 928.0, + 1305.0, + 928.0, + 1305.0, + 963.0, + 394.0, + 963.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 394.0, + 959.0, + 1305.0, + 959.0, + 1305.0, + 991.0, + 394.0, + 991.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 394.0, + 988.0, + 1308.0, + 988.0, + 1308.0, + 1024.0, + 394.0, + 1024.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 1020.0, + 1305.0, + 1020.0, + 1305.0, + 1055.0, + 393.0, + 1055.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 1050.0, + 1058.0, + 1050.0, + 1058.0, + 1085.0, + 393.0, + 1085.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1236.0, + 1409.0, + 1236.0, + 1409.0, + 1275.0, + 293.0, + 1275.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1269.0, + 1407.0, + 1269.0, + 1407.0, + 1305.0, + 295.0, + 1305.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1299.0, + 1408.0, + 1299.0, + 1408.0, + 1335.0, + 295.0, + 1335.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1329.0, + 1408.0, + 1329.0, + 1408.0, + 1365.0, + 295.0, + 1365.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1359.0, + 1408.0, + 1359.0, + 1408.0, + 1399.0, + 292.0, + 1399.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1388.0, + 1407.0, + 1388.0, + 1407.0, + 1429.0, + 292.0, + 1429.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1421.0, + 1407.0, + 1421.0, + 1407.0, + 1456.0, + 293.0, + 1456.0 + ], + "score": 1.0, + "text": 
"" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1452.0, + 1407.0, + 1452.0, + 1407.0, + 1489.0, + 295.0, + 1489.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1482.0, + 511.0, + 1482.0, + 511.0, + 1516.0, + 295.0, + 1516.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1528.0, + 1405.0, + 1528.0, + 1405.0, + 1565.0, + 293.0, + 1565.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1562.0, + 1405.0, + 1562.0, + 1405.0, + 1594.0, + 292.0, + 1594.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1592.0, + 1405.0, + 1592.0, + 1405.0, + 1625.0, + 294.0, + 1625.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1622.0, + 1406.0, + 1622.0, + 1406.0, + 1655.0, + 294.0, + 1655.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1652.0, + 1407.0, + 1652.0, + 1407.0, + 1685.0, + 293.0, + 1685.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1679.0, + 1409.0, + 1679.0, + 1409.0, + 1717.0, + 292.0, + 1717.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1713.0, + 1408.0, + 1713.0, + 1408.0, + 1746.0, + 294.0, + 1746.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1743.0, + 1087.0, + 1743.0, + 1087.0, + 1776.0, + 294.0, + 1776.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1787.0, + 1406.0, + 1787.0, + 1406.0, + 1827.0, + 292.0, + 1827.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1820.0, + 1405.0, + 1820.0, + 1405.0, + 1853.0, + 294.0, + 1853.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1852.0, + 1404.0, + 1852.0, + 1404.0, + 1886.0, + 296.0, + 1886.0 + ], + "score": 1.0, + "text": "" + }, + { 
+ "category_id": 15, + "poly": [ + 293.0, + 1882.0, + 1405.0, + 1882.0, + 1405.0, + 1916.0, + 293.0, + 1916.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1912.0, + 1406.0, + 1912.0, + 1406.0, + 1946.0, + 293.0, + 1946.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1940.0, + 1408.0, + 1940.0, + 1408.0, + 1978.0, + 291.0, + 1978.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1971.0, + 1406.0, + 1971.0, + 1406.0, + 2008.0, + 292.0, + 2008.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 2002.0, + 1408.0, + 2002.0, + 1408.0, + 2038.0, + 293.0, + 2038.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 315.0, + 432.0, + 560.0, + 432.0, + 560.0, + 465.0, + 315.0, + 465.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 311.0, + 462.0, + 682.0, + 462.0, + 682.0, + 495.0, + 311.0, + 495.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 0, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 298, + 229, + 1405, + 229, + 1405, + 659, + 298, + 659 + ], + "score": 0.985 + }, + { + "category_id": 1, + "poly": [ + 298, + 1008, + 1404, + 1008, + 1404, + 1436, + 298, + 1436 + ], + "score": 0.984 + }, + { + "category_id": 1, + "poly": [ + 298, + 1451, + 1404, + 1451, + 1404, + 1697, + 298, + 1697 + ], + "score": 0.982 + }, + { + "category_id": 1, + "poly": [ + 298, + 1711, + 1403, + 1711, + 1403, + 1897, + 298, + 1897 + ], + "score": 0.979 + }, + { + "category_id": 1, + "poly": [ + 299, + 815, + 1404, + 815, + 1404, + 909, + 299, + 909 + ], + "score": 0.975 + }, + { + "category_id": 1, + "poly": [ + 299, + 1910, + 1403, + 1910, + 1403, + 2034, + 299, + 2034 + ], + "score": 0.972 + }, + { + "category_id": 1, + "poly": [ + 298, + 672, + 1403, + 672, + 1403, + 766, + 298, + 766 + ], + 
"score": 0.97 + }, + { + "category_id": 8, + "poly": [ + 606, + 939, + 1095, + 939, + 1095, + 982, + 606, + 982 + ], + "score": 0.943 + }, + { + "category_id": 2, + "poly": [ + 298, + 75, + 857, + 75, + 857, + 104, + 298, + 104 + ], + "score": 0.894 + }, + { + "category_id": 9, + "poly": [ + 1366, + 946, + 1399, + 946, + 1399, + 976, + 1366, + 976 + ], + "score": 0.89 + }, + { + "category_id": 2, + "poly": [ + 841, + 2088, + 858, + 2088, + 858, + 2112, + 841, + 2112 + ], + "score": 0.712 + }, + { + "category_id": 2, + "poly": [ + 841, + 2088, + 859, + 2088, + 859, + 2112, + 841, + 2112 + ], + "score": 0.148 + }, + { + "category_id": 13, + "poly": [ + 1029, + 1133, + 1196, + 1133, + 1196, + 1163, + 1029, + 1163 + ], + "score": 0.92, + "latex": "A _ { 1 } , \\ldots , A _ { n } , B" + }, + { + "category_id": 13, + "poly": [ + 514, + 1744, + 586, + 1744, + 586, + 1771, + 514, + 1771 + ], + "score": 0.9, + "latex": "n = 0" + }, + { + "category_id": 14, + "poly": [ + 600, + 937, + 1096, + 937, + 1096, + 982, + 600, + 982 + ], + "score": 0.89, + "latex": "\\begin{array} { r } { \\mathrm { F i n d } z \\in \\mathbb { R } ^ { d } \\mathrm { ~ s . t . 
~ } 0 \\in \\sum _ { i = 1 } ^ { n } A _ { i } ( z ) + B ( z ) , } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 840, + 1010, + 873, + 1010, + 873, + 1040, + 840, + 1040 + ], + "score": 0.89, + "latex": "A _ { i }" + }, + { + "category_id": 13, + "poly": [ + 1116, + 1376, + 1148, + 1376, + 1148, + 1405, + 1116, + 1405 + ], + "score": 0.88, + "latex": "A _ { i }" + }, + { + "category_id": 13, + "poly": [ + 1330, + 1943, + 1402, + 1943, + 1402, + 1971, + 1330, + 1971 + ], + "score": 0.88, + "latex": "n = 2" + }, + { + "category_id": 13, + "poly": [ + 373, + 1011, + 398, + 1011, + 398, + 1037, + 373, + 1037 + ], + "score": 0.81, + "latex": "B" + }, + { + "category_id": 13, + "poly": [ + 972, + 1224, + 997, + 1224, + 997, + 1250, + 972, + 1250 + ], + "score": 0.81, + "latex": "B" + }, + { + "category_id": 13, + "poly": [ + 1091, + 1254, + 1116, + 1254, + 1116, + 1280, + 1091, + 1280 + ], + "score": 0.81, + "latex": "B" + }, + { + "category_id": 13, + "poly": [ + 796, + 1744, + 821, + 1744, + 821, + 1770, + 796, + 1770 + ], + "score": 0.8, + "latex": "B" + }, + { + "category_id": 13, + "poly": [ + 595, + 1011, + 616, + 1011, + 616, + 1037, + 595, + 1037 + ], + "score": 0.77, + "latex": "L" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 72.0, + 858.0, + 72.0, + 858.0, + 108.0, + 297.0, + 108.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 839.0, + 2085.0, + 862.0, + 2085.0, + 862.0, + 2120.0, + 839.0, + 2120.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 838.0, + 2085.0, + 862.0, + 2085.0, + 862.0, + 2120.0, + 838.0, + 2120.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 230.0, + 1406.0, + 230.0, + 1406.0, + 266.0, + 295.0, + 266.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 262.0, + 1408.0, + 262.0, + 1408.0, + 297.0, + 295.0, + 297.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + 
"poly": [ + 295.0, + 291.0, + 1410.0, + 291.0, + 1410.0, + 327.0, + 295.0, + 327.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 318.0, + 1407.0, + 318.0, + 1407.0, + 359.0, + 292.0, + 359.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 352.0, + 1405.0, + 352.0, + 1405.0, + 388.0, + 293.0, + 388.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 379.0, + 1406.0, + 379.0, + 1406.0, + 421.0, + 292.0, + 421.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 413.0, + 1405.0, + 413.0, + 1405.0, + 448.0, + 292.0, + 448.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 444.0, + 1407.0, + 444.0, + 1407.0, + 480.0, + 295.0, + 480.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 475.0, + 1405.0, + 475.0, + 1405.0, + 510.0, + 295.0, + 510.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 505.0, + 1405.0, + 505.0, + 1405.0, + 540.0, + 293.0, + 540.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 534.0, + 1405.0, + 534.0, + 1405.0, + 570.0, + 295.0, + 570.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 565.0, + 1406.0, + 565.0, + 1406.0, + 601.0, + 293.0, + 601.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 596.0, + 1406.0, + 596.0, + 1406.0, + 631.0, + 295.0, + 631.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 626.0, + 698.0, + 626.0, + 698.0, + 662.0, + 295.0, + 662.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1009.0, + 372.0, + 1009.0, + 372.0, + 1044.0, + 294.0, + 1044.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 399.0, + 1009.0, + 594.0, + 1009.0, + 594.0, + 
1044.0, + 399.0, + 1044.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 617.0, + 1009.0, + 839.0, + 1009.0, + 839.0, + 1044.0, + 617.0, + 1044.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 874.0, + 1009.0, + 1408.0, + 1009.0, + 1408.0, + 1044.0, + 874.0, + 1044.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1039.0, + 1405.0, + 1039.0, + 1405.0, + 1075.0, + 296.0, + 1075.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1070.0, + 1405.0, + 1070.0, + 1405.0, + 1105.0, + 293.0, + 1105.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1100.0, + 1405.0, + 1100.0, + 1405.0, + 1136.0, + 294.0, + 1136.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1132.0, + 1028.0, + 1132.0, + 1028.0, + 1167.0, + 294.0, + 1167.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1197.0, + 1132.0, + 1406.0, + 1132.0, + 1406.0, + 1167.0, + 1197.0, + 1167.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1164.0, + 1406.0, + 1164.0, + 1406.0, + 1195.0, + 294.0, + 1195.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1193.0, + 1406.0, + 1193.0, + 1406.0, + 1228.0, + 293.0, + 1228.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1221.0, + 971.0, + 1221.0, + 971.0, + 1258.0, + 292.0, + 1258.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 998.0, + 1221.0, + 1407.0, + 1221.0, + 1407.0, + 1258.0, + 998.0, + 1258.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1252.0, + 1090.0, + 1252.0, + 1090.0, + 1288.0, + 293.0, + 1288.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1117.0, + 1252.0, + 1406.0, + 1252.0, + 1406.0, + 1288.0, + 
1117.0, + 1288.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1284.0, + 1406.0, + 1284.0, + 1406.0, + 1316.0, + 292.0, + 1316.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1311.0, + 1406.0, + 1311.0, + 1406.0, + 1349.0, + 292.0, + 1349.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1344.0, + 1404.0, + 1344.0, + 1404.0, + 1376.0, + 296.0, + 1376.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1376.0, + 1115.0, + 1376.0, + 1115.0, + 1407.0, + 296.0, + 1407.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1149.0, + 1376.0, + 1405.0, + 1376.0, + 1405.0, + 1407.0, + 1149.0, + 1407.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1407.0, + 659.0, + 1407.0, + 659.0, + 1436.0, + 294.0, + 1436.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1451.0, + 1408.0, + 1451.0, + 1408.0, + 1489.0, + 293.0, + 1489.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1483.0, + 1405.0, + 1483.0, + 1405.0, + 1516.0, + 294.0, + 1516.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1513.0, + 1404.0, + 1513.0, + 1404.0, + 1549.0, + 293.0, + 1549.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1545.0, + 1405.0, + 1545.0, + 1405.0, + 1576.0, + 293.0, + 1576.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1574.0, + 1406.0, + 1574.0, + 1406.0, + 1608.0, + 294.0, + 1608.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1604.0, + 1406.0, + 1604.0, + 1406.0, + 1641.0, + 293.0, + 1641.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1634.0, + 1405.0, + 1634.0, + 1405.0, + 1670.0, + 293.0, + 
1670.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1667.0, + 663.0, + 1667.0, + 663.0, + 1700.0, + 294.0, + 1700.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1713.0, + 1404.0, + 1713.0, + 1404.0, + 1745.0, + 296.0, + 1745.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1740.0, + 513.0, + 1740.0, + 513.0, + 1778.0, + 292.0, + 1778.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 587.0, + 1740.0, + 795.0, + 1740.0, + 795.0, + 1778.0, + 587.0, + 1778.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 822.0, + 1740.0, + 1405.0, + 1740.0, + 1405.0, + 1778.0, + 822.0, + 1778.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1775.0, + 1404.0, + 1775.0, + 1404.0, + 1807.0, + 294.0, + 1807.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1803.0, + 1404.0, + 1803.0, + 1404.0, + 1840.0, + 293.0, + 1840.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1832.0, + 1405.0, + 1832.0, + 1405.0, + 1872.0, + 293.0, + 1872.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1865.0, + 1164.0, + 1865.0, + 1164.0, + 1900.0, + 292.0, + 1900.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 815.0, + 1406.0, + 815.0, + 1406.0, + 852.0, + 295.0, + 852.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 847.0, + 1404.0, + 847.0, + 1404.0, + 881.0, + 295.0, + 881.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 878.0, + 900.0, + 878.0, + 900.0, + 912.0, + 297.0, + 912.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1910.0, + 1407.0, + 1910.0, + 1407.0, + 1945.0, + 294.0, + 1945.0 + ], + "score": 1.0, + 
"text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1943.0, + 1329.0, + 1943.0, + 1329.0, + 1976.0, + 295.0, + 1976.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1403.0, + 1943.0, + 1407.0, + 1943.0, + 1407.0, + 1976.0, + 1403.0, + 1976.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1971.0, + 1404.0, + 1971.0, + 1404.0, + 2009.0, + 292.0, + 2009.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 2004.0, + 405.0, + 2004.0, + 405.0, + 2034.0, + 294.0, + 2034.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 670.0, + 1405.0, + 670.0, + 1405.0, + 709.0, + 293.0, + 709.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 706.0, + 1404.0, + 706.0, + 1404.0, + 736.0, + 297.0, + 736.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 732.0, + 933.0, + 732.0, + 933.0, + 769.0, + 294.0, + 769.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 1, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 297, + 1292, + 1405, + 1292, + 1405, + 1509, + 297, + 1509 + ], + "score": 0.981 + }, + { + "category_id": 1, + "poly": [ + 297, + 849, + 1405, + 849, + 1405, + 1097, + 297, + 1097 + ], + "score": 0.981 + }, + { + "category_id": 1, + "poly": [ + 298, + 228, + 1405, + 228, + 1405, + 415, + 298, + 415 + ], + "score": 0.979 + }, + { + "category_id": 1, + "poly": [ + 298, + 522, + 1405, + 522, + 1405, + 679, + 298, + 679 + ], + "score": 0.979 + }, + { + "category_id": 1, + "poly": [ + 297, + 1538, + 1405, + 1538, + 1405, + 1663, + 297, + 1663 + ], + "score": 0.977 + }, + { + "category_id": 1, + "poly": [ + 297, + 1693, + 1404, + 1693, + 1404, + 1787, + 297, + 1787 + ], + "score": 0.975 + }, + { + "category_id": 1, + "poly": [ + 297, + 707, + 1404, + 707, + 1404, + 804, + 297, + 
804 + ], + "score": 0.975 + }, + { + "category_id": 1, + "poly": [ + 299, + 1856, + 1404, + 1856, + 1404, + 1951, + 299, + 1951 + ], + "score": 0.973 + }, + { + "category_id": 1, + "poly": [ + 299, + 1108, + 1401, + 1108, + 1401, + 1200, + 299, + 1200 + ], + "score": 0.961 + }, + { + "category_id": 8, + "poly": [ + 577, + 1204, + 1119, + 1204, + 1119, + 1288, + 577, + 1288 + ], + "score": 0.954 + }, + { + "category_id": 8, + "poly": [ + 578, + 1956, + 1118, + 1956, + 1118, + 2031, + 578, + 2031 + ], + "score": 0.953 + }, + { + "category_id": 8, + "poly": [ + 533, + 1792, + 1161, + 1792, + 1161, + 1848, + 533, + 1848 + ], + "score": 0.948 + }, + { + "category_id": 8, + "poly": [ + 558, + 809, + 1139, + 809, + 1139, + 846, + 558, + 846 + ], + "score": 0.933 + }, + { + "category_id": 0, + "poly": [ + 298, + 457, + 971, + 457, + 971, + 494, + 298, + 494 + ], + "score": 0.93 + }, + { + "category_id": 9, + "poly": [ + 1366, + 1795, + 1399, + 1795, + 1399, + 1824, + 1366, + 1824 + ], + "score": 0.884 + }, + { + "category_id": 9, + "poly": [ + 1366, + 1978, + 1400, + 1978, + 1400, + 2007, + 1366, + 2007 + ], + "score": 0.88 + }, + { + "category_id": 9, + "poly": [ + 1366, + 1228, + 1400, + 1228, + 1400, + 1258, + 1366, + 1258 + ], + "score": 0.876 + }, + { + "category_id": 2, + "poly": [ + 298, + 75, + 857, + 75, + 857, + 105, + 298, + 105 + ], + "score": 0.857 + }, + { + "category_id": 2, + "poly": [ + 841, + 2088, + 859, + 2088, + 859, + 2112, + 841, + 2112 + ], + "score": 0.656 + }, + { + "category_id": 2, + "poly": [ + 841, + 2088, + 859, + 2088, + 859, + 2112, + 841, + 2112 + ], + "score": 0.556 + }, + { + "category_id": 14, + "poly": [ + 579, + 1953, + 1120, + 1953, + 1120, + 2031, + 579, + 2031 + ], + "score": 0.93, + "latex": "0 \\in \\left[ \\begin{array} { l } { \\nabla _ { x } F ( x ^ { * } , y ^ { * } ) } \\\\ { \\nabla _ { y } G ( x ^ { * } , y ^ { * } ) } \\end{array} \\right] + \\big ( N _ { \\Theta } ( x ^ { * } ) \\times N _ { \\Omega } ( y ^ { * } ) \\big 
) ." + }, + { + "category_id": 14, + "poly": [ + 577, + 1198, + 1122, + 1198, + 1122, + 1290, + 577, + 1290 + ], + "score": 0.93, + "latex": "x ^ { * } \\in \\underset { x \\in \\mathbb { R } ^ { d } } { \\arg \\operatorname* { m i n } } \\sum _ { i = 1 } ^ { n } f _ { i } ( x ) \\iff 0 \\in \\sum _ { i = 1 } ^ { n } \\partial f _ { i } ( x ^ { * } ) ." + }, + { + "category_id": 13, + "poly": [ + 942, + 1855, + 1046, + 1855, + 1046, + 1889, + 942, + 1889 + ], + "score": 0.93, + "latex": "\\Omega \\subseteq \\mathbb { R } ^ { d _ { y } }" + }, + { + "category_id": 13, + "poly": [ + 941, + 1031, + 1061, + 1031, + 1061, + 1065, + 941, + 1065 + ], + "score": 0.93, + "latex": "0 \\in \\partial f ( x )" + }, + { + "category_id": 13, + "poly": [ + 469, + 1141, + 587, + 1141, + 587, + 1172, + 469, + 1172 + ], + "score": 0.93, + "latex": "f _ { 1 } , \\ldots , f _ { n }" + }, + { + "category_id": 13, + "poly": [ + 656, + 1002, + 768, + 1002, + 768, + 1035, + 656, + 1035 + ], + "score": 0.93, + "latex": "0 \\in T ( x )" + }, + { + "category_id": 13, + "poly": [ + 493, + 970, + 629, + 970, + 629, + 1004, + 493, + 1004 + ], + "score": 0.93, + "latex": "0 \\in \\partial f ( x ^ { * } )" + }, + { + "category_id": 13, + "poly": [ + 422, + 1384, + 541, + 1384, + 541, + 1417, + 422, + 1417 + ], + "score": 0.92, + "latex": "\\iota _ { C } ( x ) = 0" + }, + { + "category_id": 13, + "poly": [ + 648, + 740, + 1149, + 740, + 1149, + 775, + 648, + 775 + ], + "score": 0.92, + "latex": "\\partial f ( x ) \\ { \\overset { \\cdot } { = } } \\ \\left\\{ g : f ( y ) \\geq f ( x ) + g ^ { \\top } { \\big ( } { \\bar { y - x } } { \\big ) } \\right\\}" + }, + { + "category_id": 13, + "poly": [ + 298, + 1031, + 512, + 1031, + 512, + 1065, + 298, + 1065 + ], + "score": 0.92, + "latex": "\\bar { \\partial } f ( x ) = \\{ \\nabla f ( x ) \\}" + }, + { + "category_id": 13, + "poly": [ + 783, + 1855, + 888, + 1855, + 888, + 1889, + 783, + 1889 + ], + "score": 0.92, + "latex": "\\Theta \\subseteq 
\\mathbb { R } ^ { d _ { x } }" + }, + { + "category_id": 13, + "poly": [ + 710, + 1383, + 864, + 1383, + 864, + 1416, + 710, + 1416 + ], + "score": 0.92, + "latex": "\\iota _ { C } \\bar { ( } x ) = \\dot { + } \\infty" + }, + { + "category_id": 13, + "poly": [ + 531, + 1063, + 660, + 1063, + 660, + 1096, + 531, + 1096 + ], + "score": 0.92, + "latex": "0 = \\nabla f ( x )" + }, + { + "category_id": 13, + "poly": [ + 907, + 1384, + 981, + 1384, + 981, + 1416, + 907, + 1416 + ], + "score": 0.91, + "latex": "x \\not \\in C" + }, + { + "category_id": 13, + "poly": [ + 1154, + 879, + 1214, + 879, + 1214, + 912, + 1154, + 912 + ], + "score": 0.91, + "latex": "T ( x )" + }, + { + "category_id": 13, + "poly": [ + 542, + 708, + 788, + 708, + 788, + 742, + 542, + 742 + ], + "score": 0.91, + "latex": "f : \\mathbb { R } ^ { d } \\mathbb { R } \\cup \\{ \\infty \\}" + }, + { + "category_id": 13, + "poly": [ + 547, + 1322, + 1027, + 1322, + 1027, + 1358, + 547, + 1358 + ], + "score": 0.91, + "latex": "\\textstyle \\sum _ { i = 1 } ^ { n } S _ { i } = \\{ \\sum _ { i = 1 } ^ { n } s _ { i } \\ | ^ { \\cdot } s _ { i } \\in S _ { i } \\forall i \\in { 1 . . n } \\}" + }, + { + "category_id": 13, + "poly": [ + 584, + 1385, + 658, + 1385, + 658, + 1412, + 584, + 1412 + ], + "score": 0.9, + "latex": "x \\in C" + }, + { + "category_id": 13, + "poly": [ + 1304, + 878, + 1400, + 878, + 1400, + 908, + 1304, + 908 + ], + "score": 0.9, + "latex": "\\boldsymbol { x } ^ { \\mathrm { ~ \\scriptsize ~ \\in ~ } \\mathbb { R } ^ { d } }" + }, + { + "category_id": 14, + "poly": [ + 535, + 1790, + 1161, + 1790, + 1161, + 1849, + 535, + 1849 + ], + "score": 0.9, + "latex": "x ^ { * } \\in \\arg \\operatorname* { m i n } _ { x \\in \\Theta } F ( x , y ^ { * } ) \\quad { \\mathrm { a n d } } \\quad y ^ { * } \\in \\arg \\operatorname* { m i n } _ { y \\in \\Omega } G ( x ^ { * } , y ) ." 
+ }, + { + "category_id": 13, + "poly": [ + 594, + 1445, + 637, + 1445, + 637, + 1475, + 594, + 1475 + ], + "score": 0.89, + "latex": "N _ { C }" + }, + { + "category_id": 13, + "poly": [ + 299, + 1357, + 371, + 1357, + 371, + 1382, + 299, + 1382 + ], + "score": 0.88, + "latex": "x \\in C" + }, + { + "category_id": 13, + "poly": [ + 1274, + 741, + 1310, + 741, + 1310, + 773, + 1274, + 773 + ], + "score": 0.88, + "latex": "\\partial f" + }, + { + "category_id": 13, + "poly": [ + 470, + 741, + 506, + 741, + 506, + 773, + 470, + 773 + ], + "score": 0.88, + "latex": "\\partial f" + }, + { + "category_id": 13, + "poly": [ + 1036, + 1355, + 1062, + 1355, + 1062, + 1385, + 1036, + 1385 + ], + "score": 0.88, + "latex": "f _ { i }" + }, + { + "category_id": 14, + "poly": [ + 559, + 806, + 1135, + 806, + 1135, + 845, + 559, + 845 + ], + "score": 0.87, + "latex": "u \\in \\partial f ( x ) , v \\in \\partial f ( y ) \\implies ( u - v ) ^ { \\top } ( x - y ) \\geq 0 ," + }, + { + "category_id": 13, + "poly": [ + 1198, + 1003, + 1218, + 1003, + 1218, + 1034, + 1198, + 1034 + ], + "score": 0.87, + "latex": "f" + }, + { + "category_id": 13, + "poly": [ + 1355, + 942, + 1374, + 942, + 1374, + 972, + 1355, + 972 + ], + "score": 0.86, + "latex": "f" + }, + { + "category_id": 13, + "poly": [ + 345, + 972, + 377, + 972, + 377, + 999, + 345, + 999 + ], + "score": 0.86, + "latex": "x ^ { * }" + }, + { + "category_id": 13, + "poly": [ + 1202, + 1418, + 1233, + 1418, + 1233, + 1445, + 1202, + 1445 + ], + "score": 0.85, + "latex": "\\iota _ { C }" + }, + { + "category_id": 13, + "poly": [ + 298, + 1891, + 322, + 1891, + 322, + 1916, + 298, + 1916 + ], + "score": 0.84, + "latex": "F" + }, + { + "category_id": 13, + "poly": [ + 374, + 1890, + 399, + 1890, + 399, + 1917, + 374, + 1917 + ], + "score": 0.83, + "latex": "G" + }, + { + "category_id": 13, + "poly": [ + 859, + 1003, + 883, + 1003, + 883, + 1029, + 859, + 1029 + ], + "score": 0.83, + "latex": "T" + }, + { + "category_id": 13, + 
"poly": [ + 597, + 1358, + 621, + 1358, + 621, + 1381, + 597, + 1381 + ], + "score": 0.83, + "latex": "C" + }, + { + "category_id": 13, + "poly": [ + 672, + 1445, + 697, + 1445, + 697, + 1473, + 672, + 1473 + ], + "score": 0.82, + "latex": "C" + }, + { + "category_id": 13, + "poly": [ + 1228, + 1325, + 1256, + 1325, + 1256, + 1351, + 1228, + 1351 + ], + "score": 0.81, + "latex": "X" + }, + { + "category_id": 13, + "poly": [ + 297, + 881, + 321, + 881, + 321, + 907, + 297, + 907 + ], + "score": 0.8, + "latex": "T" + }, + { + "category_id": 13, + "poly": [ + 1366, + 1359, + 1396, + 1359, + 1396, + 1385, + 1366, + 1385 + ], + "score": 0.78, + "latex": "\\iota _ { C }" + }, + { + "category_id": 13, + "poly": [ + 520, + 1008, + 539, + 1008, + 539, + 1029, + 520, + 1029 + ], + "score": 0.77, + "latex": "x" + }, + { + "category_id": 13, + "poly": [ + 592, + 1038, + 610, + 1038, + 610, + 1059, + 592, + 1059 + ], + "score": 0.74, + "latex": "x" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 454.0, + 975.0, + 454.0, + 975.0, + 499.0, + 290.0, + 499.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 298.0, + 73.0, + 857.0, + 73.0, + 857.0, + 108.0, + 298.0, + 108.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 838.0, + 2085.0, + 862.0, + 2085.0, + 862.0, + 2118.0, + 838.0, + 2118.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 838.0, + 2085.0, + 862.0, + 2085.0, + 862.0, + 2118.0, + 838.0, + 2118.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1292.0, + 1406.0, + 1292.0, + 1406.0, + 1326.0, + 295.0, + 1326.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 283.0, + 1306.0, + 546.0, + 1306.0, + 546.0, + 1372.0, + 283.0, + 1372.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1028.0, + 1306.0, + 1227.0, + 1306.0, + 1227.0, + 1372.0, + 1028.0, + 1372.0 + ], + "score": 1.0, + "text": 
"" + }, + { + "category_id": 15, + "poly": [ + 1257.0, + 1306.0, + 1416.0, + 1306.0, + 1416.0, + 1372.0, + 1257.0, + 1372.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1348.0, + 298.0, + 1348.0, + 298.0, + 1392.0, + 291.0, + 1392.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 372.0, + 1348.0, + 596.0, + 1348.0, + 596.0, + 1392.0, + 372.0, + 1392.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 622.0, + 1348.0, + 1035.0, + 1348.0, + 1035.0, + 1392.0, + 622.0, + 1392.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1063.0, + 1348.0, + 1365.0, + 1348.0, + 1365.0, + 1392.0, + 1063.0, + 1392.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1397.0, + 1348.0, + 1410.0, + 1348.0, + 1410.0, + 1392.0, + 1397.0, + 1392.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1383.0, + 421.0, + 1383.0, + 421.0, + 1418.0, + 295.0, + 1418.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 542.0, + 1383.0, + 583.0, + 1383.0, + 583.0, + 1418.0, + 542.0, + 1418.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 659.0, + 1383.0, + 709.0, + 1383.0, + 709.0, + 1418.0, + 659.0, + 1418.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 865.0, + 1383.0, + 906.0, + 1383.0, + 906.0, + 1418.0, + 865.0, + 1418.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 982.0, + 1383.0, + 1406.0, + 1383.0, + 1406.0, + 1418.0, + 982.0, + 1418.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1414.0, + 1201.0, + 1414.0, + 1201.0, + 1448.0, + 292.0, + 1448.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1234.0, + 1414.0, + 1405.0, + 1414.0, + 1405.0, + 1448.0, + 1234.0, + 1448.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 294.0, + 1444.0, + 593.0, + 1444.0, + 593.0, + 1479.0, + 294.0, + 1479.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 638.0, + 1444.0, + 671.0, + 1444.0, + 671.0, + 1479.0, + 638.0, + 1479.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 698.0, + 1444.0, + 1406.0, + 1444.0, + 1406.0, + 1479.0, + 698.0, + 1479.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1476.0, + 1021.0, + 1476.0, + 1021.0, + 1511.0, + 296.0, + 1511.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 847.0, + 1407.0, + 847.0, + 1407.0, + 886.0, + 292.0, + 886.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 875.0, + 296.0, + 875.0, + 296.0, + 915.0, + 291.0, + 915.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 875.0, + 1153.0, + 875.0, + 1153.0, + 915.0, + 322.0, + 915.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1215.0, + 875.0, + 1303.0, + 875.0, + 1303.0, + 915.0, + 1215.0, + 915.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1401.0, + 875.0, + 1407.0, + 875.0, + 1407.0, + 915.0, + 1401.0, + 915.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 907.0, + 1406.0, + 907.0, + 1406.0, + 946.0, + 292.0, + 946.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 940.0, + 1354.0, + 940.0, + 1354.0, + 974.0, + 295.0, + 974.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1375.0, + 940.0, + 1405.0, + 940.0, + 1405.0, + 974.0, + 1375.0, + 974.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 968.0, + 344.0, + 968.0, + 344.0, + 1006.0, + 294.0, + 1006.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 378.0, + 
968.0, + 492.0, + 968.0, + 492.0, + 1006.0, + 378.0, + 1006.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 630.0, + 968.0, + 1405.0, + 968.0, + 1405.0, + 1006.0, + 630.0, + 1006.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1002.0, + 519.0, + 1002.0, + 519.0, + 1036.0, + 295.0, + 1036.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 540.0, + 1002.0, + 655.0, + 1002.0, + 655.0, + 1036.0, + 540.0, + 1036.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 769.0, + 1002.0, + 858.0, + 1002.0, + 858.0, + 1036.0, + 769.0, + 1036.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 884.0, + 1002.0, + 1197.0, + 1002.0, + 1197.0, + 1036.0, + 884.0, + 1036.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1219.0, + 1002.0, + 1405.0, + 1002.0, + 1405.0, + 1036.0, + 1219.0, + 1036.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1029.0, + 297.0, + 1029.0, + 297.0, + 1070.0, + 294.0, + 1070.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 513.0, + 1029.0, + 591.0, + 1029.0, + 591.0, + 1070.0, + 513.0, + 1070.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 611.0, + 1029.0, + 940.0, + 1029.0, + 940.0, + 1070.0, + 611.0, + 1070.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1062.0, + 1029.0, + 1406.0, + 1029.0, + 1406.0, + 1070.0, + 1062.0, + 1070.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1062.0, + 530.0, + 1062.0, + 530.0, + 1100.0, + 294.0, + 1100.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 661.0, + 1062.0, + 671.0, + 1062.0, + 671.0, + 1100.0, + 661.0, + 1100.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 228.0, + 1405.0, + 228.0, + 
1405.0, + 265.0, + 293.0, + 265.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 261.0, + 1405.0, + 261.0, + 1405.0, + 297.0, + 293.0, + 297.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 291.0, + 1405.0, + 291.0, + 1405.0, + 325.0, + 292.0, + 325.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 322.0, + 1407.0, + 322.0, + 1407.0, + 355.0, + 293.0, + 355.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 352.0, + 1405.0, + 352.0, + 1405.0, + 388.0, + 295.0, + 388.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 382.0, + 587.0, + 382.0, + 587.0, + 416.0, + 293.0, + 416.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 526.0, + 1407.0, + 526.0, + 1407.0, + 559.0, + 297.0, + 559.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 555.0, + 1403.0, + 555.0, + 1403.0, + 588.0, + 296.0, + 588.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 584.0, + 1406.0, + 584.0, + 1406.0, + 619.0, + 295.0, + 619.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 614.0, + 1406.0, + 614.0, + 1406.0, + 653.0, + 293.0, + 653.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 647.0, + 936.0, + 647.0, + 936.0, + 681.0, + 296.0, + 681.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1538.0, + 1406.0, + 1538.0, + 1406.0, + 1575.0, + 294.0, + 1575.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1569.0, + 1406.0, + 1569.0, + 1406.0, + 1605.0, + 292.0, + 1605.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1600.0, + 1408.0, + 1600.0, + 1408.0, + 1633.0, + 295.0, + 1633.0 + ], + "score": 1.0, 
+ "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1630.0, + 903.0, + 1630.0, + 903.0, + 1665.0, + 295.0, + 1665.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1693.0, + 1405.0, + 1693.0, + 1405.0, + 1730.0, + 295.0, + 1730.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1724.0, + 1405.0, + 1724.0, + 1405.0, + 1758.0, + 295.0, + 1758.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1752.0, + 1239.0, + 1752.0, + 1239.0, + 1795.0, + 292.0, + 1795.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 706.0, + 541.0, + 706.0, + 541.0, + 744.0, + 295.0, + 744.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 789.0, + 706.0, + 1405.0, + 706.0, + 1405.0, + 744.0, + 789.0, + 744.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 737.0, + 469.0, + 737.0, + 469.0, + 779.0, + 292.0, + 779.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 507.0, + 737.0, + 647.0, + 737.0, + 647.0, + 779.0, + 507.0, + 779.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1150.0, + 737.0, + 1273.0, + 737.0, + 1273.0, + 779.0, + 1150.0, + 779.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1311.0, + 737.0, + 1405.0, + 737.0, + 1405.0, + 779.0, + 1311.0, + 779.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 773.0, + 399.0, + 773.0, + 399.0, + 810.0, + 292.0, + 810.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1855.0, + 782.0, + 1855.0, + 782.0, + 1893.0, + 294.0, + 1893.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 889.0, + 1855.0, + 941.0, + 1855.0, + 941.0, + 1893.0, + 889.0, + 1893.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + 
"poly": [ + 1047.0, + 1855.0, + 1406.0, + 1855.0, + 1406.0, + 1893.0, + 1047.0, + 1893.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1886.0, + 297.0, + 1886.0, + 297.0, + 1923.0, + 294.0, + 1923.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 1886.0, + 373.0, + 1886.0, + 373.0, + 1923.0, + 323.0, + 1923.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 400.0, + 1886.0, + 1404.0, + 1886.0, + 1404.0, + 1923.0, + 400.0, + 1923.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1918.0, + 538.0, + 1918.0, + 538.0, + 1953.0, + 293.0, + 1953.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1108.0, + 1406.0, + 1108.0, + 1406.0, + 1142.0, + 297.0, + 1142.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1136.0, + 468.0, + 1136.0, + 468.0, + 1176.0, + 293.0, + 1176.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 588.0, + 1136.0, + 1406.0, + 1136.0, + 1406.0, + 1176.0, + 588.0, + 1176.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1168.0, + 542.0, + 1168.0, + 542.0, + 1202.0, + 296.0, + 1202.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 2, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 298, + 678, + 1404, + 678, + 1404, + 925, + 298, + 925 + ], + "score": 0.985 + }, + { + "category_id": 1, + "poly": [ + 297, + 1297, + 1405, + 1297, + 1405, + 1517, + 297, + 1517 + ], + "score": 0.982 + }, + { + "category_id": 1, + "poly": [ + 297, + 228, + 1405, + 228, + 1405, + 507, + 297, + 507 + ], + "score": 0.98 + }, + { + "category_id": 1, + "poly": [ + 297, + 939, + 1404, + 939, + 1404, + 1125, + 297, + 1125 + ], + "score": 0.979 + }, + { + "category_id": 1, + "poly": [ + 298, + 1607, + 1406, + 1607, + 
1406, + 1731, + 298, + 1731 + ], + "score": 0.977 + }, + { + "category_id": 1, + "poly": [ + 297, + 1138, + 1405, + 1138, + 1405, + 1263, + 297, + 1263 + ], + "score": 0.976 + }, + { + "category_id": 1, + "poly": [ + 299, + 520, + 1405, + 520, + 1405, + 645, + 299, + 645 + ], + "score": 0.975 + }, + { + "category_id": 1, + "poly": [ + 297, + 1766, + 1404, + 1766, + 1404, + 1953, + 297, + 1953 + ], + "score": 0.974 + }, + { + "category_id": 2, + "poly": [ + 294, + 1977, + 1402, + 1977, + 1402, + 2034, + 294, + 2034 + ], + "score": 0.939 + }, + { + "category_id": 2, + "poly": [ + 298, + 75, + 857, + 75, + 857, + 104, + 298, + 104 + ], + "score": 0.892 + }, + { + "category_id": 8, + "poly": [ + 570, + 1530, + 1125, + 1530, + 1125, + 1590, + 570, + 1590 + ], + "score": 0.868 + }, + { + "category_id": 2, + "poly": [ + 840, + 2089, + 858, + 2089, + 858, + 2111, + 840, + 2111 + ], + "score": 0.79 + }, + { + "category_id": 8, + "poly": [ + 569, + 1530, + 1126, + 1530, + 1126, + 1590, + 569, + 1590 + ], + "score": 0.098 + }, + { + "category_id": 13, + "poly": [ + 297, + 612, + 631, + 612, + 631, + 646, + 297, + 646 + ], + "score": 0.92, + "latex": "A _ { 2 } : ( x , y ) \\mapsto \\bar { \\partial r } ( x ) \\times \\partial d ( y )" + }, + { + "category_id": 13, + "poly": [ + 942, + 550, + 1199, + 550, + 1199, + 585, + 942, + 585 + ], + "score": 0.92, + "latex": "r : \\bar { \\mathbb { R } } ^ { d _ { x } } \\bar { \\mathbb { R } } \\cup \\{ + \\infty \\}" + }, + { + "category_id": 13, + "poly": [ + 1253, + 230, + 1404, + 230, + 1404, + 264, + 1253, + 264 + ], + "score": 0.92, + "latex": "B : ( x , y ) \\mapsto" + }, + { + "category_id": 13, + "poly": [ + 374, + 323, + 450, + 323, + 450, + 354, + 374, + 354 + ], + "score": 0.92, + "latex": "N _ { \\Theta \\times \\Omega }" + }, + { + "category_id": 13, + "poly": [ + 522, + 1640, + 601, + 1640, + 601, + 1671, + 522, + 1671 + ], + "score": 0.91, + "latex": "f = \\iota _ { \\mathcal { C } }" + }, + { + "category_id": 13, + 
"poly": [ + 1162, + 1798, + 1329, + 1798, + 1329, + 1829, + 1162, + 1829 + ], + "score": 0.91, + "latex": "A _ { 1 } , \\ldots , A _ { n } , B" + }, + { + "category_id": 13, + "poly": [ + 332, + 1358, + 529, + 1358, + 529, + 1391, + 332, + 1391 + ], + "score": 0.91, + "latex": "J _ { A } \\overset { \\cdot } { = } ( I + A ) ^ { - 1 }" + }, + { + "category_id": 13, + "poly": [ + 807, + 259, + 895, + 259, + 895, + 289, + 807, + 289 + ], + "score": 0.91, + "latex": "\\mathbb { R } ^ { d _ { x } + d _ { y } }" + }, + { + "category_id": 13, + "poly": [ + 668, + 1641, + 941, + 1641, + 941, + 1675, + 668, + 1675 + ], + "score": 0.91, + "latex": "J _ { \\rho N _ { C } } = \\mathrm { p r o x } _ { \\rho f } = \\mathrm { p r o j } _ { \\mathcal { C } }" + }, + { + "category_id": 14, + "poly": [ + 569, + 1529, + 1128, + 1529, + 1128, + 1591, + 569, + 1591 + ], + "score": 0.9, + "latex": "\\operatorname { p r o x } _ { \\rho f } ( t ) \\doteq \\underset { x \\in \\mathbb { R } ^ { d } } { \\arg \\operatorname* { m i n } } \\left\\{ \\rho f ( x ) + ( 1 / 2 ) \\| x - t \\| ^ { 2 } \\right\\} ." 
+ }, + { + "category_id": 13, + "poly": [ + 1032, + 1640, + 1100, + 1640, + 1100, + 1671, + 1032, + 1671 + ], + "score": 0.9, + "latex": "\\rho > 0" + }, + { + "category_id": 13, + "poly": [ + 587, + 580, + 861, + 580, + 861, + 616, + 587, + 616 + ], + "score": 0.9, + "latex": "d : \\mathbb { R } ^ { d _ { y } } \\mathbb { R } \\cup \\{ + \\infty \\}" + }, + { + "category_id": 13, + "poly": [ + 323, + 231, + 427, + 231, + 427, + 259, + 323, + 259 + ], + "score": 0.9, + "latex": "G = - F" + }, + { + "category_id": 13, + "poly": [ + 1018, + 1982, + 1113, + 1982, + 1113, + 2007, + 1018, + 2007 + ], + "score": 0.9, + "latex": "G \\neq - F" + }, + { + "category_id": 13, + "poly": [ + 297, + 1390, + 598, + 1390, + 598, + 1423, + 297, + 1423 + ], + "score": 0.89, + "latex": "T ^ { - 1 } : x \\mapsto \\{ y : T y \\ni x \\}" + }, + { + "category_id": 13, + "poly": [ + 729, + 1889, + 763, + 1889, + 763, + 1919, + 729, + 1919 + ], + "score": 0.89, + "latex": "A _ { i }" + }, + { + "category_id": 13, + "poly": [ + 295, + 260, + 614, + 260, + 614, + 295, + 295, + 295 + ], + "score": 0.89, + "latex": "( \\nabla _ { x } F ( x , y ) , - \\nabla _ { y } F ( x , y ) ) ^ { \\top }" + }, + { + "category_id": 13, + "poly": [ + 561, + 1609, + 733, + 1609, + 733, + 1645, + 561, + 1645 + ], + "score": 0.89, + "latex": "\\mathrm { p r o x } _ { \\rho f } = \\underset { - } { J } _ { \\rho \\partial f }" + }, + { + "category_id": 13, + "poly": [ + 755, + 1421, + 791, + 1421, + 791, + 1452, + 755, + 1452 + ], + "score": 0.88, + "latex": "\\mathbb { R } ^ { d }" + }, + { + "category_id": 13, + "poly": [ + 1079, + 1391, + 1151, + 1391, + 1151, + 1422, + 1079, + 1422 + ], + "score": 0.87, + "latex": "\\rho > 0" + }, + { + "category_id": 13, + "poly": [ + 587, + 1003, + 607, + 1003, + 607, + 1034, + 587, + 1034 + ], + "score": 0.86, + "latex": "f" + }, + { + "category_id": 13, + "poly": [ + 397, + 1486, + 416, + 1486, + 416, + 1516, + 397, + 1516 + ], + "score": 0.85, + "latex": "f" + }, + { + 
"category_id": 13, + "poly": [ + 1164, + 1390, + 1212, + 1390, + 1212, + 1424, + 1164, + 1424 + ], + "score": 0.85, + "latex": "J _ { \\rho A }" + }, + { + "category_id": 13, + "poly": [ + 772, + 232, + 797, + 232, + 797, + 258, + 772, + 258 + ], + "score": 0.84, + "latex": "F" + }, + { + "category_id": 13, + "poly": [ + 1267, + 1361, + 1291, + 1361, + 1291, + 1387, + 1267, + 1387 + ], + "score": 0.83, + "latex": "T" + }, + { + "category_id": 13, + "poly": [ + 298, + 323, + 323, + 323, + 323, + 350, + 298, + 350 + ], + "score": 0.82, + "latex": "B" + }, + { + "category_id": 13, + "poly": [ + 1250, + 1890, + 1275, + 1890, + 1275, + 1916, + 1250, + 1916 + ], + "score": 0.82, + "latex": "B" + }, + { + "category_id": 13, + "poly": [ + 925, + 384, + 951, + 384, + 951, + 410, + 925, + 410 + ], + "score": 0.82, + "latex": "B" + }, + { + "category_id": 13, + "poly": [ + 642, + 1391, + 666, + 1391, + 666, + 1417, + 642, + 1417 + ], + "score": 0.81, + "latex": "A" + }, + { + "category_id": 13, + "poly": [ + 1234, + 1330, + 1258, + 1330, + 1258, + 1356, + 1234, + 1356 + ], + "score": 0.81, + "latex": "A" + }, + { + "category_id": 13, + "poly": [ + 940, + 588, + 959, + 588, + 959, + 615, + 940, + 615 + ], + "score": 0.8, + "latex": "y" + }, + { + "category_id": 13, + "poly": [ + 517, + 323, + 543, + 323, + 543, + 350, + 517, + 350 + ], + "score": 0.79, + "latex": "B" + }, + { + "category_id": 13, + "poly": [ + 619, + 1361, + 638, + 1361, + 638, + 1387, + 619, + 1387 + ], + "score": 0.78, + "latex": "I" + }, + { + "category_id": 13, + "poly": [ + 1376, + 263, + 1401, + 263, + 1401, + 290, + 1376, + 290 + ], + "score": 0.75, + "latex": "B" + }, + { + "category_id": 13, + "poly": [ + 1273, + 557, + 1293, + 557, + 1293, + 579, + 1273, + 579 + ], + "score": 0.75, + "latex": "x" + }, + { + "category_id": 13, + "poly": [ + 983, + 237, + 1003, + 237, + 1003, + 258, + 983, + 258 + ], + "score": 0.71, + "latex": "x" + }, + { + "category_id": 13, + "poly": [ + 1171, + 236, + 1190, + 236, 
+ 1190, + 262, + 1171, + 262 + ], + "score": 0.69, + "latex": "y" + }, + { + "category_id": 13, + "poly": [ + 374, + 741, + 410, + 741, + 410, + 769, + 374, + 769 + ], + "score": 0.26, + "latex": "\\mathrm { T u }" + }, + { + "category_id": 15, + "poly": [ + 331.0, + 1972.0, + 1017.0, + 1972.0, + 1017.0, + 2009.0, + 331.0, + 2009.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1114.0, + 1972.0, + 1404.0, + 1972.0, + 1404.0, + 2009.0, + 1114.0, + 2009.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 2004.0, + 796.0, + 2004.0, + 796.0, + 2035.0, + 296.0, + 2035.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 72.0, + 858.0, + 72.0, + 858.0, + 108.0, + 297.0, + 108.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 837.0, + 2086.0, + 862.0, + 2086.0, + 862.0, + 2118.0, + 837.0, + 2118.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 677.0, + 1405.0, + 677.0, + 1405.0, + 718.0, + 292.0, + 718.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 711.0, + 1405.0, + 711.0, + 1405.0, + 745.0, + 294.0, + 745.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 741.0, + 373.0, + 741.0, + 373.0, + 775.0, + 294.0, + 775.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 411.0, + 741.0, + 1404.0, + 741.0, + 1404.0, + 775.0, + 411.0, + 775.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 770.0, + 1405.0, + 770.0, + 1405.0, + 806.0, + 293.0, + 806.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 799.0, + 1406.0, + 799.0, + 1406.0, + 839.0, + 292.0, + 839.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 830.0, + 1408.0, + 830.0, + 1408.0, + 869.0, + 291.0, + 869.0 + ], + "score": 1.0, + 
"text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 859.0, + 1406.0, + 859.0, + 1406.0, + 899.0, + 292.0, + 899.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 897.0, + 654.0, + 897.0, + 654.0, + 927.0, + 293.0, + 927.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1298.0, + 1406.0, + 1298.0, + 1406.0, + 1333.0, + 295.0, + 1333.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1329.0, + 1233.0, + 1329.0, + 1233.0, + 1361.0, + 296.0, + 1361.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1259.0, + 1329.0, + 1403.0, + 1329.0, + 1403.0, + 1361.0, + 1259.0, + 1361.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1356.0, + 331.0, + 1356.0, + 331.0, + 1396.0, + 292.0, + 1396.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 530.0, + 1356.0, + 618.0, + 1356.0, + 618.0, + 1396.0, + 530.0, + 1396.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 639.0, + 1356.0, + 1266.0, + 1356.0, + 1266.0, + 1396.0, + 639.0, + 1396.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1292.0, + 1356.0, + 1405.0, + 1356.0, + 1405.0, + 1396.0, + 1292.0, + 1396.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1384.0, + 296.0, + 1384.0, + 296.0, + 1428.0, + 292.0, + 1428.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 599.0, + 1384.0, + 641.0, + 1384.0, + 641.0, + 1428.0, + 599.0, + 1428.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 667.0, + 1384.0, + 1078.0, + 1384.0, + 1078.0, + 1428.0, + 667.0, + 1428.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1152.0, + 1384.0, + 1163.0, + 1384.0, + 1163.0, + 1428.0, + 1152.0, + 1428.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 1213.0, + 1384.0, + 1408.0, + 1384.0, + 1408.0, + 1428.0, + 1213.0, + 1428.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1417.0, + 754.0, + 1417.0, + 754.0, + 1463.0, + 291.0, + 1463.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 792.0, + 1417.0, + 1410.0, + 1417.0, + 1410.0, + 1463.0, + 792.0, + 1463.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1449.0, + 1406.0, + 1449.0, + 1406.0, + 1493.0, + 292.0, + 1493.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1481.0, + 396.0, + 1481.0, + 396.0, + 1521.0, + 293.0, + 1521.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 417.0, + 1481.0, + 447.0, + 1481.0, + 447.0, + 1521.0, + 417.0, + 1521.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 227.0, + 322.0, + 227.0, + 322.0, + 266.0, + 291.0, + 266.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 428.0, + 227.0, + 771.0, + 227.0, + 771.0, + 266.0, + 428.0, + 266.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 798.0, + 227.0, + 982.0, + 227.0, + 982.0, + 266.0, + 798.0, + 266.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1004.0, + 227.0, + 1170.0, + 227.0, + 1170.0, + 266.0, + 1004.0, + 266.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1191.0, + 227.0, + 1252.0, + 227.0, + 1252.0, + 266.0, + 1191.0, + 266.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 252.0, + 294.0, + 252.0, + 294.0, + 300.0, + 290.0, + 300.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 615.0, + 252.0, + 806.0, + 252.0, + 806.0, + 300.0, + 615.0, + 300.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 896.0, + 252.0, 
+ 1375.0, + 252.0, + 1375.0, + 300.0, + 896.0, + 300.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1402.0, + 252.0, + 1408.0, + 252.0, + 1408.0, + 300.0, + 1402.0, + 300.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 290.0, + 1406.0, + 290.0, + 1406.0, + 328.0, + 291.0, + 328.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 320.0, + 373.0, + 320.0, + 373.0, + 357.0, + 324.0, + 357.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 451.0, + 320.0, + 516.0, + 320.0, + 516.0, + 357.0, + 451.0, + 357.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 544.0, + 320.0, + 1406.0, + 320.0, + 1406.0, + 357.0, + 544.0, + 357.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 352.0, + 1408.0, + 352.0, + 1408.0, + 387.0, + 292.0, + 387.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 379.0, + 924.0, + 379.0, + 924.0, + 419.0, + 292.0, + 419.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 952.0, + 379.0, + 1410.0, + 379.0, + 1410.0, + 419.0, + 952.0, + 419.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 411.0, + 1410.0, + 411.0, + 1410.0, + 449.0, + 294.0, + 449.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 443.0, + 1410.0, + 443.0, + 1410.0, + 479.0, + 294.0, + 479.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 473.0, + 721.0, + 473.0, + 721.0, + 511.0, + 292.0, + 511.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 938.0, + 1405.0, + 938.0, + 1405.0, + 975.0, + 294.0, + 975.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 968.0, + 1405.0, + 968.0, + 1405.0, + 1006.0, + 294.0, + 1006.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1001.0, + 586.0, + 1001.0, + 586.0, + 1036.0, + 294.0, + 1036.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 608.0, + 1001.0, + 1406.0, + 1001.0, + 1406.0, + 1036.0, + 608.0, + 1036.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1031.0, + 1406.0, + 1031.0, + 1406.0, + 1066.0, + 294.0, + 1066.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1059.0, + 1405.0, + 1059.0, + 1405.0, + 1097.0, + 292.0, + 1097.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1089.0, + 543.0, + 1089.0, + 543.0, + 1129.0, + 294.0, + 1129.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1605.0, + 560.0, + 1605.0, + 560.0, + 1647.0, + 291.0, + 1647.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 734.0, + 1605.0, + 1406.0, + 1605.0, + 1406.0, + 1647.0, + 734.0, + 1647.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1636.0, + 521.0, + 1636.0, + 521.0, + 1679.0, + 291.0, + 1679.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 602.0, + 1636.0, + 667.0, + 1636.0, + 667.0, + 1679.0, + 602.0, + 1679.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 942.0, + 1636.0, + 1031.0, + 1636.0, + 1031.0, + 1679.0, + 942.0, + 1679.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1101.0, + 1636.0, + 1412.0, + 1636.0, + 1412.0, + 1679.0, + 1101.0, + 1679.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1671.0, + 1407.0, + 1671.0, + 1407.0, + 1704.0, + 296.0, + 1704.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1699.0, + 682.0, + 1699.0, + 682.0, + 1734.0, + 293.0, + 1734.0 + ], + "score": 1.0, + 
"text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1138.0, + 1408.0, + 1138.0, + 1408.0, + 1175.0, + 295.0, + 1175.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1169.0, + 1406.0, + 1169.0, + 1406.0, + 1205.0, + 294.0, + 1205.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1202.0, + 1406.0, + 1202.0, + 1406.0, + 1234.0, + 294.0, + 1234.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1229.0, + 1076.0, + 1229.0, + 1076.0, + 1266.0, + 295.0, + 1266.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 520.0, + 1407.0, + 520.0, + 1407.0, + 557.0, + 294.0, + 557.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 547.0, + 941.0, + 547.0, + 941.0, + 587.0, + 292.0, + 587.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1200.0, + 547.0, + 1272.0, + 547.0, + 1272.0, + 587.0, + 1200.0, + 587.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1294.0, + 547.0, + 1407.0, + 547.0, + 1407.0, + 587.0, + 1294.0, + 587.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 579.0, + 586.0, + 579.0, + 586.0, + 618.0, + 293.0, + 618.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 862.0, + 579.0, + 939.0, + 579.0, + 939.0, + 618.0, + 862.0, + 618.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 960.0, + 579.0, + 1406.0, + 579.0, + 1406.0, + 618.0, + 960.0, + 618.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 632.0, + 612.0, + 955.0, + 612.0, + 955.0, + 648.0, + 632.0, + 648.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1765.0, + 1406.0, + 1765.0, + 1406.0, + 1802.0, + 293.0, + 1802.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + 
"poly": [ + 292.0, + 1797.0, + 1161.0, + 1797.0, + 1161.0, + 1833.0, + 292.0, + 1833.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1330.0, + 1797.0, + 1405.0, + 1797.0, + 1405.0, + 1833.0, + 1330.0, + 1833.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1827.0, + 1404.0, + 1827.0, + 1404.0, + 1863.0, + 294.0, + 1863.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1858.0, + 1406.0, + 1858.0, + 1406.0, + 1893.0, + 293.0, + 1893.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1890.0, + 728.0, + 1890.0, + 728.0, + 1922.0, + 295.0, + 1922.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 764.0, + 1890.0, + 1249.0, + 1890.0, + 1249.0, + 1922.0, + 764.0, + 1922.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1276.0, + 1890.0, + 1405.0, + 1890.0, + 1405.0, + 1922.0, + 1276.0, + 1922.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1920.0, + 1401.0, + 1920.0, + 1401.0, + 1952.0, + 295.0, + 1952.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 3, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 296, + 1726, + 1405, + 1726, + 1405, + 2039, + 296, + 2039 + ], + "score": 0.984 + }, + { + "category_id": 1, + "poly": [ + 296, + 919, + 1404, + 919, + 1404, + 1107, + 296, + 1107 + ], + "score": 0.981 + }, + { + "category_id": 1, + "poly": [ + 296, + 702, + 1406, + 702, + 1406, + 890, + 296, + 890 + ], + "score": 0.98 + }, + { + "category_id": 1, + "poly": [ + 297, + 1345, + 1404, + 1345, + 1404, + 1471, + 297, + 1471 + ], + "score": 0.979 + }, + { + "category_id": 1, + "poly": [ + 296, + 1185, + 1404, + 1185, + 1404, + 1280, + 296, + 1280 + ], + "score": 0.976 + }, + { + "category_id": 1, + "poly": [ + 298, + 1537, + 1404, + 1537, + 1404, + 1622, + 
298, + 1622 + ], + "score": 0.969 + }, + { + "category_id": 1, + "poly": [ + 297, + 229, + 1404, + 229, + 1404, + 325, + 297, + 325 + ], + "score": 0.965 + }, + { + "category_id": 1, + "poly": [ + 296, + 529, + 1405, + 529, + 1405, + 624, + 296, + 624 + ], + "score": 0.962 + }, + { + "category_id": 8, + "poly": [ + 570, + 1291, + 1128, + 1291, + 1128, + 1337, + 570, + 1337 + ], + "score": 0.953 + }, + { + "category_id": 8, + "poly": [ + 534, + 1117, + 1163, + 1117, + 1163, + 1175, + 534, + 1175 + ], + "score": 0.949 + }, + { + "category_id": 1, + "poly": [ + 296, + 433, + 1404, + 433, + 1404, + 499, + 296, + 499 + ], + "score": 0.948 + }, + { + "category_id": 8, + "poly": [ + 515, + 1484, + 1182, + 1484, + 1182, + 1525, + 515, + 1525 + ], + "score": 0.932 + }, + { + "category_id": 0, + "poly": [ + 297, + 364, + 959, + 364, + 959, + 404, + 297, + 404 + ], + "score": 0.93 + }, + { + "category_id": 8, + "poly": [ + 385, + 636, + 1310, + 636, + 1310, + 694, + 385, + 694 + ], + "score": 0.924 + }, + { + "category_id": 0, + "poly": [ + 300, + 1658, + 643, + 1658, + 643, + 1695, + 300, + 1695 + ], + "score": 0.921 + }, + { + "category_id": 2, + "poly": [ + 298, + 74, + 857, + 74, + 857, + 105, + 298, + 105 + ], + "score": 0.919 + }, + { + "category_id": 9, + "poly": [ + 1366, + 1299, + 1400, + 1299, + 1400, + 1330, + 1366, + 1330 + ], + "score": 0.877 + }, + { + "category_id": 9, + "poly": [ + 1366, + 1132, + 1400, + 1132, + 1400, + 1161, + 1366, + 1161 + ], + "score": 0.871 + }, + { + "category_id": 9, + "poly": [ + 1366, + 1489, + 1400, + 1489, + 1400, + 1519, + 1366, + 1519 + ], + "score": 0.866 + }, + { + "category_id": 9, + "poly": [ + 1366, + 649, + 1400, + 649, + 1400, + 680, + 1366, + 680 + ], + "score": 0.849 + }, + { + "category_id": 2, + "poly": [ + 841, + 2088, + 859, + 2088, + 859, + 2112, + 841, + 2112 + ], + "score": 0.748 + }, + { + "category_id": 13, + "poly": [ + 1193, + 1217, + 1327, + 1217, + 1327, + 1251, + 1193, + 1251 + ], + "score": 0.93, + 
"latex": "\\varphi _ { k } ( p ^ { * } ) \\leq 0" + }, + { + "category_id": 13, + "poly": [ + 1004, + 736, + 1283, + 736, + 1283, + 771, + 1004, + 771 + ], + "score": 0.93, + "latex": "( z ^ { \\ast } , w _ { 1 } ^ { \\ast } , \\dots , w _ { n + 1 } ^ { \\ast } ) \\in \\mathcal { S }" + }, + { + "category_id": 13, + "poly": [ + 1074, + 1971, + 1317, + 1971, + 1317, + 2005, + 1074, + 2005 + ], + "score": 0.93, + "latex": "\\boldsymbol { p } ^ { k + 1 } = \\boldsymbol { p } ^ { k } - \\alpha _ { k } \\nabla \\varphi _ { k }" + }, + { + "category_id": 13, + "poly": [ + 752, + 1789, + 872, + 1789, + 872, + 1823, + 752, + 1823 + ], + "score": 0.93, + "latex": "\\varphi _ { k } ( p ) = 0" + }, + { + "category_id": 13, + "poly": [ + 806, + 1574, + 1035, + 1574, + 1035, + 1619, + 806, + 1619 + ], + "score": 0.92, + "latex": "\\begin{array} { r } { \\bar { x } ^ { k } = \\frac { 1 } { n + 1 } \\sum _ { i = 1 } ^ { n + 1 } x _ { i } ^ { k } } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 607, + 1820, + 668, + 1820, + 668, + 1854, + 607, + 1854 + ], + "score": 0.92, + "latex": "\\{ \\alpha _ { k } \\}" + }, + { + "category_id": 14, + "poly": [ + 536, + 1116, + 1159, + 1116, + 1159, + 1176, + 536, + 1176 + ], + "score": 0.92, + "latex": "\\begin{array} { r } { \\mathcal { P } \\doteq \\left\\{ ( z , w _ { 1 } , \\ldots , w _ { n + 1 } ) \\in \\mathbb { R } ^ { ( n + 2 ) d } \\ \\Big | \\ \\sum _ { i = 1 } ^ { n + 1 } w _ { i } = 0 \\right\\} , } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 990, + 1407, + 1048, + 1407, + 1048, + 1442, + 990, + 1442 + ], + "score": 0.92, + "latex": "p ^ { k + 1 }" + }, + { + "category_id": 13, + "poly": [ + 1095, + 1536, + 1228, + 1536, + 1228, + 1572, + 1095, + 1572 + ], + "score": 0.92, + "latex": "p ^ { k + 1 } \\ \\in \\ { \\mathcal { P } }" + }, + { + "category_id": 13, + "poly": [ + 751, + 1012, + 840, + 1012, + 840, + 1045, + 751, + 1045 + ], + "score": 0.92, + "latex": "p ^ { k } \\in \\mathcal { P }" + }, + { + 
"category_id": 13, + "poly": [ + 1322, + 1790, + 1400, + 1790, + 1400, + 1822, + 1322, + 1822 + ], + "score": 0.91, + "latex": "- \\nabla \\varphi _ { k }" + }, + { + "category_id": 13, + "poly": [ + 725, + 737, + 885, + 737, + 885, + 770, + 725, + 770 + ], + "score": 0.91, + "latex": "w _ { 1 } ^ { * } , \\ldots , w _ { n + 1 } ^ { * }" + }, + { + "category_id": 13, + "poly": [ + 297, + 2000, + 583, + 2000, + 583, + 2038, + 297, + 2038 + ], + "score": 0.91, + "latex": "\\mathbf { \\chi } ^ { \\dot { k } } = ( z ^ { k } , w _ { 1 } ^ { k } , \\dots , w _ { n + 1 } ^ { k } )" + }, + { + "category_id": 13, + "poly": [ + 817, + 1185, + 1056, + 1185, + 1056, + 1220, + 817, + 1220 + ], + "score": 0.91, + "latex": "\\{ p \\in { \\mathcal { P } } \\mid \\varphi _ { k } ( p ) \\leq 0 \\}" + }, + { + "category_id": 13, + "poly": [ + 1006, + 1217, + 1140, + 1217, + 1140, + 1251, + 1006, + 1251 + ], + "score": 0.91, + "latex": "\\varphi _ { k } \\tilde { ( p ^ { k } ) } > 0" + }, + { + "category_id": 14, + "poly": [ + 387, + 634, + 1309, + 634, + 1309, + 695, + 387, + 695 + ], + "score": 0.91, + "latex": "\\begin{array} { r } { \\mathcal { S } \\doteq \\left\\{ ( z , w _ { 1 } , \\ldots , w _ { n + 1 } ) \\ \\middle | \\ w _ { i } \\in A _ { i } ( z ) \\forall i \\in 1 . . n , w _ { n + 1 } = B ( z ) , \\sum _ { i = 1 } ^ { n + 1 } w _ { i } = 0 \\right\\} . } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 370, + 704, + 700, + 704, + 700, + 740, + 370, + 740 + ], + "score": 0.91, + "latex": "p ^ { * } = ( z ^ { * } , w _ { 1 } ^ { * } \\ldots , w _ { n + 1 } ^ { * } ) \\in \\mathcal { S }" + }, + { + "category_id": 13, + "poly": [ + 555, + 1187, + 594, + 1187, + 594, + 1217, + 555, + 1217 + ], + "score": 0.91, + "latex": "H _ { k }" + }, + { + "category_id": 13, + "poly": [ + 674, + 1347, + 841, + 1347, + 841, + 1382, + 674, + 1382 + ], + "score": 0.9, + "latex": "i \\in { 1 . . 
( n + 1 ) }" + }, + { + "category_id": 14, + "poly": [ + 572, + 1290, + 1127, + 1290, + 1127, + 1335, + 572, + 1335 + ], + "score": 0.9, + "latex": "\\begin{array} { r } { \\varphi _ { k } ( z , w _ { 1 } , \\ldots , w _ { n + 1 } ) \\doteq \\sum _ { i = 1 } ^ { n + 1 } \\langle z - x _ { i } ^ { k } , y _ { i } ^ { k } - w _ { i } \\rangle } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 435, + 801, + 593, + 801, + 593, + 831, + 435, + 831 + ], + "score": 0.9, + "latex": "w _ { 1 } ^ { * } , \\ldots , w _ { n + 1 } ^ { * }" + }, + { + "category_id": 13, + "poly": [ + 298, + 1218, + 440, + 1218, + 440, + 1248, + 298, + 1248 + ], + "score": 0.9, + "latex": "\\varphi _ { k } : \\mathscr { P } \\mathbb { R }" + }, + { + "category_id": 13, + "poly": [ + 1185, + 1042, + 1216, + 1042, + 1216, + 1075, + 1185, + 1075 + ], + "score": 0.9, + "latex": "p ^ { k }" + }, + { + "category_id": 14, + "poly": [ + 517, + 1482, + 1178, + 1482, + 1178, + 1524, + 517, + 1524 + ], + "score": 0.89, + "latex": "p ^ { k + 1 } = p ^ { k } - \\alpha _ { k } \\nabla \\varphi _ { k } , \\quad \\mathrm { ~ w h e r e ~ } \\quad \\alpha _ { k } = \\varphi _ { k } ( p ^ { k } ) / \\| \\nabla \\varphi _ { k } \\| ^ { 2 } ," + }, + { + "category_id": 13, + "poly": [ + 298, + 1251, + 379, + 1251, + 379, + 1280, + 298, + 1280 + ], + "score": 0.89, + "latex": "p ^ { * } \\in { \\mathcal { S } }" + }, + { + "category_id": 13, + "poly": [ + 501, + 1540, + 559, + 1540, + 559, + 1571, + 501, + 1571 + ], + "score": 0.89, + "latex": "\\nabla \\varphi _ { k }" + }, + { + "category_id": 13, + "poly": [ + 720, + 232, + 754, + 232, + 754, + 261, + 720, + 261 + ], + "score": 0.89, + "latex": "A _ { i }" + }, + { + "category_id": 13, + "poly": [ + 485, + 1345, + 660, + 1345, + 660, + 1382, + 485, + 1382 + ], + "score": 0.89, + "latex": "( x _ { i } ^ { k } , y _ { i } ^ { k } ) \\in \\mathbb { R } ^ { 2 d }" + }, + { + "category_id": 13, + "poly": [ + 1206, + 1408, + 1236, + 1408, + 1236, + 1442, + 1206, 
+ 1442 + ], + "score": 0.89, + "latex": "p ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 298, + 1044, + 327, + 1044, + 327, + 1076, + 298, + 1076 + ], + "score": 0.89, + "latex": "p ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 300, + 1570, + 726, + 1570, + 726, + 1625, + 300, + 1625 + ], + "score": 0.89, + "latex": "\\left( \\sum _ { i = 1 } ^ { n + 1 } y _ { i } ^ { k } , x _ { 1 } ^ { k } - { \\bar { x } } ^ { k } , \\dots , x _ { n + 1 } - { \\bar { x } } ^ { k } \\right)" + }, + { + "category_id": 13, + "poly": [ + 1297, + 1410, + 1335, + 1410, + 1335, + 1440, + 1297, + 1440 + ], + "score": 0.89, + "latex": "H _ { k }" + }, + { + "category_id": 13, + "poly": [ + 297, + 766, + 633, + 766, + 633, + 799, + 297, + 799 + ], + "score": 0.89, + "latex": "p ^ { * } = ( z ^ { * } , w _ { 1 } ^ { * } \\ldots , w _ { n + 1 } ^ { * } ) \\in \\mathcal { S }" + }, + { + "category_id": 13, + "poly": [ + 1278, + 1044, + 1317, + 1044, + 1317, + 1074, + 1278, + 1074 + ], + "score": 0.88, + "latex": "H _ { k }" + }, + { + "category_id": 13, + "poly": [ + 337, + 1014, + 376, + 1014, + 376, + 1044, + 337, + 1044 + ], + "score": 0.88, + "latex": "H _ { k }" + }, + { + "category_id": 13, + "poly": [ + 1049, + 706, + 1078, + 706, + 1078, + 732, + 1049, + 732 + ], + "score": 0.87, + "latex": "z ^ { * }" + }, + { + "category_id": 13, + "poly": [ + 1303, + 1539, + 1403, + 1539, + 1403, + 1573, + 1303, + 1573 + ], + "score": 0.87, + "latex": "\\nabla \\varphi _ { k } \\ =" + }, + { + "category_id": 13, + "poly": [ + 1042, + 1854, + 1076, + 1854, + 1076, + 1881, + 1042, + 1881 + ], + "score": 0.86, + "latex": "\\alpha _ { k }" + }, + { + "category_id": 13, + "poly": [ + 1195, + 1885, + 1226, + 1885, + 1226, + 1913, + 1195, + 1913 + ], + "score": 0.86, + "latex": "\\rho _ { k }" + }, + { + "category_id": 13, + "poly": [ + 1111, + 1887, + 1145, + 1887, + 1145, + 1912, + 1111, + 1912 + ], + "score": 0.86, + "latex": "\\alpha _ { k }" + }, + { + "category_id": 13, + "poly": [ + 
1079, + 1763, + 1114, + 1763, + 1114, + 1790, + 1079, + 1790 + ], + "score": 0.86, + "latex": "\\alpha _ { k }" + }, + { + "category_id": 13, + "poly": [ + 861, + 1224, + 895, + 1224, + 895, + 1249, + 861, + 1249 + ], + "score": 0.85, + "latex": "\\varphi _ { k }" + }, + { + "category_id": 13, + "poly": [ + 741, + 1251, + 775, + 1251, + 775, + 1280, + 741, + 1280 + ], + "score": 0.85, + "latex": "\\varphi _ { k }" + }, + { + "category_id": 13, + "poly": [ + 1316, + 1077, + 1340, + 1077, + 1340, + 1102, + 1316, + 1102 + ], + "score": 0.85, + "latex": "\\mathcal { P }" + }, + { + "category_id": 13, + "poly": [ + 1037, + 1944, + 1060, + 1944, + 1060, + 1970, + 1037, + 1970 + ], + "score": 0.84, + "latex": "\\mathcal { P }" + }, + { + "category_id": 13, + "poly": [ + 717, + 769, + 746, + 769, + 746, + 793, + 717, + 793 + ], + "score": 0.84, + "latex": "z ^ { * }" + }, + { + "category_id": 13, + "poly": [ + 419, + 739, + 447, + 739, + 447, + 763, + 419, + 763 + ], + "score": 0.84, + "latex": "z ^ { * }" + }, + { + "category_id": 13, + "poly": [ + 459, + 986, + 482, + 986, + 482, + 1010, + 459, + 1010 + ], + "score": 0.84, + "latex": "\\mathcal { P }" + }, + { + "category_id": 13, + "poly": [ + 297, + 1977, + 332, + 1977, + 332, + 2003, + 297, + 2003 + ], + "score": 0.83, + "latex": "\\varphi _ { k }" + }, + { + "category_id": 13, + "poly": [ + 1078, + 1381, + 1102, + 1381, + 1102, + 1407, + 1078, + 1407 + ], + "score": 0.83, + "latex": "\\mathcal { P }" + }, + { + "category_id": 13, + "poly": [ + 876, + 769, + 906, + 769, + 906, + 794, + 876, + 794 + ], + "score": 0.83, + "latex": "z ^ { * }" + }, + { + "category_id": 13, + "poly": [ + 850, + 1541, + 875, + 1541, + 875, + 1567, + 850, + 1567 + ], + "score": 0.83, + "latex": "\\mathcal { P }" + }, + { + "category_id": 13, + "poly": [ + 959, + 1045, + 981, + 1045, + 981, + 1071, + 959, + 1071 + ], + "score": 0.82, + "latex": "s" + }, + { + "category_id": 13, + "poly": [ + 625, + 829, + 647, + 829, + 647, + 854, + 625, + 
854 + ], + "score": 0.82, + "latex": "s" + }, + { + "category_id": 13, + "poly": [ + 1099, + 859, + 1121, + 859, + 1121, + 885, + 1099, + 885 + ], + "score": 0.82, + "latex": "s" + }, + { + "category_id": 13, + "poly": [ + 1217, + 1944, + 1241, + 1944, + 1241, + 1970, + 1217, + 1970 + ], + "score": 0.82, + "latex": "\\mathcal { P }" + }, + { + "category_id": 13, + "poly": [ + 662, + 985, + 680, + 985, + 680, + 1010, + 662, + 1010 + ], + "score": 0.81, + "latex": "k" + }, + { + "category_id": 13, + "poly": [ + 1098, + 1015, + 1120, + 1015, + 1120, + 1041, + 1098, + 1041 + ], + "score": 0.81, + "latex": "s" + }, + { + "category_id": 13, + "poly": [ + 1300, + 954, + 1323, + 954, + 1323, + 980, + 1300, + 980 + ], + "score": 0.81, + "latex": "s" + }, + { + "category_id": 13, + "poly": [ + 403, + 1944, + 429, + 1944, + 429, + 1971, + 403, + 1971 + ], + "score": 0.8, + "latex": "\\mathcal { P }" + }, + { + "category_id": 13, + "poly": [ + 327, + 262, + 353, + 262, + 353, + 289, + 327, + 289 + ], + "score": 0.79, + "latex": "B" + }, + { + "category_id": 13, + "poly": [ + 908, + 1015, + 930, + 1015, + 930, + 1041, + 908, + 1041 + ], + "score": 0.78, + "latex": "s" + }, + { + "category_id": 13, + "poly": [ + 1182, + 828, + 1206, + 828, + 1206, + 855, + 1182, + 855 + ], + "score": 0.31, + "latex": "\\&" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 362.0, + 961.0, + 362.0, + 961.0, + 408.0, + 291.0, + 408.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1656.0, + 644.0, + 1656.0, + 644.0, + 1699.0, + 293.0, + 1699.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 72.0, + 858.0, + 72.0, + 858.0, + 108.0, + 297.0, + 108.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 838.0, + 2085.0, + 862.0, + 2085.0, + 862.0, + 2119.0, + 838.0, + 2119.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1729.0, + 1403.0, + 1729.0, + 1403.0, 
+ 1761.0, + 295.0, + 1761.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1757.0, + 1078.0, + 1757.0, + 1078.0, + 1793.0, + 294.0, + 1793.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1115.0, + 1757.0, + 1405.0, + 1757.0, + 1405.0, + 1793.0, + 1115.0, + 1793.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1786.0, + 751.0, + 1786.0, + 751.0, + 1828.0, + 291.0, + 1828.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 873.0, + 1786.0, + 1321.0, + 1786.0, + 1321.0, + 1828.0, + 873.0, + 1828.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1401.0, + 1786.0, + 1406.0, + 1786.0, + 1406.0, + 1828.0, + 1401.0, + 1828.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1819.0, + 606.0, + 1819.0, + 606.0, + 1855.0, + 294.0, + 1855.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 669.0, + 1819.0, + 1405.0, + 1819.0, + 1405.0, + 1855.0, + 669.0, + 1855.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1852.0, + 1041.0, + 1852.0, + 1041.0, + 1884.0, + 295.0, + 1884.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1077.0, + 1852.0, + 1405.0, + 1852.0, + 1405.0, + 1884.0, + 1077.0, + 1884.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1881.0, + 1110.0, + 1881.0, + 1110.0, + 1917.0, + 294.0, + 1917.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1146.0, + 1881.0, + 1194.0, + 1881.0, + 1194.0, + 1917.0, + 1146.0, + 1917.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1227.0, + 1881.0, + 1405.0, + 1881.0, + 1405.0, + 1917.0, + 1227.0, + 1917.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1912.0, + 1403.0, + 1912.0, + 1403.0, + 
1944.0, + 294.0, + 1944.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1942.0, + 402.0, + 1942.0, + 402.0, + 1975.0, + 293.0, + 1975.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 430.0, + 1942.0, + 1036.0, + 1942.0, + 1036.0, + 1975.0, + 430.0, + 1975.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1061.0, + 1942.0, + 1216.0, + 1942.0, + 1216.0, + 1975.0, + 1061.0, + 1975.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1242.0, + 1942.0, + 1405.0, + 1942.0, + 1405.0, + 1975.0, + 1242.0, + 1975.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 285.0, + 1965.0, + 296.0, + 1965.0, + 296.0, + 2051.0, + 285.0, + 2051.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 584.0, + 1965.0, + 1073.0, + 1965.0, + 1073.0, + 2051.0, + 584.0, + 2051.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1318.0, + 1965.0, + 1408.0, + 1965.0, + 1408.0, + 2051.0, + 1318.0, + 2051.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 920.0, + 1405.0, + 920.0, + 1405.0, + 956.0, + 294.0, + 956.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 953.0, + 1299.0, + 953.0, + 1299.0, + 986.0, + 293.0, + 986.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1324.0, + 953.0, + 1405.0, + 953.0, + 1405.0, + 986.0, + 1324.0, + 986.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 978.0, + 458.0, + 978.0, + 458.0, + 1021.0, + 291.0, + 1021.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 483.0, + 978.0, + 661.0, + 978.0, + 661.0, + 1021.0, + 483.0, + 1021.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 681.0, + 978.0, + 1407.0, + 978.0, + 1407.0, + 1021.0, + 681.0, + 1021.0 + 
], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1014.0, + 336.0, + 1014.0, + 336.0, + 1046.0, + 295.0, + 1046.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 377.0, + 1014.0, + 750.0, + 1014.0, + 750.0, + 1046.0, + 377.0, + 1046.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 841.0, + 1014.0, + 907.0, + 1014.0, + 907.0, + 1046.0, + 841.0, + 1046.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 931.0, + 1014.0, + 1097.0, + 1014.0, + 1097.0, + 1046.0, + 931.0, + 1046.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1121.0, + 1014.0, + 1405.0, + 1014.0, + 1405.0, + 1046.0, + 1121.0, + 1046.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1041.0, + 297.0, + 1041.0, + 297.0, + 1081.0, + 293.0, + 1081.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 328.0, + 1041.0, + 958.0, + 1041.0, + 958.0, + 1081.0, + 328.0, + 1081.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 982.0, + 1041.0, + 1184.0, + 1041.0, + 1184.0, + 1081.0, + 982.0, + 1081.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1217.0, + 1041.0, + 1277.0, + 1041.0, + 1277.0, + 1081.0, + 1217.0, + 1081.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1318.0, + 1041.0, + 1406.0, + 1041.0, + 1406.0, + 1081.0, + 1318.0, + 1081.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1073.0, + 1315.0, + 1073.0, + 1315.0, + 1111.0, + 291.0, + 1111.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1341.0, + 1073.0, + 1404.0, + 1073.0, + 1404.0, + 1111.0, + 1341.0, + 1111.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 698.0, + 369.0, + 698.0, + 369.0, + 746.0, + 293.0, + 746.0 + ], + "score": 1.0, 
+ "text": "" + }, + { + "category_id": 15, + "poly": [ + 701.0, + 698.0, + 1048.0, + 698.0, + 1048.0, + 746.0, + 701.0, + 746.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1079.0, + 698.0, + 1408.0, + 698.0, + 1408.0, + 746.0, + 1079.0, + 746.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 289.0, + 730.0, + 418.0, + 730.0, + 418.0, + 778.0, + 289.0, + 778.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 448.0, + 730.0, + 724.0, + 730.0, + 724.0, + 778.0, + 448.0, + 778.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 886.0, + 730.0, + 1003.0, + 730.0, + 1003.0, + 778.0, + 886.0, + 778.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1284.0, + 730.0, + 1409.0, + 730.0, + 1409.0, + 778.0, + 1284.0, + 778.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 289.0, + 760.0, + 296.0, + 760.0, + 296.0, + 806.0, + 289.0, + 806.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 634.0, + 760.0, + 716.0, + 760.0, + 716.0, + 806.0, + 634.0, + 806.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 747.0, + 760.0, + 875.0, + 760.0, + 875.0, + 806.0, + 747.0, + 806.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 907.0, + 760.0, + 1412.0, + 760.0, + 1412.0, + 806.0, + 907.0, + 806.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 792.0, + 434.0, + 792.0, + 434.0, + 836.0, + 290.0, + 836.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 594.0, + 792.0, + 1409.0, + 792.0, + 1409.0, + 836.0, + 594.0, + 836.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 827.0, + 624.0, + 827.0, + 624.0, + 859.0, + 295.0, + 859.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 648.0, + 827.0, 
+ 1181.0, + 827.0, + 1181.0, + 859.0, + 648.0, + 859.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1207.0, + 827.0, + 1408.0, + 827.0, + 1408.0, + 859.0, + 1207.0, + 859.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 851.0, + 1098.0, + 851.0, + 1098.0, + 896.0, + 291.0, + 896.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1122.0, + 851.0, + 1273.0, + 851.0, + 1273.0, + 896.0, + 1122.0, + 896.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1341.0, + 484.0, + 1341.0, + 484.0, + 1387.0, + 291.0, + 1387.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 661.0, + 1341.0, + 673.0, + 1341.0, + 673.0, + 1387.0, + 661.0, + 1387.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 842.0, + 1341.0, + 1407.0, + 1341.0, + 1407.0, + 1387.0, + 842.0, + 1387.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1380.0, + 1077.0, + 1380.0, + 1077.0, + 1413.0, + 296.0, + 1413.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1103.0, + 1380.0, + 1404.0, + 1380.0, + 1404.0, + 1413.0, + 1103.0, + 1413.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1407.0, + 989.0, + 1407.0, + 989.0, + 1444.0, + 291.0, + 1444.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1049.0, + 1407.0, + 1205.0, + 1407.0, + 1205.0, + 1444.0, + 1049.0, + 1444.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1237.0, + 1407.0, + 1296.0, + 1407.0, + 1296.0, + 1444.0, + 1237.0, + 1444.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1336.0, + 1407.0, + 1406.0, + 1407.0, + 1406.0, + 1444.0, + 1336.0, + 1444.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1441.0, + 1072.0, + 1441.0, 
+ 1072.0, + 1474.0, + 296.0, + 1474.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1183.0, + 554.0, + 1183.0, + 554.0, + 1224.0, + 294.0, + 1224.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 595.0, + 1183.0, + 816.0, + 1183.0, + 816.0, + 1224.0, + 595.0, + 1224.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1057.0, + 1183.0, + 1406.0, + 1183.0, + 1406.0, + 1224.0, + 1057.0, + 1224.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1215.0, + 297.0, + 1215.0, + 297.0, + 1253.0, + 291.0, + 1253.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 441.0, + 1215.0, + 860.0, + 1215.0, + 860.0, + 1253.0, + 441.0, + 1253.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 896.0, + 1215.0, + 1005.0, + 1215.0, + 1005.0, + 1253.0, + 896.0, + 1253.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1141.0, + 1215.0, + 1192.0, + 1215.0, + 1192.0, + 1253.0, + 1141.0, + 1253.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1328.0, + 1215.0, + 1406.0, + 1215.0, + 1406.0, + 1253.0, + 1328.0, + 1253.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1246.0, + 297.0, + 1246.0, + 297.0, + 1282.0, + 291.0, + 1282.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 380.0, + 1246.0, + 740.0, + 1246.0, + 740.0, + 1282.0, + 380.0, + 1282.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 776.0, + 1246.0, + 1217.0, + 1246.0, + 1217.0, + 1282.0, + 776.0, + 1282.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 1529.0, + 500.0, + 1529.0, + 500.0, + 1578.0, + 290.0, + 1578.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 560.0, + 1529.0, + 849.0, + 1529.0, + 849.0, + 1578.0, 
+ 560.0, + 1578.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 876.0, + 1529.0, + 1094.0, + 1529.0, + 1094.0, + 1578.0, + 876.0, + 1578.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1229.0, + 1529.0, + 1302.0, + 1529.0, + 1302.0, + 1578.0, + 1229.0, + 1578.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1404.0, + 1529.0, + 1408.0, + 1529.0, + 1408.0, + 1578.0, + 1404.0, + 1578.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1556.0, + 299.0, + 1556.0, + 299.0, + 1636.0, + 292.0, + 1636.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 727.0, + 1556.0, + 805.0, + 1556.0, + 805.0, + 1636.0, + 727.0, + 1636.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1036.0, + 1556.0, + 1060.0, + 1556.0, + 1060.0, + 1636.0, + 1036.0, + 1636.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 230.0, + 719.0, + 230.0, + 719.0, + 265.0, + 295.0, + 265.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 755.0, + 230.0, + 1402.0, + 230.0, + 1402.0, + 265.0, + 755.0, + 265.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 262.0, + 326.0, + 262.0, + 326.0, + 297.0, + 295.0, + 297.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 354.0, + 262.0, + 1404.0, + 262.0, + 1404.0, + 297.0, + 354.0, + 297.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 290.0, + 1302.0, + 290.0, + 1302.0, + 328.0, + 295.0, + 328.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 528.0, + 1406.0, + 528.0, + 1406.0, + 568.0, + 294.0, + 568.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 562.0, + 1403.0, + 562.0, + 1403.0, + 592.0, + 296.0, + 592.0 + ], + "score": 1.0, + 
"text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 592.0, + 906.0, + 592.0, + 906.0, + 626.0, + 294.0, + 626.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 431.0, + 1405.0, + 431.0, + 1405.0, + 473.0, + 294.0, + 473.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 465.0, + 435.0, + 465.0, + 435.0, + 498.0, + 295.0, + 498.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 4, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 298, + 413, + 1404, + 413, + 1404, + 635, + 298, + 635 + ], + "score": 0.981 + }, + { + "category_id": 1, + "poly": [ + 297, + 228, + 1403, + 228, + 1403, + 385, + 297, + 385 + ], + "score": 0.976 + }, + { + "category_id": 1, + "poly": [ + 298, + 648, + 1404, + 648, + 1404, + 774, + 298, + 774 + ], + "score": 0.975 + }, + { + "category_id": 1, + "poly": [ + 297, + 1166, + 1402, + 1166, + 1402, + 1289, + 297, + 1289 + ], + "score": 0.973 + }, + { + "category_id": 1, + "poly": [ + 298, + 802, + 1404, + 802, + 1404, + 899, + 298, + 899 + ], + "score": 0.973 + }, + { + "category_id": 1, + "poly": [ + 293, + 1436, + 1401, + 1436, + 1401, + 1501, + 293, + 1501 + ], + "score": 0.945 + }, + { + "category_id": 8, + "poly": [ + 639, + 909, + 1056, + 909, + 1056, + 950, + 639, + 950 + ], + "score": 0.925 + }, + { + "category_id": 1, + "poly": [ + 297, + 959, + 1100, + 959, + 1100, + 991, + 297, + 991 + ], + "score": 0.922 + }, + { + "category_id": 8, + "poly": [ + 613, + 1000, + 1090, + 1000, + 1090, + 1043, + 613, + 1043 + ], + "score": 0.921 + }, + { + "category_id": 1, + "poly": [ + 290, + 1099, + 1363, + 1099, + 1363, + 1136, + 290, + 1136 + ], + "score": 0.915 + }, + { + "category_id": 8, + "poly": [ + 567, + 1047, + 1132, + 1047, + 1132, + 1091, + 567, + 1091 + ], + "score": 0.907 + }, + { + "category_id": 8, + "poly": [ + 391, + 1387, + 1301, + 1387, + 1301, + 1427, + 391, + 1427 
+ ], + "score": 0.902 + }, + { + "category_id": 9, + "poly": [ + 1352, + 1053, + 1400, + 1053, + 1400, + 1084, + 1352, + 1084 + ], + "score": 0.893 + }, + { + "category_id": 9, + "poly": [ + 1352, + 1304, + 1400, + 1304, + 1400, + 1335, + 1352, + 1335 + ], + "score": 0.886 + }, + { + "category_id": 1, + "poly": [ + 298, + 1348, + 1257, + 1348, + 1257, + 1380, + 298, + 1380 + ], + "score": 0.884 + }, + { + "category_id": 9, + "poly": [ + 1352, + 1008, + 1400, + 1008, + 1400, + 1038, + 1352, + 1038 + ], + "score": 0.88 + }, + { + "category_id": 9, + "poly": [ + 1366, + 915, + 1400, + 915, + 1400, + 945, + 1366, + 945 + ], + "score": 0.876 + }, + { + "category_id": 2, + "poly": [ + 298, + 75, + 857, + 75, + 857, + 105, + 298, + 105 + ], + "score": 0.869 + }, + { + "category_id": 0, + "poly": [ + 295, + 1558, + 869, + 1558, + 869, + 1592, + 295, + 1592 + ], + "score": 0.844 + }, + { + "category_id": 8, + "poly": [ + 366, + 1298, + 1309, + 1298, + 1309, + 1342, + 366, + 1342 + ], + "score": 0.823 + }, + { + "category_id": 2, + "poly": [ + 839, + 2088, + 859, + 2088, + 859, + 2113, + 839, + 2113 + ], + "score": 0.798 + }, + { + "category_id": 8, + "poly": [ + 273, + 1597, + 1376, + 1597, + 1376, + 2010, + 273, + 2010 + ], + "score": 0.519 + }, + { + "category_id": 1, + "poly": [ + 366, + 1298, + 1309, + 1298, + 1309, + 1342, + 366, + 1342 + ], + "score": 0.109 + }, + { + "category_id": 1, + "poly": [ + 273, + 1597, + 1376, + 1597, + 1376, + 2010, + 273, + 2010 + ], + "score": 0.108 + }, + { + "category_id": 5, + "poly": [ + 273, + 1597, + 1376, + 1597, + 1376, + 2010, + 273, + 2010 + ], + "score": 0.095, + "html": "
1 for k =1,2,... do
2 fori∈1..n do
3t=2k+Tw
4=JrA(tοΌ‰
5=T-1(t-xοΌ‰
6rk=B(zk)+∈k
7
8yn+1=B(+1οΌ‰+ek // ek is unknown noise term n+1
9w+1=w- 1 )n+1
101xοΌ‰ i∈1..(n+1) n+1 β‰₯i=1
" + }, + { + "category_id": 13, + "poly": [ + 590, + 803, + 820, + 803, + 820, + 839, + 590, + 839 + ], + "score": 0.95, + "latex": "\\mathcal { F } _ { k } \\doteq \\sigma ( p ^ { 1 } , \\ldots , p ^ { k } )" + }, + { + "category_id": 13, + "poly": [ + 491, + 835, + 550, + 835, + 550, + 872, + 491, + 872 + ], + "score": 0.94, + "latex": "y _ { n + 1 } ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 875, + 803, + 1010, + 803, + 1010, + 839, + 875, + 839 + ], + "score": 0.93, + "latex": "\\mathcal { E } _ { k } \\doteq \\sigma ( \\epsilon ^ { k } )" + }, + { + "category_id": 13, + "poly": [ + 697, + 415, + 790, + 415, + 790, + 451, + 697, + 451 + ], + "score": 0.93, + "latex": "( x _ { i } ^ { k } , y _ { i } ^ { k } )" + }, + { + "category_id": 13, + "poly": [ + 393, + 259, + 485, + 259, + 485, + 295, + 393, + 295 + ], + "score": 0.93, + "latex": "( x _ { i } ^ { k } , y _ { i } ^ { k } )" + }, + { + "category_id": 13, + "poly": [ + 1097, + 450, + 1208, + 450, + 1208, + 484, + 1097, + 484 + ], + "score": 0.92, + "latex": "( \\Omega , { \\mathcal { F } } , P )" + }, + { + "category_id": 13, + "poly": [ + 1083, + 322, + 1180, + 322, + 1180, + 355, + 1083, + 355 + ], + "score": 0.92, + "latex": "( n + 7 ) d" + }, + { + "category_id": 13, + "poly": [ + 531, + 261, + 692, + 261, + 692, + 295, + 531, + 295 + ], + "score": 0.92, + "latex": "i \\in { 1 . . 
( n + 1 ) }" + }, + { + "category_id": 13, + "poly": [ + 1134, + 573, + 1318, + 573, + 1318, + 606, + 1134, + 606 + ], + "score": 0.92, + "latex": "\\{ p : { \\varphi } _ { k } ( p ) = 0 \\}" + }, + { + "category_id": 13, + "poly": [ + 784, + 539, + 926, + 539, + 926, + 570, + 784, + 570 + ], + "score": 0.92, + "latex": "\\epsilon ^ { k } = e ^ { k } = 0" + }, + { + "category_id": 13, + "poly": [ + 417, + 415, + 510, + 415, + 510, + 451, + 417, + 451 + ], + "score": 0.91, + "latex": "( x _ { i } ^ { k } , y _ { i } ^ { k } )" + }, + { + "category_id": 13, + "poly": [ + 296, + 322, + 407, + 322, + 407, + 355, + 296, + 355 + ], + "score": 0.91, + "latex": "( 3 n + 5 ) d" + }, + { + "category_id": 13, + "poly": [ + 371, + 1103, + 665, + 1103, + 665, + 1135, + 371, + 1135 + ], + "score": 0.91, + "latex": "0 \\le N _ { 1 } , N _ { 2 } , N _ { 3 } , N _ { 4 } < \\infty" + }, + { + "category_id": 14, + "poly": [ + 638, + 908, + 1060, + 908, + 1060, + 950, + 638, + 950 + ], + "score": 0.91, + "latex": "\\mathbb { E } [ \\epsilon ^ { k } | \\mathcal { F } _ { k } ] = 0 , \\quad \\mathbb { E } [ e ^ { k } | \\mathcal { F } _ { k } ] = 0 \\quad a . s ." + }, + { + "category_id": 13, + "poly": [ + 1153, + 540, + 1245, + 540, + 1245, + 574, + 1153, + 574 + ], + "score": 0.91, + "latex": "( \\bar { x _ { i } ^ { k } } , y _ { i } ^ { k } )" + }, + { + "category_id": 14, + "poly": [ + 564, + 998, + 1128, + 998, + 1128, + 1093, + 564, + 1093 + ], + "score": 0.91, + "latex": "\\begin{array} { r l } & { \\mathbb { E } \\left[ \\| \\epsilon ^ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } \\right] \\leq N _ { 1 } + N _ { 2 } \\| B ( z ^ { k } ) \\| ^ { 2 } \\quad a . s . } \\\\ & { \\mathbb { E } \\left[ \\| e ^ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } , \\mathcal { E } _ { k } \\right] \\leq N _ { 3 } + N _ { 4 } \\| B ( x _ { n + 1 } ^ { k } ) \\| ^ { 2 } \\quad a . s . 
, } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 573, + 1350, + 645, + 1350, + 645, + 1377, + 573, + 1377 + ], + "score": 0.9, + "latex": "L = 1" + }, + { + "category_id": 13, + "poly": [ + 417, + 601, + 448, + 601, + 448, + 635, + 417, + 635 + ], + "score": 0.89, + "latex": "p ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 340, + 1890, + 597, + 1890, + 597, + 1924, + 340, + 1924 + ], + "score": 0.88, + "latex": "y _ { n + 1 } ^ { k } = B ( x _ { n + 1 } ^ { k } ) + e ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 341, + 1853, + 673, + 1853, + 673, + 1887, + 341, + 1887 + ], + "score": 0.88, + "latex": "x _ { n + 1 } ^ { k } = z ^ { k } - \\rho _ { k } ( r ^ { k } - w _ { n + 1 } ^ { k } )" + }, + { + "category_id": 13, + "poly": [ + 725, + 651, + 758, + 651, + 758, + 680, + 725, + 680 + ], + "score": 0.88, + "latex": "A _ { i }" + }, + { + "category_id": 13, + "poly": [ + 1060, + 682, + 1093, + 682, + 1093, + 711, + 1060, + 711 + ], + "score": 0.88, + "latex": "A _ { i }" + }, + { + "category_id": 13, + "poly": [ + 883, + 1100, + 912, + 1100, + 912, + 1130, + 883, + 1130 + ], + "score": 0.88, + "latex": "e ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 963, + 1100, + 990, + 1100, + 990, + 1130, + 963, + 1130 + ], + "score": 0.87, + "latex": "\\epsilon ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 420, + 451, + 456, + 451, + 456, + 478, + 420, + 478 + ], + "score": 0.87, + "latex": "\\mathbb { R } ^ { d }" + }, + { + "category_id": 13, + "poly": [ + 341, + 1818, + 537, + 1818, + 537, + 1851, + 341, + 1851 + ], + "score": 0.87, + "latex": "r ^ { k } = B ( z ^ { k } ) + \\epsilon ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 411, + 835, + 441, + 835, + 441, + 865, + 411, + 865 + ], + "score": 0.87, + "latex": "r ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 345, + 448, + 374, + 448, + 374, + 478, + 345, + 478 + ], + "score": 0.87, + "latex": "\\epsilon ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 673, + 
1173, + 705, + 1173, + 705, + 1200, + 673, + 1200 + ], + "score": 0.86, + "latex": "\\rho _ { k }" + }, + { + "category_id": 13, + "poly": [ + 758, + 1173, + 793, + 1173, + 793, + 1198, + 758, + 1198 + ], + "score": 0.86, + "latex": "\\alpha _ { k }" + }, + { + "category_id": 13, + "poly": [ + 1371, + 415, + 1401, + 415, + 1401, + 445, + 1371, + 445 + ], + "score": 0.86, + "latex": "e ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 778, + 236, + 813, + 236, + 813, + 263, + 778, + 263 + ], + "score": 0.86, + "latex": "\\varphi _ { k }" + }, + { + "category_id": 13, + "poly": [ + 340, + 1927, + 638, + 1927, + 638, + 1961, + 340, + 1961 + ], + "score": 0.85, + "latex": "z ^ { k + 1 } = z ^ { k } - \\alpha _ { k } \\textstyle \\sum _ { i = 1 } ^ { n + 1 } y _ { i } ^ { k }" + }, + { + "category_id": 14, + "poly": [ + 391, + 1299, + 1305, + 1299, + 1305, + 1341, + 391, + 1341 + ], + "score": 0.85, + "latex": "\\begin{array} { r } { \\sum _ { k = 1 } ^ { \\infty } \\alpha _ { k } \\rho _ { k } = \\infty , \\quad \\sum _ { k = 1 } ^ { \\infty } \\alpha _ { k } ^ { 2 } < \\infty , \\quad \\sum _ { k = 1 } ^ { \\infty } \\alpha _ { k } \\rho _ { k } ^ { 2 } < \\infty , \\mathrm { a n d } \\rho _ { k } \\leq \\overline { \\rho } < 1 / L . 
} \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 384, + 1774, + 604, + 1774, + 604, + 1806, + 384, + 1806 + ], + "score": 0.84, + "latex": "y _ { i } ^ { k } = \\tau ^ { - 1 } ( t _ { i } ^ { k } - x _ { i } ^ { k } )" + }, + { + "category_id": 13, + "poly": [ + 1270, + 452, + 1295, + 452, + 1295, + 478, + 1270, + 478 + ], + "score": 0.82, + "latex": "B" + }, + { + "category_id": 13, + "poly": [ + 651, + 1469, + 670, + 1469, + 670, + 1495, + 651, + 1495 + ], + "score": 0.82, + "latex": "k" + }, + { + "category_id": 13, + "poly": [ + 360, + 682, + 386, + 682, + 386, + 708, + 360, + 708 + ], + "score": 0.8, + "latex": "B" + }, + { + "category_id": 13, + "poly": [ + 587, + 1470, + 600, + 1470, + 600, + 1495, + 587, + 1495 + ], + "score": 0.78, + "latex": "i" + }, + { + "category_id": 13, + "poly": [ + 609, + 1444, + 629, + 1444, + 629, + 1465, + 609, + 1465 + ], + "score": 0.78, + "latex": "\\tau" + }, + { + "category_id": 13, + "poly": [ + 341, + 513, + 433, + 513, + 433, + 541, + 341, + 541 + ], + "score": 0.77, + "latex": "i \\in 1 . . n" + }, + { + "category_id": 13, + "poly": [ + 513, + 604, + 535, + 604, + 535, + 630, + 513, + 630 + ], + "score": 0.74, + "latex": "s" + }, + { + "category_id": 13, + "poly": [ + 298, + 512, + 330, + 512, + 330, + 542, + 298, + 542 + ], + "score": 0.74, + "latex": "A _ { i }" + }, + { + "category_id": 13, + "poly": [ + 671, + 298, + 692, + 298, + 692, + 319, + 671, + 319 + ], + "score": 0.74, + "latex": "n" + }, + { + "category_id": 13, + "poly": [ + 964, + 1813, + 995, + 1813, + 995, + 1846, + 964, + 1846 + ], + "score": 0.73, + "latex": "\\epsilon ^ { k }" + }, + { + "category_id": 14, + "poly": [ + 396, + 1387, + 768, + 1387, + 768, + 1426, + 396, + 1426 + ], + "score": 0.73, + "latex": "\\alpha _ { k } = k ^ { - 0 . 5 - p } \\mathrm { f o r } 0 < p < 0 . 
5 ," + }, + { + "category_id": 13, + "poly": [ + 827, + 359, + 847, + 359, + 847, + 380, + 827, + 380 + ], + "score": 0.72, + "latex": "n" + }, + { + "category_id": 13, + "poly": [ + 841, + 1387, + 1000, + 1387, + 1000, + 1426, + 841, + 1426 + ], + "score": 0.71, + "latex": "\\rho _ { k } = k ^ { - 0 . 5 + t }" + }, + { + "category_id": 13, + "poly": [ + 380, + 1603, + 664, + 1603, + 664, + 1641, + 380, + 1641 + ], + "score": 0.71, + "latex": "p ^ { 1 } = ( z ^ { 1 } , w _ { 1 } ^ { 1 } , \\ldots , w _ { n + 1 } ^ { 1 } )" + }, + { + "category_id": 13, + "poly": [ + 342, + 1963, + 974, + 1963, + 974, + 2003, + 342, + 2003 + ], + "score": 0.67, + "latex": "\\begin{array} { r } { w _ { i } ^ { k + 1 } = w _ { i } ^ { k } - \\alpha _ { k } ( x _ { i } ^ { k } - \\frac { 1 } { n + 1 } \\sum _ { i = 1 } ^ { n + 1 } x _ { i } ^ { k } ) \\quad i \\in { 1 . . ( n + 1 ) } } \\end{array}" + }, + { + "category_id": 14, + "poly": [ + 396, + 1386, + 1304, + 1386, + 1304, + 1426, + 396, + 1426 + ], + "score": 0.62, + "latex": "\\alpha _ { k } = k ^ { - 0 . 5 - p } \\mathrm { f o r } 0 < p < 0 . 5 , \\mathrm { a n d } \\rho _ { k } = k ^ { - 0 . 5 + t } \\mathrm { f o r } p \\leq t < 0 . 5 p + 0 . 2 5 ." + }, + { + "category_id": 13, + "poly": [ + 381, + 1738, + 552, + 1738, + 552, + 1771, + 381, + 1771 + ], + "score": 0.61, + "latex": "x _ { i } ^ { k } = J _ { \\tau A _ { i } } ( t _ { i } ^ { k } )" + }, + { + "category_id": 13, + "poly": [ + 380, + 1699, + 557, + 1699, + 557, + 1735, + 380, + 1735 + ], + "score": 0.61, + "latex": "t _ { i } ^ { k } = z ^ { k } + \\tau w _ { i } ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 956, + 1884, + 988, + 1884, + 988, + 1917, + 956, + 1917 + ], + "score": 0.57, + "latex": "e ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 381, + 1671, + 475, + 1671, + 475, + 1698, + 381, + 1698 + ], + "score": 0.54, + "latex": "i \\in 1 . . 
n" + }, + { + "category_id": 13, + "poly": [ + 1056, + 1391, + 1299, + 1391, + 1299, + 1426, + 1056, + 1426 + ], + "score": 0.46, + "latex": "p \\leq t < 0 . 5 p + 0 . 2 5 ." + }, + { + "category_id": 13, + "poly": [ + 709, + 1599, + 873, + 1599, + 873, + 1641, + 709, + 1641 + ], + "score": 0.4, + "latex": "\\begin{array} { r } { \\sum _ { i = 1 } ^ { n + 1 } w _ { i } ^ { 1 } = 0 } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 340, + 1640, + 480, + 1640, + 480, + 1670, + 340, + 1670 + ], + "score": 0.29, + "latex": "k = 1 , 2 , \\dots" + }, + { + "category_id": 15, + "poly": [ + 298.0, + 73.0, + 857.0, + 73.0, + 857.0, + 108.0, + 298.0, + 108.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1553.0, + 870.0, + 1553.0, + 870.0, + 1601.0, + 293.0, + 1601.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 840.0, + 2087.0, + 861.0, + 2087.0, + 861.0, + 2117.0, + 840.0, + 2117.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 411.0, + 416.0, + 411.0, + 416.0, + 454.0, + 292.0, + 454.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 511.0, + 411.0, + 696.0, + 411.0, + 696.0, + 454.0, + 511.0, + 454.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 791.0, + 411.0, + 1370.0, + 411.0, + 1370.0, + 454.0, + 791.0, + 454.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1402.0, + 411.0, + 1406.0, + 411.0, + 1406.0, + 454.0, + 1402.0, + 454.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 447.0, + 344.0, + 447.0, + 344.0, + 485.0, + 293.0, + 485.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 375.0, + 447.0, + 419.0, + 447.0, + 419.0, + 485.0, + 375.0, + 485.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 457.0, + 447.0, + 1096.0, + 447.0, + 1096.0, + 485.0, + 457.0, + 
485.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1209.0, + 447.0, + 1269.0, + 447.0, + 1269.0, + 485.0, + 1209.0, + 485.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1296.0, + 447.0, + 1406.0, + 447.0, + 1406.0, + 485.0, + 1296.0, + 485.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 481.0, + 1404.0, + 481.0, + 1404.0, + 516.0, + 293.0, + 516.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 508.0, + 297.0, + 508.0, + 297.0, + 550.0, + 293.0, + 550.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 331.0, + 508.0, + 340.0, + 508.0, + 340.0, + 550.0, + 331.0, + 550.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 434.0, + 508.0, + 1406.0, + 508.0, + 1406.0, + 550.0, + 434.0, + 550.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 534.0, + 783.0, + 534.0, + 783.0, + 581.0, + 291.0, + 581.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 927.0, + 534.0, + 1152.0, + 534.0, + 1152.0, + 581.0, + 927.0, + 581.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1246.0, + 534.0, + 1409.0, + 534.0, + 1409.0, + 581.0, + 1246.0, + 581.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 569.0, + 1133.0, + 569.0, + 1133.0, + 611.0, + 292.0, + 611.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1319.0, + 569.0, + 1405.0, + 569.0, + 1405.0, + 611.0, + 1319.0, + 611.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 601.0, + 416.0, + 601.0, + 416.0, + 638.0, + 293.0, + 638.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 449.0, + 601.0, + 512.0, + 601.0, + 512.0, + 638.0, + 449.0, + 638.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 536.0, + 601.0, + 548.0, + 601.0, + 548.0, + 638.0, + 536.0, + 638.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 228.0, + 777.0, + 228.0, + 777.0, + 265.0, + 295.0, + 265.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 814.0, + 228.0, + 1404.0, + 228.0, + 1404.0, + 265.0, + 814.0, + 265.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 256.0, + 392.0, + 256.0, + 392.0, + 301.0, + 292.0, + 301.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 486.0, + 256.0, + 530.0, + 256.0, + 530.0, + 301.0, + 486.0, + 301.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 693.0, + 256.0, + 1408.0, + 256.0, + 1408.0, + 301.0, + 693.0, + 301.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 288.0, + 670.0, + 288.0, + 670.0, + 329.0, + 292.0, + 329.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 693.0, + 288.0, + 1405.0, + 288.0, + 1405.0, + 329.0, + 693.0, + 329.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 408.0, + 317.0, + 1082.0, + 317.0, + 1082.0, + 359.0, + 408.0, + 359.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1181.0, + 317.0, + 1405.0, + 317.0, + 1405.0, + 359.0, + 1181.0, + 359.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 349.0, + 826.0, + 349.0, + 826.0, + 388.0, + 294.0, + 388.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 848.0, + 349.0, + 1227.0, + 349.0, + 1227.0, + 388.0, + 848.0, + 388.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 649.0, + 724.0, + 649.0, + 724.0, + 685.0, + 294.0, + 685.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 759.0, + 649.0, + 1405.0, + 649.0, + 
1405.0, + 685.0, + 759.0, + 685.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 682.0, + 359.0, + 682.0, + 359.0, + 715.0, + 296.0, + 715.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 387.0, + 682.0, + 1059.0, + 682.0, + 1059.0, + 715.0, + 387.0, + 715.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1094.0, + 682.0, + 1405.0, + 682.0, + 1405.0, + 715.0, + 1094.0, + 715.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 712.0, + 1405.0, + 712.0, + 1405.0, + 745.0, + 294.0, + 745.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 743.0, + 1026.0, + 743.0, + 1026.0, + 777.0, + 292.0, + 777.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1165.0, + 672.0, + 1165.0, + 672.0, + 1202.0, + 293.0, + 1202.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 706.0, + 1165.0, + 757.0, + 1165.0, + 757.0, + 1202.0, + 706.0, + 1202.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 794.0, + 1165.0, + 1406.0, + 1165.0, + 1406.0, + 1202.0, + 794.0, + 1202.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1195.0, + 1404.0, + 1195.0, + 1404.0, + 1232.0, + 295.0, + 1232.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1226.0, + 1407.0, + 1226.0, + 1407.0, + 1265.0, + 291.0, + 1265.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 1255.0, + 371.0, + 1255.0, + 371.0, + 1294.0, + 290.0, + 1294.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 802.0, + 589.0, + 802.0, + 589.0, + 841.0, + 293.0, + 841.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 821.0, + 802.0, + 874.0, + 802.0, + 874.0, + 841.0, + 821.0, + 841.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1011.0, + 802.0, + 1405.0, + 802.0, + 1405.0, + 841.0, + 1011.0, + 841.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 287.0, + 823.0, + 410.0, + 823.0, + 410.0, + 881.0, + 287.0, + 881.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 442.0, + 823.0, + 490.0, + 823.0, + 490.0, + 881.0, + 442.0, + 881.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 551.0, + 823.0, + 1413.0, + 823.0, + 1413.0, + 881.0, + 551.0, + 881.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 864.0, + 435.0, + 864.0, + 435.0, + 903.0, + 293.0, + 903.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1436.0, + 608.0, + 1436.0, + 608.0, + 1472.0, + 295.0, + 1472.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 630.0, + 1436.0, + 1404.0, + 1436.0, + 1404.0, + 1472.0, + 630.0, + 1472.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1467.0, + 586.0, + 1467.0, + 586.0, + 1503.0, + 295.0, + 1503.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 601.0, + 1467.0, + 650.0, + 1467.0, + 650.0, + 1503.0, + 601.0, + 1503.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 671.0, + 1467.0, + 1339.0, + 1467.0, + 1339.0, + 1503.0, + 671.0, + 1503.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 956.0, + 1102.0, + 956.0, + 1102.0, + 996.0, + 295.0, + 996.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1098.0, + 370.0, + 1098.0, + 370.0, + 1139.0, + 295.0, + 1139.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 666.0, + 1098.0, + 882.0, + 1098.0, + 882.0, + 1139.0, + 666.0, + 1139.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 913.0, + 1098.0, + 962.0, + 1098.0, + 962.0, + 1139.0, + 913.0, + 1139.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 991.0, + 1098.0, + 1370.0, + 1098.0, + 1370.0, + 1139.0, + 991.0, + 1139.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1346.0, + 572.0, + 1346.0, + 572.0, + 1382.0, + 293.0, + 1382.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 646.0, + 1346.0, + 1255.0, + 1346.0, + 1255.0, + 1382.0, + 646.0, + 1382.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1306.0, + 1287.0, + 1311.0, + 1287.0, + 1311.0, + 1352.0, + 1306.0, + 1352.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 270.0, + 1583.0, + 339.0, + 1583.0, + 339.0, + 1672.0, + 270.0, + 1672.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 665.0, + 1583.0, + 708.0, + 1583.0, + 708.0, + 1672.0, + 665.0, + 1672.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 874.0, + 1583.0, + 1123.0, + 1583.0, + 1123.0, + 1672.0, + 874.0, + 1672.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 273.0, + 1675.0, + 293.0, + 1675.0, + 293.0, + 1699.0, + 273.0, + 1699.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 335.0, + 1668.0, + 380.0, + 1668.0, + 380.0, + 1700.0, + 335.0, + 1700.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 476.0, + 1668.0, + 516.0, + 1668.0, + 516.0, + 1700.0, + 476.0, + 1700.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 274.0, + 1709.0, + 293.0, + 1709.0, + 293.0, + 1731.0, + 274.0, + 1731.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 371.0, + 1692.0, + 379.0, + 1692.0, + 379.0, + 1742.0, + 371.0, + 1742.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + 
"poly": [ + 558.0, + 1692.0, + 566.0, + 1692.0, + 566.0, + 1742.0, + 558.0, + 1742.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 274.0, + 1744.0, + 294.0, + 1744.0, + 294.0, + 1767.0, + 274.0, + 1767.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 373.0, + 1726.0, + 380.0, + 1726.0, + 380.0, + 1778.0, + 373.0, + 1778.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 553.0, + 1726.0, + 559.0, + 1726.0, + 559.0, + 1778.0, + 553.0, + 1778.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 273.0, + 1778.0, + 297.0, + 1778.0, + 297.0, + 1805.0, + 273.0, + 1805.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 365.0, + 1764.0, + 383.0, + 1764.0, + 383.0, + 1812.0, + 365.0, + 1812.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 605.0, + 1764.0, + 610.0, + 1764.0, + 610.0, + 1812.0, + 605.0, + 1812.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 273.0, + 1823.0, + 293.0, + 1823.0, + 293.0, + 1847.0, + 273.0, + 1847.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 335.0, + 1807.0, + 340.0, + 1807.0, + 340.0, + 1852.0, + 335.0, + 1852.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 538.0, + 1807.0, + 545.0, + 1807.0, + 545.0, + 1852.0, + 538.0, + 1852.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 916.0, + 1812.0, + 963.0, + 1812.0, + 963.0, + 1850.0, + 916.0, + 1850.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 996.0, + 1812.0, + 1365.0, + 1812.0, + 1365.0, + 1850.0, + 996.0, + 1850.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 273.0, + 1858.0, + 293.0, + 1858.0, + 293.0, + 1881.0, + 273.0, + 1881.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 325.0, + 1833.0, + 
340.0, + 1833.0, + 340.0, + 1901.0, + 325.0, + 1901.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 674.0, + 1833.0, + 684.0, + 1833.0, + 684.0, + 1901.0, + 674.0, + 1901.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 327.0, + 1879.0, + 339.0, + 1879.0, + 339.0, + 1929.0, + 327.0, + 1929.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 598.0, + 1879.0, + 611.0, + 1879.0, + 611.0, + 1929.0, + 598.0, + 1929.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 906.0, + 1881.0, + 955.0, + 1881.0, + 955.0, + 1922.0, + 906.0, + 1922.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 989.0, + 1881.0, + 1358.0, + 1881.0, + 1358.0, + 1922.0, + 989.0, + 1922.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 273.0, + 1894.0, + 292.0, + 1894.0, + 292.0, + 1954.0, + 273.0, + 1954.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1900.0, + 339.0, + 1900.0, + 339.0, + 1979.0, + 322.0, + 1979.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 639.0, + 1900.0, + 657.0, + 1900.0, + 657.0, + 1979.0, + 639.0, + 1979.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 267.0, + 1969.0, + 294.0, + 1969.0, + 294.0, + 1995.0, + 267.0, + 1995.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 326.0, + 1945.0, + 341.0, + 1945.0, + 341.0, + 2012.0, + 326.0, + 2012.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 975.0, + 1945.0, + 981.0, + 1945.0, + 981.0, + 2012.0, + 975.0, + 2012.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 5, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 296, + 1632, + 1407, + 1632, + 1407, + 2034, + 296, + 2034 + ], + "score": 0.984 + }, + { + 
"category_id": 1, + "poly": [ + 297, + 742, + 1405, + 742, + 1405, + 898, + 297, + 898 + ], + "score": 0.976 + }, + { + "category_id": 1, + "poly": [ + 298, + 416, + 1407, + 416, + 1407, + 541, + 298, + 541 + ], + "score": 0.976 + }, + { + "category_id": 1, + "poly": [ + 297, + 293, + 1404, + 293, + 1404, + 388, + 297, + 388 + ], + "score": 0.969 + }, + { + "category_id": 1, + "poly": [ + 298, + 1131, + 1405, + 1131, + 1405, + 1228, + 298, + 1228 + ], + "score": 0.965 + }, + { + "category_id": 1, + "poly": [ + 298, + 1239, + 1402, + 1239, + 1402, + 1303, + 298, + 1303 + ], + "score": 0.948 + }, + { + "category_id": 8, + "poly": [ + 593, + 700, + 1103, + 700, + 1103, + 740, + 593, + 740 + ], + "score": 0.943 + }, + { + "category_id": 8, + "poly": [ + 453, + 545, + 1246, + 545, + 1246, + 630, + 453, + 630 + ], + "score": 0.942 + }, + { + "category_id": 8, + "poly": [ + 663, + 1533, + 1041, + 1533, + 1041, + 1578, + 663, + 1578 + ], + "score": 0.941 + }, + { + "category_id": 1, + "poly": [ + 297, + 633, + 1403, + 633, + 1403, + 696, + 297, + 696 + ], + "score": 0.936 + }, + { + "category_id": 1, + "poly": [ + 298, + 1333, + 1401, + 1333, + 1401, + 1397, + 298, + 1397 + ], + "score": 0.933 + }, + { + "category_id": 1, + "poly": [ + 298, + 1017, + 1399, + 1017, + 1399, + 1118, + 298, + 1118 + ], + "score": 0.914 + }, + { + "category_id": 2, + "poly": [ + 297, + 74, + 857, + 74, + 857, + 106, + 297, + 106 + ], + "score": 0.912 + }, + { + "category_id": 0, + "poly": [ + 298, + 225, + 786, + 225, + 786, + 262, + 298, + 262 + ], + "score": 0.911 + }, + { + "category_id": 8, + "poly": [ + 449, + 1437, + 1249, + 1437, + 1249, + 1492, + 449, + 1492 + ], + "score": 0.903 + }, + { + "category_id": 1, + "poly": [ + 297, + 1581, + 1048, + 1581, + 1048, + 1614, + 297, + 1614 + ], + "score": 0.902 + }, + { + "category_id": 1, + "poly": [ + 298, + 1400, + 1039, + 1400, + 1039, + 1434, + 298, + 1434 + ], + "score": 0.898 + }, + { + "category_id": 9, + "poly": [ + 1351, + 905, + 1400, 
+ 905, + 1400, + 935, + 1351, + 935 + ], + "score": 0.889 + }, + { + "category_id": 1, + "poly": [ + 295, + 1495, + 821, + 1495, + 821, + 1528, + 295, + 1528 + ], + "score": 0.889 + }, + { + "category_id": 8, + "poly": [ + 456, + 900, + 1232, + 900, + 1232, + 941, + 456, + 941 + ], + "score": 0.887 + }, + { + "category_id": 9, + "poly": [ + 1351, + 1448, + 1400, + 1448, + 1400, + 1480, + 1351, + 1480 + ], + "score": 0.876 + }, + { + "category_id": 9, + "poly": [ + 1352, + 595, + 1400, + 595, + 1400, + 626, + 1352, + 626 + ], + "score": 0.872 + }, + { + "category_id": 1, + "poly": [ + 299, + 976, + 1121, + 976, + 1121, + 1014, + 299, + 1014 + ], + "score": 0.818 + }, + { + "category_id": 1, + "poly": [ + 317, + 942, + 1229, + 942, + 1229, + 973, + 317, + 973 + ], + "score": 0.777 + }, + { + "category_id": 2, + "poly": [ + 841, + 2087, + 858, + 2087, + 858, + 2111, + 841, + 2111 + ], + "score": 0.762 + }, + { + "category_id": 13, + "poly": [ + 603, + 1192, + 804, + 1192, + 804, + 1228, + 603, + 1228 + ], + "score": 0.96, + "latex": "G _ { k } = \\| \\bar { \\nabla } f ( z ^ { k } ) \\| ^ { 2 }" + }, + { + "category_id": 13, + "poly": [ + 906, + 1049, + 1073, + 1049, + 1073, + 1090, + 906, + 1090 + ], + "score": 0.94, + "latex": "\\textstyle \\sum _ { i = 1 } ^ { n + 1 } w _ { i } ^ { k } = 0" + }, + { + "category_id": 13, + "poly": [ + 1027, + 1162, + 1202, + 1162, + 1202, + 1196, + 1027, + 1196 + ], + "score": 0.94, + "latex": "\\bar { B } ( z ) = \\nabla f ( z )" + }, + { + "category_id": 13, + "poly": [ + 995, + 835, + 1192, + 835, + 1192, + 870, + 995, + 870 + ], + "score": 0.93, + "latex": "\\varphi _ { k } ( p ^ { k } ) - \\varphi _ { k } ( p ^ { * } )" + }, + { + "category_id": 13, + "poly": [ + 472, + 1789, + 573, + 1789, + 573, + 1823, + 472, + 1823 + ], + "score": 0.93, + "latex": "\\bar { \\mathcal { O } } ( K ^ { - 1 } )" + }, + { + "category_id": 13, + "poly": [ + 371, + 1014, + 529, + 1014, + 529, + 1052, + 371, + 1052 + ], + "score": 0.93, + "latex": 
"y _ { i } ^ { k } \\ \\in \\ A _ { i } ( x _ { i } ^ { k } )" + }, + { + "category_id": 13, + "poly": [ + 676, + 1052, + 852, + 1052, + 852, + 1089, + 676, + 1089 + ], + "score": 0.93, + "latex": "w _ { n + 1 } ^ { k } = B ( z ^ { k } )" + }, + { + "category_id": 13, + "poly": [ + 298, + 1053, + 450, + 1053, + 450, + 1089, + 298, + 1089 + ], + "score": 0.93, + "latex": "w _ { i } ^ { k } \\in A _ { i } ( z ^ { k } )" + }, + { + "category_id": 13, + "poly": [ + 464, + 1664, + 587, + 1664, + 587, + 1702, + 464, + 1702 + ], + "score": 0.93, + "latex": "\\mathcal { O } ( K ^ { - 1 / 4 } )" + }, + { + "category_id": 13, + "poly": [ + 619, + 1820, + 720, + 1820, + 720, + 1853, + 619, + 1853 + ], + "score": 0.93, + "latex": "\\mathcal { O } ( K ^ { - 2 } )" + }, + { + "category_id": 13, + "poly": [ + 401, + 1496, + 488, + 1496, + 488, + 1530, + 401, + 1530 + ], + "score": 0.93, + "latex": "C _ { f } > 0" + }, + { + "category_id": 13, + "poly": [ + 972, + 353, + 1073, + 353, + 1073, + 389, + 972, + 389 + ], + "score": 0.92, + "latex": "x _ { i } ^ { k } \\to z ^ { * }" + }, + { + "category_id": 13, + "poly": [ + 840, + 511, + 924, + 511, + 924, + 541, + 840, + 541 + ], + "score": 0.92, + "latex": "p ^ { * } \\in \\mathcal { P }" + }, + { + "category_id": 13, + "poly": [ + 366, + 477, + 609, + 477, + 609, + 512, + 366, + 512 + ], + "score": 0.92, + "latex": "p ^ { k + 1 } = p ^ { k } - \\alpha _ { k } \\nabla \\bar { \\varphi } _ { k }" + }, + { + "category_id": 13, + "poly": [ + 298, + 354, + 396, + 354, + 396, + 382, + 298, + 382 + ], + "score": 0.92, + "latex": "z ^ { k } \\to z ^ { * }" + }, + { + "category_id": 13, + "poly": [ + 835, + 634, + 905, + 634, + 905, + 667, + 835, + 667 + ], + "score": 0.92, + "latex": "\\varphi _ { k } ( p )" + }, + { + "category_id": 13, + "poly": [ + 1026, + 978, + 1114, + 978, + 1114, + 1011, + 1026, + 1011 + ], + "score": 0.92, + "latex": "G _ { k } = 0" + }, + { + "category_id": 13, + "poly": [ + 1118, + 356, + 1258, + 356, + 1258, + 
387, + 1118, + 387 + ], + "score": 0.92, + "latex": "i = 1 , \\ldots , n" + }, + { + "category_id": 13, + "poly": [ + 534, + 975, + 874, + 975, + 874, + 1014, + 534, + 1014 + ], + "score": 0.92, + "latex": "p ^ { k } = ( z ^ { k } , w _ { 1 } ^ { k } , \\ldots , w _ { n + 1 } ^ { k } ) \\in \\mathcal { S }" + }, + { + "category_id": 13, + "poly": [ + 549, + 296, + 682, + 296, + 682, + 326, + 549, + 326 + ], + "score": 0.92, + "latex": "A _ { 1 } , \\ldots , A _ { n }" + }, + { + "category_id": 13, + "poly": [ + 699, + 1403, + 778, + 1403, + 778, + 1432, + 699, + 1432 + ], + "score": 0.92, + "latex": "K \\geq 1" + }, + { + "category_id": 14, + "poly": [ + 448, + 1436, + 1250, + 1436, + 1250, + 1495, + 448, + 1495 + ], + "score": 0.92, + "latex": "\\forall k = 1 , \\ldots , K : \\rho _ { k } = \\rho \\doteq \\operatorname* { m i n } \\left\\{ K ^ { - 1 / 4 } , 1 / 2 L \\right\\} \\quad \\ a n d \\quad \\alpha _ { k } = C _ { f } \\rho ^ { 2 }" + }, + { + "category_id": 13, + "poly": [ + 438, + 665, + 520, + 665, + 520, + 696, + 438, + 696 + ], + "score": 0.92, + "latex": "p ^ { * } \\in { \\mathcal { S } }" + }, + { + "category_id": 13, + "poly": [ + 405, + 743, + 540, + 743, + 540, + 774, + 405, + 774 + ], + "score": 0.92, + "latex": "C _ { 1 } , C _ { 2 } > 0" + }, + { + "category_id": 14, + "poly": [ + 592, + 698, + 1106, + 698, + 1106, + 739, + 592, + 739 + ], + "score": 0.92, + "latex": "\\begin{array} { r } { \\mathbb { E } [ \\| \\nabla \\varphi _ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } ] \\le C _ { 1 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + C _ { 2 } \\quad a . s . 
} \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 1170, + 803, + 1403, + 803, + 1403, + 838, + 1170, + 838 + ], + "score": 0.91, + "latex": "\\varphi _ { v _ { k } } ( p ^ { v _ { k } } ) - \\varphi _ { v _ { k } } ( p ^ { * } )" + }, + { + "category_id": 13, + "poly": [ + 788, + 1019, + 890, + 1019, + 890, + 1049, + 788, + 1049 + ], + "score": 0.9, + "latex": "G _ { k } ~ = ~ 0" + }, + { + "category_id": 13, + "poly": [ + 901, + 1164, + 974, + 1164, + 974, + 1191, + 901, + 1191 + ], + "score": 0.9, + "latex": "n = 0" + }, + { + "category_id": 13, + "poly": [ + 613, + 1241, + 650, + 1241, + 650, + 1271, + 613, + 1271 + ], + "score": 0.9, + "latex": "G _ { k }" + }, + { + "category_id": 14, + "poly": [ + 453, + 543, + 1224, + 543, + 1224, + 633, + 453, + 633 + ], + "score": 0.9, + "latex": "\\begin{array} { r l } & { \\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } = \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - 2 \\alpha _ { k } \\langle \\nabla \\varphi _ { k } , p ^ { k } - p ^ { * } \\rangle + \\alpha _ { k } ^ { 2 } \\| \\nabla \\varphi _ { k } \\| ^ { 2 } } \\\\ & { \\qquad = \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - 2 \\alpha _ { k } ( \\varphi _ { k } ( p ^ { k } ) - \\varphi _ { k } ( p ^ { * } ) ) + \\alpha _ { k } ^ { 2 } \\| \\nabla \\varphi _ { k } \\| ^ { 2 } } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 392, + 1366, + 430, + 1366, + 430, + 1396, + 392, + 1396 + ], + "score": 0.89, + "latex": "G _ { k }" + }, + { + "category_id": 13, + "poly": [ + 1106, + 1974, + 1143, + 1974, + 1143, + 2003, + 1106, + 2003 + ], + "score": 0.89, + "latex": "G _ { k }" + }, + { + "category_id": 13, + "poly": [ + 394, + 1668, + 434, + 1668, + 434, + 1700, + 394, + 1700 + ], + "score": 0.89, + "latex": "G _ { J }" + }, + { + "category_id": 13, + "poly": [ + 496, + 1055, + 590, + 1055, + 590, + 1084, + 496, + 1084 + ], + "score": 0.89, + "latex": "i \\in 1 . . 
n" + }, + { + "category_id": 13, + "poly": [ + 448, + 1134, + 486, + 1134, + 486, + 1164, + 448, + 1164 + ], + "score": 0.89, + "latex": "G _ { k }" + }, + { + "category_id": 13, + "poly": [ + 1034, + 1852, + 1066, + 1852, + 1066, + 1881, + 1034, + 1881 + ], + "score": 0.89, + "latex": "A _ { i }" + }, + { + "category_id": 14, + "poly": [ + 659, + 1532, + 1041, + 1532, + 1041, + 1580, + 659, + 1580 + ], + "score": 0.88, + "latex": "\\begin{array} { r } { ( 1 / K ) { \\sum } _ { j = 1 } ^ { K } \\mathbb { E } [ G _ { j } ] = \\mathcal { O } ( K ^ { - 1 / 4 } ) } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 580, + 1019, + 686, + 1019, + 686, + 1048, + 580, + 1048 + ], + "score": 0.88, + "latex": "i \\in 1 . . n" + }, + { + "category_id": 13, + "poly": [ + 1168, + 1015, + 1283, + 1015, + 1283, + 1052, + 1168, + 1052 + ], + "score": 0.88, + "latex": "w _ { i } ^ { k } \\ = \\ y _ { i } ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 1043, + 1015, + 1153, + 1015, + 1153, + 1052, + 1043, + 1052 + ], + "score": 0.87, + "latex": "z ^ { k } = x _ { i } ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 1250, + 1053, + 1281, + 1053, + 1281, + 1082, + 1250, + 1082 + ], + "score": 0.87, + "latex": "z ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 638, + 515, + 672, + 515, + 672, + 542, + 638, + 542 + ], + "score": 0.86, + "latex": "\\varphi _ { k }" + }, + { + "category_id": 13, + "poly": [ + 297, + 943, + 336, + 943, + 336, + 974, + 297, + 974 + ], + "score": 0.86, + "latex": "G _ { k }" + }, + { + "category_id": 13, + "poly": [ + 1020, + 808, + 1050, + 808, + 1050, + 834, + 1020, + 834 + ], + "score": 0.86, + "latex": "v _ { k }" + }, + { + "category_id": 14, + "poly": [ + 462, + 899, + 1234, + 899, + 1234, + 941, + 462, + 941 + ], + "score": 0.85, + "latex": "\\begin{array} { r } { G _ { k } \\doteq \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 
} + \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } . } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 482, + 357, + 512, + 357, + 512, + 382, + 482, + 382 + ], + "score": 0.85, + "latex": "z ^ { * }" + }, + { + "category_id": 13, + "poly": [ + 482, + 1195, + 501, + 1195, + 501, + 1226, + 482, + 1226 + ], + "score": 0.84, + "latex": "f" + }, + { + "category_id": 13, + "poly": [ + 1039, + 635, + 1063, + 635, + 1063, + 662, + 1039, + 662 + ], + "score": 0.83, + "latex": "\\mathcal { P }" + }, + { + "category_id": 13, + "poly": [ + 801, + 1636, + 823, + 1636, + 823, + 1663, + 801, + 1663 + ], + "score": 0.8, + "latex": "J" + }, + { + "category_id": 13, + "poly": [ + 1128, + 774, + 1150, + 774, + 1150, + 800, + 1128, + 800 + ], + "score": 0.79, + "latex": "s" + }, + { + "category_id": 13, + "poly": [ + 736, + 1857, + 755, + 1857, + 755, + 1878, + 736, + 1878 + ], + "score": 0.77, + "latex": "n" + }, + { + "category_id": 13, + "poly": [ + 962, + 296, + 988, + 296, + 988, + 322, + 962, + 322 + ], + "score": 0.7, + "latex": "B" + }, + { + "category_id": 13, + "poly": [ + 1130, + 1635, + 1188, + 1635, + 1188, + 1664, + 1130, + 1664 + ], + "score": 0.7, + "latex": "1 . . 
K" + }, + { + "category_id": 13, + "poly": [ + 1019, + 296, + 1041, + 296, + 1041, + 322, + 1019, + 322 + ], + "score": 0.63, + "latex": "L" + }, + { + "category_id": 13, + "poly": [ + 1022, + 1584, + 1043, + 1584, + 1043, + 1609, + 1022, + 1609 + ], + "score": 0.51, + "latex": "E" + }, + { + "category_id": 13, + "poly": [ + 698, + 327, + 714, + 327, + 714, + 352, + 698, + 352 + ], + "score": 0.47, + "latex": "I" + }, + { + "category_id": 13, + "poly": [ + 790, + 1090, + 813, + 1090, + 813, + 1113, + 790, + 1113 + ], + "score": 0.34, + "latex": "\\mathrm { D }" + }, + { + "category_id": 13, + "poly": [ + 742, + 1791, + 766, + 1791, + 766, + 1817, + 742, + 1817 + ], + "score": 0.3, + "latex": "\\&" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 72.0, + 859.0, + 72.0, + 859.0, + 109.0, + 297.0, + 109.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 225.0, + 789.0, + 225.0, + 789.0, + 266.0, + 294.0, + 266.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 839.0, + 2086.0, + 860.0, + 2086.0, + 860.0, + 2118.0, + 839.0, + 2118.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1633.0, + 800.0, + 1633.0, + 800.0, + 1670.0, + 295.0, + 1670.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 824.0, + 1633.0, + 1129.0, + 1633.0, + 1129.0, + 1670.0, + 824.0, + 1670.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1189.0, + 1633.0, + 1405.0, + 1633.0, + 1405.0, + 1670.0, + 1189.0, + 1670.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1662.0, + 393.0, + 1662.0, + 393.0, + 1706.0, + 293.0, + 1706.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 435.0, + 1662.0, + 463.0, + 1662.0, + 463.0, + 1706.0, + 435.0, + 1706.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 588.0, + 1662.0, + 1412.0, + 1662.0, + 
1412.0, + 1706.0, + 588.0, + 1706.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1694.0, + 1410.0, + 1694.0, + 1410.0, + 1738.0, + 293.0, + 1738.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1730.0, + 1407.0, + 1730.0, + 1407.0, + 1762.0, + 295.0, + 1762.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1760.0, + 1404.0, + 1760.0, + 1404.0, + 1792.0, + 294.0, + 1792.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1786.0, + 471.0, + 1786.0, + 471.0, + 1825.0, + 293.0, + 1825.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 574.0, + 1786.0, + 741.0, + 1786.0, + 741.0, + 1825.0, + 574.0, + 1825.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 767.0, + 1786.0, + 1407.0, + 1786.0, + 1407.0, + 1825.0, + 767.0, + 1825.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1816.0, + 618.0, + 1816.0, + 618.0, + 1857.0, + 293.0, + 1857.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 721.0, + 1816.0, + 1408.0, + 1816.0, + 1408.0, + 1857.0, + 721.0, + 1857.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1850.0, + 735.0, + 1850.0, + 735.0, + 1887.0, + 293.0, + 1887.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 756.0, + 1850.0, + 1033.0, + 1850.0, + 1033.0, + 1887.0, + 756.0, + 1887.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1067.0, + 1850.0, + 1407.0, + 1850.0, + 1407.0, + 1887.0, + 1067.0, + 1887.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1881.0, + 1407.0, + 1881.0, + 1407.0, + 1918.0, + 294.0, + 1918.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1911.0, + 1407.0, + 1911.0, + 1407.0, + 
1947.0, + 293.0, + 1947.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1943.0, + 1408.0, + 1943.0, + 1408.0, + 1975.0, + 295.0, + 1975.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1972.0, + 1105.0, + 1972.0, + 1105.0, + 2007.0, + 294.0, + 2007.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1144.0, + 1972.0, + 1405.0, + 1972.0, + 1405.0, + 2007.0, + 1144.0, + 2007.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 2003.0, + 719.0, + 2003.0, + 719.0, + 2036.0, + 294.0, + 2036.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 739.0, + 404.0, + 739.0, + 404.0, + 779.0, + 294.0, + 779.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 541.0, + 739.0, + 1405.0, + 739.0, + 1405.0, + 779.0, + 541.0, + 779.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 773.0, + 1127.0, + 773.0, + 1127.0, + 807.0, + 296.0, + 807.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1151.0, + 773.0, + 1405.0, + 773.0, + 1405.0, + 807.0, + 1151.0, + 807.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 801.0, + 1019.0, + 801.0, + 1019.0, + 843.0, + 294.0, + 843.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1051.0, + 801.0, + 1169.0, + 801.0, + 1169.0, + 843.0, + 1051.0, + 843.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1404.0, + 801.0, + 1407.0, + 801.0, + 1407.0, + 843.0, + 1404.0, + 843.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 832.0, + 994.0, + 832.0, + 994.0, + 876.0, + 291.0, + 876.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1193.0, + 832.0, + 1407.0, + 832.0, + 1407.0, + 876.0, + 1193.0, + 876.0 + ], + "score": 
1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 865.0, + 949.0, + 865.0, + 949.0, + 900.0, + 294.0, + 900.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 416.0, + 1408.0, + 416.0, + 1408.0, + 453.0, + 293.0, + 453.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 449.0, + 1408.0, + 449.0, + 1408.0, + 482.0, + 295.0, + 482.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 474.0, + 365.0, + 474.0, + 365.0, + 517.0, + 292.0, + 517.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 610.0, + 474.0, + 1408.0, + 474.0, + 1408.0, + 517.0, + 610.0, + 517.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 510.0, + 637.0, + 510.0, + 637.0, + 543.0, + 295.0, + 543.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 673.0, + 510.0, + 839.0, + 510.0, + 839.0, + 543.0, + 673.0, + 543.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 925.0, + 510.0, + 937.0, + 510.0, + 937.0, + 543.0, + 925.0, + 543.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 293.0, + 548.0, + 293.0, + 548.0, + 331.0, + 295.0, + 331.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 683.0, + 293.0, + 961.0, + 293.0, + 961.0, + 331.0, + 683.0, + 331.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 989.0, + 293.0, + 1018.0, + 293.0, + 1018.0, + 331.0, + 989.0, + 331.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1042.0, + 293.0, + 1406.0, + 293.0, + 1406.0, + 331.0, + 1042.0, + 331.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 325.0, + 697.0, + 325.0, + 697.0, + 359.0, + 295.0, + 359.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 715.0, + 
325.0, + 1405.0, + 325.0, + 1405.0, + 359.0, + 715.0, + 359.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 346.0, + 297.0, + 346.0, + 297.0, + 395.0, + 292.0, + 395.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 397.0, + 346.0, + 481.0, + 346.0, + 481.0, + 395.0, + 397.0, + 395.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 513.0, + 346.0, + 971.0, + 346.0, + 971.0, + 395.0, + 513.0, + 395.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1074.0, + 346.0, + 1117.0, + 346.0, + 1117.0, + 395.0, + 1074.0, + 395.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1259.0, + 346.0, + 1272.0, + 346.0, + 1272.0, + 395.0, + 1259.0, + 395.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1130.0, + 447.0, + 1130.0, + 447.0, + 1168.0, + 295.0, + 1168.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 487.0, + 1130.0, + 1403.0, + 1130.0, + 1403.0, + 1168.0, + 487.0, + 1168.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1161.0, + 900.0, + 1161.0, + 900.0, + 1199.0, + 295.0, + 1199.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 975.0, + 1161.0, + 1026.0, + 1161.0, + 1026.0, + 1199.0, + 975.0, + 1199.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1203.0, + 1161.0, + 1405.0, + 1161.0, + 1405.0, + 1199.0, + 1203.0, + 1199.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1191.0, + 481.0, + 1191.0, + 481.0, + 1229.0, + 292.0, + 1229.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 502.0, + 1191.0, + 602.0, + 1191.0, + 602.0, + 1229.0, + 502.0, + 1229.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 805.0, + 1191.0, + 820.0, + 1191.0, + 820.0, + 
1229.0, + 805.0, + 1229.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1239.0, + 612.0, + 1239.0, + 612.0, + 1275.0, + 296.0, + 1275.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 651.0, + 1239.0, + 1404.0, + 1239.0, + 1404.0, + 1275.0, + 651.0, + 1275.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1271.0, + 1157.0, + 1271.0, + 1157.0, + 1303.0, + 296.0, + 1303.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 629.0, + 834.0, + 629.0, + 834.0, + 670.0, + 292.0, + 670.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 906.0, + 629.0, + 1038.0, + 629.0, + 1038.0, + 670.0, + 906.0, + 670.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1064.0, + 629.0, + 1407.0, + 629.0, + 1407.0, + 670.0, + 1064.0, + 670.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 662.0, + 437.0, + 662.0, + 437.0, + 698.0, + 295.0, + 698.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 521.0, + 662.0, + 532.0, + 662.0, + 532.0, + 698.0, + 521.0, + 698.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1329.0, + 1405.0, + 1329.0, + 1405.0, + 1371.0, + 294.0, + 1371.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1357.0, + 391.0, + 1357.0, + 391.0, + 1403.0, + 293.0, + 1403.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 431.0, + 1357.0, + 445.0, + 1357.0, + 445.0, + 1403.0, + 431.0, + 1403.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1012.0, + 370.0, + 1012.0, + 370.0, + 1055.0, + 293.0, + 1055.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 530.0, + 1012.0, + 579.0, + 1012.0, + 579.0, + 1055.0, + 530.0, + 1055.0 + ], + "score": 
1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 687.0, + 1012.0, + 787.0, + 1012.0, + 787.0, + 1055.0, + 687.0, + 1055.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 891.0, + 1012.0, + 1042.0, + 1012.0, + 1042.0, + 1055.0, + 891.0, + 1055.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1154.0, + 1012.0, + 1167.0, + 1012.0, + 1167.0, + 1055.0, + 1154.0, + 1055.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1284.0, + 1012.0, + 1406.0, + 1012.0, + 1406.0, + 1055.0, + 1284.0, + 1055.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 286.0, + 1031.0, + 297.0, + 1031.0, + 297.0, + 1107.0, + 286.0, + 1107.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 451.0, + 1031.0, + 495.0, + 1031.0, + 495.0, + 1107.0, + 451.0, + 1107.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 591.0, + 1031.0, + 675.0, + 1031.0, + 675.0, + 1107.0, + 591.0, + 1107.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 853.0, + 1031.0, + 905.0, + 1031.0, + 905.0, + 1107.0, + 853.0, + 1107.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1074.0, + 1049.0, + 1249.0, + 1049.0, + 1249.0, + 1090.0, + 1074.0, + 1090.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1282.0, + 1049.0, + 1403.0, + 1049.0, + 1403.0, + 1090.0, + 1282.0, + 1090.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1084.0, + 789.0, + 1084.0, + 789.0, + 1119.0, + 296.0, + 1119.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 814.0, + 1084.0, + 825.0, + 1084.0, + 825.0, + 1119.0, + 814.0, + 1119.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 966.0, + 1050.5, + 1042.0, + 1050.5, + 1042.0, + 1093.5, + 966.0, + 1093.5 + ], + "score": 1.0, + "text": 
"" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1578.0, + 1021.0, + 1578.0, + 1021.0, + 1618.0, + 294.0, + 1618.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1044.0, + 1578.0, + 1051.0, + 1578.0, + 1051.0, + 1618.0, + 1044.0, + 1618.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1396.0, + 698.0, + 1396.0, + 698.0, + 1438.0, + 294.0, + 1438.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 779.0, + 1396.0, + 1037.0, + 1396.0, + 1037.0, + 1438.0, + 779.0, + 1438.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 289.0, + 1490.0, + 400.0, + 1490.0, + 400.0, + 1534.0, + 289.0, + 1534.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 489.0, + 1490.0, + 825.0, + 1490.0, + 825.0, + 1534.0, + 489.0, + 1534.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 969.0, + 533.0, + 969.0, + 533.0, + 1020.0, + 293.0, + 1020.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 875.0, + 969.0, + 1025.0, + 969.0, + 1025.0, + 1020.0, + 875.0, + 1020.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1115.0, + 969.0, + 1126.0, + 969.0, + 1126.0, + 1020.0, + 1115.0, + 1020.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 337.0, + 938.0, + 1236.0, + 938.0, + 1236.0, + 982.0, + 337.0, + 982.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 6, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 298, + 300, + 1405, + 300, + 1405, + 757, + 298, + 757 + ], + "score": 0.983 + }, + { + "category_id": 1, + "poly": [ + 298, + 1559, + 1404, + 1559, + 1404, + 1836, + 298, + 1836 + ], + "score": 0.98 + }, + { + "category_id": 1, + "poly": [ + 298, + 973, + 1404, + 973, + 1404, + 1187, + 298, + 1187 + ], + "score": 0.979 + }, + { + 
"category_id": 1, + "poly": [ + 298, + 1850, + 1403, + 1850, + 1403, + 2033, + 298, + 2033 + ], + "score": 0.978 + }, + { + "category_id": 1, + "poly": [ + 298, + 772, + 1403, + 772, + 1403, + 958, + 298, + 958 + ], + "score": 0.978 + }, + { + "category_id": 1, + "poly": [ + 298, + 1202, + 1404, + 1202, + 1404, + 1386, + 298, + 1386 + ], + "score": 0.978 + }, + { + "category_id": 1, + "poly": [ + 300, + 1401, + 878, + 1401, + 878, + 1433, + 300, + 1433 + ], + "score": 0.911 + }, + { + "category_id": 0, + "poly": [ + 300, + 226, + 586, + 226, + 586, + 261, + 300, + 261 + ], + "score": 0.892 + }, + { + "category_id": 2, + "poly": [ + 298, + 75, + 857, + 75, + 857, + 104, + 298, + 104 + ], + "score": 0.89 + }, + { + "category_id": 0, + "poly": [ + 300, + 1485, + 557, + 1485, + 557, + 1520, + 300, + 1520 + ], + "score": 0.878 + }, + { + "category_id": 2, + "poly": [ + 841, + 2089, + 858, + 2089, + 858, + 2111, + 841, + 2111 + ], + "score": 0.783 + }, + { + "category_id": 13, + "poly": [ + 298, + 1910, + 470, + 1910, + 470, + 1943, + 298, + 1943 + ], + "score": 0.93, + "latex": "\\rho _ { k } = C _ { d } k ^ { - 0 . 2 5 }" + }, + { + "category_id": 13, + "poly": [ + 1176, + 1880, + 1353, + 1880, + 1353, + 1912, + 1176, + 1912 + ], + "score": 0.93, + "latex": "\\alpha _ { k } = C _ { d } k ^ { - 0 . 
5 1 }" + }, + { + "category_id": 13, + "poly": [ + 671, + 806, + 703, + 806, + 703, + 835, + 671, + 835 + ], + "score": 0.89, + "latex": "A _ { i }" + }, + { + "category_id": 13, + "poly": [ + 1375, + 1774, + 1403, + 1774, + 1403, + 1804, + 1375, + 1804 + ], + "score": 0.86, + "latex": "\\ell _ { 1 }" + }, + { + "category_id": 13, + "poly": [ + 1221, + 485, + 1246, + 485, + 1246, + 511, + 1221, + 511 + ], + "score": 0.81, + "latex": "B" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 220.0, + 593.0, + 220.0, + 593.0, + 267.0, + 292.0, + 267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 72.0, + 858.0, + 72.0, + 858.0, + 108.0, + 297.0, + 108.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1480.0, + 562.0, + 1480.0, + 562.0, + 1528.0, + 293.0, + 1528.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 839.0, + 2086.0, + 860.0, + 2086.0, + 860.0, + 2116.0, + 839.0, + 2116.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 301.0, + 1405.0, + 301.0, + 1405.0, + 336.0, + 295.0, + 336.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 331.0, + 1410.0, + 331.0, + 1410.0, + 368.0, + 292.0, + 368.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 358.0, + 1406.0, + 358.0, + 1406.0, + 399.0, + 292.0, + 399.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 393.0, + 1405.0, + 393.0, + 1405.0, + 428.0, + 293.0, + 428.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 420.0, + 1407.0, + 420.0, + 1407.0, + 459.0, + 293.0, + 459.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 451.0, + 1406.0, + 451.0, + 1406.0, + 490.0, + 293.0, + 490.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 484.0, + 1220.0, 
+ 484.0, + 1220.0, + 519.0, + 295.0, + 519.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1247.0, + 484.0, + 1405.0, + 484.0, + 1405.0, + 519.0, + 1247.0, + 519.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 512.0, + 1405.0, + 512.0, + 1405.0, + 550.0, + 292.0, + 550.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 545.0, + 1405.0, + 545.0, + 1405.0, + 579.0, + 295.0, + 579.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 575.0, + 1406.0, + 575.0, + 1406.0, + 612.0, + 292.0, + 612.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 606.0, + 1407.0, + 606.0, + 1407.0, + 644.0, + 293.0, + 644.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 635.0, + 1406.0, + 635.0, + 1406.0, + 670.0, + 293.0, + 670.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 663.0, + 1406.0, + 663.0, + 1406.0, + 704.0, + 292.0, + 704.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 695.0, + 1405.0, + 695.0, + 1405.0, + 731.0, + 293.0, + 731.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 725.0, + 1398.0, + 725.0, + 1398.0, + 762.0, + 292.0, + 762.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1561.0, + 1405.0, + 1561.0, + 1405.0, + 1594.0, + 297.0, + 1594.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1591.0, + 1406.0, + 1591.0, + 1406.0, + 1624.0, + 296.0, + 1624.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1619.0, + 1408.0, + 1619.0, + 1408.0, + 1656.0, + 294.0, + 1656.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1650.0, + 1406.0, + 1650.0, + 1406.0, + 1687.0, + 294.0, + 1687.0 
+ ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1683.0, + 1405.0, + 1683.0, + 1405.0, + 1716.0, + 296.0, + 1716.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1711.0, + 1406.0, + 1711.0, + 1406.0, + 1750.0, + 292.0, + 1750.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1741.0, + 1404.0, + 1741.0, + 1404.0, + 1779.0, + 293.0, + 1779.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1772.0, + 1374.0, + 1772.0, + 1374.0, + 1809.0, + 294.0, + 1809.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1804.0, + 1371.0, + 1804.0, + 1371.0, + 1840.0, + 294.0, + 1840.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 973.0, + 1404.0, + 973.0, + 1404.0, + 1008.0, + 294.0, + 1008.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 997.0, + 1406.0, + 997.0, + 1406.0, + 1044.0, + 291.0, + 1044.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1033.0, + 1404.0, + 1033.0, + 1404.0, + 1070.0, + 293.0, + 1070.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1062.0, + 1410.0, + 1062.0, + 1410.0, + 1102.0, + 292.0, + 1102.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1096.0, + 1406.0, + 1096.0, + 1406.0, + 1130.0, + 294.0, + 1130.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1124.0, + 1406.0, + 1124.0, + 1406.0, + 1160.0, + 293.0, + 1160.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1157.0, + 752.0, + 1157.0, + 752.0, + 1189.0, + 294.0, + 1189.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1849.0, + 1405.0, + 1849.0, + 1405.0, + 1885.0, + 296.0, + 1885.0 + ], + "score": 
1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1873.0, + 1175.0, + 1873.0, + 1175.0, + 1920.0, + 291.0, + 1920.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1354.0, + 1873.0, + 1412.0, + 1873.0, + 1412.0, + 1920.0, + 1354.0, + 1920.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 287.0, + 1901.0, + 297.0, + 1901.0, + 297.0, + 1953.0, + 287.0, + 1953.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 471.0, + 1901.0, + 1413.0, + 1901.0, + 1413.0, + 1953.0, + 471.0, + 1953.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1940.0, + 1408.0, + 1940.0, + 1408.0, + 1978.0, + 293.0, + 1978.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1971.0, + 1407.0, + 1971.0, + 1407.0, + 2008.0, + 292.0, + 2008.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 2001.0, + 1404.0, + 2001.0, + 1404.0, + 2036.0, + 294.0, + 2036.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 772.0, + 1405.0, + 772.0, + 1405.0, + 808.0, + 293.0, + 808.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 802.0, + 670.0, + 802.0, + 670.0, + 841.0, + 292.0, + 841.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 704.0, + 802.0, + 1407.0, + 802.0, + 1407.0, + 841.0, + 704.0, + 841.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 833.0, + 1405.0, + 833.0, + 1405.0, + 870.0, + 293.0, + 870.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 866.0, + 1404.0, + 866.0, + 1404.0, + 898.0, + 294.0, + 898.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 893.0, + 1405.0, + 893.0, + 1405.0, + 931.0, + 293.0, + 931.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 293.0, + 926.0, + 972.0, + 926.0, + 972.0, + 959.0, + 293.0, + 959.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1201.0, + 1404.0, + 1201.0, + 1404.0, + 1237.0, + 296.0, + 1237.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1232.0, + 1406.0, + 1232.0, + 1406.0, + 1269.0, + 293.0, + 1269.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1264.0, + 1404.0, + 1264.0, + 1404.0, + 1298.0, + 293.0, + 1298.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1294.0, + 1405.0, + 1294.0, + 1405.0, + 1328.0, + 292.0, + 1328.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1324.0, + 1404.0, + 1324.0, + 1404.0, + 1359.0, + 293.0, + 1359.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1356.0, + 1110.0, + 1356.0, + 1110.0, + 1388.0, + 294.0, + 1388.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1401.0, + 882.0, + 1401.0, + 882.0, + 1437.0, + 296.0, + 1437.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 7, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 297, + 992, + 1404, + 992, + 1404, + 1177, + 297, + 1177 + ], + "score": 0.976 + }, + { + "category_id": 1, + "poly": [ + 299, + 1589, + 1403, + 1589, + 1403, + 1743, + 299, + 1743 + ], + "score": 0.975 + }, + { + "category_id": 1, + "poly": [ + 299, + 1481, + 1403, + 1481, + 1403, + 1574, + 299, + 1574 + ], + "score": 0.975 + }, + { + "category_id": 1, + "poly": [ + 298, + 1192, + 1405, + 1192, + 1405, + 1344, + 298, + 1344 + ], + "score": 0.974 + }, + { + "category_id": 3, + "poly": [ + 305, + 228, + 1393, + 228, + 1393, + 494, + 305, + 494 + ], + "score": 0.969 + }, + { + "category_id": 1, + "poly": [ + 297, + 882, + 1406, + 882, + 1406, 
+ 977, + 297, + 977 + ], + "score": 0.963 + }, + { + "category_id": 4, + "poly": [ + 296, + 518, + 1406, + 518, + 1406, + 703, + 296, + 703 + ], + "score": 0.962 + }, + { + "category_id": 1, + "poly": [ + 301, + 776, + 1399, + 776, + 1399, + 869, + 301, + 869 + ], + "score": 0.959 + }, + { + "category_id": 2, + "poly": [ + 327, + 1975, + 1260, + 1975, + 1260, + 2034, + 327, + 2034 + ], + "score": 0.943 + }, + { + "category_id": 1, + "poly": [ + 299, + 1864, + 1403, + 1864, + 1403, + 1927, + 299, + 1927 + ], + "score": 0.933 + }, + { + "category_id": 0, + "poly": [ + 299, + 1404, + 859, + 1404, + 859, + 1440, + 299, + 1440 + ], + "score": 0.907 + }, + { + "category_id": 0, + "poly": [ + 300, + 1803, + 488, + 1803, + 488, + 1837, + 300, + 1837 + ], + "score": 0.879 + }, + { + "category_id": 2, + "poly": [ + 297, + 76, + 857, + 76, + 857, + 104, + 297, + 104 + ], + "score": 0.875 + }, + { + "category_id": 2, + "poly": [ + 840, + 2088, + 859, + 2088, + 859, + 2111, + 840, + 2111 + ], + "score": 0.801 + }, + { + "category_id": 13, + "poly": [ + 686, + 1024, + 784, + 1024, + 784, + 1054, + 686, + 1054 + ], + "score": 0.92, + "latex": "R _ { k } = 0" + }, + { + "category_id": 13, + "poly": [ + 1196, + 1025, + 1234, + 1025, + 1234, + 1054, + 1196, + 1054 + ], + "score": 0.89, + "latex": "R _ { k }" + }, + { + "category_id": 13, + "poly": [ + 1270, + 1115, + 1308, + 1115, + 1308, + 1145, + 1270, + 1145 + ], + "score": 0.89, + "latex": "R _ { k }" + }, + { + "category_id": 13, + "poly": [ + 1240, + 994, + 1278, + 994, + 1278, + 1024, + 1240, + 1024 + ], + "score": 0.89, + "latex": "R _ { k }" + }, + { + "category_id": 13, + "poly": [ + 1359, + 1115, + 1396, + 1115, + 1396, + 1145, + 1359, + 1145 + ], + "score": 0.88, + "latex": "G _ { k }" + }, + { + "category_id": 13, + "poly": [ + 548, + 1024, + 586, + 1024, + 586, + 1054, + 548, + 1054 + ], + "score": 0.88, + "latex": "G _ { k }" + }, + { + "category_id": 13, + "poly": [ + 1362, + 1024, + 1401, + 1024, + 1401, + 1054, + 
1362, + 1054 + ], + "score": 0.88, + "latex": "G _ { k }" + }, + { + "category_id": 13, + "poly": [ + 933, + 1022, + 964, + 1022, + 964, + 1051, + 933, + 1051 + ], + "score": 0.87, + "latex": "z ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 1103, + 882, + 1254, + 882, + 1254, + 913, + 1103, + 913 + ], + "score": 0.86, + "latex": "m = 4 \\cdot 1 0 ^ { 5 }" + }, + { + "category_id": 13, + "poly": [ + 829, + 914, + 969, + 914, + 969, + 945, + 829, + 945 + ], + "score": 0.86, + "latex": "m = 2 \\cdot 1 0 ^ { 6 }" + }, + { + "category_id": 13, + "poly": [ + 981, + 916, + 1066, + 916, + 1066, + 944, + 981, + 944 + ], + "score": 0.85, + "latex": "d = 1 8" + }, + { + "category_id": 13, + "poly": [ + 298, + 946, + 432, + 946, + 432, + 976, + 298, + 976 + ], + "score": 0.85, + "latex": "d = 2 0 { , } 9 5 8 _ { , }" + }, + { + "category_id": 13, + "poly": [ + 1269, + 884, + 1391, + 884, + 1391, + 914, + 1269, + 914 + ], + "score": 0.84, + "latex": "d = 2 0 0 0 \\mathrm { \\Omega }" + }, + { + "category_id": 13, + "poly": [ + 1254, + 916, + 1397, + 916, + 1397, + 946, + 1254, + 946 + ], + "score": 0.82, + "latex": "m = 7 2 { , } 3 0 9" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 232.0, + 364.0, + 232.0, + 364.0, + 254.0, + 321.0, + 254.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 373.0, + 240.0, + 382.0, + 240.0, + 382.0, + 252.0, + 373.0, + 252.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 714.0, + 244.0, + 725.0, + 244.0, + 725.0, + 257.0, + 714.0, + 257.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1045.0, + 234.0, + 1079.0, + 234.0, + 1079.0, + 255.0, + 1045.0, + 255.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1229.0, + 248.0, + 1243.0, + 248.0, + 1243.0, + 258.0, + 1229.0, + 258.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1255.0, + 243.0, + 1376.0, + 243.0, + 1376.0, + 263.0, + 
1255.0, + 263.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 301.0, + 262.0, + 328.0, + 262.0, + 328.0, + 431.0, + 301.0, + 431.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1046.0, + 261.0, + 1079.0, + 261.0, + 1079.0, + 285.0, + 1046.0, + 285.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1230.0, + 268.0, + 1243.0, + 268.0, + 1243.0, + 277.0, + 1230.0, + 277.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1254.0, + 261.0, + 1371.0, + 261.0, + 1371.0, + 285.0, + 1254.0, + 285.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1254.0, + 278.0, + 1374.0, + 278.0, + 1374.0, + 303.0, + 1254.0, + 303.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 675.0, + 290.0, + 713.0, + 290.0, + 713.0, + 318.0, + 675.0, + 318.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1046.0, + 290.0, + 1080.0, + 290.0, + 1080.0, + 314.0, + 1046.0, + 314.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1253.0, + 299.0, + 1370.0, + 299.0, + 1370.0, + 327.0, + 1253.0, + 327.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 329.0, + 328.0, + 362.0, + 328.0, + 362.0, + 349.0, + 329.0, + 349.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 677.0, + 324.0, + 712.0, + 324.0, + 712.0, + 347.0, + 677.0, + 347.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 809.0, + 329.0, + 854.0, + 329.0, + 854.0, + 346.0, + 809.0, + 346.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 934.0, + 332.0, + 997.0, + 332.0, + 997.0, + 344.0, + 934.0, + 344.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1046.0, + 319.0, + 1080.0, + 319.0, + 1080.0, + 342.0, + 1046.0, + 342.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 1111.0, + 333.0, + 1124.0, + 333.0, + 1124.0, + 343.0, + 1111.0, + 343.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1254.0, + 320.0, + 1291.0, + 320.0, + 1291.0, + 342.0, + 1254.0, + 342.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 327.0, + 359.0, + 362.0, + 359.0, + 362.0, + 380.0, + 327.0, + 380.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 373.0, + 352.0, + 394.0, + 352.0, + 394.0, + 380.0, + 373.0, + 380.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 567.0, + 357.0, + 647.0, + 357.0, + 647.0, + 374.0, + 567.0, + 374.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 676.0, + 353.0, + 714.0, + 353.0, + 714.0, + 381.0, + 676.0, + 381.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 722.0, + 351.0, + 756.0, + 351.0, + 756.0, + 391.0, + 722.0, + 391.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 851.0, + 343.0, + 866.0, + 343.0, + 866.0, + 360.0, + 851.0, + 360.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 927.0, + 383.0, + 933.0, + 383.0, + 933.0, + 389.0, + 927.0, + 389.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 962.0, + 344.0, + 975.0, + 344.0, + 975.0, + 353.0, + 962.0, + 353.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1042.0, + 347.0, + 1081.0, + 347.0, + 1081.0, + 398.0, + 1042.0, + 398.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1089.0, + 367.0, + 1106.0, + 367.0, + 1106.0, + 394.0, + 1089.0, + 394.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1127.0, + 357.0, + 1139.0, + 357.0, + 1139.0, + 367.0, + 1127.0, + 367.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1229.0, + 339.0, + 1315.0, + 339.0, 
+ 1315.0, + 381.0, + 1229.0, + 381.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 329.0, + 392.0, + 362.0, + 392.0, + 362.0, + 413.0, + 329.0, + 413.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 741.0, + 386.0, + 772.0, + 386.0, + 772.0, + 403.0, + 741.0, + 403.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 912.0, + 394.0, + 923.0, + 394.0, + 923.0, + 405.0, + 912.0, + 405.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1161.0, + 394.0, + 1173.0, + 394.0, + 1173.0, + 404.0, + 1161.0, + 404.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 760.0, + 402.0, + 772.0, + 402.0, + 772.0, + 411.0, + 760.0, + 411.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 851.0, + 407.0, + 861.0, + 407.0, + 861.0, + 416.0, + 851.0, + 416.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 866.0, + 407.0, + 880.0, + 407.0, + 880.0, + 417.0, + 866.0, + 417.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 885.0, + 408.0, + 896.0, + 408.0, + 896.0, + 418.0, + 885.0, + 418.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 974.0, + 404.0, + 989.0, + 404.0, + 989.0, + 419.0, + 974.0, + 419.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1178.0, + 411.0, + 1189.0, + 411.0, + 1189.0, + 421.0, + 1178.0, + 421.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1179.0, + 421.0, + 1188.0, + 421.0, + 1188.0, + 431.0, + 1179.0, + 431.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 520.0, + 430.0, + 647.0, + 430.0, + 647.0, + 454.0, + 520.0, + 454.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1035.0, + 433.0, + 1081.0, + 433.0, + 1081.0, + 456.0, + 1035.0, + 456.0 + ], + "score": 1.0, + "text": "" + 
}, + { + "category_id": 15, + "poly": [ + 356.0, + 458.0, + 371.0, + 458.0, + 371.0, + 475.0, + 356.0, + 475.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 386.0, + 456.0, + 412.0, + 456.0, + 412.0, + 477.0, + 386.0, + 477.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 421.0, + 456.0, + 447.0, + 456.0, + 447.0, + 477.0, + 421.0, + 477.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 458.0, + 456.0, + 662.0, + 456.0, + 662.0, + 477.0, + 458.0, + 477.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 703.0, + 458.0, + 719.0, + 458.0, + 719.0, + 475.0, + 703.0, + 475.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 756.0, + 455.0, + 785.0, + 455.0, + 785.0, + 479.0, + 756.0, + 479.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 817.0, + 456.0, + 843.0, + 456.0, + 843.0, + 477.0, + 817.0, + 477.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 876.0, + 456.0, + 902.0, + 456.0, + 902.0, + 477.0, + 876.0, + 477.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 936.0, + 456.0, + 962.0, + 456.0, + 962.0, + 477.0, + 936.0, + 477.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 990.0, + 455.0, + 1027.0, + 455.0, + 1027.0, + 478.0, + 990.0, + 478.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1072.0, + 458.0, + 1088.0, + 458.0, + 1088.0, + 475.0, + 1072.0, + 475.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1132.0, + 458.0, + 1147.0, + 458.0, + 1147.0, + 477.0, + 1132.0, + 477.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1193.0, + 455.0, + 1212.0, + 455.0, + 1212.0, + 477.0, + 1193.0, + 477.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1255.0, + 455.0, + 1273.0, + 
455.0, + 1273.0, + 478.0, + 1255.0, + 478.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1317.0, + 458.0, + 1333.0, + 458.0, + 1333.0, + 475.0, + 1317.0, + 475.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1379.0, + 458.0, + 1392.0, + 458.0, + 1392.0, + 475.0, + 1379.0, + 475.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 444.0, + 472.0, + 563.0, + 472.0, + 563.0, + 496.0, + 444.0, + 496.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 800.0, + 472.0, + 918.0, + 472.0, + 918.0, + 496.0, + 800.0, + 496.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1173.0, + 472.0, + 1292.0, + 472.0, + 1292.0, + 496.0, + 1173.0, + 496.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 667.0, + 227.0, + 713.0, + 227.0, + 713.0, + 254.0, + 667.0, + 254.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 326.0, + 262.5, + 366.0, + 262.5, + 366.0, + 285.0, + 326.0, + 285.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 667.25, + 257.0, + 714.25, + 257.0, + 714.25, + 284.0, + 667.25, + 284.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 327.25, + 294.0, + 366.25, + 294.0, + 366.25, + 318.5, + 327.25, + 318.5 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 727.0, + 327.0, + 734.0, + 327.0, + 734.0, + 340.0, + 727.0, + 340.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 850.0, + 329.5, + 892.0, + 329.5, + 892.0, + 342.0, + 850.0, + 342.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 388.25, + 346.0, + 416.25, + 346.0, + 416.25, + 359.5, + 388.25, + 359.5 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 397.0, + 393.0, + 410.0, + 393.0, + 410.0, + 406.0, + 397.0, + 406.0 + ], + "score": 1.0, 
+ "text": "" + }, + { + "category_id": 15, + "poly": [ + 673.0, + 387.0, + 714.0, + 387.0, + 714.0, + 413.0, + 673.0, + 413.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 367.0, + 418.0, + 416.0, + 418.0, + 416.0, + 434.5, + 367.0, + 434.5 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 415.75, + 427.0, + 450.75, + 427.0, + 450.75, + 445.0, + 415.75, + 445.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1042.0, + 403.5, + 1083.0, + 403.5, + 1083.0, + 428.0, + 1042.0, + 428.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1117.0, + 418.5, + 1153.0, + 418.5, + 1153.0, + 434.5, + 1117.0, + 434.5 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 422.5, + 366.0, + 422.5, + 366.0, + 445.5, + 321.0, + 445.5 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 665.0, + 417.0, + 715.0, + 417.0, + 715.0, + 443.5, + 665.0, + 443.5 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 519.0, + 1408.0, + 519.0, + 1408.0, + 555.0, + 294.0, + 555.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 552.0, + 1404.0, + 552.0, + 1404.0, + 584.0, + 295.0, + 584.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 580.0, + 1404.0, + 580.0, + 1404.0, + 616.0, + 294.0, + 616.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 608.0, + 1406.0, + 608.0, + 1406.0, + 650.0, + 293.0, + 650.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 637.0, + 1406.0, + 637.0, + 1406.0, + 681.0, + 291.0, + 681.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 672.0, + 402.0, + 672.0, + 402.0, + 705.0, + 294.0, + 705.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 330.0, + 
1970.0, + 1259.0, + 1970.0, + 1259.0, + 2008.0, + 330.0, + 2008.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 330.0, + 2000.0, + 1258.0, + 2000.0, + 1258.0, + 2037.0, + 330.0, + 2037.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1400.0, + 863.0, + 1400.0, + 863.0, + 1445.0, + 292.0, + 1445.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1803.0, + 490.0, + 1803.0, + 490.0, + 1840.0, + 296.0, + 1840.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 73.0, + 859.0, + 73.0, + 859.0, + 108.0, + 297.0, + 108.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 839.0, + 2087.0, + 861.0, + 2087.0, + 861.0, + 2117.0, + 839.0, + 2117.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 990.0, + 1239.0, + 990.0, + 1239.0, + 1026.0, + 295.0, + 1026.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1279.0, + 990.0, + 1404.0, + 990.0, + 1404.0, + 1026.0, + 1279.0, + 1026.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1021.0, + 547.0, + 1021.0, + 547.0, + 1059.0, + 294.0, + 1059.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 587.0, + 1021.0, + 685.0, + 1021.0, + 685.0, + 1059.0, + 587.0, + 1059.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 785.0, + 1021.0, + 932.0, + 1021.0, + 932.0, + 1059.0, + 785.0, + 1059.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 965.0, + 1021.0, + 1195.0, + 1021.0, + 1195.0, + 1059.0, + 965.0, + 1059.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1235.0, + 1021.0, + 1361.0, + 1021.0, + 1361.0, + 1059.0, + 1235.0, + 1059.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1053.0, + 1405.0, + 1053.0, + 
1405.0, + 1090.0, + 292.0, + 1090.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1082.0, + 1404.0, + 1082.0, + 1404.0, + 1119.0, + 294.0, + 1119.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1111.0, + 1269.0, + 1111.0, + 1269.0, + 1151.0, + 294.0, + 1151.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1309.0, + 1111.0, + 1358.0, + 1111.0, + 1358.0, + 1151.0, + 1309.0, + 1151.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1397.0, + 1111.0, + 1408.0, + 1111.0, + 1408.0, + 1151.0, + 1397.0, + 1151.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1146.0, + 829.0, + 1146.0, + 829.0, + 1179.0, + 295.0, + 1179.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1589.0, + 1408.0, + 1589.0, + 1408.0, + 1625.0, + 293.0, + 1625.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1622.0, + 1404.0, + 1622.0, + 1404.0, + 1655.0, + 295.0, + 1655.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1651.0, + 1405.0, + 1651.0, + 1405.0, + 1688.0, + 294.0, + 1688.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1681.0, + 1403.0, + 1681.0, + 1403.0, + 1718.0, + 295.0, + 1718.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1714.0, + 1398.0, + 1714.0, + 1398.0, + 1747.0, + 297.0, + 1747.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1482.0, + 1407.0, + 1482.0, + 1407.0, + 1519.0, + 294.0, + 1519.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1514.0, + 1404.0, + 1514.0, + 1404.0, + 1548.0, + 295.0, + 1548.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1543.0, + 1404.0, + 1543.0, + 1404.0, 
+ 1581.0, + 294.0, + 1581.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1192.0, + 1407.0, + 1192.0, + 1407.0, + 1229.0, + 295.0, + 1229.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1222.0, + 1405.0, + 1222.0, + 1405.0, + 1259.0, + 293.0, + 1259.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1248.0, + 1406.0, + 1248.0, + 1406.0, + 1292.0, + 291.0, + 1292.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1281.0, + 1406.0, + 1281.0, + 1406.0, + 1320.0, + 291.0, + 1320.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1313.0, + 1227.0, + 1313.0, + 1227.0, + 1348.0, + 293.0, + 1348.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 881.0, + 1102.0, + 881.0, + 1102.0, + 919.0, + 294.0, + 919.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1255.0, + 881.0, + 1268.0, + 881.0, + 1268.0, + 919.0, + 1255.0, + 919.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1392.0, + 881.0, + 1407.0, + 881.0, + 1407.0, + 919.0, + 1392.0, + 919.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 911.0, + 828.0, + 911.0, + 828.0, + 950.0, + 291.0, + 950.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 970.0, + 911.0, + 980.0, + 911.0, + 980.0, + 950.0, + 970.0, + 950.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1067.0, + 911.0, + 1253.0, + 911.0, + 1253.0, + 950.0, + 1067.0, + 950.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1398.0, + 911.0, + 1409.0, + 911.0, + 1409.0, + 950.0, + 1398.0, + 950.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 944.0, + 297.0, + 944.0, + 297.0, + 979.0, + 292.0, + 979.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 433.0, + 944.0, + 451.0, + 944.0, + 451.0, + 979.0, + 433.0, + 979.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 777.0, + 1403.0, + 777.0, + 1403.0, + 810.0, + 295.0, + 810.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 806.0, + 1405.0, + 806.0, + 1405.0, + 844.0, + 292.0, + 844.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 838.0, + 1369.0, + 838.0, + 1369.0, + 872.0, + 296.0, + 872.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1864.0, + 1404.0, + 1864.0, + 1404.0, + 1898.0, + 295.0, + 1898.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 1894.0, + 1338.0, + 1894.0, + 1338.0, + 1930.0, + 323.0, + 1930.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 8, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 299, + 1519, + 1402, + 1519, + 1402, + 1616, + 299, + 1616 + ], + "score": 0.882 + }, + { + "category_id": 1, + "poly": [ + 299, + 342, + 1406, + 342, + 1406, + 466, + 299, + 466 + ], + "score": 0.861 + }, + { + "category_id": 1, + "poly": [ + 297, + 709, + 1406, + 709, + 1406, + 836, + 297, + 836 + ], + "score": 0.859 + }, + { + "category_id": 1, + "poly": [ + 296, + 485, + 1405, + 485, + 1405, + 609, + 296, + 609 + ], + "score": 0.858 + }, + { + "category_id": 1, + "poly": [ + 301, + 229, + 1403, + 229, + 1403, + 323, + 301, + 323 + ], + "score": 0.856 + }, + { + "category_id": 1, + "poly": [ + 291, + 1632, + 1401, + 1632, + 1401, + 1697, + 291, + 1697 + ], + "score": 0.855 + }, + { + "category_id": 1, + "poly": [ + 289, + 1325, + 1403, + 1325, + 1403, + 1389, + 289, + 1389 + ], + "score": 0.845 + }, + { + "category_id": 1, + "poly": [ + 300, + 1408, + 1401, + 1408, + 1401, + 1502, + 300, + 1502 + ], + "score": 
0.843 + }, + { + "category_id": 1, + "poly": [ + 299, + 852, + 1401, + 852, + 1401, + 918, + 299, + 918 + ], + "score": 0.838 + }, + { + "category_id": 1, + "poly": [ + 295, + 935, + 1406, + 935, + 1406, + 1000, + 295, + 1000 + ], + "score": 0.835 + }, + { + "category_id": 1, + "poly": [ + 296, + 627, + 1403, + 627, + 1403, + 692, + 296, + 692 + ], + "score": 0.829 + }, + { + "category_id": 2, + "poly": [ + 836, + 2088, + 864, + 2088, + 864, + 2112, + 836, + 2112 + ], + "score": 0.829 + }, + { + "category_id": 1, + "poly": [ + 293, + 1129, + 1401, + 1129, + 1401, + 1195, + 293, + 1195 + ], + "score": 0.821 + }, + { + "category_id": 1, + "poly": [ + 297, + 1213, + 1399, + 1213, + 1399, + 1309, + 297, + 1309 + ], + "score": 0.788 + }, + { + "category_id": 1, + "poly": [ + 297, + 1714, + 1406, + 1714, + 1406, + 1839, + 297, + 1839 + ], + "score": 0.761 + }, + { + "category_id": 1, + "poly": [ + 297, + 1017, + 1406, + 1017, + 1406, + 1112, + 297, + 1112 + ], + "score": 0.758 + }, + { + "category_id": 1, + "poly": [ + 297, + 1972, + 1397, + 1972, + 1397, + 2034, + 297, + 2034 + ], + "score": 0.751 + }, + { + "category_id": 2, + "poly": [ + 298, + 75, + 857, + 75, + 857, + 105, + 298, + 105 + ], + "score": 0.724 + }, + { + "category_id": 1, + "poly": [ + 297, + 1858, + 1401, + 1858, + 1401, + 1953, + 297, + 1953 + ], + "score": 0.688 + }, + { + "category_id": 2, + "poly": [ + 298, + 75, + 857, + 75, + 857, + 105, + 298, + 105 + ], + "score": 0.17 + }, + { + "category_id": 13, + "poly": [ + 541, + 747, + 560, + 747, + 560, + 771, + 541, + 771 + ], + "score": 0.76, + "latex": "n" + }, + { + "category_id": 13, + "poly": [ + 976, + 1586, + 994, + 1586, + 994, + 1608, + 976, + 1608 + ], + "score": 0.37, + "latex": "=" + }, + { + "category_id": 15, + "poly": [ + 831.0, + 2084.0, + 868.0, + 2084.0, + 868.0, + 2124.0, + 831.0, + 2124.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 298.0, + 73.0, + 857.0, + 73.0, + 857.0, + 108.0, + 298.0, + 108.0 
+ ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 298.0, + 73.0, + 857.0, + 73.0, + 857.0, + 108.0, + 298.0, + 108.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1517.0, + 1407.0, + 1517.0, + 1407.0, + 1559.0, + 293.0, + 1559.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1551.0, + 1409.0, + 1551.0, + 1409.0, + 1588.0, + 320.0, + 1588.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1581.0, + 975.0, + 1581.0, + 975.0, + 1618.0, + 322.0, + 1618.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 995.0, + 1581.0, + 1188.0, + 1581.0, + 1188.0, + 1618.0, + 995.0, + 1618.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 344.0, + 1408.0, + 344.0, + 1408.0, + 377.0, + 296.0, + 377.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 374.0, + 1408.0, + 374.0, + 1408.0, + 407.0, + 321.0, + 407.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 403.0, + 1406.0, + 403.0, + 1406.0, + 440.0, + 321.0, + 440.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 319.0, + 433.0, + 971.0, + 433.0, + 971.0, + 469.0, + 319.0, + 469.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 711.0, + 1408.0, + 711.0, + 1408.0, + 744.0, + 295.0, + 744.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 743.0, + 540.0, + 743.0, + 540.0, + 776.0, + 323.0, + 776.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 561.0, + 743.0, + 1408.0, + 743.0, + 1408.0, + 776.0, + 561.0, + 776.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 772.0, + 1404.0, + 772.0, + 1404.0, + 808.0, + 321.0, + 808.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 323.0, + 804.0, + 1106.0, + 804.0, + 1106.0, + 836.0, + 323.0, + 836.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 486.0, + 1407.0, + 486.0, + 1407.0, + 519.0, + 295.0, + 519.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 515.0, + 1406.0, + 515.0, + 1406.0, + 552.0, + 320.0, + 552.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 319.0, + 547.0, + 1405.0, + 547.0, + 1405.0, + 583.0, + 319.0, + 583.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 575.0, + 450.0, + 575.0, + 450.0, + 613.0, + 320.0, + 613.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 226.0, + 1407.0, + 226.0, + 1407.0, + 266.0, + 294.0, + 266.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 261.0, + 1404.0, + 261.0, + 1404.0, + 295.0, + 322.0, + 295.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 292.0, + 759.0, + 292.0, + 759.0, + 323.0, + 322.0, + 323.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1631.0, + 1404.0, + 1631.0, + 1404.0, + 1670.0, + 294.0, + 1670.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 1663.0, + 1324.0, + 1663.0, + 1324.0, + 1699.0, + 323.0, + 1699.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1321.0, + 1405.0, + 1321.0, + 1405.0, + 1363.0, + 292.0, + 1363.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1356.0, + 611.0, + 1356.0, + 611.0, + 1389.0, + 321.0, + 1389.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1409.0, + 1406.0, + 1409.0, + 1406.0, + 1443.0, + 296.0, + 1443.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 
1439.0, + 1406.0, + 1439.0, + 1406.0, + 1472.0, + 321.0, + 1472.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1468.0, + 1003.0, + 1468.0, + 1003.0, + 1505.0, + 321.0, + 1505.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 850.0, + 1405.0, + 850.0, + 1405.0, + 892.0, + 294.0, + 892.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 884.0, + 819.0, + 884.0, + 819.0, + 920.0, + 324.0, + 920.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 298.0, + 938.0, + 1378.0, + 938.0, + 1378.0, + 970.0, + 298.0, + 970.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 966.0, + 1358.0, + 966.0, + 1358.0, + 1001.0, + 322.0, + 1001.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 622.0, + 1404.0, + 622.0, + 1404.0, + 669.0, + 293.0, + 669.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 660.0, + 1095.0, + 660.0, + 1095.0, + 693.0, + 321.0, + 693.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1132.0, + 1402.0, + 1132.0, + 1402.0, + 1165.0, + 296.0, + 1165.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1162.0, + 1314.0, + 1162.0, + 1314.0, + 1196.0, + 322.0, + 1196.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1211.0, + 1405.0, + 1211.0, + 1405.0, + 1250.0, + 292.0, + 1250.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1243.0, + 1403.0, + 1243.0, + 1403.0, + 1282.0, + 322.0, + 1282.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1273.0, + 1138.0, + 1273.0, + 1138.0, + 1311.0, + 321.0, + 1311.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1713.0, + 1409.0, + 1713.0, + 
1409.0, + 1753.0, + 292.0, + 1753.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 1746.0, + 1408.0, + 1746.0, + 1408.0, + 1781.0, + 323.0, + 1781.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 1775.0, + 1409.0, + 1775.0, + 1409.0, + 1815.0, + 318.0, + 1815.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1807.0, + 397.0, + 1807.0, + 397.0, + 1840.0, + 321.0, + 1840.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1017.0, + 1408.0, + 1017.0, + 1408.0, + 1055.0, + 294.0, + 1055.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 1047.0, + 1404.0, + 1047.0, + 1404.0, + 1085.0, + 323.0, + 1085.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1080.0, + 609.0, + 1080.0, + 609.0, + 1110.0, + 322.0, + 1110.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1971.0, + 1403.0, + 1971.0, + 1403.0, + 2007.0, + 296.0, + 2007.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 2003.0, + 1399.0, + 2003.0, + 1399.0, + 2036.0, + 322.0, + 2036.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1854.0, + 1405.0, + 1854.0, + 1405.0, + 1897.0, + 292.0, + 1897.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1888.0, + 1405.0, + 1888.0, + 1405.0, + 1926.0, + 322.0, + 1926.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1920.0, + 979.0, + 1920.0, + 979.0, + 1954.0, + 322.0, + 1954.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 9, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 298, + 1288, + 1406, + 1288, + 1406, + 1413, + 298, + 1413 + ], + "score": 0.942 + }, + { + "category_id": 
1, + "poly": [ + 298, + 1556, + 1407, + 1556, + 1407, + 1649, + 298, + 1649 + ], + "score": 0.933 + }, + { + "category_id": 1, + "poly": [ + 297, + 1139, + 1406, + 1139, + 1406, + 1264, + 297, + 1264 + ], + "score": 0.929 + }, + { + "category_id": 1, + "poly": [ + 298, + 1022, + 1403, + 1022, + 1403, + 1117, + 298, + 1117 + ], + "score": 0.92 + }, + { + "category_id": 1, + "poly": [ + 299, + 1674, + 1406, + 1674, + 1406, + 1798, + 299, + 1798 + ], + "score": 0.919 + }, + { + "category_id": 1, + "poly": [ + 299, + 1822, + 1405, + 1822, + 1405, + 1948, + 299, + 1948 + ], + "score": 0.914 + }, + { + "category_id": 1, + "poly": [ + 299, + 1436, + 1403, + 1436, + 1403, + 1531, + 299, + 1531 + ], + "score": 0.912 + }, + { + "category_id": 1, + "poly": [ + 297, + 641, + 1404, + 641, + 1404, + 737, + 297, + 737 + ], + "score": 0.906 + }, + { + "category_id": 1, + "poly": [ + 295, + 933, + 1403, + 933, + 1403, + 998, + 295, + 998 + ], + "score": 0.896 + }, + { + "category_id": 1, + "poly": [ + 298, + 1971, + 1400, + 1971, + 1400, + 2035, + 298, + 2035 + ], + "score": 0.892 + }, + { + "category_id": 1, + "poly": [ + 296, + 435, + 1408, + 435, + 1408, + 528, + 296, + 528 + ], + "score": 0.885 + }, + { + "category_id": 1, + "poly": [ + 299, + 347, + 1400, + 347, + 1400, + 412, + 299, + 412 + ], + "score": 0.885 + }, + { + "category_id": 1, + "poly": [ + 295, + 758, + 1403, + 758, + 1403, + 823, + 295, + 823 + ], + "score": 0.883 + }, + { + "category_id": 1, + "poly": [ + 293, + 552, + 1403, + 552, + 1403, + 618, + 293, + 618 + ], + "score": 0.882 + }, + { + "category_id": 1, + "poly": [ + 301, + 228, + 1403, + 228, + 1403, + 324, + 301, + 324 + ], + "score": 0.881 + }, + { + "category_id": 1, + "poly": [ + 295, + 846, + 1402, + 846, + 1402, + 911, + 295, + 911 + ], + "score": 0.881 + }, + { + "category_id": 2, + "poly": [ + 835, + 2088, + 861, + 2088, + 861, + 2113, + 835, + 2113 + ], + "score": 0.81 + }, + { + "category_id": 2, + "poly": [ + 298, + 75, + 857, + 75, + 857, + 
105, + 298, + 105 + ], + "score": 0.691 + }, + { + "category_id": 2, + "poly": [ + 298, + 75, + 857, + 75, + 857, + 105, + 298, + 105 + ], + "score": 0.217 + }, + { + "category_id": 13, + "poly": [ + 739, + 295, + 759, + 295, + 759, + 318, + 739, + 318 + ], + "score": 0.45, + "latex": "{ . } =" + }, + { + "category_id": 13, + "poly": [ + 1383, + 1207, + 1403, + 1207, + 1403, + 1229, + 1383, + 1229 + ], + "score": 0.39, + "latex": "=" + }, + { + "category_id": 15, + "poly": [ + 831.0, + 2085.0, + 867.0, + 2085.0, + 867.0, + 2125.0, + 831.0, + 2125.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 298.0, + 73.0, + 857.0, + 73.0, + 857.0, + 108.0, + 298.0, + 108.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 298.0, + 73.0, + 857.0, + 73.0, + 857.0, + 108.0, + 298.0, + 108.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1290.0, + 1409.0, + 1290.0, + 1409.0, + 1326.0, + 293.0, + 1326.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 317.0, + 1314.0, + 1411.0, + 1314.0, + 1411.0, + 1360.0, + 317.0, + 1360.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1349.0, + 1406.0, + 1349.0, + 1406.0, + 1389.0, + 320.0, + 1389.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1381.0, + 841.0, + 1381.0, + 841.0, + 1414.0, + 322.0, + 1414.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1555.0, + 1408.0, + 1555.0, + 1408.0, + 1593.0, + 293.0, + 1593.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1587.0, + 1408.0, + 1587.0, + 1408.0, + 1624.0, + 321.0, + 1624.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1617.0, + 572.0, + 1617.0, + 572.0, + 1651.0, + 322.0, + 1651.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 
1142.0, + 1407.0, + 1142.0, + 1407.0, + 1175.0, + 297.0, + 1175.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1173.0, + 1409.0, + 1173.0, + 1409.0, + 1206.0, + 321.0, + 1206.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1202.0, + 1382.0, + 1202.0, + 1382.0, + 1238.0, + 320.0, + 1238.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1404.0, + 1202.0, + 1407.0, + 1202.0, + 1407.0, + 1238.0, + 1404.0, + 1238.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 1233.0, + 504.0, + 1233.0, + 504.0, + 1266.0, + 323.0, + 1266.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1022.0, + 1407.0, + 1022.0, + 1407.0, + 1060.0, + 294.0, + 1060.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1054.0, + 1405.0, + 1054.0, + 1405.0, + 1091.0, + 321.0, + 1091.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1083.0, + 1184.0, + 1083.0, + 1184.0, + 1119.0, + 322.0, + 1119.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1673.0, + 1404.0, + 1673.0, + 1404.0, + 1709.0, + 296.0, + 1709.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1703.0, + 1409.0, + 1703.0, + 1409.0, + 1741.0, + 321.0, + 1741.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 319.0, + 1735.0, + 1410.0, + 1735.0, + 1410.0, + 1773.0, + 319.0, + 1773.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 1769.0, + 739.0, + 1769.0, + 739.0, + 1798.0, + 324.0, + 1798.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1818.0, + 1409.0, + 1818.0, + 1409.0, + 1860.0, + 296.0, + 1860.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 319.0, + 1852.0, + 
1409.0, + 1852.0, + 1409.0, + 1891.0, + 319.0, + 1891.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1883.0, + 1406.0, + 1883.0, + 1406.0, + 1918.0, + 321.0, + 1918.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1914.0, + 1318.0, + 1914.0, + 1318.0, + 1950.0, + 321.0, + 1950.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1435.0, + 1405.0, + 1435.0, + 1405.0, + 1474.0, + 293.0, + 1474.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1470.0, + 1408.0, + 1470.0, + 1408.0, + 1504.0, + 322.0, + 1504.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1493.0, + 399.0, + 1493.0, + 399.0, + 1534.0, + 320.0, + 1534.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 639.0, + 1405.0, + 639.0, + 1405.0, + 680.0, + 294.0, + 680.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 671.0, + 1407.0, + 671.0, + 1407.0, + 710.0, + 321.0, + 710.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 704.0, + 1025.0, + 704.0, + 1025.0, + 738.0, + 324.0, + 738.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 929.0, + 1405.0, + 929.0, + 1405.0, + 973.0, + 293.0, + 973.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 966.0, + 611.0, + 966.0, + 611.0, + 996.0, + 323.0, + 996.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1968.0, + 1405.0, + 1968.0, + 1405.0, + 2009.0, + 293.0, + 2009.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 2003.0, + 978.0, + 2003.0, + 978.0, + 2036.0, + 322.0, + 2036.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 436.0, + 1404.0, + 436.0, + 1404.0, + 469.0, + 
294.0, + 469.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 467.0, + 1409.0, + 467.0, + 1409.0, + 501.0, + 323.0, + 501.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 497.0, + 482.0, + 497.0, + 482.0, + 529.0, + 323.0, + 529.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 343.0, + 1404.0, + 343.0, + 1404.0, + 386.0, + 293.0, + 386.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 379.0, + 983.0, + 379.0, + 983.0, + 412.0, + 323.0, + 412.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 760.0, + 1406.0, + 760.0, + 1406.0, + 796.0, + 295.0, + 796.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 791.0, + 1304.0, + 791.0, + 1304.0, + 824.0, + 322.0, + 824.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 550.0, + 1409.0, + 550.0, + 1409.0, + 590.0, + 293.0, + 590.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 586.0, + 570.0, + 586.0, + 570.0, + 617.0, + 324.0, + 617.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 228.0, + 1405.0, + 228.0, + 1405.0, + 266.0, + 296.0, + 266.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 260.0, + 1409.0, + 260.0, + 1409.0, + 298.0, + 321.0, + 298.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 291.0, + 738.0, + 291.0, + 738.0, + 327.0, + 322.0, + 327.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 760.0, + 291.0, + 918.0, + 291.0, + 918.0, + 327.0, + 760.0, + 327.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 848.0, + 1405.0, + 848.0, + 1405.0, + 884.0, + 294.0, + 884.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 322.0, + 879.0, + 1304.0, + 879.0, + 1304.0, + 912.0, + 322.0, + 912.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 10, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 2, + "poly": [ + 298, + 75, + 857, + 75, + 857, + 105, + 298, + 105 + ], + "score": 0.88 + }, + { + "category_id": 1, + "poly": [ + 294, + 775, + 1402, + 775, + 1402, + 839, + 294, + 839 + ], + "score": 0.861 + }, + { + "category_id": 1, + "poly": [ + 299, + 518, + 1402, + 518, + 1402, + 582, + 299, + 582 + ], + "score": 0.842 + }, + { + "category_id": 1, + "poly": [ + 295, + 604, + 1401, + 604, + 1401, + 668, + 295, + 668 + ], + "score": 0.836 + }, + { + "category_id": 1, + "poly": [ + 299, + 1124, + 1407, + 1124, + 1407, + 1249, + 299, + 1249 + ], + "score": 0.832 + }, + { + "category_id": 1, + "poly": [ + 299, + 1942, + 1404, + 1942, + 1404, + 2035, + 299, + 2035 + ], + "score": 0.829 + }, + { + "category_id": 1, + "poly": [ + 295, + 689, + 1404, + 689, + 1404, + 754, + 295, + 754 + ], + "score": 0.823 + }, + { + "category_id": 1, + "poly": [ + 298, + 861, + 1407, + 861, + 1407, + 987, + 298, + 987 + ], + "score": 0.823 + }, + { + "category_id": 2, + "poly": [ + 835, + 2088, + 864, + 2088, + 864, + 2113, + 835, + 2113 + ], + "score": 0.822 + }, + { + "category_id": 1, + "poly": [ + 298, + 1474, + 1406, + 1474, + 1406, + 1569, + 298, + 1569 + ], + "score": 0.817 + }, + { + "category_id": 1, + "poly": [ + 296, + 1357, + 1404, + 1357, + 1404, + 1453, + 296, + 1453 + ], + "score": 0.817 + }, + { + "category_id": 1, + "poly": [ + 299, + 1009, + 1405, + 1009, + 1405, + 1104, + 299, + 1104 + ], + "score": 0.811 + }, + { + "category_id": 1, + "poly": [ + 288, + 1271, + 1404, + 1271, + 1404, + 1337, + 288, + 1337 + ], + "score": 0.79 + }, + { + "category_id": 1, + "poly": [ + 298, + 1590, + 1406, + 1590, + 1406, + 1686, + 298, + 1686 + ], + "score": 0.785 + }, + { + "category_id": 1, + "poly": [ + 295, + 
1708, + 1401, + 1708, + 1401, + 1772, + 295, + 1772 + ], + "score": 0.78 + }, + { + "category_id": 1, + "poly": [ + 295, + 432, + 1400, + 432, + 1400, + 496, + 295, + 496 + ], + "score": 0.78 + }, + { + "category_id": 1, + "poly": [ + 298, + 229, + 1400, + 229, + 1400, + 324, + 298, + 324 + ], + "score": 0.767 + }, + { + "category_id": 1, + "poly": [ + 298, + 1794, + 1407, + 1794, + 1407, + 1920, + 298, + 1920 + ], + "score": 0.752 + }, + { + "category_id": 1, + "poly": [ + 294, + 346, + 1402, + 346, + 1402, + 409, + 294, + 409 + ], + "score": 0.738 + }, + { + "category_id": 15, + "poly": [ + 298.0, + 73.0, + 857.0, + 73.0, + 857.0, + 108.0, + 298.0, + 108.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 831.0, + 2085.0, + 869.0, + 2085.0, + 869.0, + 2124.0, + 831.0, + 2124.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 772.0, + 1403.0, + 772.0, + 1403.0, + 812.0, + 294.0, + 812.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 806.0, + 520.0, + 806.0, + 520.0, + 843.0, + 322.0, + 843.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 517.0, + 1406.0, + 517.0, + 1406.0, + 556.0, + 294.0, + 556.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 549.0, + 980.0, + 549.0, + 980.0, + 585.0, + 324.0, + 585.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 601.0, + 1405.0, + 601.0, + 1405.0, + 642.0, + 294.0, + 642.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 636.0, + 1254.0, + 636.0, + 1254.0, + 669.0, + 323.0, + 669.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1127.0, + 1404.0, + 1127.0, + 1404.0, + 1160.0, + 297.0, + 1160.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1157.0, + 1407.0, + 1157.0, + 1407.0, + 1191.0, + 
321.0, + 1191.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1185.0, + 1410.0, + 1185.0, + 1410.0, + 1225.0, + 321.0, + 1225.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 1217.0, + 620.0, + 1217.0, + 620.0, + 1250.0, + 323.0, + 1250.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1939.0, + 1410.0, + 1939.0, + 1410.0, + 1978.0, + 293.0, + 1978.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1973.0, + 1406.0, + 1973.0, + 1406.0, + 2007.0, + 322.0, + 2007.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 2002.0, + 1193.0, + 2002.0, + 1193.0, + 2036.0, + 321.0, + 2036.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 688.0, + 1404.0, + 688.0, + 1404.0, + 728.0, + 295.0, + 728.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 720.0, + 1130.0, + 720.0, + 1130.0, + 756.0, + 322.0, + 756.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 864.0, + 1408.0, + 864.0, + 1408.0, + 897.0, + 295.0, + 897.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 890.0, + 1409.0, + 890.0, + 1409.0, + 931.0, + 321.0, + 931.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 923.0, + 1409.0, + 923.0, + 1409.0, + 962.0, + 320.0, + 962.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 953.0, + 1062.0, + 953.0, + 1062.0, + 989.0, + 320.0, + 989.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1474.0, + 1406.0, + 1474.0, + 1406.0, + 1512.0, + 293.0, + 1512.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 1507.0, + 1406.0, + 1507.0, + 1406.0, + 1541.0, + 324.0, + 1541.0 + ], + "score": 1.0, 
+ "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 1535.0, + 1048.0, + 1535.0, + 1048.0, + 1573.0, + 324.0, + 1573.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1357.0, + 1406.0, + 1357.0, + 1406.0, + 1395.0, + 295.0, + 1395.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 1389.0, + 1404.0, + 1389.0, + 1404.0, + 1423.0, + 323.0, + 1423.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 1420.0, + 532.0, + 1420.0, + 532.0, + 1455.0, + 323.0, + 1455.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1010.0, + 1405.0, + 1010.0, + 1405.0, + 1044.0, + 295.0, + 1044.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1041.0, + 1405.0, + 1041.0, + 1405.0, + 1075.0, + 322.0, + 1075.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 1074.0, + 901.0, + 1074.0, + 901.0, + 1104.0, + 323.0, + 1104.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1270.0, + 1408.0, + 1270.0, + 1408.0, + 1308.0, + 294.0, + 1308.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1303.0, + 1001.0, + 1303.0, + 1001.0, + 1339.0, + 322.0, + 1339.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1593.0, + 1407.0, + 1593.0, + 1407.0, + 1627.0, + 295.0, + 1627.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 1625.0, + 1404.0, + 1625.0, + 1404.0, + 1659.0, + 324.0, + 1659.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 1653.0, + 1111.0, + 1653.0, + 1111.0, + 1691.0, + 324.0, + 1691.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1707.0, + 1405.0, + 1707.0, + 1405.0, + 1744.0, + 297.0, + 1744.0 + ], + "score": 1.0, + "text": "" + 
}, + { + "category_id": 15, + "poly": [ + 322.0, + 1739.0, + 1197.0, + 1739.0, + 1197.0, + 1773.0, + 322.0, + 1773.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 431.0, + 1404.0, + 431.0, + 1404.0, + 469.0, + 294.0, + 469.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 463.0, + 954.0, + 463.0, + 954.0, + 499.0, + 323.0, + 499.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 225.0, + 1405.0, + 225.0, + 1405.0, + 268.0, + 292.0, + 268.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 261.0, + 1404.0, + 261.0, + 1404.0, + 297.0, + 322.0, + 297.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 290.0, + 1107.0, + 290.0, + 1107.0, + 329.0, + 322.0, + 329.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1795.0, + 1408.0, + 1795.0, + 1408.0, + 1831.0, + 293.0, + 1831.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1825.0, + 1410.0, + 1825.0, + 1410.0, + 1862.0, + 322.0, + 1862.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1856.0, + 1412.0, + 1856.0, + 1412.0, + 1892.0, + 322.0, + 1892.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1886.0, + 900.0, + 1886.0, + 900.0, + 1922.0, + 322.0, + 1922.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 344.0, + 1404.0, + 344.0, + 1404.0, + 383.0, + 294.0, + 383.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 378.0, + 794.0, + 378.0, + 794.0, + 411.0, + 323.0, + 411.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 11, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 298, + 1244, + 1406, + 1244, + 1406, + 1400, + 298, 
+ 1400 + ], + "score": 0.904 + }, + { + "category_id": 1, + "poly": [ + 298, + 839, + 1407, + 839, + 1407, + 935, + 298, + 935 + ], + "score": 0.901 + }, + { + "category_id": 1, + "poly": [ + 298, + 956, + 1407, + 956, + 1407, + 1051, + 298, + 1051 + ], + "score": 0.891 + }, + { + "category_id": 1, + "poly": [ + 307, + 723, + 1405, + 723, + 1405, + 820, + 307, + 820 + ], + "score": 0.88 + }, + { + "category_id": 1, + "poly": [ + 298, + 1073, + 1401, + 1073, + 1401, + 1137, + 298, + 1137 + ], + "score": 0.876 + }, + { + "category_id": 1, + "poly": [ + 296, + 1536, + 1405, + 1536, + 1405, + 1663, + 296, + 1663 + ], + "score": 0.876 + }, + { + "category_id": 1, + "poly": [ + 297, + 1421, + 1405, + 1421, + 1405, + 1516, + 297, + 1516 + ], + "score": 0.871 + }, + { + "category_id": 1, + "poly": [ + 295, + 1158, + 1401, + 1158, + 1401, + 1222, + 295, + 1222 + ], + "score": 0.869 + }, + { + "category_id": 1, + "poly": [ + 293, + 1886, + 1402, + 1886, + 1402, + 1950, + 293, + 1950 + ], + "score": 0.864 + }, + { + "category_id": 1, + "poly": [ + 298, + 1682, + 1402, + 1682, + 1402, + 1779, + 298, + 1779 + ], + "score": 0.861 + }, + { + "category_id": 1, + "poly": [ + 291, + 1800, + 1403, + 1800, + 1403, + 1864, + 291, + 1864 + ], + "score": 0.859 + }, + { + "category_id": 1, + "poly": [ + 297, + 492, + 1407, + 492, + 1407, + 586, + 297, + 586 + ], + "score": 0.855 + }, + { + "category_id": 1, + "poly": [ + 297, + 608, + 1405, + 608, + 1405, + 701, + 297, + 701 + ], + "score": 0.852 + }, + { + "category_id": 1, + "poly": [ + 298, + 228, + 1406, + 228, + 1406, + 353, + 298, + 353 + ], + "score": 0.847 + }, + { + "category_id": 1, + "poly": [ + 294, + 1972, + 1400, + 1972, + 1400, + 2034, + 294, + 2034 + ], + "score": 0.846 + }, + { + "category_id": 1, + "poly": [ + 298, + 376, + 1407, + 376, + 1407, + 470, + 298, + 470 + ], + "score": 0.834 + }, + { + "category_id": 2, + "poly": [ + 835, + 2088, + 863, + 2088, + 863, + 2113, + 835, + 2113 + ], + "score": 0.831 + }, + { + 
"category_id": 2, + "poly": [ + 298, + 75, + 857, + 75, + 857, + 105, + 298, + 105 + ], + "score": 0.706 + }, + { + "category_id": 2, + "poly": [ + 298, + 75, + 857, + 75, + 857, + 105, + 298, + 105 + ], + "score": 0.204 + }, + { + "category_id": 13, + "poly": [ + 961, + 957, + 1044, + 957, + 1044, + 991, + 961, + 991 + ], + "score": 0.9, + "latex": "\\mathrm { O } ( 1 / t )" + }, + { + "category_id": 13, + "poly": [ + 528, + 874, + 547, + 874, + 547, + 905, + 528, + 905 + ], + "score": 0.85, + "latex": "f" + }, + { + "category_id": 13, + "poly": [ + 1177, + 494, + 1246, + 494, + 1246, + 525, + 1177, + 525 + ], + "score": 0.69, + "latex": "\\mathbf { o } ( 1 / \\mathrm { k } )" + }, + { + "category_id": 15, + "poly": [ + 831.0, + 2085.0, + 868.0, + 2085.0, + 868.0, + 2123.0, + 831.0, + 2123.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 298.0, + 73.0, + 857.0, + 73.0, + 857.0, + 108.0, + 298.0, + 108.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 298.0, + 73.0, + 857.0, + 73.0, + 857.0, + 108.0, + 298.0, + 108.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1245.0, + 1404.0, + 1245.0, + 1404.0, + 1278.0, + 295.0, + 1278.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1274.0, + 1408.0, + 1274.0, + 1408.0, + 1310.0, + 322.0, + 1310.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1305.0, + 1409.0, + 1305.0, + 1409.0, + 1342.0, + 322.0, + 1342.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1335.0, + 1404.0, + 1335.0, + 1404.0, + 1371.0, + 321.0, + 1371.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1366.0, + 1037.0, + 1366.0, + 1037.0, + 1402.0, + 322.0, + 1402.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 840.0, + 1407.0, + 840.0, + 1407.0, + 878.0, + 295.0, 
+ 878.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 871.0, + 527.0, + 871.0, + 527.0, + 909.0, + 322.0, + 909.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 548.0, + 871.0, + 1407.0, + 871.0, + 1407.0, + 909.0, + 548.0, + 909.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 903.0, + 1408.0, + 903.0, + 1408.0, + 937.0, + 324.0, + 937.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 956.0, + 960.0, + 956.0, + 960.0, + 994.0, + 295.0, + 994.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1045.0, + 956.0, + 1404.0, + 956.0, + 1404.0, + 994.0, + 1045.0, + 994.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 989.0, + 1408.0, + 989.0, + 1408.0, + 1022.0, + 322.0, + 1022.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1018.0, + 926.0, + 1018.0, + 926.0, + 1052.0, + 322.0, + 1052.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 302.0, + 725.0, + 1405.0, + 725.0, + 1405.0, + 759.0, + 302.0, + 759.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 319.0, + 754.0, + 1408.0, + 754.0, + 1408.0, + 789.0, + 319.0, + 789.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 319.0, + 786.0, + 1334.0, + 786.0, + 1334.0, + 821.0, + 319.0, + 821.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1070.0, + 1406.0, + 1070.0, + 1406.0, + 1110.0, + 297.0, + 1110.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1104.0, + 1035.0, + 1104.0, + 1035.0, + 1138.0, + 322.0, + 1138.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1538.0, + 1405.0, + 1538.0, + 1405.0, + 1571.0, + 295.0, + 1571.0 + ], + "score": 1.0, + "text": "" + }, + 
{ + "category_id": 15, + "poly": [ + 322.0, + 1568.0, + 1407.0, + 1568.0, + 1407.0, + 1603.0, + 322.0, + 1603.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 1601.0, + 1403.0, + 1601.0, + 1403.0, + 1634.0, + 323.0, + 1634.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1630.0, + 942.0, + 1630.0, + 942.0, + 1663.0, + 322.0, + 1663.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1421.0, + 1405.0, + 1421.0, + 1405.0, + 1459.0, + 294.0, + 1459.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 1453.0, + 1405.0, + 1453.0, + 1405.0, + 1487.0, + 324.0, + 1487.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1483.0, + 1308.0, + 1483.0, + 1308.0, + 1517.0, + 322.0, + 1517.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1154.0, + 1407.0, + 1154.0, + 1407.0, + 1196.0, + 292.0, + 1196.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 1190.0, + 504.0, + 1190.0, + 504.0, + 1222.0, + 323.0, + 1222.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1885.0, + 1407.0, + 1885.0, + 1407.0, + 1921.0, + 295.0, + 1921.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1917.0, + 963.0, + 1917.0, + 963.0, + 1953.0, + 321.0, + 1953.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1684.0, + 1404.0, + 1684.0, + 1404.0, + 1718.0, + 294.0, + 1718.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 325.0, + 1717.0, + 1404.0, + 1717.0, + 1404.0, + 1748.0, + 325.0, + 1748.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 325.0, + 1745.0, + 1095.0, + 1745.0, + 1095.0, + 1780.0, + 325.0, + 1780.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 295.0, + 1800.0, + 1405.0, + 1800.0, + 1405.0, + 1836.0, + 295.0, + 1836.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1831.0, + 659.0, + 1831.0, + 659.0, + 1864.0, + 322.0, + 1864.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 493.0, + 1176.0, + 493.0, + 1176.0, + 527.0, + 297.0, + 527.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1247.0, + 493.0, + 1404.0, + 493.0, + 1404.0, + 527.0, + 1247.0, + 527.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 523.0, + 1407.0, + 523.0, + 1407.0, + 558.0, + 321.0, + 558.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 554.0, + 883.0, + 554.0, + 883.0, + 587.0, + 321.0, + 587.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 606.0, + 1405.0, + 606.0, + 1405.0, + 644.0, + 294.0, + 644.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 638.0, + 1410.0, + 638.0, + 1410.0, + 675.0, + 321.0, + 675.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 665.0, + 399.0, + 665.0, + 399.0, + 706.0, + 320.0, + 706.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 231.0, + 1404.0, + 231.0, + 1404.0, + 264.0, + 295.0, + 264.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 262.0, + 1406.0, + 262.0, + 1406.0, + 295.0, + 324.0, + 295.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 289.0, + 1409.0, + 289.0, + 1409.0, + 329.0, + 321.0, + 329.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 319.0, + 622.0, + 319.0, + 622.0, + 355.0, + 321.0, + 355.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1968.0, + 1406.0, 
+ 1968.0, + 1406.0, + 2009.0, + 293.0, + 2009.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 2002.0, + 463.0, + 2002.0, + 463.0, + 2035.0, + 322.0, + 2035.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 377.0, + 1405.0, + 377.0, + 1405.0, + 411.0, + 296.0, + 411.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 406.0, + 1410.0, + 406.0, + 1410.0, + 445.0, + 320.0, + 445.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 438.0, + 664.0, + 438.0, + 664.0, + 470.0, + 320.0, + 470.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 12, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 297, + 1768, + 1404, + 1768, + 1404, + 1925, + 297, + 1925 + ], + "score": 0.976 + }, + { + "category_id": 1, + "poly": [ + 297, + 1551, + 1404, + 1551, + 1404, + 1646, + 297, + 1646 + ], + "score": 0.969 + }, + { + "category_id": 8, + "poly": [ + 690, + 1944, + 1007, + 1944, + 1007, + 2029, + 690, + 2029 + ], + "score": 0.956 + }, + { + "category_id": 1, + "poly": [ + 299, + 1454, + 1403, + 1454, + 1403, + 1519, + 299, + 1519 + ], + "score": 0.94 + }, + { + "category_id": 9, + "poly": [ + 1352, + 1690, + 1399, + 1690, + 1399, + 1720, + 1352, + 1720 + ], + "score": 0.907 + }, + { + "category_id": 2, + "poly": [ + 298, + 75, + 857, + 75, + 857, + 105, + 298, + 105 + ], + "score": 0.906 + }, + { + "category_id": 1, + "poly": [ + 294, + 229, + 1403, + 229, + 1403, + 294, + 294, + 294 + ], + "score": 0.897 + }, + { + "category_id": 9, + "poly": [ + 1351, + 1967, + 1400, + 1967, + 1400, + 1999, + 1351, + 1999 + ], + "score": 0.893 + }, + { + "category_id": 2, + "poly": [ + 835, + 2088, + 864, + 2088, + 864, + 2113, + 835, + 2113 + ], + "score": 0.874 + }, + { + "category_id": 1, + "poly": [ + 299, + 1232, + 1406, + 1232, + 1406, + 1326, + 299, + 1326 + ], + 
"score": 0.873 + }, + { + "category_id": 1, + "poly": [ + 299, + 313, + 1402, + 313, + 1402, + 408, + 299, + 408 + ], + "score": 0.854 + }, + { + "category_id": 1, + "poly": [ + 294, + 1030, + 1404, + 1030, + 1404, + 1096, + 294, + 1096 + ], + "score": 0.819 + }, + { + "category_id": 1, + "poly": [ + 295, + 945, + 1400, + 945, + 1400, + 1010, + 295, + 1010 + ], + "score": 0.799 + }, + { + "category_id": 1, + "poly": [ + 298, + 691, + 1403, + 691, + 1403, + 755, + 298, + 755 + ], + "score": 0.794 + }, + { + "category_id": 1, + "poly": [ + 297, + 775, + 1404, + 775, + 1404, + 840, + 297, + 840 + ], + "score": 0.782 + }, + { + "category_id": 1, + "poly": [ + 298, + 860, + 1402, + 860, + 1402, + 926, + 298, + 926 + ], + "score": 0.755 + }, + { + "category_id": 1, + "poly": [ + 299, + 1115, + 1405, + 1115, + 1405, + 1211, + 299, + 1211 + ], + "score": 0.752 + }, + { + "category_id": 1, + "poly": [ + 297, + 574, + 1407, + 574, + 1407, + 670, + 297, + 670 + ], + "score": 0.742 + }, + { + "category_id": 1, + "poly": [ + 298, + 429, + 1406, + 429, + 1406, + 554, + 298, + 554 + ], + "score": 0.738 + }, + { + "category_id": 0, + "poly": [ + 300, + 1384, + 1134, + 1384, + 1134, + 1422, + 300, + 1422 + ], + "score": 0.725 + }, + { + "category_id": 8, + "poly": [ + 676, + 1663, + 1023, + 1663, + 1023, + 1753, + 676, + 1753 + ], + "score": 0.389 + }, + { + "category_id": 8, + "poly": [ + 678, + 1663, + 1021, + 1663, + 1021, + 1753, + 678, + 1753 + ], + "score": 0.279 + }, + { + "category_id": 1, + "poly": [ + 300, + 1384, + 1134, + 1384, + 1134, + 1422, + 300, + 1422 + ], + "score": 0.223 + }, + { + "category_id": 14, + "poly": [ + 690, + 1940, + 1008, + 1940, + 1008, + 2030, + 690, + 2030 + ], + "score": 0.94, + "latex": "0 \\in \\nabla f ( x ^ { * } ) + \\sum _ { i = 1 } ^ { n } \\partial r _ { i } ( x ^ { * } ) ," + }, + { + "category_id": 14, + "poly": [ + 676, + 1661, + 1023, + 1661, + 1023, + 1755, + 676, + 1755 + ], + "score": 0.94, + "latex": "\\operatorname* { m i n } _ 
{ x \\in \\mathbb { R } ^ { d } } \\frac { 1 } { m } \\sum _ { j = 1 } ^ { m } f _ { j } ( x ) + \\sum _ { i = 1 } ^ { n } r _ { i } ( x )" + }, + { + "category_id": 13, + "poly": [ + 1008, + 1771, + 1036, + 1771, + 1036, + 1805, + 1008, + 1805 + ], + "score": 0.89, + "latex": "f _ { j }" + }, + { + "category_id": 13, + "poly": [ + 1249, + 1773, + 1264, + 1773, + 1264, + 1803, + 1249, + 1803 + ], + "score": 0.84, + "latex": "j" + }, + { + "category_id": 13, + "poly": [ + 566, + 1807, + 592, + 1807, + 592, + 1832, + 566, + 1832 + ], + "score": 0.83, + "latex": "r _ { i }" + }, + { + "category_id": 13, + "poly": [ + 426, + 1776, + 452, + 1776, + 452, + 1798, + 426, + 1798 + ], + "score": 0.71, + "latex": "m" + }, + { + "category_id": 13, + "poly": [ + 904, + 642, + 922, + 642, + 922, + 664, + 904, + 664 + ], + "score": 0.49, + "latex": "=" + }, + { + "category_id": 15, + "poly": [ + 298.0, + 73.0, + 857.0, + 73.0, + 857.0, + 108.0, + 298.0, + 108.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 831.0, + 2085.0, + 869.0, + 2085.0, + 869.0, + 2123.0, + 831.0, + 2123.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1381.0, + 1139.0, + 1381.0, + 1139.0, + 1428.0, + 293.0, + 1428.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1772.0, + 425.0, + 1772.0, + 425.0, + 1805.0, + 296.0, + 1805.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 453.0, + 1772.0, + 1007.0, + 1772.0, + 1007.0, + 1805.0, + 453.0, + 1805.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1037.0, + 1772.0, + 1248.0, + 1772.0, + 1248.0, + 1805.0, + 1037.0, + 1805.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1265.0, + 1772.0, + 1404.0, + 1772.0, + 1404.0, + 1805.0, + 1265.0, + 1805.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1802.0, + 565.0, + 1802.0, + 
565.0, + 1836.0, + 295.0, + 1836.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 593.0, + 1802.0, + 1404.0, + 1802.0, + 1404.0, + 1836.0, + 593.0, + 1836.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1830.0, + 1407.0, + 1830.0, + 1407.0, + 1868.0, + 292.0, + 1868.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1864.0, + 1402.0, + 1864.0, + 1402.0, + 1897.0, + 296.0, + 1897.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1889.0, + 1141.0, + 1889.0, + 1141.0, + 1928.0, + 294.0, + 1928.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1551.0, + 1405.0, + 1551.0, + 1405.0, + 1588.0, + 296.0, + 1588.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1584.0, + 1402.0, + 1584.0, + 1402.0, + 1618.0, + 295.0, + 1618.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1614.0, + 710.0, + 1614.0, + 710.0, + 1648.0, + 296.0, + 1648.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1453.0, + 1404.0, + 1453.0, + 1404.0, + 1493.0, + 294.0, + 1493.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 1488.0, + 384.0, + 1488.0, + 384.0, + 1522.0, + 290.0, + 1522.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 229.0, + 1403.0, + 229.0, + 1403.0, + 265.0, + 296.0, + 265.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 260.0, + 818.0, + 260.0, + 818.0, + 295.0, + 322.0, + 295.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1233.0, + 1404.0, + 1233.0, + 1404.0, + 1267.0, + 296.0, + 1267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1263.0, + 1408.0, + 1263.0, + 1408.0, + 1297.0, + 
322.0, + 1297.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1291.0, + 450.0, + 1291.0, + 450.0, + 1326.0, + 321.0, + 1326.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 313.0, + 1406.0, + 313.0, + 1406.0, + 351.0, + 295.0, + 351.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 345.0, + 1409.0, + 345.0, + 1409.0, + 382.0, + 320.0, + 382.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 370.0, + 399.0, + 370.0, + 399.0, + 411.0, + 320.0, + 411.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1027.0, + 1406.0, + 1027.0, + 1406.0, + 1071.0, + 294.0, + 1071.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1062.0, + 1290.0, + 1062.0, + 1290.0, + 1097.0, + 321.0, + 1097.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 944.0, + 1404.0, + 944.0, + 1404.0, + 983.0, + 294.0, + 983.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 974.0, + 1061.0, + 974.0, + 1061.0, + 1014.0, + 320.0, + 1014.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 690.0, + 1405.0, + 690.0, + 1405.0, + 728.0, + 293.0, + 728.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 723.0, + 995.0, + 723.0, + 995.0, + 755.0, + 321.0, + 755.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 775.0, + 1407.0, + 775.0, + 1407.0, + 811.0, + 295.0, + 811.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 806.0, + 1113.0, + 806.0, + 1113.0, + 841.0, + 320.0, + 841.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 861.0, + 1404.0, + 861.0, + 1404.0, + 897.0, + 296.0, + 897.0 + ], + "score": 1.0, + "text": "" + }, 
+ { + "category_id": 15, + "poly": [ + 322.0, + 892.0, + 1325.0, + 892.0, + 1325.0, + 927.0, + 322.0, + 927.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1115.0, + 1408.0, + 1115.0, + 1408.0, + 1152.0, + 294.0, + 1152.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 1148.0, + 1405.0, + 1148.0, + 1405.0, + 1183.0, + 323.0, + 1183.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 1179.0, + 1110.0, + 1179.0, + 1110.0, + 1213.0, + 323.0, + 1213.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 574.0, + 1405.0, + 574.0, + 1405.0, + 612.0, + 295.0, + 612.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 609.0, + 1409.0, + 609.0, + 1409.0, + 640.0, + 323.0, + 640.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 634.0, + 903.0, + 634.0, + 903.0, + 675.0, + 324.0, + 675.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 923.0, + 634.0, + 1083.0, + 634.0, + 1083.0, + 675.0, + 923.0, + 675.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 428.0, + 1406.0, + 428.0, + 1406.0, + 467.0, + 293.0, + 467.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 460.0, + 1409.0, + 460.0, + 1409.0, + 497.0, + 320.0, + 497.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 489.0, + 1409.0, + 489.0, + 1409.0, + 529.0, + 320.0, + 529.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 524.0, + 744.0, + 524.0, + 744.0, + 554.0, + 324.0, + 554.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1381.0, + 1139.0, + 1381.0, + 1139.0, + 1428.0, + 293.0, + 1428.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 13, + "width": 
1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 297, + 628, + 1402, + 628, + 1402, + 755, + 297, + 755 + ], + "score": 0.977 + }, + { + "category_id": 1, + "poly": [ + 298, + 1310, + 1406, + 1310, + 1406, + 1403, + 298, + 1403 + ], + "score": 0.977 + }, + { + "category_id": 1, + "poly": [ + 297, + 880, + 1408, + 880, + 1408, + 975, + 297, + 975 + ], + "score": 0.976 + }, + { + "category_id": 1, + "poly": [ + 296, + 1005, + 1406, + 1005, + 1406, + 1101, + 296, + 1101 + ], + "score": 0.972 + }, + { + "category_id": 1, + "poly": [ + 296, + 1718, + 1407, + 1718, + 1407, + 1815, + 296, + 1815 + ], + "score": 0.97 + }, + { + "category_id": 8, + "poly": [ + 574, + 1418, + 1120, + 1418, + 1120, + 1525, + 574, + 1525 + ], + "score": 0.961 + }, + { + "category_id": 8, + "poly": [ + 520, + 770, + 1175, + 770, + 1175, + 866, + 520, + 866 + ], + "score": 0.954 + }, + { + "category_id": 8, + "poly": [ + 506, + 1205, + 1190, + 1205, + 1190, + 1298, + 506, + 1298 + ], + "score": 0.952 + }, + { + "category_id": 8, + "poly": [ + 507, + 1615, + 1187, + 1615, + 1187, + 1702, + 507, + 1702 + ], + "score": 0.951 + }, + { + "category_id": 1, + "poly": [ + 297, + 1539, + 1402, + 1539, + 1402, + 1602, + 297, + 1602 + ], + "score": 0.95 + }, + { + "category_id": 1, + "poly": [ + 289, + 225, + 1404, + 225, + 1404, + 300, + 289, + 300 + ], + "score": 0.949 + }, + { + "category_id": 8, + "poly": [ + 758, + 315, + 944, + 315, + 944, + 397, + 758, + 397 + ], + "score": 0.948 + }, + { + "category_id": 8, + "poly": [ + 397, + 526, + 1300, + 526, + 1300, + 613, + 397, + 613 + ], + "score": 0.946 + }, + { + "category_id": 8, + "poly": [ + 573, + 1924, + 1126, + 1924, + 1126, + 2032, + 573, + 2032 + ], + "score": 0.94 + }, + { + "category_id": 1, + "poly": [ + 294, + 1132, + 1404, + 1132, + 1404, + 1197, + 294, + 1197 + ], + "score": 0.933 + }, + { + "category_id": 2, + "poly": [ + 297, + 73, + 858, + 73, + 858, + 106, + 297, + 106 + ], + "score": 0.927 + 
}, + { + "category_id": 1, + "poly": [ + 294, + 410, + 1198, + 410, + 1198, + 447, + 294, + 447 + ], + "score": 0.927 + }, + { + "category_id": 1, + "poly": [ + 294, + 476, + 1099, + 476, + 1099, + 512, + 294, + 512 + ], + "score": 0.925 + }, + { + "category_id": 1, + "poly": [ + 295, + 1846, + 1400, + 1846, + 1400, + 1911, + 295, + 1911 + ], + "score": 0.921 + }, + { + "category_id": 9, + "poly": [ + 1350, + 803, + 1400, + 803, + 1400, + 835, + 1350, + 835 + ], + "score": 0.898 + }, + { + "category_id": 9, + "poly": [ + 1352, + 551, + 1400, + 551, + 1400, + 583, + 1352, + 583 + ], + "score": 0.884 + }, + { + "category_id": 2, + "poly": [ + 835, + 2087, + 864, + 2087, + 864, + 2113, + 835, + 2113 + ], + "score": 0.87 + }, + { + "category_id": 14, + "poly": [ + 521, + 768, + 1176, + 768, + 1176, + 867, + 521, + 867 + ], + "score": 0.94, + "latex": "0 \\in \\left[ \\begin{array} { l } { \\nabla _ { x } F ( x ^ { * } , y ^ { * } ) } \\\\ { \\nabla _ { y } G ( x ^ { * } , y ^ { * } ) } \\end{array} \\right] + \\sum _ { i = 1 } ^ { \\operatorname* { m a x } \\{ n _ { 1 } , n _ { 2 } \\} } \\left( \\partial r _ { i } ( x ^ { * } ) \\times \\partial d _ { i } ( y ^ { * } ) \\right)" + }, + { + "category_id": 14, + "poly": [ + 506, + 1203, + 1193, + 1203, + 1193, + 1298, + 506, + 1298 + ], + "score": 0.94, + "latex": "\\operatorname* { m i n } _ { x \\in \\mathbb { R } ^ { d } } \\left\\{ f ( x ) + \\sum _ { i = 1 } ^ { n } r _ { i } ( x ) \\right\\} \\quad { \\mathrm { s . t . } } \\quad h _ { j } ( x ) \\leq 0 \\quad j = 1 , \\ldots , p ." 
+ }, + { + "category_id": 13, + "poly": [ + 695, + 227, + 1001, + 227, + 1001, + 269, + 695, + 269 + ], + "score": 0.93, + "latex": "\\begin{array} { r } { \\nabla f ( x ) \\doteq \\frac { 1 } { m } \\sum _ { j = 1 } ^ { m } \\nabla f _ { j } ( x ) } \\end{array}" + }, + { + "category_id": 14, + "poly": [ + 508, + 1612, + 1188, + 1612, + 1188, + 1706, + 508, + 1706 + ], + "score": 0.93, + "latex": "0 \\in \\left[ \\begin{array} { c } { \\nabla f ( x ) + \\sum _ { j = 1 } ^ { p } \\gamma _ { j } \\nabla h _ { j } ( x ) } \\\\ { - h ( x ) } \\end{array} \\right] + \\sum _ { i = 1 } ^ { n } \\left( \\partial r _ { i } ( x ^ { * } ) \\times \\{ 0 \\} \\right)" + }, + { + "category_id": 14, + "poly": [ + 573, + 1416, + 1123, + 1416, + 1123, + 1526, + 573, + 1526 + ], + "score": 0.93, + "latex": "\\operatorname* { m i n } _ { x \\in \\mathbb { R } ^ { d } } \\operatorname* { m a x } _ { \\gamma \\in \\mathbb { R } _ { + } ^ { p } } \\left\\{ f ( x ) + \\sum _ { i = 1 } ^ { n } r _ { i } ( x ) + \\sum _ { j = 1 } ^ { p } \\gamma _ { j } h _ { j } ( x ) \\right\\} ." + }, + { + "category_id": 14, + "poly": [ + 399, + 522, + 1300, + 522, + 1300, + 615, + 399, + 615 + ], + "score": 0.93, + "latex": "x ^ { * } \\in \\underset { x \\in \\mathbb { R } ^ { d _ { x } } } { \\arg \\operatorname* { m i n } } F ( x , y ^ { * } ) + \\underset { i = 1 } { \\overset { n _ { 1 } } { \\sum } } r _ { i } ( x ) \\quad \\mathrm { a n d } \\quad y ^ { * } \\in \\underset { y \\in \\mathbb { R } ^ { d _ { y } } } { \\arg \\operatorname* { m i n } } G ( x ^ { * } , y ) + \\underset { i = 1 } { \\overset { n _ { 2 } } { \\sum } } d _ { i } ( y ) ." 
+ }, + { + "category_id": 13, + "poly": [ + 420, + 629, + 552, + 629, + 552, + 666, + 420, + 666 + ], + "score": 0.93, + "latex": "\\scriptstyle \\sum _ { i = 1 } ^ { n _ { 1 } } r _ { i } ( x )" + }, + { + "category_id": 13, + "poly": [ + 1069, + 881, + 1189, + 881, + 1189, + 915, + 1069, + 915 + ], + "score": 0.93, + "latex": "r _ { i } ( x ) = 0" + }, + { + "category_id": 14, + "poly": [ + 754, + 311, + 944, + 311, + 944, + 398, + 754, + 398 + ], + "score": 0.93, + "latex": "\\frac { 1 } { | \\mathbf { B } | } \\sum _ { j \\in \\mathbf { B } } \\nabla f _ { j } ( z )" + }, + { + "category_id": 13, + "poly": [ + 1024, + 266, + 1103, + 266, + 1103, + 300, + 1024, + 300 + ], + "score": 0.92, + "latex": "\\nabla f ( x )" + }, + { + "category_id": 13, + "poly": [ + 417, + 881, + 616, + 881, + 616, + 915, + 417, + 915 + ], + "score": 0.92, + "latex": "i > \\operatorname* { m i n } \\{ n _ { 1 } , n _ { 2 } \\}" + }, + { + "category_id": 13, + "poly": [ + 607, + 628, + 740, + 628, + 740, + 667, + 607, + 667 + ], + "score": 0.92, + "latex": "\\textstyle \\sum _ { i = 1 } ^ { n _ { 2 } } d _ { i } ( y )" + }, + { + "category_id": 13, + "poly": [ + 374, + 1719, + 781, + 1719, + 781, + 1756, + 374, + 1756 + ], + "score": 0.92, + "latex": "h ( \\boldsymbol { x } ) = [ h _ { 1 } ( \\boldsymbol { x } ) , h _ { 2 } ( \\boldsymbol { x } ) , \\ldots , h _ { p } ( \\boldsymbol { x } ) ] ^ { \\top }" + }, + { + "category_id": 13, + "poly": [ + 425, + 1372, + 509, + 1372, + 509, + 1403, + 425, + 1403 + ], + "score": 0.92, + "latex": "\\gamma \\in \\mathbb { R } ^ { p }" + }, + { + "category_id": 13, + "poly": [ + 1002, + 412, + 1189, + 412, + 1189, + 446, + 1002, + 446 + ], + "score": 0.92, + "latex": "\\mathbf { B } \\in \\{ 1 , \\dots , m \\}" + }, + { + "category_id": 13, + "poly": [ + 298, + 911, + 414, + 911, + 414, + 945, + 298, + 945 + ], + "score": 0.92, + "latex": "d _ { i } ( y ) = 0" + }, + { + "category_id": 13, + "poly": [ + 298, + 692, + 407, + 692, + 407, + 725, + 
298, + 725 + ], + "score": 0.92, + "latex": "- G ( x , y )" + }, + { + "category_id": 13, + "poly": [ + 1277, + 661, + 1404, + 661, + 1404, + 695, + 1277, + 695 + ], + "score": 0.91, + "latex": "F ( x , y ) =" + }, + { + "category_id": 13, + "poly": [ + 371, + 227, + 631, + 227, + 631, + 269, + 371, + 269 + ], + "score": 0.91, + "latex": "\\begin{array} { r } { f ( x ) \\doteq \\frac { 1 } { m } \\sum _ { j = 1 } ^ { m } f _ { j } ( x ) } \\end{array}" + }, + { + "category_id": 14, + "poly": [ + 572, + 1921, + 1125, + 1921, + 1125, + 2034, + 572, + 2034 + ], + "score": 0.91, + "latex": "\\begin{array} { l l l } { \\underset { x \\in \\mathbb { R } ^ { d } } { \\operatorname* { m i n } } \\underset { y \\in \\mathbb { R } ^ { d } } { \\operatorname* { m a x } } x ^ { \\top } D y } & { \\mathrm { s . t . } } & { x \\in \\mathcal { C } _ { j } ^ { 1 } } & { j = 1 , \\dots , n _ { 1 } , } \\\\ & { } & { y \\in \\mathcal { C } _ { j } ^ { 2 } } & { j = 1 , \\dots , n _ { 2 } . } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 486, + 916, + 586, + 916, + 586, + 943, + 486, + 943 + ], + "score": 0.9, + "latex": "n _ { 1 } < n _ { 2 }" + }, + { + "category_id": 13, + "poly": [ + 1264, + 886, + 1369, + 886, + 1369, + 912, + 1264, + 912 + ], + "score": 0.89, + "latex": "n _ { 1 } < n _ { 2 }" + }, + { + "category_id": 13, + "poly": [ + 429, + 1312, + 449, + 1312, + 449, + 1343, + 429, + 1343 + ], + "score": 0.85, + "latex": "f" + }, + { + "category_id": 13, + "poly": [ + 750, + 1315, + 776, + 1315, + 776, + 1342, + 750, + 1342 + ], + "score": 0.84, + "latex": "r _ { i }" + }, + { + "category_id": 13, + "poly": [ + 1294, + 914, + 1314, + 914, + 1314, + 944, + 1294, + 944 + ], + "score": 0.84, + "latex": "f" + }, + { + "category_id": 13, + "poly": [ + 782, + 913, + 807, + 913, + 807, + 939, + 782, + 939 + ], + "score": 0.84, + "latex": "F" + }, + { + "category_id": 13, + "poly": [ + 859, + 913, + 885, + 913, + 885, + 939, + 859, + 939 + ], + "score": 0.83, + "latex": 
"G" + }, + { + "category_id": 13, + "poly": [ + 1060, + 1723, + 1079, + 1723, + 1079, + 1749, + 1060, + 1749 + ], + "score": 0.82, + "latex": "h" + }, + { + "category_id": 13, + "poly": [ + 544, + 1347, + 563, + 1347, + 563, + 1373, + 544, + 1373 + ], + "score": 0.81, + "latex": "p" + }, + { + "category_id": 13, + "poly": [ + 1114, + 1348, + 1132, + 1348, + 1132, + 1369, + 1114, + 1369 + ], + "score": 0.75, + "latex": "x" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 72.0, + 858.0, + 72.0, + 858.0, + 108.0, + 297.0, + 108.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 831.0, + 2085.0, + 868.0, + 2085.0, + 868.0, + 2124.0, + 831.0, + 2124.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 281.0, + 612.0, + 419.0, + 612.0, + 419.0, + 697.0, + 281.0, + 697.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 553.0, + 612.0, + 606.0, + 612.0, + 606.0, + 697.0, + 553.0, + 697.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 741.0, + 612.0, + 1276.0, + 612.0, + 1276.0, + 697.0, + 741.0, + 697.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1405.0, + 612.0, + 1416.0, + 612.0, + 1416.0, + 697.0, + 1405.0, + 697.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 408.0, + 690.0, + 1407.0, + 690.0, + 1407.0, + 727.0, + 408.0, + 727.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 722.0, + 1220.0, + 722.0, + 1220.0, + 755.0, + 293.0, + 755.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1308.0, + 428.0, + 1308.0, + 428.0, + 1344.0, + 293.0, + 1344.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 450.0, + 1308.0, + 749.0, + 1308.0, + 749.0, + 1344.0, + 450.0, + 1344.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 777.0, + 1308.0, + 1406.0, + 1308.0, + 
1406.0, + 1344.0, + 777.0, + 1344.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1340.0, + 543.0, + 1340.0, + 543.0, + 1377.0, + 293.0, + 1377.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 564.0, + 1340.0, + 1113.0, + 1340.0, + 1113.0, + 1377.0, + 564.0, + 1377.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1133.0, + 1340.0, + 1403.0, + 1340.0, + 1403.0, + 1377.0, + 1133.0, + 1377.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1372.0, + 424.0, + 1372.0, + 424.0, + 1406.0, + 296.0, + 1406.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 510.0, + 1372.0, + 856.0, + 1372.0, + 856.0, + 1406.0, + 510.0, + 1406.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 878.0, + 416.0, + 878.0, + 416.0, + 918.0, + 292.0, + 918.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 617.0, + 878.0, + 1068.0, + 878.0, + 1068.0, + 918.0, + 617.0, + 918.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1190.0, + 878.0, + 1263.0, + 878.0, + 1263.0, + 918.0, + 1190.0, + 918.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1370.0, + 878.0, + 1408.0, + 878.0, + 1408.0, + 918.0, + 1370.0, + 918.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 415.0, + 912.0, + 485.0, + 912.0, + 485.0, + 946.0, + 415.0, + 946.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 587.0, + 912.0, + 781.0, + 912.0, + 781.0, + 946.0, + 587.0, + 946.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 808.0, + 912.0, + 858.0, + 912.0, + 858.0, + 946.0, + 808.0, + 946.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 886.0, + 912.0, + 1293.0, + 912.0, + 1293.0, + 946.0, + 886.0, + 946.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1315.0, + 912.0, + 1408.0, + 912.0, + 1408.0, + 946.0, + 1315.0, + 946.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 942.0, + 969.0, + 942.0, + 969.0, + 976.0, + 295.0, + 976.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1005.0, + 1406.0, + 1005.0, + 1406.0, + 1043.0, + 294.0, + 1043.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1038.0, + 1404.0, + 1038.0, + 1404.0, + 1073.0, + 295.0, + 1073.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1067.0, + 437.0, + 1067.0, + 437.0, + 1103.0, + 292.0, + 1103.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1718.0, + 373.0, + 1718.0, + 373.0, + 1757.0, + 294.0, + 1757.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 782.0, + 1718.0, + 1059.0, + 1718.0, + 1059.0, + 1757.0, + 782.0, + 1757.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1080.0, + 1718.0, + 1407.0, + 1718.0, + 1407.0, + 1757.0, + 1080.0, + 1757.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1749.0, + 1409.0, + 1749.0, + 1409.0, + 1787.0, + 293.0, + 1787.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1776.0, + 380.0, + 1776.0, + 380.0, + 1819.0, + 292.0, + 1819.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1532.0, + 1406.0, + 1532.0, + 1406.0, + 1579.0, + 294.0, + 1579.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1571.0, + 612.0, + 1571.0, + 612.0, + 1602.0, + 296.0, + 1602.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 283.0, + 213.0, + 370.0, + 213.0, + 370.0, + 283.0, + 283.0, + 283.0 + ], + "score": 1.0, + "text": "" + 
}, + { + "category_id": 15, + "poly": [ + 632.0, + 213.0, + 694.0, + 213.0, + 694.0, + 283.0, + 632.0, + 283.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1002.0, + 213.0, + 1415.0, + 213.0, + 1415.0, + 283.0, + 1002.0, + 283.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 267.0, + 1023.0, + 267.0, + 1023.0, + 301.0, + 296.0, + 301.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1104.0, + 267.0, + 1200.0, + 267.0, + 1200.0, + 301.0, + 1104.0, + 301.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1128.0, + 1409.0, + 1128.0, + 1409.0, + 1172.0, + 292.0, + 1172.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1161.0, + 408.0, + 1161.0, + 408.0, + 1199.0, + 293.0, + 1199.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 406.0, + 1001.0, + 406.0, + 1001.0, + 452.0, + 293.0, + 452.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1190.0, + 406.0, + 1200.0, + 406.0, + 1200.0, + 452.0, + 1190.0, + 452.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 471.0, + 1099.0, + 471.0, + 1099.0, + 521.0, + 293.0, + 521.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1845.0, + 1404.0, + 1845.0, + 1404.0, + 1884.0, + 294.0, + 1884.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1880.0, + 559.0, + 1880.0, + 559.0, + 1912.0, + 295.0, + 1912.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 14, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 297, + 653, + 1406, + 653, + 1406, + 901, + 297, + 901 + ], + "score": 0.98 + }, + { + "category_id": 1, + "poly": [ + 298, + 1052, + 1404, + 1052, + 1404, + 1207, + 298, + 1207 + ], + "score": 0.977 
+ }, + { + "category_id": 1, + "poly": [ + 297, + 386, + 1407, + 386, + 1407, + 543, + 297, + 543 + ], + "score": 0.977 + }, + { + "category_id": 1, + "poly": [ + 298, + 914, + 1404, + 914, + 1404, + 1039, + 298, + 1039 + ], + "score": 0.977 + }, + { + "category_id": 1, + "poly": [ + 299, + 1221, + 1405, + 1221, + 1405, + 1377, + 299, + 1377 + ], + "score": 0.975 + }, + { + "category_id": 1, + "poly": [ + 298, + 1615, + 1405, + 1615, + 1405, + 1743, + 298, + 1743 + ], + "score": 0.973 + }, + { + "category_id": 8, + "poly": [ + 535, + 276, + 1159, + 276, + 1159, + 372, + 535, + 372 + ], + "score": 0.955 + }, + { + "category_id": 1, + "poly": [ + 295, + 1545, + 1402, + 1545, + 1402, + 1609, + 295, + 1609 + ], + "score": 0.942 + }, + { + "category_id": 1, + "poly": [ + 296, + 228, + 1095, + 228, + 1095, + 263, + 296, + 263 + ], + "score": 0.923 + }, + { + "category_id": 2, + "poly": [ + 297, + 74, + 857, + 74, + 857, + 106, + 297, + 106 + ], + "score": 0.921 + }, + { + "category_id": 1, + "poly": [ + 355, + 1967, + 1405, + 1967, + 1405, + 2034, + 355, + 2034 + ], + "score": 0.905 + }, + { + "category_id": 0, + "poly": [ + 301, + 586, + 786, + 586, + 786, + 622, + 301, + 622 + ], + "score": 0.904 + }, + { + "category_id": 0, + "poly": [ + 300, + 1419, + 677, + 1419, + 677, + 1455, + 300, + 1455 + ], + "score": 0.901 + }, + { + "category_id": 0, + "poly": [ + 299, + 1488, + 891, + 1488, + 891, + 1521, + 299, + 1521 + ], + "score": 0.899 + }, + { + "category_id": 1, + "poly": [ + 297, + 1803, + 573, + 1803, + 573, + 1836, + 297, + 1836 + ], + "score": 0.867 + }, + { + "category_id": 1, + "poly": [ + 362, + 1915, + 600, + 1915, + 600, + 1951, + 362, + 1951 + ], + "score": 0.864 + }, + { + "category_id": 2, + "poly": [ + 835, + 2088, + 864, + 2088, + 864, + 2113, + 835, + 2113 + ], + "score": 0.858 + }, + { + "category_id": 8, + "poly": [ + 429, + 1752, + 1272, + 1752, + 1272, + 1795, + 429, + 1795 + ], + "score": 0.842 + }, + { + "category_id": 1, + "poly": [ + 364, + 
1855, + 807, + 1855, + 807, + 1899, + 364, + 1899 + ], + "score": 0.753 + }, + { + "category_id": 8, + "poly": [ + 364, + 1855, + 807, + 1855, + 807, + 1899, + 364, + 1899 + ], + "score": 0.193 + }, + { + "category_id": 13, + "poly": [ + 815, + 386, + 918, + 386, + 918, + 426, + 815, + 426 + ], + "score": 0.95, + "latex": "\\mathcal { C } _ { j } ^ { 1 } = \\mathbb { R } ^ { d }" + }, + { + "category_id": 13, + "poly": [ + 855, + 1969, + 1033, + 1969, + 1033, + 2010, + 855, + 2010 + ], + "score": 0.95, + "latex": "\\left\\{ \\| p ^ { k } ( \\omega ) - p \\| \\right\\}" + }, + { + "category_id": 14, + "poly": [ + 535, + 273, + 1158, + 273, + 1158, + 375, + 535, + 375 + ], + "score": 0.94, + "latex": "0 \\in \\left[ \\begin{array} { c } { D y ^ { * } } \\\\ { - D ^ { \\top } x ^ { * } } \\end{array} \\right] + \\sum _ { j = 1 } ^ { \\operatorname* { m a x } \\{ n _ { 1 } , n _ { 2 } \\} } \\big ( N _ { { \\mathcal C } _ { j } ^ { 1 } } ( x ^ { * } ) \\times N _ { { \\mathcal C } _ { j } ^ { 2 } } ( y ^ { * } ) \\big ) ," + }, + { + "category_id": 13, + "poly": [ + 682, + 1967, + 798, + 1967, + 798, + 2006, + 682, + 2006 + ], + "score": 0.93, + "latex": "P [ \\tilde { \\Omega } ] = 1" + }, + { + "category_id": 13, + "poly": [ + 859, + 1647, + 969, + 1647, + 969, + 1679, + 859, + 1679 + ], + "score": 0.92, + "latex": "( \\Omega , { \\mathcal { F } } , P )" + }, + { + "category_id": 13, + "poly": [ + 298, + 1706, + 442, + 1706, + 442, + 1741, + 298, + 1741 + ], + "score": 0.92, + "latex": "0 , \\nu ^ { k } ( \\hat { p } ) \\stackrel { \\cdot } { \\geq } 0" + }, + { + "category_id": 13, + "poly": [ + 953, + 387, + 1057, + 387, + 1057, + 426, + 953, + 426 + ], + "score": 0.92, + "latex": "\\mathcal { C } _ { j } ^ { 2 } = \\mathbb { R } ^ { d }" + }, + { + "category_id": 13, + "poly": [ + 1031, + 1645, + 1258, + 1645, + 1258, + 1679, + 1031, + 1679 + ], + "score": 0.91, + "latex": "\\mathcal { F } _ { k } \\overset { \\cdot } { = } \\sigma ( p ^ { 1 } , \\cdot \\cdot 
\\cdot , p ^ { k } )" + }, + { + "category_id": 13, + "poly": [ + 1129, + 390, + 1229, + 390, + 1229, + 422, + 1129, + 422 + ], + "score": 0.91, + "latex": "n _ { 1 } \\neq n _ { 2 }" + }, + { + "category_id": 13, + "poly": [ + 1166, + 1677, + 1403, + 1677, + 1403, + 1711, + 1166, + 1711 + ], + "score": 0.91, + "latex": "\\chi ^ { k } ( p ) \\geq 0 , \\eta ^ { k } ( p ) \\geq" + }, + { + "category_id": 13, + "poly": [ + 1269, + 1967, + 1351, + 1967, + 1351, + 2002, + 1269, + 2002 + ], + "score": 0.91, + "latex": "\\omega \\in \\tilde { \\Omega }" + }, + { + "category_id": 13, + "poly": [ + 396, + 2004, + 469, + 2004, + 469, + 2035, + 396, + 2035 + ], + "score": 0.91, + "latex": "p \\in F" + }, + { + "category_id": 13, + "poly": [ + 774, + 1706, + 980, + 1706, + 980, + 1744, + 774, + 1744 + ], + "score": 0.9, + "latex": "\\scriptstyle \\sum _ { k = 1 } ^ { \\infty } \\eta ^ { k } ( p ) < \\infty" + }, + { + "category_id": 13, + "poly": [ + 938, + 1679, + 1016, + 1679, + 1016, + 1709, + 938, + 1709 + ], + "score": 0.89, + "latex": "p \\in F" + }, + { + "category_id": 13, + "poly": [ + 297, + 1147, + 368, + 1147, + 368, + 1174, + 297, + 1174 + ], + "score": 0.88, + "latex": "n = 0" + }, + { + "category_id": 13, + "poly": [ + 1282, + 1617, + 1317, + 1617, + 1317, + 1644, + 1282, + 1644 + ], + "score": 0.88, + "latex": "\\mathbb { R } ^ { d }" + }, + { + "category_id": 13, + "poly": [ + 554, + 1706, + 762, + 1706, + 762, + 1743, + 554, + 1743 + ], + "score": 0.87, + "latex": "\\scriptstyle \\sum _ { k = 1 } ^ { \\infty } \\chi ^ { k } ( p ) ^ { \\widehat { < } } \\infty" + }, + { + "category_id": 13, + "poly": [ + 1064, + 1614, + 1095, + 1614, + 1095, + 1647, + 1064, + 1647 + ], + "score": 0.87, + "latex": "p ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 396, + 1915, + 427, + 1915, + 427, + 1950, + 396, + 1950 + ], + "score": 0.86, + "latex": "p ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 612, + 1676, + 647, + 1676, + 647, + 1705, + 612, + 1705 + ], + 
"score": 0.85, + "latex": "\\mathbb { R } ^ { d }" + }, + { + "category_id": 14, + "poly": [ + 361, + 1854, + 809, + 1854, + 809, + 1898, + 361, + 1898 + ], + "score": 0.85, + "latex": "\\begin{array} { r l } { I . \\ ( \\forall p \\in F ) : } & { { } \\sum _ { k = 1 } ^ { \\infty } \\nu ^ { k } ( p ) < \\infty a . s . } \\end{array}" + }, + { + "category_id": 14, + "poly": [ + 422, + 1752, + 1271, + 1752, + 1271, + 1794, + 422, + 1794 + ], + "score": 0.84, + "latex": "\\begin{array} { r l } { ( \\forall k \\in \\mathbb { N } ) } & { \\mathbb { E } [ \\| p ^ { k + 1 } - p \\| ^ { 2 } | \\mathcal { F } _ { k } ] \\leq ( 1 + \\chi ^ { k } ( p ) ) \\| p ^ { k } - p \\| ^ { 2 } - \\nu ^ { k } ( p ) + \\eta ^ { k } ( p ) . } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 541, + 1967, + 565, + 1967, + 565, + 2000, + 541, + 2000 + ], + "score": 0.83, + "latex": "\\tilde { \\Omega }" + }, + { + "category_id": 13, + "poly": [ + 1321, + 1652, + 1343, + 1652, + 1343, + 1673, + 1321, + 1673 + ], + "score": 0.82, + "latex": "F" + }, + { + "category_id": 13, + "poly": [ + 1116, + 840, + 1142, + 840, + 1142, + 866, + 1116, + 866 + ], + "score": 0.81, + "latex": "B" + }, + { + "category_id": 13, + "poly": [ + 1385, + 1659, + 1400, + 1659, + 1400, + 1673, + 1385, + 1673 + ], + "score": 0.39, + "latex": "a" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 72.0, + 859.0, + 72.0, + 859.0, + 109.0, + 297.0, + 109.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 585.0, + 791.0, + 585.0, + 791.0, + 625.0, + 294.0, + 625.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1417.0, + 681.0, + 1417.0, + 681.0, + 1460.0, + 296.0, + 1460.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1487.0, + 895.0, + 1487.0, + 895.0, + 1526.0, + 295.0, + 1526.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 831.0, + 2085.0, + 869.0, + 2085.0, + 
869.0, + 2124.0, + 831.0, + 2124.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 656.0, + 1407.0, + 656.0, + 1407.0, + 690.0, + 295.0, + 690.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 688.0, + 1404.0, + 688.0, + 1404.0, + 721.0, + 295.0, + 721.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 718.0, + 1407.0, + 718.0, + 1407.0, + 749.0, + 294.0, + 749.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 746.0, + 1407.0, + 746.0, + 1407.0, + 782.0, + 294.0, + 782.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 776.0, + 1407.0, + 776.0, + 1407.0, + 816.0, + 292.0, + 816.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 808.0, + 1408.0, + 808.0, + 1408.0, + 842.0, + 294.0, + 842.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 837.0, + 1115.0, + 837.0, + 1115.0, + 874.0, + 294.0, + 874.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1143.0, + 837.0, + 1409.0, + 837.0, + 1409.0, + 874.0, + 1143.0, + 874.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 871.0, + 892.0, + 871.0, + 892.0, + 901.0, + 296.0, + 901.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1050.0, + 1405.0, + 1050.0, + 1405.0, + 1092.0, + 293.0, + 1092.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1086.0, + 1405.0, + 1086.0, + 1405.0, + 1119.0, + 296.0, + 1119.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1114.0, + 1406.0, + 1114.0, + 1406.0, + 1148.0, + 292.0, + 1148.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 369.0, + 1147.0, + 1404.0, + 1147.0, + 1404.0, + 1180.0, + 369.0, + 1180.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1175.0, + 1178.0, + 1175.0, + 1178.0, + 1208.0, + 294.0, + 1208.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 383.0, + 814.0, + 383.0, + 814.0, + 426.0, + 292.0, + 426.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 919.0, + 383.0, + 952.0, + 383.0, + 952.0, + 426.0, + 919.0, + 426.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1058.0, + 383.0, + 1128.0, + 383.0, + 1128.0, + 426.0, + 1058.0, + 426.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1230.0, + 383.0, + 1405.0, + 383.0, + 1405.0, + 426.0, + 1230.0, + 426.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 418.0, + 1405.0, + 418.0, + 1405.0, + 455.0, + 294.0, + 455.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 450.0, + 1405.0, + 450.0, + 1405.0, + 483.0, + 292.0, + 483.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 478.0, + 1407.0, + 478.0, + 1407.0, + 517.0, + 292.0, + 517.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 511.0, + 867.0, + 511.0, + 867.0, + 547.0, + 294.0, + 547.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 916.0, + 1405.0, + 916.0, + 1405.0, + 948.0, + 294.0, + 948.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 946.0, + 1406.0, + 946.0, + 1406.0, + 979.0, + 292.0, + 979.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 975.0, + 1406.0, + 975.0, + 1406.0, + 1012.0, + 293.0, + 1012.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1008.0, + 837.0, + 1008.0, + 837.0, + 1041.0, + 296.0, + 1041.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, 
+ "poly": [ + 293.0, + 1220.0, + 1409.0, + 1220.0, + 1409.0, + 1258.0, + 293.0, + 1258.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1254.0, + 1406.0, + 1254.0, + 1406.0, + 1287.0, + 297.0, + 1287.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1280.0, + 1407.0, + 1280.0, + 1407.0, + 1322.0, + 293.0, + 1322.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1312.0, + 1406.0, + 1312.0, + 1406.0, + 1350.0, + 294.0, + 1350.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1343.0, + 459.0, + 1343.0, + 459.0, + 1376.0, + 294.0, + 1376.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1611.0, + 1063.0, + 1611.0, + 1063.0, + 1650.0, + 292.0, + 1650.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1096.0, + 1611.0, + 1281.0, + 1611.0, + 1281.0, + 1650.0, + 1096.0, + 1650.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1318.0, + 1611.0, + 1408.0, + 1611.0, + 1408.0, + 1650.0, + 1318.0, + 1650.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1642.0, + 858.0, + 1642.0, + 858.0, + 1683.0, + 292.0, + 1683.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 970.0, + 1642.0, + 1030.0, + 1642.0, + 1030.0, + 1683.0, + 970.0, + 1683.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1259.0, + 1642.0, + 1320.0, + 1642.0, + 1320.0, + 1683.0, + 1259.0, + 1683.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1344.0, + 1642.0, + 1384.0, + 1642.0, + 1384.0, + 1683.0, + 1344.0, + 1683.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1401.0, + 1642.0, + 1408.0, + 1642.0, + 1408.0, + 1683.0, + 1401.0, + 1683.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + 
"poly": [ + 285.0, + 1672.0, + 297.0, + 1672.0, + 297.0, + 1761.0, + 285.0, + 1761.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 443.0, + 1672.0, + 553.0, + 1672.0, + 553.0, + 1761.0, + 443.0, + 1761.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 763.0, + 1672.0, + 773.0, + 1672.0, + 773.0, + 1761.0, + 763.0, + 1761.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1017.0, + 1672.0, + 1165.0, + 1672.0, + 1165.0, + 1761.0, + 1017.0, + 1761.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1404.0, + 1672.0, + 1407.0, + 1672.0, + 1407.0, + 1761.0, + 1404.0, + 1761.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1541.0, + 1406.0, + 1541.0, + 1406.0, + 1583.0, + 292.0, + 1583.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1577.0, + 635.0, + 1577.0, + 635.0, + 1609.0, + 295.0, + 1609.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 226.0, + 1100.0, + 226.0, + 1100.0, + 268.0, + 293.0, + 268.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 355.0, + 1964.0, + 540.0, + 1964.0, + 540.0, + 2012.0, + 355.0, + 2012.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 566.0, + 1964.0, + 681.0, + 1964.0, + 681.0, + 2012.0, + 566.0, + 2012.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 799.0, + 1964.0, + 854.0, + 1964.0, + 854.0, + 2012.0, + 799.0, + 2012.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1034.0, + 1964.0, + 1268.0, + 1964.0, + 1268.0, + 2012.0, + 1034.0, + 2012.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1352.0, + 1964.0, + 1408.0, + 1964.0, + 1408.0, + 2012.0, + 1352.0, + 2012.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 387.0, + 
2003.0, + 395.0, + 2003.0, + 395.0, + 2035.0, + 387.0, + 2035.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 470.0, + 2003.0, + 481.0, + 2003.0, + 481.0, + 2035.0, + 470.0, + 2035.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1803.0, + 574.0, + 1803.0, + 574.0, + 1839.0, + 295.0, + 1839.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 359.0, + 1911.0, + 395.0, + 1911.0, + 395.0, + 1954.0, + 359.0, + 1954.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 428.0, + 1911.0, + 604.0, + 1911.0, + 604.0, + 1954.0, + 428.0, + 1954.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 357.0, + 1848.0, + 360.0, + 1848.0, + 360.0, + 1905.0, + 357.0, + 1905.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 810.0, + 1848.0, + 813.0, + 1848.0, + 813.0, + 1905.0, + 810.0, + 1905.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 15, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 8, + "poly": [ + 576, + 1851, + 1122, + 1851, + 1122, + 2031, + 576, + 2031 + ], + "score": 0.971 + }, + { + "category_id": 1, + "poly": [ + 297, + 756, + 1406, + 756, + 1406, + 852, + 297, + 852 + ], + "score": 0.97 + }, + { + "category_id": 8, + "poly": [ + 312, + 1064, + 1387, + 1064, + 1387, + 1244, + 312, + 1244 + ], + "score": 0.967 + }, + { + "category_id": 8, + "poly": [ + 410, + 1323, + 1282, + 1323, + 1282, + 1505, + 410, + 1505 + ], + "score": 0.966 + }, + { + "category_id": 1, + "poly": [ + 294, + 941, + 1403, + 941, + 1403, + 1008, + 294, + 1008 + ], + "score": 0.953 + }, + { + "category_id": 8, + "poly": [ + 402, + 475, + 1294, + 475, + 1294, + 561, + 402, + 561 + ], + "score": 0.946 + }, + { + "category_id": 1, + "poly": [ + 294, + 1252, + 1402, + 1252, + 1402, + 1317, + 294, + 1317 + ], + "score": 0.946 + }, + { + "category_id": 1, + 
"poly": [ + 295, + 285, + 1401, + 285, + 1401, + 351, + 295, + 351 + ], + "score": 0.944 + }, + { + "category_id": 1, + "poly": [ + 297, + 1515, + 1404, + 1515, + 1404, + 1582, + 297, + 1582 + ], + "score": 0.937 + }, + { + "category_id": 8, + "poly": [ + 440, + 1593, + 1256, + 1593, + 1256, + 1674, + 440, + 1674 + ], + "score": 0.933 + }, + { + "category_id": 8, + "poly": [ + 760, + 397, + 936, + 397, + 936, + 435, + 760, + 435 + ], + "score": 0.929 + }, + { + "category_id": 2, + "poly": [ + 297, + 73, + 858, + 73, + 858, + 106, + 297, + 106 + ], + "score": 0.924 + }, + { + "category_id": 1, + "poly": [ + 296, + 1018, + 1401, + 1018, + 1401, + 1055, + 296, + 1055 + ], + "score": 0.922 + }, + { + "category_id": 1, + "poly": [ + 297, + 1816, + 464, + 1816, + 464, + 1847, + 297, + 1847 + ], + "score": 0.922 + }, + { + "category_id": 0, + "poly": [ + 301, + 885, + 782, + 885, + 782, + 918, + 301, + 918 + ], + "score": 0.919 + }, + { + "category_id": 1, + "poly": [ + 297, + 1698, + 951, + 1698, + 951, + 1732, + 297, + 1732 + ], + "score": 0.916 + }, + { + "category_id": 0, + "poly": [ + 298, + 1762, + 631, + 1762, + 631, + 1794, + 298, + 1794 + ], + "score": 0.911 + }, + { + "category_id": 1, + "poly": [ + 297, + 355, + 925, + 355, + 925, + 389, + 297, + 389 + ], + "score": 0.907 + }, + { + "category_id": 1, + "poly": [ + 297, + 568, + 749, + 568, + 749, + 602, + 297, + 602 + ], + "score": 0.907 + }, + { + "category_id": 1, + "poly": [ + 293, + 609, + 1398, + 609, + 1398, + 691, + 293, + 691 + ], + "score": 0.896 + }, + { + "category_id": 1, + "poly": [ + 295, + 441, + 339, + 441, + 339, + 471, + 295, + 471 + ], + "score": 0.896 + }, + { + "category_id": 0, + "poly": [ + 301, + 228, + 771, + 228, + 771, + 262, + 301, + 262 + ], + "score": 0.893 + }, + { + "category_id": 9, + "poly": [ + 1351, + 398, + 1401, + 398, + 1401, + 430, + 1351, + 430 + ], + "score": 0.892 + }, + { + "category_id": 9, + "poly": [ + 1351, + 1993, + 1402, + 1993, + 1402, + 2025, + 1351, + 2025 + 
], + "score": 0.891 + }, + { + "category_id": 1, + "poly": [ + 298, + 708, + 1204, + 708, + 1204, + 744, + 298, + 744 + ], + "score": 0.876 + }, + { + "category_id": 2, + "poly": [ + 835, + 2087, + 864, + 2087, + 864, + 2113, + 835, + 2113 + ], + "score": 0.864 + }, + { + "category_id": 9, + "poly": [ + 1351, + 1614, + 1401, + 1614, + 1401, + 1645, + 1351, + 1645 + ], + "score": 0.863 + }, + { + "category_id": 9, + "poly": [ + 1352, + 614, + 1400, + 614, + 1400, + 646, + 1352, + 646 + ], + "score": 0.83 + }, + { + "category_id": 14, + "poly": [ + 411, + 1320, + 1287, + 1320, + 1287, + 1509, + 411, + 1509 + ], + "score": 0.95, + "latex": "\\begin{array} { r l r } { { \\mathbb { E } [ \\| \\nabla _ { z } \\varphi _ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } , \\mathcal { E } _ { k } ] \\leq \\mathbb { E } [ 4 \\| B ( x _ { n + 1 } ^ { k } ) \\| ^ { 2 } + 2 \\Big \\| \\displaystyle \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\Big \\| ^ { 2 } + 4 \\| e ^ { k } \\| ^ { 2 } \\ \\Big | \\ \\mathcal { F } _ { k } , \\mathcal { E } _ { k } ] } } \\\\ & { } & { \\leq 4 ( N + 1 ) \\| B ( x _ { n + 1 } ^ { k } ) \\| ^ { 2 } + 2 \\Big \\| \\displaystyle \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\Big \\| ^ { 2 } + 4 N , ~ } \\end{array}" + }, + { + "category_id": 14, + "poly": [ + 576, + 1848, + 1120, + 1848, + 1120, + 2038, + 576, + 2038 + ], + "score": 0.95, + "latex": "\\begin{array} { r l } & { \\| B ( z ^ { k } ) \\| ^ { 2 } = \\| B ( z ^ { k } ) - B ( z ^ { * } ) + B ( z ^ { * } ) ) \\| ^ { 2 } } \\\\ & { \\qquad \\leq 2 \\| B ( z ^ { k } ) - B ( z ^ { * } ) \\| ^ { 2 } + 2 \\| B ( z ^ { * } ) \\| ^ { 2 } } \\\\ & { \\qquad \\leq 2 L ^ { 2 } \\| z ^ { k } - z ^ { * } \\| ^ { 2 } + 2 \\| B ( z ^ { * } ) \\| ^ { 2 } } \\\\ & { \\qquad \\leq 2 L ^ { 2 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + 2 \\| B ( z ^ { * } ) \\| ^ { 2 } . 
} \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 443, + 789, + 663, + 789, + 663, + 823, + 443, + 823 + ], + "score": 0.94, + "latex": "N \\doteq \\mathrm { m a x } _ { j \\in 1 \\ldots 4 } N _ { j }" + }, + { + "category_id": 13, + "poly": [ + 900, + 1020, + 997, + 1020, + 997, + 1055, + 900, + 1055 + ], + "score": 0.93, + "latex": "\\| \\nabla \\varphi _ { k } \\| ^ { 2 }" + }, + { + "category_id": 14, + "poly": [ + 440, + 1585, + 1257, + 1585, + 1257, + 1677, + 440, + 1677 + ], + "score": 0.93, + "latex": "\\mathbb { E } \\left[ \\| \\nabla _ { z } \\varphi _ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } \\right] \\leq 4 ( N + 1 ) \\mathbb { E } [ \\| B ( x _ { n + 1 } ^ { k } ) \\| ^ { 2 } | \\mathcal { F } _ { k } ] + 2 \\Big \\| \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\Big \\| ^ { 2 } + 4 N ." + }, + { + "category_id": 13, + "poly": [ + 371, + 656, + 499, + 656, + 499, + 687, + 371, + 687 + ], + "score": 0.93, + "latex": "C _ { 1 } , \\ldots , C _ { 4 }" + }, + { + "category_id": 14, + "poly": [ + 312, + 1062, + 1390, + 1062, + 1390, + 1249, + 312, + 1249 + ], + "score": 0.93, + "latex": "\\begin{array} { r l r } & { } & { \\displaystyle \\| \\nabla _ { z } \\varphi _ { k } \\| ^ { 2 } = \\Big \\| \\displaystyle \\sum _ { i = 1 } ^ { n + 1 } y _ { i } ^ { k } \\Big \\| ^ { 2 } \\leq 2 \\| y _ { n + 1 } ^ { k } \\| ^ { 2 } + 2 \\Big \\| \\displaystyle \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\Big \\| ^ { 2 } = 2 \\Big \\| B ( x _ { n + 1 } ^ { k } ) + e ^ { k } \\Big \\| ^ { 2 } + 2 \\Big \\| \\displaystyle \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\Big \\| ^ { 2 } } \\\\ & { } & { \\leq 4 \\| B ( x _ { n + 1 } ^ { k } ) \\| ^ { 2 } + 2 \\Big \\| \\displaystyle \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\Big \\| ^ { 2 } + 4 \\| e ^ { k } \\| ^ { 2 } . 
} \\end{array}" + }, + { + "category_id": 14, + "poly": [ + 404, + 472, + 1293, + 472, + 1293, + 564, + 404, + 564 + ], + "score": 0.93, + "latex": "T _ { k } \\doteq \\frac { \\tau } { \\overline { { \\rho } } } \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\frac { 1 } { \\overline { { \\rho } } \\tau } \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + 2 \\big ( 1 - \\overline { { \\rho } } L \\big ) \\| B \\big ( z ^ { k } \\big ) - w _ { n + 1 } ^ { k } \\| ^ { 2 }" + }, + { + "category_id": 13, + "poly": [ + 748, + 943, + 1104, + 943, + 1104, + 979, + 748, + 979 + ], + "score": 0.93, + "latex": "p ^ { * } = ( z ^ { * } , w _ { 1 } ^ { * } \\ldots , w _ { n + 1 } ^ { * } ) \\in \\mathcal { S }" + }, + { + "category_id": 13, + "poly": [ + 1327, + 1022, + 1396, + 1022, + 1396, + 1054, + 1327, + 1054 + ], + "score": 0.91, + "latex": "\\nabla _ { z } \\varphi _ { k }" + }, + { + "category_id": 14, + "poly": [ + 760, + 396, + 938, + 396, + 938, + 433, + 760, + 433 + ], + "score": 0.91, + "latex": "\\rho _ { k } \\leq \\overline { { \\rho } } < 1 / L ." 
+ }, + { + "category_id": 13, + "poly": [ + 427, + 570, + 509, + 570, + 509, + 602, + 427, + 602 + ], + "score": 0.91, + "latex": "p ^ { * } \\in { \\mathcal { S } }" + }, + { + "category_id": 13, + "poly": [ + 593, + 1515, + 624, + 1515, + 624, + 1551, + 593, + 1551 + ], + "score": 0.9, + "latex": "y _ { i } ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 769, + 1255, + 805, + 1255, + 805, + 1284, + 769, + 1284 + ], + "score": 0.9, + "latex": "\\mathcal { F } _ { k }" + }, + { + "category_id": 14, + "poly": [ + 322, + 606, + 1330, + 606, + 1330, + 649, + 322, + 649 + ], + "score": 0.9, + "latex": "\\begin{array} { r } { \\mathbb { E } [ \\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } | \\mathcal { F } _ { k } ] \\le \\big ( 1 + C _ { 1 } \\alpha _ { k } ^ { 2 } + C _ { 3 } \\alpha _ { k } \\rho _ { k } ^ { 2 } \\big ) \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - \\alpha _ { k } \\rho _ { k } T _ { k } + C _ { 2 } \\alpha _ { k } ^ { 2 } + C _ { 4 } \\alpha _ { k } \\rho _ { k } ^ { 2 } } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 1281, + 790, + 1317, + 790, + 1317, + 823, + 1281, + 823 + ], + "score": 0.9, + "latex": "N _ { j }" + }, + { + "category_id": 13, + "poly": [ + 986, + 711, + 1024, + 711, + 1024, + 742, + 986, + 742 + ], + "score": 0.89, + "latex": "G _ { k }" + }, + { + "category_id": 13, + "poly": [ + 883, + 1518, + 985, + 1518, + 985, + 1547, + 883, + 1547 + ], + "score": 0.89, + "latex": "i \\in 1 . . 
n" + }, + { + "category_id": 13, + "poly": [ + 470, + 1550, + 506, + 1550, + 506, + 1579, + 470, + 1579 + ], + "score": 0.89, + "latex": "\\mathcal { F } _ { k }" + }, + { + "category_id": 13, + "poly": [ + 659, + 1519, + 694, + 1519, + 694, + 1548, + 659, + 1548 + ], + "score": 0.89, + "latex": "\\mathcal { F } _ { k }" + }, + { + "category_id": 13, + "poly": [ + 406, + 712, + 439, + 712, + 439, + 742, + 406, + 742 + ], + "score": 0.89, + "latex": "T _ { k }" + }, + { + "category_id": 13, + "poly": [ + 855, + 1255, + 885, + 1255, + 885, + 1284, + 855, + 1284 + ], + "score": 0.88, + "latex": "\\mathcal { E } _ { k }" + }, + { + "category_id": 13, + "poly": [ + 1370, + 1519, + 1400, + 1519, + 1400, + 1548, + 1370, + 1548 + ], + "score": 0.87, + "latex": "\\mathcal { E } _ { k }" + }, + { + "category_id": 13, + "poly": [ + 876, + 319, + 901, + 319, + 901, + 346, + 876, + 346 + ], + "score": 0.82, + "latex": "B" + }, + { + "category_id": 13, + "poly": [ + 545, + 320, + 567, + 320, + 567, + 346, + 545, + 346 + ], + "score": 0.81, + "latex": "L" + }, + { + "category_id": 13, + "poly": [ + 1119, + 790, + 1147, + 790, + 1147, + 816, + 1119, + 816 + ], + "score": 0.81, + "latex": "N" + }, + { + "category_id": 13, + "poly": [ + 595, + 358, + 610, + 358, + 610, + 384, + 595, + 384 + ], + "score": 0.34, + "latex": "I" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 72.0, + 858.0, + 72.0, + 858.0, + 108.0, + 297.0, + 108.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 883.0, + 784.0, + 883.0, + 784.0, + 920.0, + 293.0, + 920.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1755.0, + 632.0, + 1755.0, + 632.0, + 1802.0, + 295.0, + 1802.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 228.0, + 774.0, + 228.0, + 774.0, + 265.0, + 296.0, + 265.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 831.0, + 2085.0, + 868.0, + 2085.0, 
+ 868.0, + 2125.0, + 831.0, + 2125.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 754.0, + 1406.0, + 754.0, + 1406.0, + 793.0, + 294.0, + 793.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 787.0, + 442.0, + 787.0, + 442.0, + 825.0, + 294.0, + 825.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 664.0, + 787.0, + 1118.0, + 787.0, + 1118.0, + 825.0, + 664.0, + 825.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1148.0, + 787.0, + 1280.0, + 787.0, + 1280.0, + 825.0, + 1148.0, + 825.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1318.0, + 787.0, + 1406.0, + 787.0, + 1406.0, + 825.0, + 1318.0, + 825.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 819.0, + 761.0, + 819.0, + 761.0, + 853.0, + 295.0, + 853.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 939.0, + 747.0, + 939.0, + 747.0, + 984.0, + 292.0, + 984.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1105.0, + 939.0, + 1406.0, + 939.0, + 1406.0, + 984.0, + 1105.0, + 984.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 973.0, + 1386.0, + 973.0, + 1386.0, + 1008.0, + 294.0, + 1008.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1250.0, + 768.0, + 1250.0, + 768.0, + 1290.0, + 293.0, + 1290.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 806.0, + 1250.0, + 854.0, + 1250.0, + 854.0, + 1290.0, + 806.0, + 1290.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 886.0, + 1250.0, + 1407.0, + 1250.0, + 1407.0, + 1290.0, + 886.0, + 1290.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1279.0, + 498.0, + 1279.0, + 498.0, + 1321.0, + 294.0, + 1321.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 280.0, + 1405.0, + 280.0, + 1405.0, + 326.0, + 293.0, + 326.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 317.0, + 544.0, + 317.0, + 544.0, + 352.0, + 295.0, + 352.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 568.0, + 317.0, + 875.0, + 317.0, + 875.0, + 352.0, + 568.0, + 352.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 902.0, + 317.0, + 911.0, + 317.0, + 911.0, + 352.0, + 902.0, + 352.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1511.0, + 592.0, + 1511.0, + 592.0, + 1555.0, + 292.0, + 1555.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 625.0, + 1511.0, + 658.0, + 1511.0, + 658.0, + 1555.0, + 625.0, + 1555.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 695.0, + 1511.0, + 882.0, + 1511.0, + 882.0, + 1555.0, + 695.0, + 1555.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 986.0, + 1511.0, + 1369.0, + 1511.0, + 1369.0, + 1555.0, + 986.0, + 1555.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1401.0, + 1511.0, + 1404.0, + 1511.0, + 1404.0, + 1555.0, + 1401.0, + 1555.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1547.0, + 469.0, + 1547.0, + 469.0, + 1582.0, + 293.0, + 1582.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 507.0, + 1547.0, + 584.0, + 1547.0, + 584.0, + 1582.0, + 507.0, + 1582.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1012.0, + 899.0, + 1012.0, + 899.0, + 1062.0, + 293.0, + 1062.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 998.0, + 1012.0, + 1326.0, + 1012.0, + 1326.0, + 1062.0, + 998.0, + 1062.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 1397.0, + 1012.0, + 1411.0, + 1012.0, + 1411.0, + 1062.0, + 1397.0, + 1062.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1814.0, + 467.0, + 1814.0, + 467.0, + 1850.0, + 294.0, + 1850.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1694.0, + 951.0, + 1694.0, + 951.0, + 1736.0, + 294.0, + 1736.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 355.0, + 594.0, + 355.0, + 594.0, + 392.0, + 296.0, + 392.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 611.0, + 355.0, + 922.0, + 355.0, + 922.0, + 392.0, + 611.0, + 392.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 565.0, + 426.0, + 565.0, + 426.0, + 607.0, + 295.0, + 607.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 510.0, + 565.0, + 751.0, + 565.0, + 751.0, + 607.0, + 510.0, + 607.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 317.0, + 600.0, + 321.0, + 600.0, + 321.0, + 657.0, + 317.0, + 657.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1331.0, + 600.0, + 1407.0, + 600.0, + 1407.0, + 657.0, + 1331.0, + 657.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 649.0, + 370.0, + 649.0, + 370.0, + 694.0, + 293.0, + 694.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 500.0, + 649.0, + 1394.0, + 649.0, + 1394.0, + 694.0, + 500.0, + 694.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 440.0, + 343.0, + 440.0, + 343.0, + 478.0, + 291.0, + 478.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 706.0, + 405.0, + 706.0, + 405.0, + 747.0, + 293.0, + 747.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 440.0, + 706.0, + 985.0, + 706.0, 
+ 985.0, + 747.0, + 440.0, + 747.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1025.0, + 706.0, + 1203.0, + 706.0, + 1203.0, + 747.0, + 1025.0, + 747.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 16, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 8, + "poly": [ + 329, + 977, + 1369, + 977, + 1369, + 1283, + 329, + 1283 + ], + "score": 0.972 + }, + { + "category_id": 8, + "poly": [ + 537, + 648, + 1161, + 648, + 1161, + 785, + 537, + 785 + ], + "score": 0.97 + }, + { + "category_id": 8, + "poly": [ + 298, + 1576, + 1404, + 1576, + 1404, + 2030, + 298, + 2030 + ], + "score": 0.967 + }, + { + "category_id": 8, + "poly": [ + 470, + 273, + 1232, + 273, + 1232, + 456, + 470, + 456 + ], + "score": 0.963 + }, + { + "category_id": 8, + "poly": [ + 446, + 838, + 1248, + 838, + 1248, + 929, + 446, + 929 + ], + "score": 0.954 + }, + { + "category_id": 8, + "poly": [ + 436, + 1341, + 1261, + 1341, + 1261, + 1446, + 436, + 1446 + ], + "score": 0.938 + }, + { + "category_id": 1, + "poly": [ + 296, + 228, + 970, + 228, + 970, + 264, + 296, + 264 + ], + "score": 0.928 + }, + { + "category_id": 1, + "poly": [ + 289, + 794, + 1333, + 794, + 1333, + 828, + 289, + 828 + ], + "score": 0.927 + }, + { + "category_id": 8, + "poly": [ + 494, + 555, + 1201, + 555, + 1201, + 601, + 494, + 601 + ], + "score": 0.925 + }, + { + "category_id": 2, + "poly": [ + 297, + 73, + 858, + 73, + 858, + 106, + 297, + 106 + ], + "score": 0.925 + }, + { + "category_id": 1, + "poly": [ + 299, + 510, + 1253, + 510, + 1253, + 545, + 299, + 545 + ], + "score": 0.925 + }, + { + "category_id": 1, + "poly": [ + 297, + 609, + 450, + 609, + 450, + 641, + 297, + 641 + ], + "score": 0.924 + }, + { + "category_id": 1, + "poly": [ + 297, + 463, + 741, + 463, + 741, + 497, + 297, + 497 + ], + "score": 0.923 + }, + { + "category_id": 1, + "poly": [ + 292, + 1521, + 1300, + 1521, + 1300, + 1561, + 292, + 1561 + ], + 
"score": 0.922 + }, + { + "category_id": 0, + "poly": [ + 298, + 1470, + 659, + 1470, + 659, + 1504, + 298, + 1504 + ], + "score": 0.911 + }, + { + "category_id": 1, + "poly": [ + 296, + 1292, + 1306, + 1292, + 1306, + 1329, + 296, + 1329 + ], + "score": 0.909 + }, + { + "category_id": 1, + "poly": [ + 296, + 939, + 412, + 939, + 412, + 970, + 296, + 970 + ], + "score": 0.894 + }, + { + "category_id": 9, + "poly": [ + 1351, + 1408, + 1401, + 1408, + 1401, + 1440, + 1351, + 1440 + ], + "score": 0.877 + }, + { + "category_id": 9, + "poly": [ + 1351, + 1244, + 1401, + 1244, + 1401, + 1276, + 1351, + 1276 + ], + "score": 0.875 + }, + { + "category_id": 9, + "poly": [ + 1350, + 416, + 1402, + 416, + 1402, + 448, + 1350, + 448 + ], + "score": 0.873 + }, + { + "category_id": 2, + "poly": [ + 835, + 2087, + 864, + 2087, + 864, + 2114, + 835, + 2114 + ], + "score": 0.834 + }, + { + "category_id": 9, + "poly": [ + 1350, + 1968, + 1402, + 1968, + 1402, + 1999, + 1350, + 1999 + ], + "score": 0.803 + }, + { + "category_id": 14, + "poly": [ + 328, + 977, + 1370, + 977, + 1370, + 1286, + 328, + 1286 + ], + "score": 0.95, + "latex": "\\begin{array} { r l } & { \\mathbb { E } [ \\| x _ { n + 1 } ^ { k } - z ^ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } ] \\leq 6 \\bar { \\rho } ^ { 2 } \\big ( ( N + 1 ) \\| B ( z ^ { k } ) \\| ^ { 2 } + \\| w _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { * } \\| ^ { 2 } + \\| w _ { n + 1 } ^ { * } \\| ^ { 2 } \\big ) + 3 \\bar { \\rho } ^ { 2 } N } \\\\ & { \\qquad = 6 \\bar { \\rho } ^ { 2 } \\Big ( 2 ( N + 1 ) L ^ { 2 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + 2 ( N + 1 ) \\| B ( z ^ { * } ) \\| ^ { 2 } } \\\\ & { \\qquad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad } \\\\ & { \\qquad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad 
\\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad } \\\\ & { \\leq 6 \\bar { \\rho } ^ { 2 } \\big ( 2 ( N + 1 ) L ^ { 2 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + \\| w _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { * } \\| ^ { 2 } \\big ) } \\\\ & { \\qquad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad \\quad + 1 8 \\bar { \\rho } ^ { 2 } ( N + 1 ) \\| B ( z ^ { * } ) \\| ^ { 2 } + 3 \\bar { \\rho } ^ { 2 } N } \\\\ & { \\leq 1 8 \\bar { \\rho } ^ { 2 } ( N + 1 ) \\big ( ( L ^ { 2 } + 1 ) \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + \\| B ( z ^ { * } ) \\| ^ { 2 } \\big ) + 3 \\bar { \\rho } ^ { 2 } N } \\end{array}" + }, + { + "category_id": 14, + "poly": [ + 468, + 272, + 1230, + 272, + 1230, + 458, + 468, + 458 + ], + "score": 0.94, + "latex": "\\begin{array} { r l } & { \\| B ( x _ { n + 1 } ^ { k } ) \\| ^ { 2 } = \\| B ( z ^ { k } ) + B ( x _ { n + 1 } ^ { k } ) - B ( z ^ { k } ) \\| ^ { 2 } } \\\\ & { \\qquad \\leq 2 \\| B ( z ^ { k } ) \\| ^ { 2 } + 2 \\| B ( x _ { n + 1 } ^ { k } ) - B ( z ^ { k } ) \\| ^ { 2 } } \\\\ & { \\qquad \\leq 2 \\| B ( z ^ { k } ) \\| ^ { 2 } + 2 L ^ { 2 } \\| x _ { n + 1 } ^ { k } - z ^ { k } \\| ^ { 2 } } \\\\ & { \\qquad \\leq 4 L ^ { 2 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + 4 \\| B ( z ^ { * } ) \\| ^ { 2 } + 2 L ^ { 2 } \\| x _ { n + 1 } ^ { k } - z ^ { k } \\| ^ { 2 } } \\end{array}" + }, + { + "category_id": 14, + "poly": [ + 447, + 836, + 1250, + 836, + 1250, + 933, + 447, + 933 + ], + "score": 0.93, + "latex": "\\begin{array} { r l } & { \\mathbb { E } \\big [ \\| x _ { n + 1 } ^ { k } - z ^ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } \\big ] \\leq \\mathbb { E } \\big [ 3 \\overline { { \\rho } } ^ { 2 } \\big ( \\| B ( z ^ { k } ) \\| ^ { 2 } + \\| \\epsilon ^ { k } \\| ^ { 2 } + \\| w _ { n + 1 } ^ { k } \\| ^ { 2 } \\big ) | \\mathcal { F } _ { k } \\big ] } \\\\ & { \\qquad \\leq 3 \\overline { { \\rho } } ^ { 2 } \\big ( ( N + 1 ) \\| B ( 
z ^ { k } ) \\| ^ { 2 } + \\| w _ { n + 1 } ^ { k } \\| ^ { 2 } + N \\big ) . } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 691, + 1295, + 864, + 1295, + 864, + 1330, + 691, + 1330 + ], + "score": 0.93, + "latex": "w _ { n + 1 } ^ { * } = B ( z ^ { * } )" + }, + { + "category_id": 14, + "poly": [ + 537, + 648, + 1161, + 648, + 1161, + 789, + 537, + 789 + ], + "score": 0.93, + "latex": "\\begin{array} { r l } & { \\| x _ { n + 1 } ^ { k } - z ^ { k } \\| ^ { 2 } = \\rho _ { k } ^ { 2 } \\| B ( z ^ { k } ) + \\epsilon ^ { k } - w _ { n + 1 } ^ { k } \\| ^ { 2 } } \\\\ & { \\qquad \\leq \\overline { { \\rho } } ^ { 2 } \\| B ( z ^ { k } ) + \\epsilon ^ { k } - w _ { n + 1 } ^ { k } \\| ^ { 2 } } \\\\ & { \\qquad \\leq 3 \\overline { { \\rho } } ^ { 2 } ( \\| B ( z ^ { k } ) \\| ^ { 2 } + \\| \\epsilon ^ { k } \\| ^ { 2 } + \\| w _ { n + 1 } ^ { k } \\| ^ { 2 } ) . } \\end{array}" + }, + { + "category_id": 14, + "poly": [ + 310, + 1571, + 1400, + 1571, + 1400, + 2039, + 310, + 2039 + ], + "score": 0.92, + "latex": "\\begin{array} { r l r } { { \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\bigg \\| ^ { 2 } = \\bigg \\| \\sum _ { i = 1 } ^ { n } ( \\tau ^ { - 1 } ( z ^ { k } - x _ { i } ^ { k } ) + w _ { i } ^ { k } ) \\bigg \\| ^ { 2 } } } \\\\ & { } & { \\leq 2 \\bigg \\| \\tau ^ { - 1 } \\sum _ { i = 1 } ^ { n } ( z ^ { k } - x _ { i } ^ { k } ) \\bigg \\| ^ { 2 } + 2 \\bigg \\| \\sum _ { i = 1 } ^ { n } w _ { i } ^ { k } \\bigg \\| ^ { 2 } } \\\\ & { } & { \\leq 2 \\pi \\tau ^ { - 2 } \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + 2 \\bigg \\| \\sum _ { i = 1 } ^ { n } w _ { i } ^ { k } \\bigg \\| ^ { 2 } } \\\\ & { } & { \\leq 4 \\pi ^ { 2 } \\tau ^ { - 2 } \\| z ^ { k } - z ^ { * } \\| ^ { 2 } + 4 \\pi \\tau ^ { - 2 } \\sum _ { i = 1 } ^ { n } \\| z ^ { * } - x _ { i } ^ { k } \\| ^ { 2 } + 4 m \\sum _ { i = 1 } ^ { n } \\| w _ { i } ^ { k } - w _ { i } ^ { * } \\| ^ { 2 } + 4 \\bigg \\| \\sum _ { i = 1 } ^ { n } w _ { i } ^ 
{ * } \\bigg \\| ^ { 2 } } \\\\ & { } & { \\leq 4 \\pi ^ { 2 } ( \\tau ^ { - 2 } + 1 ) \\| y ^ { k } - p ^ { * } \\| ^ { 2 } + 4 \\pi \\tau ^ { - 2 } \\sum _ { i = 1 } ^ { n } \\| z ^ { * } - x _ { i } ^ { k } \\| ^ { 2 } + 4 \\bigg \\| \\sum _ { i = 1 } ^ { n } w _ { i } ^ { * } \\bigg \\| ^ { 2 } . } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 963, + 1523, + 1257, + 1523, + 1257, + 1560, + 963, + 1560 + ], + "score": 0.91, + "latex": "y _ { i } ^ { k } = \\tau ^ { - 1 } ( z ^ { k } - x _ { i } ^ { k } ) + w _ { i } ^ { k }" + }, + { + "category_id": 14, + "poly": [ + 438, + 1341, + 1262, + 1341, + 1262, + 1448, + 438, + 1448 + ], + "score": 0.91, + "latex": "\\begin{array} { r l } & { \\mathbb { E } \\left[ \\left. B ( x _ { n + 1 } ^ { k } ) \\right. ^ { 2 } \\middle | \\mathcal { F } _ { k } \\right] \\leq 4 L ^ { 2 } \\left[ 1 + 9 \\overline { { \\rho } } ^ { 2 } ( L ^ { 2 } + 1 ) ( N + 1 ) \\right] \\Vert p ^ { k } - p ^ { * } \\Vert ^ { 2 } } \\\\ & { \\qquad + 4 \\big ( 1 + 9 \\overline { { \\rho } } ^ { 2 } L ^ { 2 } ( N + 1 ) \\big ) \\Vert B ( z ^ { * } ) \\Vert ^ { 2 } + 6 \\overline { { \\rho } } ^ { 2 } L ^ { 2 } N . } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 764, + 797, + 799, + 797, + 799, + 826, + 764, + 826 + ], + "score": 0.9, + "latex": "\\mathcal { F } _ { k }" + }, + { + "category_id": 13, + "poly": [ + 342, + 1527, + 435, + 1527, + 435, + 1555, + 342, + 1555 + ], + "score": 0.89, + "latex": "i \\in 1 . . 
n" + }, + { + "category_id": 14, + "poly": [ + 498, + 554, + 1197, + 554, + 1197, + 600, + 498, + 600 + ], + "score": 0.88, + "latex": "\\begin{array} { r } { x _ { n + 1 } ^ { k } - z ^ { k } = - \\rho _ { k } ( r ^ { k } - w _ { n + 1 } ^ { k } ) = - \\rho _ { k } ( B ( z ^ { k } ) + \\epsilon ^ { k } - w _ { n + 1 } ^ { k } ) , } \\end{array}" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 72.0, + 858.0, + 72.0, + 858.0, + 108.0, + 297.0, + 108.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1464.0, + 660.0, + 1464.0, + 660.0, + 1511.0, + 293.0, + 1511.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 831.0, + 2085.0, + 869.0, + 2085.0, + 869.0, + 2121.0, + 831.0, + 2121.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 226.0, + 970.0, + 226.0, + 970.0, + 267.0, + 293.0, + 267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 792.0, + 763.0, + 792.0, + 763.0, + 831.0, + 294.0, + 831.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 800.0, + 792.0, + 1335.0, + 792.0, + 1335.0, + 831.0, + 800.0, + 831.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 509.0, + 1254.0, + 509.0, + 1254.0, + 546.0, + 293.0, + 546.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 609.0, + 452.0, + 609.0, + 452.0, + 641.0, + 295.0, + 641.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 459.0, + 742.0, + 459.0, + 742.0, + 501.0, + 294.0, + 501.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 289.0, + 1515.0, + 341.0, + 1515.0, + 341.0, + 1566.0, + 289.0, + 1566.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 436.0, + 1515.0, + 962.0, + 1515.0, + 962.0, + 1566.0, + 436.0, + 1566.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 1258.0, + 1515.0, + 1302.0, + 1515.0, + 1302.0, + 1566.0, + 1258.0, + 1566.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1290.0, + 690.0, + 1290.0, + 690.0, + 1334.0, + 293.0, + 1334.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 865.0, + 1290.0, + 1309.0, + 1290.0, + 1309.0, + 1334.0, + 865.0, + 1334.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 938.0, + 412.0, + 938.0, + 412.0, + 972.0, + 295.0, + 972.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 17, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 297, + 377, + 1406, + 377, + 1406, + 472, + 297, + 472 + ], + "score": 0.975 + }, + { + "category_id": 8, + "poly": [ + 423, + 1559, + 1274, + 1559, + 1274, + 1857, + 423, + 1857 + ], + "score": 0.968 + }, + { + "category_id": 8, + "poly": [ + 491, + 492, + 1208, + 492, + 1208, + 904, + 491, + 904 + ], + "score": 0.965 + }, + { + "category_id": 8, + "poly": [ + 363, + 1157, + 1333, + 1157, + 1333, + 1340, + 363, + 1340 + ], + "score": 0.961 + }, + { + "category_id": 8, + "poly": [ + 597, + 1938, + 1100, + 1938, + 1100, + 2030, + 597, + 2030 + ], + "score": 0.959 + }, + { + "category_id": 8, + "poly": [ + 511, + 979, + 1186, + 979, + 1186, + 1067, + 511, + 1067 + ], + "score": 0.957 + }, + { + "category_id": 1, + "poly": [ + 298, + 1432, + 1403, + 1432, + 1403, + 1498, + 298, + 1498 + ], + "score": 0.955 + }, + { + "category_id": 1, + "poly": [ + 290, + 227, + 1402, + 227, + 1402, + 295, + 290, + 295 + ], + "score": 0.955 + }, + { + "category_id": 8, + "poly": [ + 544, + 314, + 1153, + 314, + 1153, + 356, + 544, + 356 + ], + "score": 0.935 + }, + { + "category_id": 1, + "poly": [ + 295, + 1878, + 1086, + 1878, + 1086, + 1915, + 295, + 1915 + ], + "score": 0.935 + }, + { + "category_id": 1, + "poly": [ + 296, + 922, + 654, + 922, + 654, + 
956, + 296, + 956 + ], + "score": 0.931 + }, + { + "category_id": 0, + "poly": [ + 299, + 1375, + 685, + 1375, + 685, + 1408, + 299, + 1408 + ], + "score": 0.929 + }, + { + "category_id": 2, + "poly": [ + 297, + 73, + 858, + 73, + 858, + 106, + 297, + 106 + ], + "score": 0.927 + }, + { + "category_id": 1, + "poly": [ + 297, + 1104, + 761, + 1104, + 761, + 1138, + 297, + 1138 + ], + "score": 0.926 + }, + { + "category_id": 9, + "poly": [ + 1351, + 866, + 1401, + 866, + 1401, + 897, + 1351, + 897 + ], + "score": 0.897 + }, + { + "category_id": 9, + "poly": [ + 1351, + 1303, + 1401, + 1303, + 1401, + 1334, + 1351, + 1334 + ], + "score": 0.89 + }, + { + "category_id": 9, + "poly": [ + 1351, + 1004, + 1401, + 1004, + 1401, + 1036, + 1351, + 1036 + ], + "score": 0.887 + }, + { + "category_id": 2, + "poly": [ + 835, + 2087, + 864, + 2087, + 864, + 2113, + 835, + 2113 + ], + "score": 0.869 + }, + { + "category_id": 14, + "poly": [ + 491, + 490, + 1205, + 490, + 1205, + 907, + 491, + 907 + ], + "score": 0.97, + "latex": "\\begin{array} { r l } { \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { * } - x _ { i } ^ { k } \\| ^ { 2 } = \\displaystyle \\sum _ { i = 1 } ^ { n } \\left\\| J _ { T , 4 _ { i } } ( z ^ { k } + \\tau w _ { i } ^ { k } ) - J _ { \\tau , 4 _ { i } } ( z ^ { * } + \\tau w _ { i } ^ { * } ) \\right\\| ^ { 2 } } & { } \\\\ { \\leq \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { k } + \\tau w _ { i } ^ { k } - z ^ { * } - \\tau w _ { i } ^ { * } \\| ^ { 2 } } & { } \\\\ { = \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - z ^ { * } + \\tau ( w _ { i } ^ { k } - w _ { i } ^ { * } ) \\| ^ { 2 } } & { } \\\\ { \\leq 2 n \\| z ^ { k } - z ^ { * } \\| ^ { 2 } + 2 \\tau ^ { 2 } \\displaystyle \\sum _ { i = 1 } ^ { n } \\| w _ { i } ^ { k } - w _ { i } ^ { * } \\| ^ { 2 } } & { } \\\\ { \\leq 2 ( n + \\tau ^ { 2 } ) \\| y ^ { k } - p ^ { * } \\| ^ { 2 } . 
} & { } \\end{array}" + }, + { + "category_id": 14, + "poly": [ + 426, + 1558, + 1275, + 1558, + 1275, + 1859, + 426, + 1859 + ], + "score": 0.95, + "latex": "\\begin{array} { l } { \\displaystyle \\| \\nabla _ { w _ { i } } \\varphi _ { k } \\| ^ { 2 } = \\left\\| x _ { i } ^ { k } - \\frac { 1 } { n + 1 } \\sum _ { j = 1 } ^ { n + 1 } x _ { j } ^ { k } \\right\\| ^ { 2 } = \\left\\| \\frac { 1 } { n + 1 } \\sum _ { j = 1 } ^ { n + 1 } ( x _ { i } ^ { k } - x _ { j } ^ { k } ) \\right\\| ^ { 2 } } \\\\ { \\displaystyle \\leq \\sum _ { j = 1 } ^ { n + 1 } \\| x _ { i } ^ { k } - x _ { j } ^ { k } \\| ^ { 2 } } \\\\ { \\displaystyle \\leq 2 \\sum _ { j = 1 } ^ { n + 1 } \\big ( \\| x _ { i } ^ { k } - z ^ { k } \\| ^ { 2 } + \\| z ^ { k } - x _ { j } ^ { k } \\| ^ { 2 } \\big ) } \\end{array}" + }, + { + "category_id": 14, + "poly": [ + 511, + 977, + 1188, + 977, + 1188, + 1069, + 511, + 1069 + ], + "score": 0.93, + "latex": "\\Big \\| \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\Big \\| ^ { 2 } \\leq 1 2 n ^ { 2 } \\tau ^ { - 2 } ( n + \\tau ^ { 2 } ) \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + 4 \\Big \\| \\sum _ { i = 1 } ^ { n } w _ { i } ^ { * } \\Big \\| ^ { 2 } ." 
+ }, + { + "category_id": 13, + "poly": [ + 861, + 377, + 1118, + 377, + 1118, + 415, + 861, + 415 + ], + "score": 0.93, + "latex": "x _ { i } ^ { k } = J _ { \\tau A _ { i } } ( z ^ { k } + \\tau w _ { i } ^ { k } )" + }, + { + "category_id": 13, + "poly": [ + 787, + 230, + 934, + 230, + 934, + 264, + 787, + 264 + ], + "score": 0.93, + "latex": "w _ { i } ^ { * } \\in A _ { i } ( z ^ { * } )" + }, + { + "category_id": 14, + "poly": [ + 596, + 1934, + 1098, + 1934, + 1098, + 2031, + 596, + 2031 + ], + "score": 0.93, + "latex": "\\sum _ { i = 1 } ^ { n + 1 } \\| \\nabla _ { w _ { i } } \\varphi _ { k } \\| ^ { 2 } \\leq 4 ( n + 1 ) \\sum _ { i = 1 } ^ { n + 1 } \\| x _ { i } ^ { k } - z ^ { k } \\| ^ { 2 } ," + }, + { + "category_id": 14, + "poly": [ + 362, + 1154, + 1333, + 1154, + 1333, + 1342, + 362, + 1342 + ], + "score": 0.92, + "latex": "\\begin{array} { r l } & { \\mathbb { E } \\left[ \\| \\nabla _ { z } \\varphi _ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } \\right] \\le 2 4 \\left[ ( 1 + 9 \\overline { { \\rho } } ^ { 2 } ) ( L ^ { 2 } + 1 ) ^ { 2 } ( N + 1 ) ^ { 2 } + n ^ { 2 } \\tau ^ { - 2 } ( n + \\tau ^ { 2 } ) \\right] \\| p ^ { k } - p ^ { * } \\| ^ { 2 } } \\\\ & { \\qquad + 1 6 ( N + 1 ) \\big ( 1 + 9 \\overline { { \\rho } } ^ { 2 } L ^ { 2 } ( N + 1 ) \\big ) \\| B ( z ^ { * } ) \\| ^ { 2 } + 8 \\bigg \\| \\displaystyle \\sum _ { i = 1 } ^ { n } w _ { i } ^ { * } \\bigg \\| ^ { 2 } } \\\\ & { \\qquad + 2 4 \\overline { { \\rho } } ^ { 2 } L ^ { 2 } ( N + 1 ) N + 4 N . } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 616, + 1881, + 776, + 1881, + 776, + 1914, + 616, + 1914 + ], + "score": 0.92, + "latex": "i \\in { 1 . . 
( n + 1 ) }" + }, + { + "category_id": 13, + "poly": [ + 977, + 230, + 1284, + 230, + 1284, + 264, + 977, + 264 + ], + "score": 0.92, + "latex": "z ^ { * } + \\tau w _ { i } ^ { * } \\in ( I + \\tau A _ { i } ) ( z ^ { * } )" + }, + { + "category_id": 13, + "poly": [ + 1088, + 1464, + 1247, + 1464, + 1247, + 1498, + 1088, + 1498 + ], + "score": 0.91, + "latex": "i \\in { 1 . . ( n + 1 ) }" + }, + { + "category_id": 13, + "poly": [ + 488, + 1435, + 545, + 1435, + 545, + 1466, + 488, + 1466 + ], + "score": 0.91, + "latex": "\\nabla \\varphi _ { k }" + }, + { + "category_id": 13, + "poly": [ + 1162, + 381, + 1254, + 381, + 1254, + 410, + 1162, + 410 + ], + "score": 0.89, + "latex": "i \\in 1 . . n" + }, + { + "category_id": 14, + "poly": [ + 546, + 312, + 1153, + 312, + 1153, + 354, + 546, + 354 + ], + "score": 0.89, + "latex": "z ^ { * } = ( I + \\tau A _ { i } ) ^ { - 1 } ( I + \\tau A _ { i } ) ( z ^ { * } ) = J _ { \\tau A _ { i } } ( z ^ { * } + \\tau w _ { i } ^ { * } ) ." + }, + { + "category_id": 13, + "poly": [ + 956, + 1435, + 981, + 1435, + 981, + 1462, + 956, + 1462 + ], + "score": 0.83, + "latex": "\\mathcal { P }" + }, + { + "category_id": 13, + "poly": [ + 687, + 232, + 710, + 232, + 710, + 259, + 687, + 259 + ], + "score": 0.81, + "latex": "s" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1371.0, + 687.0, + 1371.0, + 687.0, + 1412.0, + 295.0, + 1412.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 72.0, + 858.0, + 72.0, + 858.0, + 108.0, + 297.0, + 108.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 831.0, + 2085.0, + 869.0, + 2085.0, + 869.0, + 2125.0, + 831.0, + 2125.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 372.0, + 860.0, + 372.0, + 860.0, + 418.0, + 291.0, + 418.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1119.0, + 372.0, + 1161.0, + 372.0, + 1161.0, + 418.0, + 1119.0, + 418.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1255.0, + 372.0, + 1407.0, + 372.0, + 1407.0, + 418.0, + 1255.0, + 418.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 411.0, + 1404.0, + 411.0, + 1404.0, + 445.0, + 295.0, + 445.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 439.0, + 360.0, + 439.0, + 360.0, + 476.0, + 292.0, + 476.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1431.0, + 487.0, + 1431.0, + 487.0, + 1468.0, + 297.0, + 1468.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 546.0, + 1431.0, + 955.0, + 1431.0, + 955.0, + 1468.0, + 546.0, + 1468.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 982.0, + 1431.0, + 1404.0, + 1431.0, + 1404.0, + 1468.0, + 982.0, + 1468.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1465.0, + 1087.0, + 1465.0, + 1087.0, + 1497.0, + 296.0, + 1497.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1248.0, + 1465.0, + 1258.0, + 1465.0, + 1258.0, + 1497.0, + 1248.0, + 1497.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 228.0, + 686.0, + 228.0, + 686.0, + 265.0, + 294.0, + 265.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 711.0, + 228.0, + 786.0, + 228.0, + 786.0, + 265.0, + 711.0, + 265.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 935.0, + 228.0, + 976.0, + 228.0, + 976.0, + 265.0, + 935.0, + 265.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1285.0, + 228.0, + 1404.0, + 228.0, + 1404.0, + 265.0, + 1285.0, + 265.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 260.0, + 1323.0, + 260.0, + 1323.0, + 296.0, + 296.0, + 296.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 296.0, + 1876.0, + 615.0, + 1876.0, + 615.0, + 1920.0, + 296.0, + 1920.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 777.0, + 1876.0, + 1086.0, + 1876.0, + 1086.0, + 1920.0, + 777.0, + 1920.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 920.0, + 655.0, + 920.0, + 655.0, + 960.0, + 297.0, + 960.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1102.0, + 762.0, + 1102.0, + 762.0, + 1142.0, + 297.0, + 1142.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 18, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 8, + "poly": [ + 502, + 952, + 1197, + 952, + 1197, + 1091, + 502, + 1091 + ], + "score": 0.973 + }, + { + "category_id": 8, + "poly": [ + 326, + 285, + 1372, + 285, + 1372, + 835, + 326, + 835 + ], + "score": 0.969 + }, + { + "category_id": 8, + "poly": [ + 404, + 1309, + 1293, + 1309, + 1293, + 1440, + 404, + 1440 + ], + "score": 0.964 + }, + { + "category_id": 8, + "poly": [ + 511, + 1153, + 1185, + 1153, + 1185, + 1243, + 511, + 1243 + ], + "score": 0.959 + }, + { + "category_id": 8, + "poly": [ + 667, + 1852, + 1031, + 1852, + 1031, + 1942, + 667, + 1942 + ], + "score": 0.958 + }, + { + "category_id": 1, + "poly": [ + 295, + 1961, + 1401, + 1961, + 1401, + 2042, + 295, + 2042 + ], + "score": 0.953 + }, + { + "category_id": 1, + "poly": [ + 295, + 1762, + 1408, + 1762, + 1408, + 1832, + 295, + 1832 + ], + "score": 0.938 + }, + { + "category_id": 1, + "poly": [ + 297, + 1647, + 871, + 1647, + 871, + 1681, + 297, + 1681 + ], + "score": 0.933 + }, + { + "category_id": 1, + "poly": [ + 297, + 896, + 1088, + 896, + 1088, + 931, + 297, + 931 + ], + "score": 0.927 + }, + { + "category_id": 1, + "poly": [ + 297, + 1535, + 549, + 1535, + 549, + 1569, + 297, + 1569 + ], + "score": 0.927 + }, + { + "category_id": 2, + "poly": [ + 297, + 73, + 858, + 73, + 858, + 
106, + 297, + 106 + ], + "score": 0.925 + }, + { + "category_id": 8, + "poly": [ + 449, + 1587, + 1245, + 1587, + 1245, + 1633, + 449, + 1633 + ], + "score": 0.924 + }, + { + "category_id": 1, + "poly": [ + 296, + 1107, + 370, + 1107, + 370, + 1137, + 296, + 1137 + ], + "score": 0.924 + }, + { + "category_id": 1, + "poly": [ + 297, + 849, + 841, + 849, + 841, + 883, + 297, + 883 + ], + "score": 0.916 + }, + { + "category_id": 1, + "poly": [ + 297, + 228, + 867, + 228, + 867, + 264, + 297, + 264 + ], + "score": 0.907 + }, + { + "category_id": 1, + "poly": [ + 295, + 1261, + 345, + 1261, + 345, + 1291, + 295, + 1291 + ], + "score": 0.904 + }, + { + "category_id": 9, + "poly": [ + 1351, + 1204, + 1401, + 1204, + 1401, + 1236, + 1351, + 1236 + ], + "score": 0.898 + }, + { + "category_id": 9, + "poly": [ + 1351, + 1051, + 1401, + 1051, + 1401, + 1083, + 1351, + 1083 + ], + "score": 0.889 + }, + { + "category_id": 9, + "poly": [ + 1351, + 1403, + 1401, + 1403, + 1401, + 1434, + 1351, + 1434 + ], + "score": 0.884 + }, + { + "category_id": 8, + "poly": [ + 314, + 1700, + 1322, + 1700, + 1322, + 1744, + 314, + 1744 + ], + "score": 0.878 + }, + { + "category_id": 9, + "poly": [ + 1350, + 796, + 1402, + 796, + 1402, + 828, + 1350, + 828 + ], + "score": 0.864 + }, + { + "category_id": 9, + "poly": [ + 1353, + 1706, + 1401, + 1706, + 1401, + 1738, + 1353, + 1738 + ], + "score": 0.86 + }, + { + "category_id": 0, + "poly": [ + 299, + 1476, + 709, + 1476, + 709, + 1509, + 299, + 1509 + ], + "score": 0.852 + }, + { + "category_id": 2, + "poly": [ + 834, + 2087, + 867, + 2087, + 867, + 2114, + 834, + 2114 + ], + "score": 0.81 + }, + { + "category_id": 2, + "poly": [ + 834, + 2086, + 867, + 2086, + 867, + 2114, + 834, + 2114 + ], + "score": 0.148 + }, + { + "category_id": 14, + "poly": [ + 329, + 281, + 1377, + 281, + 1377, + 840, + 329, + 840 + ], + "score": 0.94, + "latex": "\\begin{array} { r l } { \\displaystyle \\sum _ { i = 1 } ^ { n + 1 } \\mathbb { E } \\| \\nabla _ { x _ { i 
} } \\varphi _ { i } \\| ^ { 2 } | \\mathcal { F } _ { k } | \\leq 4 ( n + 1 ) \\displaystyle \\sum _ { i = 1 } ^ { n + 1 } \\mathbb { E } \\| | x _ { i } ^ { k } - z ^ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } | } \\\\ & { \\leq 4 ( n + 1 ) \\mathbb { E } \\| | x _ { i } ^ { k } - z ^ { k } | ^ { 2 } | \\mathcal { F } _ { k } | + 4 ( n + 1 ) \\displaystyle \\sum _ { i = 1 } ^ { n } \\mathbb { E } \\| | x _ { i } ^ { k } - z ^ { k } | ^ { 2 } | \\mathcal { F } _ { k } | } \\\\ & { \\leq 4 ( n + 1 ) \\mathbb { E } \\| | x _ { i } ^ { k } - z ^ { k } | ^ { 2 } | \\mathcal { F } _ { k } | } \\\\ & { \\qquad + s ( n + 1 ) \\displaystyle \\sum _ { i = 1 } ^ { n } \\mathbb { E } \\| | x _ { i } ^ { k } - z ^ { k } | ^ { 2 } | \\mathcal { F } _ { k } | + 8 ( n + 1 ) ^ { 2 } \\| z ^ { k } - z ^ { k } | ^ { 2 } } \\\\ & { \\qquad + s ( n + 1 ) \\displaystyle \\sum _ { i = 1 } ^ { n } \\mathbb { E } \\| | x _ { i } ^ { k } - z ^ { k } | ^ { 2 } | \\mathcal { F } _ { k } | } \\\\ & { \\leq 4 ( n + 1 ) \\displaystyle \\sum _ { i = 1 } ^ { n } \\mathbb { E } \\| | x _ { i } ^ { k } - z ^ { k } | ^ { 2 } | \\mathcal { F } _ { k } | } \\\\ & { \\qquad + s ( n + 1 ) \\displaystyle \\sum _ { i = 1 } ^ { n } \\mathbb { E } \\| | x _ { i } ^ { k } - z ^ { k } | ^ { 2 } | \\mathcal { F } _ { k } | + 8 ( n + 1 ) ^ { 2 } \\| n ^ { k } - p ^ { k } | ^ { 2 } } \\\\ & { \\leq 8 ( n + 1 ) \\displaystyle \\sum _ { i = 1 } ^ { n } 2 \\tau ^ { 2 } + 1 + 9 s ^ { 2 } ( 2 ^ { k } + 1 ) | | b ^ { k } - z ^ { k } | ^ { 2 } | } \\\\ & \\leq 8 ( n + 1 ) \\displaystyle \\sum _ { i = 1 } ^ { n } 2 \\tau ^ { 2 } + 1 + 9 s ^ { 2 } ( 2 ^ { k } + 1 ) | | b ^ { k } - z \\end{array}" + }, + { + "category_id": 14, + "poly": [ + 502, + 946, + 1196, + 946, + 1196, + 1092, + 502, + 1092 + ], + "score": 0.94, + "latex": "\\begin{array} { r l r } { { \\mathbb { E } [ \\| \\nabla \\varphi _ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } ] = \\mathbb { E } [ \\| \\nabla _ { z } \\varphi _ { k } \\| ^ { 2 } | 
\\mathcal { F } _ { k } ] + \\sum _ { i = 1 } ^ { n + 1 } \\mathbb { E } [ \\| \\nabla _ { w _ { i } } \\varphi _ { k } \\| ^ { 2 } | \\mathcal { F } _ { k } ] } } \\\\ & { } & { \\leq C _ { 1 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + C _ { 2 } , } \\end{array}" + }, + { + "category_id": 14, + "poly": [ + 668, + 1848, + 1029, + 1848, + 1029, + 1943, + 668, + 1943 + ], + "score": 0.94, + "latex": "\\varphi _ { k } ( p ) = \\sum _ { i = 1 } ^ { n + 1 } \\langle z - x _ { i } ^ { k } , y _ { i } ^ { k } - w _ { i } \\rangle ." + }, + { + "category_id": 13, + "poly": [ + 903, + 1762, + 1186, + 1762, + 1186, + 1800, + 903, + 1800 + ], + "score": 0.93, + "latex": "\\mathbb { E } [ \\varphi _ { k } ( p ^ { k } ) - \\varphi _ { k } ( p ^ { * } ) | \\mathcal { F } _ { k } ]" + }, + { + "category_id": 13, + "poly": [ + 397, + 1965, + 552, + 1965, + 552, + 1999, + 397, + 1999 + ], + "score": 0.92, + "latex": "i \\in { 1 . . ( n + 1 ) }" + }, + { + "category_id": 13, + "poly": [ + 583, + 1999, + 847, + 1999, + 847, + 2039, + 583, + 2039 + ], + "score": 0.92, + "latex": "\\begin{array} { r } { \\varphi _ { k } ( p ) = \\sum _ { i = 1 } ^ { n + 1 } \\varphi _ { i , k } ( p ) } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 580, + 1797, + 838, + 1797, + 838, + 1831, + 580, + 1831 + ], + "score": 0.92, + "latex": "p = ( z , w _ { 1 } , \\ldots , w _ { n + 1 } )" + }, + { + "category_id": 13, + "poly": [ + 1096, + 1963, + 1402, + 1963, + 1402, + 2000, + 1096, + 2000 + ], + "score": 0.91, + "latex": "\\mathbb { E } [ \\varphi _ { i , k } ( p ^ { k } ) - \\varphi _ { i , k } ( p ^ { * } ) \\vert \\mathcal { F } _ { k } ]" + }, + { + "category_id": 14, + "poly": [ + 321, + 1699, + 1331, + 1699, + 1331, + 1742, + 321, + 1742 + ], + "score": 0.91, + "latex": "\\begin{array} { r } { \\mathbb { E } [ \\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } | \\mathcal { F } _ { k } ] \\le ( 1 + C _ { 1 } \\alpha _ { k } ^ { 2 } ) \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - 2 \\alpha _ { k } 
\\mathbb { E } [ \\varphi _ { k } ( p ^ { k } ) - \\varphi _ { k } ( p ^ { * } ) | \\mathcal { F } _ { k } ] + C _ { 2 } \\alpha _ { k } ^ { 2 } . } \\end{array}" + }, + { + "category_id": 14, + "poly": [ + 405, + 1306, + 1291, + 1306, + 1291, + 1444, + 405, + 1444 + ], + "score": 0.91, + "latex": "\\begin{array} { l } { { C _ { 2 } = 1 6 ( N + 1 ) \\left[ 1 + 4 { \\overline { { \\rho } } } ^ { 2 } ( n + 1 ) + 9 { \\overline { { \\rho } } } ^ { 2 } L ^ { 2 } ( N + 1 ) \\right] \\| B ( z ^ { * } ) \\| ^ { 2 } + 8 \\| \\displaystyle \\sum _ { i = 1 } ^ { n } w _ { i } ^ { * } \\| ^ { 2 } } } \\\\ { { \\nonumber } } \\\\ { { \\qquad + 1 2 { \\overline { { \\rho } } } ^ { 2 } N ( 2 L ^ { 2 } ( N + 1 ) + n + 1 ) + 4 N . } } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 722, + 232, + 758, + 232, + 758, + 262, + 722, + 262 + ], + "score": 0.91, + "latex": "\\mathcal { F } _ { k }" + }, + { + "category_id": 13, + "poly": [ + 636, + 1963, + 950, + 1963, + 950, + 2000, + 636, + 2000 + ], + "score": 0.91, + "latex": "\\varphi _ { i , k } ( p ) \\doteq \\langle z - x _ { i } ^ { k } , y _ { i } ^ { k } - w _ { i } \\rangle" + }, + { + "category_id": 14, + "poly": [ + 512, + 1152, + 1184, + 1152, + 1184, + 1248, + 512, + 1248 + ], + "score": 0.9, + "latex": "\\begin{array} { c } { { C _ { 1 } = 2 4 ( 1 + 1 0 \\overline { { { \\rho } } } ^ { 2 } ) ( n + 1 ) ( L ^ { 2 } + 1 ) ^ { 2 } ( N + 1 ) ^ { 2 } } } \\\\ { { { } } } \\\\ { { + 8 ( n + 1 ) \\left( 2 \\tau ^ { 2 } + 6 ( n + 1 ) + 1 + 3 ( n + 1 ) ^ { 2 } \\tau ^ { - 2 } \\right) } } \\end{array}" + }, + { + "category_id": 14, + "poly": [ + 454, + 1586, + 1243, + 1586, + 1243, + 1630, + 454, + 1630 + ], + "score": 0.89, + "latex": "\\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } = \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - 2 \\alpha _ { k } ( \\varphi _ { k } ( p ^ { k } ) - \\varphi _ { k } ( p ^ { * } ) ) + \\alpha _ { k } ^ { 2 } \\| \\nabla \\varphi _ { k } \\| ^ { 2 } ." 
+ }, + { + "category_id": 13, + "poly": [ + 614, + 1483, + 648, + 1483, + 648, + 1510, + 614, + 1510 + ], + "score": 0.84, + "latex": "\\varphi _ { k }" + }, + { + "category_id": 13, + "poly": [ + 349, + 2004, + 399, + 2004, + 399, + 2038, + 349, + 2038 + ], + "score": 0.81, + "latex": "\\varphi _ { i , k }" + }, + { + "category_id": 13, + "poly": [ + 306, + 1799, + 340, + 1799, + 340, + 1829, + 306, + 1829 + ], + "score": 0.74, + "latex": "\\varphi _ { k }" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 72.0, + 858.0, + 72.0, + 858.0, + 108.0, + 297.0, + 108.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1473.0, + 613.0, + 1473.0, + 613.0, + 1515.0, + 294.0, + 1515.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 649.0, + 1473.0, + 712.0, + 1473.0, + 712.0, + 1515.0, + 649.0, + 1515.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 830.0, + 2084.0, + 870.0, + 2084.0, + 870.0, + 2122.0, + 830.0, + 2122.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 830.0, + 2084.0, + 870.0, + 2084.0, + 870.0, + 2123.0, + 830.0, + 2123.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1958.0, + 396.0, + 1958.0, + 396.0, + 2006.0, + 293.0, + 2006.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 553.0, + 1958.0, + 635.0, + 1958.0, + 635.0, + 2006.0, + 553.0, + 2006.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 951.0, + 1958.0, + 1095.0, + 1958.0, + 1095.0, + 2006.0, + 951.0, + 2006.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 286.0, + 1980.0, + 348.0, + 1980.0, + 348.0, + 2052.0, + 286.0, + 2052.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 400.0, + 1980.0, + 582.0, + 1980.0, + 582.0, + 2052.0, + 400.0, + 2052.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, 
+ "poly": [ + 848.0, + 1980.0, + 869.0, + 1980.0, + 869.0, + 2052.0, + 848.0, + 2052.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1761.0, + 902.0, + 1761.0, + 902.0, + 1802.0, + 293.0, + 1802.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1187.0, + 1761.0, + 1406.0, + 1761.0, + 1406.0, + 1802.0, + 1187.0, + 1802.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 1793.0, + 305.0, + 1793.0, + 305.0, + 1834.0, + 290.0, + 1834.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 341.0, + 1793.0, + 579.0, + 1793.0, + 579.0, + 1834.0, + 341.0, + 1834.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 839.0, + 1793.0, + 851.0, + 1793.0, + 851.0, + 1834.0, + 839.0, + 1834.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1646.0, + 870.0, + 1646.0, + 870.0, + 1684.0, + 294.0, + 1684.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 896.0, + 1088.0, + 896.0, + 1088.0, + 935.0, + 295.0, + 935.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1533.0, + 552.0, + 1533.0, + 552.0, + 1573.0, + 295.0, + 1573.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1102.0, + 375.0, + 1102.0, + 375.0, + 1141.0, + 294.0, + 1141.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 848.0, + 845.0, + 848.0, + 845.0, + 888.0, + 295.0, + 888.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 226.0, + 721.0, + 226.0, + 721.0, + 268.0, + 293.0, + 268.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 759.0, + 226.0, + 871.0, + 226.0, + 871.0, + 268.0, + 759.0, + 268.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1260.0, + 344.0, + 
1260.0, + 344.0, + 1290.0, + 293.0, + 1290.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 19, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 8, + "poly": [ + 480, + 1898, + 1221, + 1898, + 1221, + 2032, + 480, + 2032 + ], + "score": 0.971 + }, + { + "category_id": 8, + "poly": [ + 318, + 1129, + 1376, + 1129, + 1376, + 1768, + 318, + 1768 + ], + "score": 0.959 + }, + { + "category_id": 1, + "poly": [ + 297, + 1777, + 1399, + 1777, + 1399, + 1842, + 297, + 1842 + ], + "score": 0.953 + }, + { + "category_id": 8, + "poly": [ + 530, + 668, + 1167, + 668, + 1167, + 733, + 530, + 733 + ], + "score": 0.952 + }, + { + "category_id": 8, + "poly": [ + 492, + 789, + 1205, + 789, + 1205, + 852, + 492, + 852 + ], + "score": 0.949 + }, + { + "category_id": 8, + "poly": [ + 594, + 438, + 1099, + 438, + 1099, + 503, + 594, + 503 + ], + "score": 0.94 + }, + { + "category_id": 8, + "poly": [ + 625, + 1036, + 1072, + 1036, + 1072, + 1082, + 625, + 1082 + ], + "score": 0.94 + }, + { + "category_id": 8, + "poly": [ + 638, + 571, + 1064, + 571, + 1064, + 613, + 638, + 613 + ], + "score": 0.938 + }, + { + "category_id": 8, + "poly": [ + 711, + 331, + 985, + 331, + 985, + 375, + 711, + 375 + ], + "score": 0.938 + }, + { + "category_id": 1, + "poly": [ + 296, + 621, + 1005, + 621, + 1005, + 656, + 296, + 656 + ], + "score": 0.929 + }, + { + "category_id": 1, + "poly": [ + 297, + 386, + 1130, + 386, + 1130, + 426, + 297, + 426 + ], + "score": 0.928 + }, + { + "category_id": 1, + "poly": [ + 298, + 864, + 1107, + 864, + 1107, + 900, + 298, + 900 + ], + "score": 0.927 + }, + { + "category_id": 1, + "poly": [ + 295, + 526, + 929, + 526, + 929, + 561, + 295, + 561 + ], + "score": 0.927 + }, + { + "category_id": 1, + "poly": [ + 298, + 741, + 870, + 741, + 870, + 775, + 298, + 775 + ], + "score": 0.927 + }, + { + "category_id": 1, + "poly": [ + 299, + 991, + 756, + 991, + 756, + 1024, + 299, + 1024 + ], + "score": 0.924 + }, + { 
+ "category_id": 2, + "poly": [ + 297, + 73, + 858, + 73, + 858, + 106, + 297, + 106 + ], + "score": 0.922 + }, + { + "category_id": 1, + "poly": [ + 298, + 1856, + 507, + 1856, + 507, + 1888, + 298, + 1888 + ], + "score": 0.92 + }, + { + "category_id": 1, + "poly": [ + 296, + 285, + 907, + 285, + 907, + 320, + 296, + 320 + ], + "score": 0.914 + }, + { + "category_id": 1, + "poly": [ + 297, + 1090, + 416, + 1090, + 416, + 1121, + 297, + 1121 + ], + "score": 0.891 + }, + { + "category_id": 9, + "poly": [ + 1351, + 575, + 1402, + 575, + 1402, + 607, + 1351, + 607 + ], + "score": 0.886 + }, + { + "category_id": 9, + "poly": [ + 1350, + 804, + 1402, + 804, + 1402, + 836, + 1350, + 836 + ], + "score": 0.884 + }, + { + "category_id": 9, + "poly": [ + 1351, + 1732, + 1400, + 1732, + 1400, + 1762, + 1351, + 1762 + ], + "score": 0.88 + }, + { + "category_id": 9, + "poly": [ + 1351, + 1994, + 1401, + 1994, + 1401, + 2024, + 1351, + 2024 + ], + "score": 0.875 + }, + { + "category_id": 2, + "poly": [ + 834, + 2088, + 863, + 2088, + 863, + 2113, + 834, + 2113 + ], + "score": 0.865 + }, + { + "category_id": 9, + "poly": [ + 1350, + 1138, + 1401, + 1138, + 1401, + 1169, + 1350, + 1169 + ], + "score": 0.856 + }, + { + "category_id": 0, + "poly": [ + 300, + 934, + 757, + 934, + 757, + 968, + 300, + 968 + ], + "score": 0.812 + }, + { + "category_id": 1, + "poly": [ + 296, + 228, + 894, + 228, + 894, + 264, + 296, + 264 + ], + "score": 0.665 + }, + { + "category_id": 0, + "poly": [ + 296, + 228, + 894, + 228, + 894, + 264, + 296, + 264 + ], + "score": 0.176 + }, + { + "category_id": 1, + "poly": [ + 300, + 934, + 757, + 934, + 757, + 968, + 300, + 968 + ], + "score": 0.103 + }, + { + "category_id": 14, + "poly": [ + 318, + 1131, + 1377, + 1131, + 1377, + 1772, + 318, + 1772 + ], + "score": 0.94, + "latex": "\\begin{array} { r l } { \\hat { \\sigma } _ { \\beta 1 , 1 } \\hat { x } _ { \\beta ^ { \\prime } 1 , 1 } ^ { ( f ) } = \\langle \\boldsymbol { \\xi } ^ { 2 } \\boldsymbol { 
\\cdot } \\boldsymbol { x } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } - \\boldsymbol { u } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } \\rangle } & { \\mathrm { ~ C ~ e ~ } } \\\\ & { = \\langle \\boldsymbol { \\xi } ^ { 2 } \\boldsymbol { \\cdot } \\boldsymbol { x } _ { \\alpha + 1 , \\beta } ^ { ( f ) } \\boldsymbol { y } _ { \\beta ^ { \\prime } 1 } ^ { ( f ) } - \\boldsymbol { u } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } \\rangle + \\langle \\boldsymbol { \\xi } ^ { 2 } - \\boldsymbol { x } _ { \\alpha + 1 , \\beta } ^ { ( f ) } \\boldsymbol { y } _ { \\beta 1 } ^ { ( f ) } - \\boldsymbol { B } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } \\rangle } \\\\ & { - \\langle \\boldsymbol { \\xi } ^ { 2 } \\boldsymbol { \\cdot } \\boldsymbol { u } _ { \\alpha + 1 , \\beta } ^ { ( f ) } \\boldsymbol { x } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } \\rangle + \\langle \\boldsymbol { \\xi } ^ { 2 } \\boldsymbol { \\cdot } \\boldsymbol { u } _ { \\alpha + 1 , \\beta } ^ { ( f ) } \\boldsymbol { x } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } \\rangle + \\langle \\boldsymbol { \\xi } ^ { 2 } \\boldsymbol { \\cdot } \\boldsymbol { u } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } \\boldsymbol { y } _ { \\beta 1 } ^ { ( f ) } - \\boldsymbol { u } _ { \\alpha - 1 , \\beta ^ { \\prime } } ^ { ( f ) } \\rangle } \\\\ & - \\langle \\boldsymbol { u } _ { \\alpha } ^ { \\beta } \\boldsymbol { y } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } \\rangle + \\langle \\boldsymbol { \\xi } ^ { 4 } \\boldsymbol { x } _ { \\alpha + 1 , \\beta ^ { \\prime } 1 } ^ { ( f ) } \\rangle + \\langle \\boldsymbol { u } _ { \\alpha } ^ { \\beta } \\boldsymbol { u } _ { \\alpha + 1 , \\beta ^ { \\prime } } ^ { ( f ) } \\boldsymbol { y } _ { \\beta ^ { \\prime } 1 } ^ { ( f ) } \\rangle + \\langle \\boldsymbol { u } _ { \\alpha } ^ { \\beta } \\boldsymbol { u } _ \\alpha + 1 , \\end{array}" + }, + { + 
"category_id": 13, + "poly": [ + 367, + 387, + 722, + 387, + 722, + 425, + 367, + 425 + ], + "score": 0.93, + "latex": "\\varphi _ { i , k } ( p ^ { k } ) = \\langle z ^ { k } - x _ { i } ^ { k } , y _ { i } ^ { k } - w _ { i } ^ { k } \\rangle" + }, + { + "category_id": 14, + "poly": [ + 476, + 1895, + 1220, + 1895, + 1220, + 2036, + 476, + 2036 + ], + "score": 0.93, + "latex": "\\begin{array} { r l } & { - \\varphi _ { n + 1 , k } ( p ^ { * } ) = \\langle z ^ { * } - x _ { n + 1 } ^ { k } , w _ { n + 1 } ^ { * } - y _ { n + 1 } ^ { k } \\rangle } \\\\ & { \\qquad = \\langle z ^ { * } - x _ { n + 1 } ^ { k } , B ( z ^ { * } ) - B ( x _ { i } ^ { k } ) \\rangle + \\langle x _ { n + 1 } ^ { k } - z ^ { * } , e ^ { k } \\rangle } \\\\ & { \\qquad \\geq \\langle x _ { n + 1 } ^ { k } - z ^ { * } , e ^ { k } \\rangle , } \\end{array}" + }, + { + "category_id": 14, + "poly": [ + 597, + 434, + 1102, + 434, + 1102, + 503, + 597, + 503 + ], + "score": 0.92, + "latex": "\\varphi _ { i , k } ( p ^ { k } ) = \\frac { \\tau } { 2 } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\frac { 1 } { 2 \\tau } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } ." + }, + { + "category_id": 13, + "poly": [ + 550, + 530, + 632, + 530, + 632, + 560, + 550, + 560 + ], + "score": 0.92, + "latex": "p ^ { * } \\in { \\mathcal { S } }" + }, + { + "category_id": 14, + "poly": [ + 631, + 569, + 1066, + 569, + 1066, + 611, + 631, + 611 + ], + "score": 0.91, + "latex": "- \\varphi _ { i , k } \\mathopen { } \\mathclose \\bgroup \\left( p ^ { * } \\aftergroup \\egroup \\right) = \\mathopen { } \\mathclose \\bgroup \\left. z ^ { * } - x _ { i } ^ { k } , w _ { i } ^ { * } - y _ { i } ^ { k } \\aftergroup \\egroup \\right. 
\\geq 0" + }, + { + "category_id": 14, + "poly": [ + 533, + 666, + 1162, + 666, + 1162, + 733, + 533, + 733 + ], + "score": 0.91, + "latex": "\\varphi _ { i , k } ( p ^ { k } ) - \\varphi _ { i , k } ( p ^ { * } ) \\geq \\frac { \\tau } { 2 } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\frac { 1 } { 2 \\tau } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } ," + }, + { + "category_id": 13, + "poly": [ + 658, + 864, + 688, + 864, + 688, + 900, + 658, + 900 + ], + "score": 0.91, + "latex": "y _ { i } ^ { k }" + }, + { + "category_id": 14, + "poly": [ + 625, + 1035, + 1073, + 1035, + 1073, + 1080, + 625, + 1080 + ], + "score": 0.91, + "latex": "z ^ { k } - x _ { n + 1 } ^ { k } = \\rho _ { k } ( B ( z ^ { k } ) - w _ { n + 1 } ^ { k } + \\epsilon ^ { k } ) ." + }, + { + "category_id": 14, + "poly": [ + 711, + 330, + 986, + 330, + 986, + 374, + 711, + 374 + ], + "score": 0.91, + "latex": "z ^ { k } - x _ { i } ^ { k } = \\tau ( y _ { i } ^ { k } - w _ { i } ^ { k } ) ." + }, + { + "category_id": 14, + "poly": [ + 493, + 785, + 1205, + 785, + 1205, + 852, + 493, + 852 + ], + "score": 0.9, + "latex": "\\mathbb { E } [ \\varphi _ { i , k } ( p ^ { k } ) - \\varphi _ { i , k } ( p ^ { * } ) | \\mathcal { F } _ { k } ] \\ge \\frac { \\tau } { 2 } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\frac { 1 } { 2 \\tau } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 }" + }, + { + "category_id": 13, + "poly": [ + 575, + 865, + 607, + 865, + 607, + 900, + 575, + 900 + ], + "score": 0.9, + "latex": "x _ { i } ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 683, + 530, + 775, + 530, + 775, + 558, + 683, + 558 + ], + "score": 0.9, + "latex": "i \\in 1 . . n" + }, + { + "category_id": 13, + "poly": [ + 738, + 744, + 773, + 744, + 773, + 773, + 738, + 773 + ], + "score": 0.9, + "latex": "\\mathcal { F } _ { k }" + }, + { + "category_id": 13, + "poly": [ + 765, + 624, + 856, + 624, + 856, + 651, + 765, + 651 + ], + "score": 0.89, + "latex": "i \\in 1 . . 
n" + }, + { + "category_id": 13, + "poly": [ + 790, + 867, + 825, + 867, + 825, + 897, + 790, + 897 + ], + "score": 0.89, + "latex": "\\mathcal { F } _ { k }" + }, + { + "category_id": 13, + "poly": [ + 342, + 289, + 434, + 289, + 434, + 317, + 342, + 317 + ], + "score": 0.89, + "latex": "i \\in 1 . . n" + }, + { + "category_id": 13, + "poly": [ + 1004, + 868, + 1096, + 868, + 1096, + 896, + 1004, + 896 + ], + "score": 0.88, + "latex": "i \\in 1 . . n" + }, + { + "category_id": 13, + "poly": [ + 558, + 624, + 590, + 624, + 590, + 654, + 558, + 654 + ], + "score": 0.88, + "latex": "A _ { i }" + }, + { + "category_id": 13, + "poly": [ + 1030, + 392, + 1122, + 392, + 1122, + 420, + 1030, + 420 + ], + "score": 0.88, + "latex": "i \\in 1 . . n" + }, + { + "category_id": 13, + "poly": [ + 614, + 236, + 662, + 236, + 662, + 265, + 614, + 265 + ], + "score": 0.86, + "latex": "\\varphi _ { i , k }" + }, + { + "category_id": 13, + "poly": [ + 814, + 1812, + 838, + 1812, + 838, + 1837, + 814, + 1837 + ], + "score": 0.84, + "latex": "B" + }, + { + "category_id": 13, + "poly": [ + 614, + 941, + 696, + 941, + 696, + 970, + 614, + 970 + ], + "score": 0.84, + "latex": "\\varphi _ { n + 1 , k }" + }, + { + "category_id": 13, + "poly": [ + 797, + 232, + 890, + 232, + 890, + 260, + 797, + 260 + ], + "score": 0.74, + "latex": "i \\in 1 . . 
n" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 72.0, + 858.0, + 72.0, + 858.0, + 108.0, + 297.0, + 108.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 830.0, + 2085.0, + 868.0, + 2085.0, + 868.0, + 2124.0, + 830.0, + 2124.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 929.0, + 613.0, + 929.0, + 613.0, + 974.0, + 294.0, + 974.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 697.0, + 929.0, + 761.0, + 929.0, + 761.0, + 974.0, + 697.0, + 974.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 226.0, + 613.0, + 226.0, + 613.0, + 267.0, + 295.0, + 267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 663.0, + 226.0, + 796.0, + 226.0, + 796.0, + 267.0, + 663.0, + 267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 891.0, + 226.0, + 894.0, + 226.0, + 894.0, + 267.0, + 891.0, + 267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1776.0, + 1404.0, + 1776.0, + 1404.0, + 1816.0, + 295.0, + 1816.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1808.0, + 813.0, + 1808.0, + 813.0, + 1844.0, + 295.0, + 1844.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 839.0, + 1808.0, + 850.0, + 1808.0, + 850.0, + 1844.0, + 839.0, + 1844.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 619.0, + 557.0, + 619.0, + 557.0, + 657.0, + 293.0, + 657.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 591.0, + 619.0, + 764.0, + 619.0, + 764.0, + 657.0, + 591.0, + 657.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 857.0, + 619.0, + 1008.0, + 619.0, + 1008.0, + 657.0, + 857.0, + 657.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 384.0, + 
366.0, + 384.0, + 366.0, + 429.0, + 295.0, + 429.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 723.0, + 384.0, + 1029.0, + 384.0, + 1029.0, + 429.0, + 723.0, + 429.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1123.0, + 384.0, + 1135.0, + 384.0, + 1135.0, + 429.0, + 1123.0, + 429.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 862.0, + 574.0, + 862.0, + 574.0, + 902.0, + 295.0, + 902.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 608.0, + 862.0, + 657.0, + 862.0, + 657.0, + 902.0, + 608.0, + 902.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 689.0, + 862.0, + 789.0, + 862.0, + 789.0, + 902.0, + 689.0, + 902.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 826.0, + 862.0, + 1003.0, + 862.0, + 1003.0, + 902.0, + 826.0, + 902.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1097.0, + 862.0, + 1109.0, + 862.0, + 1109.0, + 902.0, + 1097.0, + 902.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 523.0, + 549.0, + 523.0, + 549.0, + 565.0, + 295.0, + 565.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 633.0, + 523.0, + 682.0, + 523.0, + 682.0, + 565.0, + 633.0, + 565.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 776.0, + 523.0, + 928.0, + 523.0, + 928.0, + 565.0, + 776.0, + 565.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 739.0, + 737.0, + 739.0, + 737.0, + 777.0, + 294.0, + 777.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 774.0, + 739.0, + 871.0, + 739.0, + 871.0, + 777.0, + 774.0, + 777.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 987.0, + 758.0, + 987.0, + 758.0, + 1029.0, + 293.0, + 1029.0 + ], + "score": 1.0, + 
"text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1854.0, + 510.0, + 1854.0, + 510.0, + 1891.0, + 297.0, + 1891.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 285.0, + 341.0, + 285.0, + 341.0, + 323.0, + 294.0, + 323.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 435.0, + 285.0, + 907.0, + 285.0, + 907.0, + 323.0, + 435.0, + 323.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1086.0, + 421.0, + 1086.0, + 421.0, + 1126.0, + 294.0, + 1126.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 226.0, + 613.0, + 226.0, + 613.0, + 267.0, + 295.0, + 267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 663.0, + 226.0, + 796.0, + 226.0, + 796.0, + 267.0, + 663.0, + 267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 891.0, + 226.0, + 894.0, + 226.0, + 894.0, + 267.0, + 891.0, + 267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 929.0, + 613.0, + 929.0, + 613.0, + 974.0, + 294.0, + 974.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 697.0, + 929.0, + 761.0, + 929.0, + 761.0, + 974.0, + 697.0, + 974.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 20, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 8, + "poly": [ + 351, + 1238, + 1348, + 1238, + 1348, + 1510, + 351, + 1510 + ], + "score": 0.968 + }, + { + "category_id": 8, + "poly": [ + 296, + 344, + 1403, + 344, + 1403, + 523, + 296, + 523 + ], + "score": 0.957 + }, + { + "category_id": 8, + "poly": [ + 399, + 1714, + 1298, + 1714, + 1298, + 1798, + 399, + 1798 + ], + "score": 0.945 + }, + { + "category_id": 1, + "poly": [ + 294, + 228, + 1402, + 228, + 1402, + 294, + 294, + 294 + ], + "score": 0.942 + }, + { + "category_id": 1, + "poly": [ + 297, + 857, + 1404, + 
857, + 1404, + 919, + 297, + 919 + ], + "score": 0.942 + }, + { + "category_id": 8, + "poly": [ + 329, + 1093, + 1323, + 1093, + 1323, + 1179, + 329, + 1179 + ], + "score": 0.942 + }, + { + "category_id": 8, + "poly": [ + 417, + 968, + 1279, + 968, + 1279, + 1053, + 417, + 1053 + ], + "score": 0.941 + }, + { + "category_id": 8, + "poly": [ + 551, + 576, + 1139, + 576, + 1139, + 618, + 551, + 618 + ], + "score": 0.931 + }, + { + "category_id": 1, + "poly": [ + 298, + 2002, + 725, + 2002, + 725, + 2035, + 298, + 2035 + ], + "score": 0.928 + }, + { + "category_id": 1, + "poly": [ + 298, + 619, + 576, + 619, + 576, + 651, + 298, + 651 + ], + "score": 0.924 + }, + { + "category_id": 2, + "poly": [ + 297, + 73, + 858, + 73, + 858, + 106, + 297, + 106 + ], + "score": 0.924 + }, + { + "category_id": 1, + "poly": [ + 298, + 1056, + 777, + 1056, + 777, + 1089, + 298, + 1089 + ], + "score": 0.923 + }, + { + "category_id": 8, + "poly": [ + 477, + 655, + 1219, + 655, + 1219, + 698, + 477, + 698 + ], + "score": 0.921 + }, + { + "category_id": 1, + "poly": [ + 297, + 934, + 537, + 934, + 537, + 964, + 297, + 964 + ], + "score": 0.921 + }, + { + "category_id": 1, + "poly": [ + 298, + 1803, + 791, + 1803, + 791, + 1834, + 298, + 1834 + ], + "score": 0.919 + }, + { + "category_id": 1, + "poly": [ + 290, + 1196, + 1368, + 1196, + 1368, + 1231, + 290, + 1231 + ], + "score": 0.91 + }, + { + "category_id": 8, + "poly": [ + 320, + 736, + 1374, + 736, + 1374, + 823, + 320, + 823 + ], + "score": 0.897 + }, + { + "category_id": 9, + "poly": [ + 1351, + 821, + 1401, + 821, + 1401, + 849, + 1351, + 849 + ], + "score": 0.889 + }, + { + "category_id": 1, + "poly": [ + 297, + 307, + 654, + 307, + 654, + 340, + 297, + 340 + ], + "score": 0.888 + }, + { + "category_id": 1, + "poly": [ + 303, + 1632, + 1356, + 1632, + 1356, + 1708, + 303, + 1708 + ], + "score": 0.884 + }, + { + "category_id": 9, + "poly": [ + 1351, + 1921, + 1401, + 1921, + 1401, + 1951, + 1351, + 1951 + ], + "score": 0.879 + }, + 
{ + "category_id": 1, + "poly": [ + 316, + 699, + 1002, + 699, + 1002, + 731, + 316, + 731 + ], + "score": 0.878 + }, + { + "category_id": 9, + "poly": [ + 1350, + 661, + 1401, + 661, + 1401, + 692, + 1350, + 692 + ], + "score": 0.875 + }, + { + "category_id": 9, + "poly": [ + 1351, + 1966, + 1401, + 1966, + 1401, + 1996, + 1351, + 1996 + ], + "score": 0.873 + }, + { + "category_id": 9, + "poly": [ + 1350, + 582, + 1401, + 582, + 1401, + 613, + 1350, + 613 + ], + "score": 0.873 + }, + { + "category_id": 9, + "poly": [ + 1351, + 1472, + 1401, + 1472, + 1401, + 1502, + 1351, + 1502 + ], + "score": 0.868 + }, + { + "category_id": 9, + "poly": [ + 1352, + 1142, + 1401, + 1142, + 1401, + 1174, + 1352, + 1174 + ], + "score": 0.866 + }, + { + "category_id": 9, + "poly": [ + 1351, + 485, + 1401, + 485, + 1401, + 516, + 1351, + 516 + ], + "score": 0.86 + }, + { + "category_id": 2, + "poly": [ + 834, + 2087, + 866, + 2087, + 866, + 2113, + 834, + 2113 + ], + "score": 0.857 + }, + { + "category_id": 9, + "poly": [ + 1353, + 1844, + 1401, + 1844, + 1401, + 1876, + 1353, + 1876 + ], + "score": 0.851 + }, + { + "category_id": 8, + "poly": [ + 682, + 1918, + 826, + 1918, + 826, + 1952, + 682, + 1952 + ], + "score": 0.847 + }, + { + "category_id": 1, + "poly": [ + 309, + 538, + 1087, + 538, + 1087, + 572, + 309, + 572 + ], + "score": 0.843 + }, + { + "category_id": 8, + "poly": [ + 287, + 1839, + 1328, + 1839, + 1328, + 1880, + 287, + 1880 + ], + "score": 0.83 + }, + { + "category_id": 1, + "poly": [ + 297, + 1595, + 500, + 1595, + 500, + 1627, + 297, + 1627 + ], + "score": 0.797 + }, + { + "category_id": 8, + "poly": [ + 683, + 1961, + 1014, + 1961, + 1014, + 1999, + 683, + 1999 + ], + "score": 0.796 + }, + { + "category_id": 1, + "poly": [ + 299, + 1536, + 1075, + 1536, + 1075, + 1571, + 299, + 1571 + ], + "score": 0.651 + }, + { + "category_id": 1, + "poly": [ + 324, + 1883, + 960, + 1883, + 960, + 1914, + 324, + 1914 + ], + "score": 0.348 + }, + { + "category_id": 0, + "poly": 
[ + 299, + 1536, + 1075, + 1536, + 1075, + 1571, + 299, + 1571 + ], + "score": 0.333 + }, + { + "category_id": 8, + "poly": [ + 680, + 1917, + 1015, + 1917, + 1015, + 2000, + 680, + 2000 + ], + "score": 0.229 + }, + { + "category_id": 0, + "poly": [ + 297, + 1595, + 500, + 1595, + 500, + 1627, + 297, + 1627 + ], + "score": 0.121 + }, + { + "category_id": 8, + "poly": [ + 329, + 735, + 1355, + 735, + 1355, + 823, + 329, + 823 + ], + "score": 0.091 + }, + { + "category_id": 14, + "poly": [ + 350, + 1233, + 1347, + 1233, + 1347, + 1515, + 350, + 1515 + ], + "score": 0.95, + "latex": "\\begin{array} { r l r } { { \\mathbb { E } [ \\varphi _ { k } ( p ^ { k } ) - \\varphi _ { k } ( p ^ { * } ) | \\mathcal { F } _ { k } ] = \\sum _ { i = 1 } ^ { n + 1 } \\mathbb { E } [ \\varphi _ { i , k } ( p ^ { k } ) - \\varphi _ { i , k } ( p ^ { * } ) | \\mathcal { F } _ { k } ] } } \\\\ & { } & { \\geq \\frac { 7 } { 2 } \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\frac { 1 } { 2 \\tau } \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } } \\\\ & { } & { + \\rho _ { k } ( 1 - \\overline { { \\rho } } L ) \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } - 2 \\rho _ { k } ^ { 2 } N L ^ { 3 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } } \\\\ & { } & { - \\rho _ { k } ^ { 2 } N L ( 1 + 2 \\| B ( z ^ { * } ) \\| ^ { 2 } ) . 
} \\end{array}" + }, + { + "category_id": 14, + "poly": [ + 310, + 340, + 1407, + 340, + 1407, + 526, + 310, + 526 + ], + "score": 0.93, + "latex": "\\begin{array} { r l } & { \\circ _ { n + 1 , k } ( p ^ { k } ) - \\varphi _ { n + 1 , k } ( p ^ { * } ) \\geq \\rho _ { k } ( 1 - \\rho _ { k } L ) \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } + \\rho _ { k } ( 1 - 2 \\rho _ { k } L ) \\langle \\epsilon ^ { k } , B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\rangle } \\\\ & { \\qquad + \\langle z ^ { k } - x _ { n + 1 } ^ { k } , e ^ { k } \\rangle + \\langle x _ { n + 1 } ^ { k } - z ^ { * } , e ^ { k } \\rangle - \\rho _ { k } ^ { 2 } L \\| \\epsilon ^ { k } \\| ^ { 2 } } \\\\ & { \\qquad = \\rho _ { k } ( 1 - \\rho _ { k } L ) \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } - \\rho _ { k } ^ { 2 } L \\| \\epsilon ^ { k } \\| ^ { 2 } } \\\\ & { \\qquad + \\rho _ { k } ( 1 - 2 \\rho _ { k } L ) \\langle \\epsilon ^ { k } , B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\rangle + \\langle z ^ { k } - z ^ { * } , e ^ { k } \\rangle . 
\\qquad ( 4 1 ) } \\end{array}" + }, + { + "category_id": 14, + "poly": [ + 323, + 730, + 1373, + 730, + 1373, + 828, + 323, + 828 + ], + "score": 0.92, + "latex": "\\begin{array} { r l } & { \\mathbb { E } [ \\varphi _ { n + 1 , k } ( p ^ { k } ) - \\varphi _ { n + 1 , k } ( p ^ { * } ) \\mid \\mathcal { F } _ { k } ] \\ge \\rho _ { k } ( 1 - \\rho _ { k } L ) \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } - \\rho _ { k } ^ { 2 } L \\mathbb { E } [ \\| \\epsilon ^ { k } \\| ^ { 2 } \\vert \\mathcal { F } _ { k } ] } \\\\ & { \\qquad \\ge \\rho _ { k } ( 1 - \\bar { \\rho } L ) \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } - \\rho _ { k } ^ { 2 } N L ( 1 + \\| B ( z ^ { k } ) \\| ^ { 2 } ) , } \\end{array}" + }, + { + "category_id": 14, + "poly": [ + 400, + 1710, + 1297, + 1710, + 1297, + 1801, + 400, + 1801 + ], + "score": 0.92, + "latex": "T _ { k } \\doteq \\frac { \\tau } { \\overline { { \\rho } } } \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\frac { 1 } { \\overline { { \\rho } } \\tau } \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + 2 ( 1 - \\overline { { \\rho } } L ) \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } ," + }, + { + "category_id": 13, + "poly": [ + 296, + 888, + 430, + 888, + 430, + 920, + 296, + 920 + ], + "score": 0.9, + "latex": "1 - \\overline { { \\rho } } L > 0" + }, + { + "category_id": 13, + "poly": [ + 1036, + 1199, + 1129, + 1199, + 1129, + 1226, + 1036, + 1226 + ], + "score": 0.9, + "latex": "i \\in 1 . . 
n" + }, + { + "category_id": 14, + "poly": [ + 327, + 1088, + 1326, + 1088, + 1326, + 1184, + 327, + 1184 + ], + "score": 0.9, + "latex": "\\begin{array} { r l } & { \\mathbb { E } [ \\varphi _ { n + 1 , k } ( p ^ { k } ) - \\varphi _ { n + 1 , k } ( p ^ { * } ) | \\mathcal { F } _ { k } ] \\geq \\rho _ { k } ( 1 - \\overline { { \\rho } } L ) \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } } \\\\ & { \\qquad - 2 \\rho _ { k } ^ { 2 } N L ^ { 3 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - \\rho _ { k } ^ { 2 } N L ( 1 + 2 \\| B ( z ^ { * } ) \\| ^ { 2 } ) . } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 796, + 541, + 832, + 541, + 832, + 570, + 796, + 570 + ], + "score": 0.89, + "latex": "\\mathcal { F } _ { k }" + }, + { + "category_id": 14, + "poly": [ + 681, + 1914, + 1018, + 1914, + 1018, + 2001, + 681, + 2001 + ], + "score": 0.89, + "latex": "\\begin{array} { l } { C _ { 3 } = 4 N L ^ { 3 } } \\\\ { C _ { 4 } = 2 N L ( 1 + 2 \\| B ( z ^ { \\ast } ) \\| ^ { 2 } ) . } \\end{array}" + }, + { + "category_id": 14, + "poly": [ + 416, + 964, + 1284, + 964, + 1284, + 1056, + 416, + 1056 + ], + "score": 0.88, + "latex": "\\begin{array} { r l } & { \\| B ( z ^ { k } ) \\| ^ { 2 } = \\| B ( z ^ { k } ) - B ( z ^ { * } ) + B ( z ^ { * } ) \\| ^ { 2 } } \\\\ & { \\qquad \\leq 2 L ^ { 2 } \\| z ^ { k } - z ^ { * } \\| ^ { 2 } + 2 \\| B ( z ^ { * } ) \\| ^ { 2 } \\leq 2 L ^ { 2 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + 2 \\| B ( z ^ { * } ) \\| ^ { 2 } . 
} \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 371, + 1884, + 405, + 1884, + 405, + 1913, + 371, + 1913 + ], + "score": 0.88, + "latex": "C _ { 1 }" + }, + { + "category_id": 13, + "poly": [ + 457, + 1883, + 492, + 1883, + 492, + 1913, + 457, + 1913 + ], + "score": 0.88, + "latex": "C _ { 2 }" + }, + { + "category_id": 14, + "poly": [ + 317, + 1836, + 1335, + 1836, + 1335, + 1879, + 317, + 1879 + ], + "score": 0.88, + "latex": "\\begin{array} { r } { \\mathbb { E } [ \\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } | \\mathcal { F } _ { k } ] \\le \\big ( 1 + C _ { 1 } \\alpha _ { k } ^ { 2 } + C _ { 3 } \\alpha _ { k } \\rho _ { k } ^ { 2 } \\big ) \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - \\alpha _ { k } \\rho _ { k } T _ { k } + C _ { 2 } \\alpha _ { k } ^ { 2 } + C _ { 4 } \\alpha _ { k } \\rho _ { k } ^ { 2 } } \\end{array}" + }, + { + "category_id": 14, + "poly": [ + 477, + 654, + 1217, + 654, + 1217, + 699, + 477, + 699 + ], + "score": 0.87, + "latex": "\\begin{array} { r } { \\mathbb { E } \\big [ \\langle \\epsilon ^ { k } , B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\rangle \\big | \\mathcal { F } _ { k } \\big ] = \\langle \\mathbb { E } [ \\epsilon ^ { k } | \\mathcal { F } _ { k } ] , B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\rangle = 0 . 
} \\end{array}" + }, + { + "category_id": 14, + "poly": [ + 343, + 1631, + 1352, + 1631, + 1352, + 1673, + 343, + 1673 + ], + "score": 0.87, + "latex": "\\begin{array} { r } { \\mathbb { E } [ \\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } | \\mathcal { F } _ { k } ] \\le ( 1 + C _ { 1 } \\alpha _ { k } ^ { 2 } ) \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - 2 \\alpha _ { k } \\mathbb { E } [ \\varphi _ { k } ( p ^ { k } ) - \\varphi _ { k } ( p ^ { * } ) | \\mathcal { F } _ { k } ] + C _ { 2 } \\alpha _ { k } ^ { 2 } , } \\end{array}" + }, + { + "category_id": 14, + "poly": [ + 554, + 574, + 1141, + 574, + 1141, + 619, + 554, + 619 + ], + "score": 0.86, + "latex": "{ \\mathbb E } \\big [ \\langle z ^ { k } - z ^ { * } , e ^ { k } \\rangle \\bigm | \\mathcal F _ { k } \\big ] = \\langle z ^ { k } - z ^ { * } , { \\mathbb E } [ e ^ { k } | \\mathcal F _ { k } ] \\rangle = 0 ." + }, + { + "category_id": 13, + "poly": [ + 702, + 1202, + 735, + 1202, + 735, + 1230, + 702, + 1230 + ], + "score": 0.84, + "latex": "\\varphi _ { k }" + }, + { + "category_id": 13, + "poly": [ + 328, + 263, + 352, + 263, + 352, + 289, + 328, + 289 + ], + "score": 0.83, + "latex": "B" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 72.0, + 858.0, + 72.0, + 858.0, + 108.0, + 297.0, + 108.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 830.0, + 2085.0, + 870.0, + 2085.0, + 870.0, + 2125.0, + 830.0, + 2125.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1536.0, + 1078.0, + 1536.0, + 1078.0, + 1575.0, + 294.0, + 1575.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1590.0, + 505.0, + 1590.0, + 505.0, + 1633.0, + 293.0, + 1633.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 226.0, + 1405.0, + 226.0, + 1405.0, + 268.0, + 291.0, + 268.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 257.0, + 327.0, + 257.0, + 
327.0, + 293.0, + 291.0, + 293.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 257.0, + 365.0, + 257.0, + 365.0, + 293.0, + 353.0, + 293.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 854.0, + 1407.0, + 854.0, + 1407.0, + 892.0, + 292.0, + 892.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 884.0, + 295.0, + 884.0, + 295.0, + 921.0, + 291.0, + 921.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 431.0, + 884.0, + 442.0, + 884.0, + 442.0, + 921.0, + 431.0, + 921.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 2000.0, + 729.0, + 2000.0, + 729.0, + 2039.0, + 295.0, + 2039.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 616.0, + 576.0, + 616.0, + 576.0, + 656.0, + 296.0, + 656.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1054.0, + 777.0, + 1054.0, + 777.0, + 1093.0, + 296.0, + 1093.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 933.0, + 539.0, + 933.0, + 539.0, + 968.0, + 295.0, + 968.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1800.0, + 792.0, + 1800.0, + 792.0, + 1839.0, + 297.0, + 1839.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1192.0, + 701.0, + 1192.0, + 701.0, + 1236.0, + 294.0, + 1236.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 736.0, + 1192.0, + 1035.0, + 1192.0, + 1035.0, + 1236.0, + 736.0, + 1236.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1130.0, + 1192.0, + 1374.0, + 1192.0, + 1374.0, + 1236.0, + 1130.0, + 1236.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 306.0, + 655.0, + 306.0, + 655.0, + 345.0, + 297.0, + 345.0 + ], + "score": 1.0, + 
"text": "" + }, + { + "category_id": 15, + "poly": [ + 1353.0, + 1628.0, + 1357.0, + 1628.0, + 1357.0, + 1677.0, + 1353.0, + 1677.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1670.0, + 1223.0, + 1670.0, + 1223.0, + 1711.0, + 295.0, + 1711.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 310.0, + 694.0, + 1008.0, + 694.0, + 1008.0, + 737.0, + 310.0, + 737.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 303.0, + 536.0, + 795.0, + 536.0, + 795.0, + 575.0, + 303.0, + 575.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 833.0, + 536.0, + 1088.0, + 536.0, + 1088.0, + 575.0, + 833.0, + 575.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1590.0, + 505.0, + 1590.0, + 505.0, + 1633.0, + 293.0, + 1633.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1536.0, + 1078.0, + 1536.0, + 1078.0, + 1575.0, + 294.0, + 1575.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 1880.0, + 370.0, + 1880.0, + 370.0, + 1918.0, + 318.0, + 1918.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 406.0, + 1880.0, + 456.0, + 1880.0, + 456.0, + 1918.0, + 406.0, + 1918.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 493.0, + 1880.0, + 962.0, + 1880.0, + 962.0, + 1918.0, + 493.0, + 1918.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 21, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 297, + 1689, + 1406, + 1689, + 1406, + 1827, + 297, + 1827 + ], + "score": 0.976 + }, + { + "category_id": 1, + "poly": [ + 297, + 1175, + 1406, + 1175, + 1406, + 1274, + 297, + 1274 + ], + "score": 0.974 + }, + { + "category_id": 1, + "poly": [ + 296, + 666, + 1405, + 666, + 1405, + 804, + 296, + 804 + ], + "score": 0.974 + }, + 
{ + "category_id": 1, + "poly": [ + 297, + 814, + 1406, + 814, + 1406, + 916, + 297, + 916 + ], + "score": 0.971 + }, + { + "category_id": 1, + "poly": [ + 298, + 287, + 1402, + 287, + 1402, + 382, + 298, + 382 + ], + "score": 0.967 + }, + { + "category_id": 8, + "poly": [ + 322, + 1850, + 1376, + 1850, + 1376, + 1988, + 322, + 1988 + ], + "score": 0.959 + }, + { + "category_id": 1, + "poly": [ + 294, + 390, + 1399, + 390, + 1399, + 469, + 294, + 469 + ], + "score": 0.953 + }, + { + "category_id": 8, + "poly": [ + 457, + 486, + 1244, + 486, + 1244, + 573, + 457, + 573 + ], + "score": 0.948 + }, + { + "category_id": 1, + "poly": [ + 300, + 928, + 1401, + 928, + 1401, + 1002, + 300, + 1002 + ], + "score": 0.947 + }, + { + "category_id": 8, + "poly": [ + 773, + 1350, + 1025, + 1350, + 1025, + 1437, + 773, + 1437 + ], + "score": 0.947 + }, + { + "category_id": 1, + "poly": [ + 297, + 1011, + 1408, + 1011, + 1408, + 1084, + 297, + 1084 + ], + "score": 0.942 + }, + { + "category_id": 8, + "poly": [ + 779, + 1617, + 918, + 1617, + 918, + 1658, + 779, + 1658 + ], + "score": 0.929 + }, + { + "category_id": 1, + "poly": [ + 299, + 2001, + 787, + 2001, + 787, + 2036, + 299, + 2036 + ], + "score": 0.927 + }, + { + "category_id": 2, + "poly": [ + 297, + 73, + 858, + 73, + 858, + 106, + 297, + 106 + ], + "score": 0.924 + }, + { + "category_id": 0, + "poly": [ + 300, + 1119, + 824, + 1119, + 824, + 1153, + 300, + 1153 + ], + "score": 0.919 + }, + { + "category_id": 1, + "poly": [ + 290, + 589, + 1371, + 589, + 1371, + 632, + 290, + 632 + ], + "score": 0.913 + }, + { + "category_id": 9, + "poly": [ + 1351, + 1373, + 1400, + 1373, + 1400, + 1405, + 1351, + 1405 + ], + "score": 0.91 + }, + { + "category_id": 1, + "poly": [ + 364, + 1298, + 555, + 1298, + 555, + 1331, + 364, + 1331 + ], + "score": 0.908 + }, + { + "category_id": 1, + "poly": [ + 293, + 1564, + 1400, + 1564, + 1400, + 1601, + 293, + 1601 + ], + "score": 0.907 + }, + { + "category_id": 9, + "poly": [ + 1351, + 1619, + 
1400, + 1619, + 1400, + 1651, + 1351, + 1651 + ], + "score": 0.898 + }, + { + "category_id": 9, + "poly": [ + 1351, + 510, + 1401, + 510, + 1401, + 542, + 1351, + 542 + ], + "score": 0.882 + }, + { + "category_id": 2, + "poly": [ + 834, + 2087, + 865, + 2087, + 865, + 2113, + 834, + 2113 + ], + "score": 0.876 + }, + { + "category_id": 0, + "poly": [ + 300, + 228, + 684, + 228, + 684, + 262, + 300, + 262 + ], + "score": 0.856 + }, + { + "category_id": 1, + "poly": [ + 363, + 1461, + 1404, + 1461, + 1404, + 1499, + 363, + 1499 + ], + "score": 0.747 + }, + { + "category_id": 1, + "poly": [ + 362, + 1506, + 837, + 1506, + 837, + 1542, + 362, + 1542 + ], + "score": 0.414 + }, + { + "category_id": 13, + "poly": [ + 561, + 428, + 705, + 428, + 705, + 466, + 561, + 466 + ], + "score": 0.96, + "latex": "y _ { i } ^ { k } \\in A _ { i } ( x _ { i } ^ { k } )" + }, + { + "category_id": 14, + "poly": [ + 322, + 1847, + 1376, + 1847, + 1376, + 1990, + 322, + 1990 + ], + "score": 0.94, + "latex": "\\begin{array} { r l r } { { \\frac { \\tau } { \\rho } \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { r _ { k } ( v ) } ( v ) - w _ { i } ^ { r _ { k } ( v ) } ( v ) \\| ^ { 2 } + \\frac { 1 } { \\rho \\tau } \\sum _ { i = 1 } ^ { n } \\| z ^ { r _ { k } ( v ) } ( v ) - x _ { i } ^ { r _ { k } ( v ) } ( v ) \\| ^ { 2 } } } \\\\ & { } & { \\qquad + 2 ( 1 - \\overline { { \\rho } } L ) \\| B ( z ^ { r _ { k } ( v ) } ( v ) ) - w _ { n + 1 } ^ { r _ { k } ( v ) } ( v ) \\| ^ { 2 } \\to 0 . 
} \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 1228, + 1567, + 1292, + 1567, + 1292, + 1601, + 1228, + 1601 + ], + "score": 0.94, + "latex": "q _ { k } ( v )" + }, + { + "category_id": 13, + "poly": [ + 404, + 1240, + 524, + 1240, + 524, + 1274, + 404, + 1274 + ], + "score": 0.93, + "latex": "P [ \\Omega _ { i } ] = 1" + }, + { + "category_id": 14, + "poly": [ + 770, + 1346, + 1023, + 1346, + 1023, + 1438, + 770, + 1438 + ], + "score": 0.93, + "latex": "\\sum _ { k = 1 } ^ { \\infty } \\alpha _ { k } \\rho _ { k } T _ { k } ( v ) < \\infty ," + }, + { + "category_id": 13, + "poly": [ + 589, + 1759, + 786, + 1759, + 786, + 1796, + 589, + 1796 + ], + "score": 0.93, + "latex": "p ^ { r _ { k } ( v ) } ( v ) \\hat { p } ( v )" + }, + { + "category_id": 13, + "poly": [ + 479, + 671, + 657, + 671, + 657, + 706, + 479, + 706 + ], + "score": 0.93, + "latex": "i \\in \\{ 1 , \\ldots , n \\}" + }, + { + "category_id": 13, + "poly": [ + 610, + 814, + 867, + 814, + 867, + 852, + 610, + 852 + ], + "score": 0.93, + "latex": "\\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| \\to 0" + }, + { + "category_id": 13, + "poly": [ + 721, + 1693, + 839, + 1693, + 839, + 1727, + 721, + 1727 + ], + "score": 0.93, + "latex": "P [ \\Omega ^ { \\prime } ] = 1" + }, + { + "category_id": 13, + "poly": [ + 913, + 1762, + 1041, + 1762, + 1041, + 1799, + 913, + 1799 + ], + "score": 0.93, + "latex": "T _ { q _ { k } ( v ) } \\to 0" + }, + { + "category_id": 13, + "poly": [ + 1308, + 704, + 1398, + 704, + 1398, + 740, + 1308, + 740 + ], + "score": 0.93, + "latex": "x _ { i } ^ { k } \\hat { z }" + }, + { + "category_id": 13, + "poly": [ + 859, + 1013, + 1001, + 1013, + 1001, + 1047, + 859, + 1047 + ], + "score": 0.93, + "latex": "\\hat { w } _ { i } \\in A _ { i } ( \\hat { z } )" + }, + { + "category_id": 13, + "poly": [ + 1100, + 1691, + 1166, + 1691, + 1166, + 1727, + 1100, + 1727 + ], + "score": 0.93, + "latex": "p ^ { k } ( v )" + }, + { + "category_id": 13, + "poly": [ + 1070, + 
814, + 1241, + 814, + 1241, + 852, + 1070, + 852 + ], + "score": 0.93, + "latex": "w _ { n + 1 } ^ { k } \\to \\hat { w } _ { n + 1 }" + }, + { + "category_id": 13, + "poly": [ + 1190, + 1727, + 1353, + 1727, + 1353, + 1762, + 1190, + 1762 + ], + "score": 0.93, + "latex": "r _ { k } ( v ) \\subseteq q _ { k } ( v )" + }, + { + "category_id": 13, + "poly": [ + 840, + 960, + 999, + 960, + 999, + 1002, + 840, + 1002 + ], + "score": 0.93, + "latex": "\\textstyle \\sum _ { i = 1 } ^ { n + 1 } { \\hat { w } } _ { i } = 0" + }, + { + "category_id": 13, + "poly": [ + 298, + 963, + 607, + 963, + 607, + 1002, + 298, + 1002 + ], + "score": 0.93, + "latex": "\\{ ( z ^ { k } , w _ { 1 } ^ { k } , \\ldots , w _ { n + 1 } ^ { k } ) \\} \\subset \\mathcal { P }" + }, + { + "category_id": 13, + "poly": [ + 368, + 737, + 517, + 737, + 517, + 773, + 368, + 773 + ], + "score": 0.92, + "latex": "y _ { i } ^ { k } \\in A _ { i } ( x _ { i } ^ { k } )" + }, + { + "category_id": 13, + "poly": [ + 1109, + 670, + 1231, + 670, + 1231, + 705, + 1109, + 705 + ], + "score": 0.92, + "latex": "w _ { i } ^ { k } \\hat { w } _ { i }" + }, + { + "category_id": 13, + "poly": [ + 1269, + 1764, + 1397, + 1764, + 1397, + 1799, + 1269, + 1799 + ], + "score": 0.92, + "latex": "T _ { r _ { k } ( v ) } \\to 0" + }, + { + "category_id": 13, + "poly": [ + 297, + 1044, + 458, + 1044, + 458, + 1085, + 297, + 1085 + ], + "score": 0.92, + "latex": "\\textstyle \\sum _ { i = 1 } ^ { n + 1 } { \\hat { w } } _ { i } = 0" + }, + { + "category_id": 13, + "poly": [ + 424, + 593, + 575, + 593, + 575, + 626, + 424, + 626 + ], + "score": 0.92, + "latex": "\\xi _ { 1 } , \\xi _ { 2 } , \\xi _ { 3 } > 0" + }, + { + "category_id": 13, + "poly": [ + 341, + 1693, + 567, + 1693, + 567, + 1725, + 341, + 1725 + ], + "score": 0.92, + "latex": "\\Omega ^ { \\prime } = \\Omega _ { 1 } \\cap \\Omega _ { 2 } \\cap \\Omega _ { 3 }" + }, + { + "category_id": 14, + "poly": [ + 457, + 480, + 1241, + 480, + 1241, + 574, + 457, + 574 + ], + 
"score": 0.92, + "latex": "\\xi _ { 1 } \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\xi _ { 2 } \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + \\xi _ { 3 } \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } \\to 0" + }, + { + "category_id": 13, + "poly": [ + 674, + 2002, + 778, + 2002, + 778, + 2036, + 674, + 2036 + ], + "score": 0.92, + "latex": "\\hat { p } ( v ) \\in S" + }, + { + "category_id": 13, + "poly": [ + 298, + 772, + 428, + 772, + 428, + 804, + 298, + 804 + ], + "score": 0.92, + "latex": "\\hat { w } _ { i } \\in A _ { i } ( \\hat { z } )" + }, + { + "category_id": 13, + "poly": [ + 755, + 670, + 955, + 670, + 955, + 705, + 755, + 705 + ], + "score": 0.92, + "latex": "\\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| \\to 0" + }, + { + "category_id": 13, + "poly": [ + 469, + 1465, + 552, + 1465, + 552, + 1495, + 469, + 1495 + ], + "score": 0.92, + "latex": "v \\in \\Omega _ { 2 }" + }, + { + "category_id": 13, + "poly": [ + 595, + 1177, + 764, + 1177, + 764, + 1214, + 595, + 1214 + ], + "score": 0.92, + "latex": "\\sum \\alpha _ { k } \\rho _ { k } ^ { 2 } < \\infty" + }, + { + "category_id": 13, + "poly": [ + 367, + 1565, + 569, + 1565, + 569, + 1602, + 367, + 1602 + ], + "score": 0.92, + "latex": "\\textstyle \\sum _ { k = 1 } ^ { \\infty } \\alpha _ { k } \\rho _ { k } = \\infty" + }, + { + "category_id": 13, + "poly": [ + 843, + 1568, + 926, + 1568, + 926, + 1598, + 843, + 1598 + ], + "score": 0.92, + "latex": "v \\in \\Omega _ { 1 }" + }, + { + "category_id": 13, + "poly": [ + 877, + 885, + 1035, + 885, + 1035, + 916, + 877, + 916 + ], + "score": 0.92, + "latex": "\\hat { w } _ { n + 1 } = B ( \\hat { z } )" + }, + { + "category_id": 13, + "poly": [ + 374, + 1177, + 530, + 1177, + 530, + 1214, + 374, + 1214 + ], + "score": 0.92, + "latex": "\\textstyle \\sum _ { k } \\alpha _ { k } ^ { 2 } < \\infty" + }, + { + "category_id": 13, + "poly": [ + 298, + 704, + 401, + 704, + 401, + 
740, + 298, + 740 + ], + "score": 0.92, + "latex": "y _ { i } ^ { k } \\hat { w } _ { i }" + }, + { + "category_id": 13, + "poly": [ + 573, + 737, + 788, + 737, + 788, + 773, + 573, + 773 + ], + "score": 0.92, + "latex": "( x _ { i } ^ { k } , y _ { i } ^ { k } ) ( \\hat { z } , \\hat { w } _ { i } )" + }, + { + "category_id": 13, + "poly": [ + 1277, + 1210, + 1401, + 1210, + 1401, + 1241, + 1277, + 1241 + ], + "score": 0.91, + "latex": "\\Omega _ { 1 } , \\Omega _ { 2 } , \\Omega _ { 3 }" + }, + { + "category_id": 13, + "poly": [ + 780, + 706, + 957, + 706, + 957, + 740, + 780, + 740 + ], + "score": 0.91, + "latex": "\\lVert z ^ { k } - x _ { i } ^ { k } \\rVert \\to 0" + }, + { + "category_id": 13, + "poly": [ + 1171, + 1013, + 1341, + 1013, + 1341, + 1047, + 1171, + 1047 + ], + "score": 0.91, + "latex": "\\hat { w } _ { n + 1 } = B ( \\hat { z } )" + }, + { + "category_id": 13, + "poly": [ + 1205, + 391, + 1404, + 391, + 1404, + 431, + 1205, + 431 + ], + "score": 0.91, + "latex": "\\{ ( x _ { i } ^ { k } , y _ { i } ^ { k } ) _ { i = 1 } ^ { n + 1 } \\} \\in" + }, + { + "category_id": 13, + "poly": [ + 1063, + 705, + 1152, + 705, + 1152, + 735, + 1063, + 735 + ], + "score": 0.91, + "latex": "z ^ { k } \\hat { z }" + }, + { + "category_id": 14, + "poly": [ + 779, + 1617, + 920, + 1617, + 920, + 1657, + 779, + 1657 + ], + "score": 0.91, + "latex": "T _ { q _ { k } ( v ) } \\to 0 ." 
+ }, + { + "category_id": 13, + "poly": [ + 567, + 1241, + 683, + 1241, + 683, + 1272, + 567, + 1272 + ], + "score": 0.91, + "latex": "i = { 1 , 2 , 3 }" + }, + { + "category_id": 13, + "poly": [ + 505, + 1729, + 569, + 1729, + 569, + 1762, + 505, + 1762 + ], + "score": 0.91, + "latex": "q _ { k } ( v )" + }, + { + "category_id": 13, + "poly": [ + 940, + 1693, + 1019, + 1693, + 1019, + 1723, + 940, + 1723 + ], + "score": 0.91, + "latex": "v \\in \\Omega ^ { \\prime }" + }, + { + "category_id": 13, + "poly": [ + 768, + 850, + 1065, + 850, + 1065, + 885, + 768, + 885 + ], + "score": 0.91, + "latex": "( z ^ { k } , B ( z ^ { k } ) ) ( \\hat { z } , \\hat { w } _ { n + 1 } )" + }, + { + "category_id": 13, + "poly": [ + 1156, + 928, + 1363, + 928, + 1363, + 964, + 1156, + 964 + ], + "score": 0.91, + "latex": "\\left( \\hat { z } , \\hat { w } _ { 1 } , \\dots , \\hat { w } _ { n + 1 } \\right)" + }, + { + "category_id": 13, + "poly": [ + 483, + 1013, + 753, + 1013, + 753, + 1047, + 483, + 1047 + ], + "score": 0.91, + "latex": "\\hat { p } = ( \\hat { z } , \\hat { w } _ { 1 } , \\dots , \\hat { w } _ { n + 1 } )" + }, + { + "category_id": 13, + "poly": [ + 1296, + 594, + 1364, + 594, + 1364, + 626, + 1296, + 626 + ], + "score": 0.9, + "latex": "\\hat { p } \\in \\mathcal S" + }, + { + "category_id": 13, + "poly": [ + 298, + 850, + 485, + 850, + 485, + 886, + 298, + 886 + ], + "score": 0.9, + "latex": "B ( z ^ { k } ) \\to \\hat { w } _ { n + 1 }" + }, + { + "category_id": 13, + "poly": [ + 944, + 391, + 1136, + 391, + 1136, + 430, + 944, + 430 + ], + "score": 0.9, + "latex": "\\{ ( w _ { i } ^ { k } ) _ { i = 1 } ^ { n + 1 } \\} \\ \\in \\ { \\mathcal { P } }" + }, + { + "category_id": 13, + "poly": [ + 356, + 1726, + 460, + 1726, + 460, + 1760, + 356, + 1760 + ], + "score": 0.9, + "latex": "p ^ { q _ { k } ( v ) } ( v )" + }, + { + "category_id": 13, + "poly": [ + 298, + 1761, + 476, + 1761, + 476, + 1796, + 298, + 1796 + ], + "score": 0.9, + "latex": "\\hat { p } ( v 
) \\in \\mathbb { R } ^ { ( n + 2 ) d }" + }, + { + "category_id": 13, + "poly": [ + 1242, + 1050, + 1312, + 1050, + 1312, + 1081, + 1242, + 1081 + ], + "score": 0.89, + "latex": "\\hat { p } \\in \\mathcal S" + }, + { + "category_id": 13, + "poly": [ + 1073, + 772, + 1165, + 772, + 1165, + 800, + 1073, + 800 + ], + "score": 0.88, + "latex": "i \\in 1 . . n" + }, + { + "category_id": 13, + "poly": [ + 701, + 321, + 734, + 321, + 734, + 350, + 701, + 350 + ], + "score": 0.88, + "latex": "T _ { k }" + }, + { + "category_id": 13, + "poly": [ + 298, + 427, + 402, + 427, + 402, + 460, + 298, + 460 + ], + "score": 0.88, + "latex": "\\mathbb { R } ^ { 2 ( n + 1 ) d }" + }, + { + "category_id": 13, + "poly": [ + 633, + 590, + 1219, + 590, + 1219, + 630, + 633, + 630 + ], + "score": 0.87, + "latex": "p ^ { k } \\doteq ( z ^ { k } , w _ { 1 } ^ { k } , \\ldots , w _ { n + 1 } ^ { k } ) \\to \\hat { p } \\doteq ( \\hat { z } , \\hat { w } _ { 1 } , \\ldots , \\hat { w } _ { n + 1 } )" + }, + { + "category_id": 13, + "poly": [ + 766, + 391, + 932, + 391, + 932, + 425, + 766, + 425 + ], + "score": 0.86, + "latex": "z ^ { k } \\in \\mathbb { R } ^ { ( n + 1 ) d }" + }, + { + "category_id": 13, + "poly": [ + 702, + 1461, + 852, + 1461, + 852, + 1499, + 702, + 1499 + ], + "score": 0.86, + "latex": "\\| p ^ { k } ( v ) - p ^ { * } \\|" + }, + { + "category_id": 13, + "poly": [ + 608, + 1464, + 691, + 1464, + 691, + 1497, + 608, + 1497 + ], + "score": 0.83, + "latex": "p ^ { * } \\in { \\mathcal { S } }" + }, + { + "category_id": 13, + "poly": [ + 617, + 931, + 643, + 931, + 643, + 958, + 617, + 958 + ], + "score": 0.81, + "latex": "\\mathcal { P }" + }, + { + "category_id": 13, + "poly": [ + 733, + 967, + 757, + 967, + 757, + 995, + 733, + 995 + ], + "score": 0.81, + "latex": "\\mathcal { P }" + }, + { + "category_id": 13, + "poly": [ + 1078, + 1051, + 1101, + 1051, + 1101, + 1078, + 1078, + 1078 + ], + "score": 0.8, + "latex": "s" + }, + { + "category_id": 13, + "poly": [ + 471, + 
1300, + 554, + 1300, + 554, + 1330, + 471, + 1330 + ], + "score": 0.79, + "latex": "v \\in \\Omega _ { 1 }" + }, + { + "category_id": 13, + "poly": [ + 510, + 774, + 523, + 774, + 523, + 798, + 510, + 798 + ], + "score": 0.74, + "latex": "i" + }, + { + "category_id": 13, + "poly": [ + 1051, + 1014, + 1155, + 1014, + 1155, + 1044, + 1051, + 1044 + ], + "score": 0.71, + "latex": "i \\in 1 . . n" + }, + { + "category_id": 13, + "poly": [ + 853, + 428, + 1017, + 428, + 1017, + 468, + 853, + 468 + ], + "score": 0.67, + "latex": "\\textstyle \\sum _ { i = 1 } ^ { n + 1 } w _ { i } ^ { k } = 0" + }, + { + "category_id": 13, + "poly": [ + 472, + 1505, + 631, + 1505, + 631, + 1542, + 472, + 1542 + ], + "score": 0.65, + "latex": "v \\in \\Omega _ { 3 } , p ^ { k } ( v )" + }, + { + "category_id": 13, + "poly": [ + 471, + 1507, + 553, + 1507, + 553, + 1540, + 471, + 1540 + ], + "score": 0.42, + "latex": "v \\in \\Omega _ { 3 }" + }, + { + "category_id": 13, + "poly": [ + 564, + 1505, + 631, + 1505, + 631, + 1542, + 564, + 1542 + ], + "score": 0.35, + "latex": "p ^ { k } ( v )" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 72.0, + 858.0, + 72.0, + 858.0, + 108.0, + 297.0, + 108.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1119.0, + 828.0, + 1119.0, + 828.0, + 1156.0, + 296.0, + 1156.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 830.0, + 2084.0, + 870.0, + 2084.0, + 870.0, + 2123.0, + 830.0, + 2123.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 226.0, + 687.0, + 226.0, + 687.0, + 265.0, + 294.0, + 265.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1690.0, + 340.0, + 1690.0, + 340.0, + 1728.0, + 294.0, + 1728.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 568.0, + 1690.0, + 720.0, + 1690.0, + 720.0, + 1728.0, + 568.0, + 1728.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 840.0, + 1690.0, + 939.0, + 1690.0, + 939.0, + 1728.0, + 840.0, + 1728.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1020.0, + 1690.0, + 1099.0, + 1690.0, + 1099.0, + 1728.0, + 1020.0, + 1728.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1167.0, + 1690.0, + 1406.0, + 1690.0, + 1406.0, + 1728.0, + 1167.0, + 1728.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1721.0, + 355.0, + 1721.0, + 355.0, + 1765.0, + 294.0, + 1765.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 461.0, + 1721.0, + 504.0, + 1721.0, + 504.0, + 1765.0, + 461.0, + 1765.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 570.0, + 1721.0, + 1189.0, + 1721.0, + 1189.0, + 1765.0, + 570.0, + 1765.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1354.0, + 1721.0, + 1406.0, + 1721.0, + 1406.0, + 1765.0, + 1354.0, + 1765.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 287.0, + 1752.0, + 297.0, + 1752.0, + 297.0, + 1804.0, + 287.0, + 1804.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 477.0, + 1752.0, + 588.0, + 1752.0, + 588.0, + 1804.0, + 477.0, + 1804.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 787.0, + 1752.0, + 912.0, + 1752.0, + 912.0, + 1804.0, + 787.0, + 1804.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1042.0, + 1752.0, + 1268.0, + 1752.0, + 1268.0, + 1804.0, + 1042.0, + 1804.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1398.0, + 1752.0, + 1413.0, + 1752.0, + 1413.0, + 1804.0, + 1398.0, + 1804.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1787.0, + 382.0, + 1787.0, + 382.0, + 1829.0, + 291.0, + 1829.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 
15, + "poly": [ + 295.0, + 1175.0, + 373.0, + 1175.0, + 373.0, + 1217.0, + 295.0, + 1217.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 531.0, + 1175.0, + 594.0, + 1175.0, + 594.0, + 1217.0, + 531.0, + 1217.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 765.0, + 1175.0, + 1407.0, + 1175.0, + 1407.0, + 1217.0, + 765.0, + 1217.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1207.0, + 1276.0, + 1207.0, + 1276.0, + 1246.0, + 292.0, + 1246.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1402.0, + 1207.0, + 1406.0, + 1207.0, + 1406.0, + 1246.0, + 1402.0, + 1246.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1239.0, + 403.0, + 1239.0, + 403.0, + 1274.0, + 296.0, + 1274.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 525.0, + 1239.0, + 566.0, + 1239.0, + 566.0, + 1274.0, + 525.0, + 1274.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 684.0, + 1239.0, + 733.0, + 1239.0, + 733.0, + 1274.0, + 684.0, + 1274.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 667.0, + 478.0, + 667.0, + 478.0, + 709.0, + 293.0, + 709.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 658.0, + 667.0, + 754.0, + 667.0, + 754.0, + 709.0, + 658.0, + 709.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 956.0, + 667.0, + 1108.0, + 667.0, + 1108.0, + 709.0, + 956.0, + 709.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1232.0, + 667.0, + 1406.0, + 667.0, + 1406.0, + 709.0, + 1232.0, + 709.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 699.0, + 297.0, + 699.0, + 297.0, + 745.0, + 291.0, + 745.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 402.0, + 699.0, + 779.0, 
+ 699.0, + 779.0, + 745.0, + 402.0, + 745.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 958.0, + 699.0, + 1062.0, + 699.0, + 1062.0, + 745.0, + 958.0, + 745.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1153.0, + 699.0, + 1307.0, + 699.0, + 1307.0, + 745.0, + 1153.0, + 745.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1399.0, + 699.0, + 1413.0, + 699.0, + 1413.0, + 745.0, + 1399.0, + 745.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 734.0, + 367.0, + 734.0, + 367.0, + 777.0, + 293.0, + 777.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 518.0, + 734.0, + 572.0, + 734.0, + 572.0, + 777.0, + 518.0, + 777.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 789.0, + 734.0, + 1407.0, + 734.0, + 1407.0, + 777.0, + 789.0, + 777.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 429.0, + 768.0, + 509.0, + 768.0, + 509.0, + 807.0, + 429.0, + 807.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 524.0, + 768.0, + 1072.0, + 768.0, + 1072.0, + 807.0, + 524.0, + 807.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1166.0, + 768.0, + 1178.0, + 768.0, + 1178.0, + 807.0, + 1166.0, + 807.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 287.0, + 799.0, + 609.0, + 799.0, + 609.0, + 863.0, + 287.0, + 863.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 868.0, + 799.0, + 1069.0, + 799.0, + 1069.0, + 863.0, + 868.0, + 863.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1242.0, + 799.0, + 1412.0, + 799.0, + 1412.0, + 863.0, + 1242.0, + 863.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 845.0, + 297.0, + 845.0, + 297.0, + 890.0, + 294.0, + 890.0 + ], + "score": 1.0, 
+ "text": "" + }, + { + "category_id": 15, + "poly": [ + 486.0, + 845.0, + 767.0, + 845.0, + 767.0, + 890.0, + 486.0, + 890.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1066.0, + 845.0, + 1409.0, + 845.0, + 1409.0, + 890.0, + 1066.0, + 890.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 880.0, + 876.0, + 880.0, + 876.0, + 919.0, + 296.0, + 919.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1036.0, + 880.0, + 1048.0, + 880.0, + 1048.0, + 919.0, + 1036.0, + 919.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 286.0, + 1406.0, + 286.0, + 1406.0, + 325.0, + 293.0, + 325.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 321.0, + 700.0, + 321.0, + 700.0, + 352.0, + 297.0, + 352.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 735.0, + 321.0, + 1403.0, + 321.0, + 1403.0, + 352.0, + 735.0, + 352.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 348.0, + 762.0, + 348.0, + 762.0, + 386.0, + 293.0, + 386.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 289.0, + 379.0, + 765.0, + 379.0, + 765.0, + 439.0, + 289.0, + 439.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 933.0, + 379.0, + 943.0, + 379.0, + 943.0, + 439.0, + 933.0, + 439.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1137.0, + 379.0, + 1204.0, + 379.0, + 1204.0, + 439.0, + 1137.0, + 439.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1405.0, + 379.0, + 1408.0, + 379.0, + 1408.0, + 439.0, + 1405.0, + 439.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 284.0, + 407.0, + 297.0, + 407.0, + 297.0, + 487.0, + 284.0, + 487.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 403.0, + 
407.0, + 560.0, + 407.0, + 560.0, + 487.0, + 403.0, + 487.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 706.0, + 407.0, + 852.0, + 407.0, + 852.0, + 487.0, + 706.0, + 487.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1018.0, + 407.0, + 1040.0, + 407.0, + 1040.0, + 487.0, + 1018.0, + 487.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 923.0, + 616.0, + 923.0, + 616.0, + 968.0, + 294.0, + 968.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 644.0, + 923.0, + 1155.0, + 923.0, + 1155.0, + 968.0, + 644.0, + 968.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1364.0, + 923.0, + 1407.0, + 923.0, + 1407.0, + 968.0, + 1364.0, + 968.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 288.0, + 945.0, + 297.0, + 945.0, + 297.0, + 1017.0, + 288.0, + 1017.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 608.0, + 945.0, + 732.0, + 945.0, + 732.0, + 1017.0, + 608.0, + 1017.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 758.0, + 945.0, + 839.0, + 945.0, + 839.0, + 1017.0, + 758.0, + 1017.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1000.0, + 945.0, + 1021.0, + 945.0, + 1021.0, + 1017.0, + 1000.0, + 1017.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 282.0, + 1011.0, + 296.0, + 1011.0, + 296.0, + 1106.0, + 282.0, + 1106.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 459.0, + 1011.0, + 482.0, + 1011.0, + 482.0, + 1106.0, + 459.0, + 1106.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 754.0, + 1011.0, + 858.0, + 1011.0, + 858.0, + 1106.0, + 754.0, + 1106.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1002.0, + 1011.0, + 1050.0, + 1011.0, + 1050.0, + 1106.0, + 
1002.0, + 1106.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1156.0, + 1011.0, + 1170.0, + 1011.0, + 1170.0, + 1106.0, + 1156.0, + 1106.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1342.0, + 1011.0, + 1406.0, + 1011.0, + 1406.0, + 1106.0, + 1342.0, + 1106.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 2001.0, + 673.0, + 2001.0, + 673.0, + 2037.0, + 296.0, + 2037.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 779.0, + 2001.0, + 788.0, + 2001.0, + 788.0, + 2037.0, + 779.0, + 2037.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 288.0, + 586.0, + 423.0, + 586.0, + 423.0, + 635.0, + 288.0, + 635.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 576.0, + 586.0, + 632.0, + 586.0, + 632.0, + 635.0, + 576.0, + 635.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1220.0, + 586.0, + 1295.0, + 586.0, + 1295.0, + 635.0, + 1220.0, + 635.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1365.0, + 586.0, + 1377.0, + 586.0, + 1377.0, + 635.0, + 1365.0, + 635.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 360.0, + 1294.0, + 470.0, + 1294.0, + 470.0, + 1335.0, + 360.0, + 1335.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 555.0, + 1294.0, + 558.0, + 1294.0, + 558.0, + 1335.0, + 555.0, + 1335.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1554.0, + 366.0, + 1554.0, + 366.0, + 1612.0, + 291.0, + 1612.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 570.0, + 1554.0, + 842.0, + 1554.0, + 842.0, + 1612.0, + 570.0, + 1612.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 927.0, + 1554.0, + 1227.0, + 1554.0, + 1227.0, + 1612.0, + 927.0, + 1612.0 + ], + "score": 
1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1293.0, + 1554.0, + 1409.0, + 1554.0, + 1409.0, + 1612.0, + 1293.0, + 1612.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 358.0, + 1461.0, + 468.0, + 1461.0, + 468.0, + 1502.0, + 358.0, + 1502.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 553.0, + 1461.0, + 607.0, + 1461.0, + 607.0, + 1502.0, + 553.0, + 1502.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 692.0, + 1461.0, + 701.0, + 1461.0, + 701.0, + 1502.0, + 692.0, + 1502.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 853.0, + 1461.0, + 1409.0, + 1461.0, + 1409.0, + 1502.0, + 853.0, + 1502.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 360.0, + 1504.0, + 470.0, + 1504.0, + 470.0, + 1544.0, + 360.0, + 1544.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 632.0, + 1504.0, + 838.0, + 1504.0, + 838.0, + 1544.0, + 632.0, + 1544.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 22, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 295, + 1755, + 1407, + 1755, + 1407, + 1924, + 295, + 1924 + ], + "score": 0.981 + }, + { + "category_id": 1, + "poly": [ + 296, + 542, + 1404, + 542, + 1404, + 608, + 296, + 608 + ], + "score": 0.955 + }, + { + "category_id": 1, + "poly": [ + 296, + 226, + 1401, + 226, + 1401, + 294, + 296, + 294 + ], + "score": 0.949 + }, + { + "category_id": 1, + "poly": [ + 297, + 383, + 1405, + 383, + 1405, + 448, + 297, + 448 + ], + "score": 0.949 + }, + { + "category_id": 8, + "poly": [ + 705, + 1200, + 991, + 1200, + 991, + 1242, + 705, + 1242 + ], + "score": 0.945 + }, + { + "category_id": 8, + "poly": [ + 689, + 1564, + 1006, + 1564, + 1006, + 1607, + 689, + 1607 + ], + "score": 0.945 + }, + { + "category_id": 8, + "poly": [ + 618, + 1452, + 1081, + 1452, + 1081, + 
1495, + 618, + 1495 + ], + "score": 0.943 + }, + { + "category_id": 8, + "poly": [ + 718, + 648, + 980, + 648, + 980, + 692, + 718, + 692 + ], + "score": 0.943 + }, + { + "category_id": 8, + "poly": [ + 695, + 1100, + 1001, + 1100, + 1001, + 1143, + 695, + 1143 + ], + "score": 0.942 + }, + { + "category_id": 1, + "poly": [ + 294, + 1039, + 1405, + 1039, + 1405, + 1102, + 294, + 1102 + ], + "score": 0.942 + }, + { + "category_id": 8, + "poly": [ + 540, + 311, + 1156, + 311, + 1156, + 364, + 540, + 364 + ], + "score": 0.939 + }, + { + "category_id": 8, + "poly": [ + 779, + 831, + 918, + 831, + 918, + 875, + 779, + 875 + ], + "score": 0.939 + }, + { + "category_id": 8, + "poly": [ + 705, + 1681, + 991, + 1681, + 991, + 1726, + 705, + 1726 + ], + "score": 0.937 + }, + { + "category_id": 1, + "poly": [ + 297, + 1255, + 1165, + 1255, + 1165, + 1293, + 297, + 1293 + ], + "score": 0.932 + }, + { + "category_id": 1, + "poly": [ + 291, + 698, + 1403, + 698, + 1403, + 757, + 291, + 757 + ], + "score": 0.932 + }, + { + "category_id": 1, + "poly": [ + 295, + 1511, + 948, + 1511, + 948, + 1548, + 295, + 1548 + ], + "score": 0.93 + }, + { + "category_id": 1, + "poly": [ + 294, + 1147, + 990, + 1147, + 990, + 1184, + 294, + 1184 + ], + "score": 0.93 + }, + { + "category_id": 8, + "poly": [ + 460, + 980, + 1235, + 980, + 1235, + 1026, + 460, + 1026 + ], + "score": 0.928 + }, + { + "category_id": 1, + "poly": [ + 297, + 885, + 969, + 885, + 969, + 921, + 297, + 921 + ], + "score": 0.927 + }, + { + "category_id": 1, + "poly": [ + 293, + 1998, + 1341, + 1998, + 1341, + 2039, + 293, + 2039 + ], + "score": 0.927 + }, + { + "category_id": 8, + "poly": [ + 733, + 755, + 968, + 755, + 968, + 798, + 733, + 798 + ], + "score": 0.927 + }, + { + "category_id": 8, + "poly": [ + 556, + 1939, + 1140, + 1939, + 1140, + 1984, + 556, + 1984 + ], + "score": 0.927 + }, + { + "category_id": 2, + "poly": [ + 297, + 73, + 858, + 73, + 858, + 107, + 297, + 107 + ], + "score": 0.924 + }, + { + 
"category_id": 1, + "poly": [ + 301, + 1624, + 1384, + 1624, + 1384, + 1666, + 301, + 1666 + ], + "score": 0.923 + }, + { + "category_id": 0, + "poly": [ + 299, + 1334, + 646, + 1334, + 646, + 1371, + 299, + 1371 + ], + "score": 0.922 + }, + { + "category_id": 1, + "poly": [ + 297, + 1405, + 477, + 1405, + 477, + 1439, + 297, + 1439 + ], + "score": 0.919 + }, + { + "category_id": 1, + "poly": [ + 296, + 807, + 350, + 807, + 350, + 837, + 296, + 837 + ], + "score": 0.914 + }, + { + "category_id": 1, + "poly": [ + 297, + 934, + 423, + 934, + 423, + 966, + 297, + 966 + ], + "score": 0.914 + }, + { + "category_id": 1, + "poly": [ + 297, + 622, + 407, + 622, + 407, + 654, + 297, + 654 + ], + "score": 0.913 + }, + { + "category_id": 9, + "poly": [ + 1351, + 1457, + 1400, + 1457, + 1400, + 1488, + 1351, + 1488 + ], + "score": 0.903 + }, + { + "category_id": 9, + "poly": [ + 1351, + 1570, + 1400, + 1570, + 1400, + 1601, + 1351, + 1601 + ], + "score": 0.899 + }, + { + "category_id": 0, + "poly": [ + 300, + 484, + 721, + 484, + 721, + 518, + 300, + 518 + ], + "score": 0.882 + }, + { + "category_id": 2, + "poly": [ + 834, + 2087, + 865, + 2087, + 865, + 2113, + 834, + 2113 + ], + "score": 0.88 + }, + { + "category_id": 13, + "poly": [ + 1179, + 1758, + 1353, + 1758, + 1353, + 1796, + 1179, + 1796 + ], + "score": 0.94, + "latex": "B ( z ^ { k } ) = w _ { n + 1 } ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 415, + 383, + 629, + 383, + 629, + 419, + 415, + 419 + ], + "score": 0.94, + "latex": "p ^ { k } ( v ) \\hat { p } ( v ) \\in \\mathcal { S }" + }, + { + "category_id": 13, + "poly": [ + 367, + 1512, + 511, + 1512, + 511, + 1548, + 367, + 1548 + ], + "score": 0.94, + "latex": "y _ { i } ^ { k } \\in A _ { i } ( x _ { i } ^ { k } )" + }, + { + "category_id": 13, + "poly": [ + 725, + 1626, + 898, + 1626, + 898, + 1665, + 725, + 1665 + ], + "score": 0.94, + "latex": "w _ { n + 1 } ^ { k } = B ( z ^ { k } )" + }, + { + "category_id": 13, + "poly": [ + 578, + 2000, + 678, 
+ 2000, + 678, + 2037, + 578, + 2037 + ], + "score": 0.93, + "latex": "y _ { i } ^ { k } = w _ { i } ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 916, + 2000, + 1089, + 2000, + 1089, + 2038, + 916, + 2038 + ], + "score": 0.93, + "latex": "w _ { n + 1 } ^ { k } = B ( z ^ { k } )" + }, + { + "category_id": 13, + "poly": [ + 728, + 227, + 995, + 227, + 995, + 264, + 728, + 264 + ], + "score": 0.93, + "latex": "\\lVert p ^ { r _ { k } ( v ) } ( v ) - \\hat { p } ( v ) \\rVert \\to 0" + }, + { + "category_id": 13, + "poly": [ + 430, + 2000, + 526, + 2000, + 526, + 2037, + 430, + 2037 + ], + "score": 0.93, + "latex": "x _ { i } ^ { k } = z ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 1022, + 383, + 1111, + 383, + 1111, + 418, + 1022, + 418 + ], + "score": 0.93, + "latex": "p ^ { k } \\hat { p }" + }, + { + "category_id": 13, + "poly": [ + 1251, + 1039, + 1402, + 1039, + 1402, + 1077, + 1251, + 1077 + ], + "score": 0.93, + "latex": "z ^ { k } - x _ { i } ^ { k } \\to 0" + }, + { + "category_id": 13, + "poly": [ + 898, + 1889, + 994, + 1889, + 994, + 1924, + 898, + 1924 + ], + "score": 0.93, + "latex": "x _ { i } ^ { k } = z ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 1065, + 1623, + 1230, + 1623, + 1230, + 1664, + 1065, + 1664 + ], + "score": 0.93, + "latex": "\\textstyle \\sum _ { i = 1 } ^ { n + 1 } w _ { i } ^ { k } = 0" + }, + { + "category_id": 13, + "poly": [ + 364, + 227, + 562, + 227, + 562, + 264, + 364, + 264 + ], + "score": 0.93, + "latex": "p ^ { r _ { k } ( v ) } ( v ) \\hat { p } ( v )" + }, + { + "category_id": 13, + "poly": [ + 458, + 1148, + 614, + 1148, + 614, + 1184, + 458, + 1184 + ], + "score": 0.93, + "latex": "y _ { i } ^ { k } - w _ { i } ^ { k } \\to 0" + }, + { + "category_id": 13, + "poly": [ + 839, + 386, + 909, + 386, + 909, + 417, + 839, + 417 + ], + "score": 0.92, + "latex": "\\hat { p } \\in \\mathcal S" + }, + { + "category_id": 13, + "poly": [ + 443, + 1630, + 532, + 1630, + 532, + 1660, + 443, + 1660 + ], + 
"score": 0.92, + "latex": "G _ { k } = 0" + }, + { + "category_id": 13, + "poly": [ + 1186, + 542, + 1286, + 542, + 1286, + 578, + 1186, + 578 + ], + "score": 0.92, + "latex": "x _ { i } ^ { k } \\hat { z }" + }, + { + "category_id": 13, + "poly": [ + 1246, + 2003, + 1335, + 2003, + 1335, + 2034, + 1246, + 2034 + ], + "score": 0.92, + "latex": "G _ { k } = 0" + }, + { + "category_id": 13, + "poly": [ + 1175, + 699, + 1240, + 699, + 1240, + 733, + 1175, + 733 + ], + "score": 0.92, + "latex": "q _ { k } ( v )" + }, + { + "category_id": 13, + "poly": [ + 534, + 1758, + 809, + 1758, + 809, + 1795, + 534, + 1795 + ], + "score": 0.92, + "latex": "( z ^ { k } , w _ { 1 } ^ { k } , \\ldots , w _ { n + 1 } ^ { k } ) \\in { \\mathcal { S } }" + }, + { + "category_id": 13, + "poly": [ + 917, + 1044, + 1058, + 1044, + 1058, + 1074, + 917, + 1074 + ], + "score": 0.92, + "latex": "i = 1 , \\ldots , n" + }, + { + "category_id": 13, + "poly": [ + 720, + 1152, + 863, + 1152, + 863, + 1182, + 720, + 1182 + ], + "score": 0.91, + "latex": "i = 1 , \\ldots , n" + }, + { + "category_id": 13, + "poly": [ + 409, + 1257, + 668, + 1257, + 668, + 1291, + 409, + 1291 + ], + "score": 0.91, + "latex": "( z , \\hat { w } _ { 1 } , \\dots , \\hat { w } _ { n + 1 } ) \\in S" + }, + { + "category_id": 13, + "poly": [ + 492, + 1795, + 645, + 1795, + 645, + 1829, + 492, + 1829 + ], + "score": 0.91, + "latex": "w _ { i } ^ { k } \\in A _ { i } ( z ^ { k } )" + }, + { + "category_id": 13, + "poly": [ + 678, + 1257, + 840, + 1257, + 840, + 1291, + 678, + 1291 + ], + "score": 0.91, + "latex": "\\hat { w } _ { n + 1 } = B ( \\hat { z } )" + }, + { + "category_id": 13, + "poly": [ + 595, + 576, + 691, + 576, + 691, + 606, + 595, + 606 + ], + "score": 0.91, + "latex": "G _ { k } \\to 0" + }, + { + "category_id": 14, + "poly": [ + 719, + 648, + 979, + 648, + 979, + 691, + 719, + 691 + ], + "score": 0.91, + "latex": "x _ { i } ^ { k } = J _ { \\tau A _ { i } } ( z ^ { k } + \\tau w _ { i } ^ { k } )" + }, + { 
+ "category_id": 13, + "poly": [ + 724, + 2003, + 864, + 2003, + 864, + 2035, + 724, + 2035 + ], + "score": 0.91, + "latex": "i = 1 , \\ldots , n" + }, + { + "category_id": 14, + "poly": [ + 696, + 1098, + 999, + 1098, + 999, + 1142, + 696, + 1142 + ], + "score": 0.91, + "latex": "y _ { i } ^ { k } - w _ { i } ^ { k } = \\tau ^ { - 1 } ( z ^ { k } - x _ { i } ^ { k } ) ," + }, + { + "category_id": 13, + "poly": [ + 323, + 1407, + 412, + 1407, + 412, + 1437, + 323, + 1437 + ], + "score": 0.91, + "latex": "G _ { k } = 0" + }, + { + "category_id": 13, + "poly": [ + 494, + 697, + 531, + 697, + 531, + 733, + 494, + 733 + ], + "score": 0.91, + "latex": "w _ { i } ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 609, + 1040, + 641, + 1040, + 641, + 1077, + 609, + 1077 + ], + "score": 0.91, + "latex": "x _ { i } ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 298, + 576, + 439, + 576, + 439, + 607, + 298, + 607 + ], + "score": 0.91, + "latex": "i = 1 , \\ldots , n" + }, + { + "category_id": 14, + "poly": [ + 688, + 1562, + 1010, + 1562, + 1010, + 1606, + 688, + 1606 + ], + "score": 0.9, + "latex": "\\forall i \\in 1 . . n : \\quad w _ { i } ^ { k } \\in A _ { i } ( z ^ { k } ) ." + }, + { + "category_id": 13, + "poly": [ + 356, + 1891, + 611, + 1891, + 611, + 1924, + 356, + 1924 + ], + "score": 0.9, + "latex": "z ^ { k } = ( I + \\tau A _ { i } ) ^ { - 1 } ( t _ { i } ^ { k } )" + }, + { + "category_id": 14, + "poly": [ + 780, + 829, + 918, + 829, + 918, + 875, + 780, + 875 + ], + "score": 0.9, + "latex": "x _ { i } ^ { q _ { k } ( v ) } \\hat { z } ." 
+ }, + { + "category_id": 14, + "poly": [ + 733, + 751, + 967, + 751, + 967, + 798, + 733, + 798 + ], + "score": 0.9, + "latex": "z ^ { q _ { k } ( v ) } - x _ { i } ^ { q _ { k } ( v ) } 0" + }, + { + "category_id": 13, + "poly": [ + 366, + 885, + 398, + 885, + 398, + 921, + 366, + 921 + ], + "score": 0.9, + "latex": "x _ { i } ^ { k }" + }, + { + "category_id": 14, + "poly": [ + 463, + 980, + 1233, + 980, + 1233, + 1025, + 463, + 1025 + ], + "score": 0.9, + "latex": "\\begin{array} { r } { G _ { k } \\doteq \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } . } \\end{array}" + }, + { + "category_id": 14, + "poly": [ + 704, + 1200, + 993, + 1200, + 993, + 1241, + 704, + 1241 + ], + "score": 0.9, + "latex": "G _ { k } \\to \\| B ( \\hat { z } ) - \\hat { w } _ { n + 1 } \\| ^ { 2 } ." + }, + { + "category_id": 14, + "poly": [ + 541, + 308, + 1154, + 308, + 1154, + 364, + 541, + 364 + ], + "score": 0.9, + "latex": "\\operatorname* { l i m } _ { k \\to \\infty } \\| p ^ { k } ( v ) - \\hat { p } ( v ) \\| = \\operatorname* { l i m } _ { k \\to \\infty } \\| p ^ { r _ { k } ( v ) } ( v ) - \\hat { p } ( v ) \\| = 0 ." 
+ }, + { + "category_id": 13, + "poly": [ + 828, + 698, + 859, + 698, + 859, + 733, + 828, + 733 + ], + "score": 0.9, + "latex": "x _ { i } ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 970, + 1827, + 1231, + 1827, + 1231, + 1862, + 970, + 1862 + ], + "score": 0.9, + "latex": "z ^ { k } \\in ( I + \\tau A _ { i } ) ^ { - 1 } ( t _ { i } ^ { k } )" + }, + { + "category_id": 13, + "poly": [ + 1000, + 1258, + 1095, + 1258, + 1095, + 1288, + 1000, + 1288 + ], + "score": 0.89, + "latex": "G _ { k } \\to 0" + }, + { + "category_id": 14, + "poly": [ + 705, + 1680, + 994, + 1680, + 994, + 1725, + 705, + 1725 + ], + "score": 0.89, + "latex": "( z ^ { k } , w _ { 1 } ^ { k } , \\ldots , w _ { n + 1 } ^ { k } ) \\in { \\mathcal { S } } ." + }, + { + "category_id": 14, + "poly": [ + 559, + 1939, + 1138, + 1939, + 1138, + 1982, + 559, + 1982 + ], + "score": 0.89, + "latex": "y _ { i } ^ { k } = \\tau ^ { - 1 } ( t _ { i } ^ { k } - x _ { i } ^ { k } ) = \\tau ^ { - 1 } ( z ^ { k } + \\tau w _ { i } ^ { k } - z ^ { k } ) = w _ { i } ^ { k } ." + }, + { + "category_id": 13, + "poly": [ + 900, + 1797, + 997, + 1797, + 997, + 1826, + 900, + 1826 + ], + "score": 0.89, + "latex": "i \\in 1 . . n" + }, + { + "category_id": 13, + "poly": [ + 556, + 1515, + 697, + 1515, + 697, + 1546, + 556, + 1546 + ], + "score": 0.88, + "latex": "i = 1 , \\ldots , n" + }, + { + "category_id": 13, + "poly": [ + 527, + 1040, + 557, + 1040, + 557, + 1070, + 527, + 1070 + ], + "score": 0.88, + "latex": "z ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 411, + 697, + 441, + 697, + 441, + 727, + 411, + 727 + ], + "score": 0.87, + "latex": "z ^ { k }" + }, + { + "category_id": 14, + "poly": [ + 612, + 1451, + 1085, + 1451, + 1085, + 1492, + 612, + 1492 + ], + "score": 0.87, + "latex": "\\forall i = 1 , \\ldots , n : \\quad y _ { i } ^ { k } = w _ { i } ^ { k } \\mathrm { ~ a n d ~ } z ^ { k } = x _ { i } ^ { k } ." 
+ }, + { + "category_id": 13, + "poly": [ + 460, + 1828, + 846, + 1828, + 846, + 1862, + 460, + 1862 + ], + "score": 0.86, + "latex": "t _ { i } ^ { k } = z ^ { k } + \\tau w _ { i . } ^ { k } \\in ( I + \\tau A _ { i } ) ( z ^ { k } )" + }, + { + "category_id": 13, + "poly": [ + 692, + 1798, + 788, + 1798, + 788, + 1825, + 692, + 1825 + ], + "score": 0.86, + "latex": "i \\in 1 . . n" + }, + { + "category_id": 13, + "poly": [ + 404, + 1861, + 641, + 1861, + 641, + 1892, + 404, + 1892 + ], + "score": 0.85, + "latex": "J _ { \\tau A _ { i } } = ( I + \\tau A _ { i } ) ^ { - 1 }" + }, + { + "category_id": 13, + "poly": [ + 1015, + 1762, + 1037, + 1762, + 1037, + 1788, + 1015, + 1788 + ], + "score": 0.81, + "latex": "s" + }, + { + "category_id": 13, + "poly": [ + 943, + 889, + 960, + 889, + 960, + 915, + 943, + 915 + ], + "score": 0.81, + "latex": "\\hat { z }" + }, + { + "category_id": 13, + "poly": [ + 1113, + 227, + 1401, + 227, + 1401, + 265, + 1113, + 265 + ], + "score": 0.77, + "latex": "\\hat { p } ( v ) \\in S , \\| p ^ { k } ( v ) - \\hat { p } ( v ) \\|" + }, + { + "category_id": 13, + "poly": [ + 1113, + 229, + 1219, + 229, + 1219, + 264, + 1113, + 264 + ], + "score": 0.32, + "latex": "\\hat { p } ( v ) \\in \\cal S" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 72.0, + 858.0, + 72.0, + 858.0, + 109.0, + 297.0, + 109.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1332.0, + 649.0, + 1332.0, + 649.0, + 1376.0, + 295.0, + 1376.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 485.0, + 722.0, + 485.0, + 722.0, + 519.0, + 297.0, + 519.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 829.0, + 2083.0, + 871.0, + 2083.0, + 871.0, + 2125.0, + 829.0, + 2125.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1754.0, + 533.0, + 1754.0, + 533.0, + 1799.0, + 293.0, + 1799.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 810.0, + 1754.0, + 1014.0, + 1754.0, + 1014.0, + 1799.0, + 810.0, + 1799.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1038.0, + 1754.0, + 1178.0, + 1754.0, + 1178.0, + 1799.0, + 1038.0, + 1799.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1354.0, + 1754.0, + 1409.0, + 1754.0, + 1409.0, + 1799.0, + 1354.0, + 1799.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1793.0, + 491.0, + 1793.0, + 491.0, + 1830.0, + 296.0, + 1830.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 646.0, + 1793.0, + 691.0, + 1793.0, + 691.0, + 1830.0, + 646.0, + 1830.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 789.0, + 1793.0, + 899.0, + 1793.0, + 899.0, + 1830.0, + 789.0, + 1830.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 998.0, + 1793.0, + 1409.0, + 1793.0, + 1409.0, + 1830.0, + 998.0, + 1830.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1825.0, + 459.0, + 1825.0, + 459.0, + 1866.0, + 293.0, + 1866.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 847.0, + 1825.0, + 969.0, + 1825.0, + 969.0, + 1866.0, + 847.0, + 1866.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1232.0, + 1825.0, + 1408.0, + 1825.0, + 1408.0, + 1866.0, + 1232.0, + 1866.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1855.0, + 403.0, + 1855.0, + 403.0, + 1896.0, + 292.0, + 1896.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 642.0, + 1855.0, + 1408.0, + 1855.0, + 1408.0, + 1896.0, + 642.0, + 1896.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1888.0, + 355.0, + 1888.0, + 355.0, + 1925.0, + 293.0, + 1925.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, 
+ "poly": [ + 612.0, + 1888.0, + 897.0, + 1888.0, + 897.0, + 1925.0, + 612.0, + 1925.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 995.0, + 1888.0, + 1408.0, + 1888.0, + 1408.0, + 1925.0, + 995.0, + 1925.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 536.0, + 1185.0, + 536.0, + 1185.0, + 583.0, + 291.0, + 583.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1287.0, + 536.0, + 1407.0, + 536.0, + 1407.0, + 583.0, + 1287.0, + 583.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 574.0, + 297.0, + 574.0, + 297.0, + 610.0, + 293.0, + 610.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 440.0, + 574.0, + 594.0, + 574.0, + 594.0, + 610.0, + 440.0, + 610.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 692.0, + 574.0, + 763.0, + 574.0, + 763.0, + 610.0, + 692.0, + 610.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 222.0, + 363.0, + 222.0, + 363.0, + 270.0, + 294.0, + 270.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 563.0, + 222.0, + 727.0, + 222.0, + 727.0, + 270.0, + 563.0, + 270.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 996.0, + 222.0, + 1112.0, + 222.0, + 1112.0, + 270.0, + 996.0, + 270.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1402.0, + 222.0, + 1406.0, + 222.0, + 1406.0, + 270.0, + 1402.0, + 270.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 260.0, + 676.0, + 260.0, + 676.0, + 297.0, + 295.0, + 297.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 377.0, + 414.0, + 377.0, + 414.0, + 423.0, + 292.0, + 423.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 630.0, + 377.0, + 838.0, + 377.0, + 838.0, + 423.0, 
+ 630.0, + 423.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 910.0, + 377.0, + 1021.0, + 377.0, + 1021.0, + 423.0, + 910.0, + 423.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1112.0, + 377.0, + 1405.0, + 377.0, + 1405.0, + 423.0, + 1112.0, + 423.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 414.0, + 532.0, + 414.0, + 532.0, + 449.0, + 293.0, + 449.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1035.0, + 526.0, + 1035.0, + 526.0, + 1078.0, + 291.0, + 1078.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 558.0, + 1035.0, + 608.0, + 1035.0, + 608.0, + 1078.0, + 558.0, + 1078.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 642.0, + 1035.0, + 916.0, + 1035.0, + 916.0, + 1078.0, + 642.0, + 1078.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1059.0, + 1035.0, + 1250.0, + 1035.0, + 1250.0, + 1078.0, + 1059.0, + 1078.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1403.0, + 1035.0, + 1407.0, + 1035.0, + 1407.0, + 1078.0, + 1403.0, + 1078.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1072.0, + 437.0, + 1072.0, + 437.0, + 1109.0, + 293.0, + 1109.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1252.0, + 408.0, + 1252.0, + 408.0, + 1297.0, + 294.0, + 1297.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 669.0, + 1252.0, + 677.0, + 1252.0, + 677.0, + 1297.0, + 669.0, + 1297.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 841.0, + 1252.0, + 999.0, + 1252.0, + 999.0, + 1297.0, + 841.0, + 1297.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1096.0, + 1252.0, + 1167.0, + 1252.0, + 1167.0, + 1297.0, + 1096.0, + 1297.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 693.0, + 410.0, + 693.0, + 410.0, + 737.0, + 292.0, + 737.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 442.0, + 693.0, + 493.0, + 693.0, + 493.0, + 737.0, + 442.0, + 737.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 532.0, + 693.0, + 827.0, + 693.0, + 827.0, + 737.0, + 532.0, + 737.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 860.0, + 693.0, + 1174.0, + 693.0, + 1174.0, + 737.0, + 860.0, + 737.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1241.0, + 693.0, + 1406.0, + 693.0, + 1406.0, + 737.0, + 1241.0, + 737.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 724.0, + 439.0, + 724.0, + 439.0, + 764.0, + 293.0, + 764.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1508.0, + 366.0, + 1508.0, + 366.0, + 1552.0, + 295.0, + 1552.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 512.0, + 1508.0, + 555.0, + 1508.0, + 555.0, + 1552.0, + 512.0, + 1552.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 698.0, + 1508.0, + 948.0, + 1508.0, + 948.0, + 1552.0, + 698.0, + 1552.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1146.0, + 457.0, + 1146.0, + 457.0, + 1187.0, + 295.0, + 1187.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 615.0, + 1146.0, + 719.0, + 1146.0, + 719.0, + 1187.0, + 615.0, + 1187.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 864.0, + 1146.0, + 990.0, + 1146.0, + 990.0, + 1187.0, + 864.0, + 1187.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 884.0, + 365.0, + 884.0, + 365.0, + 924.0, + 296.0, + 924.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + 
"poly": [ + 399.0, + 884.0, + 942.0, + 884.0, + 942.0, + 924.0, + 399.0, + 924.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 961.0, + 884.0, + 972.0, + 884.0, + 972.0, + 924.0, + 961.0, + 924.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1993.0, + 429.0, + 1993.0, + 429.0, + 2045.0, + 291.0, + 2045.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 527.0, + 1993.0, + 577.0, + 1993.0, + 577.0, + 2045.0, + 527.0, + 2045.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 679.0, + 1993.0, + 723.0, + 1993.0, + 723.0, + 2045.0, + 679.0, + 2045.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 865.0, + 1993.0, + 915.0, + 1993.0, + 915.0, + 2045.0, + 865.0, + 2045.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1090.0, + 1993.0, + 1245.0, + 1993.0, + 1245.0, + 2045.0, + 1090.0, + 2045.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1336.0, + 1993.0, + 1350.0, + 1993.0, + 1350.0, + 2045.0, + 1336.0, + 2045.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 287.0, + 1613.0, + 442.0, + 1613.0, + 442.0, + 1676.0, + 287.0, + 1676.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 533.0, + 1613.0, + 724.0, + 1613.0, + 724.0, + 1676.0, + 533.0, + 1676.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 899.0, + 1613.0, + 1064.0, + 1613.0, + 1064.0, + 1676.0, + 899.0, + 1676.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1231.0, + 1613.0, + 1397.0, + 1613.0, + 1397.0, + 1676.0, + 1231.0, + 1676.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1400.0, + 322.0, + 1400.0, + 322.0, + 1443.0, + 293.0, + 1443.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 413.0, + 1400.0, + 
479.0, + 1400.0, + 479.0, + 1443.0, + 413.0, + 1443.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 805.0, + 352.0, + 805.0, + 352.0, + 840.0, + 294.0, + 840.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 933.0, + 426.0, + 933.0, + 426.0, + 966.0, + 295.0, + 966.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 619.0, + 408.0, + 619.0, + 408.0, + 657.0, + 294.0, + 657.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 23, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 298, + 1507, + 1401, + 1507, + 1401, + 1604, + 298, + 1604 + ], + "score": 0.974 + }, + { + "category_id": 8, + "poly": [ + 691, + 845, + 1008, + 845, + 1008, + 935, + 691, + 935 + ], + "score": 0.963 + }, + { + "category_id": 8, + "poly": [ + 667, + 1043, + 1032, + 1043, + 1032, + 1118, + 667, + 1118 + ], + "score": 0.959 + }, + { + "category_id": 8, + "poly": [ + 404, + 1616, + 1290, + 1616, + 1290, + 1702, + 404, + 1702 + ], + "score": 0.949 + }, + { + "category_id": 1, + "poly": [ + 295, + 967, + 1406, + 967, + 1406, + 1032, + 295, + 1032 + ], + "score": 0.949 + }, + { + "category_id": 8, + "poly": [ + 399, + 1411, + 1297, + 1411, + 1297, + 1496, + 399, + 1496 + ], + "score": 0.945 + }, + { + "category_id": 1, + "poly": [ + 297, + 763, + 1403, + 763, + 1403, + 830, + 297, + 830 + ], + "score": 0.943 + }, + { + "category_id": 8, + "poly": [ + 731, + 1896, + 966, + 1896, + 966, + 1936, + 731, + 1936 + ], + "score": 0.942 + }, + { + "category_id": 8, + "poly": [ + 736, + 1988, + 961, + 1988, + 961, + 2030, + 736, + 2030 + ], + "score": 0.94 + }, + { + "category_id": 8, + "poly": [ + 366, + 1755, + 1329, + 1755, + 1329, + 1840, + 366, + 1840 + ], + "score": 0.936 + }, + { + "category_id": 1, + "poly": [ + 298, + 1128, + 633, + 1128, + 633, + 1160, + 298, + 1160 + ], + "score": 0.931 + }, + { + 
"category_id": 1, + "poly": [ + 296, + 1946, + 430, + 1946, + 430, + 1978, + 296, + 1978 + ], + "score": 0.928 + }, + { + "category_id": 8, + "poly": [ + 315, + 664, + 1328, + 664, + 1328, + 746, + 315, + 746 + ], + "score": 0.927 + }, + { + "category_id": 1, + "poly": [ + 298, + 1271, + 838, + 1271, + 838, + 1305, + 298, + 1305 + ], + "score": 0.926 + }, + { + "category_id": 1, + "poly": [ + 297, + 1369, + 423, + 1369, + 423, + 1400, + 297, + 1400 + ], + "score": 0.925 + }, + { + "category_id": 8, + "poly": [ + 312, + 562, + 1329, + 562, + 1329, + 644, + 312, + 644 + ], + "score": 0.925 + }, + { + "category_id": 8, + "poly": [ + 865, + 381, + 1211, + 381, + 1211, + 454, + 865, + 454 + ], + "score": 0.924 + }, + { + "category_id": 0, + "poly": [ + 298, + 224, + 680, + 224, + 680, + 263, + 298, + 263 + ], + "score": 0.921 + }, + { + "category_id": 2, + "poly": [ + 297, + 73, + 858, + 73, + 858, + 107, + 297, + 107 + ], + "score": 0.917 + }, + { + "category_id": 1, + "poly": [ + 296, + 1853, + 377, + 1853, + 377, + 1883, + 296, + 1883 + ], + "score": 0.916 + }, + { + "category_id": 8, + "poly": [ + 365, + 1219, + 1328, + 1219, + 1328, + 1263, + 365, + 1263 + ], + "score": 0.906 + }, + { + "category_id": 1, + "poly": [ + 295, + 1715, + 341, + 1715, + 341, + 1745, + 295, + 1745 + ], + "score": 0.905 + }, + { + "category_id": 8, + "poly": [ + 356, + 1316, + 1294, + 1316, + 1294, + 1359, + 356, + 1359 + ], + "score": 0.902 + }, + { + "category_id": 8, + "poly": [ + 864, + 460, + 1055, + 460, + 1055, + 497, + 864, + 497 + ], + "score": 0.9 + }, + { + "category_id": 1, + "poly": [ + 293, + 507, + 999, + 507, + 999, + 543, + 293, + 543 + ], + "score": 0.898 + }, + { + "category_id": 9, + "poly": [ + 1351, + 463, + 1401, + 463, + 1401, + 494, + 1351, + 494 + ], + "score": 0.89 + }, + { + "category_id": 9, + "poly": [ + 1351, + 1322, + 1401, + 1322, + 1401, + 1354, + 1351, + 1354 + ], + "score": 0.877 + }, + { + "category_id": 9, + "poly": [ + 1351, + 402, + 1401, + 402, + 
1401, + 433, + 1351, + 433 + ], + "score": 0.873 + }, + { + "category_id": 2, + "poly": [ + 834, + 2087, + 865, + 2087, + 865, + 2113, + 834, + 2113 + ], + "score": 0.872 + }, + { + "category_id": 1, + "poly": [ + 295, + 293, + 1159, + 293, + 1159, + 328, + 295, + 328 + ], + "score": 0.868 + }, + { + "category_id": 9, + "poly": [ + 1352, + 587, + 1401, + 587, + 1401, + 620, + 1352, + 620 + ], + "score": 0.86 + }, + { + "category_id": 9, + "poly": [ + 1353, + 688, + 1401, + 688, + 1401, + 720, + 1353, + 720 + ], + "score": 0.845 + }, + { + "category_id": 1, + "poly": [ + 296, + 335, + 1032, + 335, + 1032, + 370, + 296, + 370 + ], + "score": 0.828 + }, + { + "category_id": 1, + "poly": [ + 295, + 1174, + 854, + 1174, + 854, + 1209, + 295, + 1209 + ], + "score": 0.682 + }, + { + "category_id": 8, + "poly": [ + 483, + 380, + 1217, + 380, + 1217, + 496, + 483, + 496 + ], + "score": 0.29 + }, + { + "category_id": 8, + "poly": [ + 487, + 460, + 1064, + 460, + 1064, + 496, + 487, + 496 + ], + "score": 0.217 + }, + { + "category_id": 8, + "poly": [ + 486, + 381, + 1216, + 381, + 1216, + 453, + 486, + 453 + ], + "score": 0.141 + }, + { + "category_id": 13, + "poly": [ + 753, + 1569, + 917, + 1569, + 917, + 1604, + 753, + 1604 + ], + "score": 0.95, + "latex": "\\overline { { \\rho } } = ( 1 / 2 ) L ^ { - 1 }" + }, + { + "category_id": 14, + "poly": [ + 688, + 840, + 1011, + 840, + 1011, + 938, + 688, + 938 + ], + "score": 0.94, + "latex": "\\frac { 1 } { K } \\sum _ { j = 1 } ^ { K } \\mathbb { E } [ G _ { j } ] = \\mathcal { O } ( K ^ { - 1 / 4 } ) ." 
+ }, + { + "category_id": 14, + "poly": [ + 667, + 1041, + 1030, + 1041, + 1030, + 1118, + 667, + 1118 + ], + "score": 0.94, + "latex": "\\rho = \\operatorname* { m i n } \\left\\{ K ^ { - 1 / 4 } , \\frac { 1 } { 2 L } \\right\\} \\leq \\frac { 1 } { 2 L } ," + }, + { + "category_id": 13, + "poly": [ + 663, + 1181, + 746, + 1181, + 746, + 1208, + 663, + 1208 + ], + "score": 0.93, + "latex": "\\rho _ { k } = \\rho" + }, + { + "category_id": 13, + "poly": [ + 298, + 1568, + 462, + 1568, + 462, + 1603, + 298, + 1603 + ], + "score": 0.93, + "latex": "\\rho \\leq ( 1 / 2 ) L ^ { - 1 }" + }, + { + "category_id": 13, + "poly": [ + 1166, + 997, + 1268, + 997, + 1268, + 1030, + 1166, + 1030 + ], + "score": 0.93, + "latex": "\\rho < L ^ { - 1 }" + }, + { + "category_id": 14, + "poly": [ + 481, + 379, + 1213, + 379, + 1213, + 501, + 481, + 501 + ], + "score": 0.93, + "latex": "\\begin{array} { l l } { { \\forall k = 1 , \\dots , K : } } & { { \\qquad \\rho _ { k } = \\rho \\doteq \\operatorname* { m i n } \\left\\{ K ^ { - 1 / 4 } , \\frac { 1 } { 2 L } \\right\\} } } \\\\ { { \\forall k = 1 , \\dots , K : } } & { { \\qquad \\alpha _ { k } = \\alpha \\doteq C _ { f } \\rho ^ { 2 } } } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 994, + 1569, + 1155, + 1569, + 1155, + 1603, + 994, + 1603 + ], + "score": 0.92, + "latex": "1 - \\overline { { \\rho } } L = 1 / 2" + }, + { + "category_id": 14, + "poly": [ + 369, + 1750, + 1328, + 1750, + 1328, + 1842, + 369, + 1842 + ], + "score": 0.92, + "latex": "U _ { k } \\doteq \\mathbb { E } \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } \\qquad W _ { k } \\doteq \\tau \\sum _ { i = 1 } ^ { n } \\mathbb { E } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\tau ^ { - 1 } \\sum _ { i = 1 } ^ { n } \\mathbb { E } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } ," + }, + { + "category_id": 14, + "poly": [ + 405, + 1613, + 1293, + 1613, + 1293, + 1704, + 405, + 1704 + ], + "score": 0.92, + "latex": "\\rho \\mathbb { 
E } T _ { k } = \\tau \\sum _ { i = 1 } ^ { n } \\mathbb { E } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\tau ^ { - 1 } \\sum _ { i = 1 } ^ { n } \\mathbb { E } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + \\rho \\mathbb { E } \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } ." + }, + { + "category_id": 14, + "poly": [ + 319, + 557, + 1340, + 557, + 1340, + 756, + 319, + 756 + ], + "score": 0.92, + "latex": "\\begin{array} { l } { \\displaystyle \\frac { 1 } { K } \\sum _ { j = 1 } ^ { K } \\mathbb { E } [ G _ { j } ] \\leq \\frac { 8 L ^ { 3 } \\exp \\left( C _ { f } ( C _ { 1 } + C _ { 3 } ) \\right) } { C _ { f } \\operatorname* { m i n } \\{ \\tau , \\tau ^ { - 1 } \\} K } \\left( \\| p ^ { 1 } - p ^ { * } \\| ^ { 2 } + \\frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } \\right) \\mathrm { ~ } f o r ~ K < ( 2 L ) ^ { 4 } } \\\\ { \\displaystyle \\frac { 1 } { K } \\sum _ { j = 1 } ^ { K } \\mathbb { E } [ G _ { j } ] \\leq \\frac { \\exp \\left( C _ { f } ( C _ { 1 } + C _ { 3 } ) \\right) } { C _ { f } \\operatorname* { m i n } \\{ \\tau , \\tau ^ { - 1 } \\} K ^ { 1 / 4 } } \\left( \\| p ^ { 1 } - p ^ { * } \\| ^ { 2 } + \\frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } \\right) \\mathrm { ~ } f o r ~ K \\geq ( 2 L ) ^ { 4 } . 
} \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 1009, + 766, + 1177, + 766, + 1177, + 798, + 1009, + 798 + ], + "score": 0.92, + "latex": "C _ { 1 } , C _ { 2 } , C _ { 3 } , C _ { 4 }" + }, + { + "category_id": 14, + "poly": [ + 399, + 1407, + 1297, + 1407, + 1297, + 1499, + 399, + 1499 + ], + "score": 0.91, + "latex": "T _ { k } \\doteq \\frac { \\tau } { \\rho } \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + \\frac { 1 } { \\rho \\tau } \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + 2 ( 1 - \\overline { { \\rho } } L ) \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } ," + }, + { + "category_id": 13, + "poly": [ + 401, + 510, + 488, + 510, + 488, + 544, + 401, + 544 + ], + "score": 0.91, + "latex": "C _ { f } > 0" + }, + { + "category_id": 13, + "poly": [ + 847, + 1511, + 918, + 1511, + 918, + 1541, + 847, + 1541 + ], + "score": 0.91, + "latex": "\\rho = \\overline { { \\rho } }" + }, + { + "category_id": 13, + "poly": [ + 908, + 511, + 989, + 511, + 989, + 542, + 908, + 542 + ], + "score": 0.91, + "latex": "p ^ { * } \\in { \\mathcal { S } }" + }, + { + "category_id": 14, + "poly": [ + 738, + 1993, + 961, + 1993, + 961, + 2028, + 738, + 2028 + ], + "score": 0.9, + "latex": "V _ { k } \\doteq \\mathbb { E } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } ." 
+ }, + { + "category_id": 13, + "poly": [ + 699, + 336, + 778, + 336, + 778, + 367, + 699, + 367 + ], + "score": 0.9, + "latex": "K \\geq 1" + }, + { + "category_id": 13, + "poly": [ + 523, + 1181, + 613, + 1181, + 613, + 1206, + 523, + 1206 + ], + "score": 0.89, + "latex": "\\alpha _ { k } = \\alpha" + }, + { + "category_id": 13, + "poly": [ + 579, + 975, + 668, + 975, + 668, + 1001, + 579, + 1001 + ], + "score": 0.89, + "latex": "\\rho _ { k } = \\rho" + }, + { + "category_id": 13, + "poly": [ + 696, + 1274, + 731, + 1274, + 731, + 1303, + 696, + 1303 + ], + "score": 0.89, + "latex": "\\mathcal { F } _ { k }" + }, + { + "category_id": 14, + "poly": [ + 365, + 1219, + 1331, + 1219, + 1331, + 1261, + 365, + 1261 + ], + "score": 0.89, + "latex": "\\begin{array} { r } { \\mathbb { E } [ \\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } | \\mathcal { F } _ { k } ] \\le ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - \\alpha \\rho T _ { k } + C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } . } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 375, + 766, + 413, + 766, + 413, + 796, + 375, + 796 + ], + "score": 0.89, + "latex": "G _ { k }" + }, + { + "category_id": 14, + "poly": [ + 734, + 1904, + 962, + 1904, + 962, + 1932, + 734, + 1932 + ], + "score": 0.88, + "latex": "\\rho \\mathbb { E } T _ { k } = \\rho U _ { k } + W _ { k } ," + }, + { + "category_id": 14, + "poly": [ + 357, + 1315, + 1298, + 1315, + 1298, + 1358, + 357, + 1358 + ], + "score": 0.87, + "latex": "\\begin{array} { r } { \\mathbb { E } \\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } \\leq ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) \\mathbb { E } \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - \\alpha \\rho \\mathbb { E } T _ { k } + C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } . 
} \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 426, + 973, + 523, + 973, + 523, + 999, + 426, + 999 + ], + "score": 0.87, + "latex": "\\alpha _ { k } = \\alpha" + }, + { + "category_id": 13, + "poly": [ + 836, + 975, + 855, + 975, + 855, + 1001, + 836, + 1001 + ], + "score": 0.83, + "latex": "\\rho" + }, + { + "category_id": 13, + "poly": [ + 1050, + 1004, + 1068, + 1004, + 1068, + 1030, + 1050, + 1030 + ], + "score": 0.82, + "latex": "\\rho" + }, + { + "category_id": 13, + "poly": [ + 760, + 975, + 782, + 975, + 782, + 996, + 760, + 996 + ], + "score": 0.81, + "latex": "\\alpha" + }, + { + "category_id": 13, + "poly": [ + 956, + 1545, + 974, + 1545, + 974, + 1571, + 956, + 1571 + ], + "score": 0.79, + "latex": "\\rho" + }, + { + "category_id": 13, + "poly": [ + 890, + 1544, + 908, + 1544, + 908, + 1570, + 890, + 1570 + ], + "score": 0.76, + "latex": "\\overline { { \\rho } }" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 221.0, + 681.0, + 221.0, + 681.0, + 267.0, + 293.0, + 267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 72.0, + 858.0, + 72.0, + 858.0, + 109.0, + 297.0, + 109.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 829.0, + 2083.0, + 871.0, + 2083.0, + 871.0, + 2125.0, + 829.0, + 2125.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1505.0, + 846.0, + 1505.0, + 846.0, + 1546.0, + 292.0, + 1546.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 919.0, + 1505.0, + 1406.0, + 1505.0, + 1406.0, + 1546.0, + 919.0, + 1546.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1537.0, + 889.0, + 1537.0, + 889.0, + 1574.0, + 293.0, + 1574.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 909.0, + 1537.0, + 955.0, + 1537.0, + 955.0, + 1574.0, + 909.0, + 1574.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 
975.0, + 1537.0, + 1405.0, + 1537.0, + 1405.0, + 1574.0, + 975.0, + 1574.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1567.0, + 297.0, + 1567.0, + 297.0, + 1608.0, + 292.0, + 1608.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 463.0, + 1567.0, + 752.0, + 1567.0, + 752.0, + 1608.0, + 463.0, + 1608.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 918.0, + 1567.0, + 993.0, + 1567.0, + 993.0, + 1608.0, + 918.0, + 1608.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1156.0, + 1567.0, + 1286.0, + 1567.0, + 1286.0, + 1608.0, + 1156.0, + 1608.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 966.0, + 425.0, + 966.0, + 425.0, + 1002.0, + 295.0, + 1002.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 524.0, + 966.0, + 578.0, + 966.0, + 578.0, + 1002.0, + 524.0, + 1002.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 669.0, + 966.0, + 759.0, + 966.0, + 759.0, + 1002.0, + 669.0, + 1002.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 783.0, + 966.0, + 835.0, + 966.0, + 835.0, + 1002.0, + 783.0, + 1002.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 856.0, + 966.0, + 1407.0, + 966.0, + 1407.0, + 1002.0, + 856.0, + 1002.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 994.0, + 1049.0, + 994.0, + 1049.0, + 1036.0, + 292.0, + 1036.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1069.0, + 994.0, + 1165.0, + 994.0, + 1165.0, + 1036.0, + 1069.0, + 1036.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1269.0, + 994.0, + 1350.0, + 994.0, + 1350.0, + 1036.0, + 1269.0, + 1036.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 760.0, + 374.0, + 760.0, 
+ 374.0, + 803.0, + 294.0, + 803.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 414.0, + 760.0, + 1008.0, + 760.0, + 1008.0, + 803.0, + 414.0, + 803.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1178.0, + 760.0, + 1405.0, + 760.0, + 1405.0, + 803.0, + 1178.0, + 803.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 794.0, + 1079.0, + 794.0, + 1079.0, + 836.0, + 292.0, + 836.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1127.0, + 634.0, + 1127.0, + 634.0, + 1164.0, + 296.0, + 1164.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1944.0, + 434.0, + 1944.0, + 434.0, + 1978.0, + 293.0, + 1978.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1268.0, + 695.0, + 1268.0, + 695.0, + 1309.0, + 294.0, + 1309.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 732.0, + 1268.0, + 841.0, + 1268.0, + 841.0, + 1309.0, + 732.0, + 1309.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1368.0, + 425.0, + 1368.0, + 425.0, + 1402.0, + 295.0, + 1402.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1849.0, + 380.0, + 1849.0, + 380.0, + 1885.0, + 292.0, + 1885.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1713.0, + 344.0, + 1713.0, + 344.0, + 1750.0, + 293.0, + 1750.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 504.0, + 400.0, + 504.0, + 400.0, + 548.0, + 290.0, + 548.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 489.0, + 504.0, + 907.0, + 504.0, + 907.0, + 548.0, + 489.0, + 548.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 990.0, + 504.0, + 1002.0, + 504.0, + 1002.0, + 548.0, + 990.0, + 548.0 + ], + "score": 
1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 293.0, + 1166.0, + 293.0, + 1166.0, + 332.0, + 294.0, + 332.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 335.0, + 698.0, + 335.0, + 698.0, + 371.0, + 295.0, + 371.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 779.0, + 335.0, + 1037.0, + 335.0, + 1037.0, + 371.0, + 779.0, + 371.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1173.0, + 522.0, + 1173.0, + 522.0, + 1213.0, + 295.0, + 1213.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 614.0, + 1173.0, + 662.0, + 1173.0, + 662.0, + 1213.0, + 614.0, + 1213.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 747.0, + 1173.0, + 854.0, + 1173.0, + 854.0, + 1213.0, + 747.0, + 1213.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 24, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 8, + "poly": [ + 327, + 363, + 1374, + 363, + 1374, + 707, + 327, + 707 + ], + "score": 0.971 + }, + { + "category_id": 8, + "poly": [ + 441, + 1811, + 1254, + 1811, + 1254, + 2032, + 441, + 2032 + ], + "score": 0.969 + }, + { + "category_id": 8, + "poly": [ + 507, + 1157, + 1189, + 1157, + 1189, + 1490, + 507, + 1490 + ], + "score": 0.963 + }, + { + "category_id": 8, + "poly": [ + 649, + 763, + 1050, + 763, + 1050, + 858, + 649, + 858 + ], + "score": 0.96 + }, + { + "category_id": 8, + "poly": [ + 626, + 1685, + 1068, + 1685, + 1068, + 1762, + 626, + 1762 + ], + "score": 0.95 + }, + { + "category_id": 8, + "poly": [ + 516, + 1541, + 1179, + 1541, + 1179, + 1617, + 516, + 1617 + ], + "score": 0.944 + }, + { + "category_id": 1, + "poly": [ + 297, + 962, + 458, + 962, + 458, + 994, + 297, + 994 + ], + "score": 0.934 + }, + { + "category_id": 8, + "poly": [ + 550, + 909, + 1151, + 909, + 1151, + 953, + 550, + 953 + ], + "score": 0.933 + }, + 
{ + "category_id": 1, + "poly": [ + 298, + 715, + 812, + 715, + 812, + 749, + 298, + 749 + ], + "score": 0.93 + }, + { + "category_id": 1, + "poly": [ + 299, + 1642, + 764, + 1642, + 764, + 1675, + 299, + 1675 + ], + "score": 0.929 + }, + { + "category_id": 1, + "poly": [ + 296, + 870, + 447, + 870, + 447, + 900, + 296, + 900 + ], + "score": 0.927 + }, + { + "category_id": 1, + "poly": [ + 298, + 229, + 741, + 229, + 741, + 262, + 298, + 262 + ], + "score": 0.927 + }, + { + "category_id": 1, + "poly": [ + 296, + 1497, + 417, + 1497, + 417, + 1530, + 296, + 1530 + ], + "score": 0.924 + }, + { + "category_id": 2, + "poly": [ + 297, + 73, + 858, + 73, + 858, + 106, + 297, + 106 + ], + "score": 0.923 + }, + { + "category_id": 1, + "poly": [ + 296, + 323, + 417, + 323, + 417, + 355, + 296, + 355 + ], + "score": 0.922 + }, + { + "category_id": 1, + "poly": [ + 296, + 1770, + 417, + 1770, + 417, + 1802, + 296, + 1802 + ], + "score": 0.922 + }, + { + "category_id": 1, + "poly": [ + 296, + 1113, + 360, + 1113, + 360, + 1144, + 296, + 1144 + ], + "score": 0.917 + }, + { + "category_id": 8, + "poly": [ + 370, + 1011, + 1329, + 1011, + 1329, + 1101, + 370, + 1101 + ], + "score": 0.91 + }, + { + "category_id": 8, + "poly": [ + 458, + 271, + 1238, + 271, + 1238, + 313, + 458, + 313 + ], + "score": 0.902 + }, + { + "category_id": 2, + "poly": [ + 834, + 2087, + 866, + 2087, + 866, + 2114, + 834, + 2114 + ], + "score": 0.705 + }, + { + "category_id": 2, + "poly": [ + 834, + 2087, + 866, + 2087, + 866, + 2114, + 834, + 2114 + ], + "score": 0.331 + }, + { + "category_id": 14, + "poly": [ + 329, + 362, + 1380, + 362, + 1380, + 707, + 329, + 707 + ], + "score": 0.94, + "latex": "\\begin{array} { c } { { V _ { k + 1 } + \\alpha \\rho U _ { k } + \\alpha W _ { k } \\leq ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) V _ { k } + C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } } } \\\\ { { \\Longleftrightarrow V _ { k + 1 } + \\alpha \\rho \\displaystyle 
\\sum _ { j = 1 } ^ { k } U _ { j } + \\alpha \\displaystyle \\sum _ { j = 1 } ^ { k } W _ { j } \\leq ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) V _ { k } + \\alpha \\rho \\displaystyle \\sum _ { j = 1 } ^ { k - 1 } U _ { j } + \\alpha \\displaystyle \\sum _ { j = 1 } ^ { k - 1 } W _ { j } } } \\\\ { { \\qquad + C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } } } \\\\ { { \\leq ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) \\left[ V _ { k } + \\alpha \\rho \\displaystyle \\sum _ { j = 1 } ^ { k - 1 } U _ { j } + \\alpha \\displaystyle \\sum _ { j = 1 } ^ { k - 1 } W _ { j } \\right] } } \\\\ { { \\qquad + C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } , } } \\end{array}" + }, + { + "category_id": 14, + "poly": [ + 628, + 1685, + 1067, + 1685, + 1067, + 1761, + 628, + 1761 + ], + "score": 0.94, + "latex": "\\rho = \\operatorname* { m i n } \\left\\{ K ^ { - 1 / 4 } , \\frac { 1 } { 2 L } \\right\\} \\leq \\frac { 1 } { K ^ { 1 / 4 } } \\leq 1 ." + }, + { + "category_id": 14, + "poly": [ + 510, + 1156, + 1186, + 1156, + 1186, + 1495, + 510, + 1495 + ], + "score": 0.94, + "latex": "\\begin{array} { r l r } { { \\sum _ { j = 1 } ^ { k } ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { k - j } = \\sum _ { j = 0 } ^ { k - 1 } ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { j } } } \\\\ & { } & { = \\frac { ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { k } - 1 } { ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) - 1 } } \\\\ & { } & { = \\frac { ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { k } - 1 } { C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } } } \\\\ & { } & { \\leq \\frac { ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { k } } { C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } } . 
} \\end{array}" + }, + { + "category_id": 14, + "poly": [ + 442, + 1810, + 1256, + 1810, + 1256, + 2034, + 442, + 2034 + ], + "score": 0.94, + "latex": "\\begin{array} { l } { \\displaystyle \\alpha \\rho \\sum _ { j = 1 } ^ { K } ( U _ { j } + W _ { j } ) \\leq \\alpha \\rho \\sum _ { j = 1 } ^ { K } U _ { j } + \\alpha \\sum _ { j = 1 } ^ { K } W _ { j } } \\\\ { \\leq R _ { K + 1 } } \\\\ { \\leq ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { K } \\left( R _ { 1 } + \\frac { C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } } { C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } } \\right) . } \\end{array}" + }, + { + "category_id": 14, + "poly": [ + 648, + 758, + 1049, + 758, + 1049, + 858, + 648, + 858 + ], + "score": 0.94, + "latex": "R _ { k } = V _ { k } + \\alpha \\rho \\sum _ { j = 1 } ^ { k - 1 } U _ { j } + \\alpha \\sum _ { j = 1 } ^ { k - 1 } W _ { j } ," + }, + { + "category_id": 14, + "poly": [ + 515, + 1543, + 1182, + 1543, + 1182, + 1616, + 515, + 1616 + ], + "score": 0.93, + "latex": "R _ { k + 1 } \\leq ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { k } \\left( R _ { 1 } + { \\frac { C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } } { C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } } } \\right) ." + }, + { + "category_id": 13, + "poly": [ + 614, + 1644, + 692, + 1644, + 692, + 1674, + 614, + 1674 + ], + "score": 0.93, + "latex": "K \\geq 1" + }, + { + "category_id": 14, + "poly": [ + 370, + 1008, + 1329, + 1008, + 1329, + 1104, + 370, + 1104 + ], + "score": 0.92, + "latex": "R _ { k + 1 } \\leq ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { k } R _ { 1 } + ( C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } ) \\sum _ { j = 1 } ^ { k } ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { k - j } ." 
+ }, + { + "category_id": 13, + "poly": [ + 575, + 717, + 714, + 717, + 714, + 748, + 575, + 748 + ], + "score": 0.91, + "latex": "U _ { k } , W _ { k } \\ge 0" + }, + { + "category_id": 14, + "poly": [ + 459, + 270, + 1234, + 270, + 1234, + 313, + 459, + 313 + ], + "score": 0.89, + "latex": "\\begin{array} { r } { V _ { k + 1 } \\leq \\big ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } \\big ) V _ { k } - \\alpha \\rho U _ { k } - \\alpha W _ { k } + C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } . } \\end{array}" + }, + { + "category_id": 14, + "poly": [ + 550, + 909, + 1145, + 909, + 1145, + 951, + 550, + 951 + ], + "score": 0.88, + "latex": "R _ { k + 1 } \\leq { \\left( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } \\right) } R _ { k } + C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } ," + }, + { + "category_id": 15, + "poly": [ + 297.0, + 72.0, + 858.0, + 72.0, + 858.0, + 108.0, + 297.0, + 108.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 828.0, + 2083.0, + 872.0, + 2083.0, + 872.0, + 2123.0, + 828.0, + 2123.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 828.0, + 2083.0, + 872.0, + 2083.0, + 872.0, + 2123.0, + 828.0, + 2123.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 960.0, + 460.0, + 960.0, + 460.0, + 998.0, + 295.0, + 998.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 710.0, + 574.0, + 710.0, + 574.0, + 754.0, + 294.0, + 754.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 715.0, + 710.0, + 813.0, + 710.0, + 813.0, + 754.0, + 715.0, + 754.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1642.0, + 613.0, + 1642.0, + 613.0, + 1677.0, + 295.0, + 1677.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 693.0, + 1642.0, + 764.0, + 1642.0, + 764.0, + 
1677.0, + 693.0, + 1677.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 868.0, + 450.0, + 868.0, + 450.0, + 903.0, + 294.0, + 903.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 229.0, + 741.0, + 229.0, + 741.0, + 265.0, + 297.0, + 265.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1493.0, + 422.0, + 1493.0, + 422.0, + 1535.0, + 293.0, + 1535.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 319.0, + 422.0, + 319.0, + 422.0, + 359.0, + 293.0, + 359.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1766.0, + 422.0, + 1766.0, + 422.0, + 1806.0, + 293.0, + 1806.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1109.0, + 366.0, + 1109.0, + 366.0, + 1151.0, + 292.0, + 1151.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 25, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 296, + 1849, + 1406, + 1849, + 1406, + 2036, + 296, + 2036 + ], + "score": 0.981 + }, + { + "category_id": 1, + "poly": [ + 297, + 1516, + 1405, + 1516, + 1405, + 1612, + 297, + 1612 + ], + "score": 0.974 + }, + { + "category_id": 8, + "poly": [ + 468, + 685, + 1233, + 685, + 1233, + 878, + 468, + 878 + ], + "score": 0.973 + }, + { + "category_id": 8, + "poly": [ + 666, + 452, + 1034, + 452, + 1034, + 531, + 666, + 531 + ], + "score": 0.962 + }, + { + "category_id": 8, + "poly": [ + 634, + 1752, + 1061, + 1752, + 1061, + 1838, + 634, + 1838 + ], + "score": 0.957 + }, + { + "category_id": 8, + "poly": [ + 467, + 1030, + 1230, + 1030, + 1230, + 1116, + 467, + 1116 + ], + "score": 0.953 + }, + { + "category_id": 1, + "poly": [ + 295, + 942, + 1403, + 942, + 1403, + 1013, + 295, + 1013 + ], + "score": 0.953 + }, + { + "category_id": 8, + "poly": [ + 361, + 1191, + 1333, + 1191, + 1333, + 
1275, + 361, + 1275 + ], + "score": 0.952 + }, + { + "category_id": 8, + "poly": [ + 440, + 279, + 1259, + 279, + 1259, + 367, + 440, + 367 + ], + "score": 0.951 + }, + { + "category_id": 8, + "poly": [ + 675, + 584, + 1024, + 584, + 1024, + 628, + 675, + 628 + ], + "score": 0.944 + }, + { + "category_id": 1, + "poly": [ + 298, + 1339, + 1401, + 1339, + 1401, + 1405, + 298, + 1405 + ], + "score": 0.94 + }, + { + "category_id": 1, + "poly": [ + 296, + 1136, + 919, + 1136, + 919, + 1174, + 296, + 1174 + ], + "score": 0.938 + }, + { + "category_id": 1, + "poly": [ + 296, + 1703, + 863, + 1703, + 863, + 1736, + 296, + 1736 + ], + "score": 0.931 + }, + { + "category_id": 1, + "poly": [ + 298, + 640, + 677, + 640, + 677, + 673, + 298, + 673 + ], + "score": 0.928 + }, + { + "category_id": 1, + "poly": [ + 298, + 403, + 686, + 403, + 686, + 438, + 298, + 438 + ], + "score": 0.928 + }, + { + "category_id": 2, + "poly": [ + 297, + 73, + 858, + 73, + 858, + 106, + 297, + 106 + ], + "score": 0.925 + }, + { + "category_id": 1, + "poly": [ + 297, + 1293, + 453, + 1293, + 453, + 1326, + 297, + 1326 + ], + "score": 0.925 + }, + { + "category_id": 0, + "poly": [ + 300, + 1448, + 766, + 1448, + 766, + 1484, + 300, + 1484 + ], + "score": 0.922 + }, + { + "category_id": 1, + "poly": [ + 297, + 540, + 447, + 540, + 447, + 572, + 297, + 572 + ], + "score": 0.922 + }, + { + "category_id": 1, + "poly": [ + 298, + 229, + 715, + 229, + 715, + 263, + 298, + 263 + ], + "score": 0.921 + }, + { + "category_id": 1, + "poly": [ + 292, + 892, + 1287, + 892, + 1287, + 930, + 292, + 930 + ], + "score": 0.92 + }, + { + "category_id": 9, + "poly": [ + 1351, + 824, + 1401, + 824, + 1401, + 855, + 1351, + 855 + ], + "score": 0.89 + }, + { + "category_id": 2, + "poly": [ + 834, + 2087, + 865, + 2087, + 865, + 2112, + 834, + 2112 + ], + "score": 0.878 + }, + { + "category_id": 9, + "poly": [ + 1351, + 308, + 1401, + 308, + 1401, + 339, + 1351, + 339 + ], + "score": 0.868 + }, + { + "category_id": 1, + 
"poly": [ + 298, + 1645, + 1107, + 1645, + 1107, + 1680, + 298, + 1680 + ], + "score": 0.735 + }, + { + "category_id": 0, + "poly": [ + 298, + 1645, + 1107, + 1645, + 1107, + 1680, + 298, + 1680 + ], + "score": 0.206 + }, + { + "category_id": 14, + "poly": [ + 467, + 683, + 1231, + 683, + 1231, + 881, + 467, + 881 + ], + "score": 0.94, + "latex": "\\begin{array} { r } { \\displaystyle \\frac { 1 } { K } \\sum _ { j = 1 } ^ { K } ( U _ { j } + W _ { j } ) \\leq \\frac { \\left( 1 + \\frac { C _ { f } ( C _ { f } C _ { 1 } + C _ { 3 } ) } { K } \\right) ^ { K } } { \\alpha \\rho K } \\left( R _ { 1 } + \\frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } \\right) } \\\\ { \\leq \\frac { \\exp ( C _ { f } ( C _ { f } C _ { 1 } + C _ { 3 } ) ) } { \\alpha \\rho K } \\left( R _ { 1 } + \\frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } \\right) , } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 1034, + 1340, + 1172, + 1340, + 1172, + 1375, + 1034, + 1375 + ], + "score": 0.94, + "latex": "\\rho = ( 2 L ) ^ { - 1 }" + }, + { + "category_id": 14, + "poly": [ + 635, + 1748, + 1062, + 1748, + 1062, + 1840, + 635, + 1840 + ], + "score": 0.94, + "latex": "{ \\mathrm { F i n d ~ } } z \\in \\mathbb { R } ^ { d } : 0 \\in \\sum _ { i = 1 } ^ { n } A _ { i } ( z ) + B ( z ) ." + }, + { + "category_id": 14, + "poly": [ + 664, + 451, + 1034, + 451, + 1034, + 530, + 664, + 530 + ], + "score": 0.94, + "latex": "\\frac { C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } } { C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } } = \\frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } ." 
+ }, + { + "category_id": 14, + "poly": [ + 439, + 274, + 1257, + 274, + 1257, + 372, + 439, + 372 + ], + "score": 0.94, + "latex": "\\frac { 1 } { K } \\sum _ { j = 1 } ^ { K } ( U _ { j } + W _ { j } ) \\leq \\frac { ( 1 + C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } ) ^ { K } } { \\alpha \\rho K } \\left( R _ { 1 } + \\frac { C _ { 2 } \\alpha ^ { 2 } + C _ { 4 } \\alpha \\rho ^ { 2 } } { C _ { 1 } \\alpha ^ { 2 } + C _ { 3 } \\alpha \\rho ^ { 2 } } \\right) ," + }, + { + "category_id": 13, + "poly": [ + 1076, + 892, + 1277, + 892, + 1277, + 930, + 1076, + 930 + ], + "score": 0.94, + "latex": "( 1 + t / K ) ^ { K } \\leq e ^ { t }" + }, + { + "category_id": 13, + "poly": [ + 408, + 403, + 526, + 403, + 526, + 440, + 408, + 440 + ], + "score": 0.93, + "latex": "\\alpha = C _ { f } \\rho ^ { 2 }" + }, + { + "category_id": 13, + "poly": [ + 661, + 892, + 925, + 892, + 925, + 930, + 661, + 930 + ], + "score": 0.93, + "latex": "t \\ge 0 , 1 + t / K \\le e ^ { t / K }" + }, + { + "category_id": 14, + "poly": [ + 466, + 1024, + 1233, + 1024, + 1233, + 1122, + 466, + 1122 + ], + "score": 0.93, + "latex": "\\frac { 1 } { K } \\sum _ { j = 1 } ^ { K } ( U _ { j } + W _ { j } ) \\leq \\frac { \\exp ( C _ { f } ( C _ { f } C _ { 1 } + C _ { 3 } ) ) } { C _ { f } K ^ { 1 / 4 } } \\left( R _ { 1 } + \\frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } \\right) ." 
+ }, + { + "category_id": 13, + "poly": [ + 364, + 978, + 494, + 978, + 494, + 1013, + 364, + 1013 + ], + "score": 0.93, + "latex": "K \\geq ( 2 L ) ^ { 4 }" + }, + { + "category_id": 13, + "poly": [ + 843, + 943, + 979, + 943, + 979, + 979, + 843, + 979 + ], + "score": 0.93, + "latex": "\\rho = K ^ { - 1 / 4 }" + }, + { + "category_id": 13, + "poly": [ + 430, + 1136, + 793, + 1136, + 793, + 1173, + 430, + 1173 + ], + "score": 0.92, + "latex": "G _ { k } \\leq \\operatorname* { m a x } \\{ \\tau , \\tau ^ { - 1 } \\} \\left( U _ { k } + W _ { k } \\right)" + }, + { + "category_id": 13, + "poly": [ + 371, + 1340, + 502, + 1340, + 502, + 1375, + 371, + 1375 + ], + "score": 0.92, + "latex": "K < ( 2 L ) ^ { 4 }" + }, + { + "category_id": 13, + "poly": [ + 1070, + 980, + 1200, + 980, + 1200, + 1013, + 1070, + 1013 + ], + "score": 0.92, + "latex": "K \\geq ( 2 L ) ^ { 4 }" + }, + { + "category_id": 13, + "poly": [ + 1033, + 942, + 1205, + 942, + 1205, + 979, + 1033, + 979 + ], + "score": 0.91, + "latex": "\\alpha = C _ { f } K ^ { - 1 / 2 }" + }, + { + "category_id": 14, + "poly": [ + 674, + 582, + 1024, + 582, + 1024, + 628, + 674, + 628 + ], + "score": 0.91, + "latex": "\\rho \\leq K ^ { - \\frac { 1 } { 4 } } \\implies \\alpha \\leq C _ { f } K ^ { - \\frac { 1 } { 2 } } ." 
+ }, + { + "category_id": 14, + "poly": [ + 366, + 1186, + 1331, + 1186, + 1331, + 1283, + 366, + 1283 + ], + "score": 0.9, + "latex": "\\frac { 1 } { K } \\sum _ { j = 1 } ^ { K } \\mathbb { E } [ G _ { j } ] \\leq \\frac { \\operatorname* { m a x } \\{ \\tau , \\tau ^ { - 1 } \\} \\exp { ( C _ { f } ( C _ { f } C _ { 1 } + C _ { 3 } ) ) } } { C _ { f } K ^ { 1 / 4 } } \\left( \\| p ^ { 1 } - p ^ { * } \\| ^ { 2 } + \\frac { C _ { f } C _ { 2 } + C _ { 4 } } { C _ { f } C _ { 1 } + C _ { 3 } } \\right) ," + }, + { + "category_id": 13, + "poly": [ + 1225, + 1339, + 1401, + 1339, + 1401, + 1376, + 1225, + 1376 + ], + "score": 0.87, + "latex": "\\alpha = C _ { f } ( 2 L ) ^ { - 2 }" + }, + { + "category_id": 13, + "poly": [ + 670, + 948, + 699, + 948, + 699, + 974, + 670, + 974 + ], + "score": 0.84, + "latex": "K" + }, + { + "category_id": 13, + "poly": [ + 530, + 232, + 590, + 232, + 590, + 263, + 530, + 263 + ], + "score": 0.8, + "latex": "\\alpha \\rho K" + }, + { + "category_id": 13, + "poly": [ + 1038, + 2012, + 1057, + 2012, + 1057, + 2030, + 1038, + 2030 + ], + "score": 0.75, + "latex": "n" + }, + { + "category_id": 13, + "poly": [ + 1014, + 1914, + 1038, + 1914, + 1038, + 1939, + 1014, + 1939 + ], + "score": 0.27, + "latex": "\\&" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 72.0, + 858.0, + 72.0, + 858.0, + 108.0, + 297.0, + 108.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1444.0, + 771.0, + 1444.0, + 771.0, + 1491.0, + 291.0, + 1491.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 830.0, + 2085.0, + 869.0, + 2085.0, + 869.0, + 2124.0, + 830.0, + 2124.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1646.0, + 1108.0, + 1646.0, + 1108.0, + 1681.0, + 294.0, + 1681.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1848.0, + 1406.0, + 1848.0, + 1406.0, + 1885.0, + 293.0, + 1885.0 + ], + "score": 1.0, + 
"text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1882.0, + 1404.0, + 1882.0, + 1404.0, + 1914.0, + 295.0, + 1914.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1910.0, + 1013.0, + 1910.0, + 1013.0, + 1945.0, + 293.0, + 1945.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1039.0, + 1910.0, + 1407.0, + 1910.0, + 1407.0, + 1945.0, + 1039.0, + 1945.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1944.0, + 1404.0, + 1944.0, + 1404.0, + 1976.0, + 295.0, + 1976.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1971.0, + 1404.0, + 1971.0, + 1404.0, + 2008.0, + 293.0, + 2008.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 2004.0, + 1037.0, + 2004.0, + 1037.0, + 2036.0, + 295.0, + 2036.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1058.0, + 2004.0, + 1068.0, + 2004.0, + 1068.0, + 2036.0, + 1058.0, + 2036.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1515.0, + 1405.0, + 1515.0, + 1405.0, + 1553.0, + 294.0, + 1553.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1547.0, + 1403.0, + 1547.0, + 1403.0, + 1584.0, + 292.0, + 1584.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1573.0, + 376.0, + 1573.0, + 376.0, + 1616.0, + 292.0, + 1616.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 937.0, + 669.0, + 937.0, + 669.0, + 984.0, + 290.0, + 984.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 700.0, + 937.0, + 842.0, + 937.0, + 842.0, + 984.0, + 700.0, + 984.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 980.0, + 937.0, + 1032.0, + 937.0, + 1032.0, + 984.0, + 980.0, + 984.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 1206.0, + 937.0, + 1408.0, + 937.0, + 1408.0, + 984.0, + 1206.0, + 984.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 975.0, + 363.0, + 975.0, + 363.0, + 1016.0, + 294.0, + 1016.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 495.0, + 975.0, + 1069.0, + 975.0, + 1069.0, + 1016.0, + 495.0, + 1016.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1201.0, + 975.0, + 1263.0, + 975.0, + 1263.0, + 1016.0, + 1201.0, + 1016.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1332.0, + 370.0, + 1332.0, + 370.0, + 1380.0, + 291.0, + 1380.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 503.0, + 1332.0, + 1033.0, + 1332.0, + 1033.0, + 1380.0, + 503.0, + 1380.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1173.0, + 1332.0, + 1224.0, + 1332.0, + 1224.0, + 1380.0, + 1173.0, + 1380.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1402.0, + 1332.0, + 1407.0, + 1332.0, + 1407.0, + 1380.0, + 1402.0, + 1380.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1368.0, + 408.0, + 1368.0, + 408.0, + 1407.0, + 293.0, + 1407.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1373.0, + 1375.0, + 1406.0, + 1375.0, + 1406.0, + 1404.0, + 1373.0, + 1404.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1136.0, + 429.0, + 1136.0, + 429.0, + 1177.0, + 295.0, + 1177.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 794.0, + 1136.0, + 918.0, + 1136.0, + 918.0, + 1177.0, + 794.0, + 1177.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1697.0, + 864.0, + 1697.0, + 864.0, + 1745.0, + 293.0, + 1745.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + 
"poly": [ + 297.0, + 639.0, + 676.0, + 639.0, + 676.0, + 675.0, + 297.0, + 675.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 401.0, + 407.0, + 401.0, + 407.0, + 442.0, + 294.0, + 442.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 527.0, + 401.0, + 689.0, + 401.0, + 689.0, + 442.0, + 527.0, + 442.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1290.0, + 456.0, + 1290.0, + 456.0, + 1329.0, + 293.0, + 1329.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 537.0, + 451.0, + 537.0, + 451.0, + 575.0, + 295.0, + 575.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 228.0, + 529.0, + 228.0, + 529.0, + 266.0, + 294.0, + 266.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 591.0, + 228.0, + 716.0, + 228.0, + 716.0, + 266.0, + 591.0, + 266.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 887.0, + 660.0, + 887.0, + 660.0, + 935.0, + 292.0, + 935.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 926.0, + 887.0, + 1075.0, + 887.0, + 1075.0, + 935.0, + 926.0, + 935.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1278.0, + 887.0, + 1291.0, + 887.0, + 1291.0, + 935.0, + 1278.0, + 935.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1646.0, + 1108.0, + 1646.0, + 1108.0, + 1681.0, + 294.0, + 1681.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 26, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 296, + 1270, + 1405, + 1270, + 1405, + 1557, + 296, + 1557 + ], + "score": 0.984 + }, + { + "category_id": 1, + "poly": [ + 298, + 1103, + 1402, + 1103, + 1402, + 1261, + 298, + 1261 + ], + "score": 0.977 + }, + { + "category_id": 1, + "poly": [ + 
305, + 419, + 1394, + 419, + 1394, + 545, + 305, + 545 + ], + "score": 0.972 + }, + { + "category_id": 8, + "poly": [ + 552, + 1726, + 1144, + 1726, + 1144, + 1888, + 552, + 1888 + ], + "score": 0.971 + }, + { + "category_id": 8, + "poly": [ + 462, + 941, + 1233, + 941, + 1233, + 1092, + 462, + 1092 + ], + "score": 0.97 + }, + { + "category_id": 8, + "poly": [ + 475, + 556, + 1222, + 556, + 1222, + 691, + 475, + 691 + ], + "score": 0.965 + }, + { + "category_id": 8, + "poly": [ + 541, + 1942, + 1157, + 1942, + 1157, + 2029, + 541, + 2029 + ], + "score": 0.959 + }, + { + "category_id": 8, + "poly": [ + 509, + 274, + 1189, + 274, + 1189, + 409, + 509, + 409 + ], + "score": 0.957 + }, + { + "category_id": 1, + "poly": [ + 298, + 1643, + 1404, + 1643, + 1404, + 1716, + 298, + 1716 + ], + "score": 0.95 + }, + { + "category_id": 1, + "poly": [ + 298, + 1895, + 779, + 1895, + 779, + 1929, + 298, + 1929 + ], + "score": 0.934 + }, + { + "category_id": 8, + "poly": [ + 597, + 755, + 1100, + 755, + 1100, + 792, + 597, + 792 + ], + "score": 0.928 + }, + { + "category_id": 1, + "poly": [ + 297, + 804, + 647, + 804, + 647, + 836, + 297, + 836 + ], + "score": 0.927 + }, + { + "category_id": 2, + "poly": [ + 297, + 73, + 858, + 73, + 858, + 106, + 297, + 106 + ], + "score": 0.924 + }, + { + "category_id": 0, + "poly": [ + 298, + 1588, + 1040, + 1588, + 1040, + 1623, + 298, + 1623 + ], + "score": 0.918 + }, + { + "category_id": 1, + "poly": [ + 298, + 229, + 746, + 229, + 746, + 263, + 298, + 263 + ], + "score": 0.918 + }, + { + "category_id": 8, + "poly": [ + 455, + 846, + 1237, + 846, + 1237, + 888, + 455, + 888 + ], + "score": 0.916 + }, + { + "category_id": 1, + "poly": [ + 290, + 706, + 1261, + 706, + 1261, + 743, + 290, + 743 + ], + "score": 0.914 + }, + { + "category_id": 9, + "poly": [ + 1351, + 1002, + 1401, + 1002, + 1401, + 1032, + 1351, + 1032 + ], + "score": 0.897 + }, + { + "category_id": 9, + "poly": [ + 1352, + 756, + 1401, + 756, + 1401, + 787, + 1352, + 787 + ], + 
"score": 0.89 + }, + { + "category_id": 9, + "poly": [ + 1351, + 1791, + 1401, + 1791, + 1401, + 1822, + 1351, + 1822 + ], + "score": 0.89 + }, + { + "category_id": 9, + "poly": [ + 1351, + 852, + 1401, + 852, + 1401, + 883, + 1351, + 883 + ], + "score": 0.889 + }, + { + "category_id": 9, + "poly": [ + 1351, + 1968, + 1401, + 1968, + 1401, + 1999, + 1351, + 1999 + ], + "score": 0.886 + }, + { + "category_id": 2, + "poly": [ + 834, + 2087, + 865, + 2087, + 865, + 2113, + 834, + 2113 + ], + "score": 0.874 + }, + { + "category_id": 1, + "poly": [ + 297, + 898, + 699, + 898, + 699, + 930, + 297, + 930 + ], + "score": 0.852 + }, + { + "category_id": 8, + "poly": [ + 936, + 325, + 1164, + 325, + 1164, + 410, + 936, + 410 + ], + "score": 0.192 + }, + { + "category_id": 13, + "poly": [ + 689, + 1425, + 807, + 1425, + 807, + 1459, + 689, + 1459 + ], + "score": 0.94, + "latex": "\\| \\nabla f ( x ) \\| ^ { 2 }" + }, + { + "category_id": 14, + "poly": [ + 460, + 942, + 1236, + 942, + 1236, + 1093, + 460, + 1093 + ], + "score": 0.94, + "latex": "\\mathcal { B } ( w _ { 1 } , \\dots , w _ { n } , z ) \\mapsto \\left[ \\begin{array} { c c c c } { 0 } & { \\cdots } & { 0 } & { - I } \\\\ { \\vdots } & { \\ddots } & { \\vdots } & { \\vdots } \\\\ { 0 } & { \\cdots } & { 0 } & { - I } \\\\ { I } & { \\cdots } & { I } & { 0 } \\end{array} \\right] \\left[ \\begin{array} { c } { w _ { 1 } } \\\\ { \\vdots } \\\\ { w _ { n } } \\\\ { z } \\end{array} \\right] + \\left[ \\begin{array} { c } { 0 } \\\\ { \\vdots } \\\\ { 0 } \\\\ { B ( z ) } \\end{array} \\right] ." 
+ }, + { + "category_id": 14, + "poly": [ + 552, + 1724, + 1146, + 1724, + 1146, + 1889, + 552, + 1889 + ], + "score": 0.94, + "latex": "\\begin{array} { r } { v ^ { k } \\doteq \\left[ \\begin{array} { c } { x _ { 1 } ^ { k } - z ^ { k } } \\\\ { \\vdots } \\\\ { x _ { n } ^ { k } - z ^ { k } } \\\\ { B ( z ^ { k } ) + \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } } \\end{array} \\right] \\in \\mathcal { T } ( y _ { 1 } ^ { k } , \\dotsc , y _ { n } ^ { k } , z ^ { k } ) . } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 778, + 1365, + 916, + 1365, + 916, + 1398, + 778, + 1398 + ], + "score": 0.94, + "latex": "v _ { 2 } \\in \\mathcal { T } ( q _ { 2 } )" + }, + { + "category_id": 13, + "poly": [ + 409, + 1517, + 589, + 1517, + 589, + 1555, + 409, + 1555 + ], + "score": 0.93, + "latex": "\\mathrm { d i s t } ^ { 2 } ( 0 , \\mathcal { T } ( q _ { 1 } ) )" + }, + { + "category_id": 13, + "poly": [ + 818, + 1678, + 983, + 1678, + 983, + 1716, + 818, + 1716 + ], + "score": 0.93, + "latex": "\\hat { x _ { i } ^ { k } } \\in A _ { i } ^ { - 1 } ( y _ { i } ^ { k } )" + }, + { + "category_id": 13, + "poly": [ + 1109, + 1645, + 1253, + 1645, + 1253, + 1681, + 1109, + 1681 + ], + "score": 0.93, + "latex": "y _ { i } ^ { k } \\in A _ { i } ( x _ { i } ^ { k } )" + }, + { + "category_id": 14, + "poly": [ + 542, + 1939, + 1156, + 1939, + 1156, + 2031, + 542, + 2031 + ], + "score": 0.93, + "latex": "R _ { k } \\dot { = } \\| v ^ { k } \\| ^ { 2 } = \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + \\left\\| B ( z ^ { k } ) + \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\right\\| ^ { 2 }" + }, + { + "category_id": 13, + "poly": [ + 638, + 1226, + 751, + 1226, + 751, + 1260, + 638, + 1260 + ], + "score": 0.93, + "latex": "\\bar { 0 } \\in \\mathcal { T } ( q )" + }, + { + "category_id": 13, + "poly": [ + 330, + 1486, + 406, + 1486, + 406, + 1520, + 330, + 1520 + ], + "score": 0.93, + "latex": "\\mathcal { T } ( q _ { 1 } )" + }, + { + 
"category_id": 13, + "poly": [ + 802, + 1645, + 895, + 1645, + 895, + 1681, + 802, + 1681 + ], + "score": 0.92, + "latex": "( x _ { i } ^ { k } , y _ { i } ^ { k } )" + }, + { + "category_id": 13, + "poly": [ + 347, + 1306, + 418, + 1306, + 418, + 1332, + 347, + 1332 + ], + "score": 0.92, + "latex": "v = 0" + }, + { + "category_id": 13, + "poly": [ + 956, + 1457, + 1032, + 1457, + 1032, + 1490, + 956, + 1490 + ], + "score": 0.92, + "latex": "\\mathcal { T } ( q _ { 1 } )" + }, + { + "category_id": 13, + "poly": [ + 938, + 1303, + 995, + 1303, + 995, + 1337, + 938, + 1337 + ], + "score": 0.92, + "latex": "\\| \\bar { v } \\| ^ { 2 }" + }, + { + "category_id": 13, + "poly": [ + 296, + 1228, + 527, + 1228, + 527, + 1261, + 296, + 1261 + ], + "score": 0.92, + "latex": "( w _ { 1 } , \\dots , w _ { n } ) \\in \\mathbb { R } ^ { n d }" + }, + { + "category_id": 13, + "poly": [ + 439, + 1274, + 502, + 1274, + 502, + 1307, + 439, + 1307 + ], + "score": 0.92, + "latex": "( q , v )" + }, + { + "category_id": 13, + "poly": [ + 514, + 1395, + 688, + 1395, + 688, + 1429, + 514, + 1429 + ], + "score": 0.92, + "latex": "\\| v _ { 1 } \\| ^ { 2 } < \\| v _ { 2 } \\| ^ { 2 }" + }, + { + "category_id": 14, + "poly": [ + 476, + 554, + 1223, + 554, + 1223, + 695, + 476, + 695 + ], + "score": 0.91, + "latex": "\\begin{array} { r l } { \\mathrm { F i n d ~ } ( w _ { 1 } , \\dots , w _ { n } , z ) \\in \\mathbb { R } ^ { ( n + 1 ) d } : } & { 0 \\in A _ { i } ^ { - 1 } ( w _ { i } ) - z , \\quad i \\in 1 . . n } \\\\ & { 0 \\in \\displaystyle \\sum _ { i = 1 } ^ { n } w _ { i } + B ( z ) . 
} \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 917, + 1194, + 1001, + 1194, + 1001, + 1224, + 917, + 1224 + ], + "score": 0.91, + "latex": "z \\in \\mathbb { R } ^ { d }" + }, + { + "category_id": 13, + "poly": [ + 706, + 1193, + 801, + 1193, + 801, + 1224, + 706, + 1224 + ], + "score": 0.91, + "latex": "\\mathbb { R } ^ { ( n + 1 ) \\bar { d } }" + }, + { + "category_id": 13, + "poly": [ + 596, + 1364, + 726, + 1364, + 726, + 1396, + 596, + 1396 + ], + "score": 0.91, + "latex": "v _ { 1 } \\in T ( q _ { 1 } )" + }, + { + "category_id": 13, + "poly": [ + 838, + 1228, + 1066, + 1228, + 1066, + 1261, + 838, + 1261 + ], + "score": 0.91, + "latex": "q = ( w _ { 1 } , \\dots , w _ { n } , z )" + }, + { + "category_id": 13, + "poly": [ + 527, + 1135, + 698, + 1135, + 698, + 1165, + 527, + 1165 + ], + "score": 0.91, + "latex": "\\mathcal { T } \\doteq \\mathcal { A } + \\mathcal { B }" + }, + { + "category_id": 14, + "poly": [ + 508, + 272, + 1188, + 272, + 1188, + 415, + 508, + 415 + ], + "score": 0.9, + "latex": "\\begin{array} { l l } { \\mathrm { F i n d } \\left( w _ { 1 } , \\ldots , w _ { n } , z \\right) \\in \\mathbb { R } ^ { \\left( n + 1 \\right) d } : } & { w _ { i } \\in A _ { i } ( z ) , \\quad i \\in { 1 . . n } } \\\\ & { \\quad \\displaystyle 0 \\in \\sum _ { i = 1 } ^ { n } w _ { i } + B ( z ) . } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 594, + 1649, + 687, + 1649, + 687, + 1677, + 594, + 1677 + ], + "score": 0.9, + "latex": "i \\in 1 . . 
n" + }, + { + "category_id": 14, + "poly": [ + 597, + 753, + 1099, + 753, + 1099, + 791, + 597, + 791 + ], + "score": 0.89, + "latex": "0 \\in \\mathcal { A } ( w _ { 1 } , \\ldots , w _ { n } , z ) + \\mathcal { B } ( w _ { 1 } , \\ldots , w _ { n } , z ) ," + }, + { + "category_id": 13, + "poly": [ + 541, + 485, + 606, + 485, + 606, + 515, + 541, + 515 + ], + "score": 0.89, + "latex": "w _ { n + 1 }" + }, + { + "category_id": 13, + "poly": [ + 848, + 706, + 1152, + 706, + 1152, + 743, + 848, + 743 + ], + "score": 0.89, + "latex": "( w _ { 1 } , \\ldots , w _ { n } , z ) \\in \\mathbb { R } ^ { ( n + 1 ) d }" + }, + { + "category_id": 13, + "poly": [ + 298, + 1196, + 411, + 1196, + 411, + 1229, + 298, + 1229 + ], + "score": 0.89, + "latex": "0 \\in \\mathcal { T } ( q )" + }, + { + "category_id": 14, + "poly": [ + 459, + 845, + 1240, + 845, + 1240, + 887, + 459, + 887 + ], + "score": 0.88, + "latex": "\\mathcal { A } ( w _ { 1 } , \\dots , w _ { n } , z ) \\mapsto A _ { 1 } ^ { - 1 } ( w _ { 1 } ) \\times A _ { 2 } ^ { - 1 } ( w _ { 2 } ) \\times \\dots \\times A _ { n } ^ { - 1 } ( w _ { n } ) \\times \\{ 0 \\}" + }, + { + "category_id": 13, + "poly": [ + 738, + 1271, + 794, + 1271, + 794, + 1306, + 738, + 1306 + ], + "score": 0.87, + "latex": "\\| v \\| ^ { 2 }" + }, + { + "category_id": 13, + "poly": [ + 1032, + 1492, + 1061, + 1492, + 1061, + 1517, + 1032, + 1517 + ], + "score": 0.86, + "latex": "v _ { 1 }" + }, + { + "category_id": 13, + "poly": [ + 430, + 1166, + 460, + 1166, + 460, + 1193, + 430, + 1193 + ], + "score": 0.86, + "latex": "\\mathcal { T }" + }, + { + "category_id": 13, + "poly": [ + 597, + 1106, + 627, + 1106, + 627, + 1132, + 597, + 1132 + ], + "score": 0.86, + "latex": "\\mathcal { B }" + }, + { + "category_id": 13, + "poly": [ + 611, + 1272, + 725, + 1272, + 725, + 1307, + 611, + 1307 + ], + "score": 0.85, + "latex": "v \\in \\mathcal { T } ( q )" + }, + { + "category_id": 13, + "poly": [ + 685, + 1491, + 714, + 1491, + 714, + 1517, + 685, 
+ 1517 + ], + "score": 0.85, + "latex": "v _ { 1 }" + }, + { + "category_id": 13, + "poly": [ + 371, + 806, + 403, + 806, + 403, + 832, + 371, + 832 + ], + "score": 0.85, + "latex": "\\mathcal { A }" + }, + { + "category_id": 13, + "poly": [ + 344, + 900, + 374, + 900, + 374, + 926, + 344, + 926 + ], + "score": 0.85, + "latex": "\\mathcal { B }" + }, + { + "category_id": 13, + "poly": [ + 377, + 1369, + 405, + 1369, + 405, + 1396, + 377, + 1396 + ], + "score": 0.85, + "latex": "q _ { 2 }" + }, + { + "category_id": 13, + "poly": [ + 1077, + 1369, + 1105, + 1369, + 1105, + 1396, + 1077, + 1396 + ], + "score": 0.85, + "latex": "q _ { 1 }" + }, + { + "category_id": 13, + "poly": [ + 453, + 1399, + 481, + 1399, + 481, + 1428, + 453, + 1428 + ], + "score": 0.84, + "latex": "q _ { 2 }" + }, + { + "category_id": 13, + "poly": [ + 1113, + 454, + 1135, + 454, + 1135, + 480, + 1113, + 480 + ], + "score": 0.84, + "latex": "s" + }, + { + "category_id": 13, + "poly": [ + 1239, + 1106, + 1271, + 1106, + 1271, + 1132, + 1239, + 1132 + ], + "score": 0.83, + "latex": "\\mathcal { A }" + }, + { + "category_id": 13, + "poly": [ + 976, + 423, + 999, + 423, + 999, + 450, + 976, + 450 + ], + "score": 0.83, + "latex": "s" + }, + { + "category_id": 13, + "poly": [ + 645, + 1458, + 664, + 1458, + 664, + 1488, + 645, + 1488 + ], + "score": 0.83, + "latex": "f" + }, + { + "category_id": 13, + "poly": [ + 298, + 1370, + 325, + 1370, + 325, + 1397, + 298, + 1397 + ], + "score": 0.83, + "latex": "q _ { 1 }" + }, + { + "category_id": 13, + "poly": [ + 869, + 1340, + 885, + 1340, + 885, + 1366, + 869, + 1366 + ], + "score": 0.81, + "latex": "q" + }, + { + "category_id": 13, + "poly": [ + 513, + 1309, + 530, + 1309, + 530, + 1336, + 513, + 1336 + ], + "score": 0.81, + "latex": "q" + }, + { + "category_id": 13, + "poly": [ + 455, + 1201, + 471, + 1201, + 471, + 1227, + 455, + 1227 + ], + "score": 0.78, + "latex": "q" + }, + { + "category_id": 13, + "poly": [ + 1319, + 1309, + 1336, + 1309, + 1336, + 
1336, + 1319, + 1336 + ], + "score": 0.78, + "latex": "q" + }, + { + "category_id": 13, + "poly": [ + 1249, + 1279, + 1267, + 1279, + 1267, + 1305, + 1249, + 1305 + ], + "score": 0.78, + "latex": "q" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 72.0, + 858.0, + 72.0, + 858.0, + 108.0, + 297.0, + 108.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1589.0, + 1042.0, + 1589.0, + 1042.0, + 1624.0, + 294.0, + 1624.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 830.0, + 2084.0, + 870.0, + 2084.0, + 870.0, + 2122.0, + 830.0, + 2122.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1271.0, + 438.0, + 1271.0, + 438.0, + 1312.0, + 293.0, + 1312.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 503.0, + 1271.0, + 610.0, + 1271.0, + 610.0, + 1312.0, + 503.0, + 1312.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 726.0, + 1271.0, + 737.0, + 1271.0, + 737.0, + 1312.0, + 726.0, + 1312.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 795.0, + 1271.0, + 1248.0, + 1271.0, + 1248.0, + 1312.0, + 795.0, + 1312.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1268.0, + 1271.0, + 1407.0, + 1271.0, + 1407.0, + 1312.0, + 1268.0, + 1312.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1302.0, + 346.0, + 1302.0, + 346.0, + 1340.0, + 294.0, + 1340.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 419.0, + 1302.0, + 512.0, + 1302.0, + 512.0, + 1340.0, + 419.0, + 1340.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 531.0, + 1302.0, + 937.0, + 1302.0, + 937.0, + 1340.0, + 531.0, + 1340.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 996.0, + 1302.0, + 1318.0, + 1302.0, + 1318.0, + 1340.0, + 996.0, + 1340.0 + ], + "score": 
1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1337.0, + 1302.0, + 1406.0, + 1302.0, + 1406.0, + 1340.0, + 1337.0, + 1340.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1334.0, + 868.0, + 1334.0, + 868.0, + 1370.0, + 293.0, + 1370.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 886.0, + 1334.0, + 1406.0, + 1334.0, + 1406.0, + 1370.0, + 886.0, + 1370.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1363.0, + 297.0, + 1363.0, + 297.0, + 1401.0, + 293.0, + 1401.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 326.0, + 1363.0, + 376.0, + 1363.0, + 376.0, + 1401.0, + 326.0, + 1401.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 406.0, + 1363.0, + 595.0, + 1363.0, + 595.0, + 1401.0, + 406.0, + 1401.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 727.0, + 1363.0, + 777.0, + 1363.0, + 777.0, + 1401.0, + 727.0, + 1401.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 917.0, + 1363.0, + 1076.0, + 1363.0, + 1076.0, + 1401.0, + 917.0, + 1401.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1106.0, + 1363.0, + 1406.0, + 1363.0, + 1406.0, + 1401.0, + 1106.0, + 1401.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1393.0, + 452.0, + 1393.0, + 452.0, + 1433.0, + 293.0, + 1433.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 482.0, + 1393.0, + 513.0, + 1393.0, + 513.0, + 1433.0, + 482.0, + 1433.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 689.0, + 1393.0, + 1406.0, + 1393.0, + 1406.0, + 1433.0, + 689.0, + 1433.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1424.0, + 688.0, + 1424.0, + 688.0, + 1462.0, + 294.0, + 1462.0 + ], + "score": 1.0, + "text": "" + }, 
+ { + "category_id": 15, + "poly": [ + 808.0, + 1424.0, + 1408.0, + 1424.0, + 1408.0, + 1462.0, + 808.0, + 1462.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1453.0, + 644.0, + 1453.0, + 644.0, + 1493.0, + 291.0, + 1493.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 665.0, + 1453.0, + 955.0, + 1453.0, + 955.0, + 1493.0, + 665.0, + 1493.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1033.0, + 1453.0, + 1406.0, + 1453.0, + 1406.0, + 1493.0, + 1033.0, + 1493.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1484.0, + 329.0, + 1484.0, + 329.0, + 1525.0, + 293.0, + 1525.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 407.0, + 1484.0, + 684.0, + 1484.0, + 684.0, + 1525.0, + 407.0, + 1525.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 715.0, + 1484.0, + 1031.0, + 1484.0, + 1031.0, + 1525.0, + 715.0, + 1525.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1062.0, + 1484.0, + 1406.0, + 1484.0, + 1406.0, + 1525.0, + 1062.0, + 1525.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1515.0, + 408.0, + 1515.0, + 408.0, + 1558.0, + 293.0, + 1558.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 590.0, + 1515.0, + 601.0, + 1515.0, + 601.0, + 1558.0, + 590.0, + 1558.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1104.0, + 596.0, + 1104.0, + 596.0, + 1138.0, + 296.0, + 1138.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 628.0, + 1104.0, + 1238.0, + 1104.0, + 1238.0, + 1138.0, + 628.0, + 1138.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1272.0, + 1104.0, + 1404.0, + 1104.0, + 1404.0, + 1138.0, + 1272.0, + 1138.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 294.0, + 1135.0, + 526.0, + 1135.0, + 526.0, + 1169.0, + 294.0, + 1169.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 699.0, + 1135.0, + 1404.0, + 1135.0, + 1404.0, + 1169.0, + 699.0, + 1169.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1165.0, + 429.0, + 1165.0, + 429.0, + 1198.0, + 296.0, + 1198.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 461.0, + 1165.0, + 1404.0, + 1165.0, + 1404.0, + 1198.0, + 461.0, + 1198.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 289.0, + 1186.0, + 297.0, + 1186.0, + 297.0, + 1236.0, + 289.0, + 1236.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 412.0, + 1186.0, + 454.0, + 1186.0, + 454.0, + 1236.0, + 412.0, + 1236.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 472.0, + 1186.0, + 705.0, + 1186.0, + 705.0, + 1236.0, + 472.0, + 1236.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 802.0, + 1186.0, + 916.0, + 1186.0, + 916.0, + 1236.0, + 802.0, + 1236.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1002.0, + 1186.0, + 1409.0, + 1186.0, + 1409.0, + 1236.0, + 1002.0, + 1236.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 289.0, + 1218.0, + 295.0, + 1218.0, + 295.0, + 1267.0, + 289.0, + 1267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 528.0, + 1218.0, + 637.0, + 1218.0, + 637.0, + 1267.0, + 528.0, + 1267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 752.0, + 1218.0, + 837.0, + 1218.0, + 837.0, + 1267.0, + 752.0, + 1267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1067.0, + 1218.0, + 1081.0, + 1218.0, + 1081.0, + 1267.0, + 1067.0, + 1267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + 
"poly": [ + 296.0, + 416.0, + 975.0, + 416.0, + 975.0, + 457.0, + 296.0, + 457.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1000.0, + 416.0, + 1401.0, + 416.0, + 1401.0, + 457.0, + 1000.0, + 457.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 300.0, + 453.0, + 1112.0, + 453.0, + 1112.0, + 486.0, + 300.0, + 486.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1136.0, + 453.0, + 1401.0, + 453.0, + 1401.0, + 486.0, + 1136.0, + 486.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 300.0, + 483.0, + 540.0, + 483.0, + 540.0, + 516.0, + 300.0, + 516.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 607.0, + 483.0, + 1399.0, + 483.0, + 1399.0, + 516.0, + 607.0, + 516.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 298.0, + 515.0, + 473.0, + 515.0, + 473.0, + 549.0, + 298.0, + 549.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1642.0, + 593.0, + 1642.0, + 593.0, + 1684.0, + 292.0, + 1684.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 688.0, + 1642.0, + 801.0, + 1642.0, + 801.0, + 1684.0, + 688.0, + 1684.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 896.0, + 1642.0, + 1108.0, + 1642.0, + 1108.0, + 1684.0, + 896.0, + 1684.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1254.0, + 1642.0, + 1404.0, + 1642.0, + 1404.0, + 1684.0, + 1254.0, + 1684.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1677.0, + 817.0, + 1677.0, + 817.0, + 1719.0, + 293.0, + 1719.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 984.0, + 1677.0, + 1146.0, + 1677.0, + 1146.0, + 1719.0, + 984.0, + 1719.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1893.0, + 781.0, + 
1893.0, + 781.0, + 1931.0, + 293.0, + 1931.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 798.0, + 370.0, + 798.0, + 370.0, + 842.0, + 294.0, + 842.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 404.0, + 798.0, + 648.0, + 798.0, + 648.0, + 842.0, + 404.0, + 842.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 225.0, + 747.0, + 225.0, + 747.0, + 267.0, + 295.0, + 267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 703.0, + 847.0, + 703.0, + 847.0, + 747.0, + 290.0, + 747.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1153.0, + 703.0, + 1266.0, + 703.0, + 1266.0, + 747.0, + 1153.0, + 747.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 898.0, + 343.0, + 898.0, + 343.0, + 934.0, + 297.0, + 934.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 375.0, + 898.0, + 697.0, + 898.0, + 697.0, + 934.0, + 375.0, + 934.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 27, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 8, + "poly": [ + 551, + 902, + 1146, + 902, + 1146, + 1063, + 551, + 1063 + ], + "score": 0.968 + }, + { + "category_id": 8, + "poly": [ + 469, + 303, + 1236, + 303, + 1236, + 733, + 469, + 733 + ], + "score": 0.967 + }, + { + "category_id": 8, + "poly": [ + 698, + 1901, + 1002, + 1901, + 1002, + 1965, + 698, + 1965 + ], + "score": 0.957 + }, + { + "category_id": 1, + "poly": [ + 292, + 226, + 1402, + 226, + 1402, + 294, + 292, + 294 + ], + "score": 0.956 + }, + { + "category_id": 1, + "poly": [ + 298, + 1071, + 1403, + 1071, + 1403, + 1136, + 298, + 1136 + ], + "score": 0.953 + }, + { + "category_id": 8, + "poly": [ + 666, + 1565, + 1032, + 1565, + 1032, + 1629, + 666, + 1629 + ], + "score": 0.951 + }, + { + "category_id": 1, + "poly": [ + 292, + 
1411, + 1403, + 1411, + 1403, + 1476, + 292, + 1476 + ], + "score": 0.95 + }, + { + "category_id": 1, + "poly": [ + 298, + 747, + 1403, + 747, + 1403, + 815, + 298, + 815 + ], + "score": 0.948 + }, + { + "category_id": 1, + "poly": [ + 298, + 1227, + 1400, + 1227, + 1400, + 1293, + 298, + 1293 + ], + "score": 0.947 + }, + { + "category_id": 1, + "poly": [ + 292, + 1488, + 1401, + 1488, + 1401, + 1553, + 292, + 1553 + ], + "score": 0.945 + }, + { + "category_id": 1, + "poly": [ + 299, + 827, + 1402, + 827, + 1402, + 894, + 299, + 894 + ], + "score": 0.944 + }, + { + "category_id": 8, + "poly": [ + 653, + 1678, + 1046, + 1678, + 1046, + 1741, + 653, + 1741 + ], + "score": 0.944 + }, + { + "category_id": 1, + "poly": [ + 297, + 1972, + 1404, + 1972, + 1404, + 2033, + 297, + 2033 + ], + "score": 0.944 + }, + { + "category_id": 8, + "poly": [ + 598, + 1792, + 1101, + 1792, + 1101, + 1855, + 598, + 1855 + ], + "score": 0.941 + }, + { + "category_id": 1, + "poly": [ + 298, + 1634, + 656, + 1634, + 656, + 1667, + 298, + 1667 + ], + "score": 0.931 + }, + { + "category_id": 1, + "poly": [ + 298, + 1747, + 706, + 1747, + 706, + 1779, + 298, + 1779 + ], + "score": 0.93 + }, + { + "category_id": 0, + "poly": [ + 297, + 1171, + 584, + 1171, + 584, + 1203, + 297, + 1203 + ], + "score": 0.927 + }, + { + "category_id": 2, + "poly": [ + 297, + 73, + 858, + 73, + 858, + 106, + 297, + 106 + ], + "score": 0.927 + }, + { + "category_id": 1, + "poly": [ + 297, + 1861, + 416, + 1861, + 416, + 1893, + 297, + 1893 + ], + "score": 0.922 + }, + { + "category_id": 8, + "poly": [ + 680, + 1302, + 977, + 1302, + 977, + 1341, + 680, + 1341 + ], + "score": 0.909 + }, + { + "category_id": 9, + "poly": [ + 1352, + 1359, + 1400, + 1359, + 1400, + 1389, + 1352, + 1389 + ], + "score": 0.896 + }, + { + "category_id": 8, + "poly": [ + 650, + 1349, + 1047, + 1349, + 1047, + 1400, + 650, + 1400 + ], + "score": 0.887 + }, + { + "category_id": 9, + "poly": [ + 1352, + 1306, + 1400, + 1306, + 1400, + 1338, + 
1352, + 1338 + ], + "score": 0.887 + }, + { + "category_id": 2, + "poly": [ + 834, + 2087, + 865, + 2087, + 865, + 2113, + 834, + 2113 + ], + "score": 0.881 + }, + { + "category_id": 14, + "poly": [ + 467, + 302, + 1235, + 302, + 1235, + 720, + 467, + 720 + ], + "score": 0.95, + "latex": "\\begin{array} { r l } & { H _ { k } = \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + \\left\\| B ( z ^ { k } ) + \\displaystyle \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } \\right\\| ^ { 2 } } \\\\ & { \\quad = \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + \\left\\| B ( z ^ { k } ) + \\displaystyle \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } - \\displaystyle \\sum _ { i = 1 } ^ { n + 1 } w _ { i } ^ { k } \\right\\| ^ { 2 } } \\\\ & { \\quad \\leq \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + 2 \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } + 2 \\left\\| \\displaystyle \\sum _ { i = 1 } ^ { n } ( y _ { i } ^ { k } - w _ { i } ^ { k } ) \\right\\| ^ { 2 } } \\\\ & { \\quad \\leq \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + 2 \\| B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\| ^ { 2 } + 2 n \\displaystyle \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } } \\\\ & { \\quad < \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } + 2 \\| B ( z ^ { k } ) + \\displaystyle \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } } \\\\ & { \\quad < \\rho _ { n } \\alpha , } \\end{array}" + }, + { + "category_id": 14, + "poly": [ + 551, + 898, + 1148, + 898, + 1148, + 1063, + 551, + 1063 + ], + "score": 0.94, + "latex": "\\boldsymbol { v } _ { i } ^ { k } \\doteq \\left[ \\begin{array} { c } { x _ { 1 } ^ { k } - x _ { i } ^ { k } } \\\\ { \\vdots } \\\\ { x _ { n } ^ { k } - x _ { i } ^ { k } } \\\\ { B ( x _ { i } ^ { k } ) + \\sum 
_ { i = 1 } ^ { n } y _ { i } ^ { k } } \\end{array} \\right] \\in \\mathscr { T } ( y _ { 1 } ^ { k } , \\ldots , y _ { n } ^ { k } , x _ { i } ^ { k } ) ." + }, + { + "category_id": 14, + "poly": [ + 666, + 1561, + 1033, + 1561, + 1033, + 1627, + 666, + 1627 + ], + "score": 0.94, + "latex": "\\frac { 1 } { \\alpha } ( q ^ { k } - \\bar { q } ^ { k } ) - \\mathcal { B } ( q ^ { k } ) \\in \\mathcal { A } ( \\bar { q } ^ { k } ) ." + }, + { + "category_id": 14, + "poly": [ + 697, + 1899, + 1002, + 1899, + 1002, + 1965, + 697, + 1965 + ], + "score": 0.94, + "latex": "R _ { k } ^ { \\mathrm { { T s e n g } } } \\doteq \\frac { 1 } { \\alpha ^ { 2 } } \\| q ^ { k } - q ^ { k + 1 } \\| ^ { 2 }" + }, + { + "category_id": 13, + "poly": [ + 944, + 746, + 1116, + 746, + 1116, + 787, + 944, + 787 + ], + "score": 0.93, + "latex": "\\begin{array} { r } { \\sum _ { i = 1 } ^ { n + 1 } w _ { i } ^ { k } = 0 } \\end{array}" + }, + { + "category_id": 14, + "poly": [ + 651, + 1675, + 1048, + 1675, + 1048, + 1740, + 651, + 1740 + ], + "score": 0.93, + "latex": "\\frac { 1 } { \\alpha } ( \\bar { q } ^ { k } - q ^ { k + 1 } ) + \\mathcal { B } ( q ^ { k } ) = \\mathcal { B } ( \\bar { q } ^ { k } ) ." 
+ }, + { + "category_id": 13, + "poly": [ + 740, + 228, + 921, + 228, + 921, + 264, + 740, + 264 + ], + "score": 0.92, + "latex": "( y _ { 1 } ^ { k } , \\dots , y _ { n } ^ { k } , z ^ { k } )" + }, + { + "category_id": 13, + "poly": [ + 298, + 1259, + 491, + 1259, + 491, + 1295, + 298, + 1295 + ], + "score": 0.92, + "latex": "q ^ { k } , \\bar { q } ^ { \\bar { k } } \\in \\mathbb { R } ^ { ( n + 1 ) d }" + }, + { + "category_id": 13, + "poly": [ + 521, + 861, + 662, + 861, + 662, + 893, + 521, + 893 + ], + "score": 0.92, + "latex": "i = 1 , \\ldots , n" + }, + { + "category_id": 13, + "poly": [ + 621, + 1071, + 691, + 1071, + 691, + 1107, + 621, + 1107 + ], + "score": 0.92, + "latex": "\\| v _ { i } ^ { k } \\| ^ { 2 }" + }, + { + "category_id": 14, + "poly": [ + 599, + 1788, + 1101, + 1788, + 1101, + 1853, + 599, + 1853 + ], + "score": 0.92, + "latex": "\\frac { 1 } { \\alpha } ( q ^ { k } - q ^ { k + 1 } ) \\in \\mathcal { A } ( \\bar { q } ^ { k } ) + \\mathcal { B } ( \\bar { q } ^ { k } ) = \\mathcal { T } ( \\bar { q } ^ { k } )" + }, + { + "category_id": 13, + "poly": [ + 299, + 1519, + 376, + 1519, + 376, + 1554, + 299, + 1554 + ], + "score": 0.91, + "latex": "\\mathcal { T } ( \\bar { q } ^ { k } )" + }, + { + "category_id": 13, + "poly": [ + 1241, + 1522, + 1296, + 1522, + 1296, + 1551, + 1241, + 1551 + ], + "score": 0.9, + "latex": "J _ { \\alpha \\mathcal { A } }" + }, + { + "category_id": 14, + "poly": [ + 651, + 1296, + 1047, + 1296, + 1047, + 1405, + 651, + 1405 + ], + "score": 0.9, + "latex": "\\begin{array} { c } { \\bar { q } ^ { k } = J _ { \\alpha \\mathcal { A } } ( q ^ { k } - \\alpha \\mathcal { B } ( q ^ { k } ) ) } \\\\ { q ^ { k + 1 } = \\bar { q } ^ { k } + \\alpha \\big ( \\mathcal { B } ( q ^ { k } ) - \\mathcal { B } ( \\bar { q } ^ { k } ) \\big ) , } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 1206, + 753, + 1244, + 753, + 1244, + 783, + 1206, + 783 + ], + "score": 0.9, + "latex": "R _ { k }" + }, + { + 
"category_id": 13, + "poly": [ + 1208, + 1075, + 1246, + 1075, + 1246, + 1104, + 1208, + 1104 + ], + "score": 0.9, + "latex": "G _ { k }" + }, + { + "category_id": 13, + "poly": [ + 445, + 858, + 477, + 858, + 477, + 894, + 445, + 894 + ], + "score": 0.9, + "latex": "x _ { i } ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 1196, + 1974, + 1233, + 1974, + 1233, + 2003, + 1196, + 2003 + ], + "score": 0.9, + "latex": "R _ { k }" + }, + { + "category_id": 13, + "poly": [ + 716, + 1105, + 752, + 1105, + 752, + 1134, + 716, + 1134 + ], + "score": 0.89, + "latex": "R _ { k }" + }, + { + "category_id": 13, + "poly": [ + 487, + 1444, + 519, + 1444, + 519, + 1473, + 487, + 1473 + ], + "score": 0.89, + "latex": "A _ { i }" + }, + { + "category_id": 13, + "poly": [ + 671, + 263, + 709, + 263, + 709, + 292, + 671, + 292 + ], + "score": 0.89, + "latex": "G _ { k }" + }, + { + "category_id": 13, + "poly": [ + 521, + 784, + 559, + 784, + 559, + 813, + 521, + 813 + ], + "score": 0.88, + "latex": "G _ { k }" + }, + { + "category_id": 13, + "poly": [ + 298, + 262, + 335, + 262, + 335, + 292, + 298, + 292 + ], + "score": 0.88, + "latex": "R _ { k }" + }, + { + "category_id": 13, + "poly": [ + 1119, + 827, + 1149, + 827, + 1149, + 857, + 1119, + 857 + ], + "score": 0.87, + "latex": "z ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 451, + 1414, + 482, + 1414, + 482, + 1440, + 451, + 1440 + ], + "score": 0.86, + "latex": "\\mathcal { B }" + }, + { + "category_id": 13, + "poly": [ + 986, + 1414, + 1018, + 1414, + 1018, + 1440, + 986, + 1440 + ], + "score": 0.85, + "latex": "\\mathcal { A }" + }, + { + "category_id": 13, + "poly": [ + 370, + 1414, + 402, + 1414, + 402, + 1440, + 370, + 1440 + ], + "score": 0.83, + "latex": "\\mathcal { A }" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1168.0, + 586.0, + 1168.0, + 586.0, + 1207.0, + 293.0, + 1207.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 72.0, + 858.0, + 72.0, + 858.0, + 
108.0, + 297.0, + 108.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 830.0, + 2085.0, + 870.0, + 2085.0, + 870.0, + 2123.0, + 830.0, + 2123.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 226.0, + 739.0, + 226.0, + 739.0, + 267.0, + 294.0, + 267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 922.0, + 226.0, + 1405.0, + 226.0, + 1405.0, + 267.0, + 922.0, + 267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 260.0, + 297.0, + 260.0, + 297.0, + 295.0, + 294.0, + 295.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 336.0, + 260.0, + 670.0, + 260.0, + 670.0, + 295.0, + 336.0, + 295.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 710.0, + 260.0, + 1103.0, + 260.0, + 1103.0, + 295.0, + 710.0, + 295.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1067.0, + 620.0, + 1067.0, + 620.0, + 1111.0, + 292.0, + 1111.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 692.0, + 1067.0, + 1207.0, + 1067.0, + 1207.0, + 1111.0, + 692.0, + 1111.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1247.0, + 1067.0, + 1405.0, + 1067.0, + 1405.0, + 1111.0, + 1247.0, + 1111.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1101.0, + 715.0, + 1101.0, + 715.0, + 1138.0, + 293.0, + 1138.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 753.0, + 1101.0, + 763.0, + 1101.0, + 763.0, + 1138.0, + 753.0, + 1138.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1411.0, + 369.0, + 1411.0, + 369.0, + 1447.0, + 295.0, + 1447.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 403.0, + 1411.0, + 450.0, + 1411.0, + 450.0, + 1447.0, + 403.0, + 1447.0 + ], + "score": 1.0, + 
"text": "" + }, + { + "category_id": 15, + "poly": [ + 483.0, + 1411.0, + 985.0, + 1411.0, + 985.0, + 1447.0, + 483.0, + 1447.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1019.0, + 1411.0, + 1404.0, + 1411.0, + 1404.0, + 1447.0, + 1019.0, + 1447.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1442.0, + 486.0, + 1442.0, + 486.0, + 1478.0, + 295.0, + 1478.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 520.0, + 1442.0, + 1365.0, + 1442.0, + 1365.0, + 1478.0, + 520.0, + 1478.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 289.0, + 736.0, + 943.0, + 736.0, + 943.0, + 800.0, + 289.0, + 800.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1117.0, + 736.0, + 1205.0, + 736.0, + 1205.0, + 800.0, + 1117.0, + 800.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1245.0, + 736.0, + 1404.0, + 736.0, + 1404.0, + 800.0, + 1245.0, + 800.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 784.0, + 520.0, + 784.0, + 520.0, + 815.0, + 294.0, + 815.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 560.0, + 784.0, + 793.0, + 784.0, + 793.0, + 815.0, + 560.0, + 815.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1224.0, + 1405.0, + 1224.0, + 1405.0, + 1265.0, + 292.0, + 1265.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1255.0, + 297.0, + 1255.0, + 297.0, + 1295.0, + 291.0, + 1295.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 492.0, + 1255.0, + 505.0, + 1255.0, + 505.0, + 1295.0, + 492.0, + 1295.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1490.0, + 1404.0, + 1490.0, + 1404.0, + 1522.0, + 296.0, + 1522.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 
15, + "poly": [ + 295.0, + 1515.0, + 298.0, + 1515.0, + 298.0, + 1558.0, + 295.0, + 1558.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 377.0, + 1515.0, + 1240.0, + 1515.0, + 1240.0, + 1558.0, + 377.0, + 1558.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1297.0, + 1515.0, + 1406.0, + 1515.0, + 1406.0, + 1558.0, + 1297.0, + 1558.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 821.0, + 1118.0, + 821.0, + 1118.0, + 868.0, + 292.0, + 868.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1150.0, + 821.0, + 1407.0, + 821.0, + 1407.0, + 868.0, + 1150.0, + 868.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 851.0, + 444.0, + 851.0, + 444.0, + 901.0, + 292.0, + 901.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 478.0, + 851.0, + 520.0, + 851.0, + 520.0, + 901.0, + 478.0, + 901.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 663.0, + 851.0, + 777.0, + 851.0, + 777.0, + 901.0, + 663.0, + 901.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1970.0, + 1195.0, + 1970.0, + 1195.0, + 2009.0, + 292.0, + 2009.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1234.0, + 1970.0, + 1406.0, + 1970.0, + 1406.0, + 2009.0, + 1234.0, + 2009.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 2001.0, + 397.0, + 2001.0, + 397.0, + 2035.0, + 294.0, + 2035.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1634.0, + 656.0, + 1634.0, + 656.0, + 1670.0, + 296.0, + 1670.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1747.0, + 706.0, + 1747.0, + 706.0, + 1783.0, + 296.0, + 1783.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1857.0, + 
421.0, + 1857.0, + 421.0, + 1897.0, + 294.0, + 1897.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 28, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 296, + 805, + 1407, + 805, + 1407, + 971, + 296, + 971 + ], + "score": 0.982 + }, + { + "category_id": 1, + "poly": [ + 298, + 1531, + 1403, + 1531, + 1403, + 1629, + 298, + 1629 + ], + "score": 0.977 + }, + { + "category_id": 8, + "poly": [ + 395, + 1245, + 1299, + 1245, + 1299, + 1401, + 395, + 1401 + ], + "score": 0.971 + }, + { + "category_id": 1, + "poly": [ + 299, + 285, + 1403, + 285, + 1403, + 378, + 299, + 378 + ], + "score": 0.961 + }, + { + "category_id": 8, + "poly": [ + 648, + 979, + 1050, + 979, + 1050, + 1069, + 648, + 1069 + ], + "score": 0.958 + }, + { + "category_id": 8, + "poly": [ + 590, + 382, + 1105, + 382, + 1105, + 437, + 590, + 437 + ], + "score": 0.956 + }, + { + "category_id": 1, + "poly": [ + 298, + 651, + 1402, + 651, + 1402, + 714, + 298, + 714 + ], + "score": 0.955 + }, + { + "category_id": 8, + "poly": [ + 571, + 1815, + 1124, + 1815, + 1124, + 1857, + 571, + 1857 + ], + "score": 0.953 + }, + { + "category_id": 8, + "poly": [ + 681, + 1113, + 1018, + 1113, + 1018, + 1176, + 681, + 1176 + ], + "score": 0.952 + }, + { + "category_id": 1, + "poly": [ + 297, + 1961, + 1404, + 1961, + 1404, + 2034, + 297, + 2034 + ], + "score": 0.944 + }, + { + "category_id": 1, + "poly": [ + 294, + 1073, + 1055, + 1073, + 1055, + 1105, + 294, + 1105 + ], + "score": 0.943 + }, + { + "category_id": 1, + "poly": [ + 297, + 1746, + 1402, + 1746, + 1402, + 1809, + 297, + 1809 + ], + "score": 0.94 + }, + { + "category_id": 8, + "poly": [ + 750, + 588, + 947, + 588, + 947, + 632, + 750, + 632 + ], + "score": 0.938 + }, + { + "category_id": 1, + "poly": [ + 299, + 442, + 1172, + 442, + 1172, + 476, + 299, + 476 + ], + "score": 0.928 + }, + { + "category_id": 1, + "poly": [ + 295, + 1197, + 1398, + 1197, + 1398, + 1237, + 295, + 
1237 + ], + "score": 0.927 + }, + { + "category_id": 2, + "poly": [ + 297, + 73, + 858, + 73, + 858, + 106, + 297, + 106 + ], + "score": 0.927 + }, + { + "category_id": 1, + "poly": [ + 298, + 1862, + 628, + 1862, + 628, + 1893, + 298, + 1893 + ], + "score": 0.925 + }, + { + "category_id": 8, + "poly": [ + 459, + 487, + 1238, + 487, + 1238, + 542, + 459, + 542 + ], + "score": 0.925 + }, + { + "category_id": 1, + "poly": [ + 298, + 1406, + 715, + 1406, + 715, + 1440, + 298, + 1440 + ], + "score": 0.923 + }, + { + "category_id": 0, + "poly": [ + 300, + 749, + 723, + 749, + 723, + 782, + 300, + 782 + ], + "score": 0.922 + }, + { + "category_id": 0, + "poly": [ + 299, + 1473, + 685, + 1473, + 685, + 1507, + 299, + 1507 + ], + "score": 0.92 + }, + { + "category_id": 1, + "poly": [ + 298, + 549, + 1213, + 549, + 1213, + 583, + 298, + 583 + ], + "score": 0.916 + }, + { + "category_id": 9, + "poly": [ + 1352, + 1689, + 1400, + 1689, + 1400, + 1720, + 1352, + 1720 + ], + "score": 0.899 + }, + { + "category_id": 9, + "poly": [ + 1351, + 1643, + 1400, + 1643, + 1400, + 1674, + 1351, + 1674 + ], + "score": 0.886 + }, + { + "category_id": 9, + "poly": [ + 1351, + 1306, + 1401, + 1306, + 1401, + 1337, + 1351, + 1337 + ], + "score": 0.88 + }, + { + "category_id": 2, + "poly": [ + 835, + 2087, + 866, + 2087, + 866, + 2113, + 835, + 2113 + ], + "score": 0.877 + }, + { + "category_id": 0, + "poly": [ + 297, + 229, + 424, + 229, + 424, + 260, + 297, + 260 + ], + "score": 0.825 + }, + { + "category_id": 8, + "poly": [ + 624, + 1637, + 1092, + 1637, + 1092, + 1679, + 624, + 1679 + ], + "score": 0.744 + }, + { + "category_id": 8, + "poly": [ + 604, + 1684, + 808, + 1684, + 808, + 1725, + 604, + 1725 + ], + "score": 0.547 + }, + { + "category_id": 8, + "poly": [ + 597, + 1896, + 1100, + 1896, + 1100, + 1942, + 597, + 1942 + ], + "score": 0.534 + }, + { + "category_id": 8, + "poly": [ + 604, + 1637, + 1095, + 1637, + 1095, + 1724, + 604, + 1724 + ], + "score": 0.278 + }, + { + 
"category_id": 1, + "poly": [ + 297, + 229, + 424, + 229, + 424, + 260, + 297, + 260 + ], + "score": 0.091 + }, + { + "category_id": 14, + "poly": [ + 395, + 1244, + 1301, + 1244, + 1301, + 1402, + 395, + 1402 + ], + "score": 0.95, + "latex": "\\tilde { \\mathcal { B } } ( w _ { 1 } , \\dots , w _ { n } , z ) \\mapsto \\left[ \\begin{array} { c c c c } { 0 } & { \\cdots } & { 0 } & { - I } \\\\ { \\vdots } & { \\ddots } & { \\vdots } & { \\vdots } \\\\ { 0 } & { \\cdots } & { 0 } & { - I } \\\\ { I } & { \\cdots } & { I } & { 0 } \\end{array} \\right] \\left[ \\begin{array} { c } { w _ { 1 } } \\\\ { \\vdots } \\\\ { w _ { n } } \\\\ { z } \\end{array} \\right] + \\left[ \\begin{array} { c } { 0 } \\\\ { \\vdots } \\\\ { 0 } \\\\ { \\frac { 1 } { \\vert \\mathbf { B } \\vert } \\sum _ { j \\in \\mathbf { B } } B _ { j } ( z ) } \\end{array} \\right] ." + }, + { + "category_id": 14, + "poly": [ + 590, + 380, + 1107, + 380, + 1107, + 439, + 590, + 439 + ], + "score": 0.93, + "latex": "q ^ { k + 1 } = J _ { \\alpha \\mathcal { A } } \\Big ( q ^ { k } - \\alpha \\big ( 2 \\mathcal { B } ( q ^ { k } ) - \\mathcal { B } ( q ^ { k - 1 } ) \\big ) \\Big ) ." + }, + { + "category_id": 14, + "poly": [ + 678, + 1110, + 1019, + 1110, + 1019, + 1177, + 678, + 1177 + ], + "score": 0.93, + "latex": "R _ { k } ^ { \\mathrm { { S - T s e n g } } } \\doteq \\frac { 1 } { \\alpha ^ { 2 } } \\| q _ { \\mathrm { { e r g } } } ^ { k } - q ^ { k + 1 } \\| ^ { 2 } ." 
+ }, + { + "category_id": 13, + "poly": [ + 520, + 1406, + 706, + 1406, + 706, + 1441, + 520, + 1441 + ], + "score": 0.93, + "latex": "\\mathbf { B } \\in \\{ 1 , \\dots , m \\}" + }, + { + "category_id": 13, + "poly": [ + 409, + 838, + 622, + 838, + 622, + 872, + 409, + 872 + ], + "score": 0.92, + "latex": "0 \\in \\mathcal { A } ( q ) + \\mathcal { B } ( q )" + }, + { + "category_id": 13, + "poly": [ + 1152, + 1566, + 1241, + 1566, + 1241, + 1601, + 1152, + 1601 + ], + "score": 0.92, + "latex": "( q ^ { k } , p ^ { k } )" + }, + { + "category_id": 13, + "poly": [ + 592, + 319, + 749, + 319, + 749, + 347, + 592, + 347 + ], + "score": 0.92, + "latex": "\\mathcal { T } = \\mathcal { A } + \\mathcal { B }" + }, + { + "category_id": 13, + "poly": [ + 614, + 1964, + 686, + 1964, + 686, + 2004, + 614, + 2004 + ], + "score": 0.92, + "latex": "R _ { k } ^ { \\mathrm { T s e n g } }" + }, + { + "category_id": 13, + "poly": [ + 959, + 1198, + 1224, + 1198, + 1224, + 1237, + 959, + 1237 + ], + "score": 0.92, + "latex": "\\begin{array} { r } { B ( z ) = \\frac { 1 } { m } \\sum _ { i = 1 } ^ { m } B _ { i } ( z ) } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 931, + 1967, + 992, + 1967, + 992, + 2003, + 931, + 2003 + ], + "score": 0.92, + "latex": "R _ { k } ^ { \\mathrm { F R B } }" + }, + { + "category_id": 14, + "poly": [ + 605, + 1633, + 1095, + 1633, + 1095, + 1729, + 605, + 1729 + ], + "score": 0.92, + "latex": "\\begin{array} { c } { \\hat { q } ^ { k } = q ^ { k } - \\tau ( \\mathcal { B } ( p ^ { k } ) + \\tilde { \\mathcal { B } } ( q ^ { k } ) - \\tilde { \\mathcal { B } } ( p ^ { k } ) ) } \\\\ { q ^ { k + 1 } = J _ { \\tau \\mathcal { A } } ( \\hat { q } ^ { k } ) . 
} \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 1078, + 1532, + 1286, + 1532, + 1286, + 1565, + 1078, + 1565 + ], + "score": 0.91, + "latex": "0 \\in \\mathcal { A } ( q ) + \\mathcal { B } ( q )" + }, + { + "category_id": 13, + "poly": [ + 1090, + 933, + 1131, + 933, + 1131, + 974, + 1090, + 974 + ], + "score": 0.91, + "latex": "q _ { \\mathrm { e r g } } ^ { k }" + }, + { + "category_id": 14, + "poly": [ + 597, + 1896, + 1099, + 1896, + 1099, + 1940, + 597, + 1940 + ], + "score": 0.9, + "latex": "R _ { k } ^ { \\mathrm { F R B - V R } } = \\lVert \\tau ^ { - 1 } ( \\hat { q } ^ { k } - q ^ { k + 1 } ) + \\mathcal { B } ( q ^ { k + 1 } ) \\rVert ^ { 2 } ." + }, + { + "category_id": 14, + "poly": [ + 458, + 480, + 1240, + 480, + 1240, + 547, + 458, + 547 + ], + "score": 0.9, + "latex": "v _ { \\mathrm { F R B } } ^ { k } \\doteq \\frac { 1 } { \\alpha } \\left( q ^ { k - 1 } - q ^ { k } \\right) + \\mathcal { B } ( q ^ { k } ) + \\mathcal { B } ( q ^ { k - 2 } ) - 2 \\mathcal { B } ( q ^ { k - 1 } ) \\in \\mathcal { T } ( q ^ { k } ) ." + }, + { + "category_id": 13, + "poly": [ + 466, + 1971, + 503, + 1971, + 503, + 2000, + 466, + 2000 + ], + "score": 0.9, + "latex": "R _ { k }" + }, + { + "category_id": 14, + "poly": [ + 749, + 588, + 949, + 588, + 949, + 630, + 749, + 630 + ], + "score": 0.89, + "latex": "R _ { k } ^ { \\mathrm { F R B } } \\doteq \\| v _ { \\mathrm { F R B } } ^ { k } \\| ^ { 2 } ." 
+ }, + { + "category_id": 13, + "poly": [ + 625, + 897, + 666, + 897, + 666, + 935, + 625, + 935 + ], + "score": 0.89, + "latex": "\\mathbf { \\bar { \\boldsymbol { q } } } _ { \\mathrm { e r g } } ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 779, + 1775, + 1102, + 1775, + 1102, + 1810, + 779, + 1810 + ], + "score": 0.89, + "latex": "\\dot { \\tau } ^ { - 1 } ( \\hat { q } ^ { k } - q ^ { k + 1 } ) \\in \\mathcal { A } ( q ^ { k + 1 } )" + }, + { + "category_id": 13, + "poly": [ + 1073, + 552, + 1110, + 552, + 1110, + 581, + 1073, + 581 + ], + "score": 0.88, + "latex": "R _ { k }" + }, + { + "category_id": 13, + "poly": [ + 549, + 1563, + 580, + 1563, + 580, + 1595, + 549, + 1595 + ], + "score": 0.86, + "latex": "\\tilde { \\mathcal { B } }" + }, + { + "category_id": 13, + "poly": [ + 792, + 320, + 824, + 320, + 824, + 346, + 792, + 346 + ], + "score": 0.86, + "latex": "\\mathcal { A }" + }, + { + "category_id": 13, + "poly": [ + 1109, + 1964, + 1199, + 1964, + 1199, + 2004, + 1109, + 2004 + ], + "score": 0.85, + "latex": "R _ { k } ^ { \\mathrm { S - T s e n g } }" + }, + { + "category_id": 13, + "poly": [ + 875, + 320, + 906, + 320, + 906, + 346, + 875, + 346 + ], + "score": 0.85, + "latex": "\\mathcal { B }" + }, + { + "category_id": 14, + "poly": [ + 572, + 1816, + 1119, + 1816, + 1119, + 1856, + 572, + 1856 + ], + "score": 0.84, + "latex": "\\tau ^ { - 1 } ( \\hat { q } ^ { k } - q ^ { k + 1 } ) + \\mathcal { B } ( q ^ { k + 1 } ) \\in ( \\mathcal { A } + \\mathcal { B } ) ( q ^ { k + 1 } ) ." 
+ }, + { + "category_id": 13, + "poly": [ + 845, + 840, + 877, + 840, + 877, + 867, + 845, + 867 + ], + "score": 0.84, + "latex": "\\mathcal { A }" + }, + { + "category_id": 13, + "poly": [ + 298, + 2000, + 393, + 2000, + 393, + 2037, + 298, + 2037 + ], + "score": 0.84, + "latex": "R _ { k } ^ { \\mathrm { F R B - V R } }" + }, + { + "category_id": 14, + "poly": [ + 650, + 976, + 1050, + 976, + 1050, + 1072, + 650, + 1072 + ], + "score": 0.78, + "latex": "\\begin{array} { r l } & { \\bar { q } ^ { k } = J _ { \\alpha \\mathcal { A } } ( q _ { \\mathrm { e r g } } ^ { k } - \\mathcal { B } ( q _ { \\mathrm { e r g } } ^ { k } ) ) } \\\\ & { q ^ { k + 1 } = \\bar { q } ^ { k } + \\alpha ( \\mathcal { B } ( q _ { \\mathrm { e r g } } ^ { k } ) - \\mathcal { B } ( \\bar { q } ^ { k } ) ) , } \\end{array}" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 72.0, + 858.0, + 72.0, + 858.0, + 108.0, + 297.0, + 108.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 747.0, + 727.0, + 747.0, + 727.0, + 784.0, + 292.0, + 784.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1474.0, + 686.0, + 1474.0, + 686.0, + 1508.0, + 295.0, + 1508.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 830.0, + 2084.0, + 871.0, + 2084.0, + 871.0, + 2121.0, + 830.0, + 2121.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 227.0, + 426.0, + 227.0, + 426.0, + 262.0, + 292.0, + 262.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 806.0, + 1404.0, + 806.0, + 1404.0, + 842.0, + 294.0, + 842.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 836.0, + 408.0, + 836.0, + 408.0, + 875.0, + 293.0, + 875.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 623.0, + 836.0, + 844.0, + 836.0, + 844.0, + 875.0, + 623.0, + 875.0 + ], + "score": 1.0, + "text": "" + 
}, + { + "category_id": 15, + "poly": [ + 878.0, + 836.0, + 1410.0, + 836.0, + 1410.0, + 875.0, + 878.0, + 875.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 864.0, + 1407.0, + 864.0, + 1407.0, + 906.0, + 294.0, + 906.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 891.0, + 624.0, + 891.0, + 624.0, + 944.0, + 292.0, + 944.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 667.0, + 891.0, + 1408.0, + 891.0, + 1408.0, + 944.0, + 667.0, + 944.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 287.0, + 919.0, + 1089.0, + 919.0, + 1089.0, + 984.0, + 287.0, + 984.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1132.0, + 919.0, + 1320.0, + 919.0, + 1320.0, + 984.0, + 1132.0, + 984.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1531.0, + 1077.0, + 1531.0, + 1077.0, + 1569.0, + 294.0, + 1569.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1287.0, + 1531.0, + 1404.0, + 1531.0, + 1404.0, + 1569.0, + 1287.0, + 1569.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1564.0, + 548.0, + 1564.0, + 548.0, + 1602.0, + 293.0, + 1602.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 581.0, + 1564.0, + 1151.0, + 1564.0, + 1151.0, + 1602.0, + 581.0, + 1602.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1242.0, + 1564.0, + 1407.0, + 1564.0, + 1407.0, + 1602.0, + 1242.0, + 1602.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1599.0, + 933.0, + 1599.0, + 933.0, + 1633.0, + 296.0, + 1633.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 282.0, + 1404.0, + 282.0, + 1404.0, + 324.0, + 293.0, + 324.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": 
[ + 293.0, + 316.0, + 591.0, + 316.0, + 591.0, + 354.0, + 293.0, + 354.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 750.0, + 316.0, + 791.0, + 316.0, + 791.0, + 354.0, + 750.0, + 354.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 825.0, + 316.0, + 874.0, + 316.0, + 874.0, + 354.0, + 825.0, + 354.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 907.0, + 316.0, + 1405.0, + 316.0, + 1405.0, + 354.0, + 907.0, + 354.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 351.0, + 408.0, + 351.0, + 408.0, + 379.0, + 291.0, + 379.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 649.0, + 1406.0, + 649.0, + 1406.0, + 688.0, + 294.0, + 688.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 684.0, + 827.0, + 684.0, + 827.0, + 716.0, + 297.0, + 716.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 282.0, + 1943.0, + 297.0, + 1943.0, + 297.0, + 2044.0, + 282.0, + 2044.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 394.0, + 1943.0, + 465.0, + 1943.0, + 465.0, + 2044.0, + 394.0, + 2044.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 504.0, + 1943.0, + 613.0, + 1943.0, + 613.0, + 2044.0, + 504.0, + 2044.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 687.0, + 1943.0, + 930.0, + 1943.0, + 930.0, + 2044.0, + 687.0, + 2044.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 993.0, + 1943.0, + 1108.0, + 1943.0, + 1108.0, + 2044.0, + 993.0, + 2044.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1200.0, + 1943.0, + 1418.0, + 1943.0, + 1418.0, + 2044.0, + 1200.0, + 2044.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1069.0, + 1058.0, + 1069.0, + 1058.0, + 
1111.0, + 293.0, + 1111.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1747.0, + 1403.0, + 1747.0, + 1403.0, + 1779.0, + 297.0, + 1779.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1769.0, + 778.0, + 1769.0, + 778.0, + 1814.0, + 292.0, + 1814.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1103.0, + 1769.0, + 1229.0, + 1769.0, + 1229.0, + 1814.0, + 1103.0, + 1814.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 441.0, + 1174.0, + 441.0, + 1174.0, + 480.0, + 295.0, + 480.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 289.0, + 1191.0, + 958.0, + 1191.0, + 958.0, + 1246.0, + 289.0, + 1246.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1225.0, + 1191.0, + 1402.0, + 1191.0, + 1402.0, + 1246.0, + 1225.0, + 1246.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1860.0, + 630.0, + 1860.0, + 630.0, + 1896.0, + 296.0, + 1896.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1404.0, + 519.0, + 1404.0, + 519.0, + 1444.0, + 294.0, + 1444.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 707.0, + 1404.0, + 716.0, + 1404.0, + 716.0, + 1444.0, + 707.0, + 1444.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 547.0, + 1072.0, + 547.0, + 1072.0, + 586.0, + 293.0, + 586.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1111.0, + 547.0, + 1212.0, + 547.0, + 1212.0, + 586.0, + 1111.0, + 586.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 227.0, + 426.0, + 227.0, + 426.0, + 262.0, + 292.0, + 262.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 29, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + 
"category_id": 1, + "poly": [ + 297, + 285, + 1405, + 285, + 1405, + 623, + 297, + 623 + ], + "score": 0.984 + }, + { + "category_id": 1, + "poly": [ + 297, + 1650, + 1405, + 1650, + 1405, + 1896, + 297, + 1896 + ], + "score": 0.982 + }, + { + "category_id": 1, + "poly": [ + 297, + 1026, + 1404, + 1026, + 1404, + 1274, + 297, + 1274 + ], + "score": 0.981 + }, + { + "category_id": 1, + "poly": [ + 298, + 1462, + 1403, + 1462, + 1403, + 1585, + 298, + 1585 + ], + "score": 0.979 + }, + { + "category_id": 1, + "poly": [ + 299, + 1910, + 1402, + 1910, + 1402, + 2034, + 299, + 2034 + ], + "score": 0.975 + }, + { + "category_id": 1, + "poly": [ + 295, + 1303, + 1400, + 1303, + 1400, + 1397, + 295, + 1397 + ], + "score": 0.961 + }, + { + "category_id": 8, + "poly": [ + 665, + 1589, + 1033, + 1589, + 1033, + 1647, + 665, + 1647 + ], + "score": 0.955 + }, + { + "category_id": 1, + "poly": [ + 300, + 732, + 1398, + 732, + 1398, + 795, + 300, + 795 + ], + "score": 0.951 + }, + { + "category_id": 1, + "poly": [ + 291, + 921, + 1402, + 921, + 1402, + 983, + 291, + 983 + ], + "score": 0.951 + }, + { + "category_id": 8, + "poly": [ + 675, + 1401, + 1023, + 1401, + 1023, + 1458, + 675, + 1458 + ], + "score": 0.947 + }, + { + "category_id": 8, + "poly": [ + 723, + 988, + 973, + 988, + 973, + 1023, + 723, + 1023 + ], + "score": 0.943 + }, + { + "category_id": 8, + "poly": [ + 633, + 880, + 1066, + 880, + 1066, + 918, + 633, + 918 + ], + "score": 0.938 + }, + { + "category_id": 8, + "poly": [ + 681, + 801, + 1018, + 801, + 1018, + 839, + 681, + 839 + ], + "score": 0.932 + }, + { + "category_id": 1, + "poly": [ + 297, + 842, + 1110, + 842, + 1110, + 874, + 297, + 874 + ], + "score": 0.925 + }, + { + "category_id": 0, + "poly": [ + 301, + 666, + 769, + 666, + 769, + 702, + 301, + 702 + ], + "score": 0.91 + }, + { + "category_id": 9, + "poly": [ + 1352, + 806, + 1399, + 806, + 1399, + 836, + 1352, + 836 + ], + "score": 0.902 + }, + { + "category_id": 2, + "poly": [ + 298, + 75, + 857, + 
75, + 857, + 104, + 298, + 104 + ], + "score": 0.897 + }, + { + "category_id": 9, + "poly": [ + 1352, + 990, + 1399, + 990, + 1399, + 1020, + 1352, + 1020 + ], + "score": 0.895 + }, + { + "category_id": 9, + "poly": [ + 1352, + 1594, + 1399, + 1594, + 1399, + 1624, + 1352, + 1624 + ], + "score": 0.895 + }, + { + "category_id": 9, + "poly": [ + 1352, + 1407, + 1399, + 1407, + 1399, + 1437, + 1352, + 1437 + ], + "score": 0.889 + }, + { + "category_id": 2, + "poly": [ + 835, + 2088, + 862, + 2088, + 862, + 2112, + 835, + 2112 + ], + "score": 0.803 + }, + { + "category_id": 0, + "poly": [ + 298, + 229, + 1214, + 229, + 1214, + 262, + 298, + 262 + ], + "score": 0.553 + }, + { + "category_id": 1, + "poly": [ + 298, + 229, + 1214, + 229, + 1214, + 262, + 298, + 262 + ], + "score": 0.355 + }, + { + "category_id": 14, + "poly": [ + 664, + 1586, + 1035, + 1586, + 1035, + 1648, + 664, + 1648 + ], + "score": 0.94, + "latex": "G _ { { \\mathcal C } _ { 2 } } ( z ) \\doteq \\operatorname* { s u p } _ { z ^ { \\prime } \\in { \\mathcal C } _ { 2 } } B ( z ^ { \\prime } ) ^ { \\top } ( z - z ^ { \\prime } ) ." + }, + { + "category_id": 14, + "poly": [ + 674, + 1398, + 1026, + 1398, + 1026, + 1459, + 674, + 1459 + ], + "score": 0.94, + "latex": "G _ { { \\mathcal C } } ( z ) \\doteq \\operatorname* { s u p } _ { z ^ { \\prime } \\in { \\mathcal C } } B ( z ^ { \\prime } ) ^ { \\top } ( z - z ^ { \\prime } ) ." 
+ }, + { + "category_id": 13, + "poly": [ + 744, + 318, + 952, + 318, + 952, + 352, + 744, + 352 + ], + "score": 0.93, + "latex": "0 \\in \\mathcal { A } ( q ) + \\mathcal { B } ( q )" + }, + { + "category_id": 13, + "poly": [ + 959, + 1494, + 1033, + 1494, + 1033, + 1528, + 959, + 1528 + ], + "score": 0.93, + "latex": "G \\overset { \\cdot } { c } ( z )" + }, + { + "category_id": 13, + "poly": [ + 1001, + 921, + 1219, + 921, + 1219, + 955, + 1001, + 955 + ], + "score": 0.93, + "latex": "- B ( z ^ { * } ) \\in N _ { \\mathcal { C } } ( z ^ { * } )" + }, + { + "category_id": 13, + "poly": [ + 542, + 1464, + 677, + 1464, + 677, + 1498, + 542, + 1498 + ], + "score": 0.93, + "latex": "G _ { \\mathcal { C } } ( z ) \\geq 0" + }, + { + "category_id": 13, + "poly": [ + 474, + 731, + 643, + 731, + 643, + 761, + 474, + 761 + ], + "score": 0.92, + "latex": "B : \\mathbb { R } ^ { d } \\mathbb { R } ^ { d }" + }, + { + "category_id": 14, + "poly": [ + 629, + 877, + 1071, + 877, + 1071, + 919, + 629, + 919 + ], + "score": 0.91, + "latex": "N _ { { \\mathcal { C } } } ( x ) \\doteq \\{ g : g ^ { \\top } ( y - x ) \\le 0 \\ \\forall y \\in { \\mathcal { C } } \\}" + }, + { + "category_id": 13, + "poly": [ + 732, + 1464, + 867, + 1464, + 867, + 1498, + 732, + 1498 + ], + "score": 0.91, + "latex": "G _ { \\mathcal { C } } ( z ) = 0" + }, + { + "category_id": 13, + "poly": [ + 662, + 765, + 741, + 765, + 741, + 793, + 662, + 793 + ], + "score": 0.91, + "latex": "z ^ { \\ast } \\in \\mathcal { C }" + }, + { + "category_id": 14, + "poly": [ + 679, + 798, + 1021, + 798, + 1021, + 840, + 679, + 840 + ], + "score": 0.91, + "latex": "B ( z ^ { * } ) ^ { \\top } ( z - z ^ { * } ) \\geq 0 , \\forall z \\in { \\mathcal { C } } ." + }, + { + "category_id": 14, + "poly": [ + 723, + 986, + 975, + 986, + 975, + 1024, + 723, + 1024 + ], + "score": 0.91, + "latex": "0 \\in B ( z ^ { * } ) + N _ { \\cal { C } } ( z ^ { * } ) ." 
+ }, + { + "category_id": 13, + "poly": [ + 925, + 1150, + 963, + 1150, + 963, + 1180, + 925, + 1180 + ], + "score": 0.89, + "latex": "N _ { \\mathcal { C } }" + }, + { + "category_id": 13, + "poly": [ + 492, + 1241, + 530, + 1241, + 530, + 1272, + 492, + 1272 + ], + "score": 0.89, + "latex": "N _ { \\mathcal { C } }" + }, + { + "category_id": 13, + "poly": [ + 1263, + 1498, + 1316, + 1498, + 1316, + 1524, + 1263, + 1524 + ], + "score": 0.88, + "latex": "+ \\infty" + }, + { + "category_id": 13, + "poly": [ + 371, + 1653, + 401, + 1653, + 401, + 1682, + 371, + 1682 + ], + "score": 0.88, + "latex": "\\mathcal { C } _ { 2 }" + }, + { + "category_id": 13, + "poly": [ + 1368, + 1652, + 1398, + 1652, + 1398, + 1682, + 1368, + 1682 + ], + "score": 0.87, + "latex": "\\mathcal { C } _ { 2 }" + }, + { + "category_id": 13, + "poly": [ + 1312, + 1713, + 1342, + 1713, + 1342, + 1743, + 1312, + 1743 + ], + "score": 0.87, + "latex": "\\mathcal { C } _ { 2 }" + }, + { + "category_id": 13, + "poly": [ + 366, + 1683, + 396, + 1683, + 396, + 1713, + 366, + 1713 + ], + "score": 0.87, + "latex": "\\mathcal { C } _ { 2 }" + }, + { + "category_id": 13, + "poly": [ + 855, + 923, + 886, + 923, + 886, + 950, + 855, + 950 + ], + "score": 0.86, + "latex": "z ^ { * }" + }, + { + "category_id": 13, + "poly": [ + 1145, + 380, + 1176, + 380, + 1176, + 407, + 1145, + 407 + ], + "score": 0.84, + "latex": "\\mathcal { B }" + }, + { + "category_id": 13, + "poly": [ + 297, + 1713, + 327, + 1713, + 327, + 1744, + 297, + 1744 + ], + "score": 0.84, + "latex": "\\mathcal { C } _ { 2 }" + }, + { + "category_id": 13, + "poly": [ + 1044, + 350, + 1074, + 350, + 1074, + 376, + 1044, + 376 + ], + "score": 0.83, + "latex": "\\mathcal { B }" + }, + { + "category_id": 13, + "poly": [ + 801, + 1498, + 820, + 1498, + 820, + 1522, + 801, + 1522 + ], + "score": 0.83, + "latex": "\\mathcal { C }" + }, + { + "category_id": 13, + "poly": [ + 1348, + 923, + 1374, + 923, + 1374, + 949, + 1348, + 949 + ], + "score": 0.83, 
+ "latex": "B" + }, + { + "category_id": 13, + "poly": [ + 978, + 735, + 997, + 735, + 997, + 761, + 978, + 761 + ], + "score": 0.8, + "latex": "\\mathcal { C }" + }, + { + "category_id": 13, + "poly": [ + 1191, + 411, + 1217, + 411, + 1217, + 437, + 1191, + 437 + ], + "score": 0.8, + "latex": "B" + }, + { + "category_id": 13, + "poly": [ + 1005, + 380, + 1030, + 380, + 1030, + 406, + 1005, + 406 + ], + "score": 0.78, + "latex": "B" + }, + { + "category_id": 13, + "poly": [ + 1035, + 1471, + 1052, + 1471, + 1052, + 1492, + 1035, + 1492 + ], + "score": 0.76, + "latex": "z" + }, + { + "category_id": 13, + "poly": [ + 1309, + 1060, + 1329, + 1060, + 1329, + 1086, + 1309, + 1086 + ], + "score": 0.73, + "latex": "\\mathcal { C }" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 662.0, + 774.0, + 662.0, + 774.0, + 710.0, + 294.0, + 710.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 72.0, + 858.0, + 72.0, + 858.0, + 108.0, + 297.0, + 108.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 830.0, + 2084.0, + 869.0, + 2084.0, + 869.0, + 2125.0, + 830.0, + 2125.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 229.0, + 1220.0, + 229.0, + 1220.0, + 264.0, + 294.0, + 264.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 284.0, + 1407.0, + 284.0, + 1407.0, + 323.0, + 292.0, + 323.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 318.0, + 743.0, + 318.0, + 743.0, + 353.0, + 294.0, + 353.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 953.0, + 318.0, + 1405.0, + 318.0, + 1405.0, + 353.0, + 953.0, + 353.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 348.0, + 1043.0, + 348.0, + 1043.0, + 383.0, + 294.0, + 383.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1075.0, + 348.0, + 1407.0, + 
348.0, + 1407.0, + 383.0, + 1075.0, + 383.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 376.0, + 1004.0, + 376.0, + 1004.0, + 415.0, + 292.0, + 415.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1031.0, + 376.0, + 1144.0, + 376.0, + 1144.0, + 415.0, + 1031.0, + 415.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1177.0, + 376.0, + 1407.0, + 376.0, + 1407.0, + 415.0, + 1177.0, + 415.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 409.0, + 1190.0, + 409.0, + 1190.0, + 444.0, + 294.0, + 444.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1218.0, + 409.0, + 1406.0, + 409.0, + 1406.0, + 444.0, + 1218.0, + 444.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 439.0, + 1406.0, + 439.0, + 1406.0, + 478.0, + 294.0, + 478.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 469.0, + 1403.0, + 469.0, + 1403.0, + 505.0, + 294.0, + 505.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 499.0, + 1405.0, + 499.0, + 1405.0, + 536.0, + 294.0, + 536.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 530.0, + 1403.0, + 530.0, + 1403.0, + 566.0, + 295.0, + 566.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 562.0, + 1408.0, + 562.0, + 1408.0, + 596.0, + 294.0, + 596.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 591.0, + 652.0, + 591.0, + 652.0, + 626.0, + 295.0, + 626.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1652.0, + 370.0, + 1652.0, + 370.0, + 1685.0, + 295.0, + 1685.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 402.0, + 1652.0, + 1367.0, + 1652.0, + 1367.0, + 1685.0, + 402.0, + 1685.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1399.0, + 1652.0, + 1407.0, + 1652.0, + 1407.0, + 1685.0, + 1399.0, + 1685.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1678.0, + 365.0, + 1678.0, + 365.0, + 1717.0, + 291.0, + 1717.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 397.0, + 1678.0, + 1407.0, + 1678.0, + 1407.0, + 1717.0, + 397.0, + 1717.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 328.0, + 1711.0, + 1311.0, + 1711.0, + 1311.0, + 1748.0, + 328.0, + 1748.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1343.0, + 1711.0, + 1407.0, + 1711.0, + 1407.0, + 1748.0, + 1343.0, + 1748.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1741.0, + 1407.0, + 1741.0, + 1407.0, + 1779.0, + 292.0, + 1779.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1772.0, + 1405.0, + 1772.0, + 1405.0, + 1808.0, + 292.0, + 1808.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1801.0, + 1406.0, + 1801.0, + 1406.0, + 1840.0, + 292.0, + 1840.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1835.0, + 1406.0, + 1835.0, + 1406.0, + 1869.0, + 295.0, + 1869.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1866.0, + 699.0, + 1866.0, + 699.0, + 1899.0, + 295.0, + 1899.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1026.0, + 1408.0, + 1026.0, + 1408.0, + 1063.0, + 294.0, + 1063.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1059.0, + 1308.0, + 1059.0, + 1308.0, + 1093.0, + 295.0, + 1093.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1330.0, + 1059.0, + 1407.0, + 1059.0, + 1407.0, + 1093.0, + 1330.0, + 1093.0 + ], + "score": 
1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1090.0, + 1404.0, + 1090.0, + 1404.0, + 1123.0, + 295.0, + 1123.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1120.0, + 1405.0, + 1120.0, + 1405.0, + 1152.0, + 294.0, + 1152.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1148.0, + 924.0, + 1148.0, + 924.0, + 1185.0, + 292.0, + 1185.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 964.0, + 1148.0, + 1405.0, + 1148.0, + 1405.0, + 1185.0, + 964.0, + 1185.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1180.0, + 1406.0, + 1180.0, + 1406.0, + 1214.0, + 292.0, + 1214.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1208.0, + 1405.0, + 1208.0, + 1405.0, + 1249.0, + 292.0, + 1249.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1242.0, + 491.0, + 1242.0, + 491.0, + 1277.0, + 295.0, + 1277.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 531.0, + 1242.0, + 541.0, + 1242.0, + 541.0, + 1277.0, + 531.0, + 1277.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1462.0, + 541.0, + 1462.0, + 541.0, + 1501.0, + 292.0, + 1501.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 678.0, + 1462.0, + 731.0, + 1462.0, + 731.0, + 1501.0, + 678.0, + 1501.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 868.0, + 1462.0, + 1034.0, + 1462.0, + 1034.0, + 1501.0, + 868.0, + 1501.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1053.0, + 1462.0, + 1408.0, + 1462.0, + 1408.0, + 1501.0, + 1053.0, + 1501.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1491.0, + 800.0, + 1491.0, + 800.0, + 1533.0, + 291.0, + 1533.0 + ], + "score": 1.0, + "text": "" + 
}, + { + "category_id": 15, + "poly": [ + 821.0, + 1491.0, + 958.0, + 1491.0, + 958.0, + 1533.0, + 821.0, + 1533.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1034.0, + 1491.0, + 1262.0, + 1491.0, + 1262.0, + 1533.0, + 1034.0, + 1533.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1317.0, + 1491.0, + 1408.0, + 1491.0, + 1408.0, + 1533.0, + 1317.0, + 1533.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1525.0, + 1405.0, + 1525.0, + 1405.0, + 1560.0, + 292.0, + 1560.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1554.0, + 492.0, + 1554.0, + 492.0, + 1591.0, + 293.0, + 1591.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1909.0, + 1406.0, + 1909.0, + 1406.0, + 1948.0, + 293.0, + 1948.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1942.0, + 1406.0, + 1942.0, + 1406.0, + 1977.0, + 294.0, + 1977.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1974.0, + 1404.0, + 1974.0, + 1404.0, + 2007.0, + 295.0, + 2007.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 2004.0, + 859.0, + 2004.0, + 859.0, + 2035.0, + 295.0, + 2035.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1301.0, + 1405.0, + 1301.0, + 1405.0, + 1342.0, + 294.0, + 1342.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1335.0, + 1405.0, + 1335.0, + 1405.0, + 1369.0, + 295.0, + 1369.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1367.0, + 1318.0, + 1367.0, + 1318.0, + 1401.0, + 295.0, + 1401.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 728.0, + 473.0, + 728.0, + 473.0, + 770.0, + 294.0, + 770.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 644.0, + 728.0, + 977.0, + 728.0, + 977.0, + 770.0, + 644.0, + 770.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 998.0, + 728.0, + 1403.0, + 728.0, + 1403.0, + 770.0, + 998.0, + 770.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 764.0, + 661.0, + 764.0, + 661.0, + 797.0, + 294.0, + 797.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 742.0, + 764.0, + 853.0, + 764.0, + 853.0, + 797.0, + 742.0, + 797.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 920.0, + 854.0, + 920.0, + 854.0, + 956.0, + 295.0, + 956.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 887.0, + 920.0, + 1000.0, + 920.0, + 1000.0, + 956.0, + 887.0, + 956.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1220.0, + 920.0, + 1347.0, + 920.0, + 1347.0, + 956.0, + 1220.0, + 956.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1375.0, + 920.0, + 1405.0, + 920.0, + 1405.0, + 956.0, + 1375.0, + 956.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 953.0, + 916.0, + 953.0, + 916.0, + 985.0, + 294.0, + 985.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 839.0, + 1111.0, + 839.0, + 1111.0, + 878.0, + 296.0, + 878.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 229.0, + 1220.0, + 229.0, + 1220.0, + 264.0, + 294.0, + 264.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 30, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 296, + 865, + 1406, + 865, + 1406, + 1146, + 296, + 1146 + ], + "score": 0.984 + }, + { + "category_id": 1, + "poly": [ + 296, + 292, + 1405, + 292, + 1405, + 550, + 296, + 550 + ], + "score": 0.982 + }, + { + "category_id": 8, + 
"poly": [ + 339, + 708, + 1310, + 708, + 1310, + 852, + 339, + 852 + ], + "score": 0.969 + }, + { + "category_id": 8, + "poly": [ + 624, + 1811, + 1066, + 1811, + 1066, + 1960, + 624, + 1960 + ], + "score": 0.965 + }, + { + "category_id": 8, + "poly": [ + 508, + 1664, + 1185, + 1664, + 1185, + 1760, + 508, + 1760 + ], + "score": 0.959 + }, + { + "category_id": 8, + "poly": [ + 708, + 1516, + 992, + 1516, + 992, + 1612, + 708, + 1612 + ], + "score": 0.958 + }, + { + "category_id": 8, + "poly": [ + 467, + 1238, + 1231, + 1238, + 1231, + 1326, + 467, + 1326 + ], + "score": 0.956 + }, + { + "category_id": 1, + "poly": [ + 300, + 1338, + 1393, + 1338, + 1393, + 1402, + 300, + 1402 + ], + "score": 0.95 + }, + { + "category_id": 1, + "poly": [ + 297, + 1971, + 1406, + 1971, + 1406, + 2036, + 297, + 2036 + ], + "score": 0.948 + }, + { + "category_id": 1, + "poly": [ + 298, + 1158, + 1400, + 1158, + 1400, + 1223, + 298, + 1223 + ], + "score": 0.947 + }, + { + "category_id": 8, + "poly": [ + 689, + 1417, + 1008, + 1417, + 1008, + 1455, + 689, + 1455 + ], + "score": 0.938 + }, + { + "category_id": 1, + "poly": [ + 298, + 1468, + 752, + 1468, + 752, + 1501, + 298, + 1501 + ], + "score": 0.929 + }, + { + "category_id": 0, + "poly": [ + 298, + 224, + 926, + 224, + 926, + 263, + 298, + 263 + ], + "score": 0.927 + }, + { + "category_id": 1, + "poly": [ + 295, + 660, + 953, + 660, + 953, + 695, + 295, + 695 + ], + "score": 0.926 + }, + { + "category_id": 1, + "poly": [ + 296, + 1623, + 351, + 1623, + 351, + 1653, + 296, + 1653 + ], + "score": 0.921 + }, + { + "category_id": 2, + "poly": [ + 297, + 74, + 857, + 74, + 857, + 106, + 297, + 106 + ], + "score": 0.918 + }, + { + "category_id": 0, + "poly": [ + 297, + 591, + 1319, + 591, + 1319, + 629, + 297, + 629 + ], + "score": 0.912 + }, + { + "category_id": 1, + "poly": [ + 296, + 1771, + 344, + 1771, + 344, + 1801, + 296, + 1801 + ], + "score": 0.907 + }, + { + "category_id": 9, + "poly": [ + 1351, + 1546, + 1401, + 1546, + 1401, + 
1578, + 1351, + 1578 + ], + "score": 0.888 + }, + { + "category_id": 9, + "poly": [ + 1352, + 766, + 1400, + 766, + 1400, + 797, + 1352, + 797 + ], + "score": 0.887 + }, + { + "category_id": 9, + "poly": [ + 1351, + 1419, + 1400, + 1419, + 1400, + 1451, + 1351, + 1451 + ], + "score": 0.886 + }, + { + "category_id": 2, + "poly": [ + 835, + 2087, + 865, + 2087, + 865, + 2113, + 835, + 2113 + ], + "score": 0.866 + }, + { + "category_id": 14, + "poly": [ + 467, + 1236, + 1230, + 1236, + 1230, + 1327, + 467, + 1327 + ], + "score": 0.96, + "latex": "\\mathcal { L } ( z ) \\doteq \\lambda ( \\delta - \\kappa ) + \\frac { 1 } { m } \\sum _ { i = 1 } ^ { m } \\Psi ( \\langle { \\hat { x } _ { i } } , \\beta \\rangle ) + \\frac { 1 } { m } \\sum _ { i = 1 } ^ { m } \\gamma _ { i } ( \\hat { y } _ { i } \\langle { \\hat { x } _ { i } } , \\beta \\rangle - \\lambda \\kappa ) ." + }, + { + "category_id": 14, + "poly": [ + 509, + 1662, + 1186, + 1662, + 1186, + 1759, + 509, + 1759 + ], + "score": 0.94, + "latex": "\\begin{array} { r } { \\nabla _ { \\lambda , \\beta } \\mathcal { L } ( z ) = \\left[ \\begin{array} { c } { \\delta - \\kappa ( 1 + \\frac { 1 } { m } \\sum _ { i = 1 } ^ { m } \\gamma _ { i } ) } \\\\ { \\frac { 1 } { m } \\sum _ { i = 1 } ^ { m } \\Psi ^ { \\prime } ( \\langle \\hat { x } _ { i } , \\beta \\rangle ) \\hat { x } _ { i } + \\frac { 1 } { m } \\sum _ { i = 1 } ^ { m } \\gamma _ { i } \\hat { y } _ { i } \\hat { x } _ { i } } \\end{array} \\right] } \\end{array}" + }, + { + "category_id": 14, + "poly": [ + 705, + 1514, + 991, + 1514, + 991, + 1610, + 705, + 1610 + ], + "score": 0.94, + "latex": "\\boldsymbol { B } ( z ) \\doteq \\left[ \\begin{array} { l } { \\nabla _ { \\boldsymbol { \\lambda } , \\beta } \\mathcal { L } ( z ) } \\\\ { - \\nabla _ { \\boldsymbol { \\gamma } } \\mathcal { L } ( z ) } \\end{array} \\right] ," + }, + { + "category_id": 14, + "poly": [ + 625, + 1811, + 1074, + 1811, + 1074, + 1959, + 625, + 1959 + ], + "score": 0.93, + 
"latex": "\\nabla _ { \\boldsymbol { \\gamma } } \\mathcal { L } ( z ) = \\left[ \\begin{array} { c } { \\frac { 1 } { m } ( \\hat { y } _ { 1 } \\langle \\hat { x } _ { 1 } , \\beta \\rangle - \\lambda \\kappa ) } \\\\ { \\vdots } \\\\ { \\frac { 1 } { m } ( \\hat { y } _ { m } \\langle \\hat { x } _ { m } , \\beta \\rangle - \\lambda \\kappa ) } \\end{array} \\right] ." + }, + { + "category_id": 13, + "poly": [ + 328, + 458, + 481, + 458, + 481, + 487, + 328, + 487 + ], + "score": 0.93, + "latex": "z z - \\alpha _ { k } \\bar { y }" + }, + { + "category_id": 13, + "poly": [ + 667, + 327, + 771, + 327, + 771, + 365, + 667, + 365 + ], + "score": 0.93, + "latex": "\\textstyle \\sum _ { i = 1 } ^ { n } x _ { i } ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 454, + 1190, + 550, + 1190, + 550, + 1223, + 454, + 1223 + ], + "score": 0.93, + "latex": "( \\lambda , \\beta , \\gamma )" + }, + { + "category_id": 13, + "poly": [ + 544, + 1468, + 604, + 1468, + 604, + 1502, + 544, + 1502 + ], + "score": 0.92, + "latex": "B ( z )" + }, + { + "category_id": 13, + "poly": [ + 805, + 365, + 894, + 365, + 894, + 398, + 805, + 398 + ], + "score": 0.92, + "latex": "x = x _ { i } ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 1076, + 990, + 1186, + 990, + 1186, + 1024, + 1076, + 1024 + ], + "score": 0.92, + "latex": "\\{ ( \\hat { x } _ { i } , \\hat { y } _ { i } ) \\}" + }, + { + "category_id": 13, + "poly": [ + 827, + 327, + 929, + 327, + 929, + 363, + 827, + 363 + ], + "score": 0.92, + "latex": "\\textstyle \\sum _ { i = 1 } ^ { n } y _ { i } ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 513, + 899, + 584, + 899, + 584, + 933, + 513, + 933 + ], + "score": 0.92, + "latex": "c \\| \\beta \\| _ { 1 }" + }, + { + "category_id": 13, + "poly": [ + 296, + 516, + 397, + 516, + 397, + 550, + 296, + 550 + ], + "score": 0.91, + "latex": "( n + 7 ) d" + }, + { + "category_id": 14, + "poly": [ + 337, + 708, + 1310, + 708, + 1310, + 853, + 337, + 853 + ], + "score": 0.91, + 
"latex": "\\begin{array} { r l } { \\underset { \\beta \\in \\mathbb { R } ^ { d } } { \\operatorname* { m i n } } \\quad \\underset { \\gamma \\in \\mathbb { R } ^ { m } } { \\operatorname* { m a x } } } & { \\left\\{ \\lambda ( \\delta - \\kappa ) + \\displaystyle \\frac { 1 } { m } \\sum _ { i = 1 } ^ { m } \\Psi ( \\langle \\hat { x } _ { i } , \\beta \\rangle ) + \\displaystyle \\frac { 1 } { m } \\sum _ { i = 1 } ^ { m } \\gamma _ { i } ( \\hat { y } _ { i } \\langle \\hat { x } _ { i } , \\beta \\rangle - \\lambda \\kappa ) + c \\| \\beta \\| _ { 1 } \\right\\} } \\\\ { \\mathrm { s . t . } \\quad } & { \\| \\beta \\| _ { 2 } \\leq \\lambda / ( L _ { \\Psi } + 1 ) \\qquad \\| \\gamma \\| _ { \\infty } \\leq 1 . } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 516, + 398, + 700, + 398, + 700, + 426, + 516, + 426 + ], + "score": 0.91, + "latex": "w _ { i } w _ { i } - \\alpha _ { k } x" + }, + { + "category_id": 13, + "poly": [ + 924, + 1022, + 994, + 1022, + 994, + 1051, + 924, + 1051 + ], + "score": 0.91, + "latex": "\\kappa \\geq 0" + }, + { + "category_id": 13, + "poly": [ + 669, + 901, + 735, + 901, + 735, + 930, + 669, + 930 + ], + "score": 0.91, + "latex": "c \\geq 0" + }, + { + "category_id": 13, + "poly": [ + 882, + 455, + 953, + 455, + 953, + 489, + 882, + 489 + ], + "score": 0.91, + "latex": "x _ { i } ^ { k } , y _ { i } ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 691, + 424, + 991, + 424, + 991, + 458, + 691, + 458 + ], + "score": 0.9, + "latex": "w _ { i } w _ { i } + \\alpha _ { k } \\bar { ( n + 1 ) } _ { . 
} ^ { - 1 } \\bar { x }" + }, + { + "category_id": 13, + "poly": [ + 1249, + 1113, + 1356, + 1113, + 1356, + 1141, + 1249, + 1141 + ], + "score": 0.9, + "latex": "c = 1 0 ^ { - 3 }" + }, + { + "category_id": 13, + "poly": [ + 298, + 1021, + 365, + 1021, + 365, + 1051, + 298, + 1051 + ], + "score": 0.9, + "latex": "\\delta \\geq 0" + }, + { + "category_id": 14, + "poly": [ + 689, + 1415, + 1010, + 1415, + 1010, + 1455, + 689, + 1455 + ], + "score": 0.9, + "latex": "0 \\in B ( z ) + A _ { 1 } ( z ) + A _ { 2 } ( z )" + }, + { + "category_id": 13, + "poly": [ + 593, + 294, + 625, + 294, + 625, + 330, + 593, + 330 + ], + "score": 0.9, + "latex": "y _ { i } ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 1078, + 1114, + 1197, + 1114, + 1197, + 1140, + 1078, + 1140 + ], + "score": 0.9, + "latex": "\\delta = \\kappa = 1" + }, + { + "category_id": 13, + "poly": [ + 1056, + 1081, + 1263, + 1081, + 1263, + 1113, + 1056, + 1113 + ], + "score": 0.89, + "latex": "t \\mapsto \\log ( e ^ { t } + e ^ { - t } )" + }, + { + "category_id": 13, + "poly": [ + 297, + 364, + 395, + 364, + 395, + 394, + 297, + 394 + ], + "score": 0.89, + "latex": "i \\in 1 . . 
n" + }, + { + "category_id": 13, + "poly": [ + 1363, + 456, + 1400, + 456, + 1400, + 486, + 1363, + 486 + ], + "score": 0.89, + "latex": "R _ { k }" + }, + { + "category_id": 13, + "poly": [ + 1313, + 1082, + 1404, + 1082, + 1404, + 1111, + 1313, + 1111 + ], + "score": 0.88, + "latex": "L _ { \\Psi } = 1" + }, + { + "category_id": 13, + "poly": [ + 787, + 931, + 816, + 931, + 816, + 960, + 787, + 960 + ], + "score": 0.88, + "latex": "{ \\hat { x } } _ { i }" + }, + { + "category_id": 13, + "poly": [ + 1375, + 869, + 1402, + 869, + 1402, + 899, + 1375, + 899 + ], + "score": 0.88, + "latex": "\\ell _ { 1 }" + }, + { + "category_id": 13, + "poly": [ + 1122, + 300, + 1165, + 300, + 1165, + 328, + 1122, + 328 + ], + "score": 0.88, + "latex": "t , x" + }, + { + "category_id": 13, + "poly": [ + 1285, + 368, + 1319, + 368, + 1319, + 395, + 1285, + 395 + ], + "score": 0.87, + "latex": "w _ { i }" + }, + { + "category_id": 13, + "poly": [ + 1104, + 931, + 1129, + 931, + 1129, + 961, + 1104, + 961 + ], + "score": 0.87, + "latex": "\\hat { y } _ { i }" + }, + { + "category_id": 13, + "poly": [ + 1096, + 362, + 1127, + 362, + 1127, + 392, + 1096, + 392 + ], + "score": 0.86, + "latex": "i ^ { \\mathrm { { t h } } }" + }, + { + "category_id": 13, + "poly": [ + 1326, + 334, + 1359, + 334, + 1359, + 360, + 1326, + 360 + ], + "score": 0.85, + "latex": "w _ { i }" + }, + { + "category_id": 13, + "poly": [ + 1381, + 902, + 1401, + 902, + 1401, + 931, + 1381, + 931 + ], + "score": 0.84, + "latex": "\\beta" + }, + { + "category_id": 13, + "poly": [ + 632, + 1084, + 652, + 1084, + 652, + 1114, + 632, + 1114 + ], + "score": 0.84, + "latex": "\\beta" + }, + { + "category_id": 13, + "poly": [ + 1042, + 961, + 1062, + 961, + 1062, + 991, + 1042, + 991 + ], + "score": 0.83, + "latex": "\\beta" + }, + { + "category_id": 13, + "poly": [ + 753, + 1083, + 777, + 1083, + 777, + 1109, + 753, + 1109 + ], + "score": 0.83, + "latex": "\\Psi" + }, + { + "category_id": 13, + "poly": [ + 666, + 1056, + 
687, + 1056, + 687, + 1084, + 666, + 1084 + ], + "score": 0.83, + "latex": "\\gamma" + }, + { + "category_id": 13, + "poly": [ + 606, + 1974, + 631, + 1974, + 631, + 2000, + 606, + 2000 + ], + "score": 0.83, + "latex": "B" + }, + { + "category_id": 13, + "poly": [ + 482, + 333, + 501, + 333, + 501, + 362, + 482, + 362 + ], + "score": 0.81, + "latex": "\\bar { y }" + }, + { + "category_id": 13, + "poly": [ + 1284, + 1974, + 1309, + 1974, + 1309, + 2000, + 1284, + 2000 + ], + "score": 0.81, + "latex": "B" + }, + { + "category_id": 13, + "poly": [ + 735, + 1053, + 755, + 1053, + 755, + 1079, + 735, + 1079 + ], + "score": 0.81, + "latex": "\\lambda" + }, + { + "category_id": 13, + "poly": [ + 586, + 935, + 613, + 935, + 613, + 957, + 586, + 957 + ], + "score": 0.78, + "latex": "m" + }, + { + "category_id": 13, + "poly": [ + 407, + 333, + 427, + 333, + 427, + 357, + 407, + 357 + ], + "score": 0.76, + "latex": "\\bar { x }" + }, + { + "category_id": 13, + "poly": [ + 1219, + 302, + 1237, + 302, + 1237, + 329, + 1219, + 329 + ], + "score": 0.74, + "latex": "y" + }, + { + "category_id": 13, + "poly": [ + 703, + 371, + 720, + 371, + 720, + 392, + 703, + 392 + ], + "score": 0.74, + "latex": "z" + }, + { + "category_id": 13, + "poly": [ + 1324, + 1166, + 1342, + 1166, + 1342, + 1187, + 1324, + 1187 + ], + "score": 0.73, + "latex": "z" + }, + { + "category_id": 13, + "poly": [ + 497, + 294, + 530, + 294, + 530, + 330, + 497, + 330 + ], + "score": 0.67, + "latex": "x _ { i } ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 458, + 294, + 485, + 294, + 485, + 330, + 458, + 330 + ], + "score": 0.63, + "latex": "t _ { i } ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 457, + 293, + 530, + 293, + 530, + 330, + 457, + 330 + ], + "score": 0.34, + "latex": "t _ { i } ^ { k } , x _ { i } ^ { k }" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 220.0, + 929.0, + 220.0, + 929.0, + 270.0, + 292.0, + 270.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + 
"poly": [ + 297.0, + 72.0, + 859.0, + 72.0, + 859.0, + 109.0, + 297.0, + 109.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 592.0, + 1325.0, + 592.0, + 1325.0, + 632.0, + 291.0, + 632.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 830.0, + 2083.0, + 871.0, + 2083.0, + 871.0, + 2125.0, + 830.0, + 2125.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 868.0, + 1374.0, + 868.0, + 1374.0, + 901.0, + 295.0, + 901.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 898.0, + 512.0, + 898.0, + 512.0, + 935.0, + 294.0, + 935.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 585.0, + 898.0, + 668.0, + 898.0, + 668.0, + 935.0, + 585.0, + 935.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 736.0, + 898.0, + 1380.0, + 898.0, + 1380.0, + 935.0, + 736.0, + 935.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1402.0, + 898.0, + 1406.0, + 898.0, + 1406.0, + 935.0, + 1402.0, + 935.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 923.0, + 585.0, + 923.0, + 585.0, + 970.0, + 291.0, + 970.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 614.0, + 923.0, + 786.0, + 923.0, + 786.0, + 970.0, + 614.0, + 970.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 817.0, + 923.0, + 1103.0, + 923.0, + 1103.0, + 970.0, + 817.0, + 970.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1130.0, + 923.0, + 1407.0, + 923.0, + 1407.0, + 970.0, + 1130.0, + 970.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 958.0, + 1041.0, + 958.0, + 1041.0, + 995.0, + 294.0, + 995.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1063.0, + 958.0, + 1406.0, + 958.0, + 1406.0, + 995.0, + 
1063.0, + 995.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 986.0, + 1075.0, + 986.0, + 1075.0, + 1029.0, + 291.0, + 1029.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1187.0, + 986.0, + 1409.0, + 986.0, + 1409.0, + 1029.0, + 1187.0, + 1029.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1017.0, + 297.0, + 1017.0, + 297.0, + 1057.0, + 293.0, + 1057.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 366.0, + 1017.0, + 923.0, + 1017.0, + 923.0, + 1057.0, + 366.0, + 1057.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 995.0, + 1017.0, + 1406.0, + 1017.0, + 1406.0, + 1057.0, + 995.0, + 1057.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1052.0, + 665.0, + 1052.0, + 665.0, + 1085.0, + 294.0, + 1085.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 688.0, + 1052.0, + 734.0, + 1052.0, + 734.0, + 1085.0, + 688.0, + 1085.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 756.0, + 1052.0, + 1404.0, + 1052.0, + 1404.0, + 1085.0, + 756.0, + 1085.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1080.0, + 631.0, + 1080.0, + 631.0, + 1117.0, + 291.0, + 1117.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 653.0, + 1080.0, + 752.0, + 1080.0, + 752.0, + 1117.0, + 653.0, + 1117.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 778.0, + 1080.0, + 1055.0, + 1080.0, + 1055.0, + 1117.0, + 778.0, + 1117.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1264.0, + 1080.0, + 1312.0, + 1080.0, + 1312.0, + 1117.0, + 1264.0, + 1117.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1405.0, + 1080.0, + 1408.0, + 1080.0, + 1408.0, + 1117.0, + 1405.0, + 1117.0 + ], 
+ "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 1110.0, + 1077.0, + 1110.0, + 1077.0, + 1147.0, + 290.0, + 1147.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1198.0, + 1110.0, + 1248.0, + 1110.0, + 1248.0, + 1147.0, + 1198.0, + 1147.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1357.0, + 1110.0, + 1366.0, + 1110.0, + 1366.0, + 1147.0, + 1357.0, + 1147.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 293.0, + 456.0, + 293.0, + 456.0, + 332.0, + 294.0, + 332.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 531.0, + 293.0, + 592.0, + 293.0, + 592.0, + 332.0, + 531.0, + 332.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 626.0, + 293.0, + 1121.0, + 293.0, + 1121.0, + 332.0, + 626.0, + 332.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1166.0, + 293.0, + 1218.0, + 293.0, + 1218.0, + 332.0, + 1166.0, + 332.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1238.0, + 293.0, + 1406.0, + 293.0, + 1406.0, + 332.0, + 1238.0, + 332.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 285.0, + 309.0, + 406.0, + 309.0, + 406.0, + 380.0, + 285.0, + 380.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 428.0, + 309.0, + 481.0, + 309.0, + 481.0, + 380.0, + 428.0, + 380.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 502.0, + 309.0, + 666.0, + 309.0, + 666.0, + 380.0, + 502.0, + 380.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 772.0, + 309.0, + 826.0, + 309.0, + 826.0, + 380.0, + 772.0, + 380.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 930.0, + 309.0, + 1325.0, + 309.0, + 1325.0, + 380.0, + 930.0, + 380.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 
15, + "poly": [ + 1360.0, + 309.0, + 1417.0, + 309.0, + 1417.0, + 380.0, + 1360.0, + 380.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 361.0, + 296.0, + 361.0, + 296.0, + 400.0, + 291.0, + 400.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 396.0, + 361.0, + 702.0, + 361.0, + 702.0, + 400.0, + 396.0, + 400.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 721.0, + 361.0, + 804.0, + 361.0, + 804.0, + 400.0, + 721.0, + 400.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 895.0, + 361.0, + 1095.0, + 361.0, + 1095.0, + 400.0, + 895.0, + 400.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1128.0, + 361.0, + 1284.0, + 361.0, + 1284.0, + 400.0, + 1128.0, + 400.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1320.0, + 361.0, + 1406.0, + 361.0, + 1406.0, + 400.0, + 1320.0, + 400.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 394.0, + 515.0, + 394.0, + 515.0, + 428.0, + 294.0, + 428.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 701.0, + 394.0, + 1405.0, + 394.0, + 1405.0, + 428.0, + 701.0, + 428.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 424.0, + 690.0, + 424.0, + 690.0, + 460.0, + 293.0, + 460.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 992.0, + 424.0, + 1407.0, + 424.0, + 1407.0, + 460.0, + 992.0, + 460.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 289.0, + 447.0, + 327.0, + 447.0, + 327.0, + 494.0, + 289.0, + 494.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 482.0, + 447.0, + 881.0, + 447.0, + 881.0, + 494.0, + 482.0, + 494.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 954.0, + 447.0, + 1362.0, + 447.0, + 1362.0, + 
494.0, + 954.0, + 494.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1401.0, + 447.0, + 1406.0, + 447.0, + 1406.0, + 494.0, + 1401.0, + 494.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 487.0, + 1405.0, + 487.0, + 1405.0, + 518.0, + 295.0, + 518.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 398.0, + 514.0, + 410.0, + 514.0, + 410.0, + 552.0, + 398.0, + 552.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1335.0, + 1398.0, + 1335.0, + 1398.0, + 1374.0, + 294.0, + 1374.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1366.0, + 392.0, + 1366.0, + 392.0, + 1405.0, + 291.0, + 1405.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1971.0, + 605.0, + 1971.0, + 605.0, + 2007.0, + 295.0, + 2007.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 632.0, + 1971.0, + 1283.0, + 1971.0, + 1283.0, + 2007.0, + 632.0, + 2007.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1310.0, + 1971.0, + 1406.0, + 1971.0, + 1406.0, + 2007.0, + 1310.0, + 2007.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 2002.0, + 1408.0, + 2002.0, + 1408.0, + 2038.0, + 295.0, + 2038.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1154.0, + 1323.0, + 1154.0, + 1323.0, + 1197.0, + 293.0, + 1197.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1343.0, + 1154.0, + 1406.0, + 1154.0, + 1406.0, + 1197.0, + 1343.0, + 1197.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1188.0, + 453.0, + 1188.0, + 453.0, + 1224.0, + 296.0, + 1224.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 551.0, + 1188.0, + 684.0, + 1188.0, + 684.0, + 1224.0, + 551.0, + 1224.0 + 
], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1466.0, + 543.0, + 1466.0, + 543.0, + 1504.0, + 295.0, + 1504.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 605.0, + 1466.0, + 753.0, + 1466.0, + 753.0, + 1504.0, + 605.0, + 1504.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 660.0, + 957.0, + 660.0, + 957.0, + 700.0, + 294.0, + 700.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1621.0, + 352.0, + 1621.0, + 352.0, + 1654.0, + 292.0, + 1654.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1771.0, + 344.0, + 1771.0, + 344.0, + 1801.0, + 294.0, + 1801.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 31, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 296, + 1722, + 1406, + 1722, + 1406, + 2035, + 296, + 2035 + ], + "score": 0.984 + }, + { + "category_id": 1, + "poly": [ + 296, + 558, + 1405, + 558, + 1405, + 715, + 296, + 715 + ], + "score": 0.978 + }, + { + "category_id": 1, + "poly": [ + 300, + 1565, + 1403, + 1565, + 1403, + 1692, + 300, + 1692 + ], + "score": 0.977 + }, + { + "category_id": 1, + "poly": [ + 300, + 1307, + 1407, + 1307, + 1407, + 1403, + 300, + 1403 + ], + "score": 0.974 + }, + { + "category_id": 8, + "poly": [ + 627, + 969, + 1065, + 969, + 1065, + 1185, + 627, + 1185 + ], + "score": 0.965 + }, + { + "category_id": 8, + "poly": [ + 727, + 822, + 973, + 822, + 973, + 909, + 727, + 909 + ], + "score": 0.958 + }, + { + "category_id": 8, + "poly": [ + 456, + 1422, + 1242, + 1422, + 1242, + 1550, + 456, + 1550 + ], + "score": 0.955 + }, + { + "category_id": 1, + "poly": [ + 297, + 1201, + 1402, + 1201, + 1402, + 1277, + 297, + 1277 + ], + "score": 0.954 + }, + { + "category_id": 1, + "poly": [ + 295, + 228, + 1402, + 228, + 1402, + 293, + 295, + 293 + ], + "score": 0.947 + }, + { + 
"category_id": 8, + "poly": [ + 627, + 506, + 1072, + 506, + 1072, + 548, + 627, + 548 + ], + "score": 0.943 + }, + { + "category_id": 1, + "poly": [ + 295, + 743, + 1402, + 743, + 1402, + 810, + 295, + 810 + ], + "score": 0.941 + }, + { + "category_id": 8, + "poly": [ + 676, + 307, + 1025, + 307, + 1025, + 349, + 676, + 349 + ], + "score": 0.941 + }, + { + "category_id": 1, + "poly": [ + 297, + 923, + 369, + 923, + 369, + 953, + 297, + 953 + ], + "score": 0.927 + }, + { + "category_id": 2, + "poly": [ + 297, + 74, + 858, + 74, + 858, + 106, + 297, + 106 + ], + "score": 0.926 + }, + { + "category_id": 1, + "poly": [ + 297, + 361, + 369, + 361, + 369, + 391, + 297, + 391 + ], + "score": 0.926 + }, + { + "category_id": 8, + "poly": [ + 451, + 405, + 1245, + 405, + 1245, + 449, + 451, + 449 + ], + "score": 0.924 + }, + { + "category_id": 1, + "poly": [ + 296, + 461, + 344, + 461, + 344, + 491, + 296, + 491 + ], + "score": 0.905 + }, + { + "category_id": 2, + "poly": [ + 835, + 2087, + 865, + 2087, + 865, + 2113, + 835, + 2113 + ], + "score": 0.871 + }, + { + "category_id": 14, + "poly": [ + 626, + 969, + 1069, + 969, + 1069, + 1186, + 626, + 1186 + ], + "score": 0.95, + "latex": "B _ { i } ( z ) \\doteq \\left[ \\begin{array} { c } { \\delta - \\kappa ( 1 + \\gamma _ { i } ) } \\\\ { \\Psi ^ { \\prime } ( \\langle \\hat { x } _ { i } , \\beta \\rangle ) \\hat { x } _ { i } + \\gamma _ { i } \\hat { y } _ { i } \\hat { x } _ { i } } \\\\ { \\mathbf { 0 } _ { ( i - 1 ) \\times 1 } } \\\\ { - ( \\hat { y } _ { i } \\langle \\hat { x } _ { i } , \\beta \\rangle - \\lambda \\kappa ) } \\\\ { \\mathbf { 0 } _ { ( m - i ) \\times 1 } } \\end{array} \\right] ." 
+ }, + { + "category_id": 13, + "poly": [ + 415, + 1242, + 604, + 1242, + 604, + 1277, + 415, + 1277 + ], + "score": 0.95, + "latex": "\\mathbf { B } \\subseteq \\{ 1 , \\dots , m \\}" + }, + { + "category_id": 13, + "poly": [ + 1006, + 1200, + 1291, + 1200, + 1291, + 1247, + 1006, + 1247 + ], + "score": 0.94, + "latex": "\\begin{array} { r } { \\tilde { B } ( z ) = \\frac { 1 } { | \\mathbf { B } | } \\sum _ { i \\in \\mathbf { B } } B _ { i } ( z ) } \\end{array}" + }, + { + "category_id": 14, + "poly": [ + 455, + 1420, + 1243, + 1420, + 1243, + 1550, + 455, + 1550 + ], + "score": 0.94, + "latex": "J _ { \\rho A _ { 1 } } ( z ) = \\left[ \\begin{array} { c } { \\mathrm { p r o j } _ { \\mathcal { C } _ { 1 } } ( \\lambda , \\beta ) } \\\\ { \\mathrm { p r o j } _ { \\mathcal { C } _ { 2 } } ( \\gamma ) } \\end{array} \\right] \\quad \\mathrm { a n d } \\quad J _ { \\rho A _ { 2 } } ( z ) = \\left[ \\begin{array} { c } { \\mathbf { 0 } _ { 1 \\times 1 } } \\\\ { \\mathrm { p r o x } _ { \\rho c \\| \\cdot \\| _ { 1 } } ( \\beta ) } \\\\ { \\mathbf { 0 } _ { m \\times 1 } } \\end{array} \\right] ." 
+ }, + { + "category_id": 14, + "poly": [ + 725, + 817, + 974, + 817, + 974, + 910, + 725, + 910 + ], + "score": 0.93, + "latex": "B ( z ) = \\frac { 1 } { m } \\sum _ { i = 1 } ^ { m } B _ { i } ( z )" + }, + { + "category_id": 13, + "poly": [ + 1077, + 1725, + 1243, + 1725, + 1243, + 1758, + 1077, + 1758 + ], + "score": 0.93, + "latex": "\\rho _ { k } \\le \\overline { { \\rho } } < 1 / L" + }, + { + "category_id": 13, + "poly": [ + 1096, + 231, + 1167, + 231, + 1167, + 264, + 1096, + 264 + ], + "score": 0.93, + "latex": "A _ { 2 } ( z )" + }, + { + "category_id": 13, + "poly": [ + 1024, + 1910, + 1144, + 1910, + 1144, + 1945, + 1024, + 1945 + ], + "score": 0.93, + "latex": "\\alpha = C _ { f } \\rho ^ { 2 }" + }, + { + "category_id": 13, + "poly": [ + 627, + 230, + 699, + 230, + 699, + 264, + 627, + 264 + ], + "score": 0.93, + "latex": "A _ { 1 } ( z )" + }, + { + "category_id": 13, + "poly": [ + 968, + 1845, + 1145, + 1845, + 1145, + 1878, + 968, + 1878 + ], + "score": 0.92, + "latex": "\\rho _ { k } = C _ { d } k ^ { - 0 . 2 5 }" + }, + { + "category_id": 14, + "poly": [ + 673, + 306, + 1022, + 306, + 1022, + 347, + 673, + 347 + ], + "score": 0.92, + "latex": "A _ { 1 } ( z ) \\doteq N _ { \\mathcal { C } _ { 1 } } ( \\lambda , \\beta ) \\times N _ { \\mathcal { C } _ { 2 } } ( \\gamma ) ," + }, + { + "category_id": 13, + "poly": [ + 834, + 1909, + 969, + 1909, + 969, + 1943, + 834, + 1943 + ], + "score": 0.92, + "latex": "\\rho = K ^ { - 1 / 4 }" + }, + { + "category_id": 14, + "poly": [ + 623, + 507, + 1074, + 507, + 1074, + 546, + 623, + 546 + ], + "score": 0.92, + "latex": "A _ { 2 } ( z ) \\doteq \\{ \\mathbf { 0 } _ { 1 \\times 1 } \\} \\times c \\partial \\| \\beta \\| _ { 1 } \\times \\{ \\mathbf { 0 } _ { m \\times 1 } \\} ." + }, + { + "category_id": 13, + "poly": [ + 734, + 1844, + 914, + 1844, + 914, + 1877, + 734, + 1877 + ], + "score": 0.92, + "latex": "\\alpha _ { k } ^ { - \\pm } = C _ { d } k ^ { - 0 . 
5 1 }" + }, + { + "category_id": 13, + "poly": [ + 872, + 745, + 1164, + 745, + 1164, + 776, + 872, + 776 + ], + "score": 0.92, + "latex": "B : \\mathbb { R } ^ { m + d + 1 } \\mapsto \\mathbb { R } ^ { m + d + 1 }" + }, + { + "category_id": 13, + "poly": [ + 514, + 562, + 574, + 562, + 574, + 595, + 514, + 595 + ], + "score": 0.91, + "latex": "{ \\bf 0 } _ { p \\times 1 }" + }, + { + "category_id": 13, + "poly": [ + 1314, + 1943, + 1402, + 1943, + 1402, + 1976, + 1314, + 1976 + ], + "score": 0.91, + "latex": "C _ { f } = 5" + }, + { + "category_id": 13, + "poly": [ + 297, + 1910, + 410, + 1910, + 410, + 1943, + 297, + 1943 + ], + "score": 0.91, + "latex": "C _ { d } = 0 . 5" + }, + { + "category_id": 13, + "poly": [ + 1137, + 2003, + 1257, + 2003, + 1257, + 2031, + 1137, + 2031 + ], + "score": 0.91, + "latex": "K = 1 0 0 0" + }, + { + "category_id": 13, + "poly": [ + 568, + 1942, + 772, + 1942, + 772, + 1975, + 568, + 1975 + ], + "score": 0.91, + "latex": "\\{ 0 . 1 , 0 . 5 , 1 , 5 , 1 0 \\}" + }, + { + "category_id": 13, + "poly": [ + 393, + 2003, + 514, + 2003, + 514, + 2032, + 393, + 2032 + ], + "score": 0.9, + "latex": "K = 5 0 0 0" + }, + { + "category_id": 13, + "poly": [ + 902, + 1944, + 990, + 1944, + 990, + 1976, + 902, + 1976 + ], + "score": 0.9, + "latex": "C _ { f } = 1" + }, + { + "category_id": 13, + "poly": [ + 1010, + 1880, + 1096, + 1880, + 1096, + 1908, + 1010, + 1908 + ], + "score": 0.9, + "latex": "C _ { d } = 1" + }, + { + "category_id": 13, + "poly": [ + 739, + 2003, + 845, + 2003, + 845, + 2031, + 739, + 2031 + ], + "score": 0.9, + "latex": "K = 2 0 0" + }, + { + "category_id": 13, + "poly": [ + 957, + 1568, + 996, + 1568, + 996, + 1599, + 957, + 1599 + ], + "score": 0.9, + "latex": "\\ell _ { \\infty }" + }, + { + "category_id": 13, + "poly": [ + 672, + 1879, + 875, + 1879, + 875, + 1910, + 672, + 1910 + ], + "score": 0.9, + "latex": "\\{ 0 . 1 , 0 . 
5 , 1 , 5 , 1 0 \\}" + }, + { + "category_id": 13, + "poly": [ + 471, + 1943, + 508, + 1943, + 508, + 1976, + 471, + 1976 + ], + "score": 0.89, + "latex": "C _ { f }" + }, + { + "category_id": 13, + "poly": [ + 569, + 1878, + 605, + 1878, + 605, + 1908, + 569, + 1908 + ], + "score": 0.89, + "latex": "C _ { d }" + }, + { + "category_id": 13, + "poly": [ + 712, + 623, + 748, + 623, + 748, + 652, + 712, + 652 + ], + "score": 0.89, + "latex": "A _ { 1 }" + }, + { + "category_id": 13, + "poly": [ + 860, + 1568, + 890, + 1568, + 890, + 1599, + 860, + 1599 + ], + "score": 0.89, + "latex": "\\mathcal { C } _ { 2 }" + }, + { + "category_id": 13, + "poly": [ + 1298, + 622, + 1334, + 622, + 1334, + 652, + 1298, + 652 + ], + "score": 0.89, + "latex": "A _ { 2 }" + }, + { + "category_id": 13, + "poly": [ + 798, + 1311, + 833, + 1311, + 833, + 1340, + 798, + 1340 + ], + "score": 0.89, + "latex": "A _ { 1 }" + }, + { + "category_id": 13, + "poly": [ + 458, + 1569, + 488, + 1569, + 488, + 1599, + 458, + 1599 + ], + "score": 0.89, + "latex": "\\mathcal { C } _ { 1 }" + }, + { + "category_id": 13, + "poly": [ + 829, + 1342, + 865, + 1342, + 865, + 1371, + 829, + 1371 + ], + "score": 0.88, + "latex": "A _ { 2 }" + }, + { + "category_id": 13, + "poly": [ + 1009, + 683, + 1080, + 683, + 1080, + 709, + 1009, + 709 + ], + "score": 0.88, + "latex": "n = 2" + }, + { + "category_id": 13, + "poly": [ + 1362, + 591, + 1400, + 591, + 1400, + 621, + 1362, + 621 + ], + "score": 0.88, + "latex": "\\ell _ { \\infty }" + }, + { + "category_id": 13, + "poly": [ + 1079, + 592, + 1109, + 592, + 1109, + 621, + 1079, + 621 + ], + "score": 0.88, + "latex": "\\mathcal { C } _ { 2 }" + }, + { + "category_id": 13, + "poly": [ + 786, + 1599, + 814, + 1599, + 814, + 1629, + 786, + 1629 + ], + "score": 0.88, + "latex": "\\ell _ { 1 }" + }, + { + "category_id": 14, + "poly": [ + 453, + 404, + 1243, + 404, + 1243, + 449, + 453, + 449 + ], + "score": 0.88, + "latex": "\\begin{array} { r } { \\mathcal { C } _ { 1 
} \\doteq \\bigl \\{ ( \\lambda , \\beta ) : \\| \\beta \\| _ { 2 } \\le \\lambda / ( L _ { \\Psi } + 1 ) \\bigr \\} \\quad \\mathrm { ~ a n d ~ } \\quad \\mathcal { C } _ { 2 } \\doteq \\{ \\gamma : \\| \\gamma \\| _ { \\infty } \\le 1 \\} , } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 1117, + 562, + 1148, + 562, + 1148, + 591, + 1117, + 591 + ], + "score": 0.87, + "latex": "\\mathcal { C } _ { 1 }" + }, + { + "category_id": 13, + "poly": [ + 474, + 1342, + 505, + 1342, + 505, + 1371, + 474, + 1371 + ], + "score": 0.87, + "latex": "\\mathcal { C } _ { 1 }" + }, + { + "category_id": 13, + "poly": [ + 559, + 1341, + 589, + 1341, + 589, + 1371, + 559, + 1371 + ], + "score": 0.87, + "latex": "\\mathcal { C } _ { 2 }" + }, + { + "category_id": 13, + "poly": [ + 1302, + 1341, + 1330, + 1341, + 1330, + 1371, + 1302, + 1371 + ], + "score": 0.87, + "latex": "\\ell _ { 1 }" + }, + { + "category_id": 13, + "poly": [ + 864, + 1207, + 889, + 1207, + 889, + 1233, + 864, + 1233 + ], + "score": 0.85, + "latex": "B" + }, + { + "category_id": 13, + "poly": [ + 1375, + 232, + 1402, + 232, + 1402, + 261, + 1375, + 261 + ], + "score": 0.85, + "latex": "\\ell _ { 1 }" + }, + { + "category_id": 13, + "poly": [ + 717, + 566, + 735, + 566, + 735, + 592, + 717, + 592 + ], + "score": 0.82, + "latex": "p" + }, + { + "category_id": 13, + "poly": [ + 564, + 1757, + 589, + 1757, + 589, + 1783, + 564, + 1783 + ], + "score": 0.79, + "latex": "B" + }, + { + "category_id": 13, + "poly": [ + 508, + 1757, + 531, + 1757, + 531, + 1783, + 508, + 1783 + ], + "score": 0.78, + "latex": "L" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 72.0, + 858.0, + 72.0, + 858.0, + 109.0, + 297.0, + 109.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 830.0, + 2084.0, + 869.0, + 2084.0, + 869.0, + 2122.0, + 830.0, + 2122.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1722.0, + 1076.0, + 1722.0, + 1076.0, + 1761.0, + 294.0, + 
1761.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1244.0, + 1722.0, + 1406.0, + 1722.0, + 1406.0, + 1761.0, + 1244.0, + 1761.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1755.0, + 507.0, + 1755.0, + 507.0, + 1791.0, + 294.0, + 1791.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 532.0, + 1755.0, + 563.0, + 1755.0, + 563.0, + 1791.0, + 532.0, + 1791.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 590.0, + 1755.0, + 1407.0, + 1755.0, + 1407.0, + 1791.0, + 590.0, + 1791.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1786.0, + 1408.0, + 1786.0, + 1408.0, + 1822.0, + 294.0, + 1822.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1814.0, + 1406.0, + 1814.0, + 1406.0, + 1854.0, + 293.0, + 1854.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 289.0, + 1837.0, + 733.0, + 1837.0, + 733.0, + 1885.0, + 289.0, + 1885.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 915.0, + 1837.0, + 967.0, + 1837.0, + 967.0, + 1885.0, + 915.0, + 1885.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1146.0, + 1837.0, + 1409.0, + 1837.0, + 1409.0, + 1885.0, + 1146.0, + 1885.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1877.0, + 568.0, + 1877.0, + 568.0, + 1913.0, + 294.0, + 1913.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 606.0, + 1877.0, + 671.0, + 1877.0, + 671.0, + 1913.0, + 606.0, + 1913.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 876.0, + 1877.0, + 1009.0, + 1877.0, + 1009.0, + 1913.0, + 876.0, + 1913.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1097.0, + 1877.0, + 1406.0, + 1877.0, + 1406.0, + 1913.0, + 1097.0, + 1913.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1906.0, + 296.0, + 1906.0, + 296.0, + 1949.0, + 293.0, + 1949.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 411.0, + 1906.0, + 833.0, + 1906.0, + 833.0, + 1949.0, + 411.0, + 1949.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 970.0, + 1906.0, + 1023.0, + 1906.0, + 1023.0, + 1949.0, + 970.0, + 1949.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1145.0, + 1906.0, + 1408.0, + 1906.0, + 1408.0, + 1949.0, + 1145.0, + 1949.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1940.0, + 470.0, + 1940.0, + 470.0, + 1980.0, + 293.0, + 1980.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 509.0, + 1940.0, + 567.0, + 1940.0, + 567.0, + 1980.0, + 509.0, + 1980.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 773.0, + 1940.0, + 901.0, + 1940.0, + 901.0, + 1980.0, + 773.0, + 1980.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 991.0, + 1940.0, + 1313.0, + 1940.0, + 1313.0, + 1980.0, + 991.0, + 1980.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1403.0, + 1940.0, + 1408.0, + 1940.0, + 1408.0, + 1980.0, + 1403.0, + 1980.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1970.0, + 1409.0, + 1970.0, + 1409.0, + 2008.0, + 293.0, + 2008.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 2001.0, + 392.0, + 2001.0, + 392.0, + 2036.0, + 293.0, + 2036.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 515.0, + 2001.0, + 738.0, + 2001.0, + 738.0, + 2036.0, + 515.0, + 2036.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 846.0, + 2001.0, + 1136.0, + 2001.0, + 1136.0, + 2036.0, + 846.0, + 2036.0 + ], + "score": 1.0, + 
"text": "" + }, + { + "category_id": 15, + "poly": [ + 1258.0, + 2001.0, + 1268.0, + 2001.0, + 1268.0, + 2036.0, + 1258.0, + 2036.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 560.0, + 513.0, + 560.0, + 513.0, + 594.0, + 295.0, + 594.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 575.0, + 560.0, + 716.0, + 560.0, + 716.0, + 594.0, + 575.0, + 594.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 736.0, + 560.0, + 1116.0, + 560.0, + 1116.0, + 594.0, + 736.0, + 594.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1149.0, + 560.0, + 1406.0, + 560.0, + 1406.0, + 594.0, + 1149.0, + 594.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 586.0, + 1078.0, + 586.0, + 1078.0, + 627.0, + 294.0, + 627.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1110.0, + 586.0, + 1361.0, + 586.0, + 1361.0, + 627.0, + 1110.0, + 627.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 620.0, + 711.0, + 620.0, + 711.0, + 654.0, + 295.0, + 654.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 749.0, + 620.0, + 1297.0, + 620.0, + 1297.0, + 654.0, + 749.0, + 654.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1335.0, + 620.0, + 1403.0, + 620.0, + 1403.0, + 654.0, + 1335.0, + 654.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 649.0, + 1406.0, + 649.0, + 1406.0, + 689.0, + 294.0, + 689.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 682.0, + 1008.0, + 682.0, + 1008.0, + 715.0, + 295.0, + 715.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1081.0, + 682.0, + 1090.0, + 682.0, + 1090.0, + 715.0, + 1081.0, + 715.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 
296.0, + 1567.0, + 457.0, + 1567.0, + 457.0, + 1600.0, + 296.0, + 1600.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 489.0, + 1567.0, + 859.0, + 1567.0, + 859.0, + 1600.0, + 489.0, + 1600.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 891.0, + 1567.0, + 956.0, + 1567.0, + 956.0, + 1600.0, + 891.0, + 1600.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 997.0, + 1567.0, + 1403.0, + 1567.0, + 1403.0, + 1600.0, + 997.0, + 1600.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1598.0, + 785.0, + 1598.0, + 785.0, + 1633.0, + 293.0, + 1633.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 815.0, + 1598.0, + 1407.0, + 1598.0, + 1407.0, + 1633.0, + 815.0, + 1633.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1627.0, + 1405.0, + 1627.0, + 1405.0, + 1664.0, + 294.0, + 1664.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1659.0, + 559.0, + 1659.0, + 559.0, + 1695.0, + 294.0, + 1695.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1308.0, + 797.0, + 1308.0, + 797.0, + 1343.0, + 295.0, + 1343.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 834.0, + 1308.0, + 1405.0, + 1308.0, + 1405.0, + 1343.0, + 834.0, + 1343.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1338.0, + 473.0, + 1338.0, + 473.0, + 1376.0, + 294.0, + 1376.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 506.0, + 1338.0, + 558.0, + 1338.0, + 558.0, + 1376.0, + 506.0, + 1376.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 590.0, + 1338.0, + 828.0, + 1338.0, + 828.0, + 1376.0, + 590.0, + 1376.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 866.0, + 1338.0, + 1301.0, + 
1338.0, + 1301.0, + 1376.0, + 866.0, + 1376.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1331.0, + 1338.0, + 1408.0, + 1338.0, + 1408.0, + 1376.0, + 1331.0, + 1376.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1368.0, + 442.0, + 1368.0, + 442.0, + 1409.0, + 294.0, + 1409.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1196.0, + 863.0, + 1196.0, + 863.0, + 1247.0, + 291.0, + 1247.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 890.0, + 1196.0, + 1005.0, + 1196.0, + 1005.0, + 1247.0, + 890.0, + 1247.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1292.0, + 1196.0, + 1407.0, + 1196.0, + 1407.0, + 1247.0, + 1292.0, + 1247.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1240.0, + 414.0, + 1240.0, + 414.0, + 1279.0, + 296.0, + 1279.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 605.0, + 1240.0, + 931.0, + 1240.0, + 931.0, + 1279.0, + 605.0, + 1279.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 227.0, + 626.0, + 227.0, + 626.0, + 267.0, + 294.0, + 267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 700.0, + 227.0, + 1095.0, + 227.0, + 1095.0, + 267.0, + 700.0, + 267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1168.0, + 227.0, + 1374.0, + 227.0, + 1374.0, + 267.0, + 1168.0, + 267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 261.0, + 637.0, + 261.0, + 637.0, + 295.0, + 294.0, + 295.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 289.0, + 733.0, + 871.0, + 733.0, + 871.0, + 790.0, + 289.0, + 790.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1165.0, + 733.0, + 1409.0, + 733.0, + 1409.0, + 790.0, + 1165.0, + 
790.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 775.0, + 448.0, + 775.0, + 448.0, + 812.0, + 293.0, + 812.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 918.0, + 374.0, + 918.0, + 374.0, + 957.0, + 294.0, + 957.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 356.0, + 374.0, + 356.0, + 374.0, + 395.0, + 294.0, + 395.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 461.0, + 344.0, + 461.0, + 344.0, + 491.0, + 294.0, + 491.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 32, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 297, + 655, + 1407, + 655, + 1407, + 910, + 297, + 910 + ], + "score": 0.98 + }, + { + "category_id": 1, + "poly": [ + 297, + 1579, + 1407, + 1579, + 1407, + 1797, + 297, + 1797 + ], + "score": 0.98 + }, + { + "category_id": 1, + "poly": [ + 298, + 1234, + 1404, + 1234, + 1404, + 1359, + 298, + 1359 + ], + "score": 0.98 + }, + { + "category_id": 1, + "poly": [ + 297, + 1846, + 1405, + 1846, + 1405, + 2034, + 297, + 2034 + ], + "score": 0.979 + }, + { + "category_id": 5, + "poly": [ + 644, + 946, + 1047, + 946, + 1047, + 1088, + 644, + 1088 + ], + "score": 0.973, + "html": "
epsilonSUSYreal-sim
C0.560.560.77
d0.60.60.55
" + }, + { + "category_id": 3, + "poly": [ + 300, + 250, + 1375, + 250, + 1375, + 502, + 300, + 502 + ], + "score": 0.964 + }, + { + "category_id": 4, + "poly": [ + 297, + 519, + 1399, + 519, + 1399, + 582, + 297, + 582 + ], + "score": 0.95 + }, + { + "category_id": 1, + "poly": [ + 296, + 1373, + 1404, + 1373, + 1404, + 1437, + 296, + 1437 + ], + "score": 0.948 + }, + { + "category_id": 8, + "poly": [ + 746, + 1466, + 951, + 1466, + 951, + 1539, + 746, + 1539 + ], + "score": 0.947 + }, + { + "category_id": 6, + "poly": [ + 633, + 1131, + 1066, + 1131, + 1066, + 1164, + 633, + 1164 + ], + "score": 0.913 + }, + { + "category_id": 2, + "poly": [ + 297, + 76, + 857, + 76, + 857, + 104, + 297, + 104 + ], + "score": 0.889 + }, + { + "category_id": 2, + "poly": [ + 835, + 2088, + 864, + 2088, + 864, + 2112, + 835, + 2112 + ], + "score": 0.857 + }, + { + "category_id": 14, + "poly": [ + 745, + 1465, + 954, + 1465, + 954, + 1538, + 745, + 1538 + ], + "score": 0.95, + "latex": "\\tau = { \\frac { 1 - \\sqrt { 1 - p } } { 2 L } } ." + }, + { + "category_id": 13, + "poly": [ + 1178, + 814, + 1256, + 814, + 1256, + 848, + 1178, + 848 + ], + "score": 0.93, + "latex": "\\{ C , d \\}" + }, + { + "category_id": 13, + "poly": [ + 665, + 811, + 810, + 811, + 810, + 845, + 665, + 845 + ], + "score": 0.93, + "latex": "\\alpha _ { k } = C k ^ { - d }" + }, + { + "category_id": 13, + "poly": [ + 347, + 811, + 527, + 811, + 527, + 848, + 347, + 848 + ], + "score": 0.92, + "latex": "\\textstyle \\sum _ { k = 1 } ^ { \\infty } \\alpha _ { k } ^ { 2 } < \\infty" + }, + { + "category_id": 13, + "poly": [ + 1226, + 780, + 1401, + 780, + 1401, + 814, + 1226, + 814 + ], + "score": 0.92, + "latex": "\\textstyle \\sum _ { k = 1 } ^ { \\infty } { \\dot { \\alpha } } _ { k } = \\infty" + }, + { + "category_id": 13, + "poly": [ + 297, + 1406, + 401, + 1406, + 401, + 1438, + 297, + 1438 + ], + "score": 0.89, + "latex": "p = 0 . 
0 1" + }, + { + "category_id": 13, + "poly": [ + 1307, + 689, + 1398, + 689, + 1398, + 718, + 1307, + 718 + ], + "score": 0.88, + "latex": "\\theta = 0 . 8" + }, + { + "category_id": 13, + "poly": [ + 656, + 849, + 722, + 849, + 722, + 877, + 656, + 877 + ], + "score": 0.87, + "latex": "5 \\times 5" + }, + { + "category_id": 13, + "poly": [ + 1036, + 784, + 1071, + 784, + 1071, + 811, + 1036, + 811 + ], + "score": 0.86, + "latex": "\\alpha _ { k }" + }, + { + "category_id": 13, + "poly": [ + 299, + 848, + 536, + 848, + 536, + 881, + 299, + 881 + ], + "score": 0.86, + "latex": "[ 1 0 ^ { - 4 } , 1 0 ] \\times [ 0 . 5 1 , 1 ]" + }, + { + "category_id": 13, + "poly": [ + 1276, + 749, + 1346, + 749, + 1346, + 779, + 1276, + 779 + ], + "score": 0.84, + "latex": "0 . 9 / L" + }, + { + "category_id": 13, + "poly": [ + 1293, + 1881, + 1398, + 1881, + 1398, + 1917, + 1293, + 1917 + ], + "score": 0.84, + "latex": "\\mathrm { p r o x } _ { c \\parallel \\cdot \\parallel _ { 1 } }" + }, + { + "category_id": 13, + "poly": [ + 1259, + 1380, + 1276, + 1380, + 1276, + 1406, + 1259, + 1406 + ], + "score": 0.79, + "latex": "p" + }, + { + "category_id": 13, + "poly": [ + 1045, + 1380, + 1063, + 1380, + 1063, + 1407, + 1045, + 1407 + ], + "score": 0.79, + "latex": "p" + }, + { + "category_id": 13, + "poly": [ + 1061, + 1978, + 1076, + 1978, + 1076, + 2000, + 1061, + 2000 + ], + "score": 0.7, + "latex": "c" + }, + { + "category_id": 13, + "poly": [ + 837, + 1237, + 904, + 1237, + 904, + 1269, + 837, + 1269 + ], + "score": 0.38, + "latex": "\\mathrm { F B F p }" + }, + { + "category_id": 15, + "poly": [ + 683.0, + 251.0, + 710.0, + 251.0, + 710.0, + 270.0, + 683.0, + 270.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1057.0, + 270.0, + 1091.0, + 270.0, + 1091.0, + 303.0, + 1057.0, + 303.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1192.0, + 263.0, + 1317.0, + 263.0, + 1317.0, + 289.0, + 1192.0, + 289.0 + ], + "score": 
1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 298.0, + 282.0, + 345.0, + 282.0, + 345.0, + 449.0, + 298.0, + 449.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 661.0, + 282.0, + 709.0, + 282.0, + 709.0, + 448.0, + 661.0, + 448.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1033.0, + 282.0, + 1077.0, + 282.0, + 1077.0, + 447.0, + 1033.0, + 447.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1167.0, + 311.0, + 1183.0, + 311.0, + 1183.0, + 344.0, + 1167.0, + 344.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1192.0, + 282.0, + 1312.0, + 282.0, + 1312.0, + 348.0, + 1192.0, + 348.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 325.0, + 350.0, + 345.0, + 350.0, + 345.0, + 369.0, + 325.0, + 369.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 688.0, + 344.0, + 709.0, + 344.0, + 709.0, + 363.0, + 688.0, + 363.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1058.0, + 350.0, + 1077.0, + 350.0, + 1077.0, + 365.0, + 1058.0, + 365.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1192.0, + 344.0, + 1355.0, + 344.0, + 1355.0, + 367.0, + 1192.0, + 367.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 326.0, + 382.0, + 344.0, + 382.0, + 344.0, + 397.0, + 326.0, + 397.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 688.0, + 374.0, + 709.0, + 374.0, + 709.0, + 393.0, + 688.0, + 393.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1057.0, + 374.0, + 1077.0, + 374.0, + 1077.0, + 392.0, + 1057.0, + 392.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1169.0, + 371.0, + 1181.0, + 371.0, + 1181.0, + 380.0, + 1169.0, + 380.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ 
+ 1191.0, + 363.0, + 1253.0, + 363.0, + 1253.0, + 409.0, + 1191.0, + 409.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 325.0, + 410.0, + 347.0, + 410.0, + 347.0, + 429.0, + 325.0, + 429.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 484.0, + 404.0, + 494.0, + 404.0, + 494.0, + 414.0, + 484.0, + 414.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 688.0, + 405.0, + 709.0, + 405.0, + 709.0, + 425.0, + 688.0, + 425.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1058.0, + 402.0, + 1076.0, + 402.0, + 1076.0, + 416.0, + 1058.0, + 416.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 441.0, + 345.0, + 441.0, + 345.0, + 459.0, + 320.0, + 459.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 457.0, + 420.0, + 589.0, + 420.0, + 589.0, + 457.0, + 457.0, + 457.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 683.0, + 437.0, + 709.0, + 437.0, + 709.0, + 454.0, + 683.0, + 454.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1058.0, + 427.0, + 1076.0, + 427.0, + 1076.0, + 442.0, + 1058.0, + 442.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1134.0, + 424.0, + 1143.0, + 424.0, + 1143.0, + 433.0, + 1134.0, + 433.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1148.0, + 442.0, + 1163.0, + 442.0, + 1163.0, + 452.0, + 1148.0, + 452.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1173.0, + 441.0, + 1256.0, + 441.0, + 1256.0, + 457.0, + 1173.0, + 457.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1306.0, + 440.0, + 1354.0, + 440.0, + 1354.0, + 460.0, + 1306.0, + 460.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1052.0, + 451.0, + 1077.0, + 451.0, + 1077.0, + 468.0, + 
1052.0, + 468.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 354.0, + 471.0, + 369.0, + 471.0, + 369.0, + 486.0, + 354.0, + 486.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 386.0, + 471.0, + 401.0, + 471.0, + 401.0, + 486.0, + 386.0, + 486.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 415.0, + 471.0, + 435.0, + 471.0, + 435.0, + 486.0, + 415.0, + 486.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 447.0, + 471.0, + 465.0, + 471.0, + 465.0, + 486.0, + 447.0, + 486.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 479.0, + 471.0, + 498.0, + 471.0, + 498.0, + 486.0, + 479.0, + 486.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 512.0, + 471.0, + 530.0, + 471.0, + 530.0, + 486.0, + 512.0, + 486.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 543.0, + 471.0, + 562.0, + 471.0, + 562.0, + 486.0, + 543.0, + 486.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 576.0, + 471.0, + 594.0, + 471.0, + 594.0, + 486.0, + 576.0, + 486.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 609.0, + 471.0, + 626.0, + 471.0, + 626.0, + 486.0, + 609.0, + 486.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 696.0, + 470.0, + 727.0, + 470.0, + 727.0, + 487.0, + 696.0, + 487.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 735.0, + 470.0, + 768.0, + 470.0, + 768.0, + 487.0, + 735.0, + 487.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 775.0, + 470.0, + 808.0, + 470.0, + 808.0, + 487.0, + 775.0, + 487.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 815.0, + 470.0, + 850.0, + 470.0, + 850.0, + 487.0, + 815.0, + 487.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + 
"poly": [ + 855.0, + 470.0, + 888.0, + 470.0, + 888.0, + 487.0, + 855.0, + 487.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 895.0, + 470.0, + 928.0, + 470.0, + 928.0, + 487.0, + 895.0, + 487.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 935.0, + 470.0, + 970.0, + 470.0, + 970.0, + 487.0, + 935.0, + 487.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 977.0, + 470.0, + 1009.0, + 470.0, + 1009.0, + 487.0, + 977.0, + 487.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1077.0, + 473.0, + 1088.0, + 473.0, + 1088.0, + 485.0, + 1077.0, + 485.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1129.0, + 471.0, + 1148.0, + 471.0, + 1148.0, + 486.0, + 1129.0, + 486.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1186.0, + 470.0, + 1203.0, + 470.0, + 1203.0, + 486.0, + 1186.0, + 486.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1240.0, + 469.0, + 1261.0, + 469.0, + 1261.0, + 487.0, + 1240.0, + 487.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1295.0, + 469.0, + 1317.0, + 469.0, + 1317.0, + 488.0, + 1295.0, + 488.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1349.0, + 470.0, + 1375.0, + 470.0, + 1375.0, + 487.0, + 1349.0, + 487.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 460.0, + 480.0, + 513.0, + 480.0, + 513.0, + 501.0, + 460.0, + 501.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 824.0, + 482.0, + 876.0, + 482.0, + 876.0, + 501.0, + 824.0, + 501.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1192.0, + 480.0, + 1247.0, + 480.0, + 1247.0, + 501.0, + 1192.0, + 501.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 319.0, + 257.0, + 348.0, + 257.0, + 348.0, + 
276.0, + 319.0, + 276.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 362.0, + 329.5, + 442.0, + 329.5, + 442.0, + 362.5, + 362.0, + 362.5 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 446.0, + 379.5, + 469.0, + 379.5, + 469.0, + 395.5, + 446.0, + 395.5 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 516.0, + 1404.0, + 516.0, + 1404.0, + 556.0, + 295.0, + 556.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 549.0, + 643.0, + 549.0, + 643.0, + 584.0, + 295.0, + 584.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 628.0, + 1124.0, + 1070.0, + 1124.0, + 1070.0, + 1169.0, + 628.0, + 1169.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 73.0, + 859.0, + 73.0, + 859.0, + 108.0, + 297.0, + 108.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 829.0, + 2083.0, + 872.0, + 2083.0, + 872.0, + 2123.0, + 829.0, + 2123.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 656.0, + 1408.0, + 656.0, + 1408.0, + 691.0, + 294.0, + 691.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 686.0, + 1306.0, + 686.0, + 1306.0, + 723.0, + 294.0, + 723.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1399.0, + 686.0, + 1408.0, + 686.0, + 1408.0, + 723.0, + 1399.0, + 723.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 717.0, + 1405.0, + 717.0, + 1405.0, + 753.0, + 294.0, + 753.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 749.0, + 1275.0, + 749.0, + 1275.0, + 784.0, + 294.0, + 784.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1347.0, + 749.0, + 1404.0, + 749.0, + 1404.0, + 784.0, + 1347.0, + 784.0 + ], + "score": 1.0, + "text": "" + }, 
+ { + "category_id": 15, + "poly": [ + 291.0, + 771.0, + 1035.0, + 771.0, + 1035.0, + 822.0, + 291.0, + 822.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1072.0, + 771.0, + 1225.0, + 771.0, + 1225.0, + 822.0, + 1072.0, + 822.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1402.0, + 771.0, + 1407.0, + 771.0, + 1407.0, + 822.0, + 1402.0, + 822.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 528.0, + 795.0, + 664.0, + 795.0, + 664.0, + 864.0, + 528.0, + 864.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 811.0, + 795.0, + 1177.0, + 795.0, + 1177.0, + 864.0, + 811.0, + 864.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1257.0, + 795.0, + 1415.0, + 795.0, + 1415.0, + 864.0, + 1257.0, + 864.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 842.0, + 298.0, + 842.0, + 298.0, + 885.0, + 292.0, + 885.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 537.0, + 842.0, + 655.0, + 842.0, + 655.0, + 885.0, + 537.0, + 885.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 723.0, + 842.0, + 1410.0, + 842.0, + 1410.0, + 885.0, + 723.0, + 885.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 879.0, + 683.0, + 879.0, + 683.0, + 909.0, + 296.0, + 909.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 810.0, + 431.0, + 810.0, + 431.0, + 854.5, + 292.0, + 854.5 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1581.0, + 1408.0, + 1581.0, + 1408.0, + 1616.0, + 295.0, + 1616.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1612.0, + 1408.0, + 1612.0, + 1408.0, + 1646.0, + 295.0, + 1646.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1641.0, + 
1405.0, + 1641.0, + 1405.0, + 1677.0, + 295.0, + 1677.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1674.0, + 1403.0, + 1674.0, + 1403.0, + 1705.0, + 296.0, + 1705.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1705.0, + 1405.0, + 1705.0, + 1405.0, + 1736.0, + 296.0, + 1736.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1735.0, + 1404.0, + 1735.0, + 1404.0, + 1766.0, + 296.0, + 1766.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1766.0, + 573.0, + 1766.0, + 573.0, + 1796.0, + 295.0, + 1796.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1237.0, + 836.0, + 1237.0, + 836.0, + 1270.0, + 294.0, + 1270.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 905.0, + 1237.0, + 1405.0, + 1237.0, + 1405.0, + 1270.0, + 905.0, + 1270.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1266.0, + 1404.0, + 1266.0, + 1404.0, + 1301.0, + 293.0, + 1301.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1299.0, + 1405.0, + 1299.0, + 1405.0, + 1332.0, + 293.0, + 1332.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1329.0, + 905.0, + 1329.0, + 905.0, + 1360.0, + 294.0, + 1360.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1844.0, + 1405.0, + 1844.0, + 1405.0, + 1882.0, + 294.0, + 1882.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1910.0, + 1402.0, + 1910.0, + 1402.0, + 1946.0, + 295.0, + 1946.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1941.0, + 1405.0, + 1941.0, + 1405.0, + 1977.0, + 294.0, + 1977.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1972.0, + 1060.0, + 
1972.0, + 1060.0, + 2008.0, + 294.0, + 2008.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1077.0, + 1972.0, + 1405.0, + 1972.0, + 1405.0, + 2008.0, + 1077.0, + 2008.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 2003.0, + 536.0, + 2003.0, + 536.0, + 2035.0, + 295.0, + 2035.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 288.75, + 1870.5, + 1410.75, + 1870.5, + 1410.75, + 1919.5, + 288.75, + 1919.5 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1370.0, + 1044.0, + 1370.0, + 1044.0, + 1410.0, + 294.0, + 1410.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1064.0, + 1370.0, + 1258.0, + 1370.0, + 1258.0, + 1410.0, + 1064.0, + 1410.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1277.0, + 1370.0, + 1406.0, + 1370.0, + 1406.0, + 1410.0, + 1277.0, + 1410.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 1404.0, + 296.0, + 1404.0, + 296.0, + 1440.0, + 290.0, + 1440.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 402.0, + 1404.0, + 1187.0, + 1404.0, + 1187.0, + 1440.0, + 402.0, + 1440.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 33, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 298, + 706, + 1406, + 706, + 1406, + 860, + 298, + 860 + ], + "score": 0.978 + }, + { + "category_id": 1, + "poly": [ + 298, + 893, + 1404, + 893, + 1404, + 1049, + 298, + 1049 + ], + "score": 0.978 + }, + { + "category_id": 1, + "poly": [ + 298, + 1062, + 1404, + 1062, + 1404, + 1189, + 298, + 1189 + ], + "score": 0.977 + }, + { + "category_id": 3, + "poly": [ + 307, + 252, + 1370, + 252, + 1370, + 503, + 307, + 503 + ], + "score": 0.965 + }, + { + "category_id": 1, + "poly": [ + 291, + 1299, + 1401, + 1299, + 1401, + 1363, + 291, + 
1363 + ], + "score": 0.953 + }, + { + "category_id": 8, + "poly": [ + 496, + 1523, + 1203, + 1523, + 1203, + 1908, + 496, + 1908 + ], + "score": 0.953 + }, + { + "category_id": 8, + "poly": [ + 382, + 1376, + 1315, + 1376, + 1315, + 1465, + 382, + 1465 + ], + "score": 0.952 + }, + { + "category_id": 1, + "poly": [ + 297, + 1203, + 1399, + 1203, + 1399, + 1268, + 297, + 1268 + ], + "score": 0.95 + }, + { + "category_id": 4, + "poly": [ + 295, + 519, + 1403, + 519, + 1403, + 582, + 295, + 582 + ], + "score": 0.944 + }, + { + "category_id": 8, + "poly": [ + 490, + 1958, + 1206, + 1958, + 1206, + 2030, + 490, + 2030 + ], + "score": 0.934 + }, + { + "category_id": 1, + "poly": [ + 297, + 1475, + 1019, + 1475, + 1019, + 1510, + 297, + 1510 + ], + "score": 0.926 + }, + { + "category_id": 1, + "poly": [ + 296, + 1919, + 370, + 1919, + 370, + 1949, + 296, + 1949 + ], + "score": 0.915 + }, + { + "category_id": 0, + "poly": [ + 298, + 637, + 1149, + 637, + 1149, + 673, + 298, + 673 + ], + "score": 0.908 + }, + { + "category_id": 2, + "poly": [ + 297, + 75, + 857, + 75, + 857, + 105, + 297, + 105 + ], + "score": 0.906 + }, + { + "category_id": 9, + "poly": [ + 1351, + 1428, + 1400, + 1428, + 1400, + 1459, + 1351, + 1459 + ], + "score": 0.885 + }, + { + "category_id": 9, + "poly": [ + 1350, + 1977, + 1401, + 1977, + 1401, + 2008, + 1350, + 2008 + ], + "score": 0.876 + }, + { + "category_id": 2, + "poly": [ + 835, + 2087, + 865, + 2087, + 865, + 2113, + 835, + 2113 + ], + "score": 0.862 + }, + { + "category_id": 8, + "poly": [ + 1042, + 1958, + 1204, + 1958, + 1204, + 2030, + 1042, + 2030 + ], + "score": 0.179 + }, + { + "category_id": 14, + "poly": [ + 495, + 1521, + 1201, + 1521, + 1201, + 1915, + 495, + 1915 + ], + "score": 0.94, + "latex": "\\begin{array} { l } { \\displaystyle T _ { k } ^ { \\prime } \\stackrel { \\prime } { = } \\frac { \\tau } { \\overline { { \\rho } } } \\displaystyle \\sum _ { i = 1 } ^ { n } \\| y _ { i } ^ { k } - w _ { i } ^ { k } \\| ^ { 2 } + 
\\frac { 1 } { \\overline { { \\rho } } \\tau } \\displaystyle \\sum _ { i = 1 } ^ { n } \\| z ^ { k } - x _ { i } ^ { k } \\| ^ { 2 } , } \\\\ { \\displaystyle l _ { k } \\stackrel { \\prime } { = } \\displaystyle \\sum _ { i = 1 } ^ { n } \\langle z ^ { * } - x _ { i } ^ { k } , w _ { i } ^ { * } - y _ { i } ^ { k } \\rangle + \\big \\langle z ^ { * } - x _ { n + 1 } ^ { k } , w _ { i } ^ { * } - B ( x _ { n + 1 } ^ { k } ) \\big \\rangle , } \\\\ { \\displaystyle r _ { k } \\stackrel { \\prime } { = } \\big \\langle k ^ { \\ell } , B ( \\tilde { x } ^ { k } ) - w _ { n + 1 } ^ { k } \\big \\rangle , } \\\\ { \\displaystyle r _ { k } ^ { \\prime } \\stackrel { \\prime } { = } \\big \\langle z ^ { k } - z ^ { * } , e ^ { k } \\big \\rangle , } \\\\ { \\displaystyle q _ { k } \\triangleq \\big ( \\rho _ { k } ^ { - 1 } - d / 2 \\big ) \\| \\tilde { x } ^ { k } - z ^ { k } \\| ^ { 2 } - \\| \\tilde { x } ^ { k } - z ^ { k } \\| \\| B ( \\tilde { x } ^ { k } ) - B ( z ^ { k } ) \\| } \\\\ { \\displaystyle q _ { k } ^ { \\prime } \\stackrel { \\prime } { = } \\rho _ { k } \\| \\epsilon ^ { k } \\| \\| B x _ { n + 1 } ^ { k } - B \\tilde { x } ^ { k } \\| + \\frac { 1 } { 2 d } \\| B \\tilde { x } _ { n + 1 } ^ { k } - B x _ { n + 1 } ^ { k } \\| ^ { 2 } , } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 976, + 1153, + 1147, + 1153, + 1147, + 1190, + 976, + 1190 + ], + "score": 0.93, + "latex": "\\textstyle \\sum _ { k = 1 } ^ { \\infty } \\rho _ { k } ^ { q } < \\infty" + }, + { + "category_id": 13, + "poly": [ + 915, + 1092, + 1131, + 1092, + 1131, + 1127, + 915, + 1127 + ], + "score": 0.92, + "latex": "\\mathbb { E } [ \\| \\epsilon ^ { k } \\| ^ { q } | \\mathcal { F } _ { k } ] \\le \\dot { N } ^ { q }" + }, + { + "category_id": 13, + "poly": [ + 1182, + 924, + 1263, + 924, + 1263, + 958, + 1182, + 958 + ], + "score": 0.92, + "latex": "\\mathbb { B } _ { r } \\big ( z ^ { * } \\big )" + }, + { + "category_id": 13, + "poly": [ + 395, + 925, + 733, + 925, 
+ 733, + 959, + 395, + 959 + ], + "score": 0.92, + "latex": "p ^ { * } = ( z ^ { * } , w _ { 1 } ^ { * } , \\ldots , w _ { n + 1 } ^ { * } ) \\in \\mathcal { S }" + }, + { + "category_id": 14, + "poly": [ + 381, + 1374, + 1317, + 1374, + 1317, + 1470, + 381, + 1470 + ], + "score": 0.92, + "latex": "\\begin{array} { r l } & { \\| p ^ { k + 1 } - p ^ { * } \\| ^ { 2 } \\leq ( 1 + c _ { 1 } \\alpha _ { k } ^ { 2 } ) \\| p ^ { k } - p ^ { * } \\| ^ { 2 } - c _ { 2 } \\alpha _ { k } \\rho _ { k } ( T _ { k } ^ { \\prime } + l _ { k } + r _ { k } ) - c _ { 3 } \\alpha _ { k } ( r _ { k } ^ { \\prime } + q _ { k } ) } \\\\ & { \\qquad + c _ { 1 } \\alpha _ { k } ^ { 2 } \\big ( \\| e ^ { k } \\| ^ { 2 } + \\| \\epsilon ^ { k } \\| ^ { 2 } + c _ { 4 } \\big ) + c _ { 5 } \\alpha _ { k } q _ { k } ^ { \\prime } } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 1182, + 1092, + 1401, + 1092, + 1401, + 1127, + 1182, + 1127 + ], + "score": 0.92, + "latex": "\\mathbb { E } [ \\| e ^ { k } \\| ^ { q } | \\dot { \\mathcal { F } } _ { k } ] \\le \\mathsf { \\bar { N } } ^ { q }" + }, + { + "category_id": 13, + "poly": [ + 580, + 1478, + 724, + 1478, + 724, + 1508, + 580, + 1508 + ], + "score": 0.91, + "latex": "c _ { 1 } \\ldots c _ { 5 } \\geq 0" + }, + { + "category_id": 13, + "poly": [ + 899, + 987, + 992, + 987, + 992, + 1015, + 899, + 1015 + ], + "score": 0.9, + "latex": "i \\in 1 . . 
n" + }, + { + "category_id": 13, + "poly": [ + 402, + 1126, + 469, + 1126, + 469, + 1156, + 402, + 1156 + ], + "score": 0.89, + "latex": "q > 2" + }, + { + "category_id": 13, + "poly": [ + 633, + 1234, + 664, + 1234, + 664, + 1268, + 633, + 1268 + ], + "score": 0.89, + "latex": "p ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 1324, + 739, + 1398, + 739, + 1398, + 766, + 1324, + 766 + ], + "score": 0.89, + "latex": "n = 0" + }, + { + "category_id": 13, + "poly": [ + 1125, + 1203, + 1154, + 1203, + 1154, + 1236, + 1125, + 1236 + ], + "score": 0.88, + "latex": "p ^ { 1 }" + }, + { + "category_id": 14, + "poly": [ + 495, + 1959, + 1204, + 1959, + 1204, + 2029, + 495, + 2029 + ], + "score": 0.88, + "latex": "\\tilde { x } ^ { k } \\doteq z ^ { k } - \\rho _ { k } \\bigl ( B ( z ^ { k } ) - w _ { n + 1 } ^ { k } \\bigr ) \\qquad d \\doteq \\frac { 1 - \\overline { { \\rho } } L } { 1 + \\overline { { \\rho } } / 2 } ," + }, + { + "category_id": 13, + "poly": [ + 813, + 1238, + 841, + 1238, + 841, + 1268, + 813, + 1268 + ], + "score": 0.88, + "latex": "p ^ { * }" + }, + { + "category_id": 13, + "poly": [ + 817, + 986, + 849, + 986, + 849, + 1016, + 817, + 1016 + ], + "score": 0.88, + "latex": "A _ { i }" + }, + { + "category_id": 13, + "poly": [ + 297, + 1237, + 326, + 1237, + 326, + 1268, + 297, + 1268 + ], + "score": 0.86, + "latex": "p ^ { * }" + }, + { + "category_id": 13, + "poly": [ + 297, + 957, + 327, + 957, + 327, + 984, + 297, + 984 + ], + "score": 0.84, + "latex": "z ^ { * }" + }, + { + "category_id": 13, + "poly": [ + 686, + 959, + 711, + 959, + 711, + 983, + 686, + 983 + ], + "score": 0.81, + "latex": "B" + }, + { + "category_id": 13, + "poly": [ + 778, + 1064, + 803, + 1064, + 803, + 1092, + 778, + 1092 + ], + "score": 0.71, + "latex": "2 ^ { \\prime }" + }, + { + "category_id": 15, + "poly": [ + 327.0, + 262.0, + 348.0, + 262.0, + 348.0, + 276.0, + 327.0, + 276.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 428.0, + 
263.0, + 464.0, + 263.0, + 464.0, + 275.0, + 428.0, + 275.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 484.0, + 264.0, + 499.0, + 264.0, + 499.0, + 274.0, + 484.0, + 274.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 540.0, + 264.0, + 555.0, + 264.0, + 555.0, + 274.0, + 540.0, + 274.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 579.0, + 263.0, + 628.0, + 263.0, + 628.0, + 275.0, + 579.0, + 275.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 679.0, + 261.0, + 720.0, + 261.0, + 720.0, + 278.0, + 679.0, + 278.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1060.0, + 262.0, + 1080.0, + 262.0, + 1080.0, + 275.0, + 1060.0, + 275.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 679.0, + 289.0, + 709.0, + 289.0, + 709.0, + 306.0, + 679.0, + 306.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 308.0, + 298.0, + 345.0, + 298.0, + 345.0, + 427.0, + 308.0, + 427.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 660.0, + 299.0, + 683.0, + 299.0, + 683.0, + 429.0, + 660.0, + 429.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1040.0, + 298.0, + 1078.0, + 298.0, + 1078.0, + 429.0, + 1040.0, + 429.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 325.0, + 331.0, + 347.0, + 331.0, + 347.0, + 349.0, + 325.0, + 349.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 677.0, + 318.0, + 709.0, + 318.0, + 709.0, + 335.0, + 677.0, + 335.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1195.0, + 325.0, + 1209.0, + 325.0, + 1209.0, + 341.0, + 1195.0, + 341.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1260.0, + 317.0, + 1333.0, + 317.0, + 1333.0, + 348.0, + 1260.0, + 348.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 679.0, + 346.0, + 709.0, + 346.0, + 709.0, + 363.0, + 679.0, + 363.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1058.0, + 339.0, + 1078.0, + 339.0, + 1078.0, + 358.0, + 1058.0, + 358.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1208.0, + 339.0, + 1225.0, + 339.0, + 1225.0, + 354.0, + 1208.0, + 354.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1243.0, + 346.0, + 1260.0, + 346.0, + 1260.0, + 359.0, + 1243.0, + 359.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1261.0, + 343.0, + 1333.0, + 343.0, + 1333.0, + 359.0, + 1261.0, + 359.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1229.0, + 353.0, + 1237.0, + 353.0, + 1237.0, + 363.0, + 1229.0, + 363.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1260.0, + 356.0, + 1332.0, + 356.0, + 1332.0, + 371.0, + 1260.0, + 371.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 325.0, + 369.0, + 345.0, + 369.0, + 345.0, + 387.0, + 325.0, + 387.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 677.0, + 375.0, + 709.0, + 375.0, + 709.0, + 392.0, + 677.0, + 392.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1058.0, + 378.0, + 1078.0, + 378.0, + 1078.0, + 396.0, + 1058.0, + 396.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1243.0, + 365.0, + 1361.0, + 365.0, + 1361.0, + 396.0, + 1243.0, + 396.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1259.0, + 391.0, + 1301.0, + 391.0, + 1301.0, + 408.0, + 1259.0, + 408.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1246.0, + 395.0, + 1260.0, + 395.0, + 1260.0, + 404.0, + 1246.0, + 404.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 326.0, + 406.0, + 345.0, + 406.0, + 345.0, + 420.0, + 326.0, + 420.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 677.0, + 402.0, + 711.0, + 402.0, + 711.0, + 423.0, + 677.0, + 423.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1059.0, + 420.0, + 1078.0, + 420.0, + 1078.0, + 435.0, + 1059.0, + 435.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1350.0, + 420.0, + 1359.0, + 420.0, + 1359.0, + 429.0, + 1350.0, + 429.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 679.0, + 432.0, + 710.0, + 432.0, + 710.0, + 449.0, + 679.0, + 449.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 325.0, + 440.0, + 347.0, + 440.0, + 347.0, + 458.0, + 325.0, + 458.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 679.0, + 462.0, + 710.0, + 462.0, + 710.0, + 477.0, + 679.0, + 477.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1057.0, + 458.0, + 1084.0, + 458.0, + 1084.0, + 475.0, + 1057.0, + 475.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 341.0, + 471.0, + 355.0, + 471.0, + 355.0, + 486.0, + 341.0, + 486.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 394.0, + 470.0, + 414.0, + 470.0, + 414.0, + 488.0, + 394.0, + 488.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 452.0, + 470.0, + 469.0, + 470.0, + 469.0, + 486.0, + 452.0, + 486.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 508.0, + 471.0, + 526.0, + 471.0, + 526.0, + 486.0, + 508.0, + 486.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 563.0, + 471.0, + 583.0, + 471.0, + 583.0, + 486.0, + 563.0, + 486.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 618.0, + 470.0, + 642.0, + 470.0, + 642.0, 
+ 487.0, + 618.0, + 487.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 704.0, + 471.0, + 719.0, + 471.0, + 719.0, + 486.0, + 704.0, + 486.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 738.0, + 471.0, + 755.0, + 471.0, + 755.0, + 486.0, + 738.0, + 486.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 772.0, + 471.0, + 790.0, + 471.0, + 790.0, + 486.0, + 772.0, + 486.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 807.0, + 471.0, + 824.0, + 471.0, + 824.0, + 486.0, + 807.0, + 486.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 845.0, + 471.0, + 862.0, + 471.0, + 862.0, + 486.0, + 845.0, + 486.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 878.0, + 471.0, + 895.0, + 471.0, + 895.0, + 485.0, + 878.0, + 485.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 912.0, + 471.0, + 931.0, + 471.0, + 931.0, + 486.0, + 912.0, + 486.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 949.0, + 471.0, + 966.0, + 471.0, + 966.0, + 486.0, + 949.0, + 486.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 983.0, + 471.0, + 1001.0, + 471.0, + 1001.0, + 486.0, + 983.0, + 486.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1075.0, + 473.0, + 1086.0, + 473.0, + 1086.0, + 485.0, + 1075.0, + 485.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1132.0, + 473.0, + 1141.0, + 473.0, + 1141.0, + 485.0, + 1132.0, + 485.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1187.0, + 473.0, + 1197.0, + 473.0, + 1197.0, + 482.0, + 1187.0, + 482.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1243.0, + 474.0, + 1253.0, + 474.0, + 1253.0, + 482.0, + 1243.0, + 482.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 1357.0, + 474.0, + 1365.0, + 474.0, + 1365.0, + 484.0, + 1357.0, + 484.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 429.0, + 480.0, + 548.0, + 480.0, + 548.0, + 503.0, + 429.0, + 503.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 791.0, + 480.0, + 910.0, + 480.0, + 910.0, + 503.0, + 791.0, + 503.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1161.0, + 480.0, + 1279.0, + 480.0, + 1279.0, + 503.0, + 1161.0, + 503.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1184.0, + 313.5, + 1188.0, + 313.5, + 1188.0, + 322.5, + 1184.0, + 322.5 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 516.0, + 1408.0, + 516.0, + 1408.0, + 554.0, + 292.0, + 554.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 549.0, + 552.0, + 549.0, + 552.0, + 585.0, + 295.0, + 585.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 634.0, + 1153.0, + 634.0, + 1153.0, + 678.0, + 292.0, + 678.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 73.0, + 858.0, + 73.0, + 858.0, + 108.0, + 297.0, + 108.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 830.0, + 2084.0, + 870.0, + 2084.0, + 870.0, + 2122.0, + 830.0, + 2122.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 705.0, + 1406.0, + 705.0, + 1406.0, + 743.0, + 293.0, + 743.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 738.0, + 1323.0, + 738.0, + 1323.0, + 771.0, + 295.0, + 771.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1399.0, + 738.0, + 1408.0, + 738.0, + 1408.0, + 771.0, + 1399.0, + 771.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 764.0, + 1409.0, + 
764.0, + 1409.0, + 805.0, + 293.0, + 805.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 798.0, + 1406.0, + 798.0, + 1406.0, + 835.0, + 295.0, + 835.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 828.0, + 905.0, + 828.0, + 905.0, + 864.0, + 293.0, + 864.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 894.0, + 1404.0, + 894.0, + 1404.0, + 927.0, + 297.0, + 927.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 920.0, + 394.0, + 920.0, + 394.0, + 966.0, + 292.0, + 966.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 734.0, + 920.0, + 1181.0, + 920.0, + 1181.0, + 966.0, + 734.0, + 966.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1264.0, + 920.0, + 1409.0, + 920.0, + 1409.0, + 966.0, + 1264.0, + 966.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 328.0, + 952.0, + 685.0, + 952.0, + 685.0, + 990.0, + 328.0, + 990.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 712.0, + 952.0, + 1405.0, + 952.0, + 1405.0, + 990.0, + 712.0, + 990.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 986.0, + 816.0, + 986.0, + 816.0, + 1019.0, + 296.0, + 1019.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 850.0, + 986.0, + 898.0, + 986.0, + 898.0, + 1019.0, + 850.0, + 1019.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 993.0, + 986.0, + 1405.0, + 986.0, + 1405.0, + 1019.0, + 993.0, + 1019.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1014.0, + 1184.0, + 1014.0, + 1184.0, + 1052.0, + 295.0, + 1052.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1059.0, + 777.0, + 1059.0, + 777.0, + 1100.0, + 292.0, + 1100.0 + ], + "score": 
1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 804.0, + 1059.0, + 1405.0, + 1059.0, + 1405.0, + 1100.0, + 804.0, + 1100.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1090.0, + 914.0, + 1090.0, + 914.0, + 1130.0, + 292.0, + 1130.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1132.0, + 1090.0, + 1181.0, + 1090.0, + 1181.0, + 1130.0, + 1132.0, + 1130.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1402.0, + 1090.0, + 1406.0, + 1090.0, + 1406.0, + 1130.0, + 1402.0, + 1130.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1123.0, + 401.0, + 1123.0, + 401.0, + 1159.0, + 293.0, + 1159.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 470.0, + 1123.0, + 1404.0, + 1123.0, + 1404.0, + 1159.0, + 470.0, + 1159.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1150.0, + 975.0, + 1150.0, + 975.0, + 1194.0, + 292.0, + 1194.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1148.0, + 1150.0, + 1160.0, + 1150.0, + 1160.0, + 1194.0, + 1148.0, + 1194.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1295.0, + 1406.0, + 1295.0, + 1406.0, + 1337.0, + 293.0, + 1337.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1330.0, + 954.0, + 1330.0, + 954.0, + 1368.0, + 294.0, + 1368.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1201.0, + 1124.0, + 1201.0, + 1124.0, + 1241.0, + 293.0, + 1241.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1155.0, + 1201.0, + 1405.0, + 1201.0, + 1405.0, + 1241.0, + 1155.0, + 1241.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1233.0, + 296.0, + 1233.0, + 296.0, + 1273.0, + 291.0, + 1273.0 + ], + "score": 1.0, + 
"text": "" + }, + { + "category_id": 15, + "poly": [ + 327.0, + 1233.0, + 632.0, + 1233.0, + 632.0, + 1273.0, + 327.0, + 1273.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 665.0, + 1233.0, + 812.0, + 1233.0, + 812.0, + 1273.0, + 665.0, + 1273.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 842.0, + 1233.0, + 854.0, + 1233.0, + 854.0, + 1273.0, + 842.0, + 1273.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1470.0, + 579.0, + 1470.0, + 579.0, + 1519.0, + 291.0, + 1519.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 725.0, + 1470.0, + 1021.0, + 1470.0, + 1021.0, + 1519.0, + 725.0, + 1519.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1914.0, + 375.0, + 1914.0, + 375.0, + 1953.0, + 294.0, + 1953.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 34, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 296, + 310, + 1406, + 310, + 1406, + 509, + 296, + 509 + ], + "score": 0.98 + }, + { + "category_id": 1, + "poly": [ + 299, + 520, + 1404, + 520, + 1404, + 651, + 299, + 651 + ], + "score": 0.975 + }, + { + "category_id": 8, + "poly": [ + 327, + 1056, + 1374, + 1056, + 1374, + 1277, + 327, + 1277 + ], + "score": 0.965 + }, + { + "category_id": 8, + "poly": [ + 466, + 875, + 1235, + 875, + 1235, + 967, + 466, + 967 + ], + "score": 0.954 + }, + { + "category_id": 1, + "poly": [ + 295, + 226, + 1403, + 226, + 1403, + 298, + 295, + 298 + ], + "score": 0.954 + }, + { + "category_id": 8, + "poly": [ + 323, + 1808, + 1376, + 1808, + 1376, + 2022, + 323, + 2022 + ], + "score": 0.952 + }, + { + "category_id": 1, + "poly": [ + 303, + 980, + 1400, + 980, + 1400, + 1045, + 303, + 1045 + ], + "score": 0.949 + }, + { + "category_id": 1, + "poly": [ + 298, + 1721, + 1397, + 1721, + 1397, + 1786, + 298, + 1786 + ], + "score": 0.946 + 
}, + { + "category_id": 8, + "poly": [ + 308, + 1544, + 1396, + 1544, + 1396, + 1701, + 308, + 1701 + ], + "score": 0.944 + }, + { + "category_id": 1, + "poly": [ + 297, + 1293, + 809, + 1293, + 809, + 1326, + 297, + 1326 + ], + "score": 0.937 + }, + { + "category_id": 8, + "poly": [ + 482, + 667, + 1215, + 667, + 1215, + 800, + 482, + 800 + ], + "score": 0.925 + }, + { + "category_id": 2, + "poly": [ + 297, + 74, + 857, + 74, + 857, + 106, + 297, + 106 + ], + "score": 0.925 + }, + { + "category_id": 1, + "poly": [ + 296, + 1497, + 1104, + 1497, + 1104, + 1531, + 296, + 1531 + ], + "score": 0.918 + }, + { + "category_id": 1, + "poly": [ + 298, + 820, + 1103, + 820, + 1103, + 859, + 298, + 859 + ], + "score": 0.917 + }, + { + "category_id": 9, + "poly": [ + 1350, + 718, + 1401, + 718, + 1401, + 750, + 1350, + 750 + ], + "score": 0.889 + }, + { + "category_id": 9, + "poly": [ + 1351, + 926, + 1400, + 926, + 1400, + 957, + 1351, + 957 + ], + "score": 0.888 + }, + { + "category_id": 2, + "poly": [ + 835, + 2087, + 866, + 2087, + 866, + 2113, + 835, + 2113 + ], + "score": 0.864 + }, + { + "category_id": 9, + "poly": [ + 1351, + 1155, + 1401, + 1155, + 1401, + 1187, + 1351, + 1187 + ], + "score": 0.858 + }, + { + "category_id": 8, + "poly": [ + 323, + 1345, + 1375, + 1345, + 1375, + 1480, + 323, + 1480 + ], + "score": 0.858 + }, + { + "category_id": 8, + "poly": [ + 481, + 667, + 1214, + 667, + 1214, + 770, + 481, + 770 + ], + "score": 0.243 + }, + { + "category_id": 8, + "poly": [ + 322, + 1346, + 1375, + 1346, + 1375, + 1448, + 322, + 1448 + ], + "score": 0.199 + }, + { + "category_id": 2, + "poly": [ + 1057, + 2009, + 1081, + 2009, + 1081, + 2028, + 1057, + 2028 + ], + "score": 0.142 + }, + { + "category_id": 14, + "poly": [ + 324, + 1053, + 1374, + 1053, + 1374, + 1282, + 324, + 1282 + ], + "score": 0.94, + "latex": "\\begin{array} { r l } & { \\bigl \\langle z ^ { k } - x _ { n + 1 } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\bigr \\rangle = \\bigl 
\\langle z ^ { k } - \\tilde { x } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\bigr \\rangle + \\bigl \\langle \\tilde { x } ^ { k } - x _ { n + 1 } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\bigr \\rangle } \\\\ & { \\qquad = \\bigl \\langle z ^ { k } - \\tilde { x } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\bigr \\rangle + \\rho _ { k } \\bigl \\langle \\epsilon ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\bigr \\rangle \\qquad } \\\\ & { \\qquad = \\bigl \\langle z ^ { k } - \\tilde { x } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\bigr \\rangle + \\rho _ { k } \\bigl \\langle \\epsilon ^ { k } , B x _ { n + 1 } ^ { k } - B \\tilde { x } ^ { k } \\bigr \\rangle \\qquad ( 8 } \\\\ & { \\qquad + \\rho _ { k } \\underbrace { \\bigl \\langle \\epsilon ^ { k } , B \\tilde { x } ^ { k } - w _ { n + 1 } ^ { k } \\bigr \\rangle } _ { r _ { k } } . } \\end{array}" + }, + { + "category_id": 14, + "poly": [ + 323, + 1804, + 1380, + 1804, + 1380, + 2036, + 323, + 2036 + ], + "score": 0.94, + "latex": "\\begin{array} { r l } { { \\langle z ^ { k } - \\tilde { x } ^ { k } , B \\tilde { x } ^ { k } - w _ { n + 1 } ^ { k } \\rangle - \\frac { d } { 2 } \\| z ^ { k } - \\tilde { x } ^ { k } \\| ^ { 2 } } } \\\\ & { = \\langle z ^ { k } - \\tilde { x } ^ { k } , B z ^ { k } - w _ { n + 1 } ^ { k } \\rangle + \\langle z ^ { k } - \\tilde { x } ^ { k } , B \\tilde { x } ^ { k } - B z ^ { k } \\rangle - \\frac { d } { 2 } \\| z ^ { k } - \\tilde { x } ^ { k } \\| ^ { 2 } } \\\\ & { \\quad \\quad \\geq \\underbrace { ( \\rho _ { k } ^ { - 1 } - d / 2 ) \\| z ^ { k } - \\tilde { x } ^ { k } \\| ^ { 2 } - \\| z ^ { k } - \\tilde { x } ^ { k } \\| \\| B \\tilde { x } ^ { k } - B z ^ { k } \\| } _ { q _ { k } } , } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 647, + 521, + 943, + 521, + 943, + 559, + 647, + 559 + ], + "score": 0.93, + "latex": "\\varphi _ { n + 1 , k } ( p ^ { k } ) - \\varphi 
_ { n + 1 , k } ( p ^ { * } )" + }, + { + "category_id": 13, + "poly": [ + 346, + 375, + 468, + 375, + 468, + 410, + 346, + 410 + ], + "score": 0.93, + "latex": "\\| \\nabla _ { w _ { i } } \\varphi _ { k } \\| ^ { 2 }" + }, + { + "category_id": 13, + "poly": [ + 852, + 230, + 934, + 230, + 934, + 264, + 852, + 264 + ], + "score": 0.92, + "latex": "\\mathbb { B } _ { r } \\big ( z ^ { * } \\big )" + }, + { + "category_id": 13, + "poly": [ + 768, + 821, + 950, + 821, + 950, + 859, + 768, + 859 + ], + "score": 0.92, + "latex": "\\langle x _ { n + 1 } ^ { k } - z ^ { * } , e ^ { k } \\rangle" + }, + { + "category_id": 14, + "poly": [ + 461, + 872, + 1234, + 872, + 1234, + 969, + 461, + 969 + ], + "score": 0.92, + "latex": "\\begin{array} { r l } & { - \\varphi _ { n + 1 , k } ( p ^ { * } ) = \\langle z ^ { * } - x _ { n + 1 } ^ { k } , w _ { n + 1 } ^ { * } - y _ { n + 1 } ^ { k } \\rangle } \\\\ & { \\qquad = \\langle z ^ { * } - x _ { n + 1 } ^ { k } , w _ { n + 1 } ^ { * } - B x _ { n + 1 } ^ { k } \\rangle + \\langle x _ { n + 1 } ^ { k } - z ^ { * } , e _ { n + 1 } ^ { k } \\rangle } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 704, + 373, + 1008, + 373, + 1008, + 413, + 704, + 413 + ], + "score": 0.92, + "latex": "c _ { 1 } \\alpha _ { k } ^ { 2 } ( \\| \\bar { e } ^ { k } \\| ^ { 2 } + \\| \\epsilon ^ { k } \\| ^ { 2 } + c _ { 4 } )" + }, + { + "category_id": 13, + "poly": [ + 298, + 261, + 383, + 261, + 383, + 302, + 298, + 302 + ], + "score": 0.92, + "latex": "\\tilde { X } _ { t + 1 / 2 }" + }, + { + "category_id": 13, + "poly": [ + 1061, + 374, + 1246, + 374, + 1246, + 411, + 1061, + 411 + ], + "score": 0.92, + "latex": "c _ { 1 } \\alpha _ { k } ^ { 2 } \\| p ^ { k } - p ^ { * } \\| ^ { 2 }" + }, + { + "category_id": 13, + "poly": [ + 927, + 589, + 987, + 589, + 987, + 622, + 927, + 622 + ], + "score": 0.92, + "latex": "B ( z )" + }, + { + "category_id": 13, + "poly": [ + 1142, + 979, + 1356, + 979, + 1356, + 1018, + 1142, + 1018 + ], + 
"score": 0.92, + "latex": "\\tilde { x } ^ { k } - x _ { n + 1 } ^ { k } = \\rho _ { k } \\epsilon _ { k }" + }, + { + "category_id": 13, + "poly": [ + 1291, + 342, + 1401, + 342, + 1401, + 377, + 1291, + 377 + ], + "score": 0.92, + "latex": "\\| \\nabla _ { z } \\varphi _ { k } \\| ^ { 2 }" + }, + { + "category_id": 13, + "poly": [ + 533, + 410, + 758, + 410, + 758, + 447, + 533, + 447 + ], + "score": 0.91, + "latex": "\\varphi _ { i , k } ( p ^ { k } ) - \\varphi _ { i , k } ( p ^ { * } )" + }, + { + "category_id": 13, + "poly": [ + 1155, + 588, + 1238, + 588, + 1238, + 617, + 1155, + 617 + ], + "score": 0.91, + "latex": "z \\in \\mathbb { R } ^ { d }" + }, + { + "category_id": 14, + "poly": [ + 310, + 1544, + 1397, + 1544, + 1397, + 1676, + 310, + 1676 + ], + "score": 0.91, + "latex": "\\begin{array} { r l } { { \\langle z ^ { k } - \\tilde { x } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\rangle = \\langle z ^ { k } - \\tilde { x } ^ { k } , B \\tilde { x } ^ { k } - w _ { n + 1 } ^ { k } \\rangle + \\langle z ^ { k } - \\tilde { x } ^ { k } , B x _ { n + 1 } ^ { k } - B \\tilde { x } ^ { k } \\rangle } } \\\\ & { \\geq \\langle z ^ { k } - \\tilde { x } ^ { k } , B \\tilde { x } ^ { k } - w _ { n + 1 } ^ { k } \\rangle - \\displaystyle \\frac { d } { 2 } \\| z ^ { k } - \\tilde { x } ^ { k } \\| ^ { 2 } - \\displaystyle \\frac { 1 } { \\underline { { 2 d } } } \\| B \\tilde { x } ^ { k } - B x _ { n + 1 } ^ { k } \\| ^ { 2 } } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 495, + 475, + 589, + 475, + 589, + 508, + 495, + 508 + ], + "score": 0.89, + "latex": "{ ^ { \\circ } } \\varphi _ { n + 1 , k }" + }, + { + "category_id": 13, + "poly": [ + 383, + 1724, + 451, + 1724, + 451, + 1752, + 383, + 1752 + ], + "score": 0.89, + "latex": "d > 0" + }, + { + "category_id": 13, + "poly": [ + 386, + 984, + 415, + 984, + 415, + 1017, + 386, + 1017 + ], + "score": 0.89, + "latex": "r _ { k } ^ { \\prime }" + }, + { + "category_id": 13, + "poly": 
[ + 341, + 414, + 400, + 414, + 400, + 447, + 341, + 447 + ], + "score": 0.89, + "latex": "{ } ^ { \\mathfrak { e } } \\varphi _ { i , k }" + }, + { + "category_id": 13, + "poly": [ + 1074, + 228, + 1105, + 228, + 1105, + 258, + 1074, + 258 + ], + "score": 0.89, + "latex": "\\tilde { x } ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 936, + 443, + 969, + 443, + 969, + 476, + 936, + 476 + ], + "score": 0.89, + "latex": "T _ { k } ^ { \\prime }" + }, + { + "category_id": 13, + "poly": [ + 696, + 590, + 735, + 590, + 735, + 616, + 696, + 616 + ], + "score": 0.87, + "latex": "B z" + }, + { + "category_id": 13, + "poly": [ + 1220, + 476, + 1244, + 476, + 1244, + 504, + 1220, + 504 + ], + "score": 0.86, + "latex": "l _ { k }" + }, + { + "category_id": 13, + "poly": [ + 985, + 984, + 1009, + 984, + 1009, + 1014, + 985, + 1014 + ], + "score": 0.86, + "latex": "l _ { k }" + }, + { + "category_id": 13, + "poly": [ + 927, + 557, + 959, + 557, + 959, + 586, + 927, + 586 + ], + "score": 0.86, + "latex": "\\tilde { x } ^ { k }" + }, + { + "category_id": 13, + "poly": [ + 1305, + 379, + 1397, + 379, + 1397, + 407, + 1305, + 407 + ], + "score": 0.85, + "latex": "i \\in 1 . . n" + }, + { + "category_id": 13, + "poly": [ + 1202, + 444, + 1227, + 444, + 1227, + 473, + 1202, + 473 + ], + "score": 0.85, + "latex": "l _ { k }" + }, + { + "category_id": 14, + "poly": [ + 324, + 1344, + 1376, + 1344, + 1376, + 1448, + 324, + 1448 + ], + "score": 0.83, + "latex": "\\begin{array} { r l } & { \\langle z ^ { k } - { \\tilde { x } } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\rangle + \\rho _ { k } \\langle \\epsilon ^ { k } , B x _ { n + 1 } ^ { k } - B { \\tilde { x } } ^ { k } \\rangle } \\\\ & { \\qquad \\geq \\langle z ^ { k } - { \\tilde { x } } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\rangle - \\underbrace { \\rho _ { k } \\| \\epsilon ^ { k } \\| \\| B x _ { n + 1 } ^ { k } - B { \\tilde { x } } ^ { k } \\| } _ { \\mathrm { ~ } } . 
} \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 789, + 232, + 814, + 232, + 814, + 258, + 789, + 258 + ], + "score": 0.82, + "latex": "B" + }, + { + "category_id": 13, + "poly": [ + 353, + 232, + 375, + 232, + 375, + 258, + 353, + 258 + ], + "score": 0.79, + "latex": "L" + }, + { + "category_id": 14, + "poly": [ + 483, + 664, + 1217, + 664, + 1217, + 797, + 483, + 797 + ], + "score": 0.75, + "latex": "\\begin{array} { r l } & { \\varphi _ { n + 1 , k } ( p ^ { k } ) = \\langle z ^ { k } - x _ { n + 1 } ^ { k } , y _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\rangle } \\\\ & { \\qquad = \\langle z ^ { k } - x _ { n + 1 } ^ { k } , B x _ { n + 1 } ^ { k } - w _ { n + 1 } ^ { k } \\rangle + \\underbrace { \\langle z ^ { k } - x _ { n + 1 } ^ { k } , e ^ { k } \\rangle } _ { \\mathrm { p a r t } \\mathrm { o f } r _ { k } ^ { \\prime } } . } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 881, + 474, + 910, + 474, + 910, + 508, + 881, + 508 + ], + "score": 0.64, + "latex": "r _ { k } ^ { \\prime }" + }, + { + "category_id": 13, + "poly": [ + 395, + 526, + 545, + 526, + 545, + 559, + 395, + 559 + ], + "score": 0.57, + "latex": "\\cdot \\circ _ { n + 1 , k } \\cdot \\mathbf { g } \\mathbf { a p } ^ { , , }" + }, + { + "category_id": 13, + "poly": [ + 841, + 477, + 870, + 477, + 870, + 505, + 841, + 505 + ], + "score": 0.57, + "latex": "r _ { k }" + }, + { + "category_id": 13, + "poly": [ + 961, + 475, + 989, + 475, + 989, + 508, + 961, + 508 + ], + "score": 0.45, + "latex": "q _ { k } ^ { \\prime }" + }, + { + "category_id": 13, + "poly": [ + 1125, + 776, + 1148, + 776, + 1148, + 800, + 1125, + 800 + ], + "score": 0.44, + "latex": "r _ { k } ^ { \\prime }" + }, + { + "category_id": 13, + "poly": [ + 844, + 475, + 989, + 475, + 989, + 509, + 844, + 509 + ], + "score": 0.28, + "latex": "r _ { k } , r _ { k } ^ { \\prime } , q _ { k } , q _ { k } ^ { \\prime }" + }, + { + "category_id": 13, + "poly": [ + 921, + 476, + 957, + 476, + 957, + 508, + 921, 
+ 508 + ], + "score": 0.27, + "latex": "q _ { k } ," + }, + { + "category_id": 15, + "poly": [ + 297.0, + 72.0, + 859.0, + 72.0, + 859.0, + 109.0, + 297.0, + 109.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 830.0, + 2083.0, + 871.0, + 2083.0, + 871.0, + 2121.0, + 830.0, + 2121.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 308.0, + 1407.0, + 308.0, + 1407.0, + 348.0, + 293.0, + 348.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 337.0, + 1290.0, + 337.0, + 1290.0, + 383.0, + 291.0, + 383.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1402.0, + 337.0, + 1407.0, + 337.0, + 1407.0, + 383.0, + 1402.0, + 383.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 369.0, + 345.0, + 369.0, + 345.0, + 416.0, + 290.0, + 416.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 469.0, + 369.0, + 703.0, + 369.0, + 703.0, + 416.0, + 469.0, + 416.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1009.0, + 369.0, + 1060.0, + 369.0, + 1060.0, + 416.0, + 1009.0, + 416.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1247.0, + 369.0, + 1304.0, + 369.0, + 1304.0, + 416.0, + 1247.0, + 416.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1398.0, + 369.0, + 1411.0, + 369.0, + 1411.0, + 416.0, + 1398.0, + 416.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 410.0, + 340.0, + 410.0, + 340.0, + 451.0, + 294.0, + 451.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 401.0, + 410.0, + 532.0, + 410.0, + 532.0, + 451.0, + 401.0, + 451.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 759.0, + 410.0, + 1406.0, + 410.0, + 1406.0, + 451.0, + 759.0, + 451.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 294.0, + 439.0, + 935.0, + 439.0, + 935.0, + 478.0, + 294.0, + 478.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 970.0, + 439.0, + 1201.0, + 439.0, + 1201.0, + 478.0, + 970.0, + 478.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1228.0, + 439.0, + 1406.0, + 439.0, + 1406.0, + 478.0, + 1228.0, + 478.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 468.0, + 494.0, + 468.0, + 494.0, + 512.0, + 291.0, + 512.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 590.0, + 468.0, + 840.0, + 468.0, + 840.0, + 512.0, + 590.0, + 512.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 990.0, + 468.0, + 1219.0, + 468.0, + 1219.0, + 512.0, + 990.0, + 512.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1245.0, + 468.0, + 1258.0, + 468.0, + 1258.0, + 512.0, + 1245.0, + 512.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 519.0, + 394.0, + 519.0, + 394.0, + 564.0, + 292.0, + 564.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 546.0, + 519.0, + 646.0, + 519.0, + 646.0, + 564.0, + 546.0, + 564.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 944.0, + 519.0, + 1406.0, + 519.0, + 1406.0, + 564.0, + 944.0, + 564.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 555.0, + 926.0, + 555.0, + 926.0, + 593.0, + 293.0, + 593.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 960.0, + 555.0, + 1406.0, + 555.0, + 1406.0, + 593.0, + 960.0, + 593.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 586.0, + 695.0, + 586.0, + 695.0, + 624.0, + 290.0, + 624.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 736.0, + 586.0, + 926.0, + 586.0, + 
926.0, + 624.0, + 736.0, + 624.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 988.0, + 586.0, + 1154.0, + 586.0, + 1154.0, + 624.0, + 988.0, + 624.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1239.0, + 586.0, + 1406.0, + 586.0, + 1406.0, + 624.0, + 1239.0, + 624.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 618.0, + 450.0, + 618.0, + 450.0, + 653.0, + 293.0, + 653.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 224.0, + 352.0, + 224.0, + 352.0, + 267.0, + 292.0, + 267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 376.0, + 224.0, + 788.0, + 224.0, + 788.0, + 267.0, + 376.0, + 267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 815.0, + 224.0, + 851.0, + 224.0, + 851.0, + 267.0, + 815.0, + 267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 935.0, + 224.0, + 1073.0, + 224.0, + 1073.0, + 267.0, + 935.0, + 267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1106.0, + 224.0, + 1405.0, + 224.0, + 1405.0, + 267.0, + 1106.0, + 267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 261.0, + 297.0, + 261.0, + 297.0, + 302.0, + 294.0, + 302.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 384.0, + 261.0, + 693.0, + 261.0, + 693.0, + 302.0, + 384.0, + 302.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 287.0, + 960.0, + 385.0, + 960.0, + 385.0, + 1033.0, + 287.0, + 1033.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 416.0, + 960.0, + 984.0, + 960.0, + 984.0, + 1033.0, + 416.0, + 1033.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1010.0, + 960.0, + 1141.0, + 960.0, + 1141.0, + 1033.0, + 1010.0, + 1033.0 + ], + "score": 1.0, + "text": "" 
+ }, + { + "category_id": 15, + "poly": [ + 1357.0, + 960.0, + 1414.0, + 960.0, + 1414.0, + 1033.0, + 1357.0, + 1033.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1011.0, + 643.0, + 1011.0, + 643.0, + 1049.0, + 294.0, + 1049.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1720.0, + 382.0, + 1720.0, + 382.0, + 1760.0, + 294.0, + 1760.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 452.0, + 1720.0, + 1403.0, + 1720.0, + 1403.0, + 1760.0, + 452.0, + 1760.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1752.0, + 625.0, + 1752.0, + 625.0, + 1784.0, + 295.0, + 1784.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1293.0, + 810.0, + 1293.0, + 810.0, + 1328.0, + 295.0, + 1328.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1495.0, + 1106.0, + 1495.0, + 1106.0, + 1534.0, + 296.0, + 1534.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 813.0, + 767.0, + 813.0, + 767.0, + 868.0, + 290.0, + 868.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 951.0, + 813.0, + 1108.0, + 813.0, + 1108.0, + 868.0, + 951.0, + 868.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 35, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 297, + 342, + 1404, + 342, + 1404, + 467, + 297, + 467 + ], + "score": 0.972 + }, + { + "category_id": 1, + "poly": [ + 296, + 227, + 1404, + 227, + 1404, + 329, + 296, + 329 + ], + "score": 0.971 + }, + { + "category_id": 8, + "poly": [ + 516, + 481, + 1183, + 481, + 1183, + 654, + 516, + 654 + ], + "score": 0.965 + }, + { + "category_id": 1, + "poly": [ + 297, + 665, + 669, + 665, + 669, + 698, + 297, + 698 + ], + "score": 0.925 + }, + { + "category_id": 2, + "poly": [ + 297, + 74, + 
857, + 74, + 857, + 105, + 297, + 105 + ], + "score": 0.922 + }, + { + "category_id": 1, + "poly": [ + 288, + 767, + 1317, + 767, + 1317, + 802, + 288, + 802 + ], + "score": 0.912 + }, + { + "category_id": 8, + "poly": [ + 424, + 713, + 1274, + 713, + 1274, + 756, + 424, + 756 + ], + "score": 0.892 + }, + { + "category_id": 2, + "poly": [ + 835, + 2088, + 864, + 2088, + 864, + 2112, + 835, + 2112 + ], + "score": 0.85 + }, + { + "category_id": 14, + "poly": [ + 513, + 477, + 1185, + 477, + 1185, + 658, + 513, + 658 + ], + "score": 0.93, + "latex": "\\begin{array} { r l } & { D _ { k } = \\| p ^ { k } - p ^ { * } \\| ^ { 2 } , } \\\\ & { \\zeta _ { k } = c _ { 2 } \\alpha _ { k } \\rho _ { k } ( T _ { k } ^ { \\prime } + l _ { k } ) + c _ { 3 } \\alpha _ { k } q _ { k } , } \\\\ & { \\xi _ { k } = - c _ { 2 } \\alpha _ { k } \\rho _ { k } r _ { k } - c _ { 3 } \\alpha _ { k } r _ { k } ^ { \\prime } , } \\\\ & { \\chi _ { k } = c _ { 1 } \\alpha _ { k } ^ { 2 } \\big ( \\| e ^ { k } \\| ^ { 2 } + \\| \\epsilon ^ { k } \\| ^ { 2 } + \\| p ^ { k } - p ^ { * } \\| ^ { 2 } + c _ { 4 } \\big ) + c _ { 5 } \\alpha _ { k } q _ { k } ^ { \\prime } , } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 1222, + 227, + 1407, + 227, + 1407, + 267, + 1222, + 267 + ], + "score": 0.92, + "latex": "B z ^ { k } - w _ { n + 1 } ^ { k } =" + }, + { + "category_id": 13, + "poly": [ + 452, + 668, + 497, + 668, + 497, + 698, + 452, + 698 + ], + "score": 0.91, + "latex": "E _ { \\infty } ^ { \\rho }" + }, + { + "category_id": 13, + "poly": [ + 298, + 261, + 456, + 261, + 456, + 301, + 298, + 301 + ], + "score": 0.91, + "latex": "\\rho _ { k } ^ { - 1 } ( z ^ { k } - \\tilde { x } ^ { k } )" + }, + { + "category_id": 13, + "poly": [ + 710, + 264, + 742, + 264, + 742, + 294, + 710, + 294 + ], + "score": 0.88, + "latex": "\\tilde { x } ^ { k }" + }, + { + "category_id": 14, + "poly": [ + 423, + 712, + 1272, + 712, + 1272, + 756, + 423, + 756 + ], + "score": 0.86, + "latex": 
"\\begin{array} { r } { E _ { \\infty } ^ { \\rho } = \\left\\{ x _ { n + 1 } ^ { k } \\in \\mathbb { B } _ { r } ( z ^ { * } ) , \\tilde { x } ^ { k } \\in \\mathbb { B } _ { \\rho r } ( z ^ { * } ) , p ^ { k } \\in \\mathbb { B } _ { \\rho r } ( p ^ { * } ) \\mathrm { ~ f o r ~ a l l ~ } k = 1 , 2 , \\ldots \\right\\} . } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 884, + 439, + 898, + 439, + 898, + 463, + 884, + 463 + ], + "score": 0.77, + "latex": "t" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 72.0, + 859.0, + 72.0, + 859.0, + 108.0, + 297.0, + 108.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 829.0, + 2084.0, + 871.0, + 2084.0, + 871.0, + 2124.0, + 829.0, + 2124.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 344.0, + 1406.0, + 344.0, + 1406.0, + 380.0, + 294.0, + 380.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 375.0, + 1406.0, + 375.0, + 1406.0, + 408.0, + 295.0, + 408.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 404.0, + 1405.0, + 404.0, + 1405.0, + 440.0, + 294.0, + 440.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 434.0, + 883.0, + 434.0, + 883.0, + 469.0, + 292.0, + 469.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 899.0, + 434.0, + 1146.0, + 434.0, + 1146.0, + 469.0, + 899.0, + 469.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 286.0, + 211.0, + 1221.0, + 211.0, + 1221.0, + 277.0, + 286.0, + 277.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 284.0, + 253.0, + 297.0, + 253.0, + 297.0, + 311.0, + 284.0, + 311.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 457.0, + 253.0, + 709.0, + 253.0, + 709.0, + 311.0, + 457.0, + 311.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 
743.0, + 253.0, + 1414.0, + 253.0, + 1414.0, + 311.0, + 743.0, + 311.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 294.0, + 388.0, + 294.0, + 388.0, + 332.0, + 291.0, + 332.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 664.0, + 451.0, + 664.0, + 451.0, + 700.0, + 296.0, + 700.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 498.0, + 664.0, + 669.0, + 664.0, + 669.0, + 700.0, + 498.0, + 700.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 766.0, + 1319.0, + 766.0, + 1319.0, + 805.0, + 292.0, + 805.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 36, + "width": 1700, + "height": 2200 + } + } +] \ No newline at end of file diff --git a/parse/test/TrloAXEJ2B/TrloAXEJ2B.md b/parse/test/TrloAXEJ2B/TrloAXEJ2B.md new file mode 100644 index 0000000000000000000000000000000000000000..87d00bee4aac5a17ed7d3b8679e5d36e567fa964 --- /dev/null +++ b/parse/test/TrloAXEJ2B/TrloAXEJ2B.md @@ -0,0 +1,301 @@ +# LoraHub: Efficient Cross-Task Generalization via Dynamic LoRA Composition + +Chengsong Huang $\mathbf { \Delta } \mathbf { \dag \ S \mathrm { \ s \mathrm { \ s } } }$ , Qian Liuβ€ βˆ—, Bill Yuchen $\mathbf { L i n } ^ { \bigotimes * }$ , Tianyu Pang†, Chao ${ { \mathbf { D } } { { \mathbf { u } } } ^ { \dag } }$ , Min Lin† †Sea AI Lab, Singapore Β§Washington University in St. Louis, MO, USA β™’Allen Institute for AI, Seattle, WA, USA + +# Abstract + +Low-rank adaptations (LoRA) are often employed to fine-tune large language models (LLMs) for new tasks. This paper investigates LoRA composability for cross-task generalization and introduces LoraHub, a simple framework devised for the purposive assembly of LoRA modules trained on diverse given tasks, with the objective of achieving adaptable performance on unseen tasks. 
With just a few examples from a new task, LoraHub can fluidly combine multiple LoRA modules, eliminating the need for human expertise and assumptions. Notably, the composition requires neither additional model parameters nor gradients. Empirical results on the Big-Bench Hard benchmark suggest that LoraHub, while not surpassing the performance of in-context learning, offers a notable performance-efficiency trade-off in few-shot scenarios by employing a significantly reduced number of tokens per example during inference. Notably, LoraHub establishes a better upper bound compared to in-context learning when paired with different demonstration examples, demonstrating its potential for future development. Our vision is to establish a platform for LoRA modules, empowering users to share their trained LoRA modules. This collaborative approach facilitates the seamless application of LoRA modules to novel tasks, contributing to an adaptive ecosystem. Our code is available at github.com/sail-sg/lorahub, and all the pre-trained LoRA modules are released at huggingface.co/lorahub. + +# 1 Introduction + +![](images/95e2ddec39022b4d6452e07a5b9cddb6f0b9d45a3c19a11a7273387b6b7e1205.jpg) +Figure 1: The illustration of zero-shot learning, few-shot in-context learning and few-shot LoraHub learning (ours). Note that the Compose procedure is conducted per task rather than per example. Our method achieves similar inference throughput as zero-shot learning, yet approaches the performance of in-context learning on the BIG-Bench Hard (BBH) benchmark. + +Recent progress in natural language processing (NLP) has been largely fueled by large language models (LLMs) such as OpenAI GPT (Brown et al., 2020), FLAN-T5 (Chung et al., 2022), and LLaMA (Touvron et al., 2023). These models demonstrate top-tier performance across different NLP tasks. However, their enormous parameter size presents issues regarding computational efficiency and memory usage during fine-tuning. 
To mitigate these challenges, Low-Rank Adaptation (LoRA) (Hu et al., 2022) has emerged as a parameter-efficient fine-tuning technique (Lester et al., 2021; He et al., 2022; An et al., 2022). By reducing memory demands and computational costs, it speeds up LLM training. LoRA achieves this by freezing the base model parameters (that is, an LLM) and training a lightweight module, which regularly delivers high performance on target tasks. + +While prior research has targeted the efficiency enhancement facilitated by LoRA, there is a dearth of investigation into the inherent modularity and composability of LoRA modules. Typically, previous methods train LoRA modules to specialize in individual tasks. Yet, the intrinsic modularity of LoRA modules presents an intriguing research question: Would it be possible to compose LoRA modules to generalize to novel tasks in an efficient manner? In this paper, we tap into the potential of LoRA modularity for broad task generalization, going beyond single-task training to meticulously compose LoRA modules for malleable performance on unknown tasks. Crucially, our method enables an automatic assembling of LoRA modules, eliminating dependency on manual design or human expertise. With just a handful of examples from new tasks (e.g., 5), our approach can autonomously compose compatible LoRA modules without human intrusion. We do not make assumptions about which LoRA modules trained on particular tasks can be combined, allowing for flexibility in amalgamating any modules as long as they conform to the specification (e.g., using the same LLM). As our approach leverages several available LoRA modules, we refer to it as LoraHub and denote our learning method as LoraHub learning. + +To validate the efficiency of our proposed methods, we test our approaches using the widely recognized BBH benchmark with FLAN-T5 (Chung et al., 2022) serving as the base LLM. 
The results underline the effectiveness of the LoRA module composition for unfamiliar tasks through a few-shot LoraHub learning process. Notably, our methodology achieves an average performance that closely matches that of few-shot in-context learning, while demonstrating a superior upper bound, particularly when using different demonstration examples. Additionally, our method substantially reduces the inference cost compared to in-context learning, eliminating the requirement of examples as inputs for the LLM. With fewer tokens per example during inference, our method significantly reduces computational overhead and enables faster responses. It aligns with a broader research trend, where recent studies are actively exploring approaches to reduce the number of input tokens (Zhou et al., 2023; Ge et al., 2023; Chevalier et al., 2023; Jiang et al., 2023a; Li et al., 2023; Jiang et al., 2023b). Our learning procedure is also notable for its computational efficiency, using a gradient-free approach to obtain the coefficients of LoRA modules and requiring only a handful of inference steps for unseen tasks. For example, when applied to a new task in BBH, our methodology can deliver superior performance in less than a minute using a single A100 card. + +Importantly, LoraHub learning can feasibly be accomplished with a CPU-only machine, requiring proficiency solely for processing LLM inference. In our pursuit to democratize artificial intelligence, we are taking an important step forward by envisioning the establishment of the LoRA platform. The platform would serve as a marketplace where users can seamlessly share and access well-trained LoRA modules for diverse applications. LoRA providers have the flexibility to freely share or sell their modules on the platform without compromising data privacy. Users, equipped with CPU capability, can leverage trained LoRA modules contributed by others through automated distribution and composition algorithms. 
This platform not only cultivates a repository of reusable LoRA modules with a myriad of capabilities but also sets the stage for cooperative AI development. It empowers the community to collectively enrich the LLM’s capabilities through dynamic LoRA composition. + +# 2 Problem Statement + +Large Language Models We assume that a large language model $M _ { \theta }$ is based on Transformer architecture (Vaswani et al., 2017) and has been pre-trained on a large-scale text corpus. The model architecture can be either encoder-decoder (Raffel et al., 2020) or decoder-only (Brown et al., 2020). Also, $M _ { \theta }$ could have been fine-tuned with a large set of instruction-following datasets such as Flan Collection (Longpre et al., 2023) and PromptSource (Bach et al., 2022). + +Cross-Task Generalization In real-world situations, users often desire an LLM to perform novel tasks that it has not encountered before — an ability widely known as cross-task generalization. Generally, cross-task generalization falls into two categories: zero-shot learning (Mishra et al., 2022; Sanh et al., 2022; Chung et al., 2022; OpenAI, 2022; Lin et al., 2022), which necessitates no labeled examples of the new task, and few-shot learning (Ye et al., 2021; Min et al., 2022) which demands a handful of labeled examples. Assume we have $N$ distinct upstream tasks that the LLM has been trained on, denoted as $\mathbb { T } = \{ \mathcal { T } _ { 1 } , . . . , \mathcal { T } _ { N } \}$ . Our paper primarily focuses on the latter category, where for an unseen target task $\mathcal { T } ^ { \prime } \notin \mathbb { T } ,$ users can only provide a limited set of labeled examples, $Q$. Our aim is to modify the model $M _ { \theta }$ to adapt it to task $\mathcal { T } ^ { \prime }$ using only $Q$ . 
An intuitive method would be to fine-tune the weights of $M _ { \theta }$ based on $Q ,$ yielding an updated model $M _ { \phi }$ with enhanced performance on $\mathcal { T } ^ { \prime }$ . However, this approach is inefficient, time-consuming, and unstable when $Q$ is small. + +LoRA Tuning LoRA is a parameter-efficient fine-tuning method (Hu et al., 2022) that facilitates the adaptation of LLMs using lightweight modules, eliminating the need for fine-tuning the entire weights. LoRA tuning involves keeping the original model weights frozen while introducing trainable low-rank decomposition matrices as adapter modules into each layer of the model. Compared to the base LLM, this module possesses significantly fewer trainable parameters, paving the way for rapid adaptation using minimal examples. As such, LoRA tuning presents a resource-efficient technique to quickly adapt LLMs for new tasks with restricted training data. However, traditional LoRA methods primarily concentrate on training and testing within the same tasks (Gema et al., 2023), rather than venturing into few-shot cross-task generalization. + +# 3 Methodology + +In this section, we provide an overview of our proposed method. We then explain the LoRA tuning procedure in detail. Last, we introduce the procedure of our LoraHub learning, which consists of the COMPOSE stage and the ADAPT stage. + +# 3.1 Method Overview + +As depicted in Figure 2, we initially train LoRA modules on a variety of upstream tasks. Specifically, for $N$ distinct upstream tasks, we separately train $N$ LoRA modules, each represented as $m _ { i }$ for task $\mathcal { T } _ { i } \in \mathbb { T }$ . Subsequently, for a new task $\mathcal { T } ^ { \prime } \notin \mathbb { T } ,$ such as Boolean Expressions represented in Figure 2, its examples $Q$ are utilized to steer the LoraHub learning process. The LoraHub learning encapsulates two main phases: the COMPOSE phase and the ADAPT phase. 
In the COMPOSE phase, all available LoRA modules are combined into a single integrated module $\hat { m }$, using $\left\{ w _ { 1 } , w _ { 2 } , \dots , w _ { N } \right\}$ as coefficients. Each $w _ { i }$ is a scalar value that can take on positive or negative values, and the combination can be done in different ways. During the ADAPT phase, the combined LoRA module $\hat { m }$ is amalgamated with the LLM $M _ { \theta }$, and its performance on few-shot examples from the new task $\mathcal { T } ^ { \prime }$ is assessed. A gradient-free algorithm is subsequently deployed to update $w$, enhancing $\hat { m }$'s performance (e.g., loss) on the few-shot examples $Q$. Finally, after iterating through $K$ steps, the best-performing LoRA module is applied to the LLM $M _ { \theta }$, yielding the final LLM $M _ { \phi } = \mathrm { L o R A } ( M _ { \theta } , \hat { m } )$. This serves as an effectively adjusted model for the unseen task $\mathcal { T } ^ { \prime }$, which will then be deployed and not updated anymore. + +# 3.2 LoRA tuning on upstream tasks + +LoRA effectively minimizes the number of trainable parameters through the process of decomposing the attention weight matrix update of the LLM, denoted as $W _ { 0 } \in \mathbb { R } ^ { d \times k }$, into low-rank matrices. In more specific terms, LoRA exhibits the updated weight matrix in the form $W _ { 0 } + \Delta W = W _ { 0 } + A B$, where $A \in \mathbb { R } ^ { d \times r }$ and $B \in \mathbb { R } ^ { r \times k }$ are trainable low-rank matrices with rank $r$, a dimension significantly smaller than $d$ and $k$. In this context, the product $A B$ defines the LoRA module $m$, as previously elaborated. By leveraging the low-rank decomposition, LoRA substantially reduces the number of trainable parameters needed to adapt the weights of LLMs during fine-tuning.
+ +![](images/fdc28d30d1864590ed2196198df1e30168cf83fc2b25e930c617edf738bdbc3b.jpg) +Figure 2: Our method encompasses two stages: the COMPOSE stage and the ADAPT stage. During the COMPOSE stage, existing LoRA modules are integrated into one unified module, employing a set of coefficients, denoted as $w$. In the ADAPT stage, the combined LoRA module is evaluated on a few examples from the unseen task. Subsequently, a gradient-free algorithm is applied to refine $w$. After executing $K$ iterations, a highly adapted combined LoRA module is produced, which can be incorporated with the LLM to perform the intended task. + +# 3.3 COMPOSE: Element-wise composition of LoRA modules + +Within the COMPOSE stage, we implement an element-wise method to combine LoRA modules. This process integrates the corresponding parameters of the LoRA modules, requiring the modules being combined to have the same rank $r$ to properly align the structures. Given that $m _ { i } = A _ { i } B _ { i }$, the combined LoRA module $\hat { m }$ can be obtained by: + +$$ +\hat { m } = ( w _ { 1 } A _ { 1 } + w _ { 2 } A _ { 2 } + \cdots + w _ { N } A _ { N } ) ( w _ { 1 } B _ { 1 } + w _ { 2 } B _ { 2 } + \cdots + w _ { N } B _ { N } ) . +$$ + +Notably, as we show in Sec. 5, combining too many LoRA modules at once can expand the search space exponentially, which may destabilize the LoraHub learning process and prevent optimal performance. To mitigate this, we employ random selection to prune the candidate space, and more advanced pre-filtering algorithms could be explored in the future. + +# 3.4 ADAPT: Weight optimization via gradient-free methods + +During the ADAPT stage, our goal is to modify the coefficients $w$ to boost the model's performance on the examples from an unseen task. One might think of using gradient descent to optimize $w$, following standard backpropagation methods.
However, this approach demands constructing a hypernetwork for all LoRA modules, similar to differentiable architecture search methods (Zhang et al., 2019). Constructing these hypernetworks demands substantial GPU memory and time, posing a challenge. Given that $w$ consists of a relatively small number of parameters, we opted for gradient-free methods for optimization instead of gradient descent. + +Inspired by previous work (Sun et al., 2022), we utilize a black-box optimization technique to find the optimal $w$. The optimization process is steered by the cross-entropy loss, setting the goal to locate the best set $\left\{ w _ { 1 } , w _ { 2 } , \ldots , w _ { N } \right\}$ that reduces the loss $L$ on the few-shot examples $Q$. Furthermore, we incorporate L1 regularization to penalize the sum of the absolute values of $w$, helping to prevent obtaining extreme values. Consequently, the final objective of LoraHub is to minimize $L + \alpha \cdot \sum _ { i = 1 } ^ { N } | w _ { i } |$, where $\alpha$ serves as a hyperparameter. + +In terms of the gradient-free method, we leverage Shiwa, a combinatorial optimization approach (Liu et al., 2020). Shiwa offers a variety of algorithms and chooses the most suitable optimization algorithm for different circumstances. In most of the forthcoming experimental setups, we primarily employ the Covariance Matrix Adaptation Evolution Strategy (CMA-ES) (Hansen & Ostermeier, 1996). CMA-ES, as a stochastic and population-based optimization algorithm, offers versatility in addressing a broad spectrum of optimization challenges. It dynamically adjusts a search distribution, which is defined by a covariance matrix. During each iteration, CMA-ES systematically updates both the mean and covariance of this distribution to optimize the target function. In our application, we employ this algorithm to mold the search space for $w$.
Ultimately, we use it to identify the optimal $w$ by evaluating its performance on the few-shot examples from an unseen task. + +# 4 Experimental Results + +In this section, we provide details on our main experiments. First, we give an overview of the experimental setup and implementation details. Next, we present our findings along with the results. + +# 4.1 Experimental setup + +Large Language Model In our main experiments, we employ FLAN-T5 (Chung et al., 2022), particularly FLAN-T5-large, as the base LLM. The model has shown impressive abilities to perform zero-shot and few-shot learning. + +Candidate LoRA Modules Our methodology requires a compendium of LoRA modules trained on preceding tasks. For parity with FLAN, we adopt the tasks utilized to instruct FLAN-T5, thereby incorporating nearly 200 distinct tasks and their corresponding instructions. Following this, we trained several LoRA modules as potential candidates. During each experimental sequence, we randomly select 20 LoRA modules from them as the candidates for our LoraHub learning. + +Dataset and evaluation Our method is evaluated using the Big-Bench Hard (BBH) benchmark, a well-established standard that consists of multiple-choice questions from a variety of domains. The benchmark consists of 27 different tasks, which are regarded to be challenging for language models. For all tasks, we employ the exact match (EM) as our evaluation metric. + +Baseline Setup To enhance the demonstration of our method's performance, we expanded our comparisons beyond the zero-shot and in-context learning settings. We specifically chose three representative gradient-based methods for comparison: full fine-tuning (FFT), LoRA tuning (LoRA) (Hu et al., 2022), and IA3 fine-tuning (IA3) (Liu et al., 2022). For all gradient-based methods, for a fair comparison, we train for 40 epochs on the same three runs of 5 examples employed in our methods.
In the case of FFT, a learning rate of 3e-5 is employed, whereas for IA3 and LoRA, we adopt a learning rate of 2e-4. We report the performance of each method on the test set at the end of training (averaged over three runs) without any model selection to avoid potential selection bias. + +# 4.2 Main results + +As shown in Table 1, our experimental results demonstrate the superior efficacy of our method in comparison to zero-shot learning while closely resembling the performance of in-context learning (ICL) in few-shot scenarios. This observation is derived from an average performance of three runs, each leveraging different few-shot examples. Importantly, our model utilizes an equivalent number of tokens as the zero-shot method, notably fewer than the count used by ICL. Despite occasional performance fluctuations, our method consistently outperforms zero-shot learning in most tasks. In the era of LLMs, the input length is directly proportional to the inference cost, and thus LoraHub's ability to economize on input tokens while approaching the peak performance grows increasingly significant. Moreover, as shown in Appendix Table 4, the upper bound performance of our method across these runs can surpass ICL on 18 tasks, demonstrating its potential for future development. + +Table 1: Experimental results of zero-shot learning (Zero), few-shot in-context learning (ICL), IA3 fine-tuning (IA3), LoRA tuning (LoRA), full fine-tuning (FFT) and our proposed few-shot LoraHub learning (LoraHub) on the BBH benchmark with FLAN-T5-large as the base LLM. We denote algorithmic tasks with the superscript $S$ following previous work (Wu et al., 2023b). Note that we employ three runs, each leveraging different 5-shot examples per task, as demonstrations for all few-shot methods. The average performance of all methods is reported below, and the best performance of each few-shot method can be found in the Appendix B. + +
TaskZeroICLavgIA3avgLoRAavgFFTavgLoraHubavg
Boolean Expressions54.059.656.256.062.255.5
Causal Judgement57.559.460.255.657.554.3
Date Understanding15.320.420.035.859.332.9
Disambiguation0.069.10.068.068.245.2
Dyck Languages1.30.94.222.219.51.0
Formal Fallacies51.355.351.553.654.052.8
Geometric Shapes6.719.614.72431.17.4
Hyperbaton6.771.849.355.377.362.8
Logical DeductionS (five objects)21.339.132.740.042.236.1
Logical DeductionS (seven objects)12.740.733.837.344.936.8
Logical DeductionS (three objects)0.051.68.553.652.945.7
Movie Recommendation62.755.861.851.566.055.3
Multistep Arithmetic0.70.70.70.20.00.4
Navigate47.345.346.248.048.047.1
Object Counting34.732.435.138.735.633.7
Penguins in a Table43.541.345.036.231.935.9
Reasoning about Colored Objects32.040.240.739.637.640.0
Ruin Names23.319.324.437.861.324.4
Salient Translation Error Detection37.347.337.116.016.236.0
Snarks50.054.253.955.666.756.9
Sports Understanding56.054.755.156.554.056.7
Temporal Sequences16.725.118.225.137.818.2
Tracking Shuffled ObjectsS (five objects)12.012.012.013.816.912.3
Tracking Shuffled Objects (seven objects)6.76.76.710.09.87.7
Tracking Shuffled ObjectsS (three objects)24.731.130.730.932.029.2
Web of Lies54.053.854.252.748.250.1
Word Sorting1.30.51.34.94.91.1
Avg Performance Per Task27.037.331.637.742.134.7
Avg Tokens Per Example111.6597.8111.6111.6111.6111.6
Gradient-based TrainingNoNoYesYesYesNo
+ +Even when compared to certain gradient-based optimization methods, our approach consistently demonstrates competitive performance. For example, as depicted in Table 1, our method exhibits a notable improvement of $3.1\%$ on average in contrast to the promising IA3 method. Nevertheless, we acknowledge that our approach still falls behind LoRA tuning and full fine-tuning, especially in tasks that exhibit significant deviation from the upstream task. Taking Dyck Languages as an example, both LoraHub and ICL achieve only an average performance of nearly $1.0\%$ on these tasks, while LoRA and FFT methods showcase impressive results with only 5 examples. + +# 4.3 Discussion + +LoraHub addresses the challenge of reducing inference costs by eliminating the need for processing additional tokens, resulting in a noticeable reduction in overall inference expenses. However, it introduces an inherent cost during the ADAPT stage, necessitating extra inference steps, such as the 40 steps employed in our experiments. This introduces a trade-off between choosing the ICL approach and LoraHub, with the decision typically hinging on the nature of the situation. + +For one-time ad-hoc tasks, the ICL approach should be more pragmatic due to LoraHub's additional inference step costs. In such scenarios, where immediate, single-use solutions are preferred, the simplicity and efficiency of ICL might outweigh the benefits of potential savings offered by LoraHub. Conversely, for recurring or similar tasks, LoraHub emerges as a compelling option. Despite the added inference step cost, LoraHub's ability to efficiently handle repetitive tasks, often occurring thousands of times, while concurrently reducing overall expenses, positions it as a viable option in such situations. + +In summary, our intention is not to replace ICL, but to present LoraHub as a complementary strategy with performance-efficiency trade-offs.
Thus, we encourage a careful consideration of specific use cases and requirements when choosing between ICL and LoraHub, recognizing that the optimal solution may vary based on the nature and frequency of the tasks at hand. + +# 5 Experimental Analysis + +In this section, we thoroughly examine the characteristics of our proposed method and uncover several insightful findings. If not specified, we use FLAN-T5-large for all analysis. + +Does composing LoRA modules extend beyond the single module’s benefits? + +We acknowledge the investigation of cross-task performance in prior work (Jang et al., 2023), which delved into the capabilities of LoRA and proposed a novel method centered around LoRA module retrieval. In order to ensure a fair comparison, we conducted an experiment where we + +Table 2: The average performance of various methods across all tasks in the benchmark BBH. + +
LoRA RetrievalLoraHub avgLoraHub best
31.734.741.2
+ +designed a LoRA retrieval mechanism based on the loss derived from few-shot examples. Specifically, we ranked all LoRA module candidates according to this loss and evaluated the best candidate on the test set of the unseen task. As depicted in Table 2, the performance of LoRA retrieval is notably impressive, positioning it as a strong baseline. However, in comparison to LoraHub, the performance of LoRA retrieval is relatively less favorable + +How effective is the gradient-free optimization method? + +To assess the effectiveness of our gradient-free optimization method in correctly identifying the most suitable LoRA module for a given downstream task, we carried out an empirical study using the WikiTableQuestions (Pasupat & Liang, 2015) (WTQ) dataset. We strategically included a LoRA module that was specifically trained on the WTQ dataset into our pool of LoRA candidate modules, which originally stemmed from tasks exclusive to the Flan Collection. Subsequently, we designated WTQ as the targeted downstream task and computed the weights consistent with the methods employed in LoraHub learning. As an end result, the WTQ-specific LoRA module was awarded the highest weight, exemplifying the algorithm’s success in recognizing it as the most relevant. Moreover, the combined LoRA module demonstrated marginal superiority over the WTQ LoRA module. This underscores the claim that the gradient-free optimization method has the ability to proficiently select the optimal upstream LoRA module for an unseen task. + +Can LoraHub work well on non-instruction-tuning models? + +In previous investigations, we primarily focused on models with zero-shot capabilities that were trained with instruction tuning. However, for models like T5 without zero-shot abilities, where training has a larger effect on parameters, it was unclear if LoraHub could still effectively manage and improve them. 
Our experiments show that although these models perform worse than FLAN-T5, LoraHub learning can still enable them to effectively generalize to unseen tasks. See Appendix C for more details. + +Will the rank of LoRA modules impact the performance of LoraHub learning? + +The parameter rank plays a crucial role in the LoRA framework, directly influencing the number of trainable parameters utilized during LoRA tuning. This prompts an intriguing question: does the variation in rank values influence the outcomes observed within the LoraHub learning? Our analysis indicates that, for FLAN-T5, the choice of rank has minimal impact. However, for T5, it still exerts some influence. Empirical findings reveal that, in comparison to rank values of 4 or 64, a rank value of 16 consistently demonstrates superior performance across different runs, both in terms of average and optimal values. Additional results are available in Appendix C. + +Do more LoRA modules lead to better results? + +In our main experiments, we randomly selected 20 LoRA modules for LoraHub learning. Therefore, we conducted experiments to investigate the effect of using different numbers of LoRA modules. The results demonstrate that as we increased the number of LoRA modules, the variance in performance increased. However, the maximum achievable performance also improved. More analysis on the variance and the detailed results can be found in Appendix H. + +How much computational resource can be saved? + +We follow the memory test settings from the LoRA-FA (Zhang et al., 2023b) study for an accurate benchmark. In this context, full fine-tuning required about 40GB of memory, whereas LoRA fine-tuning used around 34GB. Remarkably, LoraHub only utilized about 5GB of memory, illustrating its efficiency due to the inference-only mode, which eliminates the need for storing gradients and optimization states.
+ +# 6 Related work + +Model Merging Our method substantially draws on the concept of LoRA module composition, and thus, aligns with the significant thread of research in model merging. This research focus is broadly categorized based on the ultimate objectives of model merging. + +The first category focuses on merging entire models, and the goal is to combine individually trained models to approximate the performance benefits of model ensembling or multi-task learning. Prior works (Matena & Raffel, 2021; Jin et al., 2023; Yadav et al., 2023; Wu et al., 2023a) operated under the assumption of shared model architectures. For example, Matena & Raffel (2021) amalgamates models by approximating Gaussian posterior distributions garnered from Fisher information, while Yadav et al. (2023) merges models via resolving model interferences. Another approach is merging models with different architectures. For instance, Ainsworth et al. (2023) configures weights of different models prior to their merger. Following this objective, Stoica et al. (2023) merges models operating on varying tasks by identifying common features, without requiring additional training. Unlike these works, our work focuses on merging models for better cross-task generalization. + +The second category most closely aligns with our research, stemming from a shared motivation of module composition. Various scholars have made advances in this line of research: Kingetsu et al. (2021) decomposes and recomposes modules on the basis of their functionality; Ilharco et al. (2023) proposes modulating model behavior using task vectors; Lv et al. (2023) amalgamates parameter-efficient modules weighted according to task similarity; Zhang et al. (2023a) crafts modules by employing specific arithmetic operations; Sun et al. (2023) improves few-shot performance of unseen tasks by multi-task pre-training of prompts; Chronopoulou et al. (2023) averages adapter weights intended for transfer; Ponti et al. 
(2023) focuses on jointly learning adapters and a routing function that allocates skills to each task; and Muqeeth et al. (2023) concentrates on amalgamating experts in mixture of experts models; However, these methods generally necessitate multi-task training or human prior on module selection for the downstream task. In contrast, our method does not impose any special training requirements and simply employs vanilla LoRA tuning. Additionally, the module selection for downstream tasks is entirely data-driven without human prior knowledge. This design gives the advantage of easily adding new LoRA modules for reuse, allowing our method to flexibly scale up the number of LoRA module candidates in the future. + +Mixture of Experts The Mixture of Experts (MoE) is an ensemble method, often visualized as a collection of sub-modules, or β€œexperts”, each specializing in processing different types of input data. Each expert in this system is controlled by a unique gating network, activated based on the distinct nature of the input data. For every token in these input sequences, this network identifies and engages the most suitable experts to process the data. As a result, the performance is superior compared to relying on a single, generic model for all types of input. This technique has proven instrumental in numerous domains, such as natural language processing and computer vision (Jacobs et al., 1991; Shazeer et al., 2017; Du et al., 2022; Zhang et al., 2022; Wang et al., 2022; crumb, 2023). Our methodology displays similarities to MoE, wherein upstream-trained LoRA modules can be aligned with MoE’s expert design. A noteworthy distinguishing factor is that our approach mechanism does not require any specialized manipulation of LoRAs during training while facilitating dynamic LoRA module assembly at any scale, each pre-tuned to different tasks. In contrast, MoE mandates a predetermined count of experts during both the training and testing phases. 
Recent studies on the interrelation between MoE and instruction tuning have demonstrated that the simultaneous application of both approaches enhances the effectiveness of each individually (Shen et al., 2023). + +Cross-Task generalization Recent advancements like CrossFit (Ye et al., 2021), ExT5 (Aribandi et al., 2022), FLAN (Wei et al., 2022), T0 (Sanh et al., 2022), InstructGPT (Ouyang et al., 2022), and ReCross (Lin et al., 2022) have been striving to foster a vastly multi-task model’s generalization across different tasks, very much aligned with the objectives of our research. Among this cohort, the connections of CrossFit and ReCross with LoraHub are particularly noteworthy. The CrossFit framework (Ye et al., 2021) mandates a minimal number of labeled examples of the target task for few-shot fine-tuning. However, its limitation lies in the application of task names as hard prefixes in templates, posing challenges in the task’s generalization. On the other hand, while ReCross mitigates the need for labels in few-shot examples for retrieval, it necessitates a fine-tuning process using the retrieved data. This procedure appears time-consuming when compared to LoraHub’s approach. Through the deployment of few-shot labeled examples and a gradientfree optimization process, LoraHub facilitates an iterative update of weights to compose the LoRA modules. The resultant method is more efficient and cost-effective relative to previous work. Overall, LoraHub offers a more practical and viable solution to the optimization process. + +# 7 Conclusion + +In this work, we have introduced LoraHub, a strategic framework for composing LoRA modules trained on diverse tasks in order to achieve adaptable performance on new tasks. Our approach enables the fluid combination of multiple LoRA modules using just a few examples from a novel task, without requiring additional model parameters or human expertise. 
The empirical results on the BBH benchmark demonstrate that LoraHub can effectively match the performance of in-context learning in few-shot scenarios, removing the need for in-context examples during inference. Overall, our work shows the promise of strategic LoRA composability for rapidly adapting LLMs to diverse tasks. By fostering reuse and combination of LoRA modules, we can work towards more general and adaptable LLMs while minimizing training costs. + +# Reproducibility Statement + +The authors have made great efforts to ensure the reproducibility of the empirical results reported in this paper. Firstly, the experiment settings, evaluation metrics, and datasets were described in detail in Section 4.1. Secondly, the source code and scripts implementing the proposed method and experiments will be open-sourced upon acceptance of the paper. Thirdly, pre-trained LoRA modules from this work, along with their configuration files and weights, will be shared. These allow reproduction without retraining the LoRA modules, enabling quick testing and verification. + +# References + +Samuel Ainsworth, Jonathan Hayase, and Siddhartha Srinivasa. Git re-basin: Merging models modulo permutation symmetries. In The Eleventh International Conference on Learning Representations, 2023. +Shengnan An, Yifei Li, Zeqi Lin, Qian Liu, Bei Chen, Qiang Fu, Weizhu Chen, Nanning Zheng, and Jian-Guang Lou. Input-tuning: Adapting unfamiliar inputs to frozen pretrained models. ArXiv preprint, 2022. +Vamsi Aribandi, Yi Tay, Tal Schuster, Jinfeng Rao, Huaixiu Steven Zheng, Sanket Vaibhav Mehta, Honglei Zhuang, Vinh Q. Tran, Dara Bahri, Jianmo Ni, Jai Prakash Gupta, Kai Hui, Sebastian Ruder, and Donald Metzler. Ext5: Towards extreme multi-task scaling for transfer learning. In Proc. of ICLR, 2022. +Stephen Bach, Victor Sanh, Zheng Xin Yong, Albert Webson, Colin Raffel, Nihal V.
Nayak, Abheesht Sharma, Taewoon Kim, M Saiful Bari, Thibault Fevry, Zaid Alyafeai, Manan Dey, Andrea Santilli, Zhiqing Sun, Srulik Ben-david, Canwen Xu, Gunjan Chhablani, Han Wang, Jason Fries, Maged Al-shaibani, Shanya Sharma, Urmish Thakker, Khalid Almubarak, Xiangru Tang, Dragomir Radev, Mike Tian-jian Jiang, and Alexander Rush. PromptSource: An integrated development environment and repository for natural language prompts. In Proc. of ACL, 2022. +Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. Language models are few-shot learners. In Hugo Larochelle, Marc’Aurelio Ranzato, Raia Hadsell, MariaFlorina Balcan, and Hsuan-Tien Lin (eds.), Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual, 2020. + +Alexis Chevalier, Alexander Wettig, Anirudh Ajith, and Danqi Chen. Adapting language models to compress contexts. CoRR, abs/2305.14788, 2023. doi: 10.48550/ARXIV.2305. 14788. URL https://doi.org/10.48550/arXiv.2305.14788. + +Alexandra Chronopoulou, Matthew Peters, Alexander Fraser, and Jesse Dodge. AdapterSoup: Weight averaging to improve generalization of pretrained language models. In Findings of the Association for Computational Linguistics: EACL 2023, 2023. + +Hyung Won Chung, Le Hou, S. 
Longpre, Barret Zoph, Yi Tay, William Fedus, Eric Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, Albert Webson, Shixiang Shane Gu, Zhuyun Dai, Mirac Suzgun, Xinyun Chen, Aakanksha Chowdhery, Dasha Valter, Sharan Narang, Gaurav Mishra, Adams Wei Yu, Vincent Zhao, Yanping Huang, Andrew M. Dai, Hongkun Yu, Slav Petrov, Ed Huai hsin Chi, Jeff Dean, Jacob Devlin, Adam Roberts, Denny Zhou, Quoc V. Le, and Jason Wei. Scaling instruction-finetuned language models. ArXiv preprint, 2022. +crumb. Llama-2, mixutre of lora. https://crumbly.medium.com/ llama-2-molora-f5f909434711, 2023. +Nan Du, Yanping Huang, Andrew M. Dai, Simon Tong, Dmitry Lepikhin, Yuanzhong Xu, Maxim Krikun, Yanqi Zhou, Adams Wei Yu, Orhan Firat, Barret Zoph, Liam Fedus, Maarten P. Bosma, Zongwei Zhou, Tao Wang, Yu Emma Wang, Kellie Webster, Marie Pellat, Kevin Robinson, Kathleen S. Meier-Hellstern, Toju Duke, Lucas Dixon, Kun Zhang, Quoc V. Le, Yonghui Wu, Zhifeng Chen, and Claire Cui. Glam: Efficient scaling of language models with mixture-of-experts. In Kamalika Chaudhuri, Stefanie Jegelka, Le Song, Csaba Szepesvari, Gang Niu, and Sivan Sabato (eds.), Β΄ International Conference on Machine Learning, ICML 2022, 17-23 July 2022, Baltimore, Maryland, USA, Proceedings of Machine Learning Research, 2022. +Tao Ge, Jing Hu, Xun Wang, Si-Qing Chen, and Furu Wei. In-context autoencoder for context compression in a large language model. CoRR, abs/2307.06945, 2023. doi: 10. 48550/ARXIV.2307.06945. URL https://doi.org/10.48550/arXiv.2307.06945. +Aryo Pradipta Gema, Luke Daines, Pasquale Minervini, and Beatrice Alex. Parameterefficient fine-tuning of llama for the clinical domain. ArXiv preprint, 2023. +Nikolaus Hansen and Andreas Ostermeier. Adapting arbitrary normal mutation distributions in evolution strategies: the covariance matrix adaptation. Proceedings of IEEE International Conference on Evolutionary Computation, 1996. +Junxian He, Chunting Zhou, Xuezhe Ma, Taylor Berg-Kirkpatrick, and Graham Neubig. 
Towards a unified view of parameter-efficient transfer learning. In Proc. of ICLR, 2022. +Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. Lora: Low-rank adaptation of large language models. In Proc. of ICLR, 2022. +Gabriel Ilharco, Marco Tulio Ribeiro, Mitchell Wortsman, Ludwig Schmidt, Hannaneh Hajishirzi, and Ali Farhadi. Editing models with task arithmetic. In The Eleventh International Conference on Learning Representations, 2023. +Robert A. Jacobs, Michael I. Jordan, Steven J. Nowlan, and Geoffrey E. Hinton. Adaptive mixtures of local experts. Neural Computation, 1991. +Joel Jang, Seungone Kim, Seonghyeon Ye, Doyoung Kim, Lajanugen Logeswaran, Moontae Lee, Kyungjae Lee, and Minjoon Seo. Exploring the benefits of training expert language models over instruction tuning. In International Conference on Machine Learning, 2023. URL https://api.semanticscholar.org/CorpusID:256627673. +Huiqiang Jiang, Qianhui Wu, Chin-Yew Lin, Yuqing Yang, and Lili Qiu. Llmlingua: Compressing prompts for accelerated inference of large language models. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing. Association for Computational Linguistics, December 2023a. URL https://arxiv.org/abs/2310.05736. + +Huiqiang Jiang, Qianhui Wu, Xufang Luo, Dongsheng Li, Chin-Yew Lin, Yuqing Yang, and Lili Qiu. Longllmlingua: Accelerating and enhancing llms in long context scenarios via prompt compression. CoRR, abs/2310.06839, 2023b. doi: 10.48550/ARXIV.2310.06839. URL https://doi.org/10.48550/arXiv.2310.06839. + +Xisen Jin, Xiang Ren, Daniel Preotiuc-Pietro, and Pengxiang Cheng. Dataless knowledge fusion by merging weights of language models. In The Eleventh International Conference on Learning Representations, 2023. +Hiroaki Kingetsu, Kenichi Kobayashi, and Taiji Suzuki. Neural network module decomposition and recomposition. ArXiv preprint, 2021. +Brian Lester, Rami Al-Rfou, and Noah Constant. 
The power of scale for parameter-efficient prompt tuning. In Proc. of EMNLP, 2021. +Yucheng Li, Bo Dong, Chenghua Lin, and Frank Guerin. Compressing context to enhance inference efficiency of large language models. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing. Association for Computational Linguistics, December 2023. URL https://arxiv.org/abs/2310.06201. +Bill Yuchen Lin, Kangmin Tan, Chris Miller, Beiwen Tian, and Xiang Ren. Unsupervised cross-task generalization via retrieval augmentation. In NeurIPS, 2022. +Haokun Liu, Derek Tam, Mohammed Muqeeth, Jay Mohta, Tenghao Huang, Mohit Bansal, and Colin Raffel. Few-shot parameter-efficient fine-tuning is better and cheaper than incontext learning. ArXiv, abs/2205.05638, 2022. URL https://api.semanticscholar.org/ CorpusID:248693283. +Jialin Liu, A. Moreau, Mike Preuss, Baptiste Roziere, J \` erΒ΄ emy Rapin, Fabien Teytaud, and Β΄ Olivier Teytaud. Versatile black-box optimization. Proceedings of the 2020 Genetic and Evolutionary Computation Conference, 2020. +Shayne Longpre, Le Hou, Tu Vu, Albert Webson, Hyung Won Chung, Yi Tay, Denny Zhou, Quoc V. Le, Barret Zoph, Jason Wei, and Adam Roberts. The flan collection: Designing data and methods for effective instruction tuning, 2023. +Xingtai Lv, Ning Ding, Yujia Qin, Zhiyuan Liu, and Maosong Sun. Parameter-efficient weight ensembling facilitates task-level knowledge transfer. In Annual Meeting of the Association for Computational Linguistics, 2023. +Sourab Mangrulkar, Sylvain Gugger, Lysandre Debut, Younes Belkada, and Sayak Paul. Peft: State-of-the-art parameter-efficient fine-tuning methods. https://github.com/ huggingface/peft, 2022. +Michael Matena and Colin Raffel. Merging models with fisher-weighted averaging. ArXiv preprint, 2021. +Sewon Min, Mike Lewis, Luke Zettlemoyer, and Hannaneh Hajishirzi. MetaICL: Learning to learn in context. 
In Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, 2022. +Swaroop Mishra, Daniel Khashabi, Chitta Baral, and Hannaneh Hajishirzi. Cross-task generalization via natural language crowdsourcing instructions. In Proc. of ACL, 2022. +Mohammed Muqeeth, Haokun Liu, and Colin Raffel. Soft merging of experts with adaptive routing. ArXiv preprint, 2023. +OpenAI. ChatGPT. 2022. URL https://openai.com/blog/chatgpt. +Long Ouyang, Jeff Wu, Xu Jiang, Diogo Almeida, Carroll L. Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, John Schulman, Jacob Hilton, Fraser Kelton, Luke E. Miller, Maddie Simens, Amanda Askell, Peter Welinder, Paul Francis Christiano, Jan Leike, and Ryan J. Lowe. Training language models to follow instructions with human feedback. ArXiv preprint, 2022. + +Panupong Pasupat and Percy Liang. Compositional semantic parsing on semi-structured tables. In Proc. of ACL, 2015. + +Edoardo Maria Ponti, Alessandro Sordoni, Yoshua Bengio, and Siva Reddy. Combining parameter-efficient modules for task-level generalisation. In Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics, 2023. + +Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. J. Mach. Learn. Res., 2020. + +J. Rapin and O. Teytaud. Nevergrad - A gradient-free optimization platform. https:// GitHub.com/FacebookResearch/Nevergrad, 2018. + +Victor Sanh, Albert Webson, Colin Raffel, Stephen H. Bach, Lintang Sutawika, Zaid Alyafeai, Antoine Chaffin, Arnaud Stiegler, Arun Raja, Manan Dey, M Saiful Bari, Canwen Xu, Urmish Thakker, Shanya Sharma Sharma, Eliza Szczechla, Taewoon Kim, Gunjan Chhablani, Nihal V. 
Nayak, Debajyoti Datta, Jonathan Chang, Mike Tian-Jian Jiang, Han Wang, Matteo Manica, Sheng Shen, Zheng Xin Yong, Harshit Pandey, Rachel Bawden, Thomas Wang, Trishala Neeraj, Jos Rozen, Abheesht Sharma, Andrea Santilli, Thibault FΓ©vry, Jason Alan Fries, Ryan Teehan, Teven Le Scao, Stella Biderman, Leo Gao, Thomas Wolf, and Alexander M. Rush. Multitask prompted training enables zero-shot task generalization. In Proc. of ICLR, 2022. + +Noam Shazeer, Azalia Mirhoseini, Krzysztof Maziarz, Andy Davis, Quoc V. Le, Geoffrey E. Hinton, and Jeff Dean. Outrageously large neural networks: The sparsely-gated mixture-of-experts layer. In Proc. of ICLR, 2017. + +Sheng Shen, Le Hou, Yanqi Zhou, Nan Du, Shayne Longpre, Jason Wei, Hyung Won Chung, Barret Zoph, William Fedus, Xinyun Chen, Tu Vu, Yuexin Wu, Wuyang Chen, Albert Webson, Yunxuan Li, Vincent Zhao, Hongkun Yu, Kurt Keutzer, Trevor Darrell, and Denny Zhou. Mixture-of-experts meets instruction tuning: a winning combination for large language models, 2023. + +George Stoica, Daniel Bolya, Jakob Bjorner, Taylor Hearn, and Judy Hoffman. Zipit! merging models from different tasks without training. arXiv, 2023. + +Tianxiang Sun, Yunfan Shao, Hong Qian, Xuanjing Huang, and Xipeng Qiu. Black-box tuning for language-model-as-a-service. In Kamalika Chaudhuri, Stefanie Jegelka, Le Song, Csaba SzepesvΓ‘ri, Gang Niu, and Sivan Sabato (eds.), International Conference on Machine Learning, ICML 2022, 17-23 July 2022, Baltimore, Maryland, USA, Proceedings of Machine Learning Research, 2022. + +Tianxiang Sun, Zhengfu He, Qin Zhu, Xipeng Qiu, and Xuanjing Huang. Multitask pretraining of modular prompt for Chinese few-shot learning. In Proc. of ACL, 2023. + +Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, TimothΓ©e Lacroix, Baptiste RoziΓ¨re, Naman Goyal, Eric Hambro, Faisal Azhar, AurΓ©lien Rodriguez, Armand Joulin, Edouard Grave, and Guillaume Lample. 
Llama: Open and efficient foundation language models. ArXiv preprint, 2023. + +Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vishwanathan, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, 2017. + +Yaqing Wang, Sahaj Agarwal, Subhabrata Mukherjee, Xiaodong Liu, Jing Gao, Ahmed Hassan Awadallah, and Jianfeng Gao. AdaMix: Mixture-of-adaptations for parameter-efficient model tuning. In Proc. of EMNLP, 2022. + +Jason Wei, Maarten Bosma, Vincent Y. Zhao, Kelvin Guu, Adams Wei Yu, Brian Lester, Nan Du, Andrew M. Dai, and Quoc V. Le. Finetuned language models are zero-shot learners. In Proc. of ICLR, 2022. +Chengyue Wu, Teng Wang, Yixiao Ge, Zeyu Lu, Ruisong Zhou, Ying Shan, and Ping Luo. $\pi$-tuning: Transferring multimodal foundation models with optimal multi-task interpolation. In Andreas Krause, Emma Brunskill, Kyunghyun Cho, Barbara Engelhardt, Sivan Sabato, and Jonathan Scarlett (eds.), International Conference on Machine Learning, ICML 2023, 23-29 July 2023, Honolulu, Hawaii, USA, volume 202 of Proceedings of Machine Learning Research, pp. 37713–37727. PMLR, 2023a. URL https://proceedings.mlr.press/v202/wu23t.html. +Shijie Wu, Ozan Irsoy, Steven Lu, Vadim Dabravolski, Mark Dredze, Sebastian Gehrmann, Prabhanjan Kambadur, David S. Rosenberg, and Gideon Mann. Bloomberggpt: A large language model for finance. CoRR, abs/2303.17564, 2023b. doi: 10.48550/arXiv.2303.17564. URL https://doi.org/10.48550/arXiv.2303.17564. +Prateek Yadav, Derek Tam, Leshem Choshen, Colin Raffel, and Mohit Bansal. TIES-merging: Resolving interference when merging models. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. 
URL https://openreview.net/forum?id= xtaX3WyCj1. +Qinyuan Ye, Bill Yuchen Lin, and Xiang Ren. CrossFit: A few-shot learning challenge for cross-task generalization in NLP. In Proc. of EMNLP, 2021. +Chris Zhang, Mengye Ren, and Raquel Urtasun. Graph hypernetworks for neural architecture search. In Proc. of ICLR, 2019. +Fan Zhang, Duyu Tang, Yong Dai, Cong Zhou, Shuangzhi Wu, and Shuming Shi. Skillnetnlu: A sparsely activated model for general-purpose natural language understanding, 2022. +Jinghan Zhang, Shiqi Chen, Junteng Liu, and Junxian He. Composing parameter-efficient modules with arithmetic operations. ArXiv preprint, 2023a. +Longteng Zhang, Lin Zhang, Shaohuai Shi, Xiaowen Chu, and Bo Li. Lora-fa: Memory-efficient low-rank adaptation for large language models fine-tuning. ArXiv, abs/2308.03303, 2023b. URL https://api.semanticscholar.org/CorpusID:260683267. +Wangchunshu Zhou, Yuchen Eleanor Jiang, Ryan Cotterell, and Mrinmaya Sachan. Efficient prompting via dynamic in-context learning. CoRR, abs/2305.11170, 2023. doi: 10.48550/ARXIV.2305.11170. URL https://doi.org/10.48550/arXiv.2305.11170. + +Table 3: The top five beneficial LoRA modules for BBH tasks and their associated upstream tasks, the average weight values and the average performance on all BBH tasks. + +
RankDataset: TaskWeightPerfTask Description
1WIQA: Last Process0.7228.1 Identifying the last step of a given process.
2RACE: Is this the Right Answer0.6830.8Determining if given answer is correct.
3WIQA: First Process0.6328.1 Identifying the first step of a given process.
4AdversarialQA: BiDAF0.6125.1Aserialmode-in-the-eby an
5WebQuestions: What is the Answer0.5827.0 Asweringrqomesten based oninformation
+ +# A More Analysis + +Which LoRA modules are most effective for BBH tasks? + +We hypothesized that the amalgamation of LoRA modules could incorporate skills and insights from a variety of specific tasks. To evaluate this, we examined the extent of influence a single LoRA module had amongst all tasks from the BBH benchmark. We measured the impact of each isolated task by calculating the average absolute weight. The top five modules, presented in Table 3, were found to have substantial influence, as indicated by their maximum average weights, which suggested that they were notably more effective in cross-task transfer. Remarkably, a common feature among these top five modules was their association with tasks requiring reading comprehension and reasoning skillsβ€”attributes indicative of higher cognitive complexity. However, it is worth noting that none of the modules exhibited consistent improvement across all BBH tasks, as reflected in their average performance on all BBH tasks, which did not show a significant improvement compared to the original FLAN-T5-large, except for the Rank 2. The results underscore the advantages of composing diverse modules in LoraHub. + +How effective is the gradient-free optimization method? + +To assess the effectiveness of our gradient-free optimization method in correctly identifying the most suitable LoRA module for a given downstream task, we carried out an empirical study using the WikiTableQuestions (Pasupat & Liang, 2015) (WTQ) dataset. We strategically included a LoRA module that was specifically trained on the WTQ dataset into our pool of LoRA candidate modules, which originally stemmed from tasks exclusive to the Flan Collection. Subsequently, we designated WTQ as the targeted downstream task and computed the weights consistent with the methods employed in LoraHub learning. 
As an end result, the WTQ-specific LoRA module was awarded the highest weight, exemplifying the algorithm’s success in recognizing it as the most relevant. Moreover, the combined LoRA module demonstrated marginal superiority over the WTQ LoRA module. This underscores the claim that the gradient-free optimization method has the ability to proficiently select the optimal upstream LoRA module for an unseen task. + +# B Result of Best Results + +As shown in Table 4, compared to gradient-based parameter-efficient training methods like LoRA and IA3, our approach demonstrates superior performance in terms of best results over experimental runs. While it exhibits a noticeable lag behind the fully fine-tuning (FFT) method, which updates all parameters during training, this observation suggests that our proposed method has a promising upper limit. We anticipate that future research efforts can contribute to accelerating the optimization speed and further enhancing the efficacy of our approach. + +Table 4: Experimental results of several few-shot methods, including in-context learning (ICL), IA3 fine-tuning (IA3), LoRA tuning (LoRA), full fine-tuning (FFT) and our LoraHub learning (LoraHub) on the BBH benchmark with FLAN-T5-large as the base LLM. We denote algorithmic tasks with the superscript $\ S$ following previous work (Wu et al., 2023b). Note that we use 5 examples per task as the demonstration for all methods. The best (best) performance is reported as the maximum value obtained across three runs. + +
TaskICLbestIA3bestLoRAbestFFTbestLoraHubbest
Boolean Expressions62.758.060.765.360.7
Causal Judgement59.862.157.560.963.2
Date Understanding21.320.740.767.345.3
Disambiguation69.30.068.770.768.0
Dyck Languages2.04.725.333.32.7
Formal Fallacies59.352.056.756.059.3
Geometric Shapes20.015.328.739.318.7
Hyperbaton72.749.357.382.072.7
Logical DeductionS (five objects)39.332.741.343.340.0
Logical DeductionS (seven objects)42.034.042.746.046.0
LogicalDrectjoets)52.78.756.760.752.7
Movie Recommendation56.762.064.570.762.0
Multistep Arithmetic0.70.70.70.01.3
Navigate46.747.350.750.051.3
Object Counting34.735.342.038.036.7
Penguins in a Table43.545.741.337.047.8
Reasoning about Colored Objects41.341.340.738.744.7
Ruin Names20.725.342.066.028.7
Salient Translation Error Detection48.037.317.321.342.7
Snarks55.156.459.069.261.5
Sports Understanding56.755.358.758.762.7
Temporal Sequences26.718.731.348.721.3
Tracking Shuffled ObjectsS (five objects)12.012.016.020.016.7
Tracking Shuffled ObjectsS (seven objects)6.76.712.010.015.3
Tracking Shuffled ObjectsS (three objects)31.330.732.036.031.3
Web of Lies54.054.755.354.057.3
Word Sorting0.71.35.36.01.3
Best Performance (Average)38.432.140.946.241.2
+ +# C Result of non-instrcution-tuned models + +Table 5: Comparsion among different ranks for few-shot LoraHub learning with the backbone T5-large (Raffel et al., 2020) on the BBH benchmark. Note that the T5-large model achieved $0 . { \bar { 0 } } \%$ on all tasks under the zero-shot setting except Dyck Languages, where it scored $0 . 6 7 \%$ . + +
Task ↓ Rank β†’4best 4avg16avg16best64avg64best
Boolean Expressions52.13 57.3350.6758.0047.4758.00
Causal Judgement52.4155.1749.6654.0250.8054.02
Date Understanding0.402.0014.4029.334.5310.00
Disambiguation10.0031.3326.9342.001.734.67
Dyck Languages0.400.670.400.670.402.00
Formal Fallacies48.4054.0046.9351.3346.9350.00
Geometric Shapes0.000.006.5332.671.477.33
Hyperbaton30.1350.0039.07 57.3332.9348.00
Logical DeductionS (five objects)5.2014.678.8019.331.336.67
Logical DeductionS (seven objects)6.4017.339.3319.333.4716.00
Logical DeductionS14.4032.0021.7334.676.9315.33
(three objects) Movie Recommendation7.0718.677.8722.001.206.00
Multistep Arithmetic two0.000.000.000.000.000.00
Navigate49.6054.6752.2756.6749.8752.00
Object Counting7.2018.0016.0021.3313.7326.67
Penguins ina Table6.5213.0410.4317.390.432.17
Reasoning about Colored Objects6.2710.005.0716.670.532.67
Ruin Names7.7313.3313.2028.005.7315.33
Salient Translation Error Detection0.000.001.738.670.000.00
Snarks21.2842.3149.4960.2616.1538.46
Sports Understanding46.5358.6746.8058.6746.5358.67
Temporal Sequences3.0713.336.5326.672.4012.00
Tracking Shuffled ObjectsS5.2014.004.139.330.130.67
(five objects) Tracking Shuffled ObjectsS (seven objects)2.6710.002.8014.003.208.00
Tracking Shuffled ObjectsS3.7317.3316.2734.675.8726.67
(three objects) Web of Lies48.5354.00 57.33
Word Sorting0.400.6754.00 0.1356.00 0.6754.67 0.000.00
20.78
Average Performance per Task16.1424.1730.7314.7621.43
+ +# D Result of larger model + +Table 6: Experimental results of zero-shot learning (Zero) and our few-shot LoraHub learning (LoraHub) on the BBH benchmark with FLAN-T5-xl as the base LLM. Note that we use 5 examples per task as the demonstration for both ICL and LoraHub. The average $( a v g )$ performance of LoraHub is computed over 5 runs with different random seeds, while the best (best) performance is reported as the maximum value obtained across these runs. We can see the trend of the results are similar to FLAN-T5-large. + +
TaskZeroLoraHub avgLoraHub best
Boolean Expressions52.058.763.3
Causal Judgement62.153.859.8
Date Understanding38.037.638.0
Disambiguation Qa0.020.5 54.7
Dyck Languages1.30.92.0
Formal Fallacies56.056.056.0
Geometric Shapes8.717.528.0
Hyperbaton45.353.556.7
Logical DeductionS (five objects)1.342.748.7
Logical DeductionS (seven objects)8.744.350.0
Logical DeductionS (three objects)0.756.461.3
Movie Recommendation2.062.866.0
Multistep Arithmetic Two0.00.40.7
Navigate50.750.750.7
Object Counting39.340.748.0
Penguins In A Table17.440.945.7
Reasoning About Colored Objects46.747.350.7
Ruin Names18.035.644.7
Salient Translation Error Detection44.745.148.7
Snarks60.360.861.5
Sports Understanding56.751.353.3
Temporal Sequences21.321.522.0
Tracking Shuffled ObjectsS3.39.913.3
(five objects) Tracking Shuffled ObjectsS (seven objects)5.37.38.7
Tracking Shuffled ObjectsS7.321.731.3
(three objects) Web Of Lies54.747.148.7
Word Sorting1.31.52.0
Average Performance per Task25.836.541.3
+ +# E Improving the Robustness of LoraHub + +In order to enhance the robustness of LoraHub, we explored a straightforward approach in the selection of LoRA module candidates. Specifically, we first identified 20 LoRA module candidates with the lowest loss on the few-shot examples. Our findings indicate a slight improvement in overall performance after applying the pre-filtering startegy. Since the primary instability in our approach arises from the selection of LoRA candidates. This method involves choosing a fixed set of LoRA candidates to ensure the stability of our approach. + +Table 7: The experimental results of loss-based pre-filtering. + +
TaskLoraHubavgLoraHubfilter
Boolean Expressions55.560.00
Causal Judgement54.352.9
Date Understanding32.933.3
Disambiguation45.262.7
Dyck Languages1.00.0
Formal Fallacies52.854.0
Geometric Shapes7.44.0
Hyperbaton62.864.0
Logical DeductionS (five objects)36.137.3
Logical DeductionS (seven objects)36.822.0
Logical DeductionS (three objects)45.756.0
Movie Recommendation55.368.0
Multistep Arithmetic0.40.7
Navigate47.149.3
Object Counting33.738.7
Penguins in a Table35.937.0
Reasoning about Colored Objects40.033.3
Ruin Names24.422.0
Salient Translation Error Detection36.024.0
Snarks56.952.66
Sports Understanding56.758.0
Temporal Sequences18.227.3
Tracking Shuffled ObjectsS12.311.3
(five objects) Tracking Shuffled ObjectsS7.78.0
(seven objects) Tracking Shuffled ObjectsS29.232.7
(three objects) Web of Lies50.146.0
Word Sorting1.11.3
34.735.4
Avg Performance Per Task
+ +# F Performance on General Important Task + +In our research, we have identified specific LoRA modules that exhibit significant impact when integrated into merged LoRAs. Our focus lies in assessing the performance of the top five task-related LoRAs on the BBH benchmark. The results indicate that these top LoRAs perform similarly or even worse than zero-shot in most cases. Only one of them stands out as significantly better than zero-shot. However, it’s worth noting that this performance is not as impressive as Lorahub. These findings support the idea that the merging process can improve overall performance. + +Table 8: Detailed experimental results of top five LoRA modules shown in Table 3 on BBH tasks. + +
TaskWIQA: LastRACE: RightWIQA: FirstADQAWebQA
Boolean Expressions52.6758.0052.6754.6753.33
Causal Judgement55.1763.2255.1757.4757.47
Date Understanding17.3319.3317.3316.6715.33
Disambiguation0.000.000.000.000.00
Dyck Languages0.670.670.671.331.33
Formal Fallacies51.3351.3351.3351.3351.33
Geometric Shapes8.0013.338.006.677.33
Hyperbaton16.6744.0016.671.336.00
Logical Ded uctionts)23.3328.0023.3319.3320.67
Logical DeductionS (seven objects)22.0026.0022.0010.6712.00
Logical DeductionS (three objects)0.679.330.670.000.00
Movie Recommendation63.3362.6763.3356.6763.33
Multistep Arithmetic0.670.670.670.670.67
Navigate47.3350.0047.3347.3347.33
Object Counting34.6734.0034.6735.3335.33
Penguins in a Table45.6541.3045.6539.1343.48
Reasoning about Colored Objects40.0037.3340.0031.3330.67
Ruin Names22.0021.3322.0017.3322.67
Salient Translation Error Detection36.6734.6736.6732.6737.33
Snarks52.5655.1352.5647.4452.56
Sports Understanding56.0058.6756.0055.33
Temporal Sequences16.6717.3316.6712.6755.33 17.33
Tracking Shuffled ObjectsS (five objects)12.0012.0012.0010.6712.00
Tracking Shuffled ObjectsS (seven objects)6.676.676.676.676.67
Tracking Shuffled ObjectsS20.6730.6720.6710.6725.33
(three objects) Web of Lies54.6754.0054.6754.00
Word Sorting1.331.331.331.3354.00 1.33
Avg Performance per Task β–³ FLAN-T5-large28.10 1.1030.78 3.7828.10 1.1025.14 -1.8627.04 0.04
+ +![](images/f08459cc633da4d25e332908058acffc5a55cf3fadee5264d074582bf20749f5.jpg) +Figure 3: The influence of number of LoRA modules on 15 tasks from BBH, and each box is obtained from 5 separate runs. The horizontal axis shows the number of LoRA modules to be composed in LoraHub learning. + +# G Implementation details + +We implemented LoRA tuning using the Huggingface PEFT library (Mangrulkar et al., 2022), with the rank being set as 16. The gradient-free method was implemented using the open-source Nevergrad optimization library (Rapin & Teytaud, 2018), with a constraint that the absolute value of LoRA weights should not exceed 1.5. Originally, all coefficients of LoRA modules were set at zero. + +In our standard settings, we set the maximum number of iterations $K$ as 40. The same 5 examples were used during our LoraHub learning and the few-shot in-context learning. The hyperparameter $\alpha$ is set as 0.05. Regarding the hyperparameters for training candidate LoRA modules, we maintained consistency across all modules, setting the batch size at 64, the learning rate at $1 e - 4 ,$ and the number of training epochs at 10. + +# H Influence of Number of LoRA modules + +As shown in Figure 3, with an increase in the number of LoRA module candidates, there is a corresponding increase in the performance variance. Based on our in-depth analysis, the primary source of variance is not related to gradient-free optimization algorithms but rather associated with the LoRA candidate modules. In other words, once the candidates are determined, random seeds have minimal impact on the final performance. Hence, we posit that the observed instability primarily arises from the inherent challenge of balancing the quantity and quality of the LoRA module candidates. + +# I The Impact of Threshold + +In this section, we omitted the threshold in our implementation, and the results are summarized in Table 9. 
Our observations indicate that the removal of the threshold had minimal impact on the majority of tasks, underscoring the robustness of the gradient-free optimization algorithm itself in most cases. The algorithm efficiently identified reasonable ranges even without specific upper and lower bounds. However, three tasks, namely Date Understanding, Disambiguation and Hyperbaton, exhibited notable effects. The resulting performance decline led to an average decrease of $1 . 2 \%$ compared to the setting with threshold. + +This highlights the significance of establishing a reasonable threshold to mitigate extreme scenarios. + +Table 9: The comparsion between LoraHub and LoraHub without threshold. + +
TaskLoraHubavg with thresholdLoraHubavg without threshold
Boolean Expressions55.554.0
Causal Judgement54.354.8
Date Understanding32.917.7
Disambiguation45.240.6
Dyck Languages1.01.1
Formal Fallacies52.851.7
Geometric Shapes7.46.7
Hyperbaton62.855.5
Logical DeductionS (five objects)36.136.5
Logical DeductionS (seven objects)36.835.6
Logical DeductionS45.7
(three objects) Movie Recommendation49.9
Multistep Arithmetic55.359.3
Navigate0.40.7
Object Counting47.147.6
33.734.7
Penguins in a Table35.933.8
Reasoning about Colored Objects40.037.9
Ruin Names24.424.0
Salient Translation Error Detection36.037.1
Snarks56.951.6
Sports Understanding56.755.9
Temporal Sequences18.216.7
Tracking Shuffled ObjectsS (five objects)12.312.3
Tracking Shuffled ObjectsS (seven objects)7.78.5
Tracking Shuffled ObjectsS (three objects)29.229.8
Web of Lies50.150.3
Word Sorting1.11.3
Avg Performance Per Task34.733.5
\ No newline at end of file diff --git a/parse/test/TrloAXEJ2B/TrloAXEJ2B_content_list.json b/parse/test/TrloAXEJ2B/TrloAXEJ2B_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..025c9eb991e9d3e953d71476c28bbf718c407f82 --- /dev/null +++ b/parse/test/TrloAXEJ2B/TrloAXEJ2B_content_list.json @@ -0,0 +1,726 @@ +[ + { + "type": "text", + "text": "LoraHub: Efficient Cross-Task Generalization via Dynamic LoRA Composition ", + "text_level": 1, + "page_idx": 0 + }, + { + "type": "text", + "text": "Chengsong Huang $\\mathbf { \\Delta } \\mathbf { \\dag \\ S \\mathrm { \\ s \\mathrm { \\ s } } }$ , Qian Liuβ€ βˆ—, Bill Yuchen $\\mathbf { L i n } ^ { \\bigotimes * }$ , Tianyu Pang†, Chao ${ { \\mathbf { D } } { { \\mathbf { u } } } ^ { \\dag } }$ , Min Lin† †Sea AI Lab, Singapore Β§Washington University in St. Louis, MO, USA β™’Allen Institute for AI, Seattle, WA, USA ", + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract ", + "text_level": 1, + "page_idx": 0 + }, + { + "type": "text", + "text": "Low-rank adaptations (LoRA) are often employed to fine-tune large language models (LLMs) for new tasks. This paper investigates LoRA composability for cross-task generalization and introduces LoraHub, a simple framework devised for the purposive assembly of LoRA modules trained on diverse given tasks, with the objective of achieving adaptable performance on unseen tasks. With just a few examples from a new task, LoraHub can fluidly combine multiple LoRA modules, eliminating the need for human expertise and assumptions. Notably, the composition requires neither additional model parameters nor gradients. Empirical results on the Big-Bench Hard benchmark suggest that LoraHub, while not surpassing the performance of in-context learning, offers a notable performanceefficiency trade-off in few-shot scenarios by employing a significantly reduced number of tokens per example during inference. 
Notably, LoraHub establishes a better upper bound compared to in-context learning when paired with different demonstration examples, demonstrating its potential for future development. Our vision is to establish a platform for LoRA modules, empowering users to share their trained LoRA modules. This collaborative approach facilitates the seamless application of LoRA modules to novel tasks, contributing to an adaptive ecosystem. Our code is available at github.com/sail-sg/lorahub, and all the pre-trained LoRA modules are released at huggingface.co/lorahub. ", + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction ", + "text_level": 1, + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/95e2ddec39022b4d6452e07a5b9cddb6f0b9d45a3c19a11a7273387b6b7e1205.jpg", + "image_caption": [ + "Figure 1: The illustration of zero-shot learning, few-shot in-context learning and few-shot LoraHub learning (ours). Note that the Compose procedure is conducted per task rather than per example. Our method achieves similar inference throughput as zero-shot learning, yet approaches the performance of in-context learning on the BIG-Bench Hard (BBH) benchmark. " + ], + "image_footnote": [], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent progress in natural language processing (NLP) has been largely fueled by large language models (LLMs) such as OpenAI GPT (Brown et al., 2020), FLAN-T5 (Chung et al., 2022), and LLaMA (Touvron et al., 2023). These models demonstrate top-tier performance across different NLP tasks. However, their enormous parameter size presents issues regarding computational efficiency and memory usage during fine-tuning. To mitigate these challenges, Low-Rank Adaptation (LoRA) (Hu et al., 2022) has emerged as a parameterefficient fine-tuning technique (Lester et al., 2021; He et al., 2022; An et al., 2022). By reducing memory demands and computational costs, it speeds up LLM training. 
LoRA achieves this by freezing the base model parameters (that is, an LLM) and training a lightweight module, which regularly delivers high performance on target tasks. ", + "page_idx": 0 + }, + { + "type": "text", + "text": "", + "page_idx": 1 + }, + { + "type": "text", + "text": "While prior research has targeted the efficiency enhancement facilitated by LoRA, there is a dearth of investigation into the inherent modularity and composability of LoRA modules. Typically, previous methods train LoRA modules to specialize in individual tasks. Yet, the intrinsic modularity of LoRA modules presents an intriguing research question: Would it be possible to compose LoRA modules to generalize to novel tasks in an efficient manner? In this paper, we tap into the potential of LoRA modularity for broad task generalization, going beyond single-task training to meticulously compose LoRA modules for malleable performance on unknown tasks. Crucially, our method enables an automatic assembling of LoRA modules, eliminating dependency on manual design or human expertise. With just a handful of examples from new tasks (e.g., 5), our approach can autonomously compose compatible LoRA modules without human intrusion. We do not make assumptions about which LoRA modules trained on particular tasks can be combined, allowing for flexibility in amalgamating any modules as long as they conform to the specification (e.g., using the same LLM). As our approach leverages several available LoRA modules, we refer to it as LoraHub and denote our learning method as LoraHub learning. ", + "page_idx": 1 + }, + { + "type": "text", + "text": "To validate the efficiency of our proposed methods, we test our approaches using the widely recognized BBH benchmark with FLAN-T5 (Chung et al., 2022) serving as the base LLM. The results underline the effectiveness of the LoRA module composition for unfamiliar tasks through a few-shot LoraHub learning process. 
Notably, our methodology achieves an average performance that closely matches that of few-shot in-context learning, while demonstrating a superior upper bound, particularly when using different demonstration examples. Additionally, our method substantially reduces the inference cost compared to in-context learning, eliminating the requirement of examples as inputs for the LLM. With fewer tokens per example during inference, our method significantly reduces computational overhead and enables faster responses. It aligns with a broader research trend, where recent studies are actively exploring approaches to reduce the number of input tokens (Zhou et al., 2023; Ge et al., 2023; Chevalier et al., 2023; Jiang et al., 2023a; Li et al., 2023; Jiang et al., 2023b). Our learning procedure is also notable for its computational efficiency, using a gradient-free approach to obtain the coefficients of LoRA modules and requiring only a handful of inference steps for unseen tasks. For example, when applied to a new task in BBH, our methodology can deliver superior performance in less than a minute using a single A100 card. ", + "page_idx": 1 + }, + { + "type": "text", + "text": "Importantly, LoraHub learning can feasibly be accomplished with a CPU-only machine, requiring proficiency solely for processing LLM inference. In our pursuit to democratize artificial intelligence, we are taking an important step forward by envisioning the establishment of the LoRA platform. The platform would serve as a marketplace where users can seamlessly share and access well-trained LoRA modules for diverse applications. LoRA providers have the flexibility to freely share or sell their modules on the platform without compromising data privacy. Users, equipped with CPU capability, can leverage trained LoRA modules contributed by others through automated distribution and composition algorithms. 
This platform not only cultivates a repository of reusable LoRA modules with a myriad of capabilities but also sets the stage for cooperative AI development. It empowers the community to collectively enrich the LLM’s capabilities through dynamic LoRA composition. ", + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Problem Statement ", + "text_level": 1, + "page_idx": 1 + }, + { + "type": "text", + "text": "Large Language Models We assume that a large language model $M _ { \\theta }$ is based on Transformer architecture (Vaswani et al., 2017) and has been pre-trained on a large-scale text corpus. The model architecture can be either encoder-decoder (Raffel et al., 2020) or decoderonly (Brown et al., 2020). Also, $M _ { \\theta }$ could also have been fine-tuned with a large set of instruction-following datasets such as Flan Colleciton (Longpre et al., 2023) and PromptSource (Bach et al., 2022). ", + "page_idx": 1 + }, + { + "type": "text", + "text": "", + "page_idx": 2 + }, + { + "type": "text", + "text": "Cross-Task Generalization In real-world situations, users often desire an LLM to perform novel tasks that it has not encountered before β€” an ability widely known as cross-task generalization. Generally, cross-task generalization falls into two categories: zero-shot learning (Mishra et al., 2022; Sanh et al., 2022; Chung et al., 2022; OpenAI, 2022; Lin et al., 2022), which necessitates no labeled examples of the new task, and few-shot learning (Ye et al., 2021; Min et al., 2022) which demands a handful of labeled examples. Assume we have $N$ distinct upstream tasks that the LLM has been trained on, denoted as $\\mathbb { T } = \\{ \\mathcal { T } _ { 1 } , . . . , \\mathcal { T } _ { N } \\}$ . Our paper primarily focuses on the latter category, where for an unseen target task $\\mathcal { T } ^ { \\prime } \\notin \\mathbb { T } ,$ , users can only provide a limited set of labeled examples, Q. 
Our aim is to modify the model $M _ { \\theta }$ to adapt it to task $\\tau ^ { \\prime }$ using only $Q$ . An intuitive method would be to fine-tune the weights of ${ \\mathrm { { \\dot { M } } } } _ { \\theta }$ based on $Q ,$ yielding an updated model $M _ { \\phi }$ with enhanced performance on $\\tau ^ { \\prime }$ . However, this approach is inefficient, time-consuming, and unstable when $Q$ is small. ", + "page_idx": 2 + }, + { + "type": "text", + "text": "LoRA Tuning LoRA is a parameter-efficient fine-tuning method (Hu et al., 2022), facilitates the adaptation of LLMs using lightweight modules, eliminating the need for finetuning the entire weights. LoRA tuning involves keeping the original model weights frozen while introducing trainable low-rank decomposition matrices as adapter modules into each layer of the model. Compared to the base LLM, this module possesses significantly fewer trainable parameters, paving the way for rapid adaptation using minimal examples. As such, LoRA tuning presents a resource-efficient technique to quickly adapt LLMs for new tasks with restricted training data. However, traditional LoRA methods primarily concentrate on training and testing within the same tasks (Gema et al., 2023), rather than venturing into few-shot cross-task generalization. ", + "page_idx": 2 + }, + { + "type": "text", + "text": "3 Methodology ", + "text_level": 1, + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, we provide an overview of our proposed method. We then explain the LoRA tuning procedure in detail. Last, we introduce the procedure of our LoraHub learning, which consists of the COMPOSE stage and the ADAPT stage. ", + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 Method Overview ", + "text_level": 1, + "page_idx": 2 + }, + { + "type": "text", + "text": "As depicted in Figure 2, we initially train LoRA modules on a variety of upstream tasks. 
Specifically, for $N$ distinct upstream tasks, we separately train $N$ LoRA modules, each represented as $m _ { i }$ for task $\\mathscr { T } _ { i } \\in \\mathbf { \\hat { T } }$ . Subsequently, for a new task $\\mathcal { T } ^ { \\prime } \\notin \\mathbb { T } ,$ , such as Boolean Expressions represented in Figure 2, its examples $Q$ are utilized to steer the LoraHub learning process. The LoraHub learning encapsulates two main phases: the COMPOSE phase and the ADAPT phase. In the COMPOSE phase, all available LoRA modules are combined into a single integrated module $\\hat { m } _ { - }$ , using $\\left\\{ w _ { 1 } , w _ { 2 } , \\dots , w _ { N } \\right\\}$ as coefficients. Each $w _ { i }$ is a scalar value that can take on positive or negative values, and the combination can be done in different ways. During the ADAPT phase, the combined LoRA module $\\hat { m }$ is amalgamated with the LLM $M _ { \\theta }$ , and its performance on few-shot examples from the new task $\\mathbf { \\breve { { \\mathbf { \\nabla } } } } _ { \\mathbf { \\mathbf { \\mathbf { \\mathbf { \\mathcal { T } } } } } ^ { \\prime } }$ is assessed. A gradient-free algorithm is subsequently deployed to update $w _ { . }$ , enhancing mΛ† ’s performance (e.g., loss) on the few-shot examples $Q$ . Finally, after iterating through $K$ steps, the optimum performing LoRA module is applied to the LLM $M _ { \\theta }$ , yielding the final LLM $M _ { \\phi } = \\mathrm { L o R A } ( \\hat { M } _ { \\theta } , \\hat { m } )$ . This serves as an effectively adjusted model for the unseen task $\\tau ^ { \\prime }$ , which will then be deployed and not updated anymore. 
", + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2 LoRA tuning on upstream tasks ", + "text_level": 1, + "page_idx": 2 + }, + { + "type": "text", + "text": "LoRA effectively minimizes the number of trainable parameters through the process of decomposing the attention weight matrix update of the LLM, denoted as $W _ { 0 } \\in \\bar { R } ^ { d \\times k } ,$ , into low-rank matrices. In more specific terms, LoRA exhibits the updated weight matrix in the form $W _ { 0 } + \\delta W = W _ { 0 } + A B ,$ where $A \\in \\mathbb { R } ^ { d \\times r }$ and $B \\in \\mathbb { R } ^ { r \\times k }$ are trainable low-rank matrices with rank $r ,$ a dimension significantly smaller than those of $d$ and $k$ . In this context, the product $A B$ defines the LoRA module $m ,$ , as previously elaborated. By leveraging the low-rank decomposition, LoRA substantially reduces the number of trainable parameters needed to adapt the weights of LLMs duriing fine-tuning. ", + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/fdc28d30d1864590ed2196198df1e30168cf83fc2b25e930c617edf738bdbc3b.jpg", + "image_caption": [ + "Figure 2: Our method encompasses two stages: the COMPOSE stage and the ADAPT stage. During the COMPOSE stage, existing LoRA modules are integrated into one unified module, employing a set of coefficients, denoted as $w$ . In the ADAPT stage, the combined LoRA module is evaluated on a few examples from the unseen task. Subsequently, a gradient-free algorithm is applied to refine $w$ . After executing $K$ iterations, a highly adapted combined LoRA module is produced, which can be incorporated with the LLM to perform the intended task. 
" + ], + "image_footnote": [], + "page_idx": 3 + }, + { + "type": "text", + "text": "", + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3 COMPOSE: Element-wise composition of LoRA modules ", + "text_level": 1, + "page_idx": 3 + }, + { + "type": "text", + "text": "Within the COMPOSE stage, we implement an element-wise method to combine LoRA modules. This process integrates the corresponding parameters of the LoRA modules, requiring the modules being combined to have the same rank $r$ to properly align the structures. Given that $m _ { i } = A _ { i } B _ { i } ,$ the combined LoRA module $\\hat { m }$ can be obtained by: ", + "page_idx": 3 + }, + { + "type": "equation", + "img_path": "images/622ebc57ca6de9adfd25eb29ad01864fdee3e77678a974073dcd3f51edc1c592.jpg", + "text": "$$\n\\hat { m } = ( w _ { 1 } A _ { 1 } + w _ { 2 } A _ { 2 } + \\cdot \\cdot \\cdot + w _ { N } A _ { N } ) ( w _ { 1 } B _ { 1 } + w _ { 2 } B _ { 2 } + \\cdot \\cdot \\cdot + w _ { N } B _ { N } ) .\n$$", + "text_format": "latex", + "page_idx": 3 + }, + { + "type": "text", + "text": "Notbly, as we show in Sec. 5, combining too many LoRA modules at once can expand the search space exponentially, which may destabilize the LoraHub learning process and prevent optimal performance. To mitigate this, we employ random selection to prune the candidate space, and more advanced pre-filtering algorithms could be explored in the future. ", + "page_idx": 3 + }, + { + "type": "text", + "text": "3.4 ADAPT: Weight optimization via gradient-free methods ", + "text_level": 1, + "page_idx": 3 + }, + { + "type": "text", + "text": "During the ADAPT stage, our goal is to modify the coefficients $w$ to boost the model’s performace on the examples from an unseen task. One might think of using gradient descent to optimize $w ,$ following standard backpropagation methods. 
However, this approach demands constructing a hypernetwork for all LoRA modules, similar to differentiable architecture search methods (Zhang et al., 2019). Constructing these hypernetworks demands for substantial GPU memory and time, posing a challenge. Given that $w$ consists of a relatively small number of parameters, we opted for gradient-free methods for optimization instead of gradient descent. ", + "page_idx": 3 + }, + { + "type": "text", + "text": "Inspired by previous work (Sun et al., 2022), we utilize a black-box optimization technique to find the optimal $w$ . The optimization process is steered by the cross-entropy loss, setting the goal to locate the best set $\\left\\{ w _ { 1 } , w _ { 2 } , \\ldots , w _ { N } \\right\\}$ that reduces the loss $L$ on the few-shot examples $Q$ . Furthermore, we incorporate L1 regularization to penalize the sum of the absolute values of $w _ { . }$ , helping to prevent obtaining extreme values. Consequently, the final objective of LoraHub is to minimize $\\begin{array} { r } { L + \\alpha \\cdot \\sum _ { i = 1 } ^ { N } | \\dot { w } _ { i } | , } \\end{array}$ , where $\\alpha$ serves as a hyperparameter. ", + "page_idx": 3 + }, + { + "type": "text", + "text": "In terms of the gradient-free method, we leverage Shiwa, a combinatorial optimization approach (Liu et al., 2020). Shiwa offers a variety of algorithms and chooses the most suitable optimization algorithm for different circumstances. In most of the forthcoming experimental setups, we primarily employ the Covariance Matrix Adaptive Evolution Strategies (CMA-ES) (Hansen & Ostermeier, 1996). CMA-ES, as a stochastic and population-based optimization algorithm, offers versatility in addressing a broad spectrum of optimization challenges. It dynamically adjusts a search distribution, which is defined by a covariance matrix. During each iteration, CMA-ES systematically updates both the mean and covariance of this distribution to optimize the target function. 
In our application, we employ this algorithm to mold the search space for w. Ultimately, we use it to identify the optimal $w$ by evaluating their performance on the few-shot examples from an unseen task. ", + "page_idx": 4 + }, + { + "type": "text", + "text": "4 Experimental Results ", + "text_level": 1, + "page_idx": 4 + }, + { + "type": "text", + "text": "In this section, we provide details on our main experiments. First, we give an overview of the experimental setup and implementation details. Next, we present our findings along with the results. ", + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1 Experimental setup ", + "text_level": 1, + "page_idx": 4 + }, + { + "type": "text", + "text": "Large Language Model In our main experiments, we employ FLAN-T5 (Chung et al., 2022), particularly FLAN-T5-large, as the base LLM. The model has shown impressive abilities to perform zero-shot and few-shot learning. ", + "page_idx": 4 + }, + { + "type": "text", + "text": "Candidate LoRA Modules Our methodology requires a compendium of LoRA modules trained on preceding tasks. For parity with FLAN, we adopt the tasks utilized to instruct FLAN-T5, thereby incorporating nearly 200 distinct tasks and their corresponding instructions. Following this, we trained several LoRA modules as potential candidates. During each experimental sequence, we randomly select 20 LoRA modules from them as the candidate for our LoraHub learning. ", + "page_idx": 4 + }, + { + "type": "text", + "text": "Dataset and evaluation Our method is evaluated using the Big-Bench Hard (BBH) benchmark, a well-established standard that consists of multiple-choice questions from a variety of domains. The benchmark consists of 27 different tasks, which are regarded to be challenging for language models. For all tasks, we employ the exact match (EM) as our evaluation metric. 
", + "page_idx": 4 + }, + { + "type": "text", + "text": "Baseline Setup To enhance the demonstration of our method’s performance, we expanded our comparisons beyond the zero-shot and in-context learning settings. We specifically chose three representative gradient-based methods for comparison: full fine-tuning (FFT), LoRA tuning (LoRA) (Hu et al., 2022), and IA3 fine-tuning (IA3) (Liu et al., 2022). For all gradient-based methods, for a fair comparsion, we train for 40 epochs on the same three runs of 5 examples employed in our methods. In the case of FFT, a learning rate of 3e-5 is employed, whereas for IA3 and LoRA, we adopt a learning rate of 2e-4. We report the performance of each method on the test set at the end of training (averaged over three runs) without any model selection to avoid potential selection bias. ", + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2 Main results ", + "text_level": 1, + "page_idx": 4 + }, + { + "type": "text", + "text": "As shown in Table 1, our experimental results demonstarte the superior efficacy of our method in comparison to zero-shot learning while closely resembling the performance of in-context learning (ICL) in few-shot scenarios. This observation is derived from an average performance of three runs, each leveraging different few-shot examples. Importantly, our model utilizes an equivalent number of tokens as the zero-shot method, notably fewer than the count used by ICL. Although occasional performance fluctuations, our method consistently outperforms zero-shot learning in most tasks. In the era of LLMs, the input length is directly proportional to the inference cost, and thus LoraHub’s ability to economize on input tokens while approaching the peak performance grows increasingly significant. Moreover, as shown in Appendix Table 4, the upper bound performance of our method across these runs can surpass ICL on 18 tasks, demonstrating its potential for future development. 
", + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/7ec4211137da396567e2ee2f253ff7f1eb99abc5b9e489998bc8f304dfdfbc78.jpg", + "table_caption": [ + "Table 1: Experimental results of zero-shot learning (Zero), few-shot in-context learning (ICL), IA3 fine-tuning (IA3), LoRA tuning (LoRA), full fine-tuning (FFT) and our proposed few-shot LoraHub learning (LoraHub) on the BBH benchmark with FLAN-T5-large as the base LLM. We denote algorithmic tasks with the superscript $\\ S$ following previous work (Wu et al., 2023b). Note that we employ three runs, each leveraging different 5-shot examples per task, as demonstrations for all few-shot methods. The average performance of all methods is reported below, and the best performance of each few-shot method can be found in the Appendix B. " + ], + "table_footnote": [], + "table_body": "
TaskZeroICLavgIA3avgLoRAavgFFTavgLoraHubavg
Boolean Expressions54.059.656.256.062.255.5
Causal Judgement57.559.460.255.657.554.3
Date Understanding15.320.420.035.859.332.9
Disambiguation0.069.10.068.068.245.2
Dyck Languages1.30.94.222.219.51.0
Formal Fallacies51.355.351.553.654.052.8
Geometric Shapes6.719.614.72431.17.4
Hyperbaton6.771.849.355.377.362.8
Logical DeductionS (five objects)21.339.132.740.042.236.1
Logical DeductionS (seven objects)12.740.733.837.344.936.8
Logical DeductionS (three objects)0.051.68.553.652.945.7
Movie Recommendation62.755.861.851.566.055.3
Multistep Arithmetic0.70.70.70.20.00.4
Navigate47.345.346.248.048.047.1
Object Counting34.732.435.138.735.633.7
Penguins in a Table43.541.345.036.231.935.9
Reasoning about Colored Objects32.040.240.739.637.640.0
Ruin Names23.319.324.437.861.324.4
Salient Translation Error Detection37.347.337.116.016.236.0
Snarks50.054.253.955.666.756.9
Sports Understanding56.054.755.156.554.056.7
Temporal Sequences16.725.118.225.137.818.2
Tracking Shuffled ObjectsS (five objects)12.012.012.013.816.912.3
Tracking Shuffled Objects (seven objects)6.76.76.710.09.87.7
Tracking Shuffled ObjectsS (three objects)24.731.130.730.932.029.2
Web of Lies54.053.854.252.748.250.1
Word Sorting1.30.51.34.94.91.1
Avg Performance Per Task27.037.331.637.742.134.7
Avg Tokens Per Example111.6597.8111.6111.6111.6111.6
Gradient-based TrainingNoNoYesYesYesNo
", + "page_idx": 5 + }, + { + "type": "text", + "text": "", + "page_idx": 5 + }, + { + "type": "text", + "text": "Even when compared to certain gradient-based optimization methods, our approach consistently demonstrates competitive performance. For example, as depicted in Table 1, our method exhibits a notable improvement of $3 . 1 \\%$ on average in contrast to the promising IA3 method. Nevertheless, we acknowledge that our approach still falls behind LoRA tuning and full fine-tuning, especially in tasks that exhibit significant deviation from the upstream task. Taking Dyck Languages as an example, both LoraHub and ICL achieve only an average performance of nearly $1 . 0 \\%$ on these tasks, while LoRA and FFT methods showcase impressive results with only 5 examples. ", + "page_idx": 5 + }, + { + "type": "text", + "text": "", + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3 Discussion ", + "text_level": 1, + "page_idx": 6 + }, + { + "type": "text", + "text": "LoraHub addresses the challenge of reducing inference costs by eliminating the need for processing additional tokens, resulting in a noticeable reduction in overall inference expenses. However, it introduces an inherent cost during the ADAPT stage, necessitating extra inference steps, such as the 40 steps employed in our experiments. This introduces a trade-off between choosing the ICL approach and LoraHub, with the decision typically hinging on the nature of the situation. ", + "page_idx": 6 + }, + { + "type": "text", + "text": "For one-time ad-hoc tasks, the ICL approach should be more pragmatic due to LoraHub’s additional inference step costs. In such scenarios, where immediate, single-use solutions are preferred, the simplicity and efficiency of ICL might outweigh the benefits of potential savings offered by LoraHub. Conversely, for recurring or similar tasks, LoraHub emerges as a compelling option. 
Despite the added inference step cost, LoraHub’s ability to efficiently handle repetitive tasks, often occurring thousands of times, while concurrently reducing overall expenses, positions it as a viable option in such kind of situations. ", + "page_idx": 6 + }, + { + "type": "text", + "text": "In summary, our intention is not to replace $\\scriptstyle { \\mathrm { I C L } } ,$ but to present LoraHub as a complementary strategy with performance-efficiency trade-offs. Thus, we encourage a careful consideration of specific use cases and requirements when choosing between ICL and LoraHub, recognizing that the optimal solution may vary based on the nature and frequency of the tasks at hand. ", + "page_idx": 6 + }, + { + "type": "text", + "text": "5 Experimental Analysis ", + "text_level": 1, + "page_idx": 6 + }, + { + "type": "text", + "text": "In this section, we thoroughly examine the characteristics of our proposed method and uncover several insightful findings. If not specified, we use FLAN-T5-large for all analysis. ", + "page_idx": 6 + }, + { + "type": "text", + "text": "Does composing LoRA modules extend beyond the single module’s benefits? ", + "page_idx": 6 + }, + { + "type": "text", + "text": "We acknowledge the investigation of cross-task performance in prior work (Jang et al., 2023), which delved into the capabilities of LoRA and proposed a novel method centered around LoRA module retrieval. In order to ensure a fair comparison, we conducted an experiment where we ", + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/50cfa3a8aba9701a20f931b84cb7087e21caaca64b4ee5f70bf22283760d4739.jpg", + "table_caption": [ + "Table 2: The average performance of various methods across all tasks in the benchmark BBH. " + ], + "table_footnote": [], + "table_body": "
LoRA RetrievalLoraHub avgLoraHub best
31.734.741.2
", + "page_idx": 6 + }, + { + "type": "text", + "text": "designed a LoRA retrieval mechanism based on the loss derived from few-shot examples. Specifically, we ranked all LoRA module candidates according to this loss and evaluated the best candidate on the test set of the unseen task. As depicted in Table 2, the performance of LoRA retrieval is notably impressive, positioning it as a strong baseline. However, in comparison to LoraHub, the performance of LoRA retrieval is relatively less favorable ", + "page_idx": 6 + }, + { + "type": "text", + "text": "How effective is the gradient-free optimization method? ", + "page_idx": 6 + }, + { + "type": "text", + "text": "To assess the effectiveness of our gradient-free optimization method in correctly identifying the most suitable LoRA module for a given downstream task, we carried out an empirical study using the WikiTableQuestions (Pasupat & Liang, 2015) (WTQ) dataset. We strategically included a LoRA module that was specifically trained on the WTQ dataset into our pool of LoRA candidate modules, which originally stemmed from tasks exclusive to the Flan Collection. Subsequently, we designated WTQ as the targeted downstream task and computed the weights consistent with the methods employed in LoraHub learning. As an end result, the WTQ-specific LoRA module was awarded the highest weight, exemplifying the algorithm’s success in recognizing it as the most relevant. Moreover, the combined LoRA module demonstrated marginal superiority over the WTQ LoRA module. This underscores the claim that the gradient-free optimization method has the ability to proficiently select the optimal upstream LoRA module for an unseen task. ", + "page_idx": 6 + }, + { + "type": "text", + "text": "", + "page_idx": 7 + }, + { + "type": "text", + "text": "Can LoraHub work well on non-instruction-tuning models? 
", + "page_idx": 7 + }, + { + "type": "text", + "text": "In previous investigations, we primarily focused on models with zero-shot capabilities that were trained with instruction tuning. However, for models like T5 without zero-shot abilities, where training has a larger effect on parameters, it was unclear if LoraHub could still effectively manage and improve them. Our experiments show that although these models perform worse than FLAN-T5, LoraHub learning can still enable them to effectively generlize to unseen tasks. See Appendix C for more details. ", + "page_idx": 7 + }, + { + "type": "text", + "text": "Will the rank of LoRA modules impact the performance of LoraHub learning? ", + "page_idx": 7 + }, + { + "type": "text", + "text": "The parameter rank plays a crucial role in the LoRA framework, directly influencing the number of trainable parameters utilized during LoRA tuning. This prompts an intriguing question: does the variation in rank values influence the outcomes observed within the LoraHub learning? Our analysis indicates that, for FLAN-T5, the choice of rank has minimal impact. However, for T5, it still exerts some influence. Empirical findings reveal that, in comparison to rank values of 4 or 64, a rank value of 16 consistently demonstrates superior performance across different runs, both in terms of average and optimal values. Additional results are available in Appendix C. ", + "page_idx": 7 + }, + { + "type": "text", + "text": "Does more LoRA modules lead to better results? ", + "page_idx": 7 + }, + { + "type": "text", + "text": "In our main experiments, we randomly selected 20 LoRA modules for LoraHub learning. Therefore, we conducted experiments to investigate the effect of using different numbers of LoRA modules. The results demonstrate that as we increased the number of LoRA modules, the variance in performance increased. However, the maximum achievable performance also improved. 
More analysis on the variance and the detailed results can be found in Appendix H. ", + "page_idx": 7 + }, + { + "type": "text", + "text": "How much computational resource can be saved? ", + "page_idx": 7 + }, + { + "type": "text", + "text": "We follow to the memory test settings from the LoRA-FA (Zhang et al., 2023b) study for an accurate benchmark. In this context, full fine-tuning required about 40GB of memory, whereas LoRA fine-tuning used around 34GB. Remarkably, LoraHub only utilized about 5GB of memory, illustrating its efficiency due to the inference-only mode, which eliminates the need for storing gradients and optimization states. ", + "page_idx": 7 + }, + { + "type": "text", + "text": "6 Related work ", + "text_level": 1, + "page_idx": 7 + }, + { + "type": "text", + "text": "Model Merging Our method substantially draws on the concept of LoRA module composition, and thus, aligns with the significant thread of research in model merging. This research focus is broadly categorized based on the ultimate objectives of model merging. ", + "page_idx": 7 + }, + { + "type": "text", + "text": "The first category focuses on merging entire models, and the goal is to combine individually trained models to approximate the performance benefits of model ensembling or multi-task learning. Prior works (Matena & Raffel, 2021; Jin et al., 2023; Yadav et al., 2023; Wu et al., 2023a) operated under the assumption of shared model architectures. For example, Matena & Raffel (2021) amalgamates models by approximating Gaussian posterior distributions garnered from Fisher information, while Yadav et al. (2023) merges models via resolving model interferences. Another approach is merging models with different architectures. For instance, Ainsworth et al. (2023) configures weights of different models prior to their merger. Following this objective, Stoica et al. (2023) merges models operating on varying tasks by identifying common features, without requiring additional training. 
Unlike these works, our work focuses on merging models for better cross-task generalization. ", + "page_idx": 7 + }, + { + "type": "text", + "text": "", + "page_idx": 8 + }, + { + "type": "text", + "text": "The second category most closely aligns with our research, stemming from a shared motivation of module composition. Various scholars have made advances in this line of research: Kingetsu et al. (2021) decomposes and recomposes modules on the basis of their functionality; Ilharco et al. (2023) proposes modulating model behavior using task vectors; Lv et al. (2023) amalgamates parameter-efficient modules weighted according to task similarity; Zhang et al. (2023a) crafts modules by employing specific arithmetic operations; Sun et al. (2023) improves few-shot performance of unseen tasks by multi-task pre-training of prompts; Chronopoulou et al. (2023) averages adapter weights intended for transfer; Ponti et al. (2023) focuses on jointly learning adapters and a routing function that allocates skills to each task; and Muqeeth et al. (2023) concentrates on amalgamating experts in mixture of experts models; However, these methods generally necessitate multi-task training or human prior on module selection for the downstream task. In contrast, our method does not impose any special training requirements and simply employs vanilla LoRA tuning. Additionally, the module selection for downstream tasks is entirely data-driven without human prior knowledge. This design gives the advantage of easily adding new LoRA modules for reuse, allowing our method to flexibly scale up the number of LoRA module candidates in the future. ", + "page_idx": 8 + }, + { + "type": "text", + "text": "Mixture of Experts The Mixture of Experts (MoE) is an ensemble method, often visualized as a collection of sub-modules, or β€œexperts”, each specializing in processing different types of input data. 
Each expert in this system is controlled by a unique gating network, activated based on the distinct nature of the input data. For every token in these input sequences, this network identifies and engages the most suitable experts to process the data. As a result, the performance is superior compared to relying on a single, generic model for all types of input. This technique has proven instrumental in numerous domains, such as natural language processing and computer vision (Jacobs et al., 1991; Shazeer et al., 2017; Du et al., 2022; Zhang et al., 2022; Wang et al., 2022; crumb, 2023). Our methodology displays similarities to MoE, wherein upstream-trained LoRA modules can be aligned with MoE’s expert design. A noteworthy distinguishing factor is that our approach mechanism does not require any specialized manipulation of LoRAs during training while facilitating dynamic LoRA module assembly at any scale, each pre-tuned to different tasks. In contrast, MoE mandates a predetermined count of experts during both the training and testing phases. Recent studies on the interrelation between MoE and instruction tuning have demonstrated that the simultaneous application of both approaches enhances the effectiveness of each individually (Shen et al., 2023). ", + "page_idx": 8 + }, + { + "type": "text", + "text": "Cross-Task generalization Recent advancements like CrossFit (Ye et al., 2021), ExT5 (Aribandi et al., 2022), FLAN (Wei et al., 2022), T0 (Sanh et al., 2022), InstructGPT (Ouyang et al., 2022), and ReCross (Lin et al., 2022) have been striving to foster a vastly multi-task model’s generalization across different tasks, very much aligned with the objectives of our research. Among this cohort, the connections of CrossFit and ReCross with LoraHub are particularly noteworthy. The CrossFit framework (Ye et al., 2021) mandates a minimal number of labeled examples of the target task for few-shot fine-tuning. 
However, its limitation lies in the application of task names as hard prefixes in templates, posing challenges in the task’s generalization. On the other hand, while ReCross mitigates the need for labels in few-shot examples for retrieval, it necessitates a fine-tuning process using the retrieved data. This procedure appears time-consuming when compared to LoraHub’s approach. Through the deployment of few-shot labeled examples and a gradientfree optimization process, LoraHub facilitates an iterative update of weights to compose the LoRA modules. The resultant method is more efficient and cost-effective relative to previous work. Overall, LoraHub offers a more practical and viable solution to the optimization process. ", + "page_idx": 8 + }, + { + "type": "text", + "text": "7 Conclusion ", + "text_level": 1, + "page_idx": 9 + }, + { + "type": "text", + "text": "In this work, we have introduced LoraHub, a strategic framework for composing LoRA modules trained on diverse tasks in order to achieve adaptable performance on new tasks. Our approach enables the fluid combination of multiple LoRA modules using just a few examples from a novel task, without requiring additional model parameters or human expertise. The empirical results on the BBH benchmark demonstrate that LoraHub can effectively match the performance of in-context learning in few-shot scenarios, removing the need for in-context examples during inference. Overall, our work shows the promise of strategic LoRA composability for rapidly adapting LLMs to diverse tasks. By fostering reuse and combination of LoRA modules, we can work towards more general and adaptable LLMs while minimizing training costs. ", + "page_idx": 9 + }, + { + "type": "text", + "text": "Reproducibility Statement ", + "text_level": 1, + "page_idx": 9 + }, + { + "type": "text", + "text": "The authors have made great efforts to ensure the reproducibility of the empirical results reported in this paper. 
Firstly, the experiment settings, evaluation metrics, and datasets were described in detail in Section 4.1. Secondly, the codes and script for reproduce the result will be opensource after accepted. Second, the source code implementing the proposed method and experiments will be made publicly available at upon acceptance of the paper. Third, pre-trained LoRA modules from this work along with their configuration files and weights will be shared. These allow reproduction without retraining the LoRA modules, enabling quick testing and verification. ", + "page_idx": 9 + }, + { + "type": "text", + "text": "References ", + "text_level": 1, + "page_idx": 9 + }, + { + "type": "text", + "text": "Samuel Ainsworth, Jonathan Hayase, and Siddhartha Srinivasa. Git re-basin: Merging models modulo permutation symmetries. In The Eleventh International Conference on Learning Representations, 2023. \nShengnan An, Yifei Li, Zeqi Lin, Qian Liu, Bei Chen, Qiang Fu, Weizhu Chen, Nanning Zheng, and Jian-Guang Lou. Input-tuning: Adapting unfamiliar inputs to frozen pretrained models. ArXiv preprint, 2022. \nVamsi Aribandi, Yi Tay, Tal Schuster, Jinfeng Rao, Huaixiu Steven Zheng, Sanket Vaibhav Mehta, Honglei Zhuang, Vinh Q. Tran, Dara Bahri, Jianmo Ni, Jai Prakash Gupta, Kai Hui, Sebastian Ruder, and Donald Metzler. Ext5: Towards extreme multi-task scaling for transfer learning. In Proc. of ICLR, 2022. \nStephen Bach, Victor Sanh, Zheng Xin Yong, Albert Webson, Colin Raffel, Nihal V. Nayak, Abheesht Sharma, Taewoon Kim, M Saiful Bari, Thibault Fevry, Zaid Alyafeai, Manan Dey, Andrea Santilli, Zhiqing Sun, Srulik Ben-david, Canwen Xu, Gunjan Chhablani, Han Wang, Jason Fries, Maged Al-shaibani, Shanya Sharma, Urmish Thakker, Khalid Almubarak, Xiangru Tang, Dragomir Radev, Mike Tian-jian Jiang, and Alexander Rush. PromptSource: An integrated development environment and repository for natural language prompts. In Proc. of ACL, 2022. \nTom B. 
Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. Language models are few-shot learners. In Hugo Larochelle, Marc’Aurelio Ranzato, Raia Hadsell, MariaFlorina Balcan, and Hsuan-Tien Lin (eds.), Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual, 2020. ", + "page_idx": 9 + }, + { + "type": "text", + "text": "Alexis Chevalier, Alexander Wettig, Anirudh Ajith, and Danqi Chen. Adapting language models to compress contexts. CoRR, abs/2305.14788, 2023. doi: 10.48550/ARXIV.2305. 14788. URL https://doi.org/10.48550/arXiv.2305.14788. ", + "page_idx": 10 + }, + { + "type": "text", + "text": "Alexandra Chronopoulou, Matthew Peters, Alexander Fraser, and Jesse Dodge. AdapterSoup: Weight averaging to improve generalization of pretrained language models. In Findings of the Association for Computational Linguistics: EACL 2023, 2023. ", + "page_idx": 10 + }, + { + "type": "text", + "text": "Hyung Won Chung, Le Hou, S. Longpre, Barret Zoph, Yi Tay, William Fedus, Eric Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, Albert Webson, Shixiang Shane Gu, Zhuyun Dai, Mirac Suzgun, Xinyun Chen, Aakanksha Chowdhery, Dasha Valter, Sharan Narang, Gaurav Mishra, Adams Wei Yu, Vincent Zhao, Yanping Huang, Andrew M. Dai, Hongkun Yu, Slav Petrov, Ed Huai hsin Chi, Jeff Dean, Jacob Devlin, Adam Roberts, Denny Zhou, Quoc V. Le, and Jason Wei. Scaling instruction-finetuned language models. ArXiv preprint, 2022. \ncrumb. Llama-2, mixutre of lora. 
https://crumbly.medium.com/ llama-2-molora-f5f909434711, 2023. \nNan Du, Yanping Huang, Andrew M. Dai, Simon Tong, Dmitry Lepikhin, Yuanzhong Xu, Maxim Krikun, Yanqi Zhou, Adams Wei Yu, Orhan Firat, Barret Zoph, Liam Fedus, Maarten P. Bosma, Zongwei Zhou, Tao Wang, Yu Emma Wang, Kellie Webster, Marie Pellat, Kevin Robinson, Kathleen S. Meier-Hellstern, Toju Duke, Lucas Dixon, Kun Zhang, Quoc V. Le, Yonghui Wu, Zhifeng Chen, and Claire Cui. Glam: Efficient scaling of language models with mixture-of-experts. In Kamalika Chaudhuri, Stefanie Jegelka, Le Song, Csaba Szepesvari, Gang Niu, and Sivan Sabato (eds.), Β΄ International Conference on Machine Learning, ICML 2022, 17-23 July 2022, Baltimore, Maryland, USA, Proceedings of Machine Learning Research, 2022. \nTao Ge, Jing Hu, Xun Wang, Si-Qing Chen, and Furu Wei. In-context autoencoder for context compression in a large language model. CoRR, abs/2307.06945, 2023. doi: 10. 48550/ARXIV.2307.06945. URL https://doi.org/10.48550/arXiv.2307.06945. \nAryo Pradipta Gema, Luke Daines, Pasquale Minervini, and Beatrice Alex. Parameterefficient fine-tuning of llama for the clinical domain. ArXiv preprint, 2023. \nNikolaus Hansen and Andreas Ostermeier. Adapting arbitrary normal mutation distributions in evolution strategies: the covariance matrix adaptation. Proceedings of IEEE International Conference on Evolutionary Computation, 1996. \nJunxian He, Chunting Zhou, Xuezhe Ma, Taylor Berg-Kirkpatrick, and Graham Neubig. Towards a unified view of parameter-efficient transfer learning. In Proc. of ICLR, 2022. \nEdward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. Lora: Low-rank adaptation of large language models. In Proc. of ICLR, 2022. \nGabriel Ilharco, Marco Tulio Ribeiro, Mitchell Wortsman, Ludwig Schmidt, Hannaneh Hajishirzi, and Ali Farhadi. Editing models with task arithmetic. In The Eleventh International Conference on Learning Representations, 2023. \nRobert A. 
Jacobs, Michael I. Jordan, Steven J. Nowlan, and Geoffrey E. Hinton. Adaptive mixtures of local experts. Neural Computation, 1991. \nJoel Jang, Seungone Kim, Seonghyeon Ye, Doyoung Kim, Lajanugen Logeswaran, Moontae Lee, Kyungjae Lee, and Minjoon Seo. Exploring the benefits of training expert language models over instruction tuning. In International Conference on Machine Learning, 2023. URL https://api.semanticscholar.org/CorpusID:256627673. \nHuiqiang Jiang, Qianhui Wu, Chin-Yew Lin, Yuqing Yang, and Lili Qiu. Llmlingua: Compressing prompts for accelerated inference of large language models. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing. Association for Computational Linguistics, December 2023a. URL https://arxiv.org/abs/2310.05736. ", + "page_idx": 10 + }, + { + "type": "text", + "text": "Huiqiang Jiang, Qianhui Wu, Xufang Luo, Dongsheng Li, Chin-Yew Lin, Yuqing Yang, and Lili Qiu. Longllmlingua: Accelerating and enhancing llms in long context scenarios via prompt compression. CoRR, abs/2310.06839, 2023b. doi: 10.48550/ARXIV.2310.06839. URL https://doi.org/10.48550/arXiv.2310.06839. ", + "page_idx": 11 + }, + { + "type": "text", + "text": "Xisen Jin, Xiang Ren, Daniel Preotiuc-Pietro, and Pengxiang Cheng. Dataless knowledge fusion by merging weights of language models. In The Eleventh International Conference on Learning Representations, 2023. \nHiroaki Kingetsu, Kenichi Kobayashi, and Taiji Suzuki. Neural network module decomposition and recomposition. ArXiv preprint, 2021. \nBrian Lester, Rami Al-Rfou, and Noah Constant. The power of scale for parameter-efficient prompt tuning. In Proc. of EMNLP, 2021. \nYucheng Li, Bo Dong, Chenghua Lin, and Frank Guerin. Compressing context to enhance inference efficiency of large language models. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing. Association for Computational Linguistics, December 2023. URL https://arxiv.org/abs/2310.06201. 
\nBill Yuchen Lin, Kangmin Tan, Chris Miller, Beiwen Tian, and Xiang Ren. Unsupervised cross-task generalization via retrieval augmentation. In NeurIPS, 2022. \nHaokun Liu, Derek Tam, Mohammed Muqeeth, Jay Mohta, Tenghao Huang, Mohit Bansal, and Colin Raffel. Few-shot parameter-efficient fine-tuning is better and cheaper than incontext learning. ArXiv, abs/2205.05638, 2022. URL https://api.semanticscholar.org/ CorpusID:248693283. \nJialin Liu, A. Moreau, Mike Preuss, Baptiste Roziere, J \\` erΒ΄ emy Rapin, Fabien Teytaud, and Β΄ Olivier Teytaud. Versatile black-box optimization. Proceedings of the 2020 Genetic and Evolutionary Computation Conference, 2020. \nShayne Longpre, Le Hou, Tu Vu, Albert Webson, Hyung Won Chung, Yi Tay, Denny Zhou, Quoc V. Le, Barret Zoph, Jason Wei, and Adam Roberts. The flan collection: Designing data and methods for effective instruction tuning, 2023. \nXingtai Lv, Ning Ding, Yujia Qin, Zhiyuan Liu, and Maosong Sun. Parameter-efficient weight ensembling facilitates task-level knowledge transfer. In Annual Meeting of the Association for Computational Linguistics, 2023. \nSourab Mangrulkar, Sylvain Gugger, Lysandre Debut, Younes Belkada, and Sayak Paul. Peft: State-of-the-art parameter-efficient fine-tuning methods. https://github.com/ huggingface/peft, 2022. \nMichael Matena and Colin Raffel. Merging models with fisher-weighted averaging. ArXiv preprint, 2021. \nSewon Min, Mike Lewis, Luke Zettlemoyer, and Hannaneh Hajishirzi. MetaICL: Learning to learn in context. In Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, 2022. \nSwaroop Mishra, Daniel Khashabi, Chitta Baral, and Hannaneh Hajishirzi. Cross-task generalization via natural language crowdsourcing instructions. In Proc. of ACL, 2022. \nMohammed Muqeeth, Haokun Liu, and Colin Raffel. Soft merging of experts with adaptive routing. ArXiv preprint, 2023. \nOpenAI. ChatGPT. 2022. 
URL https://openai.com/blog/chatgpt. \nLong Ouyang, Jeff Wu, Xu Jiang, Diogo Almeida, Carroll L. Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, John Schulman, Jacob Hilton, Fraser Kelton, Luke E. Miller, Maddie Simens, Amanda Askell, Peter Welinder, Paul Francis Christiano, Jan Leike, and Ryan J. Lowe. Training language models to follow instructions with human feedback. ArXiv preprint, 2022. ", + "page_idx": 11 + }, + { + "type": "text", + "text": "Panupong Pasupat and Percy Liang. Compositional semantic parsing on semi-structured tables. In Proc. of ACL, 2015. ", + "page_idx": 12 + }, + { + "type": "text", + "text": "Edoardo Maria Ponti, Alessandro Sordoni, Yoshua Bengio, and Siva Reddy. Combining parameter-efficient modules for task-level generalisation. In Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics, 2023. ", + "page_idx": 12 + }, + { + "type": "text", + "text": "Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. J. Mach. Learn. Res., 2020. ", + "page_idx": 12 + }, + { + "type": "text", + "text": "J. Rapin and O. Teytaud. Nevergrad - A gradient-free optimization platform. https:// GitHub.com/FacebookResearch/Nevergrad, 2018. ", + "page_idx": 12 + }, + { + "type": "text", + "text": "Victor Sanh, Albert Webson, Colin Raffel, Stephen H. Bach, Lintang Sutawika, Zaid Alyafeai, Antoine Chaffin, Arnaud Stiegler, Arun Raja, Manan Dey, M Saiful Bari, Canwen Xu, Urmish Thakker, Shanya Sharma Sharma, Eliza Szczechla, Taewoon Kim, Gunjan Chhablani, Nihal V. 
Nayak, Debajyoti Datta, Jonathan Chang, Mike Tian-Jian Jiang, Han Wang, Matteo Manica, Sheng Shen, Zheng Xin Yong, Harshit Pandey, Rachel Bawden, Thomas Wang, Trishala Neeraj, Jos Rozen, Abheesht Sharma, Andrea Santilli, Thibault Fevry, Jason Alan Fries, Ryan Teehan, Teven Le Scao, Stella Biderman, Leo Gao, Β΄ Thomas Wolf, and Alexander M. Rush. Multitask prompted training enables zero-shot task generalization. In Proc. of ICLR, 2022. ", + "page_idx": 12 + }, + { + "type": "text", + "text": "Noam Shazeer, Azalia Mirhoseini, Krzysztof Maziarz, Andy Davis, Quoc V. Le, Geoffrey E. Hinton, and Jeff Dean. Outrageously large neural networks: The sparsely-gated mixtureof-experts layer. In Proc. of ICLR, 2017. ", + "page_idx": 12 + }, + { + "type": "text", + "text": "Sheng Shen, Le Hou, Yanqi Zhou, Nan Du, Shayne Longpre, Jason Wei, Hyung Won Chung, Barret Zoph, William Fedus, Xinyun Chen, Tu Vu, Yuexin Wu, Wuyang Chen, Albert Webson, Yunxuan Li, Vincent Zhao, Hongkun Yu, Kurt Keutzer, Trevor Darrell, and Denny Zhou. Mixture-of-experts meets instruction tuning:a winning combination for large language models, 2023. ", + "page_idx": 12 + }, + { + "type": "text", + "text": "George Stoica, Daniel Bolya, Jakob Bjorner, Taylor Hearn, and Judy Hoffman. Zipit! merging models from different tasks without training. arXiv, 2023. ", + "page_idx": 12 + }, + { + "type": "text", + "text": "Tianxiang Sun, Yunfan Shao, Hong Qian, Xuanjing Huang, and Xipeng Qiu. Black-box tuning for language-model-as-a-service. In Kamalika Chaudhuri, Stefanie Jegelka, Le Song, Csaba Szepesvari, Gang Niu, and Sivan Sabato (eds.), Β΄ International Conference on Machine Learning, ICML 2022, 17-23 July 2022, Baltimore, Maryland, USA, Proceedings of Machine Learning Research, 2022. ", + "page_idx": 12 + }, + { + "type": "text", + "text": "Tianxiang Sun, Zhengfu He, Qin Zhu, Xipeng Qiu, and Xuanjing Huang. Multitask pretraining of modular prompt for Chinese few-shot learning. In Proc. of ACL, 2023. 
", + "page_idx": 12 + }, + { + "type": "text", + "text": "Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothee Lacroix, Baptiste Rozi Β΄ ere, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien \\` Rodriguez, Armand Joulin, Edouard Grave, and Guillaume Lample. Llama: Open and efficient foundation language models. ArXiv preprint, 2023. ", + "page_idx": 12 + }, + { + "type": "text", + "text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In Isabelle Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vishwanathan, and Roman Garnett (eds.), Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, 2017. ", + "page_idx": 12 + }, + { + "type": "text", + "text": "Yaqing Wang, Sahaj Agarwal, Subhabrata Mukherjee, Xiaodong Liu, Jing Gao, Ahmed Hassan Awadallah, and Jianfeng Gao. AdaMix: Mixture-of-adaptations for parameter-efficient model tuning. In Proc. of EMNLP, 2022. ", + "page_idx": 12 + }, + { + "type": "text", + "text": "Jason Wei, Maarten Bosma, Vincent Y. Zhao, Kelvin Guu, Adams Wei Yu, Brian Lester, Nan Du, Andrew M. Dai, and Quoc V. Le. Finetuned language models are zero-shot learners. In Proc. of ICLR, 2022. \nChengyue Wu, Teng Wang, Yixiao Ge, Zeyu Lu, Ruisong Zhou, Ying Shan, and Ping Luo. $\\pi$ -tuning: Transferring multimodal foundation models with optimal multi-task interpolation. In Andreas Krause, Emma Brunskill, Kyunghyun Cho, Barbara Engelhardt, Sivan Sabato, and Jonathan Scarlett (eds.), International Conference on Machine Learning, ICML 2023, 23-29 July 2023, Honolulu, Hawaii, USA, volume 202 of Proceedings of Machine Learning Research, pp. 37713–37727. PMLR, 2023a. URL https://proceedings.mlr. press/v202/wu23t.html. 
\nShijie Wu, Ozan Irsoy, Steven Lu, Vadim Dabravolski, Mark Dredze, Sebastian Gehrmann, Prabhanjan Kambadur, David S. Rosenberg, and Gideon Mann. Bloomberggpt: A large language model for finance. CoRR, abs/2303.17564, 2023b. doi: 10.48550/arXiv.2303. 17564. URL https://doi.org/10.48550/arXiv.2303.17564. \nPrateek Yadav, Derek Tam, Leshem Choshen, Colin Raffel, and Mohit Bansal. TIESmerging: Resolving interference when merging models. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id= xtaX3WyCj1. \nQinyuan Ye, Bill Yuchen Lin, and Xiang Ren. CrossFit: A few-shot learning challenge for cross-task generalization in NLP. In Proc. of EMNLP, 2021. \nChris Zhang, Mengye Ren, and Raquel Urtasun. Graph hypernetworks for neural architecture search. In Proc. of ICLR, 2019. \nFan Zhang, Duyu Tang, Yong Dai, Cong Zhou, Shuangzhi Wu, and Shuming Shi. Skillnetnlu: A sparsely activated model for general-purpose natural language understanding, 2022. \nJinghan Zhang, Shiqi Chen, Junteng Liu, and Junxian He. Composing parameter-efficient modules with arithmetic operations. ArXiv preprint, 2023a. \nLongteng Zhang, Lin Zhang, Shaohuai Shi, Xiaowen Chu, and Bo Li. Lora-fa: Memory-efficient low-rank adaptation for large language models fine-tuning. ArXiv, abs/2308.03303, 2023b. URL https://api.semanticscholar.org/CorpusID:260683267. \nWangchunshu Zhou, Yuchen Eleanor Jiang, Ryan Cotterell, and Mrinmaya Sachan. Efficient prompting via dynamic in-context learning. CoRR, abs/2305.11170, 2023. doi: 10.48550/ARXIV.2305.11170. URL https://doi.org/10.48550/arXiv.2305.11170. ", + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/8025498f3c910dbeea940a1ac3d7d18dc6c8a3edff07b8bec4d71d0b1464157c.jpg", + "table_caption": [ + "Table 3: The top five beneficial LoRA modules for BBH tasks and their associated upstream tasks, the average weight values and the average performance on all BBH tasks. 
" + ], + "table_footnote": [], + "table_body": "
RankDataset: TaskWeightPerfTask Description
1WIQA: Last Process0.7228.1 Identifying the last step of a given process.
2RACE: Is this the Right Answer0.6830.8Determining if given answer is correct.
3WIQA: First Process0.6328.1 Identifying the first step of a given process.
4AdversarialQA: BiDAF0.6125.1Aserialmode-in-the-eby an
5WebQuestions: What is the Answer0.5827.0 Asweringrqomesten based oninformation
", + "page_idx": 14 + }, + { + "type": "text", + "text": "A More Analysis ", + "text_level": 1, + "page_idx": 14 + }, + { + "type": "text", + "text": "Which LoRA modules are most effective for BBH tasks? ", + "page_idx": 14 + }, + { + "type": "text", + "text": "We hypothesized that the amalgamation of LoRA modules could incorporate skills and insights from a variety of specific tasks. To evaluate this, we examined the extent of influence a single LoRA module had amongst all tasks from the BBH benchmark. We measured the impact of each isolated task by calculating the average absolute weight. The top five modules, presented in Table 3, were found to have substantial influence, as indicated by their maximum average weights, which suggested that they were notably more effective in cross-task transfer. Remarkably, a common feature among these top five modules was their association with tasks requiring reading comprehension and reasoning skillsβ€”attributes indicative of higher cognitive complexity. However, it is worth noting that none of the modules exhibited consistent improvement across all BBH tasks, as reflected in their average performance on all BBH tasks, which did not show a significant improvement compared to the original FLAN-T5-large, except for the Rank 2. The results underscore the advantages of composing diverse modules in LoraHub. ", + "page_idx": 14 + }, + { + "type": "text", + "text": "How effective is the gradient-free optimization method? ", + "page_idx": 14 + }, + { + "type": "text", + "text": "To assess the effectiveness of our gradient-free optimization method in correctly identifying the most suitable LoRA module for a given downstream task, we carried out an empirical study using the WikiTableQuestions (Pasupat & Liang, 2015) (WTQ) dataset. We strategically included a LoRA module that was specifically trained on the WTQ dataset into our pool of LoRA candidate modules, which originally stemmed from tasks exclusive to the Flan Collection. 
Subsequently, we designated WTQ as the targeted downstream task and computed the weights consistent with the methods employed in LoraHub learning. As an end result, the WTQ-specific LoRA module was awarded the highest weight, exemplifying the algorithm’s success in recognizing it as the most relevant. Moreover, the combined LoRA module demonstrated marginal superiority over the WTQ LoRA module. This underscores the claim that the gradient-free optimization method has the ability to proficiently select the optimal upstream LoRA module for an unseen task. ", + "page_idx": 14 + }, + { + "type": "text", + "text": "B Result of Best Results ", + "text_level": 1, + "page_idx": 14 + }, + { + "type": "text", + "text": "As shown in Table 4, compared to gradient-based parameter-efficient training methods like LoRA and IA3, our approach demonstrates superior performance in terms of best results over experimental runs. While it exhibits a noticeable lag behind the fully fine-tuning (FFT) method, which updates all parameters during training, this observation suggests that our proposed method has a promising upper limit. We anticipate that future research efforts can contribute to accelerating the optimization speed and further enhancing the efficacy of our approach. ", + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/95c66e826a893a9a4a0a9da3fa42a7b74e1324af47f01994c7ebe53ba5447ca0.jpg", + "table_caption": [ + "Table 4: Experimental results of several few-shot methods, including in-context learning (ICL), IA3 fine-tuning (IA3), LoRA tuning (LoRA), full fine-tuning (FFT) and our LoraHub learning (LoraHub) on the BBH benchmark with FLAN-T5-large as the base LLM. We denote algorithmic tasks with the superscript $\\ S$ following previous work (Wu et al., 2023b). Note that we use 5 examples per task as the demonstration for all methods. The best (best) performance is reported as the maximum value obtained across three runs. 
" + ], + "table_footnote": [], + "table_body": "
TaskICLbestIA3bestLoRAbestFFTbestLoraHubbest
Boolean Expressions62.758.060.765.360.7
Causal Judgement59.862.157.560.963.2
Date Understanding21.320.740.767.345.3
Disambiguation69.30.068.770.768.0
Dyck Languages2.04.725.333.32.7
Formal Fallacies59.352.056.756.059.3
Geometric Shapes20.015.328.739.318.7
Hyperbaton72.749.357.382.072.7
Logical DeductionS (five objects)39.332.741.343.340.0
Logical DeductionS (seven objects)42.034.042.746.046.0
LogicalDrectjoets)52.78.756.760.752.7
Movie Recommendation56.762.064.570.762.0
Multistep Arithmetic0.70.70.70.01.3
Navigate46.747.350.750.051.3
Object Counting34.735.342.038.036.7
Penguins in a Table43.545.741.337.047.8
Reasoning about Colored Objects41.341.340.738.744.7
Ruin Names20.725.342.066.028.7
Salient Translation Error Detection48.037.317.321.342.7
Snarks55.156.459.069.261.5
Sports Understanding56.755.358.758.762.7
Temporal Sequences26.718.731.348.721.3
Tracking Shuffled ObjectsS (five objects)12.012.016.020.016.7
Tracking Shuffled ObjectsS (seven objects)6.76.712.010.015.3
Tracking Shuffled ObjectsS (three objects)31.330.732.036.031.3
Web of Lies54.054.755.354.057.3
Word Sorting0.71.35.36.01.3
Best Performance (Average)38.432.140.946.241.2
", + "page_idx": 15 + }, + { + "type": "text", + "text": "C Result of non-instrcution-tuned models ", + "text_level": 1, + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/0a96643f4a27749431647c2c4c4281dfc58fdf816ad0790812621e1af017102a.jpg", + "table_caption": [ + "Table 5: Comparsion among different ranks for few-shot LoraHub learning with the backbone T5-large (Raffel et al., 2020) on the BBH benchmark. Note that the T5-large model achieved $0 . { \\bar { 0 } } \\%$ on all tasks under the zero-shot setting except Dyck Languages, where it scored $0 . 6 7 \\%$ . " + ], + "table_footnote": [], + "table_body": "
Task ↓ Rank β†’4best 4avg16avg16best64avg64best
Boolean Expressions52.13 57.3350.6758.0047.4758.00
Causal Judgement52.4155.1749.6654.0250.8054.02
Date Understanding0.402.0014.4029.334.5310.00
Disambiguation10.0031.3326.9342.001.734.67
Dyck Languages0.400.670.400.670.402.00
Formal Fallacies48.4054.0046.9351.3346.9350.00
Geometric Shapes0.000.006.5332.671.477.33
Hyperbaton30.1350.0039.07 57.3332.9348.00
Logical DeductionS (five objects)5.2014.678.8019.331.336.67
Logical DeductionS (seven objects)6.4017.339.3319.333.4716.00
Logical DeductionS14.4032.0021.7334.676.9315.33
(three objects) Movie Recommendation7.0718.677.8722.001.206.00
Multistep Arithmetic two0.000.000.000.000.000.00
Navigate49.6054.6752.2756.6749.8752.00
Object Counting7.2018.0016.0021.3313.7326.67
Penguins ina Table6.5213.0410.4317.390.432.17
Reasoning about Colored Objects6.2710.005.0716.670.532.67
Ruin Names7.7313.3313.2028.005.7315.33
Salient Translation Error Detection0.000.001.738.670.000.00
Snarks21.2842.3149.4960.2616.1538.46
Sports Understanding46.5358.6746.8058.6746.5358.67
Temporal Sequences3.0713.336.5326.672.4012.00
Tracking Shuffled ObjectsS5.2014.004.139.330.130.67
(five objects) Tracking Shuffled ObjectsS (seven objects)2.6710.002.8014.003.208.00
Tracking Shuffled ObjectsS3.7317.3316.2734.675.8726.67
(three objects) Web of Lies48.5354.00 57.33
Word Sorting0.400.6754.00 0.1356.00 0.6754.67 0.000.00
20.78
Average Performance per Task16.1424.1730.7314.7621.43
", + "page_idx": 16 + }, + { + "type": "text", + "text": "D Result of larger model ", + "text_level": 1, + "page_idx": 17 + }, + { + "type": "text", + "text": "Table 6: Experimental results of zero-shot learning (Zero) and our few-shot LoraHub learning (LoraHub) on the BBH benchmark with FLAN-T5-xl as the base LLM. Note that we use 5 examples per task as the demonstration for both ICL and LoraHub. The average $( a v g )$ performance of LoraHub is computed over 5 runs with different random seeds, while the best (best) performance is reported as the maximum value obtained across these runs. We can see the trend of the results are similar to FLAN-T5-large. ", + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/0ab8ee9f98ef9b7388aa22dd0aeb88c7626414f78e70ff4407066b0bb2b27a06.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
TaskZeroLoraHub avgLoraHub best
Boolean Expressions52.058.763.3
Causal Judgement62.153.859.8
Date Understanding38.037.638.0
Disambiguation Qa0.020.5 54.7
Dyck Languages1.30.92.0
Formal Fallacies56.056.056.0
Geometric Shapes8.717.528.0
Hyperbaton45.353.556.7
Logical DeductionS (five objects)1.342.748.7
Logical DeductionS (seven objects)8.744.350.0
Logical DeductionS (three objects)0.756.461.3
Movie Recommendation2.062.866.0
Multistep Arithmetic Two0.00.40.7
Navigate50.750.750.7
Object Counting39.340.748.0
Penguins In A Table17.440.945.7
Reasoning About Colored Objects46.747.350.7
Ruin Names18.035.644.7
Salient Translation Error Detection44.745.148.7
Snarks60.360.861.5
Sports Understanding56.751.353.3
Temporal Sequences21.321.522.0
Tracking Shuffled ObjectsS3.39.913.3
(five objects) Tracking Shuffled ObjectsS (seven objects)5.37.38.7
Tracking Shuffled ObjectsS7.321.731.3
(three objects) Web Of Lies54.747.148.7
Word Sorting1.31.52.0
Average Performance per Task25.836.541.3
", + "page_idx": 17 + }, + { + "type": "text", + "text": "E Improving the Robustness of LoraHub ", + "text_level": 1, + "page_idx": 18 + }, + { + "type": "text", + "text": "In order to enhance the robustness of LoraHub, we explored a straightforward approach in the selection of LoRA module candidates. Specifically, we first identified 20 LoRA module candidates with the lowest loss on the few-shot examples. Our findings indicate a slight improvement in overall performance after applying the pre-filtering startegy. Since the primary instability in our approach arises from the selection of LoRA candidates. This method involves choosing a fixed set of LoRA candidates to ensure the stability of our approach. ", + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/d6f6dad37f46055044f3fa33031ace9323de2344f42ee9318d2484d1ab05748f.jpg", + "table_caption": [ + "Table 7: The experimental results of loss-based pre-filtering. " + ], + "table_footnote": [], + "table_body": "
TaskLoraHubavgLoraHubfilter
Boolean Expressions55.560.00
Causal Judgement54.352.9
Date Understanding32.933.3
Disambiguation45.262.7
Dyck Languages1.00.0
Formal Fallacies52.854.0
Geometric Shapes7.44.0
Hyperbaton62.864.0
Logical DeductionS (five objects)36.137.3
Logical DeductionS (seven objects)36.822.0
Logical DeductionS (three objects)45.756.0
Movie Recommendation55.368.0
Multistep Arithmetic0.40.7
Navigate47.149.3
Object Counting33.738.7
Penguins in a Table35.937.0
Reasoning about Colored Objects40.033.3
Ruin Names24.422.0
Salient Translation Error Detection36.024.0
Snarks56.952.66
Sports Understanding56.758.0
Temporal Sequences18.227.3
Tracking Shuffled ObjectsS12.311.3
(five objects) Tracking Shuffled ObjectsS7.78.0
(seven objects) Tracking Shuffled ObjectsS29.232.7
(three objects) Web of Lies50.146.0
Word Sorting1.11.3
34.735.4
Avg Performance Per Task
", + "page_idx": 18 + }, + { + "type": "text", + "text": "F Performance on General Important Task ", + "text_level": 1, + "page_idx": 19 + }, + { + "type": "text", + "text": "In our research, we have identified specific LoRA modules that exhibit significant impact when integrated into merged LoRAs. Our focus lies in assessing the performance of the top five task-related LoRAs on the BBH benchmark. The results indicate that these top LoRAs perform similarly or even worse than zero-shot in most cases. Only one of them stands out as significantly better than zero-shot. However, it’s worth noting that this performance is not as impressive as Lorahub. These findings support the idea that the merging process can improve overall performance. ", + "page_idx": 19 + }, + { + "type": "table", + "img_path": "images/a5a978a9e175ac55d980958495a1a7e775eca792910cb318c11e008479f07afe.jpg", + "table_caption": [ + "Table 8: Detailed experimental results of top five LoRA modules shown in Table 3 on BBH tasks. " + ], + "table_footnote": [], + "table_body": "
TaskWIQA: LastRACE: RightWIQA: FirstADQAWebQA
Boolean Expressions52.6758.0052.6754.6753.33
Causal Judgement55.1763.2255.1757.4757.47
Date Understanding17.3319.3317.3316.6715.33
Disambiguation0.000.000.000.000.00
Dyck Languages0.670.670.671.331.33
Formal Fallacies51.3351.3351.3351.3351.33
Geometric Shapes8.0013.338.006.677.33
Hyperbaton16.6744.0016.671.336.00
Logical Ded uctionts)23.3328.0023.3319.3320.67
Logical DeductionS (seven objects)22.0026.0022.0010.6712.00
Logical DeductionS (three objects)0.679.330.670.000.00
Movie Recommendation63.3362.6763.3356.6763.33
Multistep Arithmetic0.670.670.670.670.67
Navigate47.3350.0047.3347.3347.33
Object Counting34.6734.0034.6735.3335.33
Penguins in a Table45.6541.3045.6539.1343.48
Reasoning about Colored Objects40.0037.3340.0031.3330.67
Ruin Names22.0021.3322.0017.3322.67
Salient Translation Error Detection36.6734.6736.6732.6737.33
Snarks52.5655.1352.5647.4452.56
Sports Understanding56.0058.6756.0055.33
Temporal Sequences16.6717.3316.6712.6755.33 17.33
Tracking Shuffled ObjectsS (five objects)12.0012.0012.0010.6712.00
Tracking Shuffled ObjectsS (seven objects)6.676.676.676.676.67
Tracking Shuffled ObjectsS20.6730.6720.6710.6725.33
(three objects) Web of Lies54.6754.0054.6754.00
Word Sorting1.331.331.331.3354.00 1.33
Avg Performance per Task β–³ FLAN-T5-large28.10 1.1030.78 3.7828.10 1.1025.14 -1.8627.04 0.04
", + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/f08459cc633da4d25e332908058acffc5a55cf3fadee5264d074582bf20749f5.jpg", + "image_caption": [ + "Figure 3: The influence of number of LoRA modules on 15 tasks from BBH, and each box is obtained from 5 separate runs. The horizontal axis shows the number of LoRA modules to be composed in LoraHub learning. " + ], + "image_footnote": [], + "page_idx": 20 + }, + { + "type": "text", + "text": "G Implementation details ", + "text_level": 1, + "page_idx": 20 + }, + { + "type": "text", + "text": "We implemented LoRA tuning using the Huggingface PEFT library (Mangrulkar et al., 2022), with the rank being set as 16. The gradient-free method was implemented using the open-source Nevergrad optimization library (Rapin & Teytaud, 2018), with a constraint that the absolute value of LoRA weights should not exceed 1.5. Originally, all coefficients of LoRA modules were set at zero. ", + "page_idx": 20 + }, + { + "type": "text", + "text": "In our standard settings, we set the maximum number of iterations $K$ as 40. The same 5 examples were used during our LoraHub learning and the few-shot in-context learning. The hyperparameter $\\alpha$ is set as 0.05. Regarding the hyperparameters for training candidate LoRA modules, we maintained consistency across all modules, setting the batch size at 64, the learning rate at $1 e - 4 ,$ and the number of training epochs at 10. ", + "page_idx": 20 + }, + { + "type": "text", + "text": "H Influence of Number of LoRA modules ", + "text_level": 1, + "page_idx": 20 + }, + { + "type": "text", + "text": "As shown in Figure 3, with an increase in the number of LoRA module candidates, there is a corresponding increase in the performance variance. Based on our in-depth analysis, the primary source of variance is not related to gradient-free optimization algorithms but rather associated with the LoRA candidate modules. 
In other words, once the candidates are determined, random seeds have minimal impact on the final performance. Hence, we posit that the observed instability primarily arises from the inherent challenge of balancing the quantity and quality of the LoRA module candidates. ", + "page_idx": 20 + }, + { + "type": "text", + "text": "I The Impact of Threshold ", + "text_level": 1, + "page_idx": 20 + }, + { + "type": "text", + "text": "In this section, we omitted the threshold in our implementation, and the results are summarized in Table 9. Our observations indicate that the removal of the threshold had minimal impact on the majority of tasks, underscoring the robustness of the gradient-free optimization algorithm itself in most cases. The algorithm efficiently identified reasonable ranges even without specific upper and lower bounds. However, three tasks, namely Date Understanding, Disambiguation and Hyperbaton, exhibited notable effects. The resulting performance decline led to an average decrease of $1 . 2 \\%$ compared to the setting with threshold. ", + "page_idx": 20 + }, + { + "type": "text", + "text": "This highlights the significance of establishing a reasonable threshold to mitigate extreme scenarios. ", + "page_idx": 21 + }, + { + "type": "table", + "img_path": "images/522e1fa23ba78543a5afefbbeddc87850fa222239fd245c96bbb6d9c91774129.jpg", + "table_caption": [ + "Table 9: The comparsion between LoraHub and LoraHub without threshold. " + ], + "table_footnote": [], + "table_body": "
TaskLoraHubavg with thresholdLoraHubavg without threshold
Boolean Expressions55.554.0
Causal Judgement54.354.8
Date Understanding32.917.7
Disambiguation45.240.6
Dyck Languages1.01.1
Formal Fallacies52.851.7
Geometric Shapes7.46.7
Hyperbaton62.855.5
Logical DeductionS (five objects)36.136.5
Logical DeductionS (seven objects)36.835.6
Logical DeductionS45.7
(three objects) Movie Recommendation49.9
Multistep Arithmetic55.359.3
Navigate0.40.7
Object Counting47.147.6
33.734.7
Penguins in a Table35.933.8
Reasoning about Colored Objects40.037.9
Ruin Names24.424.0
Salient Translation Error Detection36.037.1
Snarks56.951.6
Sports Understanding56.755.9
Temporal Sequences18.216.7
Tracking Shuffled ObjectsS (five objects)12.312.3
Tracking Shuffled ObjectsS (seven objects)7.78.5
Tracking Shuffled ObjectsS (three objects)29.229.8
Web of Lies50.150.3
Word Sorting1.11.3
Avg Performance Per Task34.733.5
", + "page_idx": 21 + } +] \ No newline at end of file diff --git a/parse/test/TrloAXEJ2B/TrloAXEJ2B_middle.json b/parse/test/TrloAXEJ2B/TrloAXEJ2B_middle.json new file mode 100644 index 0000000000000000000000000000000000000000..aa523bfdb14a6d0ebb45c47bb3c6eb98953c60bc --- /dev/null +++ b/parse/test/TrloAXEJ2B/TrloAXEJ2B_middle.json @@ -0,0 +1,43191 @@ +{ + "pdf_info": [ + { + "preproc_blocks": [ + { + "type": "title", + "bbox": [ + 111, + 81, + 503, + 115 + ], + "lines": [ + { + "bbox": [ + 109, + 78, + 505, + 102 + ], + "spans": [ + { + "bbox": [ + 109, + 80, + 123, + 97 + ], + "score": 0.0, + "content": "", + "type": "text" + }, + { + "bbox": [ + 124, + 78, + 505, + 102 + ], + "score": 1.0, + "content": "LoraHub: Efficient Cross-Task Generalization via Dy-", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 108, + 97, + 281, + 117 + ], + "spans": [ + { + "bbox": [ + 108, + 97, + 281, + 117 + ], + "score": 1.0, + "content": "namic LoRA Composition", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 0.5 + }, + { + "type": "text", + "bbox": [ + 115, + 135, + 518, + 185 + ], + "lines": [ + { + "bbox": [ + 115, + 134, + 518, + 150 + ], + "spans": [ + { + "bbox": [ + 115, + 134, + 200, + 150 + ], + "score": 1.0, + "content": "Chengsong Huang", + "type": "text" + }, + { + "bbox": [ + 201, + 135, + 213, + 147 + ], + "score": 0.29, + "content": "\\mathbf { \\Delta } \\mathbf { \\dag \\ S \\mathrm { \\ s \\mathrm { \\ s } } }", + "type": "inline_equation" + }, + { + "bbox": [ + 213, + 134, + 327, + 150 + ], + "score": 1.0, + "content": ", Qian Liuβ€ βˆ—, Bill Yuchen", + "type": "text" + }, + { + "bbox": [ + 327, + 135, + 354, + 148 + ], + "score": 0.7, + "content": "\\mathbf { L i n } ^ { \\bigotimes * }", + "type": "inline_equation" + }, + { + "bbox": [ + 354, + 134, + 451, + 150 + ], + "score": 1.0, + "content": ", Tianyu Pang†, Chao", + "type": "text" + }, + { + "bbox": [ + 451, + 135, + 471, + 148 + ], + "score": 0.49, + "content": "{ { \\mathbf { 
D } } { { \\mathbf { u } } } ^ { \\dag } }", + "type": "inline_equation" + }, + { + "bbox": [ + 471, + 134, + 518, + 150 + ], + "score": 1.0, + "content": ", Min Lin†", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 260, + 147, + 364, + 162 + ], + "spans": [ + { + "bbox": [ + 260, + 147, + 364, + 162 + ], + "score": 1.0, + "content": "†Sea AI Lab, Singapore", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 205, + 158, + 417, + 175 + ], + "spans": [ + { + "bbox": [ + 205, + 158, + 417, + 175 + ], + "score": 1.0, + "content": "Β§Washington University in St. Louis, MO, USA", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 219, + 171, + 403, + 187 + ], + "spans": [ + { + "bbox": [ + 219, + 171, + 403, + 187 + ], + "score": 1.0, + "content": "β™’Allen Institute for AI, Seattle, WA, USA", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 3.5 + }, + { + "type": "title", + "bbox": [ + 282, + 213, + 329, + 226 + ], + "lines": [ + { + "bbox": [ + 281, + 213, + 331, + 228 + ], + "spans": [ + { + "bbox": [ + 281, + 213, + 331, + 228 + ], + "score": 1.0, + "content": "Abstract", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 6 + }, + { + "type": "text", + "bbox": [ + 143, + 239, + 468, + 470 + ], + "lines": [ + { + "bbox": [ + 141, + 239, + 469, + 252 + ], + "spans": [ + { + "bbox": [ + 141, + 239, + 469, + 252 + ], + "score": 1.0, + "content": "Low-rank adaptations (LoRA) are often employed to fine-tune large lan-", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 140, + 249, + 470, + 264 + ], + "spans": [ + { + "bbox": [ + 140, + 249, + 470, + 264 + ], + "score": 1.0, + "content": "guage models (LLMs) for new tasks. 
This paper investigates LoRA com-", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 140, + 261, + 470, + 275 + ], + "spans": [ + { + "bbox": [ + 140, + 261, + 470, + 275 + ], + "score": 1.0, + "content": "posability for cross-task generalization and introduces LoraHub, a simple", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 141, + 272, + 470, + 285 + ], + "spans": [ + { + "bbox": [ + 141, + 272, + 470, + 285 + ], + "score": 1.0, + "content": "framework devised for the purposive assembly of LoRA modules trained", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 141, + 283, + 470, + 296 + ], + "spans": [ + { + "bbox": [ + 141, + 283, + 470, + 296 + ], + "score": 1.0, + "content": "on diverse given tasks, with the objective of achieving adaptable perfor-", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 141, + 294, + 469, + 307 + ], + "spans": [ + { + "bbox": [ + 141, + 294, + 469, + 307 + ], + "score": 1.0, + "content": "mance on unseen tasks. With just a few examples from a new task, Lo-", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 141, + 305, + 470, + 319 + ], + "spans": [ + { + "bbox": [ + 141, + 305, + 470, + 319 + ], + "score": 1.0, + "content": "raHub can fluidly combine multiple LoRA modules, eliminating the need", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 141, + 316, + 469, + 330 + ], + "spans": [ + { + "bbox": [ + 141, + 316, + 469, + 330 + ], + "score": 1.0, + "content": "for human expertise and assumptions. Notably, the composition requires", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 141, + 327, + 469, + 340 + ], + "spans": [ + { + "bbox": [ + 141, + 327, + 469, + 340 + ], + "score": 1.0, + "content": "neither additional model parameters nor gradients. 
Empirical results on", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 141, + 338, + 469, + 352 + ], + "spans": [ + { + "bbox": [ + 141, + 338, + 469, + 352 + ], + "score": 1.0, + "content": "the Big-Bench Hard benchmark suggest that LoraHub, while not surpass-", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 141, + 349, + 469, + 363 + ], + "spans": [ + { + "bbox": [ + 141, + 349, + 469, + 363 + ], + "score": 1.0, + "content": "ing the performance of in-context learning, offers a notable performance-", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 141, + 359, + 469, + 374 + ], + "spans": [ + { + "bbox": [ + 141, + 359, + 469, + 374 + ], + "score": 1.0, + "content": "efficiency trade-off in few-shot scenarios by employing a significantly re-", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 141, + 370, + 469, + 385 + ], + "spans": [ + { + "bbox": [ + 141, + 370, + 469, + 385 + ], + "score": 1.0, + "content": "duced number of tokens per example during inference. Notably, LoraHub", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 141, + 381, + 469, + 395 + ], + "spans": [ + { + "bbox": [ + 141, + 381, + 469, + 395 + ], + "score": 1.0, + "content": "establishes a better upper bound compared to in-context learning when", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 141, + 392, + 470, + 406 + ], + "spans": [ + { + "bbox": [ + 141, + 392, + 470, + 406 + ], + "score": 1.0, + "content": "paired with different demonstration examples, demonstrating its poten-", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 141, + 403, + 470, + 416 + ], + "spans": [ + { + "bbox": [ + 141, + 403, + 470, + 416 + ], + "score": 1.0, + "content": "tial for future development. 
Our vision is to establish a platform for LoRA", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 141, + 414, + 470, + 428 + ], + "spans": [ + { + "bbox": [ + 141, + 414, + 470, + 428 + ], + "score": 1.0, + "content": "modules, empowering users to share their trained LoRA modules. This", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 141, + 425, + 469, + 438 + ], + "spans": [ + { + "bbox": [ + 141, + 425, + 469, + 438 + ], + "score": 1.0, + "content": "collaborative approach facilitates the seamless application of LoRA mod-", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 141, + 436, + 470, + 450 + ], + "spans": [ + { + "bbox": [ + 141, + 436, + 470, + 450 + ], + "score": 1.0, + "content": "ules to novel tasks, contributing to an adaptive ecosystem. Our code is", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 141, + 446, + 469, + 461 + ], + "spans": [ + { + "bbox": [ + 141, + 446, + 469, + 461 + ], + "score": 1.0, + "content": "available at github.com/sail-sg/lorahub, and all the pre-trained LoRA", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 142, + 459, + 363, + 471 + ], + "spans": [ + { + "bbox": [ + 142, + 459, + 363, + 471 + ], + "score": 1.0, + "content": "modules are released at huggingface.co/lorahub.", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 17 + }, + { + "type": "title", + "bbox": [ + 107, + 492, + 195, + 506 + ], + "lines": [ + { + "bbox": [ + 105, + 492, + 196, + 508 + ], + "spans": [ + { + "bbox": [ + 105, + 492, + 196, + 508 + ], + "score": 1.0, + "content": "1 Introduction", + "type": "text" + } + ], + "index": 28 + } + ], + "index": 28 + }, + { + "type": "image", + "bbox": [ + 109, + 520, + 502, + 590 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 109, + 520, + 502, + 590 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 109, + 520, + 502, + 590 + ], + "spans": [ + { + "bbox": [ + 109, + 520, + 502, + 590 + ], + "score": 0.96, + 
"type": "image", + "image_path": "95e2ddec39022b4d6452e07a5b9cddb6f0b9d45a3c19a11a7273387b6b7e1205.jpg" + } + ] + } + ], + "index": 30, + "virtual_lines": [ + { + "bbox": [ + 109, + 520, + 502, + 543.3333333333334 + ], + "spans": [], + "index": 29 + }, + { + "bbox": [ + 109, + 543.3333333333334, + 502, + 566.6666666666667 + ], + "spans": [], + "index": 30 + }, + { + "bbox": [ + 109, + 566.6666666666667, + 502, + 590.0000000000001 + ], + "spans": [], + "index": 31 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 599, + 505, + 654 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 106, + 598, + 505, + 611 + ], + "spans": [ + { + "bbox": [ + 106, + 598, + 505, + 611 + ], + "score": 1.0, + "content": "Figure 1: The illustration of zero-shot learning, few-shot in-context learning and few-shot", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 609, + 505, + 622 + ], + "spans": [ + { + "bbox": [ + 105, + 609, + 505, + 622 + ], + "score": 1.0, + "content": "LoraHub learning (ours). Note that the Compose procedure is conducted per task rather", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 620, + 506, + 635 + ], + "spans": [ + { + "bbox": [ + 105, + 620, + 506, + 635 + ], + "score": 1.0, + "content": "than per example. 
Our method achieves similar inference throughput as zero-shot learn-", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 631, + 505, + 645 + ], + "spans": [ + { + "bbox": [ + 105, + 631, + 505, + 645 + ], + "score": 1.0, + "content": "ing, yet approaches the performance of in-context learning on the BIG-Bench Hard (BBH)", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 106, + 644, + 160, + 654 + ], + "spans": [ + { + "bbox": [ + 106, + 644, + 160, + 654 + ], + "score": 1.0, + "content": "benchmark.", + "type": "text" + } + ], + "index": 36 + } + ], + "index": 34 + } + ], + "index": 32.0 + }, + { + "type": "text", + "bbox": [ + 107, + 669, + 505, + 703 + ], + "lines": [ + { + "bbox": [ + 105, + 668, + 505, + 683 + ], + "spans": [ + { + "bbox": [ + 105, + 668, + 505, + 683 + ], + "score": 1.0, + "content": "Recent progress in natural language processing (NLP) has been largely fueled by large lan-", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 104, + 679, + 505, + 695 + ], + "spans": [ + { + "bbox": [ + 104, + 679, + 505, + 695 + ], + "score": 1.0, + "content": "guage models (LLMs) such as OpenAI GPT (Brown et al., 2020), FLAN-T5 (Chung et al.,", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 690, + 505, + 705 + ], + "spans": [ + { + "bbox": [ + 105, + 690, + 505, + 705 + ], + "score": 1.0, + "content": "2022), and LLaMA (Touvron et al., 2023). These models demonstrate top-tier performance", + "type": "text" + } + ], + "index": 39 + } + ], + "index": 38 + } + ], + "page_idx": 0, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 711, + 504, + 731 + ], + "lines": [ + { + "bbox": [ + 117, + 709, + 506, + 724 + ], + "spans": [ + { + "bbox": [ + 117, + 709, + 506, + 724 + ], + "score": 1.0, + "content": "βˆ—The first three authors contributed equally to this work. 
Correspondence to Qian Liu at", + "type": "text" + } + ] + }, + { + "bbox": [ + 105, + 719, + 180, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 719, + 180, + 733 + ], + "score": 1.0, + "content": "liuqian@sea.com.", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 107, + 26, + 316, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "score": 1.0, + "content": "Published as a conference paper at COLM 2024", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 302, + 751, + 308, + 760 + ], + "lines": [ + { + "bbox": [ + 301, + 750, + 310, + 763 + ], + "spans": [ + { + "bbox": [ + 301, + 750, + 310, + 763 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 13, + "width": 9 + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "title", + "bbox": [ + 111, + 81, + 503, + 115 + ], + "lines": [ + { + "bbox": [ + 109, + 78, + 505, + 102 + ], + "spans": [ + { + "bbox": [ + 109, + 80, + 123, + 97 + ], + "score": 0.0, + "content": "", + "type": "text" + }, + { + "bbox": [ + 124, + 78, + 505, + 102 + ], + "score": 1.0, + "content": "LoraHub: Efficient Cross-Task Generalization via Dy-", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 108, + 97, + 281, + 117 + ], + "spans": [ + { + "bbox": [ + 108, + 97, + 281, + 117 + ], + "score": 1.0, + "content": "namic LoRA Composition", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 0.5 + }, + { + "type": "text", + "bbox": [ + 115, + 135, + 518, + 185 + ], + "lines": [ + { + "bbox": [ + 115, + 134, + 518, + 150 + ], + "spans": [ + { + "bbox": [ + 115, + 134, + 200, + 150 + ], + "score": 1.0, + "content": "Chengsong Huang", + "type": "text" + }, + { + "bbox": [ + 201, + 135, + 213, + 147 + ], + "score": 0.29, + "content": "\\mathbf { \\Delta } \\mathbf { \\dag \\ S \\mathrm { \\ s \\mathrm { \\ s } } }", + "type": "inline_equation" + }, + { + "bbox": [ + 213, 
+ 134, + 327, + 150 + ], + "score": 1.0, + "content": ", Qian Liuβ€ βˆ—, Bill Yuchen", + "type": "text" + }, + { + "bbox": [ + 327, + 135, + 354, + 148 + ], + "score": 0.7, + "content": "\\mathbf { L i n } ^ { \\bigotimes * }", + "type": "inline_equation" + }, + { + "bbox": [ + 354, + 134, + 451, + 150 + ], + "score": 1.0, + "content": ", Tianyu Pang†, Chao", + "type": "text" + }, + { + "bbox": [ + 451, + 135, + 471, + 148 + ], + "score": 0.49, + "content": "{ { \\mathbf { D } } { { \\mathbf { u } } } ^ { \\dag } }", + "type": "inline_equation" + }, + { + "bbox": [ + 471, + 134, + 518, + 150 + ], + "score": 1.0, + "content": ", Min Lin†", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 260, + 147, + 364, + 162 + ], + "spans": [ + { + "bbox": [ + 260, + 147, + 364, + 162 + ], + "score": 1.0, + "content": "†Sea AI Lab, Singapore", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 205, + 158, + 417, + 175 + ], + "spans": [ + { + "bbox": [ + 205, + 158, + 417, + 175 + ], + "score": 1.0, + "content": "Β§Washington University in St. 
Louis, MO, USA", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 219, + 171, + 403, + 187 + ], + "spans": [ + { + "bbox": [ + 219, + 171, + 403, + 187 + ], + "score": 1.0, + "content": "β™’Allen Institute for AI, Seattle, WA, USA", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 3.5, + "bbox_fs": [ + 115, + 134, + 518, + 187 + ] + }, + { + "type": "title", + "bbox": [ + 282, + 213, + 329, + 226 + ], + "lines": [ + { + "bbox": [ + 281, + 213, + 331, + 228 + ], + "spans": [ + { + "bbox": [ + 281, + 213, + 331, + 228 + ], + "score": 1.0, + "content": "Abstract", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 6 + }, + { + "type": "text", + "bbox": [ + 143, + 239, + 468, + 470 + ], + "lines": [ + { + "bbox": [ + 141, + 239, + 469, + 252 + ], + "spans": [ + { + "bbox": [ + 141, + 239, + 469, + 252 + ], + "score": 1.0, + "content": "Low-rank adaptations (LoRA) are often employed to fine-tune large lan-", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 140, + 249, + 470, + 264 + ], + "spans": [ + { + "bbox": [ + 140, + 249, + 470, + 264 + ], + "score": 1.0, + "content": "guage models (LLMs) for new tasks. 
This paper investigates LoRA com-", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 140, + 261, + 470, + 275 + ], + "spans": [ + { + "bbox": [ + 140, + 261, + 470, + 275 + ], + "score": 1.0, + "content": "posability for cross-task generalization and introduces LoraHub, a simple", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 141, + 272, + 470, + 285 + ], + "spans": [ + { + "bbox": [ + 141, + 272, + 470, + 285 + ], + "score": 1.0, + "content": "framework devised for the purposive assembly of LoRA modules trained", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 141, + 283, + 470, + 296 + ], + "spans": [ + { + "bbox": [ + 141, + 283, + 470, + 296 + ], + "score": 1.0, + "content": "on diverse given tasks, with the objective of achieving adaptable perfor-", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 141, + 294, + 469, + 307 + ], + "spans": [ + { + "bbox": [ + 141, + 294, + 469, + 307 + ], + "score": 1.0, + "content": "mance on unseen tasks. With just a few examples from a new task, Lo-", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 141, + 305, + 470, + 319 + ], + "spans": [ + { + "bbox": [ + 141, + 305, + 470, + 319 + ], + "score": 1.0, + "content": "raHub can fluidly combine multiple LoRA modules, eliminating the need", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 141, + 316, + 469, + 330 + ], + "spans": [ + { + "bbox": [ + 141, + 316, + 469, + 330 + ], + "score": 1.0, + "content": "for human expertise and assumptions. Notably, the composition requires", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 141, + 327, + 469, + 340 + ], + "spans": [ + { + "bbox": [ + 141, + 327, + 469, + 340 + ], + "score": 1.0, + "content": "neither additional model parameters nor gradients. 
Empirical results on", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 141, + 338, + 469, + 352 + ], + "spans": [ + { + "bbox": [ + 141, + 338, + 469, + 352 + ], + "score": 1.0, + "content": "the Big-Bench Hard benchmark suggest that LoraHub, while not surpass-", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 141, + 349, + 469, + 363 + ], + "spans": [ + { + "bbox": [ + 141, + 349, + 469, + 363 + ], + "score": 1.0, + "content": "ing the performance of in-context learning, offers a notable performance-", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 141, + 359, + 469, + 374 + ], + "spans": [ + { + "bbox": [ + 141, + 359, + 469, + 374 + ], + "score": 1.0, + "content": "efficiency trade-off in few-shot scenarios by employing a significantly re-", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 141, + 370, + 469, + 385 + ], + "spans": [ + { + "bbox": [ + 141, + 370, + 469, + 385 + ], + "score": 1.0, + "content": "duced number of tokens per example during inference. Notably, LoraHub", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 141, + 381, + 469, + 395 + ], + "spans": [ + { + "bbox": [ + 141, + 381, + 469, + 395 + ], + "score": 1.0, + "content": "establishes a better upper bound compared to in-context learning when", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 141, + 392, + 470, + 406 + ], + "spans": [ + { + "bbox": [ + 141, + 392, + 470, + 406 + ], + "score": 1.0, + "content": "paired with different demonstration examples, demonstrating its poten-", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 141, + 403, + 470, + 416 + ], + "spans": [ + { + "bbox": [ + 141, + 403, + 470, + 416 + ], + "score": 1.0, + "content": "tial for future development. 
Our vision is to establish a platform for LoRA", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 141, + 414, + 470, + 428 + ], + "spans": [ + { + "bbox": [ + 141, + 414, + 470, + 428 + ], + "score": 1.0, + "content": "modules, empowering users to share their trained LoRA modules. This", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 141, + 425, + 469, + 438 + ], + "spans": [ + { + "bbox": [ + 141, + 425, + 469, + 438 + ], + "score": 1.0, + "content": "collaborative approach facilitates the seamless application of LoRA mod-", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 141, + 436, + 470, + 450 + ], + "spans": [ + { + "bbox": [ + 141, + 436, + 470, + 450 + ], + "score": 1.0, + "content": "ules to novel tasks, contributing to an adaptive ecosystem. Our code is", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 141, + 446, + 469, + 461 + ], + "spans": [ + { + "bbox": [ + 141, + 446, + 469, + 461 + ], + "score": 1.0, + "content": "available at github.com/sail-sg/lorahub, and all the pre-trained LoRA", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 142, + 459, + 363, + 471 + ], + "spans": [ + { + "bbox": [ + 142, + 459, + 363, + 471 + ], + "score": 1.0, + "content": "modules are released at huggingface.co/lorahub.", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 17, + "bbox_fs": [ + 140, + 239, + 470, + 471 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 492, + 195, + 506 + ], + "lines": [ + { + "bbox": [ + 105, + 492, + 196, + 508 + ], + "spans": [ + { + "bbox": [ + 105, + 492, + 196, + 508 + ], + "score": 1.0, + "content": "1 Introduction", + "type": "text" + } + ], + "index": 28 + } + ], + "index": 28 + }, + { + "type": "image", + "bbox": [ + 109, + 520, + 502, + 590 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 109, + 520, + 502, + 590 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 109, + 520, + 502, + 590 + ], + "spans": [ + { + "bbox": [ + 109, + 
520, + 502, + 590 + ], + "score": 0.96, + "type": "image", + "image_path": "95e2ddec39022b4d6452e07a5b9cddb6f0b9d45a3c19a11a7273387b6b7e1205.jpg" + } + ] + } + ], + "index": 30, + "virtual_lines": [ + { + "bbox": [ + 109, + 520, + 502, + 543.3333333333334 + ], + "spans": [], + "index": 29 + }, + { + "bbox": [ + 109, + 543.3333333333334, + 502, + 566.6666666666667 + ], + "spans": [], + "index": 30 + }, + { + "bbox": [ + 109, + 566.6666666666667, + 502, + 590.0000000000001 + ], + "spans": [], + "index": 31 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 599, + 505, + 654 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 106, + 598, + 505, + 611 + ], + "spans": [ + { + "bbox": [ + 106, + 598, + 505, + 611 + ], + "score": 1.0, + "content": "Figure 1: The illustration of zero-shot learning, few-shot in-context learning and few-shot", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 609, + 505, + 622 + ], + "spans": [ + { + "bbox": [ + 105, + 609, + 505, + 622 + ], + "score": 1.0, + "content": "LoraHub learning (ours). Note that the Compose procedure is conducted per task rather", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 620, + 506, + 635 + ], + "spans": [ + { + "bbox": [ + 105, + 620, + 506, + 635 + ], + "score": 1.0, + "content": "than per example. 
Our method achieves similar inference throughput as zero-shot learn-", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 631, + 505, + 645 + ], + "spans": [ + { + "bbox": [ + 105, + 631, + 505, + 645 + ], + "score": 1.0, + "content": "ing, yet approaches the performance of in-context learning on the BIG-Bench Hard (BBH)", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 106, + 644, + 160, + 654 + ], + "spans": [ + { + "bbox": [ + 106, + 644, + 160, + 654 + ], + "score": 1.0, + "content": "benchmark.", + "type": "text" + } + ], + "index": 36 + } + ], + "index": 34 + } + ], + "index": 32.0 + }, + { + "type": "text", + "bbox": [ + 107, + 669, + 505, + 703 + ], + "lines": [ + { + "bbox": [ + 105, + 668, + 505, + 683 + ], + "spans": [ + { + "bbox": [ + 105, + 668, + 505, + 683 + ], + "score": 1.0, + "content": "Recent progress in natural language processing (NLP) has been largely fueled by large lan-", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 104, + 679, + 505, + 695 + ], + "spans": [ + { + "bbox": [ + 104, + 679, + 505, + 695 + ], + "score": 1.0, + "content": "guage models (LLMs) such as OpenAI GPT (Brown et al., 2020), FLAN-T5 (Chung et al.,", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 690, + 505, + 705 + ], + "spans": [ + { + "bbox": [ + 105, + 690, + 505, + 705 + ], + "score": 1.0, + "content": "2022), and LLaMA (Touvron et al., 2023). These models demonstrate top-tier performance", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 81, + 505, + 96 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 96 + ], + "score": 1.0, + "content": "across different NLP tasks. 
However, their enormous parameter size presents issues re-", + "type": "text", + "cross_page": true + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 93, + 506, + 108 + ], + "spans": [ + { + "bbox": [ + 105, + 93, + 506, + 108 + ], + "score": 1.0, + "content": "garding computational efficiency and memory usage during fine-tuning. To mitigate these", + "type": "text", + "cross_page": true + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 103, + 505, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 103, + 505, + 118 + ], + "score": 1.0, + "content": "challenges, Low-Rank Adaptation (LoRA) (Hu et al., 2022) has emerged as a parameter-", + "type": "text", + "cross_page": true + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 114, + 505, + 129 + ], + "spans": [ + { + "bbox": [ + 105, + 114, + 505, + 129 + ], + "score": 1.0, + "content": "efficient fine-tuning technique (Lester et al., 2021; He et al., 2022; An et al., 2022). By reduc-", + "type": "text", + "cross_page": true + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 125, + 506, + 140 + ], + "spans": [ + { + "bbox": [ + 105, + 125, + 506, + 140 + ], + "score": 1.0, + "content": "ing memory demands and computational costs, it speeds up LLM training. 
LoRA achieves", + "type": "text", + "cross_page": true + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 136, + 506, + 152 + ], + "spans": [ + { + "bbox": [ + 105, + 136, + 506, + 152 + ], + "score": 1.0, + "content": "this by freezing the base model parameters (that is, an LLM) and training a lightweight", + "type": "text", + "cross_page": true + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 147, + 407, + 163 + ], + "spans": [ + { + "bbox": [ + 105, + 147, + 407, + 163 + ], + "score": 1.0, + "content": "module, which regularly delivers high performance on target tasks.", + "type": "text", + "cross_page": true + } + ], + "index": 6 + } + ], + "index": 38, + "bbox_fs": [ + 104, + 668, + 505, + 705 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 107, + 82, + 504, + 160 + ], + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 96 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 96 + ], + "score": 1.0, + "content": "across different NLP tasks. However, their enormous parameter size presents issues re-", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 93, + 506, + 108 + ], + "spans": [ + { + "bbox": [ + 105, + 93, + 506, + 108 + ], + "score": 1.0, + "content": "garding computational efficiency and memory usage during fine-tuning. To mitigate these", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 103, + 505, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 103, + 505, + 118 + ], + "score": 1.0, + "content": "challenges, Low-Rank Adaptation (LoRA) (Hu et al., 2022) has emerged as a parameter-", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 114, + 505, + 129 + ], + "spans": [ + { + "bbox": [ + 105, + 114, + 505, + 129 + ], + "score": 1.0, + "content": "efficient fine-tuning technique (Lester et al., 2021; He et al., 2022; An et al., 2022). 
By reduc-", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 125, + 506, + 140 + ], + "spans": [ + { + "bbox": [ + 105, + 125, + 506, + 140 + ], + "score": 1.0, + "content": "ing memory demands and computational costs, it speeds up LLM training. LoRA achieves", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 136, + 506, + 152 + ], + "spans": [ + { + "bbox": [ + 105, + 136, + 506, + 152 + ], + "score": 1.0, + "content": "this by freezing the base model parameters (that is, an LLM) and training a lightweight", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 147, + 407, + 163 + ], + "spans": [ + { + "bbox": [ + 105, + 147, + 407, + 163 + ], + "score": 1.0, + "content": "module, which regularly delivers high performance on target tasks.", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 3 + }, + { + "type": "text", + "bbox": [ + 107, + 165, + 505, + 330 + ], + "lines": [ + { + "bbox": [ + 105, + 164, + 506, + 178 + ], + "spans": [ + { + "bbox": [ + 105, + 164, + 506, + 178 + ], + "score": 1.0, + "content": "While prior research has targeted the efficiency enhancement facilitated by LoRA, there is a", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 175, + 505, + 190 + ], + "spans": [ + { + "bbox": [ + 105, + 175, + 505, + 190 + ], + "score": 1.0, + "content": "dearth of investigation into the inherent modularity and composability of LoRA modules.", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 187, + 505, + 200 + ], + "spans": [ + { + "bbox": [ + 105, + 187, + 505, + 200 + ], + "score": 1.0, + "content": "Typically, previous methods train LoRA modules to specialize in individual tasks. 
Yet, the", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 198, + 506, + 211 + ], + "spans": [ + { + "bbox": [ + 105, + 198, + 506, + 211 + ], + "score": 1.0, + "content": "intrinsic modularity of LoRA modules presents an intriguing research question: Would it", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 209, + 506, + 222 + ], + "spans": [ + { + "bbox": [ + 105, + 209, + 506, + 222 + ], + "score": 1.0, + "content": "be possible to compose LoRA modules to generalize to novel tasks in an efficient manner?", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 218, + 506, + 234 + ], + "spans": [ + { + "bbox": [ + 104, + 218, + 506, + 234 + ], + "score": 1.0, + "content": "In this paper, we tap into the potential of LoRA modularity for broad task generalization,", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 230, + 506, + 244 + ], + "spans": [ + { + "bbox": [ + 105, + 230, + 506, + 244 + ], + "score": 1.0, + "content": "going beyond single-task training to meticulously compose LoRA modules for malleable", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 242, + 506, + 255 + ], + "spans": [ + { + "bbox": [ + 104, + 242, + 506, + 255 + ], + "score": 1.0, + "content": "performance on unknown tasks. Crucially, our method enables an automatic assembling of", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 252, + 506, + 266 + ], + "spans": [ + { + "bbox": [ + 105, + 252, + 506, + 266 + ], + "score": 1.0, + "content": "LoRA modules, eliminating dependency on manual design or human expertise. 
With just", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 263, + 506, + 277 + ], + "spans": [ + { + "bbox": [ + 104, + 263, + 506, + 277 + ], + "score": 1.0, + "content": "a handful of examples from new tasks (e.g., 5), our approach can autonomously compose", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 274, + 506, + 288 + ], + "spans": [ + { + "bbox": [ + 105, + 274, + 506, + 288 + ], + "score": 1.0, + "content": "compatible LoRA modules without human intrusion. We do not make assumptions about", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 284, + 506, + 300 + ], + "spans": [ + { + "bbox": [ + 105, + 284, + 506, + 300 + ], + "score": 1.0, + "content": "which LoRA modules trained on particular tasks can be combined, allowing for flexibility", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 296, + 505, + 310 + ], + "spans": [ + { + "bbox": [ + 105, + 296, + 505, + 310 + ], + "score": 1.0, + "content": "in amalgamating any modules as long as they conform to the specification (e.g., using the", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 307, + 506, + 321 + ], + "spans": [ + { + "bbox": [ + 105, + 307, + 506, + 321 + ], + "score": 1.0, + "content": "same LLM). 
As our approach leverages several available LoRA modules, we refer to it as", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 317, + 393, + 333 + ], + "spans": [ + { + "bbox": [ + 105, + 317, + 393, + 333 + ], + "score": 1.0, + "content": "LoraHub and denote our learning method as LoraHub learning.", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 14 + }, + { + "type": "text", + "bbox": [ + 107, + 336, + 505, + 522 + ], + "lines": [ + { + "bbox": [ + 105, + 334, + 505, + 349 + ], + "spans": [ + { + "bbox": [ + 105, + 334, + 505, + 349 + ], + "score": 1.0, + "content": "To validate the efficiency of our proposed methods, we test our approaches using the", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 345, + 506, + 360 + ], + "spans": [ + { + "bbox": [ + 105, + 345, + 506, + 360 + ], + "score": 1.0, + "content": "widely recognized BBH benchmark with FLAN-T5 (Chung et al., 2022) serving as the", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 356, + 505, + 370 + ], + "spans": [ + { + "bbox": [ + 105, + 356, + 505, + 370 + ], + "score": 1.0, + "content": "base LLM. The results underline the effectiveness of the LoRA module composition for", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 366, + 505, + 383 + ], + "spans": [ + { + "bbox": [ + 105, + 366, + 505, + 383 + ], + "score": 1.0, + "content": "unfamiliar tasks through a few-shot LoraHub learning process. 
Notably, our methodology", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 104, + 378, + 506, + 394 + ], + "spans": [ + { + "bbox": [ + 104, + 378, + 506, + 394 + ], + "score": 1.0, + "content": "achieves an average performance that closely matches that of few-shot in-context learning,", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 390, + 505, + 404 + ], + "spans": [ + { + "bbox": [ + 105, + 390, + 505, + 404 + ], + "score": 1.0, + "content": "while demonstrating a superior upper bound, particularly when using different demon-", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 401, + 505, + 414 + ], + "spans": [ + { + "bbox": [ + 105, + 401, + 505, + 414 + ], + "score": 1.0, + "content": "stration examples. Additionally, our method substantially reduces the inference cost com-", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 411, + 505, + 426 + ], + "spans": [ + { + "bbox": [ + 105, + 411, + 505, + 426 + ], + "score": 1.0, + "content": "pared to in-context learning, eliminating the requirement of examples as inputs for the", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 421, + 506, + 438 + ], + "spans": [ + { + "bbox": [ + 105, + 421, + 506, + 438 + ], + "score": 1.0, + "content": "LLM. With fewer tokens per example during inference, our method significantly reduces", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 433, + 505, + 447 + ], + "spans": [ + { + "bbox": [ + 105, + 433, + 505, + 447 + ], + "score": 1.0, + "content": "computational overhead and enables faster responses. 
It aligns with a broader research", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 444, + 505, + 459 + ], + "spans": [ + { + "bbox": [ + 105, + 444, + 505, + 459 + ], + "score": 1.0, + "content": "trend, where recent studies are actively exploring approaches to reduce the number of in-", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 104, + 455, + 506, + 470 + ], + "spans": [ + { + "bbox": [ + 104, + 455, + 506, + 470 + ], + "score": 1.0, + "content": "put tokens (Zhou et al., 2023; Ge et al., 2023; Chevalier et al., 2023; Jiang et al., 2023a; Li", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 466, + 506, + 481 + ], + "spans": [ + { + "bbox": [ + 105, + 466, + 506, + 481 + ], + "score": 1.0, + "content": "et al., 2023; Jiang et al., 2023b). Our learning procedure is also notable for its computational", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 477, + 506, + 492 + ], + "spans": [ + { + "bbox": [ + 105, + 477, + 506, + 492 + ], + "score": 1.0, + "content": "efficiency, using a gradient-free approach to obtain the coefficients of LoRA modules and", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 488, + 505, + 502 + ], + "spans": [ + { + "bbox": [ + 105, + 488, + 505, + 502 + ], + "score": 1.0, + "content": "requiring only a handful of inference steps for unseen tasks. 
For example, when applied", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 499, + 507, + 515 + ], + "spans": [ + { + "bbox": [ + 105, + 499, + 507, + 515 + ], + "score": 1.0, + "content": "to a new task in BBH, our methodology can deliver superior performance in less than a", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 511, + 253, + 524 + ], + "spans": [ + { + "bbox": [ + 105, + 511, + 253, + 524 + ], + "score": 1.0, + "content": "minute using a single A100 card.", + "type": "text" + } + ], + "index": 38 + } + ], + "index": 30 + }, + { + "type": "text", + "bbox": [ + 107, + 527, + 505, + 660 + ], + "lines": [ + { + "bbox": [ + 106, + 527, + 505, + 541 + ], + "spans": [ + { + "bbox": [ + 106, + 527, + 505, + 541 + ], + "score": 1.0, + "content": "Importantly, LoraHub learning can feasibly be accomplished with a CPU-only machine,", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 538, + 506, + 552 + ], + "spans": [ + { + "bbox": [ + 105, + 538, + 506, + 552 + ], + "score": 1.0, + "content": "requiring proficiency solely for processing LLM inference. In our pursuit to democratize", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 549, + 505, + 563 + ], + "spans": [ + { + "bbox": [ + 105, + 549, + 505, + 563 + ], + "score": 1.0, + "content": "artificial intelligence, we are taking an important step forward by envisioning the establish-", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 560, + 506, + 575 + ], + "spans": [ + { + "bbox": [ + 105, + 560, + 506, + 575 + ], + "score": 1.0, + "content": "ment of the LoRA platform. The platform would serve as a marketplace where users can", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 571, + 505, + 585 + ], + "spans": [ + { + "bbox": [ + 105, + 571, + 505, + 585 + ], + "score": 1.0, + "content": "seamlessly share and access well-trained LoRA modules for diverse applications. 
LoRA", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 104, + 582, + 506, + 596 + ], + "spans": [ + { + "bbox": [ + 104, + 582, + 506, + 596 + ], + "score": 1.0, + "content": "providers have the flexibility to freely share or sell their modules on the platform without", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 593, + 506, + 607 + ], + "spans": [ + { + "bbox": [ + 105, + 593, + 506, + 607 + ], + "score": 1.0, + "content": "compromising data privacy. Users, equipped with CPU capability, can leverage trained", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 604, + 505, + 617 + ], + "spans": [ + { + "bbox": [ + 105, + 604, + 505, + 617 + ], + "score": 1.0, + "content": "LoRA modules contributed by others through automated distribution and composition al-", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 105, + 615, + 506, + 629 + ], + "spans": [ + { + "bbox": [ + 105, + 615, + 506, + 629 + ], + "score": 1.0, + "content": "gorithms. This platform not only cultivates a repository of reusable LoRA modules with a", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 104, + 625, + 506, + 640 + ], + "spans": [ + { + "bbox": [ + 104, + 625, + 506, + 640 + ], + "score": 1.0, + "content": "myriad of capabilities but also sets the stage for cooperative AI development. 
It empow-", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 105, + 637, + 506, + 652 + ], + "spans": [ + { + "bbox": [ + 105, + 637, + 506, + 652 + ], + "score": 1.0, + "content": "ers the community to collectively enrich the LLM’s capabilities through dynamic LoRA", + "type": "text" + } + ], + "index": 49 + }, + { + "bbox": [ + 105, + 648, + 167, + 662 + ], + "spans": [ + { + "bbox": [ + 105, + 648, + 167, + 662 + ], + "score": 1.0, + "content": "composition.", + "type": "text" + } + ], + "index": 50 + } + ], + "index": 44.5 + }, + { + "type": "title", + "bbox": [ + 108, + 681, + 231, + 694 + ], + "lines": [ + { + "bbox": [ + 104, + 679, + 232, + 696 + ], + "spans": [ + { + "bbox": [ + 104, + 679, + 232, + 696 + ], + "score": 1.0, + "content": "2 Problem Statement", + "type": "text" + } + ], + "index": 51 + } + ], + "index": 51 + }, + { + "type": "text", + "bbox": [ + 107, + 709, + 503, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 708, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 405, + 723 + ], + "score": 1.0, + "content": "Large Language Models We assume that a large language model", + "type": "text" + }, + { + "bbox": [ + 406, + 710, + 421, + 721 + ], + "score": 0.88, + "content": "M _ { \\theta }", + "type": "inline_equation" + }, + { + "bbox": [ + 421, + 708, + 505, + 723 + ], + "score": 1.0, + "content": "is based on Trans-", + "type": "text" + } + ], + "index": 52 + }, + { + "bbox": [ + 105, + 719, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 719, + 505, + 733 + ], + "score": 1.0, + "content": "former architecture (Vaswani et al., 2017) and has been pre-trained on a large-scale text cor-", + "type": "text" + } + ], + "index": 53 + } + ], + "index": 52.5 + } + ], + "page_idx": 1, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 27, + 316, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 316, 
+ 39 + ], + "score": 1.0, + "content": "Published as a conference paper at COLM 2024", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 302, + 751, + 308, + 760 + ], + "lines": [ + { + "bbox": [ + 302, + 750, + 310, + 763 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 310, + 763 + ], + "score": 1.0, + "content": "2", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 107, + 82, + 504, + 160 + ], + "lines": [], + "index": 3, + "bbox_fs": [ + 105, + 81, + 506, + 163 + ], + "lines_deleted": true + }, + { + "type": "text", + "bbox": [ + 107, + 165, + 505, + 330 + ], + "lines": [ + { + "bbox": [ + 105, + 164, + 506, + 178 + ], + "spans": [ + { + "bbox": [ + 105, + 164, + 506, + 178 + ], + "score": 1.0, + "content": "While prior research has targeted the efficiency enhancement facilitated by LoRA, there is a", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 175, + 505, + 190 + ], + "spans": [ + { + "bbox": [ + 105, + 175, + 505, + 190 + ], + "score": 1.0, + "content": "dearth of investigation into the inherent modularity and composability of LoRA modules.", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 187, + 505, + 200 + ], + "spans": [ + { + "bbox": [ + 105, + 187, + 505, + 200 + ], + "score": 1.0, + "content": "Typically, previous methods train LoRA modules to specialize in individual tasks. 
Yet, the", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 198, + 506, + 211 + ], + "spans": [ + { + "bbox": [ + 105, + 198, + 506, + 211 + ], + "score": 1.0, + "content": "intrinsic modularity of LoRA modules presents an intriguing research question: Would it", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 209, + 506, + 222 + ], + "spans": [ + { + "bbox": [ + 105, + 209, + 506, + 222 + ], + "score": 1.0, + "content": "be possible to compose LoRA modules to generalize to novel tasks in an efficient manner?", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 218, + 506, + 234 + ], + "spans": [ + { + "bbox": [ + 104, + 218, + 506, + 234 + ], + "score": 1.0, + "content": "In this paper, we tap into the potential of LoRA modularity for broad task generalization,", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 230, + 506, + 244 + ], + "spans": [ + { + "bbox": [ + 105, + 230, + 506, + 244 + ], + "score": 1.0, + "content": "going beyond single-task training to meticulously compose LoRA modules for malleable", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 242, + 506, + 255 + ], + "spans": [ + { + "bbox": [ + 104, + 242, + 506, + 255 + ], + "score": 1.0, + "content": "performance on unknown tasks. Crucially, our method enables an automatic assembling of", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 252, + 506, + 266 + ], + "spans": [ + { + "bbox": [ + 105, + 252, + 506, + 266 + ], + "score": 1.0, + "content": "LoRA modules, eliminating dependency on manual design or human expertise. 
With just", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 263, + 506, + 277 + ], + "spans": [ + { + "bbox": [ + 104, + 263, + 506, + 277 + ], + "score": 1.0, + "content": "a handful of examples from new tasks (e.g., 5), our approach can autonomously compose", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 274, + 506, + 288 + ], + "spans": [ + { + "bbox": [ + 105, + 274, + 506, + 288 + ], + "score": 1.0, + "content": "compatible LoRA modules without human intrusion. We do not make assumptions about", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 284, + 506, + 300 + ], + "spans": [ + { + "bbox": [ + 105, + 284, + 506, + 300 + ], + "score": 1.0, + "content": "which LoRA modules trained on particular tasks can be combined, allowing for flexibility", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 296, + 505, + 310 + ], + "spans": [ + { + "bbox": [ + 105, + 296, + 505, + 310 + ], + "score": 1.0, + "content": "in amalgamating any modules as long as they conform to the specification (e.g., using the", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 307, + 506, + 321 + ], + "spans": [ + { + "bbox": [ + 105, + 307, + 506, + 321 + ], + "score": 1.0, + "content": "same LLM). 
As our approach leverages several available LoRA modules, we refer to it as", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 317, + 393, + 333 + ], + "spans": [ + { + "bbox": [ + 105, + 317, + 393, + 333 + ], + "score": 1.0, + "content": "LoraHub and denote our learning method as LoraHub learning.", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 14, + "bbox_fs": [ + 104, + 164, + 506, + 333 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 336, + 505, + 522 + ], + "lines": [ + { + "bbox": [ + 105, + 334, + 505, + 349 + ], + "spans": [ + { + "bbox": [ + 105, + 334, + 505, + 349 + ], + "score": 1.0, + "content": "To validate the efficiency of our proposed methods, we test our approaches using the", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 345, + 506, + 360 + ], + "spans": [ + { + "bbox": [ + 105, + 345, + 506, + 360 + ], + "score": 1.0, + "content": "widely recognized BBH benchmark with FLAN-T5 (Chung et al., 2022) serving as the", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 356, + 505, + 370 + ], + "spans": [ + { + "bbox": [ + 105, + 356, + 505, + 370 + ], + "score": 1.0, + "content": "base LLM. The results underline the effectiveness of the LoRA module composition for", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 366, + 505, + 383 + ], + "spans": [ + { + "bbox": [ + 105, + 366, + 505, + 383 + ], + "score": 1.0, + "content": "unfamiliar tasks through a few-shot LoraHub learning process. 
Notably, our methodology", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 104, + 378, + 506, + 394 + ], + "spans": [ + { + "bbox": [ + 104, + 378, + 506, + 394 + ], + "score": 1.0, + "content": "achieves an average performance that closely matches that of few-shot in-context learning,", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 390, + 505, + 404 + ], + "spans": [ + { + "bbox": [ + 105, + 390, + 505, + 404 + ], + "score": 1.0, + "content": "while demonstrating a superior upper bound, particularly when using different demon-", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 401, + 505, + 414 + ], + "spans": [ + { + "bbox": [ + 105, + 401, + 505, + 414 + ], + "score": 1.0, + "content": "stration examples. Additionally, our method substantially reduces the inference cost com-", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 411, + 505, + 426 + ], + "spans": [ + { + "bbox": [ + 105, + 411, + 505, + 426 + ], + "score": 1.0, + "content": "pared to in-context learning, eliminating the requirement of examples as inputs for the", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 421, + 506, + 438 + ], + "spans": [ + { + "bbox": [ + 105, + 421, + 506, + 438 + ], + "score": 1.0, + "content": "LLM. With fewer tokens per example during inference, our method significantly reduces", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 433, + 505, + 447 + ], + "spans": [ + { + "bbox": [ + 105, + 433, + 505, + 447 + ], + "score": 1.0, + "content": "computational overhead and enables faster responses. 
It aligns with a broader research", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 444, + 505, + 459 + ], + "spans": [ + { + "bbox": [ + 105, + 444, + 505, + 459 + ], + "score": 1.0, + "content": "trend, where recent studies are actively exploring approaches to reduce the number of in-", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 104, + 455, + 506, + 470 + ], + "spans": [ + { + "bbox": [ + 104, + 455, + 506, + 470 + ], + "score": 1.0, + "content": "put tokens (Zhou et al., 2023; Ge et al., 2023; Chevalier et al., 2023; Jiang et al., 2023a; Li", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 466, + 506, + 481 + ], + "spans": [ + { + "bbox": [ + 105, + 466, + 506, + 481 + ], + "score": 1.0, + "content": "et al., 2023; Jiang et al., 2023b). Our learning procedure is also notable for its computational", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 477, + 506, + 492 + ], + "spans": [ + { + "bbox": [ + 105, + 477, + 506, + 492 + ], + "score": 1.0, + "content": "efficiency, using a gradient-free approach to obtain the coefficients of LoRA modules and", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 488, + 505, + 502 + ], + "spans": [ + { + "bbox": [ + 105, + 488, + 505, + 502 + ], + "score": 1.0, + "content": "requiring only a handful of inference steps for unseen tasks. 
For example, when applied", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 499, + 507, + 515 + ], + "spans": [ + { + "bbox": [ + 105, + 499, + 507, + 515 + ], + "score": 1.0, + "content": "to a new task in BBH, our methodology can deliver superior performance in less than a", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 511, + 253, + 524 + ], + "spans": [ + { + "bbox": [ + 105, + 511, + 253, + 524 + ], + "score": 1.0, + "content": "minute using a single A100 card.", + "type": "text" + } + ], + "index": 38 + } + ], + "index": 30, + "bbox_fs": [ + 104, + 334, + 507, + 524 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 527, + 505, + 660 + ], + "lines": [ + { + "bbox": [ + 106, + 527, + 505, + 541 + ], + "spans": [ + { + "bbox": [ + 106, + 527, + 505, + 541 + ], + "score": 1.0, + "content": "Importantly, LoraHub learning can feasibly be accomplished with a CPU-only machine,", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 538, + 506, + 552 + ], + "spans": [ + { + "bbox": [ + 105, + 538, + 506, + 552 + ], + "score": 1.0, + "content": "requiring proficiency solely for processing LLM inference. In our pursuit to democratize", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 549, + 505, + 563 + ], + "spans": [ + { + "bbox": [ + 105, + 549, + 505, + 563 + ], + "score": 1.0, + "content": "artificial intelligence, we are taking an important step forward by envisioning the establish-", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 560, + 506, + 575 + ], + "spans": [ + { + "bbox": [ + 105, + 560, + 506, + 575 + ], + "score": 1.0, + "content": "ment of the LoRA platform. 
The platform would serve as a marketplace where users can", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 571, + 505, + 585 + ], + "spans": [ + { + "bbox": [ + 105, + 571, + 505, + 585 + ], + "score": 1.0, + "content": "seamlessly share and access well-trained LoRA modules for diverse applications. LoRA", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 104, + 582, + 506, + 596 + ], + "spans": [ + { + "bbox": [ + 104, + 582, + 506, + 596 + ], + "score": 1.0, + "content": "providers have the flexibility to freely share or sell their modules on the platform without", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 593, + 506, + 607 + ], + "spans": [ + { + "bbox": [ + 105, + 593, + 506, + 607 + ], + "score": 1.0, + "content": "compromising data privacy. Users, equipped with CPU capability, can leverage trained", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 604, + 505, + 617 + ], + "spans": [ + { + "bbox": [ + 105, + 604, + 505, + 617 + ], + "score": 1.0, + "content": "LoRA modules contributed by others through automated distribution and composition al-", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 105, + 615, + 506, + 629 + ], + "spans": [ + { + "bbox": [ + 105, + 615, + 506, + 629 + ], + "score": 1.0, + "content": "gorithms. This platform not only cultivates a repository of reusable LoRA modules with a", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 104, + 625, + 506, + 640 + ], + "spans": [ + { + "bbox": [ + 104, + 625, + 506, + 640 + ], + "score": 1.0, + "content": "myriad of capabilities but also sets the stage for cooperative AI development. 
It empow-", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 105, + 637, + 506, + 652 + ], + "spans": [ + { + "bbox": [ + 105, + 637, + 506, + 652 + ], + "score": 1.0, + "content": "ers the community to collectively enrich the LLM’s capabilities through dynamic LoRA", + "type": "text" + } + ], + "index": 49 + }, + { + "bbox": [ + 105, + 648, + 167, + 662 + ], + "spans": [ + { + "bbox": [ + 105, + 648, + 167, + 662 + ], + "score": 1.0, + "content": "composition.", + "type": "text" + } + ], + "index": 50 + } + ], + "index": 44.5, + "bbox_fs": [ + 104, + 527, + 506, + 662 + ] + }, + { + "type": "title", + "bbox": [ + 108, + 681, + 231, + 694 + ], + "lines": [ + { + "bbox": [ + 104, + 679, + 232, + 696 + ], + "spans": [ + { + "bbox": [ + 104, + 679, + 232, + 696 + ], + "score": 1.0, + "content": "2 Problem Statement", + "type": "text" + } + ], + "index": 51 + } + ], + "index": 51 + }, + { + "type": "text", + "bbox": [ + 107, + 709, + 503, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 708, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 405, + 723 + ], + "score": 1.0, + "content": "Large Language Models We assume that a large language model", + "type": "text" + }, + { + "bbox": [ + 406, + 710, + 421, + 721 + ], + "score": 0.88, + "content": "M _ { \\theta }", + "type": "inline_equation" + }, + { + "bbox": [ + 421, + 708, + 505, + 723 + ], + "score": 1.0, + "content": "is based on Trans-", + "type": "text" + } + ], + "index": 52 + }, + { + "bbox": [ + 105, + 719, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 719, + 505, + 733 + ], + "score": 1.0, + "content": "former architecture (Vaswani et al., 2017) and has been pre-trained on a large-scale text cor-", + "type": "text" + } + ], + "index": 53 + }, + { + "bbox": [ + 105, + 81, + 506, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 506, + 95 + ], + "score": 1.0, + "content": "pus. 
The model architecture can be either encoder-decoder (Raffel et al., 2020) or decoder-", + "type": "text", + "cross_page": true + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 93, + 506, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 93, + 253, + 106 + ], + "score": 1.0, + "content": "only (Brown et al., 2020). Also,", + "type": "text", + "cross_page": true + }, + { + "bbox": [ + 253, + 94, + 269, + 105 + ], + "score": 0.86, + "content": "M _ { \\theta }", + "type": "inline_equation", + "cross_page": true + }, + { + "bbox": [ + 269, + 93, + 506, + 106 + ], + "score": 1.0, + "content": "could also have been fine-tuned with a large set of", + "type": "text", + "cross_page": true + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 104, + 506, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 104, + 506, + 118 + ], + "score": 1.0, + "content": "instruction-following datasets such as Flan Colleciton (Longpre et al., 2023) and Prompt-", + "type": "text", + "cross_page": true + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 115, + 219, + 127 + ], + "spans": [ + { + "bbox": [ + 106, + 115, + 219, + 127 + ], + "score": 1.0, + "content": "Source (Bach et al., 2022).", + "type": "text", + "cross_page": true + } + ], + "index": 3 + } + ], + "index": 52.5, + "bbox_fs": [ + 105, + 708, + 505, + 733 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 107, + 82, + 504, + 127 + ], + "lines": [ + { + "bbox": [ + 105, + 81, + 506, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 506, + 95 + ], + "score": 1.0, + "content": "pus. The model architecture can be either encoder-decoder (Raffel et al., 2020) or decoder-", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 93, + 506, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 93, + 253, + 106 + ], + "score": 1.0, + "content": "only (Brown et al., 2020). 
Also,", + "type": "text" + }, + { + "bbox": [ + 253, + 94, + 269, + 105 + ], + "score": 0.86, + "content": "M _ { \\theta }", + "type": "inline_equation" + }, + { + "bbox": [ + 269, + 93, + 506, + 106 + ], + "score": 1.0, + "content": "could also have been fine-tuned with a large set of", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 104, + 506, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 104, + 506, + 118 + ], + "score": 1.0, + "content": "instruction-following datasets such as Flan Colleciton (Longpre et al., 2023) and Prompt-", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 115, + 219, + 127 + ], + "spans": [ + { + "bbox": [ + 106, + 115, + 219, + 127 + ], + "score": 1.0, + "content": "Source (Bach et al., 2022).", + "type": "text" + } + ], + "index": 3 + } + ], + "index": 1.5 + }, + { + "type": "text", + "bbox": [ + 106, + 139, + 505, + 273 + ], + "lines": [ + { + "bbox": [ + 106, + 138, + 505, + 152 + ], + "spans": [ + { + "bbox": [ + 106, + 138, + 505, + 152 + ], + "score": 1.0, + "content": "Cross-Task Generalization In real-world situations, users often desire an LLM to per-", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 150, + 505, + 163 + ], + "spans": [ + { + "bbox": [ + 106, + 150, + 505, + 163 + ], + "score": 1.0, + "content": "form novel tasks that it has not encountered before β€” an ability widely known as cross-task", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 161, + 505, + 174 + ], + "spans": [ + { + "bbox": [ + 104, + 161, + 505, + 174 + ], + "score": 1.0, + "content": "generalization. 
Generally, cross-task generalization falls into two categories: zero-shot learn-", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 172, + 505, + 185 + ], + "spans": [ + { + "bbox": [ + 105, + 172, + 505, + 185 + ], + "score": 1.0, + "content": "ing (Mishra et al., 2022; Sanh et al., 2022; Chung et al., 2022; OpenAI, 2022; Lin et al., 2022),", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 182, + 505, + 196 + ], + "spans": [ + { + "bbox": [ + 105, + 182, + 505, + 196 + ], + "score": 1.0, + "content": "which necessitates no labeled examples of the new task, and few-shot learning (Ye et al.,", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 193, + 506, + 208 + ], + "spans": [ + { + "bbox": [ + 105, + 193, + 506, + 208 + ], + "score": 1.0, + "content": "2021; Min et al., 2022) which demands a handful of labeled examples. Assume we have", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 204, + 506, + 219 + ], + "spans": [ + { + "bbox": [ + 106, + 205, + 117, + 216 + ], + "score": 0.59, + "content": "N", + "type": "inline_equation" + }, + { + "bbox": [ + 117, + 204, + 426, + 219 + ], + "score": 1.0, + "content": "distinct upstream tasks that the LLM has been trained on, denoted as", + "type": "text" + }, + { + "bbox": [ + 426, + 205, + 501, + 217 + ], + "score": 0.9, + "content": "\\mathbb { T } = \\{ \\mathcal { T } _ { 1 } , . . . 
, \\mathcal { T } _ { N } \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 501, + 204, + 506, + 219 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 216, + 505, + 229 + ], + "spans": [ + { + "bbox": [ + 105, + 216, + 468, + 229 + ], + "score": 1.0, + "content": "Our paper primarily focuses on the latter category, where for an unseen target task", + "type": "text" + }, + { + "bbox": [ + 469, + 217, + 502, + 227 + ], + "score": 0.84, + "content": "\\mathcal { T } ^ { \\prime } \\notin \\mathbb { T } ,", + "type": "inline_equation" + }, + { + "bbox": [ + 502, + 216, + 505, + 229 + ], + "score": 1.0, + "content": ",", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 227, + 505, + 240 + ], + "spans": [ + { + "bbox": [ + 105, + 227, + 505, + 240 + ], + "score": 1.0, + "content": "users can only provide a limited set of labeled examples, Q. Our aim is to modify the model", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 237, + 506, + 252 + ], + "spans": [ + { + "bbox": [ + 106, + 238, + 122, + 250 + ], + "score": 0.89, + "content": "M _ { \\theta }", + "type": "inline_equation" + }, + { + "bbox": [ + 122, + 237, + 208, + 252 + ], + "score": 1.0, + "content": "to adapt it to task", + "type": "text" + }, + { + "bbox": [ + 209, + 238, + 221, + 249 + ], + "score": 0.86, + "content": "\\tau ^ { \\prime }", + "type": "inline_equation" + }, + { + "bbox": [ + 222, + 237, + 274, + 252 + ], + "score": 1.0, + "content": "using only", + "type": "text" + }, + { + "bbox": [ + 275, + 238, + 284, + 250 + ], + "score": 0.36, + "content": "Q", + "type": "inline_equation" + }, + { + "bbox": [ + 285, + 237, + 506, + 252 + ], + "score": 1.0, + "content": ". 
An intuitive method would be to fine-tune the", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 248, + 505, + 263 + ], + "spans": [ + { + "bbox": [ + 105, + 248, + 155, + 263 + ], + "score": 1.0, + "content": "weights of", + "type": "text" + }, + { + "bbox": [ + 155, + 249, + 170, + 261 + ], + "score": 0.89, + "content": "{ \\mathrm { { \\dot { M } } } } _ { \\theta }", + "type": "inline_equation" + }, + { + "bbox": [ + 171, + 248, + 213, + 263 + ], + "score": 1.0, + "content": "based on", + "type": "text" + }, + { + "bbox": [ + 213, + 249, + 225, + 261 + ], + "score": 0.64, + "content": "Q ,", + "type": "inline_equation" + }, + { + "bbox": [ + 225, + 248, + 348, + 263 + ], + "score": 1.0, + "content": "yielding an updated model", + "type": "text" + }, + { + "bbox": [ + 348, + 249, + 364, + 262 + ], + "score": 0.92, + "content": "M _ { \\phi }", + "type": "inline_equation" + }, + { + "bbox": [ + 365, + 248, + 505, + 263 + ], + "score": 1.0, + "content": "with enhanced performance on", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 260, + 503, + 275 + ], + "spans": [ + { + "bbox": [ + 106, + 261, + 119, + 272 + ], + "score": 0.83, + "content": "\\tau ^ { \\prime }", + "type": "inline_equation" + }, + { + "bbox": [ + 119, + 260, + 453, + 275 + ], + "score": 1.0, + "content": ". 
However, this approach is inefficient, time-consuming, and unstable when", + "type": "text" + }, + { + "bbox": [ + 453, + 262, + 463, + 273 + ], + "score": 0.53, + "content": "Q", + "type": "inline_equation" + }, + { + "bbox": [ + 463, + 260, + 503, + 275 + ], + "score": 1.0, + "content": "is small.", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 9.5 + }, + { + "type": "text", + "bbox": [ + 107, + 285, + 505, + 396 + ], + "lines": [ + { + "bbox": [ + 105, + 285, + 505, + 298 + ], + "spans": [ + { + "bbox": [ + 105, + 285, + 505, + 298 + ], + "score": 1.0, + "content": "LoRA Tuning LoRA is a parameter-efficient fine-tuning method (Hu et al., 2022), facil-", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 297, + 505, + 309 + ], + "spans": [ + { + "bbox": [ + 105, + 297, + 505, + 309 + ], + "score": 1.0, + "content": "itates the adaptation of LLMs using lightweight modules, eliminating the need for fine-", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 307, + 506, + 321 + ], + "spans": [ + { + "bbox": [ + 105, + 307, + 506, + 321 + ], + "score": 1.0, + "content": "tuning the entire weights. LoRA tuning involves keeping the original model weights", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 317, + 506, + 332 + ], + "spans": [ + { + "bbox": [ + 105, + 317, + 506, + 332 + ], + "score": 1.0, + "content": "frozen while introducing trainable low-rank decomposition matrices as adapter modules", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 328, + 506, + 343 + ], + "spans": [ + { + "bbox": [ + 105, + 328, + 506, + 343 + ], + "score": 1.0, + "content": "into each layer of the model. 
Compared to the base LLM, this module possesses signif-", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 340, + 506, + 354 + ], + "spans": [ + { + "bbox": [ + 105, + 340, + 506, + 354 + ], + "score": 1.0, + "content": "icantly fewer trainable parameters, paving the way for rapid adaptation using minimal", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 351, + 506, + 365 + ], + "spans": [ + { + "bbox": [ + 105, + 351, + 506, + 365 + ], + "score": 1.0, + "content": "examples. As such, LoRA tuning presents a resource-efficient technique to quickly adapt", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 361, + 505, + 376 + ], + "spans": [ + { + "bbox": [ + 105, + 361, + 505, + 376 + ], + "score": 1.0, + "content": "LLMs for new tasks with restricted training data. However, traditional LoRA methods pri-", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 373, + 506, + 387 + ], + "spans": [ + { + "bbox": [ + 105, + 373, + 506, + 387 + ], + "score": 1.0, + "content": "marily concentrate on training and testing within the same tasks (Gema et al., 2023), rather", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 385, + 349, + 397 + ], + "spans": [ + { + "bbox": [ + 105, + 385, + 349, + 397 + ], + "score": 1.0, + "content": "than venturing into few-shot cross-task generalization.", + "type": "text" + } + ], + "index": 25 + } + ], + "index": 20.5 + }, + { + "type": "title", + "bbox": [ + 107, + 412, + 200, + 426 + ], + "lines": [ + { + "bbox": [ + 103, + 409, + 201, + 430 + ], + "spans": [ + { + "bbox": [ + 103, + 409, + 201, + 430 + ], + "score": 1.0, + "content": "3 Methodology", + "type": "text" + } + ], + "index": 26 + } + ], + "index": 26 + }, + { + "type": "text", + "bbox": [ + 107, + 438, + 503, + 471 + ], + "lines": [ + { + "bbox": [ + 105, + 438, + 505, + 451 + ], + "spans": [ + { + "bbox": [ + 105, + 438, + 505, + 451 + ], + "score": 1.0, + "content": "In this 
section, we provide an overview of our proposed method. We then explain the", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 448, + 505, + 462 + ], + "spans": [ + { + "bbox": [ + 105, + 448, + 505, + 462 + ], + "score": 1.0, + "content": "LoRA tuning procedure in detail. Last, we introduce the procedure of our LoraHub learn-", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 459, + 391, + 475 + ], + "spans": [ + { + "bbox": [ + 105, + 459, + 391, + 475 + ], + "score": 1.0, + "content": "ing, which consists of the COMPOSE stage and the ADAPT stage.", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 28 + }, + { + "type": "title", + "bbox": [ + 107, + 485, + 213, + 497 + ], + "lines": [ + { + "bbox": [ + 105, + 484, + 214, + 498 + ], + "spans": [ + { + "bbox": [ + 105, + 484, + 214, + 498 + ], + "score": 1.0, + "content": "3.1 Method Overview", + "type": "text" + } + ], + "index": 30 + } + ], + "index": 30 + }, + { + "type": "text", + "bbox": [ + 107, + 506, + 505, + 673 + ], + "lines": [ + { + "bbox": [ + 105, + 505, + 504, + 519 + ], + "spans": [ + { + "bbox": [ + 105, + 505, + 504, + 519 + ], + "score": 1.0, + "content": "As depicted in Figure 2, we initially train LoRA modules on a variety of upstream tasks.", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 517, + 505, + 531 + ], + "spans": [ + { + "bbox": [ + 105, + 517, + 175, + 531 + ], + "score": 1.0, + "content": "Specifically, for", + "type": "text" + }, + { + "bbox": [ + 176, + 518, + 186, + 527 + ], + "score": 0.72, + "content": "N", + "type": "inline_equation" + }, + { + "bbox": [ + 186, + 517, + 379, + 531 + ], + "score": 1.0, + "content": "distinct upstream tasks, we separately train", + "type": "text" + }, + { + "bbox": [ + 380, + 518, + 390, + 527 + ], + "score": 0.75, + "content": "N", + "type": "inline_equation" + }, + { + "bbox": [ + 390, + 517, + 505, + 531 + ], + "score": 1.0, + "content": "LoRA modules, each rep-", + 
"type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 527, + 505, + 541 + ], + "spans": [ + { + "bbox": [ + 105, + 527, + 158, + 541 + ], + "score": 1.0, + "content": "resented as", + "type": "text" + }, + { + "bbox": [ + 159, + 529, + 171, + 540 + ], + "score": 0.87, + "content": "m _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 172, + 527, + 209, + 541 + ], + "score": 1.0, + "content": "for task", + "type": "text" + }, + { + "bbox": [ + 209, + 528, + 240, + 539 + ], + "score": 0.92, + "content": "\\mathscr { T } _ { i } \\in \\mathbf { \\hat { T } }", + "type": "inline_equation" + }, + { + "bbox": [ + 241, + 527, + 376, + 541 + ], + "score": 1.0, + "content": ". Subsequently, for a new task", + "type": "text" + }, + { + "bbox": [ + 376, + 528, + 411, + 539 + ], + "score": 0.89, + "content": "\\mathcal { T } ^ { \\prime } \\notin \\mathbb { T } ,", + "type": "inline_equation" + }, + { + "bbox": [ + 411, + 527, + 505, + 541 + ], + "score": 1.0, + "content": ", such as Boolean Ex-", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 104, + 538, + 506, + 554 + ], + "spans": [ + { + "bbox": [ + 104, + 538, + 312, + 554 + ], + "score": 1.0, + "content": "pressions represented in Figure 2, its examples", + "type": "text" + }, + { + "bbox": [ + 312, + 540, + 322, + 550 + ], + "score": 0.61, + "content": "Q", + "type": "inline_equation" + }, + { + "bbox": [ + 322, + 538, + 506, + 554 + ], + "score": 1.0, + "content": "are utilized to steer the LoraHub learning", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 550, + 506, + 564 + ], + "spans": [ + { + "bbox": [ + 105, + 550, + 506, + 564 + ], + "score": 1.0, + "content": "process. The LoraHub learning encapsulates two main phases: the COMPOSE phase and", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 560, + 506, + 575 + ], + "spans": [ + { + "bbox": [ + 105, + 560, + 506, + 575 + ], + "score": 1.0, + "content": "the ADAPT phase. 
In the COMPOSE phase, all available LoRA modules are combined into", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 104, + 571, + 506, + 586 + ], + "spans": [ + { + "bbox": [ + 104, + 571, + 230, + 586 + ], + "score": 1.0, + "content": "a single integrated module", + "type": "text" + }, + { + "bbox": [ + 230, + 572, + 240, + 583 + ], + "score": 0.66, + "content": "\\hat { m } _ { - }", + "type": "inline_equation" + }, + { + "bbox": [ + 240, + 571, + 272, + 586 + ], + "score": 1.0, + "content": ", using", + "type": "text" + }, + { + "bbox": [ + 273, + 572, + 348, + 584 + ], + "score": 0.92, + "content": "\\left\\{ w _ { 1 } , w _ { 2 } , \\dots , w _ { N } \\right\\}", + "type": "inline_equation" + }, + { + "bbox": [ + 348, + 571, + 444, + 586 + ], + "score": 1.0, + "content": "as coefficients. Each", + "type": "text" + }, + { + "bbox": [ + 444, + 573, + 456, + 583 + ], + "score": 0.85, + "content": "w _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 457, + 571, + 506, + 586 + ], + "score": 1.0, + "content": "is a scalar", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 583, + 505, + 596 + ], + "spans": [ + { + "bbox": [ + 105, + 583, + 505, + 596 + ], + "score": 1.0, + "content": "value that can take on positive or negative values, and the combination can be done in", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 594, + 505, + 606 + ], + "spans": [ + { + "bbox": [ + 105, + 594, + 421, + 606 + ], + "score": 1.0, + "content": "different ways. 
During the ADAPT phase, the combined LoRA module", + "type": "text" + }, + { + "bbox": [ + 422, + 594, + 432, + 604 + ], + "score": 0.7, + "content": "\\hat { m }", + "type": "inline_equation" + }, + { + "bbox": [ + 432, + 594, + 505, + 606 + ], + "score": 1.0, + "content": "is amalgamated", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 604, + 506, + 618 + ], + "spans": [ + { + "bbox": [ + 105, + 604, + 171, + 618 + ], + "score": 1.0, + "content": "with the LLM", + "type": "text" + }, + { + "bbox": [ + 171, + 605, + 186, + 617 + ], + "score": 0.88, + "content": "M _ { \\theta }", + "type": "inline_equation" + }, + { + "bbox": [ + 187, + 604, + 465, + 618 + ], + "score": 1.0, + "content": ", and its performance on few-shot examples from the new task", + "type": "text" + }, + { + "bbox": [ + 466, + 605, + 478, + 615 + ], + "score": 0.87, + "content": "\\mathbf { \\breve { { \\mathbf { \\nabla } } } } _ { \\mathbf { \\mathbf { \\mathbf { \\mathbf { \\mathcal { T } } } } } ^ { \\prime } }", + "type": "inline_equation" + }, + { + "bbox": [ + 479, + 604, + 506, + 618 + ], + "score": 1.0, + "content": "is as-", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 615, + 506, + 629 + ], + "spans": [ + { + "bbox": [ + 105, + 615, + 424, + 629 + ], + "score": 1.0, + "content": "sessed. A gradient-free algorithm is subsequently deployed to update", + "type": "text" + }, + { + "bbox": [ + 425, + 618, + 434, + 627 + ], + "score": 0.61, + "content": "w _ { . 
}", + "type": "inline_equation" + }, + { + "bbox": [ + 434, + 615, + 506, + 629 + ], + "score": 1.0, + "content": ", enhancing mΛ† ’s", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 626, + 504, + 640 + ], + "spans": [ + { + "bbox": [ + 105, + 626, + 336, + 640 + ], + "score": 1.0, + "content": "performance (e.g., loss) on the few-shot examples", + "type": "text" + }, + { + "bbox": [ + 336, + 627, + 346, + 639 + ], + "score": 0.58, + "content": "Q", + "type": "inline_equation" + }, + { + "bbox": [ + 346, + 626, + 495, + 640 + ], + "score": 1.0, + "content": ". Finally, after iterating through", + "type": "text" + }, + { + "bbox": [ + 495, + 627, + 504, + 637 + ], + "score": 0.55, + "content": "K", + "type": "inline_equation" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 636, + 506, + 652 + ], + "spans": [ + { + "bbox": [ + 105, + 636, + 408, + 652 + ], + "score": 1.0, + "content": "steps, the optimum performing LoRA module is applied to the LLM", + "type": "text" + }, + { + "bbox": [ + 409, + 638, + 424, + 649 + ], + "score": 0.87, + "content": "M _ { \\theta }", + "type": "inline_equation" + }, + { + "bbox": [ + 424, + 636, + 506, + 652 + ], + "score": 1.0, + "content": ", yielding the final", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 104, + 647, + 506, + 663 + ], + "spans": [ + { + "bbox": [ + 104, + 647, + 131, + 663 + ], + "score": 1.0, + "content": "LLM", + "type": "text" + }, + { + "bbox": [ + 131, + 649, + 221, + 662 + ], + "score": 0.91, + "content": "M _ { \\phi } = \\mathrm { L o R A } ( \\hat { M } _ { \\theta } , \\hat { m } )", + "type": "inline_equation" + }, + { + "bbox": [ + 222, + 647, + 506, + 663 + ], + "score": 1.0, + "content": ". 
This serves as an effectively adjusted model for the unseen task", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 106, + 660, + 371, + 675 + ], + "spans": [ + { + "bbox": [ + 106, + 661, + 119, + 672 + ], + "score": 0.82, + "content": "\\tau ^ { \\prime }", + "type": "inline_equation" + }, + { + "bbox": [ + 119, + 660, + 371, + 675 + ], + "score": 1.0, + "content": ", which will then be deployed and not updated anymore.", + "type": "text" + } + ], + "index": 45 + } + ], + "index": 38 + }, + { + "type": "title", + "bbox": [ + 108, + 686, + 274, + 699 + ], + "lines": [ + { + "bbox": [ + 105, + 685, + 276, + 701 + ], + "spans": [ + { + "bbox": [ + 105, + 685, + 276, + 701 + ], + "score": 1.0, + "content": "3.2 LoRA tuning on upstream tasks", + "type": "text" + } + ], + "index": 46 + } + ], + "index": 46 + }, + { + "type": "text", + "bbox": [ + 108, + 707, + 504, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 705, + 506, + 721 + ], + "spans": [ + { + "bbox": [ + 105, + 705, + 506, + 721 + ], + "score": 1.0, + "content": "LoRA effectively minimizes the number of trainable parameters through the process of", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 105, + 718, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 718, + 432, + 733 + ], + "score": 1.0, + "content": "decomposing the attention weight matrix update of the LLM, denoted as", + "type": "text" + }, + { + "bbox": [ + 432, + 719, + 482, + 732 + ], + "score": 0.92, + "content": "W _ { 0 } \\in \\bar { R } ^ { d \\times k } ,", + "type": "inline_equation" + }, + { + "bbox": [ + 482, + 718, + 506, + 733 + ], + "score": 1.0, + "content": ", into", + "type": "text" + } + ], + "index": 48 + } + ], + "index": 47.5 + } + ], + "page_idx": 2, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 27, + 316, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + 
], + "score": 1.0, + "content": "Published as a conference paper at COLM 2024", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 302, + 751, + 308, + 760 + ], + "lines": [ + { + "bbox": [ + 301, + 750, + 309, + 762 + ], + "spans": [ + { + "bbox": [ + 301, + 750, + 309, + 762 + ], + "score": 1.0, + "content": "3", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 107, + 82, + 504, + 127 + ], + "lines": [], + "index": 1.5, + "bbox_fs": [ + 105, + 81, + 506, + 127 + ], + "lines_deleted": true + }, + { + "type": "text", + "bbox": [ + 106, + 139, + 505, + 273 + ], + "lines": [ + { + "bbox": [ + 106, + 138, + 505, + 152 + ], + "spans": [ + { + "bbox": [ + 106, + 138, + 505, + 152 + ], + "score": 1.0, + "content": "Cross-Task Generalization In real-world situations, users often desire an LLM to per-", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 150, + 505, + 163 + ], + "spans": [ + { + "bbox": [ + 106, + 150, + 505, + 163 + ], + "score": 1.0, + "content": "form novel tasks that it has not encountered before β€” an ability widely known as cross-task", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 161, + 505, + 174 + ], + "spans": [ + { + "bbox": [ + 104, + 161, + 505, + 174 + ], + "score": 1.0, + "content": "generalization. 
Generally, cross-task generalization falls into two categories: zero-shot learn-", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 172, + 505, + 185 + ], + "spans": [ + { + "bbox": [ + 105, + 172, + 505, + 185 + ], + "score": 1.0, + "content": "ing (Mishra et al., 2022; Sanh et al., 2022; Chung et al., 2022; OpenAI, 2022; Lin et al., 2022),", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 182, + 505, + 196 + ], + "spans": [ + { + "bbox": [ + 105, + 182, + 505, + 196 + ], + "score": 1.0, + "content": "which necessitates no labeled examples of the new task, and few-shot learning (Ye et al.,", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 193, + 506, + 208 + ], + "spans": [ + { + "bbox": [ + 105, + 193, + 506, + 208 + ], + "score": 1.0, + "content": "2021; Min et al., 2022) which demands a handful of labeled examples. Assume we have", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 204, + 506, + 219 + ], + "spans": [ + { + "bbox": [ + 106, + 205, + 117, + 216 + ], + "score": 0.59, + "content": "N", + "type": "inline_equation" + }, + { + "bbox": [ + 117, + 204, + 426, + 219 + ], + "score": 1.0, + "content": "distinct upstream tasks that the LLM has been trained on, denoted as", + "type": "text" + }, + { + "bbox": [ + 426, + 205, + 501, + 217 + ], + "score": 0.9, + "content": "\\mathbb { T } = \\{ \\mathcal { T } _ { 1 } , . . . 
, \\mathcal { T } _ { N } \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 501, + 204, + 506, + 219 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 216, + 505, + 229 + ], + "spans": [ + { + "bbox": [ + 105, + 216, + 468, + 229 + ], + "score": 1.0, + "content": "Our paper primarily focuses on the latter category, where for an unseen target task", + "type": "text" + }, + { + "bbox": [ + 469, + 217, + 502, + 227 + ], + "score": 0.84, + "content": "\\mathcal { T } ^ { \\prime } \\notin \\mathbb { T } ,", + "type": "inline_equation" + }, + { + "bbox": [ + 502, + 216, + 505, + 229 + ], + "score": 1.0, + "content": ",", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 227, + 505, + 240 + ], + "spans": [ + { + "bbox": [ + 105, + 227, + 505, + 240 + ], + "score": 1.0, + "content": "users can only provide a limited set of labeled examples, Q. Our aim is to modify the model", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 237, + 506, + 252 + ], + "spans": [ + { + "bbox": [ + 106, + 238, + 122, + 250 + ], + "score": 0.89, + "content": "M _ { \\theta }", + "type": "inline_equation" + }, + { + "bbox": [ + 122, + 237, + 208, + 252 + ], + "score": 1.0, + "content": "to adapt it to task", + "type": "text" + }, + { + "bbox": [ + 209, + 238, + 221, + 249 + ], + "score": 0.86, + "content": "\\tau ^ { \\prime }", + "type": "inline_equation" + }, + { + "bbox": [ + 222, + 237, + 274, + 252 + ], + "score": 1.0, + "content": "using only", + "type": "text" + }, + { + "bbox": [ + 275, + 238, + 284, + 250 + ], + "score": 0.36, + "content": "Q", + "type": "inline_equation" + }, + { + "bbox": [ + 285, + 237, + 506, + 252 + ], + "score": 1.0, + "content": ". 
An intuitive method would be to fine-tune the", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 248, + 505, + 263 + ], + "spans": [ + { + "bbox": [ + 105, + 248, + 155, + 263 + ], + "score": 1.0, + "content": "weights of", + "type": "text" + }, + { + "bbox": [ + 155, + 249, + 170, + 261 + ], + "score": 0.89, + "content": "{ \\mathrm { { \\dot { M } } } } _ { \\theta }", + "type": "inline_equation" + }, + { + "bbox": [ + 171, + 248, + 213, + 263 + ], + "score": 1.0, + "content": "based on", + "type": "text" + }, + { + "bbox": [ + 213, + 249, + 225, + 261 + ], + "score": 0.64, + "content": "Q ,", + "type": "inline_equation" + }, + { + "bbox": [ + 225, + 248, + 348, + 263 + ], + "score": 1.0, + "content": "yielding an updated model", + "type": "text" + }, + { + "bbox": [ + 348, + 249, + 364, + 262 + ], + "score": 0.92, + "content": "M _ { \\phi }", + "type": "inline_equation" + }, + { + "bbox": [ + 365, + 248, + 505, + 263 + ], + "score": 1.0, + "content": "with enhanced performance on", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 260, + 503, + 275 + ], + "spans": [ + { + "bbox": [ + 106, + 261, + 119, + 272 + ], + "score": 0.83, + "content": "\\tau ^ { \\prime }", + "type": "inline_equation" + }, + { + "bbox": [ + 119, + 260, + 453, + 275 + ], + "score": 1.0, + "content": ". 
However, this approach is inefficient, time-consuming, and unstable when", + "type": "text" + }, + { + "bbox": [ + 453, + 262, + 463, + 273 + ], + "score": 0.53, + "content": "Q", + "type": "inline_equation" + }, + { + "bbox": [ + 463, + 260, + 503, + 275 + ], + "score": 1.0, + "content": "is small.", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 9.5, + "bbox_fs": [ + 104, + 138, + 506, + 275 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 285, + 505, + 396 + ], + "lines": [ + { + "bbox": [ + 105, + 285, + 505, + 298 + ], + "spans": [ + { + "bbox": [ + 105, + 285, + 505, + 298 + ], + "score": 1.0, + "content": "LoRA Tuning LoRA is a parameter-efficient fine-tuning method (Hu et al., 2022), facil-", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 297, + 505, + 309 + ], + "spans": [ + { + "bbox": [ + 105, + 297, + 505, + 309 + ], + "score": 1.0, + "content": "itates the adaptation of LLMs using lightweight modules, eliminating the need for fine-", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 307, + 506, + 321 + ], + "spans": [ + { + "bbox": [ + 105, + 307, + 506, + 321 + ], + "score": 1.0, + "content": "tuning the entire weights. LoRA tuning involves keeping the original model weights", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 317, + 506, + 332 + ], + "spans": [ + { + "bbox": [ + 105, + 317, + 506, + 332 + ], + "score": 1.0, + "content": "frozen while introducing trainable low-rank decomposition matrices as adapter modules", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 328, + 506, + 343 + ], + "spans": [ + { + "bbox": [ + 105, + 328, + 506, + 343 + ], + "score": 1.0, + "content": "into each layer of the model. 
Compared to the base LLM, this module possesses signif-", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 340, + 506, + 354 + ], + "spans": [ + { + "bbox": [ + 105, + 340, + 506, + 354 + ], + "score": 1.0, + "content": "icantly fewer trainable parameters, paving the way for rapid adaptation using minimal", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 351, + 506, + 365 + ], + "spans": [ + { + "bbox": [ + 105, + 351, + 506, + 365 + ], + "score": 1.0, + "content": "examples. As such, LoRA tuning presents a resource-efficient technique to quickly adapt", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 361, + 505, + 376 + ], + "spans": [ + { + "bbox": [ + 105, + 361, + 505, + 376 + ], + "score": 1.0, + "content": "LLMs for new tasks with restricted training data. However, traditional LoRA methods pri-", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 373, + 506, + 387 + ], + "spans": [ + { + "bbox": [ + 105, + 373, + 506, + 387 + ], + "score": 1.0, + "content": "marily concentrate on training and testing within the same tasks (Gema et al., 2023), rather", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 385, + 349, + 397 + ], + "spans": [ + { + "bbox": [ + 105, + 385, + 349, + 397 + ], + "score": 1.0, + "content": "than venturing into few-shot cross-task generalization.", + "type": "text" + } + ], + "index": 25 + } + ], + "index": 20.5, + "bbox_fs": [ + 105, + 285, + 506, + 397 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 412, + 200, + 426 + ], + "lines": [ + { + "bbox": [ + 103, + 409, + 201, + 430 + ], + "spans": [ + { + "bbox": [ + 103, + 409, + 201, + 430 + ], + "score": 1.0, + "content": "3 Methodology", + "type": "text" + } + ], + "index": 26 + } + ], + "index": 26 + }, + { + "type": "text", + "bbox": [ + 107, + 438, + 503, + 471 + ], + "lines": [ + { + "bbox": [ + 105, + 438, + 505, + 451 + ], + "spans": [ + { + "bbox": [ + 105, + 438, + 505, + 
451 + ], + "score": 1.0, + "content": "In this section, we provide an overview of our proposed method. We then explain the", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 448, + 505, + 462 + ], + "spans": [ + { + "bbox": [ + 105, + 448, + 505, + 462 + ], + "score": 1.0, + "content": "LoRA tuning procedure in detail. Last, we introduce the procedure of our LoraHub learn-", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 459, + 391, + 475 + ], + "spans": [ + { + "bbox": [ + 105, + 459, + 391, + 475 + ], + "score": 1.0, + "content": "ing, which consists of the COMPOSE stage and the ADAPT stage.", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 28, + "bbox_fs": [ + 105, + 438, + 505, + 475 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 485, + 213, + 497 + ], + "lines": [ + { + "bbox": [ + 105, + 484, + 214, + 498 + ], + "spans": [ + { + "bbox": [ + 105, + 484, + 214, + 498 + ], + "score": 1.0, + "content": "3.1 Method Overview", + "type": "text" + } + ], + "index": 30 + } + ], + "index": 30 + }, + { + "type": "text", + "bbox": [ + 107, + 506, + 505, + 673 + ], + "lines": [ + { + "bbox": [ + 105, + 505, + 504, + 519 + ], + "spans": [ + { + "bbox": [ + 105, + 505, + 504, + 519 + ], + "score": 1.0, + "content": "As depicted in Figure 2, we initially train LoRA modules on a variety of upstream tasks.", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 517, + 505, + 531 + ], + "spans": [ + { + "bbox": [ + 105, + 517, + 175, + 531 + ], + "score": 1.0, + "content": "Specifically, for", + "type": "text" + }, + { + "bbox": [ + 176, + 518, + 186, + 527 + ], + "score": 0.72, + "content": "N", + "type": "inline_equation" + }, + { + "bbox": [ + 186, + 517, + 379, + 531 + ], + "score": 1.0, + "content": "distinct upstream tasks, we separately train", + "type": "text" + }, + { + "bbox": [ + 380, + 518, + 390, + 527 + ], + "score": 0.75, + "content": "N", + "type": "inline_equation" + }, + { + "bbox": 
[ + 390, + 517, + 505, + 531 + ], + "score": 1.0, + "content": "LoRA modules, each rep-", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 527, + 505, + 541 + ], + "spans": [ + { + "bbox": [ + 105, + 527, + 158, + 541 + ], + "score": 1.0, + "content": "resented as", + "type": "text" + }, + { + "bbox": [ + 159, + 529, + 171, + 540 + ], + "score": 0.87, + "content": "m _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 172, + 527, + 209, + 541 + ], + "score": 1.0, + "content": "for task", + "type": "text" + }, + { + "bbox": [ + 209, + 528, + 240, + 539 + ], + "score": 0.92, + "content": "\\mathscr { T } _ { i } \\in \\mathbf { \\hat { T } }", + "type": "inline_equation" + }, + { + "bbox": [ + 241, + 527, + 376, + 541 + ], + "score": 1.0, + "content": ". Subsequently, for a new task", + "type": "text" + }, + { + "bbox": [ + 376, + 528, + 411, + 539 + ], + "score": 0.89, + "content": "\\mathcal { T } ^ { \\prime } \\notin \\mathbb { T } ,", + "type": "inline_equation" + }, + { + "bbox": [ + 411, + 527, + 505, + 541 + ], + "score": 1.0, + "content": ", such as Boolean Ex-", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 104, + 538, + 506, + 554 + ], + "spans": [ + { + "bbox": [ + 104, + 538, + 312, + 554 + ], + "score": 1.0, + "content": "pressions represented in Figure 2, its examples", + "type": "text" + }, + { + "bbox": [ + 312, + 540, + 322, + 550 + ], + "score": 0.61, + "content": "Q", + "type": "inline_equation" + }, + { + "bbox": [ + 322, + 538, + 506, + 554 + ], + "score": 1.0, + "content": "are utilized to steer the LoraHub learning", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 550, + 506, + 564 + ], + "spans": [ + { + "bbox": [ + 105, + 550, + 506, + 564 + ], + "score": 1.0, + "content": "process. 
The LoraHub learning encapsulates two main phases: the COMPOSE phase and", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 560, + 506, + 575 + ], + "spans": [ + { + "bbox": [ + 105, + 560, + 506, + 575 + ], + "score": 1.0, + "content": "the ADAPT phase. In the COMPOSE phase, all available LoRA modules are combined into", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 104, + 571, + 506, + 586 + ], + "spans": [ + { + "bbox": [ + 104, + 571, + 230, + 586 + ], + "score": 1.0, + "content": "a single integrated module", + "type": "text" + }, + { + "bbox": [ + 230, + 572, + 240, + 583 + ], + "score": 0.66, + "content": "\\hat { m } _ { - }", + "type": "inline_equation" + }, + { + "bbox": [ + 240, + 571, + 272, + 586 + ], + "score": 1.0, + "content": ", using", + "type": "text" + }, + { + "bbox": [ + 273, + 572, + 348, + 584 + ], + "score": 0.92, + "content": "\\left\\{ w _ { 1 } , w _ { 2 } , \\dots , w _ { N } \\right\\}", + "type": "inline_equation" + }, + { + "bbox": [ + 348, + 571, + 444, + 586 + ], + "score": 1.0, + "content": "as coefficients. Each", + "type": "text" + }, + { + "bbox": [ + 444, + 573, + 456, + 583 + ], + "score": 0.85, + "content": "w _ { i }", + "type": "inline_equation" + }, + { + "bbox": [ + 457, + 571, + 506, + 586 + ], + "score": 1.0, + "content": "is a scalar", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 583, + 505, + 596 + ], + "spans": [ + { + "bbox": [ + 105, + 583, + 505, + 596 + ], + "score": 1.0, + "content": "value that can take on positive or negative values, and the combination can be done in", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 594, + 505, + 606 + ], + "spans": [ + { + "bbox": [ + 105, + 594, + 421, + 606 + ], + "score": 1.0, + "content": "different ways. 
During the ADAPT phase, the combined LoRA module", + "type": "text" + }, + { + "bbox": [ + 422, + 594, + 432, + 604 + ], + "score": 0.7, + "content": "\\hat { m }", + "type": "inline_equation" + }, + { + "bbox": [ + 432, + 594, + 505, + 606 + ], + "score": 1.0, + "content": "is amalgamated", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 604, + 506, + 618 + ], + "spans": [ + { + "bbox": [ + 105, + 604, + 171, + 618 + ], + "score": 1.0, + "content": "with the LLM", + "type": "text" + }, + { + "bbox": [ + 171, + 605, + 186, + 617 + ], + "score": 0.88, + "content": "M _ { \\theta }", + "type": "inline_equation" + }, + { + "bbox": [ + 187, + 604, + 465, + 618 + ], + "score": 1.0, + "content": ", and its performance on few-shot examples from the new task", + "type": "text" + }, + { + "bbox": [ + 466, + 605, + 478, + 615 + ], + "score": 0.87, + "content": "\\mathbf { \\breve { { \\mathbf { \\nabla } } } } _ { \\mathbf { \\mathbf { \\mathbf { \\mathbf { \\mathcal { T } } } } } ^ { \\prime } }", + "type": "inline_equation" + }, + { + "bbox": [ + 479, + 604, + 506, + 618 + ], + "score": 1.0, + "content": "is as-", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 615, + 506, + 629 + ], + "spans": [ + { + "bbox": [ + 105, + 615, + 424, + 629 + ], + "score": 1.0, + "content": "sessed. A gradient-free algorithm is subsequently deployed to update", + "type": "text" + }, + { + "bbox": [ + 425, + 618, + 434, + 627 + ], + "score": 0.61, + "content": "w _ { . 
}", + "type": "inline_equation" + }, + { + "bbox": [ + 434, + 615, + 506, + 629 + ], + "score": 1.0, + "content": ", enhancing mΛ† ’s", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 626, + 504, + 640 + ], + "spans": [ + { + "bbox": [ + 105, + 626, + 336, + 640 + ], + "score": 1.0, + "content": "performance (e.g., loss) on the few-shot examples", + "type": "text" + }, + { + "bbox": [ + 336, + 627, + 346, + 639 + ], + "score": 0.58, + "content": "Q", + "type": "inline_equation" + }, + { + "bbox": [ + 346, + 626, + 495, + 640 + ], + "score": 1.0, + "content": ". Finally, after iterating through", + "type": "text" + }, + { + "bbox": [ + 495, + 627, + 504, + 637 + ], + "score": 0.55, + "content": "K", + "type": "inline_equation" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 636, + 506, + 652 + ], + "spans": [ + { + "bbox": [ + 105, + 636, + 408, + 652 + ], + "score": 1.0, + "content": "steps, the optimum performing LoRA module is applied to the LLM", + "type": "text" + }, + { + "bbox": [ + 409, + 638, + 424, + 649 + ], + "score": 0.87, + "content": "M _ { \\theta }", + "type": "inline_equation" + }, + { + "bbox": [ + 424, + 636, + 506, + 652 + ], + "score": 1.0, + "content": ", yielding the final", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 104, + 647, + 506, + 663 + ], + "spans": [ + { + "bbox": [ + 104, + 647, + 131, + 663 + ], + "score": 1.0, + "content": "LLM", + "type": "text" + }, + { + "bbox": [ + 131, + 649, + 221, + 662 + ], + "score": 0.91, + "content": "M _ { \\phi } = \\mathrm { L o R A } ( \\hat { M } _ { \\theta } , \\hat { m } )", + "type": "inline_equation" + }, + { + "bbox": [ + 222, + 647, + 506, + 663 + ], + "score": 1.0, + "content": ". 
This serves as an effectively adjusted model for the unseen task", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 106, + 660, + 371, + 675 + ], + "spans": [ + { + "bbox": [ + 106, + 661, + 119, + 672 + ], + "score": 0.82, + "content": "\\tau ^ { \\prime }", + "type": "inline_equation" + }, + { + "bbox": [ + 119, + 660, + 371, + 675 + ], + "score": 1.0, + "content": ", which will then be deployed and not updated anymore.", + "type": "text" + } + ], + "index": 45 + } + ], + "index": 38, + "bbox_fs": [ + 104, + 505, + 506, + 675 + ] + }, + { + "type": "title", + "bbox": [ + 108, + 686, + 274, + 699 + ], + "lines": [ + { + "bbox": [ + 105, + 685, + 276, + 701 + ], + "spans": [ + { + "bbox": [ + 105, + 685, + 276, + 701 + ], + "score": 1.0, + "content": "3.2 LoRA tuning on upstream tasks", + "type": "text" + } + ], + "index": 46 + } + ], + "index": 46 + }, + { + "type": "text", + "bbox": [ + 108, + 707, + 504, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 705, + 506, + 721 + ], + "spans": [ + { + "bbox": [ + 105, + 705, + 506, + 721 + ], + "score": 1.0, + "content": "LoRA effectively minimizes the number of trainable parameters through the process of", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 105, + 718, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 718, + 432, + 733 + ], + "score": 1.0, + "content": "decomposing the attention weight matrix update of the LLM, denoted as", + "type": "text" + }, + { + "bbox": [ + 432, + 719, + 482, + 732 + ], + "score": 0.92, + "content": "W _ { 0 } \\in \\bar { R } ^ { d \\times k } ,", + "type": "inline_equation" + }, + { + "bbox": [ + 482, + 718, + 506, + 733 + ], + "score": 1.0, + "content": ", into", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 106, + 302, + 505, + 315 + ], + "spans": [ + { + "bbox": [ + 106, + 302, + 505, + 315 + ], + "score": 1.0, + "content": "low-rank matrices. 
In more specific terms, LoRA exhibits the updated weight matrix in", + "type": "text", + "cross_page": true + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 312, + 506, + 329 + ], + "spans": [ + { + "bbox": [ + 105, + 312, + 149, + 329 + ], + "score": 1.0, + "content": "the form", + "type": "text", + "cross_page": true + }, + { + "bbox": [ + 149, + 315, + 250, + 327 + ], + "score": 0.9, + "content": "W _ { 0 } + \\delta W = W _ { 0 } + A B ,", + "type": "inline_equation", + "cross_page": true + }, + { + "bbox": [ + 251, + 312, + 285, + 329 + ], + "score": 1.0, + "content": "where", + "type": "text", + "cross_page": true + }, + { + "bbox": [ + 285, + 314, + 333, + 326 + ], + "score": 0.91, + "content": "A \\in \\mathbb { R } ^ { d \\times r }", + "type": "inline_equation", + "cross_page": true + }, + { + "bbox": [ + 333, + 312, + 354, + 329 + ], + "score": 1.0, + "content": "and", + "type": "text", + "cross_page": true + }, + { + "bbox": [ + 355, + 314, + 401, + 326 + ], + "score": 0.91, + "content": "B \\in \\mathbb { R } ^ { r \\times k }", + "type": "inline_equation", + "cross_page": true + }, + { + "bbox": [ + 401, + 312, + 506, + 329 + ], + "score": 1.0, + "content": "are trainable low-rank", + "type": "text", + "cross_page": true + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 326, + 505, + 339 + ], + "spans": [ + { + "bbox": [ + 105, + 326, + 190, + 339 + ], + "score": 1.0, + "content": "matrices with rank", + "type": "text", + "cross_page": true + }, + { + "bbox": [ + 190, + 328, + 198, + 337 + ], + "score": 0.7, + "content": "r ,", + "type": "inline_equation", + "cross_page": true + }, + { + "bbox": [ + 198, + 326, + 403, + 339 + ], + "score": 1.0, + "content": "a dimension significantly smaller than those of", + "type": "text", + "cross_page": true + }, + { + "bbox": [ + 403, + 327, + 410, + 337 + ], + "score": 0.72, + "content": "d", + "type": "inline_equation", + "cross_page": true + }, + { + "bbox": [ + 410, + 326, + 428, + 339 + ], + "score": 
1.0, + "content": "and", + "type": "text", + "cross_page": true + }, + { + "bbox": [ + 428, + 327, + 435, + 336 + ], + "score": 0.67, + "content": "k", + "type": "inline_equation", + "cross_page": true + }, + { + "bbox": [ + 435, + 326, + 505, + 339 + ], + "score": 1.0, + "content": ". In this context,", + "type": "text", + "cross_page": true + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 336, + 506, + 351 + ], + "spans": [ + { + "bbox": [ + 105, + 336, + 161, + 351 + ], + "score": 1.0, + "content": "the product", + "type": "text", + "cross_page": true + }, + { + "bbox": [ + 161, + 338, + 177, + 348 + ], + "score": 0.48, + "content": "A B", + "type": "inline_equation", + "cross_page": true + }, + { + "bbox": [ + 177, + 336, + 295, + 351 + ], + "score": 1.0, + "content": "defines the LoRA module", + "type": "text", + "cross_page": true + }, + { + "bbox": [ + 295, + 339, + 306, + 348 + ], + "score": 0.56, + "content": "m ,", + "type": "inline_equation", + "cross_page": true + }, + { + "bbox": [ + 306, + 336, + 506, + 351 + ], + "score": 1.0, + "content": ", as previously elaborated. 
By leveraging the", + "type": "text", + "cross_page": true + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 347, + 505, + 362 + ], + "spans": [ + { + "bbox": [ + 105, + 347, + 505, + 362 + ], + "score": 1.0, + "content": "low-rank decomposition, LoRA substantially reduces the number of trainable parameters", + "type": "text", + "cross_page": true + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 358, + 363, + 374 + ], + "spans": [ + { + "bbox": [ + 104, + 358, + 363, + 374 + ], + "score": 1.0, + "content": "needed to adapt the weights of LLMs duriing fine-tuning.", + "type": "text", + "cross_page": true + } + ], + "index": 15 + } + ], + "index": 47.5, + "bbox_fs": [ + 105, + 705, + 506, + 733 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "image", + "bbox": [ + 110, + 78, + 501, + 196 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 110, + 78, + 501, + 196 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 110, + 78, + 501, + 196 + ], + "spans": [ + { + "bbox": [ + 110, + 78, + 501, + 196 + ], + "score": 0.967, + "type": "image", + "image_path": "fdc28d30d1864590ed2196198df1e30168cf83fc2b25e930c617edf738bdbc3b.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 110, + 78, + 501, + 117.33333333333334 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 110, + 117.33333333333334, + 501, + 156.66666666666669 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 110, + 156.66666666666669, + 501, + 196.00000000000003 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 203, + 505, + 281 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 202, + 505, + 218 + ], + "spans": [ + { + "bbox": [ + 105, + 202, + 505, + 218 + ], + "score": 1.0, + "content": "Figure 2: Our method encompasses two stages: the COMPOSE stage and the ADAPT stage.", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 214, + 506, + 229 + ], + "spans": [ + { + "bbox": [ + 104, 
+ 214, + 506, + 229 + ], + "score": 1.0, + "content": "During the COMPOSE stage, existing LoRA modules are integrated into one unified mod-", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 226, + 505, + 238 + ], + "spans": [ + { + "bbox": [ + 106, + 226, + 311, + 238 + ], + "score": 1.0, + "content": "ule, employing a set of coefficients, denoted as", + "type": "text" + }, + { + "bbox": [ + 312, + 227, + 321, + 236 + ], + "score": 0.39, + "content": "w", + "type": "inline_equation" + }, + { + "bbox": [ + 321, + 226, + 505, + 238 + ], + "score": 1.0, + "content": ". In the ADAPT stage, the combined LoRA", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 236, + 505, + 249 + ], + "spans": [ + { + "bbox": [ + 105, + 236, + 505, + 249 + ], + "score": 1.0, + "content": "module is evaluated on a few examples from the unseen task. Subsequently, a gradient-free", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 247, + 505, + 261 + ], + "spans": [ + { + "bbox": [ + 104, + 247, + 238, + 261 + ], + "score": 1.0, + "content": "algorithm is applied to refine", + "type": "text" + }, + { + "bbox": [ + 239, + 250, + 248, + 258 + ], + "score": 0.55, + "content": "w", + "type": "inline_equation" + }, + { + "bbox": [ + 248, + 247, + 324, + 261 + ], + "score": 1.0, + "content": ". 
After executing", + "type": "text" + }, + { + "bbox": [ + 324, + 248, + 333, + 258 + ], + "score": 0.48, + "content": "K", + "type": "inline_equation" + }, + { + "bbox": [ + 333, + 247, + 505, + 261 + ], + "score": 1.0, + "content": "iterations, a highly adapted combined", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 257, + 505, + 272 + ], + "spans": [ + { + "bbox": [ + 104, + 257, + 505, + 272 + ], + "score": 1.0, + "content": "LoRA module is produced, which can be incorporated with the LLM to perform the in-", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 269, + 163, + 282 + ], + "spans": [ + { + "bbox": [ + 105, + 269, + 163, + 282 + ], + "score": 1.0, + "content": "tended task.", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 6 + } + ], + "index": 3.5 + }, + { + "type": "text", + "bbox": [ + 106, + 302, + 505, + 371 + ], + "lines": [ + { + "bbox": [ + 106, + 302, + 505, + 315 + ], + "spans": [ + { + "bbox": [ + 106, + 302, + 505, + 315 + ], + "score": 1.0, + "content": "low-rank matrices. 
In more specific terms, LoRA exhibits the updated weight matrix in", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 312, + 506, + 329 + ], + "spans": [ + { + "bbox": [ + 105, + 312, + 149, + 329 + ], + "score": 1.0, + "content": "the form", + "type": "text" + }, + { + "bbox": [ + 149, + 315, + 250, + 327 + ], + "score": 0.9, + "content": "W _ { 0 } + \\delta W = W _ { 0 } + A B ,", + "type": "inline_equation" + }, + { + "bbox": [ + 251, + 312, + 285, + 329 + ], + "score": 1.0, + "content": "where", + "type": "text" + }, + { + "bbox": [ + 285, + 314, + 333, + 326 + ], + "score": 0.91, + "content": "A \\in \\mathbb { R } ^ { d \\times r }", + "type": "inline_equation" + }, + { + "bbox": [ + 333, + 312, + 354, + 329 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 355, + 314, + 401, + 326 + ], + "score": 0.91, + "content": "B \\in \\mathbb { R } ^ { r \\times k }", + "type": "inline_equation" + }, + { + "bbox": [ + 401, + 312, + 506, + 329 + ], + "score": 1.0, + "content": "are trainable low-rank", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 326, + 505, + 339 + ], + "spans": [ + { + "bbox": [ + 105, + 326, + 190, + 339 + ], + "score": 1.0, + "content": "matrices with rank", + "type": "text" + }, + { + "bbox": [ + 190, + 328, + 198, + 337 + ], + "score": 0.7, + "content": "r ,", + "type": "inline_equation" + }, + { + "bbox": [ + 198, + 326, + 403, + 339 + ], + "score": 1.0, + "content": "a dimension significantly smaller than those of", + "type": "text" + }, + { + "bbox": [ + 403, + 327, + 410, + 337 + ], + "score": 0.72, + "content": "d", + "type": "inline_equation" + }, + { + "bbox": [ + 410, + 326, + 428, + 339 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 428, + 327, + 435, + 336 + ], + "score": 0.67, + "content": "k", + "type": "inline_equation" + }, + { + "bbox": [ + 435, + 326, + 505, + 339 + ], + "score": 1.0, + "content": ". 
In this context,", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 336, + 506, + 351 + ], + "spans": [ + { + "bbox": [ + 105, + 336, + 161, + 351 + ], + "score": 1.0, + "content": "the product", + "type": "text" + }, + { + "bbox": [ + 161, + 338, + 177, + 348 + ], + "score": 0.48, + "content": "A B", + "type": "inline_equation" + }, + { + "bbox": [ + 177, + 336, + 295, + 351 + ], + "score": 1.0, + "content": "defines the LoRA module", + "type": "text" + }, + { + "bbox": [ + 295, + 339, + 306, + 348 + ], + "score": 0.56, + "content": "m ,", + "type": "inline_equation" + }, + { + "bbox": [ + 306, + 336, + 506, + 351 + ], + "score": 1.0, + "content": ", as previously elaborated. By leveraging the", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 347, + 505, + 362 + ], + "spans": [ + { + "bbox": [ + 105, + 347, + 505, + 362 + ], + "score": 1.0, + "content": "low-rank decomposition, LoRA substantially reduces the number of trainable parameters", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 358, + 363, + 374 + ], + "spans": [ + { + "bbox": [ + 104, + 358, + 363, + 374 + ], + "score": 1.0, + "content": "needed to adapt the weights of LLMs duriing fine-tuning.", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 12.5 + }, + { + "type": "title", + "bbox": [ + 107, + 384, + 386, + 396 + ], + "lines": [ + { + "bbox": [ + 105, + 384, + 387, + 398 + ], + "spans": [ + { + "bbox": [ + 105, + 384, + 387, + 398 + ], + "score": 1.0, + "content": "3.3 COMPOSE: Element-wise composition of LoRA modules", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 16 + }, + { + "type": "text", + "bbox": [ + 107, + 405, + 504, + 450 + ], + "lines": [ + { + "bbox": [ + 105, + 404, + 506, + 418 + ], + "spans": [ + { + "bbox": [ + 105, + 404, + 506, + 418 + ], + "score": 1.0, + "content": "Within the COMPOSE stage, we implement an element-wise method to combine LoRA", + "type": "text" + } + ], + "index": 17 + }, + 
{ + "bbox": [ + 105, + 415, + 506, + 429 + ], + "spans": [ + { + "bbox": [ + 105, + 415, + 506, + 429 + ], + "score": 1.0, + "content": "modules. This process integrates the corresponding parameters of the LoRA modules,", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 427, + 505, + 440 + ], + "spans": [ + { + "bbox": [ + 105, + 427, + 378, + 440 + ], + "score": 1.0, + "content": "requiring the modules being combined to have the same rank", + "type": "text" + }, + { + "bbox": [ + 378, + 429, + 384, + 437 + ], + "score": 0.78, + "content": "r", + "type": "inline_equation" + }, + { + "bbox": [ + 385, + 427, + 505, + 440 + ], + "score": 1.0, + "content": "to properly align the struc-", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 437, + 459, + 451 + ], + "spans": [ + { + "bbox": [ + 105, + 437, + 183, + 451 + ], + "score": 1.0, + "content": "tures. Given that", + "type": "text" + }, + { + "bbox": [ + 183, + 438, + 231, + 450 + ], + "score": 0.92, + "content": "m _ { i } = A _ { i } B _ { i } ,", + "type": "inline_equation" + }, + { + "bbox": [ + 232, + 437, + 360, + 451 + ], + "score": 1.0, + "content": "the combined LoRA module", + "type": "text" + }, + { + "bbox": [ + 360, + 439, + 369, + 448 + ], + "score": 0.56, + "content": "\\hat { m }", + "type": "inline_equation" + }, + { + "bbox": [ + 370, + 437, + 459, + 451 + ], + "score": 1.0, + "content": "can be obtained by:", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 18.5 + }, + { + "type": "interline_equation", + "bbox": [ + 159, + 455, + 452, + 469 + ], + "lines": [ + { + "bbox": [ + 159, + 455, + 452, + 469 + ], + "spans": [ + { + "bbox": [ + 159, + 455, + 452, + 469 + ], + "score": 0.88, + "content": "\\hat { m } = ( w _ { 1 } A _ { 1 } + w _ { 2 } A _ { 2 } + \\cdot \\cdot \\cdot + w _ { N } A _ { N } ) ( w _ { 1 } B _ { 1 } + w _ { 2 } B _ { 2 } + \\cdot \\cdot \\cdot + w _ { N } B _ { N } ) .", + "type": "interline_equation", + "image_path": 
"622ebc57ca6de9adfd25eb29ad01864fdee3e77678a974073dcd3f51edc1c592.jpg" + } + ] + } + ], + "index": 21, + "virtual_lines": [ + { + "bbox": [ + 159, + 455, + 452, + 469 + ], + "spans": [], + "index": 21 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 479, + 505, + 535 + ], + "lines": [ + { + "bbox": [ + 105, + 479, + 505, + 493 + ], + "spans": [ + { + "bbox": [ + 105, + 479, + 505, + 493 + ], + "score": 1.0, + "content": "Notbly, as we show in Sec. 5, combining too many LoRA modules at once can expand", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 491, + 505, + 504 + ], + "spans": [ + { + "bbox": [ + 105, + 491, + 505, + 504 + ], + "score": 1.0, + "content": "the search space exponentially, which may destabilize the LoraHub learning process and", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 104, + 501, + 506, + 516 + ], + "spans": [ + { + "bbox": [ + 104, + 501, + 506, + 516 + ], + "score": 1.0, + "content": "prevent optimal performance. To mitigate this, we employ random selection to prune the", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 511, + 505, + 527 + ], + "spans": [ + { + "bbox": [ + 105, + 511, + 505, + 527 + ], + "score": 1.0, + "content": "candidate space, and more advanced pre-filtering algorithms could be explored in the fu-", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 523, + 130, + 537 + ], + "spans": [ + { + "bbox": [ + 105, + 523, + 130, + 537 + ], + "score": 1.0, + "content": "ture.", + "type": "text" + } + ], + "index": 26 + } + ], + "index": 24 + }, + { + "type": "title", + "bbox": [ + 107, + 549, + 383, + 561 + ], + "lines": [ + { + "bbox": [ + 105, + 548, + 384, + 563 + ], + "spans": [ + { + "bbox": [ + 105, + 548, + 384, + 563 + ], + "score": 1.0, + "content": "3.4 ADAPT: Weight optimization via gradient-free methods", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 27 + }, + { + "type": "text", + "bbox": [ + 107, + 569, + 505, + 
658 + ], + "lines": [ + { + "bbox": [ + 105, + 569, + 505, + 583 + ], + "spans": [ + { + "bbox": [ + 105, + 569, + 383, + 583 + ], + "score": 1.0, + "content": "During the ADAPT stage, our goal is to modify the coefficients", + "type": "text" + }, + { + "bbox": [ + 383, + 572, + 392, + 580 + ], + "score": 0.52, + "content": "w", + "type": "inline_equation" + }, + { + "bbox": [ + 392, + 569, + 505, + 583 + ], + "score": 1.0, + "content": "to boost the model’s per-", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 581, + 505, + 594 + ], + "spans": [ + { + "bbox": [ + 105, + 581, + 505, + 594 + ], + "score": 1.0, + "content": "formace on the examples from an unseen task. One might think of using gradient descent", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 591, + 505, + 605 + ], + "spans": [ + { + "bbox": [ + 105, + 591, + 159, + 605 + ], + "score": 1.0, + "content": "to optimize", + "type": "text" + }, + { + "bbox": [ + 159, + 593, + 169, + 603 + ], + "score": 0.63, + "content": "w ,", + "type": "inline_equation" + }, + { + "bbox": [ + 170, + 591, + 505, + 605 + ], + "score": 1.0, + "content": "following standard backpropagation methods. However, this approach de-", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 603, + 505, + 614 + ], + "spans": [ + { + "bbox": [ + 105, + 603, + 505, + 614 + ], + "score": 1.0, + "content": "mands constructing a hypernetwork for all LoRA modules, similar to differentiable archi-", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 613, + 505, + 627 + ], + "spans": [ + { + "bbox": [ + 105, + 613, + 505, + 627 + ], + "score": 1.0, + "content": "tecture search methods (Zhang et al., 2019). 
Constructing these hypernetworks demands", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 623, + 505, + 638 + ], + "spans": [ + { + "bbox": [ + 105, + 623, + 420, + 638 + ], + "score": 1.0, + "content": "for substantial GPU memory and time, posing a challenge. Given that", + "type": "text" + }, + { + "bbox": [ + 421, + 626, + 430, + 635 + ], + "score": 0.47, + "content": "w", + "type": "inline_equation" + }, + { + "bbox": [ + 430, + 623, + 505, + 638 + ], + "score": 1.0, + "content": "consists of a rel-", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 635, + 505, + 650 + ], + "spans": [ + { + "bbox": [ + 105, + 635, + 505, + 650 + ], + "score": 1.0, + "content": "atively small number of parameters, we opted for gradient-free methods for optimization", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 647, + 230, + 659 + ], + "spans": [ + { + "bbox": [ + 105, + 647, + 230, + 659 + ], + "score": 1.0, + "content": "instead of gradient descent.", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 31.5 + }, + { + "type": "text", + "bbox": [ + 107, + 663, + 505, + 733 + ], + "lines": [ + { + "bbox": [ + 105, + 662, + 506, + 677 + ], + "spans": [ + { + "bbox": [ + 105, + 662, + 506, + 677 + ], + "score": 1.0, + "content": "Inspired by previous work (Sun et al., 2022), we utilize a black-box optimization technique", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 104, + 672, + 506, + 690 + ], + "spans": [ + { + "bbox": [ + 104, + 672, + 190, + 690 + ], + "score": 1.0, + "content": "to find the optimal", + "type": "text" + }, + { + "bbox": [ + 190, + 676, + 199, + 685 + ], + "score": 0.59, + "content": "w", + "type": "inline_equation" + }, + { + "bbox": [ + 199, + 672, + 506, + 690 + ], + "score": 1.0, + "content": ". 
The optimization process is steered by the cross-entropy loss, setting", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 684, + 506, + 699 + ], + "spans": [ + { + "bbox": [ + 105, + 684, + 245, + 699 + ], + "score": 1.0, + "content": "the goal to locate the best set", + "type": "text" + }, + { + "bbox": [ + 246, + 685, + 321, + 698 + ], + "score": 0.92, + "content": "\\left\\{ w _ { 1 } , w _ { 2 } , \\ldots , w _ { N } \\right\\}", + "type": "inline_equation" + }, + { + "bbox": [ + 321, + 684, + 421, + 699 + ], + "score": 1.0, + "content": "that reduces the loss", + "type": "text" + }, + { + "bbox": [ + 421, + 686, + 429, + 695 + ], + "score": 0.62, + "content": "L", + "type": "inline_equation" + }, + { + "bbox": [ + 429, + 684, + 506, + 699 + ], + "score": 1.0, + "content": "on the few-shot", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 696, + 505, + 709 + ], + "spans": [ + { + "bbox": [ + 105, + 696, + 151, + 709 + ], + "score": 1.0, + "content": "examples", + "type": "text" + }, + { + "bbox": [ + 151, + 697, + 161, + 708 + ], + "score": 0.41, + "content": "Q", + "type": "inline_equation" + }, + { + "bbox": [ + 162, + 696, + 505, + 709 + ], + "score": 1.0, + "content": ". Furthermore, we incorporate L1 regularization to penalize the sum of the", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 706, + 506, + 721 + ], + "spans": [ + { + "bbox": [ + 105, + 706, + 188, + 721 + ], + "score": 1.0, + "content": "absolute values of", + "type": "text" + }, + { + "bbox": [ + 189, + 709, + 198, + 718 + ], + "score": 0.66, + "content": "w _ { . }", + "type": "inline_equation" + }, + { + "bbox": [ + 198, + 706, + 506, + 721 + ], + "score": 1.0, + "content": ", helping to prevent obtaining extreme values. 
Consequently, the final", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 104, + 716, + 507, + 736 + ], + "spans": [ + { + "bbox": [ + 104, + 716, + 267, + 736 + ], + "score": 1.0, + "content": "objective of LoraHub is to minimize", + "type": "text" + }, + { + "bbox": [ + 267, + 719, + 339, + 733 + ], + "score": 0.93, + "content": "\\begin{array} { r } { L + \\alpha \\cdot \\sum _ { i = 1 } ^ { N } | \\dot { w } _ { i } | , } \\end{array}", + "type": "inline_equation" + }, + { + "bbox": [ + 339, + 716, + 371, + 736 + ], + "score": 1.0, + "content": ", where", + "type": "text" + }, + { + "bbox": [ + 372, + 722, + 378, + 730 + ], + "score": 0.67, + "content": "\\alpha", + "type": "inline_equation" + }, + { + "bbox": [ + 379, + 716, + 507, + 736 + ], + "score": 1.0, + "content": "serves as a hyperparameter.", + "type": "text" + } + ], + "index": 41 + } + ], + "index": 38.5 + } + ], + "page_idx": 3, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 106, + 27, + 316, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "score": 1.0, + "content": "Published as a conference paper at COLM 2024", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 302, + 751, + 308, + 760 + ], + "lines": [ + { + "bbox": [ + 301, + 750, + 310, + 762 + ], + "spans": [ + { + "bbox": [ + 301, + 750, + 310, + 762 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 12, + "width": 9 + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "image", + "bbox": [ + 110, + 78, + 501, + 196 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 110, + 78, + 501, + 196 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 110, + 78, + 501, + 196 + ], + "spans": [ + { + "bbox": [ + 110, + 78, + 501, + 196 + ], + "score": 0.967, + "type": "image", + "image_path": 
"fdc28d30d1864590ed2196198df1e30168cf83fc2b25e930c617edf738bdbc3b.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 110, + 78, + 501, + 117.33333333333334 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 110, + 117.33333333333334, + 501, + 156.66666666666669 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 110, + 156.66666666666669, + 501, + 196.00000000000003 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 203, + 505, + 281 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 202, + 505, + 218 + ], + "spans": [ + { + "bbox": [ + 105, + 202, + 505, + 218 + ], + "score": 1.0, + "content": "Figure 2: Our method encompasses two stages: the COMPOSE stage and the ADAPT stage.", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 214, + 506, + 229 + ], + "spans": [ + { + "bbox": [ + 104, + 214, + 506, + 229 + ], + "score": 1.0, + "content": "During the COMPOSE stage, existing LoRA modules are integrated into one unified mod-", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 226, + 505, + 238 + ], + "spans": [ + { + "bbox": [ + 106, + 226, + 311, + 238 + ], + "score": 1.0, + "content": "ule, employing a set of coefficients, denoted as", + "type": "text" + }, + { + "bbox": [ + 312, + 227, + 321, + 236 + ], + "score": 0.39, + "content": "w", + "type": "inline_equation" + }, + { + "bbox": [ + 321, + 226, + 505, + 238 + ], + "score": 1.0, + "content": ". In the ADAPT stage, the combined LoRA", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 236, + 505, + 249 + ], + "spans": [ + { + "bbox": [ + 105, + 236, + 505, + 249 + ], + "score": 1.0, + "content": "module is evaluated on a few examples from the unseen task. 
Subsequently, a gradient-free", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 247, + 505, + 261 + ], + "spans": [ + { + "bbox": [ + 104, + 247, + 238, + 261 + ], + "score": 1.0, + "content": "algorithm is applied to refine", + "type": "text" + }, + { + "bbox": [ + 239, + 250, + 248, + 258 + ], + "score": 0.55, + "content": "w", + "type": "inline_equation" + }, + { + "bbox": [ + 248, + 247, + 324, + 261 + ], + "score": 1.0, + "content": ". After executing", + "type": "text" + }, + { + "bbox": [ + 324, + 248, + 333, + 258 + ], + "score": 0.48, + "content": "K", + "type": "inline_equation" + }, + { + "bbox": [ + 333, + 247, + 505, + 261 + ], + "score": 1.0, + "content": "iterations, a highly adapted combined", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 257, + 505, + 272 + ], + "spans": [ + { + "bbox": [ + 104, + 257, + 505, + 272 + ], + "score": 1.0, + "content": "LoRA module is produced, which can be incorporated with the LLM to perform the in-", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 269, + 163, + 282 + ], + "spans": [ + { + "bbox": [ + 105, + 269, + 163, + 282 + ], + "score": 1.0, + "content": "tended task.", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 6 + } + ], + "index": 3.5 + }, + { + "type": "text", + "bbox": [ + 106, + 302, + 505, + 371 + ], + "lines": [], + "index": 12.5, + "bbox_fs": [ + 104, + 302, + 506, + 374 + ], + "lines_deleted": true + }, + { + "type": "title", + "bbox": [ + 107, + 384, + 386, + 396 + ], + "lines": [ + { + "bbox": [ + 105, + 384, + 387, + 398 + ], + "spans": [ + { + "bbox": [ + 105, + 384, + 387, + 398 + ], + "score": 1.0, + "content": "3.3 COMPOSE: Element-wise composition of LoRA modules", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 16 + }, + { + "type": "text", + "bbox": [ + 107, + 405, + 504, + 450 + ], + "lines": [ + { + "bbox": [ + 105, + 404, + 506, + 418 + ], + "spans": [ + { + "bbox": [ + 105, + 404, + 506, + 418 + 
], + "score": 1.0, + "content": "Within the COMPOSE stage, we implement an element-wise method to combine LoRA", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 415, + 506, + 429 + ], + "spans": [ + { + "bbox": [ + 105, + 415, + 506, + 429 + ], + "score": 1.0, + "content": "modules. This process integrates the corresponding parameters of the LoRA modules,", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 427, + 505, + 440 + ], + "spans": [ + { + "bbox": [ + 105, + 427, + 378, + 440 + ], + "score": 1.0, + "content": "requiring the modules being combined to have the same rank", + "type": "text" + }, + { + "bbox": [ + 378, + 429, + 384, + 437 + ], + "score": 0.78, + "content": "r", + "type": "inline_equation" + }, + { + "bbox": [ + 385, + 427, + 505, + 440 + ], + "score": 1.0, + "content": "to properly align the struc-", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 437, + 459, + 451 + ], + "spans": [ + { + "bbox": [ + 105, + 437, + 183, + 451 + ], + "score": 1.0, + "content": "tures. 
Given that", + "type": "text" + }, + { + "bbox": [ + 183, + 438, + 231, + 450 + ], + "score": 0.92, + "content": "m _ { i } = A _ { i } B _ { i } ,", + "type": "inline_equation" + }, + { + "bbox": [ + 232, + 437, + 360, + 451 + ], + "score": 1.0, + "content": "the combined LoRA module", + "type": "text" + }, + { + "bbox": [ + 360, + 439, + 369, + 448 + ], + "score": 0.56, + "content": "\\hat { m }", + "type": "inline_equation" + }, + { + "bbox": [ + 370, + 437, + 459, + 451 + ], + "score": 1.0, + "content": "can be obtained by:", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 18.5, + "bbox_fs": [ + 105, + 404, + 506, + 451 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 159, + 455, + 452, + 469 + ], + "lines": [ + { + "bbox": [ + 159, + 455, + 452, + 469 + ], + "spans": [ + { + "bbox": [ + 159, + 455, + 452, + 469 + ], + "score": 0.88, + "content": "\\hat { m } = ( w _ { 1 } A _ { 1 } + w _ { 2 } A _ { 2 } + \\cdot \\cdot \\cdot + w _ { N } A _ { N } ) ( w _ { 1 } B _ { 1 } + w _ { 2 } B _ { 2 } + \\cdot \\cdot \\cdot + w _ { N } B _ { N } ) .", + "type": "interline_equation", + "image_path": "622ebc57ca6de9adfd25eb29ad01864fdee3e77678a974073dcd3f51edc1c592.jpg" + } + ] + } + ], + "index": 21, + "virtual_lines": [ + { + "bbox": [ + 159, + 455, + 452, + 469 + ], + "spans": [], + "index": 21 + } + ] + }, + { + "type": "text", + "bbox": [ + 107, + 479, + 505, + 535 + ], + "lines": [ + { + "bbox": [ + 105, + 479, + 505, + 493 + ], + "spans": [ + { + "bbox": [ + 105, + 479, + 505, + 493 + ], + "score": 1.0, + "content": "Notbly, as we show in Sec. 
5, combining too many LoRA modules at once can expand", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 491, + 505, + 504 + ], + "spans": [ + { + "bbox": [ + 105, + 491, + 505, + 504 + ], + "score": 1.0, + "content": "the search space exponentially, which may destabilize the LoraHub learning process and", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 104, + 501, + 506, + 516 + ], + "spans": [ + { + "bbox": [ + 104, + 501, + 506, + 516 + ], + "score": 1.0, + "content": "prevent optimal performance. To mitigate this, we employ random selection to prune the", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 511, + 505, + 527 + ], + "spans": [ + { + "bbox": [ + 105, + 511, + 505, + 527 + ], + "score": 1.0, + "content": "candidate space, and more advanced pre-filtering algorithms could be explored in the fu-", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 523, + 130, + 537 + ], + "spans": [ + { + "bbox": [ + 105, + 523, + 130, + 537 + ], + "score": 1.0, + "content": "ture.", + "type": "text" + } + ], + "index": 26 + } + ], + "index": 24, + "bbox_fs": [ + 104, + 479, + 506, + 537 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 549, + 383, + 561 + ], + "lines": [ + { + "bbox": [ + 105, + 548, + 384, + 563 + ], + "spans": [ + { + "bbox": [ + 105, + 548, + 384, + 563 + ], + "score": 1.0, + "content": "3.4 ADAPT: Weight optimization via gradient-free methods", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 27 + }, + { + "type": "text", + "bbox": [ + 107, + 569, + 505, + 658 + ], + "lines": [ + { + "bbox": [ + 105, + 569, + 505, + 583 + ], + "spans": [ + { + "bbox": [ + 105, + 569, + 383, + 583 + ], + "score": 1.0, + "content": "During the ADAPT stage, our goal is to modify the coefficients", + "type": "text" + }, + { + "bbox": [ + 383, + 572, + 392, + 580 + ], + "score": 0.52, + "content": "w", + "type": "inline_equation" + }, + { + "bbox": [ + 392, + 569, + 505, + 583 + ], 
+ "score": 1.0, + "content": "to boost the model’s per-", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 581, + 505, + 594 + ], + "spans": [ + { + "bbox": [ + 105, + 581, + 505, + 594 + ], + "score": 1.0, + "content": "formace on the examples from an unseen task. One might think of using gradient descent", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 591, + 505, + 605 + ], + "spans": [ + { + "bbox": [ + 105, + 591, + 159, + 605 + ], + "score": 1.0, + "content": "to optimize", + "type": "text" + }, + { + "bbox": [ + 159, + 593, + 169, + 603 + ], + "score": 0.63, + "content": "w ,", + "type": "inline_equation" + }, + { + "bbox": [ + 170, + 591, + 505, + 605 + ], + "score": 1.0, + "content": "following standard backpropagation methods. However, this approach de-", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 603, + 505, + 614 + ], + "spans": [ + { + "bbox": [ + 105, + 603, + 505, + 614 + ], + "score": 1.0, + "content": "mands constructing a hypernetwork for all LoRA modules, similar to differentiable archi-", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 613, + 505, + 627 + ], + "spans": [ + { + "bbox": [ + 105, + 613, + 505, + 627 + ], + "score": 1.0, + "content": "tecture search methods (Zhang et al., 2019). Constructing these hypernetworks demands", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 623, + 505, + 638 + ], + "spans": [ + { + "bbox": [ + 105, + 623, + 420, + 638 + ], + "score": 1.0, + "content": "for substantial GPU memory and time, posing a challenge. 
Given that", + "type": "text" + }, + { + "bbox": [ + 421, + 626, + 430, + 635 + ], + "score": 0.47, + "content": "w", + "type": "inline_equation" + }, + { + "bbox": [ + 430, + 623, + 505, + 638 + ], + "score": 1.0, + "content": "consists of a rel-", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 635, + 505, + 650 + ], + "spans": [ + { + "bbox": [ + 105, + 635, + 505, + 650 + ], + "score": 1.0, + "content": "atively small number of parameters, we opted for gradient-free methods for optimization", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 647, + 230, + 659 + ], + "spans": [ + { + "bbox": [ + 105, + 647, + 230, + 659 + ], + "score": 1.0, + "content": "instead of gradient descent.", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 31.5, + "bbox_fs": [ + 105, + 569, + 505, + 659 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 663, + 505, + 733 + ], + "lines": [ + { + "bbox": [ + 105, + 662, + 506, + 677 + ], + "spans": [ + { + "bbox": [ + 105, + 662, + 506, + 677 + ], + "score": 1.0, + "content": "Inspired by previous work (Sun et al., 2022), we utilize a black-box optimization technique", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 104, + 672, + 506, + 690 + ], + "spans": [ + { + "bbox": [ + 104, + 672, + 190, + 690 + ], + "score": 1.0, + "content": "to find the optimal", + "type": "text" + }, + { + "bbox": [ + 190, + 676, + 199, + 685 + ], + "score": 0.59, + "content": "w", + "type": "inline_equation" + }, + { + "bbox": [ + 199, + 672, + 506, + 690 + ], + "score": 1.0, + "content": ". 
The optimization process is steered by the cross-entropy loss, setting", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 684, + 506, + 699 + ], + "spans": [ + { + "bbox": [ + 105, + 684, + 245, + 699 + ], + "score": 1.0, + "content": "the goal to locate the best set", + "type": "text" + }, + { + "bbox": [ + 246, + 685, + 321, + 698 + ], + "score": 0.92, + "content": "\\left\\{ w _ { 1 } , w _ { 2 } , \\ldots , w _ { N } \\right\\}", + "type": "inline_equation" + }, + { + "bbox": [ + 321, + 684, + 421, + 699 + ], + "score": 1.0, + "content": "that reduces the loss", + "type": "text" + }, + { + "bbox": [ + 421, + 686, + 429, + 695 + ], + "score": 0.62, + "content": "L", + "type": "inline_equation" + }, + { + "bbox": [ + 429, + 684, + 506, + 699 + ], + "score": 1.0, + "content": "on the few-shot", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 696, + 505, + 709 + ], + "spans": [ + { + "bbox": [ + 105, + 696, + 151, + 709 + ], + "score": 1.0, + "content": "examples", + "type": "text" + }, + { + "bbox": [ + 151, + 697, + 161, + 708 + ], + "score": 0.41, + "content": "Q", + "type": "inline_equation" + }, + { + "bbox": [ + 162, + 696, + 505, + 709 + ], + "score": 1.0, + "content": ". Furthermore, we incorporate L1 regularization to penalize the sum of the", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 706, + 506, + 721 + ], + "spans": [ + { + "bbox": [ + 105, + 706, + 188, + 721 + ], + "score": 1.0, + "content": "absolute values of", + "type": "text" + }, + { + "bbox": [ + 189, + 709, + 198, + 718 + ], + "score": 0.66, + "content": "w _ { . }", + "type": "inline_equation" + }, + { + "bbox": [ + 198, + 706, + 506, + 721 + ], + "score": 1.0, + "content": ", helping to prevent obtaining extreme values. 
Consequently, the final", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 104, + 716, + 507, + 736 + ], + "spans": [ + { + "bbox": [ + 104, + 716, + 267, + 736 + ], + "score": 1.0, + "content": "objective of LoraHub is to minimize", + "type": "text" + }, + { + "bbox": [ + 267, + 719, + 339, + 733 + ], + "score": 0.93, + "content": "\\begin{array} { r } { L + \\alpha \\cdot \\sum _ { i = 1 } ^ { N } | \\dot { w } _ { i } | , } \\end{array}", + "type": "inline_equation" + }, + { + "bbox": [ + 339, + 716, + 371, + 736 + ], + "score": 1.0, + "content": ", where", + "type": "text" + }, + { + "bbox": [ + 372, + 722, + 378, + 730 + ], + "score": 0.67, + "content": "\\alpha", + "type": "inline_equation" + }, + { + "bbox": [ + 379, + 716, + 507, + 736 + ], + "score": 1.0, + "content": "serves as a hyperparameter.", + "type": "text" + } + ], + "index": 41 + } + ], + "index": 38.5, + "bbox_fs": [ + 104, + 662, + 507, + 736 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 107, + 82, + 505, + 203 + ], + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 97 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 97 + ], + "score": 1.0, + "content": "In terms of the gradient-free method, we leverage Shiwa, a combinatorial optimization ap-", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 92, + 506, + 108 + ], + "spans": [ + { + "bbox": [ + 105, + 92, + 506, + 108 + ], + "score": 1.0, + "content": "proach (Liu et al., 2020). Shiwa offers a variety of algorithms and chooses the most suitable", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 103, + 506, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 103, + 506, + 118 + ], + "score": 1.0, + "content": "optimization algorithm for different circumstances. 
In most of the forthcoming experi-", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 115, + 505, + 129 + ], + "spans": [ + { + "bbox": [ + 105, + 115, + 505, + 129 + ], + "score": 1.0, + "content": "mental setups, we primarily employ the Covariance Matrix Adaptive Evolution Strategies", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 126, + 505, + 140 + ], + "spans": [ + { + "bbox": [ + 105, + 126, + 505, + 140 + ], + "score": 1.0, + "content": "(CMA-ES) (Hansen & Ostermeier, 1996). CMA-ES, as a stochastic and population-based", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 137, + 505, + 151 + ], + "spans": [ + { + "bbox": [ + 105, + 137, + 505, + 151 + ], + "score": 1.0, + "content": "optimization algorithm, offers versatility in addressing a broad spectrum of optimization", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 147, + 505, + 162 + ], + "spans": [ + { + "bbox": [ + 105, + 147, + 505, + 162 + ], + "score": 1.0, + "content": "challenges. It dynamically adjusts a search distribution, which is defined by a covariance", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 159, + 505, + 172 + ], + "spans": [ + { + "bbox": [ + 105, + 159, + 505, + 172 + ], + "score": 1.0, + "content": "matrix. During each iteration, CMA-ES systematically updates both the mean and covari-", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 170, + 505, + 184 + ], + "spans": [ + { + "bbox": [ + 105, + 170, + 505, + 184 + ], + "score": 1.0, + "content": "ance of this distribution to optimize the target function. In our application, we employ this", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 180, + 505, + 195 + ], + "spans": [ + { + "bbox": [ + 105, + 180, + 482, + 195 + ], + "score": 1.0, + "content": "algorithm to mold the search space for w. 
Ultimately, we use it to identify the optimal", + "type": "text" + }, + { + "bbox": [ + 482, + 183, + 491, + 191 + ], + "score": 0.57, + "content": "w", + "type": "inline_equation" + }, + { + "bbox": [ + 491, + 180, + 505, + 195 + ], + "score": 1.0, + "content": "by", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 192, + 447, + 204 + ], + "spans": [ + { + "bbox": [ + 105, + 192, + 447, + 204 + ], + "score": 1.0, + "content": "evaluating their performance on the few-shot examples from an unseen task.", + "type": "text" + } + ], + "index": 10 + } + ], + "index": 5 + }, + { + "type": "title", + "bbox": [ + 107, + 221, + 243, + 235 + ], + "lines": [ + { + "bbox": [ + 104, + 220, + 244, + 237 + ], + "spans": [ + { + "bbox": [ + 104, + 220, + 244, + 237 + ], + "score": 1.0, + "content": "4 Experimental Results", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 11 + }, + { + "type": "text", + "bbox": [ + 107, + 248, + 505, + 281 + ], + "lines": [ + { + "bbox": [ + 105, + 248, + 506, + 261 + ], + "spans": [ + { + "bbox": [ + 105, + 248, + 506, + 261 + ], + "score": 1.0, + "content": "In this section, we provide details on our main experiments. First, we give an overview of", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 258, + 506, + 274 + ], + "spans": [ + { + "bbox": [ + 105, + 258, + 506, + 274 + ], + "score": 1.0, + "content": "the experimental setup and implementation details. 
Next, we present our findings along", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 271, + 179, + 282 + ], + "spans": [ + { + "bbox": [ + 106, + 271, + 179, + 282 + ], + "score": 1.0, + "content": "with the results.", + "type": "text" + } + ], + "index": 14 + } + ], + "index": 13 + }, + { + "type": "title", + "bbox": [ + 107, + 298, + 218, + 310 + ], + "lines": [ + { + "bbox": [ + 104, + 294, + 220, + 315 + ], + "spans": [ + { + "bbox": [ + 104, + 294, + 220, + 315 + ], + "score": 1.0, + "content": "4.1 Experimental setup", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 15 + }, + { + "type": "text", + "bbox": [ + 108, + 319, + 503, + 353 + ], + "lines": [ + { + "bbox": [ + 105, + 318, + 505, + 333 + ], + "spans": [ + { + "bbox": [ + 105, + 318, + 505, + 333 + ], + "score": 1.0, + "content": "Large Language Model In our main experiments, we employ FLAN-T5 (Chung et al.,", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 329, + 504, + 343 + ], + "spans": [ + { + "bbox": [ + 106, + 329, + 504, + 343 + ], + "score": 1.0, + "content": "2022), particularly FLAN-T5-large, as the base LLM. 
The model has shown impressive abil-", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 339, + 322, + 356 + ], + "spans": [ + { + "bbox": [ + 105, + 339, + 322, + 356 + ], + "score": 1.0, + "content": "ities to perform zero-shot and few-shot learning.", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 17 + }, + { + "type": "text", + "bbox": [ + 106, + 367, + 505, + 434 + ], + "lines": [ + { + "bbox": [ + 106, + 367, + 506, + 381 + ], + "spans": [ + { + "bbox": [ + 106, + 367, + 506, + 381 + ], + "score": 1.0, + "content": "Candidate LoRA Modules Our methodology requires a compendium of LoRA modules", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 378, + 506, + 392 + ], + "spans": [ + { + "bbox": [ + 105, + 378, + 506, + 392 + ], + "score": 1.0, + "content": "trained on preceding tasks. For parity with FLAN, we adopt the tasks utilized to instruct", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 104, + 388, + 506, + 403 + ], + "spans": [ + { + "bbox": [ + 104, + 388, + 506, + 403 + ], + "score": 1.0, + "content": "FLAN-T5, thereby incorporating nearly 200 distinct tasks and their corresponding instruc-", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 399, + 506, + 414 + ], + "spans": [ + { + "bbox": [ + 105, + 399, + 506, + 414 + ], + "score": 1.0, + "content": "tions. Following this, we trained several LoRA modules as potential candidates. 
During", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 411, + 506, + 425 + ], + "spans": [ + { + "bbox": [ + 105, + 411, + 506, + 425 + ], + "score": 1.0, + "content": "each experimental sequence, we randomly select 20 LoRA modules from them as the can-", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 420, + 254, + 436 + ], + "spans": [ + { + "bbox": [ + 105, + 420, + 254, + 436 + ], + "score": 1.0, + "content": "didate for our LoraHub learning.", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 21.5 + }, + { + "type": "text", + "bbox": [ + 107, + 448, + 504, + 503 + ], + "lines": [ + { + "bbox": [ + 105, + 447, + 505, + 461 + ], + "spans": [ + { + "bbox": [ + 105, + 447, + 505, + 461 + ], + "score": 1.0, + "content": "Dataset and evaluation Our method is evaluated using the Big-Bench Hard (BBH) bench-", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 458, + 505, + 472 + ], + "spans": [ + { + "bbox": [ + 105, + 458, + 505, + 472 + ], + "score": 1.0, + "content": "mark, a well-established standard that consists of multiple-choice questions from a variety", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 469, + 505, + 483 + ], + "spans": [ + { + "bbox": [ + 105, + 469, + 505, + 483 + ], + "score": 1.0, + "content": "of domains. The benchmark consists of 27 different tasks, which are regarded to be chal-", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 481, + 506, + 494 + ], + "spans": [ + { + "bbox": [ + 105, + 481, + 506, + 494 + ], + "score": 1.0, + "content": "lenging for language models. 
For all tasks, we employ the exact match (EM) as our evalu-", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 492, + 164, + 504 + ], + "spans": [ + { + "bbox": [ + 105, + 492, + 164, + 504 + ], + "score": 1.0, + "content": "ation metric.", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 27 + }, + { + "type": "text", + "bbox": [ + 107, + 518, + 505, + 618 + ], + "lines": [ + { + "bbox": [ + 106, + 516, + 505, + 532 + ], + "spans": [ + { + "bbox": [ + 106, + 516, + 505, + 532 + ], + "score": 1.0, + "content": "Baseline Setup To enhance the demonstration of our method’s performance, we ex-", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 528, + 505, + 542 + ], + "spans": [ + { + "bbox": [ + 105, + 528, + 505, + 542 + ], + "score": 1.0, + "content": "panded our comparisons beyond the zero-shot and in-context learning settings. We specif-", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 104, + 537, + 506, + 555 + ], + "spans": [ + { + "bbox": [ + 104, + 537, + 506, + 555 + ], + "score": 1.0, + "content": "ically chose three representative gradient-based methods for comparison: full fine-tuning", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 550, + 505, + 564 + ], + "spans": [ + { + "bbox": [ + 105, + 550, + 505, + 564 + ], + "score": 1.0, + "content": "(FFT), LoRA tuning (LoRA) (Hu et al., 2022), and IA3 fine-tuning (IA3) (Liu et al., 2022).", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 560, + 506, + 576 + ], + "spans": [ + { + "bbox": [ + 105, + 560, + 506, + 576 + ], + "score": 1.0, + "content": "For all gradient-based methods, for a fair comparsion, we train for 40 epochs on the same", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 572, + 506, + 586 + ], + "spans": [ + { + "bbox": [ + 105, + 572, + 506, + 586 + ], + "score": 1.0, + "content": "three runs of 5 examples employed in our methods. 
In the case of FFT, a learning rate of", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 582, + 506, + 597 + ], + "spans": [ + { + "bbox": [ + 105, + 582, + 506, + 597 + ], + "score": 1.0, + "content": "3e-5 is employed, whereas for IA3 and LoRA, we adopt a learning rate of 2e-4. We report", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 593, + 505, + 609 + ], + "spans": [ + { + "bbox": [ + 105, + 593, + 505, + 609 + ], + "score": 1.0, + "content": "the performance of each method on the test set at the end of training (averaged over three", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 606, + 404, + 619 + ], + "spans": [ + { + "bbox": [ + 105, + 606, + 404, + 619 + ], + "score": 1.0, + "content": "runs) without any model selection to avoid potential selection bias.", + "type": "text" + } + ], + "index": 38 + } + ], + "index": 34 + }, + { + "type": "title", + "bbox": [ + 107, + 633, + 188, + 645 + ], + "lines": [ + { + "bbox": [ + 106, + 632, + 189, + 645 + ], + "spans": [ + { + "bbox": [ + 106, + 632, + 189, + 645 + ], + "score": 1.0, + "content": "4.2 Main results", + "type": "text" + } + ], + "index": 39 + } + ], + "index": 39 + }, + { + "type": "text", + "bbox": [ + 107, + 654, + 504, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 654, + 505, + 667 + ], + "spans": [ + { + "bbox": [ + 105, + 654, + 505, + 667 + ], + "score": 1.0, + "content": "As shown in Table 1, our experimental results demonstarte the superior efficacy of our", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 665, + 506, + 679 + ], + "spans": [ + { + "bbox": [ + 105, + 665, + 506, + 679 + ], + "score": 1.0, + "content": "method in comparison to zero-shot learning while closely resembling the performance of", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 676, + 506, + 690 + ], + "spans": [ + { + "bbox": [ + 105, + 676, + 506, + 690 + ], + "score": 1.0, + "content": "in-context 
learning (ICL) in few-shot scenarios. This observation is derived from an aver-", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 687, + 506, + 702 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 506, + 702 + ], + "score": 1.0, + "content": "age performance of three runs, each leveraging different few-shot examples. Importantly,", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 698, + 506, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 506, + 712 + ], + "score": 1.0, + "content": "our model utilizes an equivalent number of tokens as the zero-shot method, notably fewer", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 709, + 506, + 722 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 506, + 722 + ], + "score": 1.0, + "content": "than the count used by ICL. Although occasional performance fluctuations, our method", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 720, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 105, + 720, + 506, + 734 + ], + "score": 1.0, + "content": "consistently outperforms zero-shot learning in most tasks. 
In the era of LLMs, the input", + "type": "text" + } + ], + "index": 46 + } + ], + "index": 43 + } + ], + "page_idx": 4, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 27, + 316, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "score": 1.0, + "content": "Published as a conference paper at COLM 2024", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 302, + 751, + 308, + 760 + ], + "lines": [ + { + "bbox": [ + 301, + 750, + 310, + 762 + ], + "spans": [ + { + "bbox": [ + 301, + 750, + 310, + 762 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 12, + "width": 9 + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 107, + 82, + 505, + 203 + ], + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 97 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 97 + ], + "score": 1.0, + "content": "In terms of the gradient-free method, we leverage Shiwa, a combinatorial optimization ap-", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 92, + 506, + 108 + ], + "spans": [ + { + "bbox": [ + 105, + 92, + 506, + 108 + ], + "score": 1.0, + "content": "proach (Liu et al., 2020). Shiwa offers a variety of algorithms and chooses the most suitable", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 103, + 506, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 103, + 506, + 118 + ], + "score": 1.0, + "content": "optimization algorithm for different circumstances. 
In most of the forthcoming experi-", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 115, + 505, + 129 + ], + "spans": [ + { + "bbox": [ + 105, + 115, + 505, + 129 + ], + "score": 1.0, + "content": "mental setups, we primarily employ the Covariance Matrix Adaptive Evolution Strategies", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 126, + 505, + 140 + ], + "spans": [ + { + "bbox": [ + 105, + 126, + 505, + 140 + ], + "score": 1.0, + "content": "(CMA-ES) (Hansen & Ostermeier, 1996). CMA-ES, as a stochastic and population-based", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 137, + 505, + 151 + ], + "spans": [ + { + "bbox": [ + 105, + 137, + 505, + 151 + ], + "score": 1.0, + "content": "optimization algorithm, offers versatility in addressing a broad spectrum of optimization", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 147, + 505, + 162 + ], + "spans": [ + { + "bbox": [ + 105, + 147, + 505, + 162 + ], + "score": 1.0, + "content": "challenges. It dynamically adjusts a search distribution, which is defined by a covariance", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 159, + 505, + 172 + ], + "spans": [ + { + "bbox": [ + 105, + 159, + 505, + 172 + ], + "score": 1.0, + "content": "matrix. During each iteration, CMA-ES systematically updates both the mean and covari-", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 170, + 505, + 184 + ], + "spans": [ + { + "bbox": [ + 105, + 170, + 505, + 184 + ], + "score": 1.0, + "content": "ance of this distribution to optimize the target function. In our application, we employ this", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 180, + 505, + 195 + ], + "spans": [ + { + "bbox": [ + 105, + 180, + 482, + 195 + ], + "score": 1.0, + "content": "algorithm to mold the search space for w. 
Ultimately, we use it to identify the optimal", + "type": "text" + }, + { + "bbox": [ + 482, + 183, + 491, + 191 + ], + "score": 0.57, + "content": "w", + "type": "inline_equation" + }, + { + "bbox": [ + 491, + 180, + 505, + 195 + ], + "score": 1.0, + "content": "by", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 192, + 447, + 204 + ], + "spans": [ + { + "bbox": [ + 105, + 192, + 447, + 204 + ], + "score": 1.0, + "content": "evaluating their performance on the few-shot examples from an unseen task.", + "type": "text" + } + ], + "index": 10 + } + ], + "index": 5, + "bbox_fs": [ + 105, + 81, + 506, + 204 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 221, + 243, + 235 + ], + "lines": [ + { + "bbox": [ + 104, + 220, + 244, + 237 + ], + "spans": [ + { + "bbox": [ + 104, + 220, + 244, + 237 + ], + "score": 1.0, + "content": "4 Experimental Results", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 11 + }, + { + "type": "text", + "bbox": [ + 107, + 248, + 505, + 281 + ], + "lines": [ + { + "bbox": [ + 105, + 248, + 506, + 261 + ], + "spans": [ + { + "bbox": [ + 105, + 248, + 506, + 261 + ], + "score": 1.0, + "content": "In this section, we provide details on our main experiments. First, we give an overview of", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 258, + 506, + 274 + ], + "spans": [ + { + "bbox": [ + 105, + 258, + 506, + 274 + ], + "score": 1.0, + "content": "the experimental setup and implementation details. 
Next, we present our findings along", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 271, + 179, + 282 + ], + "spans": [ + { + "bbox": [ + 106, + 271, + 179, + 282 + ], + "score": 1.0, + "content": "with the results.", + "type": "text" + } + ], + "index": 14 + } + ], + "index": 13, + "bbox_fs": [ + 105, + 248, + 506, + 282 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 298, + 218, + 310 + ], + "lines": [ + { + "bbox": [ + 104, + 294, + 220, + 315 + ], + "spans": [ + { + "bbox": [ + 104, + 294, + 220, + 315 + ], + "score": 1.0, + "content": "4.1 Experimental setup", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 15 + }, + { + "type": "text", + "bbox": [ + 108, + 319, + 503, + 353 + ], + "lines": [ + { + "bbox": [ + 105, + 318, + 505, + 333 + ], + "spans": [ + { + "bbox": [ + 105, + 318, + 505, + 333 + ], + "score": 1.0, + "content": "Large Language Model In our main experiments, we employ FLAN-T5 (Chung et al.,", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 329, + 504, + 343 + ], + "spans": [ + { + "bbox": [ + 106, + 329, + 504, + 343 + ], + "score": 1.0, + "content": "2022), particularly FLAN-T5-large, as the base LLM. 
The model has shown impressive abil-", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 339, + 322, + 356 + ], + "spans": [ + { + "bbox": [ + 105, + 339, + 322, + 356 + ], + "score": 1.0, + "content": "ities to perform zero-shot and few-shot learning.", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 17, + "bbox_fs": [ + 105, + 318, + 505, + 356 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 367, + 505, + 434 + ], + "lines": [ + { + "bbox": [ + 106, + 367, + 506, + 381 + ], + "spans": [ + { + "bbox": [ + 106, + 367, + 506, + 381 + ], + "score": 1.0, + "content": "Candidate LoRA Modules Our methodology requires a compendium of LoRA modules", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 378, + 506, + 392 + ], + "spans": [ + { + "bbox": [ + 105, + 378, + 506, + 392 + ], + "score": 1.0, + "content": "trained on preceding tasks. For parity with FLAN, we adopt the tasks utilized to instruct", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 104, + 388, + 506, + 403 + ], + "spans": [ + { + "bbox": [ + 104, + 388, + 506, + 403 + ], + "score": 1.0, + "content": "FLAN-T5, thereby incorporating nearly 200 distinct tasks and their corresponding instruc-", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 399, + 506, + 414 + ], + "spans": [ + { + "bbox": [ + 105, + 399, + 506, + 414 + ], + "score": 1.0, + "content": "tions. Following this, we trained several LoRA modules as potential candidates. 
During", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 411, + 506, + 425 + ], + "spans": [ + { + "bbox": [ + 105, + 411, + 506, + 425 + ], + "score": 1.0, + "content": "each experimental sequence, we randomly select 20 LoRA modules from them as the can-", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 420, + 254, + 436 + ], + "spans": [ + { + "bbox": [ + 105, + 420, + 254, + 436 + ], + "score": 1.0, + "content": "didate for our LoraHub learning.", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 21.5, + "bbox_fs": [ + 104, + 367, + 506, + 436 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 448, + 504, + 503 + ], + "lines": [ + { + "bbox": [ + 105, + 447, + 505, + 461 + ], + "spans": [ + { + "bbox": [ + 105, + 447, + 505, + 461 + ], + "score": 1.0, + "content": "Dataset and evaluation Our method is evaluated using the Big-Bench Hard (BBH) bench-", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 458, + 505, + 472 + ], + "spans": [ + { + "bbox": [ + 105, + 458, + 505, + 472 + ], + "score": 1.0, + "content": "mark, a well-established standard that consists of multiple-choice questions from a variety", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 469, + 505, + 483 + ], + "spans": [ + { + "bbox": [ + 105, + 469, + 505, + 483 + ], + "score": 1.0, + "content": "of domains. The benchmark consists of 27 different tasks, which are regarded to be chal-", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 481, + 506, + 494 + ], + "spans": [ + { + "bbox": [ + 105, + 481, + 506, + 494 + ], + "score": 1.0, + "content": "lenging for language models. 
For all tasks, we employ the exact match (EM) as our evalu-", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 492, + 164, + 504 + ], + "spans": [ + { + "bbox": [ + 105, + 492, + 164, + 504 + ], + "score": 1.0, + "content": "ation metric.", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 27, + "bbox_fs": [ + 105, + 447, + 506, + 504 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 518, + 505, + 618 + ], + "lines": [ + { + "bbox": [ + 106, + 516, + 505, + 532 + ], + "spans": [ + { + "bbox": [ + 106, + 516, + 505, + 532 + ], + "score": 1.0, + "content": "Baseline Setup To enhance the demonstration of our method’s performance, we ex-", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 528, + 505, + 542 + ], + "spans": [ + { + "bbox": [ + 105, + 528, + 505, + 542 + ], + "score": 1.0, + "content": "panded our comparisons beyond the zero-shot and in-context learning settings. We specif-", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 104, + 537, + 506, + 555 + ], + "spans": [ + { + "bbox": [ + 104, + 537, + 506, + 555 + ], + "score": 1.0, + "content": "ically chose three representative gradient-based methods for comparison: full fine-tuning", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 550, + 505, + 564 + ], + "spans": [ + { + "bbox": [ + 105, + 550, + 505, + 564 + ], + "score": 1.0, + "content": "(FFT), LoRA tuning (LoRA) (Hu et al., 2022), and IA3 fine-tuning (IA3) (Liu et al., 2022).", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 560, + 506, + 576 + ], + "spans": [ + { + "bbox": [ + 105, + 560, + 506, + 576 + ], + "score": 1.0, + "content": "For all gradient-based methods, for a fair comparsion, we train for 40 epochs on the same", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 572, + 506, + 586 + ], + "spans": [ + { + "bbox": [ + 105, + 572, + 506, + 586 + ], + "score": 1.0, + "content": "three runs of 5 examples employed 
in our methods. In the case of FFT, a learning rate of", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 582, + 506, + 597 + ], + "spans": [ + { + "bbox": [ + 105, + 582, + 506, + 597 + ], + "score": 1.0, + "content": "3e-5 is employed, whereas for IA3 and LoRA, we adopt a learning rate of 2e-4. We report", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 593, + 505, + 609 + ], + "spans": [ + { + "bbox": [ + 105, + 593, + 505, + 609 + ], + "score": 1.0, + "content": "the performance of each method on the test set at the end of training (averaged over three", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 606, + 404, + 619 + ], + "spans": [ + { + "bbox": [ + 105, + 606, + 404, + 619 + ], + "score": 1.0, + "content": "runs) without any model selection to avoid potential selection bias.", + "type": "text" + } + ], + "index": 38 + } + ], + "index": 34, + "bbox_fs": [ + 104, + 516, + 506, + 619 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 633, + 188, + 645 + ], + "lines": [ + { + "bbox": [ + 106, + 632, + 189, + 645 + ], + "spans": [ + { + "bbox": [ + 106, + 632, + 189, + 645 + ], + "score": 1.0, + "content": "4.2 Main results", + "type": "text" + } + ], + "index": 39 + } + ], + "index": 39 + }, + { + "type": "text", + "bbox": [ + 107, + 654, + 504, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 654, + 505, + 667 + ], + "spans": [ + { + "bbox": [ + 105, + 654, + 505, + 667 + ], + "score": 1.0, + "content": "As shown in Table 1, our experimental results demonstarte the superior efficacy of our", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 665, + 506, + 679 + ], + "spans": [ + { + "bbox": [ + 105, + 665, + 506, + 679 + ], + "score": 1.0, + "content": "method in comparison to zero-shot learning while closely resembling the performance of", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 676, + 506, + 690 + ], + "spans": [ + { + "bbox": [ + 105, + 676, 
+ 506, + 690 + ], + "score": 1.0, + "content": "in-context learning (ICL) in few-shot scenarios. This observation is derived from an aver-", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 687, + 506, + 702 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 506, + 702 + ], + "score": 1.0, + "content": "age performance of three runs, each leveraging different few-shot examples. Importantly,", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 698, + 506, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 506, + 712 + ], + "score": 1.0, + "content": "our model utilizes an equivalent number of tokens as the zero-shot method, notably fewer", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 709, + 506, + 722 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 506, + 722 + ], + "score": 1.0, + "content": "than the count used by ICL. Although occasional performance fluctuations, our method", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 720, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 105, + 720, + 506, + 734 + ], + "score": 1.0, + "content": "consistently outperforms zero-shot learning in most tasks. 
In the era of LLMs, the input", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 105, + 604, + 505, + 618 + ], + "spans": [ + { + "bbox": [ + 105, + 604, + 505, + 618 + ], + "score": 1.0, + "content": "length is directly proportional to the inference cost, and thus LoraHub’s ability to econ-", + "type": "text", + "cross_page": true + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 614, + 505, + 631 + ], + "spans": [ + { + "bbox": [ + 105, + 614, + 505, + 631 + ], + "score": 1.0, + "content": "omize on input tokens while approaching the peak performance grows increasingly sig-", + "type": "text", + "cross_page": true + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 625, + 506, + 641 + ], + "spans": [ + { + "bbox": [ + 105, + 625, + 506, + 641 + ], + "score": 1.0, + "content": "nificant. Moreover, as shown in Appendix Table 4, the upper bound performance of our", + "type": "text", + "cross_page": true + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 636, + 505, + 652 + ], + "spans": [ + { + "bbox": [ + 105, + 636, + 505, + 652 + ], + "score": 1.0, + "content": "method across these runs can surpass ICL on 18 tasks, demonstrating its potential for fu-", + "type": "text", + "cross_page": true + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 648, + 190, + 663 + ], + "spans": [ + { + "bbox": [ + 105, + 648, + 190, + 663 + ], + "score": 1.0, + "content": "ture development.", + "type": "text", + "cross_page": true + } + ], + "index": 15 + } + ], + "index": 43, + "bbox_fs": [ + 105, + 654, + 506, + 734 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "table", + "bbox": [ + 108, + 176, + 514, + 572 + ], + "blocks": [ + { + "type": "table_caption", + "bbox": [ + 106, + 79, + 505, + 169 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 78, + 506, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 78, + 506, + 94 + ], + "score": 1.0, + "content": "Table 1: Experimental results of zero-shot learning (Zero), few-shot in-context 
learning", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 90, + 505, + 104 + ], + "spans": [ + { + "bbox": [ + 105, + 90, + 505, + 104 + ], + "score": 1.0, + "content": "(ICL), IA3 fine-tuning (IA3), LoRA tuning (LoRA), full fine-tuning (FFT) and our pro-", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 101, + 506, + 117 + ], + "spans": [ + { + "bbox": [ + 104, + 101, + 506, + 117 + ], + "score": 1.0, + "content": "posed few-shot LoraHub learning (LoraHub) on the BBH benchmark with FLAN-T5-large", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 113, + 505, + 126 + ], + "spans": [ + { + "bbox": [ + 105, + 113, + 409, + 126 + ], + "score": 1.0, + "content": "as the base LLM. We denote algorithmic tasks with the superscript", + "type": "text" + }, + { + "bbox": [ + 409, + 113, + 417, + 125 + ], + "score": 0.69, + "content": "\\ S", + "type": "inline_equation" + }, + { + "bbox": [ + 417, + 113, + 505, + 126 + ], + "score": 1.0, + "content": "following previous", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 124, + 506, + 137 + ], + "spans": [ + { + "bbox": [ + 105, + 124, + 506, + 137 + ], + "score": 1.0, + "content": "work (Wu et al., 2023b). Note that we employ three runs, each leveraging different 5-shot", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 135, + 506, + 148 + ], + "spans": [ + { + "bbox": [ + 105, + 135, + 506, + 148 + ], + "score": 1.0, + "content": "examples per task, as demonstrations for all few-shot methods. 
The average performance", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 146, + 506, + 159 + ], + "spans": [ + { + "bbox": [ + 105, + 146, + 506, + 159 + ], + "score": 1.0, + "content": "of all methods is reported below, and the best performance of each few-shot method can", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 156, + 234, + 170 + ], + "spans": [ + { + "bbox": [ + 104, + 156, + 234, + 170 + ], + "score": 1.0, + "content": "be found in the Appendix B.", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 3.5 + }, + { + "type": "table_body", + "bbox": [ + 108, + 176, + 514, + 572 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 108, + 176, + 514, + 572 + ], + "spans": [ + { + "bbox": [ + 108, + 176, + 514, + 572 + ], + "score": 0.983, + "html": "
TaskZeroICLavgIA3avgLoRAavgFFTavgLoraHubavg
Boolean Expressions54.059.656.256.062.255.5
Causal Judgement57.559.460.255.657.554.3
Date Understanding15.320.420.035.859.332.9
Disambiguation0.069.10.068.068.245.2
Dyck Languages1.30.94.222.219.51.0
Formal Fallacies51.355.351.553.654.052.8
Geometric Shapes6.719.614.72431.17.4
Hyperbaton6.771.849.355.377.362.8
Logical DeductionS (five objects)21.339.132.740.042.236.1
Logical DeductionS (seven objects)12.740.733.837.344.936.8
Logical DeductionS (three objects)0.051.68.553.652.945.7
Movie Recommendation62.755.861.851.566.055.3
Multistep Arithmetic0.70.70.70.20.00.4
Navigate47.345.346.248.048.047.1
Object Counting34.732.435.138.735.633.7
Penguins in a Table43.541.345.036.231.935.9
Reasoning about Colored Objects32.040.240.739.637.640.0
Ruin Names23.319.324.437.861.324.4
Salient Translation Error Detection37.347.337.116.016.236.0
Snarks50.054.253.955.666.756.9
Sports Understanding56.054.755.156.554.056.7
Temporal Sequences16.725.118.225.137.818.2
Tracking Shuffled ObjectsS (five objects)12.012.012.013.816.912.3
Tracking Shuffled Objects (seven objects)6.76.76.710.09.87.7
Tracking Shuffled ObjectsS (three objects)24.731.130.730.932.029.2
Web of Lies54.053.854.252.748.250.1
Word Sorting1.30.51.34.94.91.1
Avg Performance Per Task27.037.331.637.742.134.7
Avg Tokens Per Example111.6597.8111.6111.6111.6111.6
Gradient-based TrainingNoNoYesYesYesNo
", + "type": "table", + "image_path": "7ec4211137da396567e2ee2f253ff7f1eb99abc5b9e489998bc8f304dfdfbc78.jpg" + } + ] + } + ], + "index": 9, + "virtual_lines": [ + { + "bbox": [ + 108, + 176, + 514, + 308.0 + ], + "spans": [], + "index": 8 + }, + { + "bbox": [ + 108, + 308.0, + 514, + 440.0 + ], + "spans": [], + "index": 9 + }, + { + "bbox": [ + 108, + 440.0, + 514, + 572.0 + ], + "spans": [], + "index": 10 + } + ] + } + ], + "index": 6.25 + }, + { + "type": "text", + "bbox": [ + 107, + 604, + 505, + 660 + ], + "lines": [ + { + "bbox": [ + 105, + 604, + 505, + 618 + ], + "spans": [ + { + "bbox": [ + 105, + 604, + 505, + 618 + ], + "score": 1.0, + "content": "length is directly proportional to the inference cost, and thus LoraHub’s ability to econ-", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 614, + 505, + 631 + ], + "spans": [ + { + "bbox": [ + 105, + 614, + 505, + 631 + ], + "score": 1.0, + "content": "omize on input tokens while approaching the peak performance grows increasingly sig-", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 625, + 506, + 641 + ], + "spans": [ + { + "bbox": [ + 105, + 625, + 506, + 641 + ], + "score": 1.0, + "content": "nificant. 
Moreover, as shown in Appendix Table 4, the upper bound performance of our", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 636, + 505, + 652 + ], + "spans": [ + { + "bbox": [ + 105, + 636, + 505, + 652 + ], + "score": 1.0, + "content": "method across these runs can surpass ICL on 18 tasks, demonstrating its potential for fu-", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 648, + 190, + 663 + ], + "spans": [ + { + "bbox": [ + 105, + 648, + 190, + 663 + ], + "score": 1.0, + "content": "ture development.", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 13 + }, + { + "type": "text", + "bbox": [ + 107, + 665, + 505, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 664, + 505, + 679 + ], + "spans": [ + { + "bbox": [ + 105, + 664, + 505, + 679 + ], + "score": 1.0, + "content": "Even when compared to certain gradient-based optimization methods, our approach con-", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 677, + 505, + 689 + ], + "spans": [ + { + "bbox": [ + 105, + 677, + 505, + 689 + ], + "score": 1.0, + "content": "sistently demonstrates competitive performance. For example, as depicted in Table 1, our", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 685, + 506, + 702 + ], + "spans": [ + { + "bbox": [ + 105, + 685, + 300, + 702 + ], + "score": 1.0, + "content": "method exhibits a notable improvement of", + "type": "text" + }, + { + "bbox": [ + 301, + 687, + 324, + 698 + ], + "score": 0.86, + "content": "3 . 1 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 324, + 685, + 506, + 702 + ], + "score": 1.0, + "content": "on average in contrast to the promising", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 697, + 506, + 713 + ], + "spans": [ + { + "bbox": [ + 105, + 697, + 506, + 713 + ], + "score": 1.0, + "content": "IA3 method. 
Nevertheless, we acknowledge that our approach still falls behind LoRA", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 709, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 506, + 723 + ], + "score": 1.0, + "content": "tuning and full fine-tuning, especially in tasks that exhibit significant deviation from the", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 720, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 105, + 720, + 506, + 734 + ], + "score": 1.0, + "content": "upstream task. Taking Dyck Languages as an example, both LoraHub and ICL achieve", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 18.5 + } + ], + "page_idx": 5, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 26, + 316, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "score": 1.0, + "content": "Published as a conference paper at COLM 2024", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 302, + 752, + 308, + 760 + ], + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 762 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 762 + ], + "score": 1.0, + "content": "6", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "table", + "bbox": [ + 108, + 176, + 514, + 572 + ], + "blocks": [ + { + "type": "table_caption", + "bbox": [ + 106, + 79, + 505, + 169 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 78, + 506, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 78, + 506, + 94 + ], + "score": 1.0, + "content": "Table 1: Experimental results of zero-shot learning (Zero), few-shot in-context learning", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 90, + 505, + 104 + ], + "spans": [ + { + "bbox": [ + 105, + 90, + 505, + 104 + ], + "score": 1.0, + "content": "(ICL), IA3 fine-tuning (IA3), LoRA tuning 
(LoRA), full fine-tuning (FFT) and our pro-", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 101, + 506, + 117 + ], + "spans": [ + { + "bbox": [ + 104, + 101, + 506, + 117 + ], + "score": 1.0, + "content": "posed few-shot LoraHub learning (LoraHub) on the BBH benchmark with FLAN-T5-large", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 113, + 505, + 126 + ], + "spans": [ + { + "bbox": [ + 105, + 113, + 409, + 126 + ], + "score": 1.0, + "content": "as the base LLM. We denote algorithmic tasks with the superscript", + "type": "text" + }, + { + "bbox": [ + 409, + 113, + 417, + 125 + ], + "score": 0.69, + "content": "\\ S", + "type": "inline_equation" + }, + { + "bbox": [ + 417, + 113, + 505, + 126 + ], + "score": 1.0, + "content": "following previous", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 124, + 506, + 137 + ], + "spans": [ + { + "bbox": [ + 105, + 124, + 506, + 137 + ], + "score": 1.0, + "content": "work (Wu et al., 2023b). Note that we employ three runs, each leveraging different 5-shot", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 135, + 506, + 148 + ], + "spans": [ + { + "bbox": [ + 105, + 135, + 506, + 148 + ], + "score": 1.0, + "content": "examples per task, as demonstrations for all few-shot methods. 
The average performance", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 146, + 506, + 159 + ], + "spans": [ + { + "bbox": [ + 105, + 146, + 506, + 159 + ], + "score": 1.0, + "content": "of all methods is reported below, and the best performance of each few-shot method can", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 156, + 234, + 170 + ], + "spans": [ + { + "bbox": [ + 104, + 156, + 234, + 170 + ], + "score": 1.0, + "content": "be found in the Appendix B.", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 3.5 + }, + { + "type": "table_body", + "bbox": [ + 108, + 176, + 514, + 572 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 108, + 176, + 514, + 572 + ], + "spans": [ + { + "bbox": [ + 108, + 176, + 514, + 572 + ], + "score": 0.983, + "html": "
TaskZeroICLavgIA3avgLoRAavgFFTavgLoraHubavg
Boolean Expressions54.059.656.256.062.255.5
Causal Judgement57.559.460.255.657.554.3
Date Understanding15.320.420.035.859.332.9
Disambiguation0.069.10.068.068.245.2
Dyck Languages1.30.94.222.219.51.0
Formal Fallacies51.355.351.553.654.052.8
Geometric Shapes6.719.614.72431.17.4
Hyperbaton6.771.849.355.377.362.8
Logical DeductionS (five objects)21.339.132.740.042.236.1
Logical DeductionS (seven objects)12.740.733.837.344.936.8
Logical DeductionS (three objects)0.051.68.553.652.945.7
Movie Recommendation62.755.861.851.566.055.3
Multistep Arithmetic0.70.70.70.20.00.4
Navigate47.345.346.248.048.047.1
Object Counting34.732.435.138.735.633.7
Penguins in a Table43.541.345.036.231.935.9
Reasoning about Colored Objects32.040.240.739.637.640.0
Ruin Names23.319.324.437.861.324.4
Salient Translation Error Detection37.347.337.116.016.236.0
Snarks50.054.253.955.666.756.9
Sports Understanding56.054.755.156.554.056.7
Temporal Sequences16.725.118.225.137.818.2
Tracking Shuffled ObjectsS (five objects)12.012.012.013.816.912.3
Tracking Shuffled Objects (seven objects)6.76.76.710.09.87.7
Tracking Shuffled ObjectsS (three objects)24.731.130.730.932.029.2
Web of Lies54.053.854.252.748.250.1
Word Sorting1.30.51.34.94.91.1
Avg Performance Per Task27.037.331.637.742.134.7
Avg Tokens Per Example111.6597.8111.6111.6111.6111.6
Gradient-based TrainingNoNoYesYesYesNo
", + "type": "table", + "image_path": "7ec4211137da396567e2ee2f253ff7f1eb99abc5b9e489998bc8f304dfdfbc78.jpg" + } + ] + } + ], + "index": 9, + "virtual_lines": [ + { + "bbox": [ + 108, + 176, + 514, + 308.0 + ], + "spans": [], + "index": 8 + }, + { + "bbox": [ + 108, + 308.0, + 514, + 440.0 + ], + "spans": [], + "index": 9 + }, + { + "bbox": [ + 108, + 440.0, + 514, + 572.0 + ], + "spans": [], + "index": 10 + } + ] + } + ], + "index": 6.25 + }, + { + "type": "text", + "bbox": [ + 107, + 604, + 505, + 660 + ], + "lines": [], + "index": 13, + "bbox_fs": [ + 105, + 604, + 506, + 663 + ], + "lines_deleted": true + }, + { + "type": "text", + "bbox": [ + 107, + 665, + 505, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 664, + 505, + 679 + ], + "spans": [ + { + "bbox": [ + 105, + 664, + 505, + 679 + ], + "score": 1.0, + "content": "Even when compared to certain gradient-based optimization methods, our approach con-", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 677, + 505, + 689 + ], + "spans": [ + { + "bbox": [ + 105, + 677, + 505, + 689 + ], + "score": 1.0, + "content": "sistently demonstrates competitive performance. For example, as depicted in Table 1, our", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 685, + 506, + 702 + ], + "spans": [ + { + "bbox": [ + 105, + 685, + 300, + 702 + ], + "score": 1.0, + "content": "method exhibits a notable improvement of", + "type": "text" + }, + { + "bbox": [ + 301, + 687, + 324, + 698 + ], + "score": 0.86, + "content": "3 . 1 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 324, + 685, + 506, + 702 + ], + "score": 1.0, + "content": "on average in contrast to the promising", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 697, + 506, + 713 + ], + "spans": [ + { + "bbox": [ + 105, + 697, + 506, + 713 + ], + "score": 1.0, + "content": "IA3 method. 
Nevertheless, we acknowledge that our approach still falls behind LoRA", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 709, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 506, + 723 + ], + "score": 1.0, + "content": "tuning and full fine-tuning, especially in tasks that exhibit significant deviation from the", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 720, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 105, + 720, + 506, + 734 + ], + "score": 1.0, + "content": "upstream task. Taking Dyck Languages as an example, both LoraHub and ICL achieve", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 106, + 81, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 81, + 279, + 95 + ], + "score": 1.0, + "content": "only an average performance of nearly", + "type": "text", + "cross_page": true + }, + { + "bbox": [ + 280, + 82, + 302, + 93 + ], + "score": 0.87, + "content": "1 . 0 \\%", + "type": "inline_equation", + "cross_page": true + }, + { + "bbox": [ + 302, + 81, + 505, + 95 + ], + "score": 1.0, + "content": "on these tasks, while LoRA and FFT methods", + "type": "text", + "cross_page": true + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 93, + 331, + 106 + ], + "spans": [ + { + "bbox": [ + 106, + 93, + 331, + 106 + ], + "score": 1.0, + "content": "showcase impressive results with only 5 examples.", + "type": "text", + "cross_page": true + } + ], + "index": 1 + } + ], + "index": 18.5, + "bbox_fs": [ + 105, + 664, + 506, + 734 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 82, + 504, + 105 + ], + "lines": [ + { + "bbox": [ + 106, + 81, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 81, + 279, + 95 + ], + "score": 1.0, + "content": "only an average performance of nearly", + "type": "text" + }, + { + "bbox": [ + 280, + 82, + 302, + 93 + ], + "score": 0.87, + "content": "1 . 
0 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 302, + 81, + 505, + 95 + ], + "score": 1.0, + "content": "on these tasks, while LoRA and FFT methods", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 93, + 331, + 106 + ], + "spans": [ + { + "bbox": [ + 106, + 93, + 331, + 106 + ], + "score": 1.0, + "content": "showcase impressive results with only 5 examples.", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 0.5 + }, + { + "type": "title", + "bbox": [ + 107, + 119, + 181, + 131 + ], + "lines": [ + { + "bbox": [ + 105, + 116, + 182, + 133 + ], + "spans": [ + { + "bbox": [ + 105, + 116, + 182, + 133 + ], + "score": 1.0, + "content": "4.3 Discussion", + "type": "text" + } + ], + "index": 2 + } + ], + "index": 2 + }, + { + "type": "text", + "bbox": [ + 107, + 139, + 505, + 206 + ], + "lines": [ + { + "bbox": [ + 105, + 138, + 506, + 153 + ], + "spans": [ + { + "bbox": [ + 105, + 138, + 506, + 153 + ], + "score": 1.0, + "content": "LoraHub addresses the challenge of reducing inference costs by eliminating the need for", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 150, + 506, + 164 + ], + "spans": [ + { + "bbox": [ + 105, + 150, + 506, + 164 + ], + "score": 1.0, + "content": "processing additional tokens, resulting in a noticeable reduction in overall inference ex-", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 160, + 506, + 177 + ], + "spans": [ + { + "bbox": [ + 104, + 160, + 506, + 177 + ], + "score": 1.0, + "content": "penses. However, it introduces an inherent cost during the ADAPT stage, necessitating", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 173, + 505, + 185 + ], + "spans": [ + { + "bbox": [ + 105, + 173, + 505, + 185 + ], + "score": 1.0, + "content": "extra inference steps, such as the 40 steps employed in our experiments. 
This introduces", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 183, + 505, + 197 + ], + "spans": [ + { + "bbox": [ + 105, + 183, + 505, + 197 + ], + "score": 1.0, + "content": "a trade-off between choosing the ICL approach and LoraHub, with the decision typically", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 194, + 275, + 207 + ], + "spans": [ + { + "bbox": [ + 105, + 194, + 275, + 207 + ], + "score": 1.0, + "content": "hinging on the nature of the situation.", + "type": "text" + } + ], + "index": 8 + } + ], + "index": 5.5 + }, + { + "type": "text", + "bbox": [ + 107, + 211, + 505, + 289 + ], + "lines": [ + { + "bbox": [ + 105, + 211, + 506, + 225 + ], + "spans": [ + { + "bbox": [ + 105, + 211, + 506, + 225 + ], + "score": 1.0, + "content": "For one-time ad-hoc tasks, the ICL approach should be more pragmatic due to LoraHub’s", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 223, + 505, + 235 + ], + "spans": [ + { + "bbox": [ + 105, + 223, + 505, + 235 + ], + "score": 1.0, + "content": "additional inference step costs. In such scenarios, where immediate, single-use solutions", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 233, + 505, + 246 + ], + "spans": [ + { + "bbox": [ + 105, + 233, + 505, + 246 + ], + "score": 1.0, + "content": "are preferred, the simplicity and efficiency of ICL might outweigh the benefits of potential", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 244, + 505, + 258 + ], + "spans": [ + { + "bbox": [ + 105, + 244, + 505, + 258 + ], + "score": 1.0, + "content": "savings offered by LoraHub. Conversely, for recurring or similar tasks, LoraHub emerges", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 255, + 506, + 268 + ], + "spans": [ + { + "bbox": [ + 105, + 255, + 506, + 268 + ], + "score": 1.0, + "content": "as a compelling option. 
Despite the added inference step cost, LoraHub’s ability to ef-", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 266, + 505, + 281 + ], + "spans": [ + { + "bbox": [ + 105, + 266, + 505, + 281 + ], + "score": 1.0, + "content": "ficiently handle repetitive tasks, often occurring thousands of times, while concurrently", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 277, + 474, + 291 + ], + "spans": [ + { + "bbox": [ + 105, + 277, + 474, + 291 + ], + "score": 1.0, + "content": "reducing overall expenses, positions it as a viable option in such kind of situations.", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 12 + }, + { + "type": "text", + "bbox": [ + 107, + 294, + 505, + 349 + ], + "lines": [ + { + "bbox": [ + 105, + 293, + 505, + 307 + ], + "spans": [ + { + "bbox": [ + 105, + 293, + 298, + 307 + ], + "score": 1.0, + "content": "In summary, our intention is not to replace", + "type": "text" + }, + { + "bbox": [ + 298, + 294, + 317, + 305 + ], + "score": 0.38, + "content": "\\scriptstyle { \\mathrm { I C L } } ,", + "type": "inline_equation" + }, + { + "bbox": [ + 318, + 293, + 505, + 307 + ], + "score": 1.0, + "content": "but to present LoraHub as a complemen-", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 304, + 506, + 318 + ], + "spans": [ + { + "bbox": [ + 105, + 304, + 506, + 318 + ], + "score": 1.0, + "content": "tary strategy with performance-efficiency trade-offs. 
Thus, we encourage a careful consid-", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 315, + 505, + 329 + ], + "spans": [ + { + "bbox": [ + 105, + 315, + 505, + 329 + ], + "score": 1.0, + "content": "eration of specific use cases and requirements when choosing between ICL and LoraHub,", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 326, + 506, + 341 + ], + "spans": [ + { + "bbox": [ + 105, + 326, + 506, + 341 + ], + "score": 1.0, + "content": "recognizing that the optimal solution may vary based on the nature and frequency of the", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 337, + 169, + 349 + ], + "spans": [ + { + "bbox": [ + 105, + 337, + 169, + 349 + ], + "score": 1.0, + "content": "tasks at hand.", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 18 + }, + { + "type": "title", + "bbox": [ + 108, + 365, + 250, + 380 + ], + "lines": [ + { + "bbox": [ + 104, + 364, + 252, + 383 + ], + "spans": [ + { + "bbox": [ + 104, + 364, + 252, + 383 + ], + "score": 1.0, + "content": "5 Experimental Analysis", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 21 + }, + { + "type": "text", + "bbox": [ + 106, + 391, + 505, + 414 + ], + "lines": [ + { + "bbox": [ + 105, + 390, + 505, + 405 + ], + "spans": [ + { + "bbox": [ + 105, + 390, + 505, + 405 + ], + "score": 1.0, + "content": "In this section, we thoroughly examine the characteristics of our proposed method and", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 402, + 505, + 416 + ], + "spans": [ + { + "bbox": [ + 105, + 402, + 505, + 416 + ], + "score": 1.0, + "content": "uncover several insightful findings. 
If not specified, we use FLAN-T5-large for all analysis.", + "type": "text" + } + ], + "index": 23 + } + ], + "index": 22.5 + }, + { + "type": "text", + "bbox": [ + 120, + 426, + 461, + 439 + ], + "lines": [ + { + "bbox": [ + 118, + 424, + 464, + 441 + ], + "spans": [ + { + "bbox": [ + 118, + 424, + 464, + 441 + ], + "score": 1.0, + "content": "Does composing LoRA modules extend beyond the single module’s benefits?", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 24 + }, + { + "type": "text", + "bbox": [ + 107, + 452, + 257, + 550 + ], + "lines": [ + { + "bbox": [ + 106, + 450, + 257, + 465 + ], + "spans": [ + { + "bbox": [ + 106, + 450, + 257, + 465 + ], + "score": 1.0, + "content": "We acknowledge the investiga-", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 106, + 463, + 257, + 474 + ], + "spans": [ + { + "bbox": [ + 106, + 463, + 257, + 474 + ], + "score": 1.0, + "content": "tion of cross-task performance in", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 473, + 257, + 486 + ], + "spans": [ + { + "bbox": [ + 105, + 473, + 257, + 486 + ], + "score": 1.0, + "content": "prior work (Jang et al., 2023),", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 484, + 258, + 497 + ], + "spans": [ + { + "bbox": [ + 106, + 484, + 258, + 497 + ], + "score": 1.0, + "content": "which delved into the capabilities", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 494, + 259, + 509 + ], + "spans": [ + { + "bbox": [ + 105, + 494, + 259, + 509 + ], + "score": 1.0, + "content": "of LoRA and proposed a novel", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 506, + 258, + 518 + ], + "spans": [ + { + "bbox": [ + 105, + 506, + 258, + 518 + ], + "score": 1.0, + "content": "method centered around LoRA", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 516, + 258, + 530 + ], + "spans": [ + { + "bbox": [ + 105, + 516, + 258, + 530 + ], + "score": 1.0, + 
"content": "module retrieval. In order to en-", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 528, + 258, + 542 + ], + "spans": [ + { + "bbox": [ + 105, + 528, + 258, + 542 + ], + "score": 1.0, + "content": "sure a fair comparison, we con-", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 106, + 539, + 258, + 552 + ], + "spans": [ + { + "bbox": [ + 106, + 539, + 258, + 552 + ], + "score": 1.0, + "content": "ducted an experiment where we", + "type": "text" + } + ], + "index": 37 + } + ], + "index": 31 + }, + { + "type": "table", + "bbox": [ + 274, + 497, + 493, + 532 + ], + "blocks": [ + { + "type": "table_caption", + "bbox": [ + 264, + 465, + 504, + 487 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 264, + 464, + 505, + 477 + ], + "spans": [ + { + "bbox": [ + 264, + 464, + 505, + 477 + ], + "score": 1.0, + "content": "Table 2: The average performance of various methods", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 263, + 475, + 438, + 487 + ], + "spans": [ + { + "bbox": [ + 263, + 475, + 438, + 487 + ], + "score": 1.0, + "content": "across all tasks in the benchmark BBH.", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 28.0 + }, + { + "type": "table_body", + "bbox": [ + 274, + 497, + 493, + 532 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 274, + 497, + 493, + 532 + ], + "spans": [ + { + "bbox": [ + 274, + 497, + 493, + 532 + ], + "score": 0.967, + "html": "
LoRA RetrievalLoraHub avgLoraHub best
31.734.741.2
", + "type": "table", + "image_path": "50cfa3a8aba9701a20f931b84cb7087e21caaca64b4ee5f70bf22283760d4739.jpg" + } + ] + } + ], + "index": 34.0, + "virtual_lines": [ + { + "bbox": [ + 274, + 497, + 493, + 514.5 + ], + "spans": [], + "index": 33 + }, + { + "bbox": [ + 274, + 514.5, + 493, + 532.0 + ], + "spans": [], + "index": 35 + } + ] + } + ], + "index": 31.0 + }, + { + "type": "text", + "bbox": [ + 107, + 551, + 505, + 606 + ], + "lines": [ + { + "bbox": [ + 106, + 550, + 504, + 562 + ], + "spans": [ + { + "bbox": [ + 106, + 550, + 504, + 562 + ], + "score": 1.0, + "content": "designed a LoRA retrieval mechanism based on the loss derived from few-shot examples.", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 561, + 505, + 574 + ], + "spans": [ + { + "bbox": [ + 105, + 561, + 505, + 574 + ], + "score": 1.0, + "content": "Specifically, we ranked all LoRA module candidates according to this loss and evaluated", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 571, + 505, + 585 + ], + "spans": [ + { + "bbox": [ + 105, + 571, + 505, + 585 + ], + "score": 1.0, + "content": "the best candidate on the test set of the unseen task. As depicted in Table 2, the performance", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 582, + 506, + 597 + ], + "spans": [ + { + "bbox": [ + 105, + 582, + 506, + 597 + ], + "score": 1.0, + "content": "of LoRA retrieval is notably impressive, positioning it as a strong baseline. 
However, in", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 594, + 488, + 608 + ], + "spans": [ + { + "bbox": [ + 105, + 594, + 488, + 608 + ], + "score": 1.0, + "content": "comparison to LoraHub, the performance of LoRA retrieval is relatively less favorable", + "type": "text" + } + ], + "index": 42 + } + ], + "index": 40 + }, + { + "type": "text", + "bbox": [ + 119, + 619, + 369, + 631 + ], + "lines": [ + { + "bbox": [ + 118, + 617, + 370, + 632 + ], + "spans": [ + { + "bbox": [ + 118, + 617, + 370, + 632 + ], + "score": 1.0, + "content": "How effective is the gradient-free optimization method?", + "type": "text" + } + ], + "index": 43 + } + ], + "index": 43 + }, + { + "type": "text", + "bbox": [ + 107, + 643, + 505, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 642, + 505, + 657 + ], + "spans": [ + { + "bbox": [ + 105, + 642, + 505, + 657 + ], + "score": 1.0, + "content": "To assess the effectiveness of our gradient-free optimization method in correctly identi-", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 654, + 506, + 668 + ], + "spans": [ + { + "bbox": [ + 105, + 654, + 506, + 668 + ], + "score": 1.0, + "content": "fying the most suitable LoRA module for a given downstream task, we carried out an", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 665, + 506, + 679 + ], + "spans": [ + { + "bbox": [ + 105, + 665, + 506, + 679 + ], + "score": 1.0, + "content": "empirical study using the WikiTableQuestions (Pasupat & Liang, 2015) (WTQ) dataset. 
We", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 105, + 676, + 506, + 690 + ], + "spans": [ + { + "bbox": [ + 105, + 676, + 506, + 690 + ], + "score": 1.0, + "content": "strategically included a LoRA module that was specifically trained on the WTQ dataset", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 105, + 687, + 506, + 701 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 506, + 701 + ], + "score": 1.0, + "content": "into our pool of LoRA candidate modules, which originally stemmed from tasks exclusive", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 105, + 698, + 506, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 506, + 712 + ], + "score": 1.0, + "content": "to the Flan Collection. Subsequently, we designated WTQ as the targeted downstream task", + "type": "text" + } + ], + "index": 49 + }, + { + "bbox": [ + 105, + 708, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 505, + 723 + ], + "score": 1.0, + "content": "and computed the weights consistent with the methods employed in LoraHub learning.", + "type": "text" + } + ], + "index": 50 + }, + { + "bbox": [ + 105, + 719, + 505, + 734 + ], + "spans": [ + { + "bbox": [ + 105, + 719, + 505, + 734 + ], + "score": 1.0, + "content": "As an end result, the WTQ-specific LoRA module was awarded the highest weight, ex-", + "type": "text" + } + ], + "index": 51 + } + ], + "index": 47.5 + } + ], + "page_idx": 6, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 26, + 316, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "score": 1.0, + "content": "Published as a conference paper at COLM 2024", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 302, + 751, + 308, + 759 + ], + "lines": [ + { + "bbox": [ + 302, + 750, + 310, + 763 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 310, + 
763 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 13, + "width": 8 + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 82, + 504, + 105 + ], + "lines": [], + "index": 0.5, + "bbox_fs": [ + 106, + 81, + 505, + 106 + ], + "lines_deleted": true + }, + { + "type": "title", + "bbox": [ + 107, + 119, + 181, + 131 + ], + "lines": [ + { + "bbox": [ + 105, + 116, + 182, + 133 + ], + "spans": [ + { + "bbox": [ + 105, + 116, + 182, + 133 + ], + "score": 1.0, + "content": "4.3 Discussion", + "type": "text" + } + ], + "index": 2 + } + ], + "index": 2 + }, + { + "type": "text", + "bbox": [ + 107, + 139, + 505, + 206 + ], + "lines": [ + { + "bbox": [ + 105, + 138, + 506, + 153 + ], + "spans": [ + { + "bbox": [ + 105, + 138, + 506, + 153 + ], + "score": 1.0, + "content": "LoraHub addresses the challenge of reducing inference costs by eliminating the need for", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 150, + 506, + 164 + ], + "spans": [ + { + "bbox": [ + 105, + 150, + 506, + 164 + ], + "score": 1.0, + "content": "processing additional tokens, resulting in a noticeable reduction in overall inference ex-", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 160, + 506, + 177 + ], + "spans": [ + { + "bbox": [ + 104, + 160, + 506, + 177 + ], + "score": 1.0, + "content": "penses. However, it introduces an inherent cost during the ADAPT stage, necessitating", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 173, + 505, + 185 + ], + "spans": [ + { + "bbox": [ + 105, + 173, + 505, + 185 + ], + "score": 1.0, + "content": "extra inference steps, such as the 40 steps employed in our experiments. 
This introduces", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 183, + 505, + 197 + ], + "spans": [ + { + "bbox": [ + 105, + 183, + 505, + 197 + ], + "score": 1.0, + "content": "a trade-off between choosing the ICL approach and LoraHub, with the decision typically", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 194, + 275, + 207 + ], + "spans": [ + { + "bbox": [ + 105, + 194, + 275, + 207 + ], + "score": 1.0, + "content": "hinging on the nature of the situation.", + "type": "text" + } + ], + "index": 8 + } + ], + "index": 5.5, + "bbox_fs": [ + 104, + 138, + 506, + 207 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 211, + 505, + 289 + ], + "lines": [ + { + "bbox": [ + 105, + 211, + 506, + 225 + ], + "spans": [ + { + "bbox": [ + 105, + 211, + 506, + 225 + ], + "score": 1.0, + "content": "For one-time ad-hoc tasks, the ICL approach should be more pragmatic due to LoraHub’s", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 223, + 505, + 235 + ], + "spans": [ + { + "bbox": [ + 105, + 223, + 505, + 235 + ], + "score": 1.0, + "content": "additional inference step costs. In such scenarios, where immediate, single-use solutions", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 233, + 505, + 246 + ], + "spans": [ + { + "bbox": [ + 105, + 233, + 505, + 246 + ], + "score": 1.0, + "content": "are preferred, the simplicity and efficiency of ICL might outweigh the benefits of potential", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 244, + 505, + 258 + ], + "spans": [ + { + "bbox": [ + 105, + 244, + 505, + 258 + ], + "score": 1.0, + "content": "savings offered by LoraHub. Conversely, for recurring or similar tasks, LoraHub emerges", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 255, + 506, + 268 + ], + "spans": [ + { + "bbox": [ + 105, + 255, + 506, + 268 + ], + "score": 1.0, + "content": "as a compelling option. 
Despite the added inference step cost, LoraHub’s ability to ef-", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 266, + 505, + 281 + ], + "spans": [ + { + "bbox": [ + 105, + 266, + 505, + 281 + ], + "score": 1.0, + "content": "ficiently handle repetitive tasks, often occurring thousands of times, while concurrently", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 277, + 474, + 291 + ], + "spans": [ + { + "bbox": [ + 105, + 277, + 474, + 291 + ], + "score": 1.0, + "content": "reducing overall expenses, positions it as a viable option in such kind of situations.", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 12, + "bbox_fs": [ + 105, + 211, + 506, + 291 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 294, + 505, + 349 + ], + "lines": [ + { + "bbox": [ + 105, + 293, + 505, + 307 + ], + "spans": [ + { + "bbox": [ + 105, + 293, + 298, + 307 + ], + "score": 1.0, + "content": "In summary, our intention is not to replace", + "type": "text" + }, + { + "bbox": [ + 298, + 294, + 317, + 305 + ], + "score": 0.38, + "content": "\\scriptstyle { \\mathrm { I C L } } ,", + "type": "inline_equation" + }, + { + "bbox": [ + 318, + 293, + 505, + 307 + ], + "score": 1.0, + "content": "but to present LoraHub as a complemen-", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 304, + 506, + 318 + ], + "spans": [ + { + "bbox": [ + 105, + 304, + 506, + 318 + ], + "score": 1.0, + "content": "tary strategy with performance-efficiency trade-offs. 
Thus, we encourage a careful consid-", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 315, + 505, + 329 + ], + "spans": [ + { + "bbox": [ + 105, + 315, + 505, + 329 + ], + "score": 1.0, + "content": "eration of specific use cases and requirements when choosing between ICL and LoraHub,", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 326, + 506, + 341 + ], + "spans": [ + { + "bbox": [ + 105, + 326, + 506, + 341 + ], + "score": 1.0, + "content": "recognizing that the optimal solution may vary based on the nature and frequency of the", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 337, + 169, + 349 + ], + "spans": [ + { + "bbox": [ + 105, + 337, + 169, + 349 + ], + "score": 1.0, + "content": "tasks at hand.", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 18, + "bbox_fs": [ + 105, + 293, + 506, + 349 + ] + }, + { + "type": "title", + "bbox": [ + 108, + 365, + 250, + 380 + ], + "lines": [ + { + "bbox": [ + 104, + 364, + 252, + 383 + ], + "spans": [ + { + "bbox": [ + 104, + 364, + 252, + 383 + ], + "score": 1.0, + "content": "5 Experimental Analysis", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 21 + }, + { + "type": "text", + "bbox": [ + 106, + 391, + 505, + 414 + ], + "lines": [ + { + "bbox": [ + 105, + 390, + 505, + 405 + ], + "spans": [ + { + "bbox": [ + 105, + 390, + 505, + 405 + ], + "score": 1.0, + "content": "In this section, we thoroughly examine the characteristics of our proposed method and", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 402, + 505, + 416 + ], + "spans": [ + { + "bbox": [ + 105, + 402, + 505, + 416 + ], + "score": 1.0, + "content": "uncover several insightful findings. 
If not specified, we use FLAN-T5-large for all analysis.", + "type": "text" + } + ], + "index": 23 + } + ], + "index": 22.5, + "bbox_fs": [ + 105, + 390, + 505, + 416 + ] + }, + { + "type": "text", + "bbox": [ + 120, + 426, + 461, + 439 + ], + "lines": [ + { + "bbox": [ + 118, + 424, + 464, + 441 + ], + "spans": [ + { + "bbox": [ + 118, + 424, + 464, + 441 + ], + "score": 1.0, + "content": "Does composing LoRA modules extend beyond the single module’s benefits?", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 24, + "bbox_fs": [ + 118, + 424, + 464, + 441 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 452, + 257, + 550 + ], + "lines": [ + { + "bbox": [ + 106, + 450, + 257, + 465 + ], + "spans": [ + { + "bbox": [ + 106, + 450, + 257, + 465 + ], + "score": 1.0, + "content": "We acknowledge the investiga-", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 106, + 463, + 257, + 474 + ], + "spans": [ + { + "bbox": [ + 106, + 463, + 257, + 474 + ], + "score": 1.0, + "content": "tion of cross-task performance in", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 473, + 257, + 486 + ], + "spans": [ + { + "bbox": [ + 105, + 473, + 257, + 486 + ], + "score": 1.0, + "content": "prior work (Jang et al., 2023),", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 484, + 258, + 497 + ], + "spans": [ + { + "bbox": [ + 106, + 484, + 258, + 497 + ], + "score": 1.0, + "content": "which delved into the capabilities", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 494, + 259, + 509 + ], + "spans": [ + { + "bbox": [ + 105, + 494, + 259, + 509 + ], + "score": 1.0, + "content": "of LoRA and proposed a novel", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 506, + 258, + 518 + ], + "spans": [ + { + "bbox": [ + 105, + 506, + 258, + 518 + ], + "score": 1.0, + "content": "method centered around LoRA", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 516, + 
258, + 530 + ], + "spans": [ + { + "bbox": [ + 105, + 516, + 258, + 530 + ], + "score": 1.0, + "content": "module retrieval. In order to en-", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 528, + 258, + 542 + ], + "spans": [ + { + "bbox": [ + 105, + 528, + 258, + 542 + ], + "score": 1.0, + "content": "sure a fair comparison, we con-", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 106, + 539, + 258, + 552 + ], + "spans": [ + { + "bbox": [ + 106, + 539, + 258, + 552 + ], + "score": 1.0, + "content": "ducted an experiment where we", + "type": "text" + } + ], + "index": 37 + } + ], + "index": 31, + "bbox_fs": [ + 105, + 450, + 259, + 552 + ] + }, + { + "type": "table", + "bbox": [ + 274, + 497, + 493, + 532 + ], + "blocks": [ + { + "type": "table_caption", + "bbox": [ + 264, + 465, + 504, + 487 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 264, + 464, + 505, + 477 + ], + "spans": [ + { + "bbox": [ + 264, + 464, + 505, + 477 + ], + "score": 1.0, + "content": "Table 2: The average performance of various methods", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 263, + 475, + 438, + 487 + ], + "spans": [ + { + "bbox": [ + 263, + 475, + 438, + 487 + ], + "score": 1.0, + "content": "across all tasks in the benchmark BBH.", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 28.0 + }, + { + "type": "table_body", + "bbox": [ + 274, + 497, + 493, + 532 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 274, + 497, + 493, + 532 + ], + "spans": [ + { + "bbox": [ + 274, + 497, + 493, + 532 + ], + "score": 0.967, + "html": "
LoRA RetrievalLoraHub avgLoraHub best
31.734.741.2
", + "type": "table", + "image_path": "50cfa3a8aba9701a20f931b84cb7087e21caaca64b4ee5f70bf22283760d4739.jpg" + } + ] + } + ], + "index": 34.0, + "virtual_lines": [ + { + "bbox": [ + 274, + 497, + 493, + 514.5 + ], + "spans": [], + "index": 33 + }, + { + "bbox": [ + 274, + 514.5, + 493, + 532.0 + ], + "spans": [], + "index": 35 + } + ] + } + ], + "index": 31.0 + }, + { + "type": "text", + "bbox": [ + 107, + 551, + 505, + 606 + ], + "lines": [ + { + "bbox": [ + 106, + 550, + 504, + 562 + ], + "spans": [ + { + "bbox": [ + 106, + 550, + 504, + 562 + ], + "score": 1.0, + "content": "designed a LoRA retrieval mechanism based on the loss derived from few-shot examples.", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 561, + 505, + 574 + ], + "spans": [ + { + "bbox": [ + 105, + 561, + 505, + 574 + ], + "score": 1.0, + "content": "Specifically, we ranked all LoRA module candidates according to this loss and evaluated", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 571, + 505, + 585 + ], + "spans": [ + { + "bbox": [ + 105, + 571, + 505, + 585 + ], + "score": 1.0, + "content": "the best candidate on the test set of the unseen task. As depicted in Table 2, the performance", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 582, + 506, + 597 + ], + "spans": [ + { + "bbox": [ + 105, + 582, + 506, + 597 + ], + "score": 1.0, + "content": "of LoRA retrieval is notably impressive, positioning it as a strong baseline. 
However, in", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 594, + 488, + 608 + ], + "spans": [ + { + "bbox": [ + 105, + 594, + 488, + 608 + ], + "score": 1.0, + "content": "comparison to LoraHub, the performance of LoRA retrieval is relatively less favorable", + "type": "text" + } + ], + "index": 42 + } + ], + "index": 40, + "bbox_fs": [ + 105, + 550, + 506, + 608 + ] + }, + { + "type": "text", + "bbox": [ + 119, + 619, + 369, + 631 + ], + "lines": [ + { + "bbox": [ + 118, + 617, + 370, + 632 + ], + "spans": [ + { + "bbox": [ + 118, + 617, + 370, + 632 + ], + "score": 1.0, + "content": "How effective is the gradient-free optimization method?", + "type": "text" + } + ], + "index": 43 + } + ], + "index": 43, + "bbox_fs": [ + 118, + 617, + 370, + 632 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 643, + 505, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 642, + 505, + 657 + ], + "spans": [ + { + "bbox": [ + 105, + 642, + 505, + 657 + ], + "score": 1.0, + "content": "To assess the effectiveness of our gradient-free optimization method in correctly identi-", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 654, + 506, + 668 + ], + "spans": [ + { + "bbox": [ + 105, + 654, + 506, + 668 + ], + "score": 1.0, + "content": "fying the most suitable LoRA module for a given downstream task, we carried out an", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 665, + 506, + 679 + ], + "spans": [ + { + "bbox": [ + 105, + 665, + 506, + 679 + ], + "score": 1.0, + "content": "empirical study using the WikiTableQuestions (Pasupat & Liang, 2015) (WTQ) dataset. 
We", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 105, + 676, + 506, + 690 + ], + "spans": [ + { + "bbox": [ + 105, + 676, + 506, + 690 + ], + "score": 1.0, + "content": "strategically included a LoRA module that was specifically trained on the WTQ dataset", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 105, + 687, + 506, + 701 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 506, + 701 + ], + "score": 1.0, + "content": "into our pool of LoRA candidate modules, which originally stemmed from tasks exclusive", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 105, + 698, + 506, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 506, + 712 + ], + "score": 1.0, + "content": "to the Flan Collection. Subsequently, we designated WTQ as the targeted downstream task", + "type": "text" + } + ], + "index": 49 + }, + { + "bbox": [ + 105, + 708, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 505, + 723 + ], + "score": 1.0, + "content": "and computed the weights consistent with the methods employed in LoraHub learning.", + "type": "text" + } + ], + "index": 50 + }, + { + "bbox": [ + 105, + 719, + 505, + 734 + ], + "spans": [ + { + "bbox": [ + 105, + 719, + 505, + 734 + ], + "score": 1.0, + "content": "As an end result, the WTQ-specific LoRA module was awarded the highest weight, ex-", + "type": "text" + } + ], + "index": 51 + }, + { + "bbox": [ + 105, + 82, + 506, + 96 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 506, + 96 + ], + "score": 1.0, + "content": "emplifying the algorithm’s success in recognizing it as the most relevant. 
Moreover, the", + "type": "text", + "cross_page": true + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 93, + 505, + 107 + ], + "spans": [ + { + "bbox": [ + 105, + 93, + 505, + 107 + ], + "score": 1.0, + "content": "combined LoRA module demonstrated marginal superiority over the WTQ LoRA module.", + "type": "text", + "cross_page": true + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 103, + 505, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 103, + 505, + 118 + ], + "score": 1.0, + "content": "This underscores the claim that the gradient-free optimization method has the ability to", + "type": "text", + "cross_page": true + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 115, + 434, + 127 + ], + "spans": [ + { + "bbox": [ + 105, + 115, + 434, + 127 + ], + "score": 1.0, + "content": "proficiently select the optimal upstream LoRA module for an unseen task.", + "type": "text", + "cross_page": true + } + ], + "index": 3 + } + ], + "index": 47.5, + "bbox_fs": [ + 105, + 642, + 506, + 734 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 107, + 82, + 505, + 127 + ], + "lines": [ + { + "bbox": [ + 105, + 82, + 506, + 96 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 506, + 96 + ], + "score": 1.0, + "content": "emplifying the algorithm’s success in recognizing it as the most relevant. 
Moreover, the", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 93, + 505, + 107 + ], + "spans": [ + { + "bbox": [ + 105, + 93, + 505, + 107 + ], + "score": 1.0, + "content": "combined LoRA module demonstrated marginal superiority over the WTQ LoRA module.", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 103, + 505, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 103, + 505, + 118 + ], + "score": 1.0, + "content": "This underscores the claim that the gradient-free optimization method has the ability to", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 115, + 434, + 127 + ], + "spans": [ + { + "bbox": [ + 105, + 115, + 434, + 127 + ], + "score": 1.0, + "content": "proficiently select the optimal upstream LoRA module for an unseen task.", + "type": "text" + } + ], + "index": 3 + } + ], + "index": 1.5 + }, + { + "type": "text", + "bbox": [ + 119, + 139, + 385, + 152 + ], + "lines": [ + { + "bbox": [ + 118, + 137, + 388, + 153 + ], + "spans": [ + { + "bbox": [ + 118, + 137, + 388, + 153 + ], + "score": 1.0, + "content": "Can LoraHub work well on non-instruction-tuning models?", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 4 + }, + { + "type": "text", + "bbox": [ + 107, + 164, + 505, + 231 + ], + "lines": [ + { + "bbox": [ + 105, + 164, + 506, + 177 + ], + "spans": [ + { + "bbox": [ + 105, + 164, + 506, + 177 + ], + "score": 1.0, + "content": "In previous investigations, we primarily focused on models with zero-shot capabilities that", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 175, + 505, + 188 + ], + "spans": [ + { + "bbox": [ + 105, + 175, + 505, + 188 + ], + "score": 1.0, + "content": "were trained with instruction tuning. 
However, for models like T5 without zero-shot abili-", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 186, + 505, + 199 + ], + "spans": [ + { + "bbox": [ + 105, + 186, + 505, + 199 + ], + "score": 1.0, + "content": "ties, where training has a larger effect on parameters, it was unclear if LoraHub could still", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 196, + 505, + 212 + ], + "spans": [ + { + "bbox": [ + 105, + 196, + 505, + 212 + ], + "score": 1.0, + "content": "effectively manage and improve them. Our experiments show that although these mod-", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 208, + 505, + 221 + ], + "spans": [ + { + "bbox": [ + 106, + 208, + 505, + 221 + ], + "score": 1.0, + "content": "els perform worse than FLAN-T5, LoraHub learning can still enable them to effectively", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 220, + 370, + 232 + ], + "spans": [ + { + "bbox": [ + 105, + 220, + 370, + 232 + ], + "score": 1.0, + "content": "generlize to unseen tasks. 
See Appendix C for more details.", + "type": "text" + } + ], + "index": 10 + } + ], + "index": 7.5 + }, + { + "type": "text", + "bbox": [ + 114, + 244, + 465, + 257 + ], + "lines": [ + { + "bbox": [ + 115, + 241, + 467, + 258 + ], + "spans": [ + { + "bbox": [ + 115, + 241, + 467, + 258 + ], + "score": 1.0, + "content": "Will the rank of LoRA modules impact the performance of LoraHub learning?", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 11 + }, + { + "type": "text", + "bbox": [ + 107, + 268, + 505, + 357 + ], + "lines": [ + { + "bbox": [ + 106, + 268, + 505, + 282 + ], + "spans": [ + { + "bbox": [ + 106, + 268, + 505, + 282 + ], + "score": 1.0, + "content": "The parameter rank plays a crucial role in the LoRA framework, directly influencing the", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 277, + 506, + 294 + ], + "spans": [ + { + "bbox": [ + 104, + 277, + 506, + 294 + ], + "score": 1.0, + "content": "number of trainable parameters utilized during LoRA tuning. This prompts an intriguing", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 290, + 505, + 303 + ], + "spans": [ + { + "bbox": [ + 105, + 290, + 505, + 303 + ], + "score": 1.0, + "content": "question: does the variation in rank values influence the outcomes observed within the Lo-", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 302, + 505, + 314 + ], + "spans": [ + { + "bbox": [ + 105, + 302, + 505, + 314 + ], + "score": 1.0, + "content": "raHub learning? Our analysis indicates that, for FLAN-T5, the choice of rank has minimal", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 312, + 505, + 325 + ], + "spans": [ + { + "bbox": [ + 105, + 312, + 505, + 325 + ], + "score": 1.0, + "content": "impact. However, for T5, it still exerts some influence. 
Empirical findings reveal that, in", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 323, + 506, + 338 + ], + "spans": [ + { + "bbox": [ + 105, + 323, + 506, + 338 + ], + "score": 1.0, + "content": "comparison to rank values of 4 or 64, a rank value of 16 consistently demonstrates superior", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 334, + 506, + 348 + ], + "spans": [ + { + "bbox": [ + 105, + 334, + 506, + 348 + ], + "score": 1.0, + "content": "performance across different runs, both in terms of average and optimal values. Additional", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 346, + 266, + 358 + ], + "spans": [ + { + "bbox": [ + 105, + 346, + 266, + 358 + ], + "score": 1.0, + "content": "results are available in Appendix C.", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 15.5 + }, + { + "type": "text", + "bbox": [ + 122, + 370, + 334, + 381 + ], + "lines": [ + { + "bbox": [ + 121, + 369, + 335, + 382 + ], + "spans": [ + { + "bbox": [ + 121, + 369, + 335, + 382 + ], + "score": 1.0, + "content": "Does more LoRA modules lead to better results?", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 20 + }, + { + "type": "text", + "bbox": [ + 107, + 392, + 505, + 459 + ], + "lines": [ + { + "bbox": [ + 104, + 389, + 506, + 407 + ], + "spans": [ + { + "bbox": [ + 104, + 389, + 506, + 407 + ], + "score": 1.0, + "content": "In our main experiments, we randomly selected 20 LoRA modules for LoraHub learning.", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 402, + 505, + 416 + ], + "spans": [ + { + "bbox": [ + 105, + 402, + 505, + 416 + ], + "score": 1.0, + "content": "Therefore, we conducted experiments to investigate the effect of using different numbers", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 413, + 506, + 427 + ], + "spans": [ + { + "bbox": [ + 105, + 413, + 506, + 427 + ], + "score": 1.0, + "content": "of LoRA modules. 
The results demonstrate that as we increased the number of LoRA", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 425, + 505, + 438 + ], + "spans": [ + { + "bbox": [ + 105, + 425, + 505, + 438 + ], + "score": 1.0, + "content": "modules, the variance in performance increased. However, the maximum achievable per-", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 106, + 436, + 505, + 448 + ], + "spans": [ + { + "bbox": [ + 106, + 436, + 505, + 448 + ], + "score": 1.0, + "content": "formance also improved. More analysis on the variance and the detailed results can be", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 447, + 207, + 460 + ], + "spans": [ + { + "bbox": [ + 105, + 447, + 207, + 460 + ], + "score": 1.0, + "content": "found in Appendix H.", + "type": "text" + } + ], + "index": 26 + } + ], + "index": 23.5 + }, + { + "type": "text", + "bbox": [ + 120, + 471, + 341, + 484 + ], + "lines": [ + { + "bbox": [ + 119, + 470, + 342, + 484 + ], + "spans": [ + { + "bbox": [ + 119, + 470, + 342, + 484 + ], + "score": 1.0, + "content": "How much computational resource can be saved?", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 27 + }, + { + "type": "text", + "bbox": [ + 107, + 496, + 505, + 552 + ], + "lines": [ + { + "bbox": [ + 105, + 495, + 506, + 510 + ], + "spans": [ + { + "bbox": [ + 105, + 495, + 506, + 510 + ], + "score": 1.0, + "content": "We follow to the memory test settings from the LoRA-FA (Zhang et al., 2023b) study for", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 505, + 506, + 522 + ], + "spans": [ + { + "bbox": [ + 105, + 505, + 506, + 522 + ], + "score": 1.0, + "content": "an accurate benchmark. 
In this context, full fine-tuning required about 40GB of memory,", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 106, + 518, + 505, + 531 + ], + "spans": [ + { + "bbox": [ + 106, + 518, + 505, + 531 + ], + "score": 1.0, + "content": "whereas LoRA fine-tuning used around 34GB. Remarkably, LoraHub only utilized about", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 527, + 506, + 543 + ], + "spans": [ + { + "bbox": [ + 105, + 527, + 506, + 543 + ], + "score": 1.0, + "content": "5GB of memory, illustrating its efficiency due to the inference-only mode, which eliminates", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 106, + 539, + 347, + 554 + ], + "spans": [ + { + "bbox": [ + 106, + 539, + 347, + 554 + ], + "score": 1.0, + "content": "the need for storing gradients and optimization states.", + "type": "text" + } + ], + "index": 32 + } + ], + "index": 30 + }, + { + "type": "title", + "bbox": [ + 107, + 568, + 199, + 581 + ], + "lines": [ + { + "bbox": [ + 104, + 567, + 200, + 582 + ], + "spans": [ + { + "bbox": [ + 104, + 567, + 200, + 582 + ], + "score": 1.0, + "content": "6 Related work", + "type": "text" + } + ], + "index": 33 + } + ], + "index": 33 + }, + { + "type": "text", + "bbox": [ + 107, + 594, + 505, + 627 + ], + "lines": [ + { + "bbox": [ + 105, + 592, + 505, + 607 + ], + "spans": [ + { + "bbox": [ + 105, + 592, + 505, + 607 + ], + "score": 1.0, + "content": "Model Merging Our method substantially draws on the concept of LoRA module com-", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 604, + 505, + 618 + ], + "spans": [ + { + "bbox": [ + 105, + 604, + 505, + 618 + ], + "score": 1.0, + "content": "position, and thus, aligns with the significant thread of research in model merging. 
This", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 614, + 498, + 631 + ], + "spans": [ + { + "bbox": [ + 105, + 614, + 498, + 631 + ], + "score": 1.0, + "content": "research focus is broadly categorized based on the ultimate objectives of model merging.", + "type": "text" + } + ], + "index": 36 + } + ], + "index": 35 + }, + { + "type": "text", + "bbox": [ + 106, + 632, + 505, + 732 + ], + "lines": [ + { + "bbox": [ + 106, + 632, + 505, + 646 + ], + "spans": [ + { + "bbox": [ + 106, + 632, + 505, + 646 + ], + "score": 1.0, + "content": "The first category focuses on merging entire models, and the goal is to combine individ-", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 106, + 643, + 505, + 657 + ], + "spans": [ + { + "bbox": [ + 106, + 643, + 505, + 657 + ], + "score": 1.0, + "content": "ually trained models to approximate the performance benefits of model ensembling or", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 106, + 654, + 506, + 667 + ], + "spans": [ + { + "bbox": [ + 106, + 654, + 506, + 667 + ], + "score": 1.0, + "content": "multi-task learning. Prior works (Matena & Raffel, 2021; Jin et al., 2023; Yadav et al., 2023;", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 664, + 506, + 679 + ], + "spans": [ + { + "bbox": [ + 105, + 664, + 506, + 679 + ], + "score": 1.0, + "content": "Wu et al., 2023a) operated under the assumption of shared model architectures. 
For ex-", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 675, + 506, + 691 + ], + "spans": [ + { + "bbox": [ + 105, + 675, + 506, + 691 + ], + "score": 1.0, + "content": "ample, Matena & Raffel (2021) amalgamates models by approximating Gaussian posterior", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 686, + 505, + 700 + ], + "spans": [ + { + "bbox": [ + 105, + 686, + 505, + 700 + ], + "score": 1.0, + "content": "distributions garnered from Fisher information, while Yadav et al. (2023) merges models", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 698, + 506, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 506, + 712 + ], + "score": 1.0, + "content": "via resolving model interferences. Another approach is merging models with different ar-", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 106, + 709, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 106, + 709, + 505, + 722 + ], + "score": 1.0, + "content": "chitectures. For instance, Ainsworth et al. (2023) configures weights of different models", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 104, + 719, + 506, + 735 + ], + "spans": [ + { + "bbox": [ + 104, + 719, + 506, + 735 + ], + "score": 1.0, + "content": "prior to their merger. Following this objective, Stoica et al. 
(2023) merges models operating", + "type": "text" + } + ], + "index": 45 + } + ], + "index": 41 + } + ], + "page_idx": 7, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 26, + 316, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "score": 1.0, + "content": "Published as a conference paper at COLM 2024", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 302, + 751, + 308, + 760 + ], + "lines": [ + { + "bbox": [ + 301, + 750, + 310, + 762 + ], + "spans": [ + { + "bbox": [ + 301, + 750, + 310, + 762 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 12, + "width": 9 + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 107, + 82, + 505, + 127 + ], + "lines": [], + "index": 1.5, + "bbox_fs": [ + 105, + 82, + 506, + 127 + ], + "lines_deleted": true + }, + { + "type": "text", + "bbox": [ + 119, + 139, + 385, + 152 + ], + "lines": [ + { + "bbox": [ + 118, + 137, + 388, + 153 + ], + "spans": [ + { + "bbox": [ + 118, + 137, + 388, + 153 + ], + "score": 1.0, + "content": "Can LoraHub work well on non-instruction-tuning models?", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 4, + "bbox_fs": [ + 118, + 137, + 388, + 153 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 164, + 505, + 231 + ], + "lines": [ + { + "bbox": [ + 105, + 164, + 506, + 177 + ], + "spans": [ + { + "bbox": [ + 105, + 164, + 506, + 177 + ], + "score": 1.0, + "content": "In previous investigations, we primarily focused on models with zero-shot capabilities that", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 175, + 505, + 188 + ], + "spans": [ + { + "bbox": [ + 105, + 175, + 505, + 188 + ], + "score": 1.0, + "content": "were trained with instruction tuning. 
However, for models like T5 without zero-shot abili-", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 186, + 505, + 199 + ], + "spans": [ + { + "bbox": [ + 105, + 186, + 505, + 199 + ], + "score": 1.0, + "content": "ties, where training has a larger effect on parameters, it was unclear if LoraHub could still", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 196, + 505, + 212 + ], + "spans": [ + { + "bbox": [ + 105, + 196, + 505, + 212 + ], + "score": 1.0, + "content": "effectively manage and improve them. Our experiments show that although these mod-", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 208, + 505, + 221 + ], + "spans": [ + { + "bbox": [ + 106, + 208, + 505, + 221 + ], + "score": 1.0, + "content": "els perform worse than FLAN-T5, LoraHub learning can still enable them to effectively", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 220, + 370, + 232 + ], + "spans": [ + { + "bbox": [ + 105, + 220, + 370, + 232 + ], + "score": 1.0, + "content": "generlize to unseen tasks. 
See Appendix C for more details.", + "type": "text" + } + ], + "index": 10 + } + ], + "index": 7.5, + "bbox_fs": [ + 105, + 164, + 506, + 232 + ] + }, + { + "type": "text", + "bbox": [ + 114, + 244, + 465, + 257 + ], + "lines": [ + { + "bbox": [ + 115, + 241, + 467, + 258 + ], + "spans": [ + { + "bbox": [ + 115, + 241, + 467, + 258 + ], + "score": 1.0, + "content": "Will the rank of LoRA modules impact the performance of LoraHub learning?", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 11, + "bbox_fs": [ + 115, + 241, + 467, + 258 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 268, + 505, + 357 + ], + "lines": [ + { + "bbox": [ + 106, + 268, + 505, + 282 + ], + "spans": [ + { + "bbox": [ + 106, + 268, + 505, + 282 + ], + "score": 1.0, + "content": "The parameter rank plays a crucial role in the LoRA framework, directly influencing the", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 277, + 506, + 294 + ], + "spans": [ + { + "bbox": [ + 104, + 277, + 506, + 294 + ], + "score": 1.0, + "content": "number of trainable parameters utilized during LoRA tuning. This prompts an intriguing", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 290, + 505, + 303 + ], + "spans": [ + { + "bbox": [ + 105, + 290, + 505, + 303 + ], + "score": 1.0, + "content": "question: does the variation in rank values influence the outcomes observed within the Lo-", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 302, + 505, + 314 + ], + "spans": [ + { + "bbox": [ + 105, + 302, + 505, + 314 + ], + "score": 1.0, + "content": "raHub learning? Our analysis indicates that, for FLAN-T5, the choice of rank has minimal", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 312, + 505, + 325 + ], + "spans": [ + { + "bbox": [ + 105, + 312, + 505, + 325 + ], + "score": 1.0, + "content": "impact. However, for T5, it still exerts some influence. 
Empirical findings reveal that, in", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 323, + 506, + 338 + ], + "spans": [ + { + "bbox": [ + 105, + 323, + 506, + 338 + ], + "score": 1.0, + "content": "comparison to rank values of 4 or 64, a rank value of 16 consistently demonstrates superior", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 334, + 506, + 348 + ], + "spans": [ + { + "bbox": [ + 105, + 334, + 506, + 348 + ], + "score": 1.0, + "content": "performance across different runs, both in terms of average and optimal values. Additional", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 346, + 266, + 358 + ], + "spans": [ + { + "bbox": [ + 105, + 346, + 266, + 358 + ], + "score": 1.0, + "content": "results are available in Appendix C.", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 15.5, + "bbox_fs": [ + 104, + 268, + 506, + 358 + ] + }, + { + "type": "text", + "bbox": [ + 122, + 370, + 334, + 381 + ], + "lines": [ + { + "bbox": [ + 121, + 369, + 335, + 382 + ], + "spans": [ + { + "bbox": [ + 121, + 369, + 335, + 382 + ], + "score": 1.0, + "content": "Does more LoRA modules lead to better results?", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 20, + "bbox_fs": [ + 121, + 369, + 335, + 382 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 392, + 505, + 459 + ], + "lines": [ + { + "bbox": [ + 104, + 389, + 506, + 407 + ], + "spans": [ + { + "bbox": [ + 104, + 389, + 506, + 407 + ], + "score": 1.0, + "content": "In our main experiments, we randomly selected 20 LoRA modules for LoraHub learning.", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 402, + 505, + 416 + ], + "spans": [ + { + "bbox": [ + 105, + 402, + 505, + 416 + ], + "score": 1.0, + "content": "Therefore, we conducted experiments to investigate the effect of using different numbers", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 413, + 506, + 427 + ], + "spans": [ 
+ { + "bbox": [ + 105, + 413, + 506, + 427 + ], + "score": 1.0, + "content": "of LoRA modules. The results demonstrate that as we increased the number of LoRA", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 425, + 505, + 438 + ], + "spans": [ + { + "bbox": [ + 105, + 425, + 505, + 438 + ], + "score": 1.0, + "content": "modules, the variance in performance increased. However, the maximum achievable per-", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 106, + 436, + 505, + 448 + ], + "spans": [ + { + "bbox": [ + 106, + 436, + 505, + 448 + ], + "score": 1.0, + "content": "formance also improved. More analysis on the variance and the detailed results can be", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 447, + 207, + 460 + ], + "spans": [ + { + "bbox": [ + 105, + 447, + 207, + 460 + ], + "score": 1.0, + "content": "found in Appendix H.", + "type": "text" + } + ], + "index": 26 + } + ], + "index": 23.5, + "bbox_fs": [ + 104, + 389, + 506, + 460 + ] + }, + { + "type": "text", + "bbox": [ + 120, + 471, + 341, + 484 + ], + "lines": [ + { + "bbox": [ + 119, + 470, + 342, + 484 + ], + "spans": [ + { + "bbox": [ + 119, + 470, + 342, + 484 + ], + "score": 1.0, + "content": "How much computational resource can be saved?", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 27, + "bbox_fs": [ + 119, + 470, + 342, + 484 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 496, + 505, + 552 + ], + "lines": [ + { + "bbox": [ + 105, + 495, + 506, + 510 + ], + "spans": [ + { + "bbox": [ + 105, + 495, + 506, + 510 + ], + "score": 1.0, + "content": "We follow to the memory test settings from the LoRA-FA (Zhang et al., 2023b) study for", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 505, + 506, + 522 + ], + "spans": [ + { + "bbox": [ + 105, + 505, + 506, + 522 + ], + "score": 1.0, + "content": "an accurate benchmark. 
In this context, full fine-tuning required about 40GB of memory,", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 106, + 518, + 505, + 531 + ], + "spans": [ + { + "bbox": [ + 106, + 518, + 505, + 531 + ], + "score": 1.0, + "content": "whereas LoRA fine-tuning used around 34GB. Remarkably, LoraHub only utilized about", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 527, + 506, + 543 + ], + "spans": [ + { + "bbox": [ + 105, + 527, + 506, + 543 + ], + "score": 1.0, + "content": "5GB of memory, illustrating its efficiency due to the inference-only mode, which eliminates", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 106, + 539, + 347, + 554 + ], + "spans": [ + { + "bbox": [ + 106, + 539, + 347, + 554 + ], + "score": 1.0, + "content": "the need for storing gradients and optimization states.", + "type": "text" + } + ], + "index": 32 + } + ], + "index": 30, + "bbox_fs": [ + 105, + 495, + 506, + 554 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 568, + 199, + 581 + ], + "lines": [ + { + "bbox": [ + 104, + 567, + 200, + 582 + ], + "spans": [ + { + "bbox": [ + 104, + 567, + 200, + 582 + ], + "score": 1.0, + "content": "6 Related work", + "type": "text" + } + ], + "index": 33 + } + ], + "index": 33 + }, + { + "type": "text", + "bbox": [ + 107, + 594, + 505, + 627 + ], + "lines": [ + { + "bbox": [ + 105, + 592, + 505, + 607 + ], + "spans": [ + { + "bbox": [ + 105, + 592, + 505, + 607 + ], + "score": 1.0, + "content": "Model Merging Our method substantially draws on the concept of LoRA module com-", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 604, + 505, + 618 + ], + "spans": [ + { + "bbox": [ + 105, + 604, + 505, + 618 + ], + "score": 1.0, + "content": "position, and thus, aligns with the significant thread of research in model merging. 
This", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 614, + 498, + 631 + ], + "spans": [ + { + "bbox": [ + 105, + 614, + 498, + 631 + ], + "score": 1.0, + "content": "research focus is broadly categorized based on the ultimate objectives of model merging.", + "type": "text" + } + ], + "index": 36 + } + ], + "index": 35, + "bbox_fs": [ + 105, + 592, + 505, + 631 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 632, + 505, + 732 + ], + "lines": [ + { + "bbox": [ + 106, + 632, + 505, + 646 + ], + "spans": [ + { + "bbox": [ + 106, + 632, + 505, + 646 + ], + "score": 1.0, + "content": "The first category focuses on merging entire models, and the goal is to combine individ-", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 106, + 643, + 505, + 657 + ], + "spans": [ + { + "bbox": [ + 106, + 643, + 505, + 657 + ], + "score": 1.0, + "content": "ually trained models to approximate the performance benefits of model ensembling or", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 106, + 654, + 506, + 667 + ], + "spans": [ + { + "bbox": [ + 106, + 654, + 506, + 667 + ], + "score": 1.0, + "content": "multi-task learning. Prior works (Matena & Raffel, 2021; Jin et al., 2023; Yadav et al., 2023;", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 664, + 506, + 679 + ], + "spans": [ + { + "bbox": [ + 105, + 664, + 506, + 679 + ], + "score": 1.0, + "content": "Wu et al., 2023a) operated under the assumption of shared model architectures. 
For ex-", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 675, + 506, + 691 + ], + "spans": [ + { + "bbox": [ + 105, + 675, + 506, + 691 + ], + "score": 1.0, + "content": "ample, Matena & Raffel (2021) amalgamates models by approximating Gaussian posterior", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 686, + 505, + 700 + ], + "spans": [ + { + "bbox": [ + 105, + 686, + 505, + 700 + ], + "score": 1.0, + "content": "distributions garnered from Fisher information, while Yadav et al. (2023) merges models", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 698, + 506, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 506, + 712 + ], + "score": 1.0, + "content": "via resolving model interferences. Another approach is merging models with different ar-", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 106, + 709, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 106, + 709, + 505, + 722 + ], + "score": 1.0, + "content": "chitectures. For instance, Ainsworth et al. (2023) configures weights of different models", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 104, + 719, + 506, + 735 + ], + "spans": [ + { + "bbox": [ + 104, + 719, + 506, + 735 + ], + "score": 1.0, + "content": "prior to their merger. Following this objective, Stoica et al. 
(2023) merges models operating", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 81, + 505, + 97 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 97 + ], + "score": 1.0, + "content": "on varying tasks by identifying common features, without requiring additional training.", + "type": "text", + "cross_page": true + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 92, + 505, + 107 + ], + "spans": [ + { + "bbox": [ + 105, + 92, + 505, + 107 + ], + "score": 1.0, + "content": "Unlike these works, our work focuses on merging models for better cross-task generaliza-", + "type": "text", + "cross_page": true + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 104, + 130, + 117 + ], + "spans": [ + { + "bbox": [ + 106, + 104, + 130, + 117 + ], + "score": 1.0, + "content": "tion.", + "type": "text", + "cross_page": true + } + ], + "index": 2 + } + ], + "index": 41, + "bbox_fs": [ + 104, + 632, + 506, + 735 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 108, + 82, + 503, + 116 + ], + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 97 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 97 + ], + "score": 1.0, + "content": "on varying tasks by identifying common features, without requiring additional training.", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 92, + 505, + 107 + ], + "spans": [ + { + "bbox": [ + 105, + 92, + 505, + 107 + ], + "score": 1.0, + "content": "Unlike these works, our work focuses on merging models for better cross-task generaliza-", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 104, + 130, + 117 + ], + "spans": [ + { + "bbox": [ + 106, + 104, + 130, + 117 + ], + "score": 1.0, + "content": "tion.", + "type": "text" + } + ], + "index": 2 + } + ], + "index": 1 + }, + { + "type": "text", + "bbox": [ + 107, + 122, + 505, + 308 + ], + "lines": [ + { + "bbox": [ + 106, + 121, + 505, + 134 + ], + "spans": [ + { + "bbox": [ + 106, + 121, + 505, + 134 + ], 
+ "score": 1.0, + "content": "The second category most closely aligns with our research, stemming from a shared mo-", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 131, + 504, + 144 + ], + "spans": [ + { + "bbox": [ + 106, + 131, + 504, + 144 + ], + "score": 1.0, + "content": "tivation of module composition. Various scholars have made advances in this line of re-", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 142, + 506, + 157 + ], + "spans": [ + { + "bbox": [ + 105, + 142, + 506, + 157 + ], + "score": 1.0, + "content": "search: Kingetsu et al. (2021) decomposes and recomposes modules on the basis of their", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 153, + 507, + 168 + ], + "spans": [ + { + "bbox": [ + 105, + 153, + 507, + 168 + ], + "score": 1.0, + "content": "functionality; Ilharco et al. (2023) proposes modulating model behavior using task vectors;", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 165, + 505, + 179 + ], + "spans": [ + { + "bbox": [ + 105, + 165, + 505, + 179 + ], + "score": 1.0, + "content": "Lv et al. (2023) amalgamates parameter-efficient modules weighted according to task simi-", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 175, + 506, + 190 + ], + "spans": [ + { + "bbox": [ + 105, + 175, + 506, + 190 + ], + "score": 1.0, + "content": "larity; Zhang et al. (2023a) crafts modules by employing specific arithmetic operations; Sun", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 186, + 506, + 201 + ], + "spans": [ + { + "bbox": [ + 105, + 186, + 506, + 201 + ], + "score": 1.0, + "content": "et al. (2023) improves few-shot performance of unseen tasks by multi-task pre-training of", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 198, + 506, + 211 + ], + "spans": [ + { + "bbox": [ + 105, + 198, + 506, + 211 + ], + "score": 1.0, + "content": "prompts; Chronopoulou et al. 
(2023) averages adapter weights intended for transfer; Ponti", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 208, + 506, + 222 + ], + "spans": [ + { + "bbox": [ + 104, + 208, + 506, + 222 + ], + "score": 1.0, + "content": "et al. (2023) focuses on jointly learning adapters and a routing function that allocates skills", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 219, + 506, + 234 + ], + "spans": [ + { + "bbox": [ + 105, + 219, + 506, + 234 + ], + "score": 1.0, + "content": "to each task; and Muqeeth et al. (2023) concentrates on amalgamating experts in mixture of", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 230, + 505, + 244 + ], + "spans": [ + { + "bbox": [ + 105, + 230, + 505, + 244 + ], + "score": 1.0, + "content": "experts models; However, these methods generally necessitate multi-task training or hu-", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 241, + 506, + 255 + ], + "spans": [ + { + "bbox": [ + 105, + 241, + 506, + 255 + ], + "score": 1.0, + "content": "man prior on module selection for the downstream task. In contrast, our method does not", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 252, + 505, + 266 + ], + "spans": [ + { + "bbox": [ + 105, + 252, + 505, + 266 + ], + "score": 1.0, + "content": "impose any special training requirements and simply employs vanilla LoRA tuning. Addi-", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 264, + 505, + 277 + ], + "spans": [ + { + "bbox": [ + 106, + 264, + 505, + 277 + ], + "score": 1.0, + "content": "tionally, the module selection for downstream tasks is entirely data-driven without human", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 273, + 506, + 288 + ], + "spans": [ + { + "bbox": [ + 105, + 273, + 506, + 288 + ], + "score": 1.0, + "content": "prior knowledge. 
This design gives the advantage of easily adding new LoRA modules for", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 285, + 505, + 299 + ], + "spans": [ + { + "bbox": [ + 105, + 285, + 505, + 299 + ], + "score": 1.0, + "content": "reuse, allowing our method to flexibly scale up the number of LoRA module candidates in", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 106, + 297, + 155, + 308 + ], + "spans": [ + { + "bbox": [ + 106, + 297, + 155, + 308 + ], + "score": 1.0, + "content": "the future.", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 11 + }, + { + "type": "text", + "bbox": [ + 107, + 339, + 505, + 525 + ], + "lines": [ + { + "bbox": [ + 105, + 338, + 505, + 352 + ], + "spans": [ + { + "bbox": [ + 105, + 338, + 505, + 352 + ], + "score": 1.0, + "content": "Mixture of Experts The Mixture of Experts (MoE) is an ensemble method, often visual-", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 348, + 505, + 363 + ], + "spans": [ + { + "bbox": [ + 105, + 348, + 505, + 363 + ], + "score": 1.0, + "content": "ized as a collection of sub-modules, or β€œexperts”, each specializing in processing different", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 104, + 360, + 506, + 375 + ], + "spans": [ + { + "bbox": [ + 104, + 360, + 506, + 375 + ], + "score": 1.0, + "content": "types of input data. Each expert in this system is controlled by a unique gating network,", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 370, + 505, + 386 + ], + "spans": [ + { + "bbox": [ + 105, + 370, + 505, + 386 + ], + "score": 1.0, + "content": "activated based on the distinct nature of the input data. 
For every token in these input se-", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 383, + 505, + 397 + ], + "spans": [ + { + "bbox": [ + 105, + 383, + 505, + 397 + ], + "score": 1.0, + "content": "quences, this network identifies and engages the most suitable experts to process the data.", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 393, + 505, + 407 + ], + "spans": [ + { + "bbox": [ + 105, + 393, + 505, + 407 + ], + "score": 1.0, + "content": "As a result, the performance is superior compared to relying on a single, generic model for", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 404, + 506, + 419 + ], + "spans": [ + { + "bbox": [ + 105, + 404, + 506, + 419 + ], + "score": 1.0, + "content": "all types of input. This technique has proven instrumental in numerous domains, such as", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 104, + 415, + 506, + 429 + ], + "spans": [ + { + "bbox": [ + 104, + 415, + 506, + 429 + ], + "score": 1.0, + "content": "natural language processing and computer vision (Jacobs et al., 1991; Shazeer et al., 2017;", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 425, + 506, + 441 + ], + "spans": [ + { + "bbox": [ + 105, + 425, + 506, + 441 + ], + "score": 1.0, + "content": "Du et al., 2022; Zhang et al., 2022; Wang et al., 2022; crumb, 2023). Our methodology dis-", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 437, + 505, + 451 + ], + "spans": [ + { + "bbox": [ + 105, + 437, + 505, + 451 + ], + "score": 1.0, + "content": "plays similarities to MoE, wherein upstream-trained LoRA modules can be aligned with", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 446, + 506, + 463 + ], + "spans": [ + { + "bbox": [ + 105, + 446, + 506, + 463 + ], + "score": 1.0, + "content": "MoE’s expert design. 
A noteworthy distinguishing factor is that our approach mechanism", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 458, + 506, + 474 + ], + "spans": [ + { + "bbox": [ + 105, + 458, + 506, + 474 + ], + "score": 1.0, + "content": "does not require any specialized manipulation of LoRAs during training while facilitat-", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 470, + 505, + 484 + ], + "spans": [ + { + "bbox": [ + 105, + 470, + 505, + 484 + ], + "score": 1.0, + "content": "ing dynamic LoRA module assembly at any scale, each pre-tuned to different tasks. In", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 481, + 505, + 494 + ], + "spans": [ + { + "bbox": [ + 105, + 481, + 505, + 494 + ], + "score": 1.0, + "content": "contrast, MoE mandates a predetermined count of experts during both the training and", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 491, + 506, + 507 + ], + "spans": [ + { + "bbox": [ + 105, + 491, + 506, + 507 + ], + "score": 1.0, + "content": "testing phases. 
Recent studies on the interrelation between MoE and instruction tuning", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 104, + 502, + 506, + 517 + ], + "spans": [ + { + "bbox": [ + 104, + 502, + 506, + 517 + ], + "score": 1.0, + "content": "have demonstrated that the simultaneous application of both approaches enhances the ef-", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 514, + 328, + 527 + ], + "spans": [ + { + "bbox": [ + 105, + 514, + 328, + 527 + ], + "score": 1.0, + "content": "fectiveness of each individually (Shen et al., 2023).", + "type": "text" + } + ], + "index": 36 + } + ], + "index": 28 + }, + { + "type": "text", + "bbox": [ + 107, + 556, + 505, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 555, + 505, + 569 + ], + "spans": [ + { + "bbox": [ + 105, + 555, + 505, + 569 + ], + "score": 1.0, + "content": "Cross-Task generalization Recent advancements like CrossFit (Ye et al., 2021),", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 566, + 505, + 580 + ], + "spans": [ + { + "bbox": [ + 105, + 566, + 505, + 580 + ], + "score": 1.0, + "content": "ExT5 (Aribandi et al., 2022), FLAN (Wei et al., 2022), T0 (Sanh et al., 2022), Instruct-", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 104, + 577, + 506, + 592 + ], + "spans": [ + { + "bbox": [ + 104, + 577, + 506, + 592 + ], + "score": 1.0, + "content": "GPT (Ouyang et al., 2022), and ReCross (Lin et al., 2022) have been striving to foster a", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 589, + 505, + 601 + ], + "spans": [ + { + "bbox": [ + 105, + 589, + 505, + 601 + ], + "score": 1.0, + "content": "vastly multi-task model’s generalization across different tasks, very much aligned with the", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 599, + 505, + 613 + ], + "spans": [ + { + "bbox": [ + 105, + 599, + 505, + 613 + ], + "score": 1.0, + "content": "objectives of our research. 
Among this cohort, the connections of CrossFit and ReCross", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 611, + 505, + 623 + ], + "spans": [ + { + "bbox": [ + 105, + 611, + 505, + 623 + ], + "score": 1.0, + "content": "with LoraHub are particularly noteworthy. The CrossFit framework (Ye et al., 2021) man-", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 104, + 619, + 505, + 636 + ], + "spans": [ + { + "bbox": [ + 104, + 619, + 505, + 636 + ], + "score": 1.0, + "content": "dates a minimal number of labeled examples of the target task for few-shot fine-tuning.", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 632, + 505, + 646 + ], + "spans": [ + { + "bbox": [ + 105, + 632, + 505, + 646 + ], + "score": 1.0, + "content": "However, its limitation lies in the application of task names as hard prefixes in templates,", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 104, + 643, + 505, + 658 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 505, + 658 + ], + "score": 1.0, + "content": "posing challenges in the task’s generalization. On the other hand, while ReCross mitigates", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 654, + 505, + 669 + ], + "spans": [ + { + "bbox": [ + 105, + 654, + 505, + 669 + ], + "score": 1.0, + "content": "the need for labels in few-shot examples for retrieval, it necessitates a fine-tuning process", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 105, + 665, + 505, + 680 + ], + "spans": [ + { + "bbox": [ + 105, + 665, + 505, + 680 + ], + "score": 1.0, + "content": "using the retrieved data. This procedure appears time-consuming when compared to Lo-", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 104, + 675, + 505, + 691 + ], + "spans": [ + { + "bbox": [ + 104, + 675, + 505, + 691 + ], + "score": 1.0, + "content": "raHub’s approach. 
Through the deployment of few-shot labeled examples and a gradient-", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 105, + 687, + 506, + 701 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 506, + 701 + ], + "score": 1.0, + "content": "free optimization process, LoraHub facilitates an iterative update of weights to compose", + "type": "text" + } + ], + "index": 49 + }, + { + "bbox": [ + 105, + 697, + 506, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 697, + 506, + 712 + ], + "score": 1.0, + "content": "the LoRA modules. The resultant method is more efficient and cost-effective relative to", + "type": "text" + } + ], + "index": 50 + }, + { + "bbox": [ + 105, + 709, + 505, + 724 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 505, + 724 + ], + "score": 1.0, + "content": "previous work. Overall, LoraHub offers a more practical and viable solution to the opti-", + "type": "text" + } + ], + "index": 51 + }, + { + "bbox": [ + 106, + 722, + 186, + 733 + ], + "spans": [ + { + "bbox": [ + 106, + 722, + 186, + 733 + ], + "score": 1.0, + "content": "mization process.", + "type": "text" + } + ], + "index": 52 + } + ], + "index": 44.5 + } + ], + "page_idx": 8, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 27, + 316, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "score": 1.0, + "content": "Published as a conference paper at COLM 2024", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 302, + 751, + 308, + 759 + ], + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 762 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 762 + ], + "score": 1.0, + "content": "9", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 108, + 82, + 503, + 116 + ], + "lines": [], + "index": 1, + "bbox_fs": [ + 105, + 81, + 505, + 117 + ], + "lines_deleted": 
true + }, + { + "type": "text", + "bbox": [ + 107, + 122, + 505, + 308 + ], + "lines": [ + { + "bbox": [ + 106, + 121, + 505, + 134 + ], + "spans": [ + { + "bbox": [ + 106, + 121, + 505, + 134 + ], + "score": 1.0, + "content": "The second category most closely aligns with our research, stemming from a shared mo-", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 131, + 504, + 144 + ], + "spans": [ + { + "bbox": [ + 106, + 131, + 504, + 144 + ], + "score": 1.0, + "content": "tivation of module composition. Various scholars have made advances in this line of re-", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 142, + 506, + 157 + ], + "spans": [ + { + "bbox": [ + 105, + 142, + 506, + 157 + ], + "score": 1.0, + "content": "search: Kingetsu et al. (2021) decomposes and recomposes modules on the basis of their", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 153, + 507, + 168 + ], + "spans": [ + { + "bbox": [ + 105, + 153, + 507, + 168 + ], + "score": 1.0, + "content": "functionality; Ilharco et al. (2023) proposes modulating model behavior using task vectors;", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 165, + 505, + 179 + ], + "spans": [ + { + "bbox": [ + 105, + 165, + 505, + 179 + ], + "score": 1.0, + "content": "Lv et al. (2023) amalgamates parameter-efficient modules weighted according to task simi-", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 175, + 506, + 190 + ], + "spans": [ + { + "bbox": [ + 105, + 175, + 506, + 190 + ], + "score": 1.0, + "content": "larity; Zhang et al. (2023a) crafts modules by employing specific arithmetic operations; Sun", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 186, + 506, + 201 + ], + "spans": [ + { + "bbox": [ + 105, + 186, + 506, + 201 + ], + "score": 1.0, + "content": "et al. 
(2023) improves few-shot performance of unseen tasks by multi-task pre-training of", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 198, + 506, + 211 + ], + "spans": [ + { + "bbox": [ + 105, + 198, + 506, + 211 + ], + "score": 1.0, + "content": "prompts; Chronopoulou et al. (2023) averages adapter weights intended for transfer; Ponti", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 208, + 506, + 222 + ], + "spans": [ + { + "bbox": [ + 104, + 208, + 506, + 222 + ], + "score": 1.0, + "content": "et al. (2023) focuses on jointly learning adapters and a routing function that allocates skills", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 219, + 506, + 234 + ], + "spans": [ + { + "bbox": [ + 105, + 219, + 506, + 234 + ], + "score": 1.0, + "content": "to each task; and Muqeeth et al. (2023) concentrates on amalgamating experts in mixture of", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 230, + 505, + 244 + ], + "spans": [ + { + "bbox": [ + 105, + 230, + 505, + 244 + ], + "score": 1.0, + "content": "experts models; However, these methods generally necessitate multi-task training or hu-", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 241, + 506, + 255 + ], + "spans": [ + { + "bbox": [ + 105, + 241, + 506, + 255 + ], + "score": 1.0, + "content": "man prior on module selection for the downstream task. In contrast, our method does not", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 252, + 505, + 266 + ], + "spans": [ + { + "bbox": [ + 105, + 252, + 505, + 266 + ], + "score": 1.0, + "content": "impose any special training requirements and simply employs vanilla LoRA tuning. 
Addi-", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 264, + 505, + 277 + ], + "spans": [ + { + "bbox": [ + 106, + 264, + 505, + 277 + ], + "score": 1.0, + "content": "tionally, the module selection for downstream tasks is entirely data-driven without human", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 273, + 506, + 288 + ], + "spans": [ + { + "bbox": [ + 105, + 273, + 506, + 288 + ], + "score": 1.0, + "content": "prior knowledge. This design gives the advantage of easily adding new LoRA modules for", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 285, + 505, + 299 + ], + "spans": [ + { + "bbox": [ + 105, + 285, + 505, + 299 + ], + "score": 1.0, + "content": "reuse, allowing our method to flexibly scale up the number of LoRA module candidates in", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 106, + 297, + 155, + 308 + ], + "spans": [ + { + "bbox": [ + 106, + 297, + 155, + 308 + ], + "score": 1.0, + "content": "the future.", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 11, + "bbox_fs": [ + 104, + 121, + 507, + 308 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 339, + 505, + 525 + ], + "lines": [ + { + "bbox": [ + 105, + 338, + 505, + 352 + ], + "spans": [ + { + "bbox": [ + 105, + 338, + 505, + 352 + ], + "score": 1.0, + "content": "Mixture of Experts The Mixture of Experts (MoE) is an ensemble method, often visual-", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 348, + 505, + 363 + ], + "spans": [ + { + "bbox": [ + 105, + 348, + 505, + 363 + ], + "score": 1.0, + "content": "ized as a collection of sub-modules, or β€œexperts”, each specializing in processing different", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 104, + 360, + 506, + 375 + ], + "spans": [ + { + "bbox": [ + 104, + 360, + 506, + 375 + ], + "score": 1.0, + "content": "types of input data. 
Each expert in this system is controlled by a unique gating network,", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 370, + 505, + 386 + ], + "spans": [ + { + "bbox": [ + 105, + 370, + 505, + 386 + ], + "score": 1.0, + "content": "activated based on the distinct nature of the input data. For every token in these input se-", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 383, + 505, + 397 + ], + "spans": [ + { + "bbox": [ + 105, + 383, + 505, + 397 + ], + "score": 1.0, + "content": "quences, this network identifies and engages the most suitable experts to process the data.", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 393, + 505, + 407 + ], + "spans": [ + { + "bbox": [ + 105, + 393, + 505, + 407 + ], + "score": 1.0, + "content": "As a result, the performance is superior compared to relying on a single, generic model for", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 404, + 506, + 419 + ], + "spans": [ + { + "bbox": [ + 105, + 404, + 506, + 419 + ], + "score": 1.0, + "content": "all types of input. This technique has proven instrumental in numerous domains, such as", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 104, + 415, + 506, + 429 + ], + "spans": [ + { + "bbox": [ + 104, + 415, + 506, + 429 + ], + "score": 1.0, + "content": "natural language processing and computer vision (Jacobs et al., 1991; Shazeer et al., 2017;", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 425, + 506, + 441 + ], + "spans": [ + { + "bbox": [ + 105, + 425, + 506, + 441 + ], + "score": 1.0, + "content": "Du et al., 2022; Zhang et al., 2022; Wang et al., 2022; crumb, 2023). 
Our methodology dis-", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 437, + 505, + 451 + ], + "spans": [ + { + "bbox": [ + 105, + 437, + 505, + 451 + ], + "score": 1.0, + "content": "plays similarities to MoE, wherein upstream-trained LoRA modules can be aligned with", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 446, + 506, + 463 + ], + "spans": [ + { + "bbox": [ + 105, + 446, + 506, + 463 + ], + "score": 1.0, + "content": "MoE’s expert design. A noteworthy distinguishing factor is that our approach mechanism", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 458, + 506, + 474 + ], + "spans": [ + { + "bbox": [ + 105, + 458, + 506, + 474 + ], + "score": 1.0, + "content": "does not require any specialized manipulation of LoRAs during training while facilitat-", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 470, + 505, + 484 + ], + "spans": [ + { + "bbox": [ + 105, + 470, + 505, + 484 + ], + "score": 1.0, + "content": "ing dynamic LoRA module assembly at any scale, each pre-tuned to different tasks. In", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 481, + 505, + 494 + ], + "spans": [ + { + "bbox": [ + 105, + 481, + 505, + 494 + ], + "score": 1.0, + "content": "contrast, MoE mandates a predetermined count of experts during both the training and", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 491, + 506, + 507 + ], + "spans": [ + { + "bbox": [ + 105, + 491, + 506, + 507 + ], + "score": 1.0, + "content": "testing phases. 
Recent studies on the interrelation between MoE and instruction tuning", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 104, + 502, + 506, + 517 + ], + "spans": [ + { + "bbox": [ + 104, + 502, + 506, + 517 + ], + "score": 1.0, + "content": "have demonstrated that the simultaneous application of both approaches enhances the ef-", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 514, + 328, + 527 + ], + "spans": [ + { + "bbox": [ + 105, + 514, + 328, + 527 + ], + "score": 1.0, + "content": "fectiveness of each individually (Shen et al., 2023).", + "type": "text" + } + ], + "index": 36 + } + ], + "index": 28, + "bbox_fs": [ + 104, + 338, + 506, + 527 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 556, + 505, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 555, + 505, + 569 + ], + "spans": [ + { + "bbox": [ + 105, + 555, + 505, + 569 + ], + "score": 1.0, + "content": "Cross-Task generalization Recent advancements like CrossFit (Ye et al., 2021),", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 566, + 505, + 580 + ], + "spans": [ + { + "bbox": [ + 105, + 566, + 505, + 580 + ], + "score": 1.0, + "content": "ExT5 (Aribandi et al., 2022), FLAN (Wei et al., 2022), T0 (Sanh et al., 2022), Instruct-", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 104, + 577, + 506, + 592 + ], + "spans": [ + { + "bbox": [ + 104, + 577, + 506, + 592 + ], + "score": 1.0, + "content": "GPT (Ouyang et al., 2022), and ReCross (Lin et al., 2022) have been striving to foster a", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 589, + 505, + 601 + ], + "spans": [ + { + "bbox": [ + 105, + 589, + 505, + 601 + ], + "score": 1.0, + "content": "vastly multi-task model’s generalization across different tasks, very much aligned with the", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 599, + 505, + 613 + ], + "spans": [ + { + "bbox": [ + 105, + 599, + 505, + 613 + ], + "score": 1.0, 
+ "content": "objectives of our research. Among this cohort, the connections of CrossFit and ReCross", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 611, + 505, + 623 + ], + "spans": [ + { + "bbox": [ + 105, + 611, + 505, + 623 + ], + "score": 1.0, + "content": "with LoraHub are particularly noteworthy. The CrossFit framework (Ye et al., 2021) man-", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 104, + 619, + 505, + 636 + ], + "spans": [ + { + "bbox": [ + 104, + 619, + 505, + 636 + ], + "score": 1.0, + "content": "dates a minimal number of labeled examples of the target task for few-shot fine-tuning.", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 632, + 505, + 646 + ], + "spans": [ + { + "bbox": [ + 105, + 632, + 505, + 646 + ], + "score": 1.0, + "content": "However, its limitation lies in the application of task names as hard prefixes in templates,", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 104, + 643, + 505, + 658 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 505, + 658 + ], + "score": 1.0, + "content": "posing challenges in the task’s generalization. On the other hand, while ReCross mitigates", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 654, + 505, + 669 + ], + "spans": [ + { + "bbox": [ + 105, + 654, + 505, + 669 + ], + "score": 1.0, + "content": "the need for labels in few-shot examples for retrieval, it necessitates a fine-tuning process", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 105, + 665, + 505, + 680 + ], + "spans": [ + { + "bbox": [ + 105, + 665, + 505, + 680 + ], + "score": 1.0, + "content": "using the retrieved data. This procedure appears time-consuming when compared to Lo-", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 104, + 675, + 505, + 691 + ], + "spans": [ + { + "bbox": [ + 104, + 675, + 505, + 691 + ], + "score": 1.0, + "content": "raHub’s approach. 
Through the deployment of few-shot labeled examples and a gradient-", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 105, + 687, + 506, + 701 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 506, + 701 + ], + "score": 1.0, + "content": "free optimization process, LoraHub facilitates an iterative update of weights to compose", + "type": "text" + } + ], + "index": 49 + }, + { + "bbox": [ + 105, + 697, + 506, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 697, + 506, + 712 + ], + "score": 1.0, + "content": "the LoRA modules. The resultant method is more efficient and cost-effective relative to", + "type": "text" + } + ], + "index": 50 + }, + { + "bbox": [ + 105, + 709, + 505, + 724 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 505, + 724 + ], + "score": 1.0, + "content": "previous work. Overall, LoraHub offers a more practical and viable solution to the opti-", + "type": "text" + } + ], + "index": 51 + }, + { + "bbox": [ + 106, + 722, + 186, + 733 + ], + "spans": [ + { + "bbox": [ + 106, + 722, + 186, + 733 + ], + "score": 1.0, + "content": "mization process.", + "type": "text" + } + ], + "index": 52 + } + ], + "index": 44.5, + "bbox_fs": [ + 104, + 555, + 506, + 733 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "title", + "bbox": [ + 107, + 80, + 188, + 94 + ], + "lines": [ + { + "bbox": [ + 104, + 78, + 190, + 97 + ], + "spans": [ + { + "bbox": [ + 104, + 78, + 190, + 97 + ], + "score": 1.0, + "content": "7 Conclusion", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 107, + 107, + 505, + 218 + ], + "lines": [ + { + "bbox": [ + 105, + 106, + 505, + 120 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 505, + 120 + ], + "score": 1.0, + "content": "In this work, we have introduced LoraHub, a strategic framework for composing LoRA", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 118, + 505, + 130 + ], + "spans": [ + { + "bbox": [ + 105, + 118, + 505, + 130 + ], 
+ "score": 1.0, + "content": "modules trained on diverse tasks in order to achieve adaptable performance on new tasks.", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 129, + 505, + 142 + ], + "spans": [ + { + "bbox": [ + 106, + 129, + 505, + 142 + ], + "score": 1.0, + "content": "Our approach enables the fluid combination of multiple LoRA modules using just a few", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 140, + 505, + 153 + ], + "spans": [ + { + "bbox": [ + 105, + 140, + 505, + 153 + ], + "score": 1.0, + "content": "examples from a novel task, without requiring additional model parameters or human", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 151, + 506, + 164 + ], + "spans": [ + { + "bbox": [ + 105, + 151, + 506, + 164 + ], + "score": 1.0, + "content": "expertise. The empirical results on the BBH benchmark demonstrate that LoraHub can", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 161, + 506, + 177 + ], + "spans": [ + { + "bbox": [ + 105, + 161, + 506, + 177 + ], + "score": 1.0, + "content": "effectively match the performance of in-context learning in few-shot scenarios, removing", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 172, + 506, + 187 + ], + "spans": [ + { + "bbox": [ + 105, + 172, + 506, + 187 + ], + "score": 1.0, + "content": "the need for in-context examples during inference. Overall, our work shows the promise", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 182, + 506, + 200 + ], + "spans": [ + { + "bbox": [ + 104, + 182, + 506, + 200 + ], + "score": 1.0, + "content": "of strategic LoRA composability for rapidly adapting LLMs to diverse tasks. 
By fostering", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 195, + 505, + 208 + ], + "spans": [ + { + "bbox": [ + 105, + 195, + 505, + 208 + ], + "score": 1.0, + "content": "reuse and combination of LoRA modules, we can work towards more general and adapt-", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 205, + 301, + 220 + ], + "spans": [ + { + "bbox": [ + 105, + 205, + 301, + 220 + ], + "score": 1.0, + "content": "able LLMs while minimizing training costs.", + "type": "text" + } + ], + "index": 10 + } + ], + "index": 5.5 + }, + { + "type": "title", + "bbox": [ + 108, + 235, + 253, + 250 + ], + "lines": [ + { + "bbox": [ + 105, + 234, + 255, + 254 + ], + "spans": [ + { + "bbox": [ + 105, + 234, + 255, + 254 + ], + "score": 1.0, + "content": "Reproducibility Statement", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 11 + }, + { + "type": "text", + "bbox": [ + 107, + 262, + 505, + 351 + ], + "lines": [ + { + "bbox": [ + 105, + 261, + 505, + 276 + ], + "spans": [ + { + "bbox": [ + 105, + 261, + 505, + 276 + ], + "score": 1.0, + "content": "The authors have made great efforts to ensure the reproducibility of the empirical results", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 273, + 505, + 287 + ], + "spans": [ + { + "bbox": [ + 105, + 273, + 505, + 287 + ], + "score": 1.0, + "content": "reported in this paper. Firstly, the experiment settings, evaluation metrics, and datasets", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 284, + 505, + 298 + ], + "spans": [ + { + "bbox": [ + 105, + 284, + 505, + 298 + ], + "score": 1.0, + "content": "were described in detail in Section 4.1. Secondly, the codes and script for reproduce the", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 294, + 506, + 310 + ], + "spans": [ + { + "bbox": [ + 104, + 294, + 506, + 310 + ], + "score": 1.0, + "content": "result will be opensource after accepted. 
Second, the source code implementing the pro-", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 306, + 506, + 320 + ], + "spans": [ + { + "bbox": [ + 104, + 306, + 506, + 320 + ], + "score": 1.0, + "content": "posed method and experiments will be made publicly available at upon acceptance of the", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 317, + 506, + 331 + ], + "spans": [ + { + "bbox": [ + 104, + 317, + 506, + 331 + ], + "score": 1.0, + "content": "paper. Third, pre-trained LoRA modules from this work along with their configuration", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 328, + 505, + 342 + ], + "spans": [ + { + "bbox": [ + 105, + 328, + 505, + 342 + ], + "score": 1.0, + "content": "files and weights will be shared. These allow reproduction without retraining the LoRA", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 339, + 324, + 353 + ], + "spans": [ + { + "bbox": [ + 105, + 339, + 324, + 353 + ], + "score": 1.0, + "content": "modules, enabling quick testing and verification.", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 15.5 + }, + { + "type": "title", + "bbox": [ + 107, + 369, + 167, + 382 + ], + "lines": [ + { + "bbox": [ + 105, + 368, + 169, + 384 + ], + "spans": [ + { + "bbox": [ + 105, + 368, + 169, + 384 + ], + "score": 1.0, + "content": "References", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 20 + }, + { + "type": "text", + "bbox": [ + 102, + 387, + 505, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 387, + 505, + 405 + ], + "spans": [ + { + "bbox": [ + 105, + 387, + 505, + 405 + ], + "score": 1.0, + "content": "Samuel Ainsworth, Jonathan Hayase, and Siddhartha Srinivasa. Git re-basin: Merging", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 116, + 401, + 505, + 414 + ], + "spans": [ + { + "bbox": [ + 116, + 401, + 505, + 414 + ], + "score": 1.0, + "content": "models modulo permutation symmetries. 
In The Eleventh International Conference on", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 116, + 412, + 250, + 425 + ], + "spans": [ + { + "bbox": [ + 116, + 412, + 250, + 425 + ], + "score": 1.0, + "content": "Learning Representations, 2023.", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 433, + 505, + 448 + ], + "spans": [ + { + "bbox": [ + 105, + 433, + 505, + 448 + ], + "score": 1.0, + "content": "Shengnan An, Yifei Li, Zeqi Lin, Qian Liu, Bei Chen, Qiang Fu, Weizhu Chen, Nanning", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 115, + 444, + 505, + 460 + ], + "spans": [ + { + "bbox": [ + 115, + 444, + 505, + 460 + ], + "score": 1.0, + "content": "Zheng, and Jian-Guang Lou. Input-tuning: Adapting unfamiliar inputs to frozen pre-", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 116, + 456, + 280, + 469 + ], + "spans": [ + { + "bbox": [ + 116, + 456, + 280, + 469 + ], + "score": 1.0, + "content": "trained models. ArXiv preprint, 2022.", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 477, + 505, + 492 + ], + "spans": [ + { + "bbox": [ + 105, + 477, + 505, + 492 + ], + "score": 1.0, + "content": "Vamsi Aribandi, Yi Tay, Tal Schuster, Jinfeng Rao, Huaixiu Steven Zheng, Sanket Vaibhav", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 116, + 489, + 505, + 502 + ], + "spans": [ + { + "bbox": [ + 116, + 489, + 505, + 502 + ], + "score": 1.0, + "content": "Mehta, Honglei Zhuang, Vinh Q. Tran, Dara Bahri, Jianmo Ni, Jai Prakash Gupta, Kai", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 115, + 500, + 505, + 514 + ], + "spans": [ + { + "bbox": [ + 115, + 500, + 505, + 514 + ], + "score": 1.0, + "content": "Hui, Sebastian Ruder, and Donald Metzler. 
Ext5: Towards extreme multi-task scaling for", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 116, + 511, + 292, + 524 + ], + "spans": [ + { + "bbox": [ + 116, + 511, + 292, + 524 + ], + "score": 1.0, + "content": "transfer learning. In Proc. of ICLR, 2022.", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 533, + 505, + 547 + ], + "spans": [ + { + "bbox": [ + 105, + 533, + 505, + 547 + ], + "score": 1.0, + "content": "Stephen Bach, Victor Sanh, Zheng Xin Yong, Albert Webson, Colin Raffel, Nihal V. Nayak,", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 115, + 544, + 505, + 558 + ], + "spans": [ + { + "bbox": [ + 115, + 544, + 505, + 558 + ], + "score": 1.0, + "content": "Abheesht Sharma, Taewoon Kim, M Saiful Bari, Thibault Fevry, Zaid Alyafeai, Manan", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 115, + 555, + 505, + 569 + ], + "spans": [ + { + "bbox": [ + 115, + 555, + 505, + 569 + ], + "score": 1.0, + "content": "Dey, Andrea Santilli, Zhiqing Sun, Srulik Ben-david, Canwen Xu, Gunjan Chhablani,", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 115, + 566, + 505, + 580 + ], + "spans": [ + { + "bbox": [ + 115, + 566, + 505, + 580 + ], + "score": 1.0, + "content": "Han Wang, Jason Fries, Maged Al-shaibani, Shanya Sharma, Urmish Thakker, Khalid", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 116, + 577, + 505, + 591 + ], + "spans": [ + { + "bbox": [ + 116, + 577, + 505, + 591 + ], + "score": 1.0, + "content": "Almubarak, Xiangru Tang, Dragomir Radev, Mike Tian-jian Jiang, and Alexander Rush.", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 114, + 587, + 506, + 604 + ], + "spans": [ + { + "bbox": [ + 114, + 587, + 506, + 604 + ], + "score": 1.0, + "content": "PromptSource: An integrated development environment and repository for natural lan-", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 115, + 600, + 284, + 613 + ], + "spans": [ + 
{ + "bbox": [ + 115, + 600, + 284, + 613 + ], + "score": 1.0, + "content": "guage prompts. In Proc. of ACL, 2022.", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 106, + 620, + 505, + 635 + ], + "spans": [ + { + "bbox": [ + 106, + 620, + 505, + 635 + ], + "score": 1.0, + "content": "Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 116, + 633, + 505, + 645 + ], + "spans": [ + { + "bbox": [ + 116, + 633, + 505, + 645 + ], + "score": 1.0, + "content": "Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 115, + 643, + 505, + 657 + ], + "spans": [ + { + "bbox": [ + 115, + 643, + 505, + 657 + ], + "score": 1.0, + "content": "Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 115, + 654, + 505, + 668 + ], + "spans": [ + { + "bbox": [ + 115, + 654, + 505, + 668 + ], + "score": 1.0, + "content": "Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen,", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 115, + 664, + 505, + 679 + ], + "spans": [ + { + "bbox": [ + 115, + 664, + 505, + 679 + ], + "score": 1.0, + "content": "Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner,", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 115, + 676, + 505, + 690 + ], + "spans": [ + { + "bbox": [ + 115, + 676, + 505, + 690 + ], + "score": 1.0, + "content": "Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. Language models", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 115, + 686, + 505, + 700 + ], + "spans": [ + { + "bbox": [ + 115, + 686, + 505, + 700 + ], + "score": 1.0, + "content": "are few-shot learners. 
In Hugo Larochelle, Marc’Aurelio Ranzato, Raia Hadsell, Maria-", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 115, + 698, + 505, + 712 + ], + "spans": [ + { + "bbox": [ + 115, + 698, + 505, + 712 + ], + "score": 1.0, + "content": "Florina Balcan, and Hsuan-Tien Lin (eds.), Advances in Neural Information Processing Sys-", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 115, + 709, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 115, + 709, + 505, + 723 + ], + "score": 1.0, + "content": "tems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020,", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 116, + 720, + 264, + 733 + ], + "spans": [ + { + "bbox": [ + 116, + 720, + 264, + 733 + ], + "score": 1.0, + "content": "December 6-12, 2020, virtual, 2020.", + "type": "text" + } + ], + "index": 47 + } + ], + "index": 34 + } + ], + "page_idx": 9, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 26, + 316, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "score": 1.0, + "content": "Published as a conference paper at COLM 2024", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 760 + ], + "lines": [ + { + "bbox": [ + 298, + 750, + 313, + 765 + ], + "spans": [ + { + "bbox": [ + 298, + 750, + 313, + 765 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 15, + "width": 15 + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "title", + "bbox": [ + 107, + 80, + 188, + 94 + ], + "lines": [ + { + "bbox": [ + 104, + 78, + 190, + 97 + ], + "spans": [ + { + "bbox": [ + 104, + 78, + 190, + 97 + ], + "score": 1.0, + "content": "7 Conclusion", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 107, + 107, + 505, + 218 + ], + "lines": [ + { + "bbox": [ + 
105, + 106, + 505, + 120 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 505, + 120 + ], + "score": 1.0, + "content": "In this work, we have introduced LoraHub, a strategic framework for composing LoRA", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 118, + 505, + 130 + ], + "spans": [ + { + "bbox": [ + 105, + 118, + 505, + 130 + ], + "score": 1.0, + "content": "modules trained on diverse tasks in order to achieve adaptable performance on new tasks.", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 129, + 505, + 142 + ], + "spans": [ + { + "bbox": [ + 106, + 129, + 505, + 142 + ], + "score": 1.0, + "content": "Our approach enables the fluid combination of multiple LoRA modules using just a few", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 140, + 505, + 153 + ], + "spans": [ + { + "bbox": [ + 105, + 140, + 505, + 153 + ], + "score": 1.0, + "content": "examples from a novel task, without requiring additional model parameters or human", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 151, + 506, + 164 + ], + "spans": [ + { + "bbox": [ + 105, + 151, + 506, + 164 + ], + "score": 1.0, + "content": "expertise. The empirical results on the BBH benchmark demonstrate that LoraHub can", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 161, + 506, + 177 + ], + "spans": [ + { + "bbox": [ + 105, + 161, + 506, + 177 + ], + "score": 1.0, + "content": "effectively match the performance of in-context learning in few-shot scenarios, removing", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 172, + 506, + 187 + ], + "spans": [ + { + "bbox": [ + 105, + 172, + 506, + 187 + ], + "score": 1.0, + "content": "the need for in-context examples during inference. 
Overall, our work shows the promise", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 182, + 506, + 200 + ], + "spans": [ + { + "bbox": [ + 104, + 182, + 506, + 200 + ], + "score": 1.0, + "content": "of strategic LoRA composability for rapidly adapting LLMs to diverse tasks. By fostering", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 195, + 505, + 208 + ], + "spans": [ + { + "bbox": [ + 105, + 195, + 505, + 208 + ], + "score": 1.0, + "content": "reuse and combination of LoRA modules, we can work towards more general and adapt-", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 205, + 301, + 220 + ], + "spans": [ + { + "bbox": [ + 105, + 205, + 301, + 220 + ], + "score": 1.0, + "content": "able LLMs while minimizing training costs.", + "type": "text" + } + ], + "index": 10 + } + ], + "index": 5.5, + "bbox_fs": [ + 104, + 106, + 506, + 220 + ] + }, + { + "type": "title", + "bbox": [ + 108, + 235, + 253, + 250 + ], + "lines": [ + { + "bbox": [ + 105, + 234, + 255, + 254 + ], + "spans": [ + { + "bbox": [ + 105, + 234, + 255, + 254 + ], + "score": 1.0, + "content": "Reproducibility Statement", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 11 + }, + { + "type": "text", + "bbox": [ + 107, + 262, + 505, + 351 + ], + "lines": [ + { + "bbox": [ + 105, + 261, + 505, + 276 + ], + "spans": [ + { + "bbox": [ + 105, + 261, + 505, + 276 + ], + "score": 1.0, + "content": "The authors have made great efforts to ensure the reproducibility of the empirical results", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 273, + 505, + 287 + ], + "spans": [ + { + "bbox": [ + 105, + 273, + 505, + 287 + ], + "score": 1.0, + "content": "reported in this paper. 
Firstly, the experiment settings, evaluation metrics, and datasets", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 284, + 505, + 298 + ], + "spans": [ + { + "bbox": [ + 105, + 284, + 505, + 298 + ], + "score": 1.0, + "content": "were described in detail in Section 4.1. Secondly, the codes and script for reproduce the", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 294, + 506, + 310 + ], + "spans": [ + { + "bbox": [ + 104, + 294, + 506, + 310 + ], + "score": 1.0, + "content": "result will be opensource after accepted. Second, the source code implementing the pro-", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 306, + 506, + 320 + ], + "spans": [ + { + "bbox": [ + 104, + 306, + 506, + 320 + ], + "score": 1.0, + "content": "posed method and experiments will be made publicly available at upon acceptance of the", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 317, + 506, + 331 + ], + "spans": [ + { + "bbox": [ + 104, + 317, + 506, + 331 + ], + "score": 1.0, + "content": "paper. Third, pre-trained LoRA modules from this work along with their configuration", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 328, + 505, + 342 + ], + "spans": [ + { + "bbox": [ + 105, + 328, + 505, + 342 + ], + "score": 1.0, + "content": "files and weights will be shared. 
These allow reproduction without retraining the LoRA", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 339, + 324, + 353 + ], + "spans": [ + { + "bbox": [ + 105, + 339, + 324, + 353 + ], + "score": 1.0, + "content": "modules, enabling quick testing and verification.", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 15.5, + "bbox_fs": [ + 104, + 261, + 506, + 353 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 369, + 167, + 382 + ], + "lines": [ + { + "bbox": [ + 105, + 368, + 169, + 384 + ], + "spans": [ + { + "bbox": [ + 105, + 368, + 169, + 384 + ], + "score": 1.0, + "content": "References", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 20 + }, + { + "type": "list", + "bbox": [ + 102, + 387, + 505, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 387, + 505, + 405 + ], + "spans": [ + { + "bbox": [ + 105, + 387, + 505, + 405 + ], + "score": 1.0, + "content": "Samuel Ainsworth, Jonathan Hayase, and Siddhartha Srinivasa. Git re-basin: Merging", + "type": "text" + } + ], + "index": 21, + "is_list_start_line": true + }, + { + "bbox": [ + 116, + 401, + 505, + 414 + ], + "spans": [ + { + "bbox": [ + 116, + 401, + 505, + 414 + ], + "score": 1.0, + "content": "models modulo permutation symmetries. 
In The Eleventh International Conference on", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 116, + 412, + 250, + 425 + ], + "spans": [ + { + "bbox": [ + 116, + 412, + 250, + 425 + ], + "score": 1.0, + "content": "Learning Representations, 2023.", + "type": "text" + } + ], + "index": 23, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 433, + 505, + 448 + ], + "spans": [ + { + "bbox": [ + 105, + 433, + 505, + 448 + ], + "score": 1.0, + "content": "Shengnan An, Yifei Li, Zeqi Lin, Qian Liu, Bei Chen, Qiang Fu, Weizhu Chen, Nanning", + "type": "text" + } + ], + "index": 24, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 444, + 505, + 460 + ], + "spans": [ + { + "bbox": [ + 115, + 444, + 505, + 460 + ], + "score": 1.0, + "content": "Zheng, and Jian-Guang Lou. Input-tuning: Adapting unfamiliar inputs to frozen pre-", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 116, + 456, + 280, + 469 + ], + "spans": [ + { + "bbox": [ + 116, + 456, + 280, + 469 + ], + "score": 1.0, + "content": "trained models. ArXiv preprint, 2022.", + "type": "text" + } + ], + "index": 26, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 477, + 505, + 492 + ], + "spans": [ + { + "bbox": [ + 105, + 477, + 505, + 492 + ], + "score": 1.0, + "content": "Vamsi Aribandi, Yi Tay, Tal Schuster, Jinfeng Rao, Huaixiu Steven Zheng, Sanket Vaibhav", + "type": "text" + } + ], + "index": 27, + "is_list_start_line": true + }, + { + "bbox": [ + 116, + 489, + 505, + 502 + ], + "spans": [ + { + "bbox": [ + 116, + 489, + 505, + 502 + ], + "score": 1.0, + "content": "Mehta, Honglei Zhuang, Vinh Q. Tran, Dara Bahri, Jianmo Ni, Jai Prakash Gupta, Kai", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 115, + 500, + 505, + 514 + ], + "spans": [ + { + "bbox": [ + 115, + 500, + 505, + 514 + ], + "score": 1.0, + "content": "Hui, Sebastian Ruder, and Donald Metzler. 
Ext5: Towards extreme multi-task scaling for", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 116, + 511, + 292, + 524 + ], + "spans": [ + { + "bbox": [ + 116, + 511, + 292, + 524 + ], + "score": 1.0, + "content": "transfer learning. In Proc. of ICLR, 2022.", + "type": "text" + } + ], + "index": 30, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 533, + 505, + 547 + ], + "spans": [ + { + "bbox": [ + 105, + 533, + 505, + 547 + ], + "score": 1.0, + "content": "Stephen Bach, Victor Sanh, Zheng Xin Yong, Albert Webson, Colin Raffel, Nihal V. Nayak,", + "type": "text" + } + ], + "index": 31, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 544, + 505, + 558 + ], + "spans": [ + { + "bbox": [ + 115, + 544, + 505, + 558 + ], + "score": 1.0, + "content": "Abheesht Sharma, Taewoon Kim, M Saiful Bari, Thibault Fevry, Zaid Alyafeai, Manan", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 115, + 555, + 505, + 569 + ], + "spans": [ + { + "bbox": [ + 115, + 555, + 505, + 569 + ], + "score": 1.0, + "content": "Dey, Andrea Santilli, Zhiqing Sun, Srulik Ben-david, Canwen Xu, Gunjan Chhablani,", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 115, + 566, + 505, + 580 + ], + "spans": [ + { + "bbox": [ + 115, + 566, + 505, + 580 + ], + "score": 1.0, + "content": "Han Wang, Jason Fries, Maged Al-shaibani, Shanya Sharma, Urmish Thakker, Khalid", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 116, + 577, + 505, + 591 + ], + "spans": [ + { + "bbox": [ + 116, + 577, + 505, + 591 + ], + "score": 1.0, + "content": "Almubarak, Xiangru Tang, Dragomir Radev, Mike Tian-jian Jiang, and Alexander Rush.", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 114, + 587, + 506, + 604 + ], + "spans": [ + { + "bbox": [ + 114, + 587, + 506, + 604 + ], + "score": 1.0, + "content": "PromptSource: An integrated development environment and repository for natural lan-", + "type": "text" + } + ], + "index": 36 + }, + { + 
"bbox": [ + 115, + 600, + 284, + 613 + ], + "spans": [ + { + "bbox": [ + 115, + 600, + 284, + 613 + ], + "score": 1.0, + "content": "guage prompts. In Proc. of ACL, 2022.", + "type": "text" + } + ], + "index": 37, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 620, + 505, + 635 + ], + "spans": [ + { + "bbox": [ + 106, + 620, + 505, + 635 + ], + "score": 1.0, + "content": "Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla", + "type": "text" + } + ], + "index": 38, + "is_list_start_line": true + }, + { + "bbox": [ + 116, + 633, + 505, + 645 + ], + "spans": [ + { + "bbox": [ + 116, + 633, + 505, + 645 + ], + "score": 1.0, + "content": "Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 115, + 643, + 505, + 657 + ], + "spans": [ + { + "bbox": [ + 115, + 643, + 505, + 657 + ], + "score": 1.0, + "content": "Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 115, + 654, + 505, + 668 + ], + "spans": [ + { + "bbox": [ + 115, + 654, + 505, + 668 + ], + "score": 1.0, + "content": "Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen,", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 115, + 664, + 505, + 679 + ], + "spans": [ + { + "bbox": [ + 115, + 664, + 505, + 679 + ], + "score": 1.0, + "content": "Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner,", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 115, + 676, + 505, + 690 + ], + "spans": [ + { + "bbox": [ + 115, + 676, + 505, + 690 + ], + "score": 1.0, + "content": "Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. 
Language models", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 115, + 686, + 505, + 700 + ], + "spans": [ + { + "bbox": [ + 115, + 686, + 505, + 700 + ], + "score": 1.0, + "content": "are few-shot learners. In Hugo Larochelle, Marc’Aurelio Ranzato, Raia Hadsell, Maria-", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 115, + 698, + 505, + 712 + ], + "spans": [ + { + "bbox": [ + 115, + 698, + 505, + 712 + ], + "score": 1.0, + "content": "Florina Balcan, and Hsuan-Tien Lin (eds.), Advances in Neural Information Processing Sys-", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 115, + 709, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 115, + 709, + 505, + 723 + ], + "score": 1.0, + "content": "tems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020,", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 116, + 720, + 264, + 733 + ], + "spans": [ + { + "bbox": [ + 116, + 720, + 264, + 733 + ], + "score": 1.0, + "content": "December 6-12, 2020, virtual, 2020.", + "type": "text" + } + ], + "index": 47, + "is_list_end_line": true + } + ], + "index": 34, + "bbox_fs": [ + 105, + 387, + 506, + 733 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 107, + 82, + 504, + 116 + ], + "lines": [ + { + "bbox": [ + 105, + 79, + 506, + 97 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 506, + 97 + ], + "score": 1.0, + "content": "Alexis Chevalier, Alexander Wettig, Anirudh Ajith, and Danqi Chen. Adapting language", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 115, + 92, + 506, + 106 + ], + "spans": [ + { + "bbox": [ + 115, + 92, + 506, + 106 + ], + "score": 1.0, + "content": "models to compress contexts. CoRR, abs/2305.14788, 2023. doi: 10.48550/ARXIV.2305.", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 116, + 104, + 380, + 117 + ], + "spans": [ + { + "bbox": [ + 116, + 104, + 380, + 117 + ], + "score": 1.0, + "content": "14788. 
URL https://doi.org/10.48550/arXiv.2305.14788.", + "type": "text" + } + ], + "index": 2 + } + ], + "index": 1 + }, + { + "type": "text", + "bbox": [ + 109, + 123, + 502, + 157 + ], + "lines": [ + { + "bbox": [ + 106, + 121, + 504, + 137 + ], + "spans": [ + { + "bbox": [ + 106, + 121, + 504, + 137 + ], + "score": 1.0, + "content": "Alexandra Chronopoulou, Matthew Peters, Alexander Fraser, and Jesse Dodge. Adapter-", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 115, + 133, + 505, + 147 + ], + "spans": [ + { + "bbox": [ + 115, + 133, + 505, + 147 + ], + "score": 1.0, + "content": "Soup: Weight averaging to improve generalization of pretrained language models. In", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 115, + 142, + 434, + 159 + ], + "spans": [ + { + "bbox": [ + 115, + 142, + 434, + 159 + ], + "score": 1.0, + "content": "Findings of the Association for Computational Linguistics: EACL 2023, 2023.", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 4 + }, + { + "type": "text", + "bbox": [ + 100, + 143, + 507, + 742 + ], + "lines": [ + { + "bbox": [ + 106, + 163, + 505, + 177 + ], + "spans": [ + { + "bbox": [ + 106, + 163, + 505, + 177 + ], + "score": 1.0, + "content": "Hyung Won Chung, Le Hou, S. 
Longpre, Barret Zoph, Yi Tay, William Fedus, Eric Li,", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 114, + 172, + 506, + 189 + ], + "spans": [ + { + "bbox": [ + 114, + 172, + 506, + 189 + ], + "score": 1.0, + "content": "Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, Albert Webson, Shixiang Shane", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 115, + 185, + 506, + 200 + ], + "spans": [ + { + "bbox": [ + 115, + 185, + 506, + 200 + ], + "score": 1.0, + "content": "Gu, Zhuyun Dai, Mirac Suzgun, Xinyun Chen, Aakanksha Chowdhery, Dasha Valter,", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 115, + 196, + 505, + 210 + ], + "spans": [ + { + "bbox": [ + 115, + 196, + 505, + 210 + ], + "score": 1.0, + "content": "Sharan Narang, Gaurav Mishra, Adams Wei Yu, Vincent Zhao, Yanping Huang, An-", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 115, + 208, + 505, + 220 + ], + "spans": [ + { + "bbox": [ + 115, + 208, + 505, + 220 + ], + "score": 1.0, + "content": "drew M. Dai, Hongkun Yu, Slav Petrov, Ed Huai hsin Chi, Jeff Dean, Jacob Devlin, Adam", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 113, + 216, + 506, + 234 + ], + "spans": [ + { + "bbox": [ + 113, + 216, + 506, + 234 + ], + "score": 1.0, + "content": "Roberts, Denny Zhou, Quoc V. Le, and Jason Wei. Scaling instruction-finetuned language", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 115, + 228, + 245, + 243 + ], + "spans": [ + { + "bbox": [ + 115, + 228, + 245, + 243 + ], + "score": 1.0, + "content": "models. 
ArXiv preprint, 2022.", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 245, + 504, + 262 + ], + "spans": [ + { + "bbox": [ + 105, + 247, + 141, + 261 + ], + "score": 1.0, + "content": "crumb.", + "type": "text" + }, + { + "bbox": [ + 177, + 245, + 222, + 262 + ], + "score": 1.0, + "content": "Llama-2,", + "type": "text" + }, + { + "bbox": [ + 232, + 245, + 331, + 262 + ], + "score": 1.0, + "content": "mixutre of lora.", + "type": "text" + }, + { + "bbox": [ + 367, + 248, + 504, + 261 + ], + "score": 1.0, + "content": "https://crumbly.medium.com/", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 116, + 259, + 282, + 272 + ], + "spans": [ + { + "bbox": [ + 116, + 259, + 282, + 272 + ], + "score": 1.0, + "content": "llama-2-molora-f5f909434711, 2023.", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 274, + 506, + 293 + ], + "spans": [ + { + "bbox": [ + 104, + 274, + 506, + 293 + ], + "score": 1.0, + "content": "Nan Du, Yanping Huang, Andrew M. Dai, Simon Tong, Dmitry Lepikhin, Yuanzhong", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 115, + 288, + 506, + 302 + ], + "spans": [ + { + "bbox": [ + 115, + 288, + 506, + 302 + ], + "score": 1.0, + "content": "Xu, Maxim Krikun, Yanqi Zhou, Adams Wei Yu, Orhan Firat, Barret Zoph, Liam Fe-", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 115, + 299, + 506, + 313 + ], + "spans": [ + { + "bbox": [ + 115, + 299, + 506, + 313 + ], + "score": 1.0, + "content": "dus, Maarten P. Bosma, Zongwei Zhou, Tao Wang, Yu Emma Wang, Kellie Webster,", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 115, + 310, + 506, + 323 + ], + "spans": [ + { + "bbox": [ + 115, + 310, + 506, + 323 + ], + "score": 1.0, + "content": "Marie Pellat, Kevin Robinson, Kathleen S. 
Meier-Hellstern, Toju Duke, Lucas Dixon, Kun", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 113, + 319, + 506, + 336 + ], + "spans": [ + { + "bbox": [ + 113, + 319, + 506, + 336 + ], + "score": 1.0, + "content": "Zhang, Quoc V. Le, Yonghui Wu, Zhifeng Chen, and Claire Cui. Glam: Efficient scaling", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 114, + 331, + 506, + 347 + ], + "spans": [ + { + "bbox": [ + 114, + 331, + 506, + 347 + ], + "score": 1.0, + "content": "of language models with mixture-of-experts. In Kamalika Chaudhuri, Stefanie Jegelka,", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 115, + 343, + 506, + 357 + ], + "spans": [ + { + "bbox": [ + 115, + 343, + 506, + 357 + ], + "score": 1.0, + "content": "Le Song, Csaba Szepesvari, Gang Niu, and Sivan Sabato (eds.), Β΄ International Conference", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 113, + 352, + 506, + 370 + ], + "spans": [ + { + "bbox": [ + 113, + 352, + 506, + 370 + ], + "score": 1.0, + "content": "on Machine Learning, ICML 2022, 17-23 July 2022, Baltimore, Maryland, USA, Proceedings", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 114, + 365, + 279, + 379 + ], + "spans": [ + { + "bbox": [ + 114, + 365, + 279, + 379 + ], + "score": 1.0, + "content": "of Machine Learning Research, 2022.", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 384, + 506, + 397 + ], + "spans": [ + { + "bbox": [ + 106, + 384, + 506, + 397 + ], + "score": 1.0, + "content": "Tao Ge, Jing Hu, Xun Wang, Si-Qing Chen, and Furu Wei. In-context autoencoder for", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 114, + 394, + 506, + 408 + ], + "spans": [ + { + "bbox": [ + 114, + 394, + 506, + 408 + ], + "score": 1.0, + "content": "context compression in a large language model. CoRR, abs/2307.06945, 2023. 
doi: 10.", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 116, + 406, + 465, + 419 + ], + "spans": [ + { + "bbox": [ + 116, + 406, + 465, + 419 + ], + "score": 1.0, + "content": "48550/ARXIV.2307.06945. URL https://doi.org/10.48550/arXiv.2307.06945.", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 106, + 425, + 504, + 437 + ], + "spans": [ + { + "bbox": [ + 106, + 425, + 504, + 437 + ], + "score": 1.0, + "content": "Aryo Pradipta Gema, Luke Daines, Pasquale Minervini, and Beatrice Alex. Parameter-", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 115, + 436, + 441, + 448 + ], + "spans": [ + { + "bbox": [ + 115, + 436, + 441, + 448 + ], + "score": 1.0, + "content": "efficient fine-tuning of llama for the clinical domain. ArXiv preprint, 2023.", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 104, + 452, + 505, + 468 + ], + "spans": [ + { + "bbox": [ + 104, + 452, + 505, + 468 + ], + "score": 1.0, + "content": "Nikolaus Hansen and Andreas Ostermeier. Adapting arbitrary normal mutation distri-", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 115, + 464, + 505, + 479 + ], + "spans": [ + { + "bbox": [ + 115, + 464, + 505, + 479 + ], + "score": 1.0, + "content": "butions in evolution strategies: the covariance matrix adaptation. 
Proceedings of IEEE", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 115, + 475, + 370, + 489 + ], + "spans": [ + { + "bbox": [ + 115, + 475, + 370, + 489 + ], + "score": 1.0, + "content": "International Conference on Evolutionary Computation, 1996.", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 103, + 493, + 505, + 510 + ], + "spans": [ + { + "bbox": [ + 103, + 493, + 505, + 510 + ], + "score": 1.0, + "content": "Junxian He, Chunting Zhou, Xuezhe Ma, Taylor Berg-Kirkpatrick, and Graham Neubig.", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 115, + 505, + 496, + 520 + ], + "spans": [ + { + "bbox": [ + 115, + 505, + 496, + 520 + ], + "score": 1.0, + "content": "Towards a unified view of parameter-efficient transfer learning. In Proc. of ICLR, 2022.", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 104, + 520, + 507, + 541 + ], + "spans": [ + { + "bbox": [ + 104, + 520, + 507, + 541 + ], + "score": 1.0, + "content": "Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang,", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 115, + 534, + 506, + 550 + ], + "spans": [ + { + "bbox": [ + 115, + 534, + 506, + 550 + ], + "score": 1.0, + "content": "Lu Wang, and Weizhu Chen. Lora: Low-rank adaptation of large language models. In", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 115, + 545, + 200, + 559 + ], + "spans": [ + { + "bbox": [ + 115, + 545, + 200, + 559 + ], + "score": 1.0, + "content": "Proc. 
of ICLR, 2022.", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 564, + 505, + 578 + ], + "spans": [ + { + "bbox": [ + 105, + 564, + 505, + 578 + ], + "score": 1.0, + "content": "Gabriel Ilharco, Marco Tulio Ribeiro, Mitchell Wortsman, Ludwig Schmidt, Hannaneh Ha-", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 116, + 576, + 504, + 590 + ], + "spans": [ + { + "bbox": [ + 116, + 576, + 504, + 590 + ], + "score": 1.0, + "content": "jishirzi, and Ali Farhadi. Editing models with task arithmetic. In The Eleventh Interna-", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 116, + 587, + 334, + 599 + ], + "spans": [ + { + "bbox": [ + 116, + 587, + 334, + 599 + ], + "score": 1.0, + "content": "tional Conference on Learning Representations, 2023.", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 104, + 603, + 506, + 621 + ], + "spans": [ + { + "bbox": [ + 104, + 603, + 506, + 621 + ], + "score": 1.0, + "content": "Robert A. Jacobs, Michael I. Jordan, Steven J. Nowlan, and Geoffrey E. Hinton. Adaptive", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 115, + 616, + 344, + 631 + ], + "spans": [ + { + "bbox": [ + 115, + 616, + 344, + 631 + ], + "score": 1.0, + "content": "mixtures of local experts. Neural Computation, 1991.", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 635, + 506, + 650 + ], + "spans": [ + { + "bbox": [ + 105, + 635, + 506, + 650 + ], + "score": 1.0, + "content": "Joel Jang, Seungone Kim, Seonghyeon Ye, Doyoung Kim, Lajanugen Logeswaran, Moontae", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 114, + 645, + 506, + 662 + ], + "spans": [ + { + "bbox": [ + 114, + 645, + 506, + 662 + ], + "score": 1.0, + "content": "Lee, Kyungjae Lee, and Minjoon Seo. 
Exploring the benefits of training expert language", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 115, + 657, + 505, + 672 + ], + "spans": [ + { + "bbox": [ + 115, + 657, + 505, + 672 + ], + "score": 1.0, + "content": "models over instruction tuning. In International Conference on Machine Learning, 2023.", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 115, + 667, + 394, + 683 + ], + "spans": [ + { + "bbox": [ + 115, + 667, + 394, + 683 + ], + "score": 1.0, + "content": "URL https://api.semanticscholar.org/CorpusID:256627673.", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 104, + 685, + 506, + 702 + ], + "spans": [ + { + "bbox": [ + 104, + 685, + 506, + 702 + ], + "score": 1.0, + "content": "Huiqiang Jiang, Qianhui Wu, Chin-Yew Lin, Yuqing Yang, and Lili Qiu. Llmlingua: Com-", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 115, + 698, + 506, + 713 + ], + "spans": [ + { + "bbox": [ + 115, + 698, + 506, + 713 + ], + "score": 1.0, + "content": "pressing prompts for accelerated inference of large language models. In Proceedings of", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 115, + 709, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 115, + 709, + 506, + 723 + ], + "score": 1.0, + "content": "the 2023 Conference on Empirical Methods in Natural Language Processing. Association for", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 115, + 720, + 504, + 734 + ], + "spans": [ + { + "bbox": [ + 115, + 720, + 504, + 734 + ], + "score": 1.0, + "content": "Computational Linguistics, December 2023a. 
URL https://arxiv.org/abs/2310.05736.", + "type": "text" + } + ], + "index": 49 + } + ], + "index": 27.5 + } + ], + "page_idx": 10, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 26, + 316, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "score": 1.0, + "content": "Published as a conference paper at COLM 2024", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 310, + 760 + ], + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 764 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 764 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 14, + "width": 13 + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 107, + 82, + 504, + 116 + ], + "lines": [ + { + "bbox": [ + 105, + 79, + 506, + 97 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 506, + 97 + ], + "score": 1.0, + "content": "Alexis Chevalier, Alexander Wettig, Anirudh Ajith, and Danqi Chen. Adapting language", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 115, + 92, + 506, + 106 + ], + "spans": [ + { + "bbox": [ + 115, + 92, + 506, + 106 + ], + "score": 1.0, + "content": "models to compress contexts. CoRR, abs/2305.14788, 2023. doi: 10.48550/ARXIV.2305.", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 116, + 104, + 380, + 117 + ], + "spans": [ + { + "bbox": [ + 116, + 104, + 380, + 117 + ], + "score": 1.0, + "content": "14788. 
URL https://doi.org/10.48550/arXiv.2305.14788.", + "type": "text" + } + ], + "index": 2 + } + ], + "index": 1, + "bbox_fs": [ + 105, + 79, + 506, + 117 + ] + }, + { + "type": "text", + "bbox": [ + 109, + 123, + 502, + 157 + ], + "lines": [ + { + "bbox": [ + 106, + 121, + 504, + 137 + ], + "spans": [ + { + "bbox": [ + 106, + 121, + 504, + 137 + ], + "score": 1.0, + "content": "Alexandra Chronopoulou, Matthew Peters, Alexander Fraser, and Jesse Dodge. Adapter-", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 115, + 133, + 505, + 147 + ], + "spans": [ + { + "bbox": [ + 115, + 133, + 505, + 147 + ], + "score": 1.0, + "content": "Soup: Weight averaging to improve generalization of pretrained language models. In", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 115, + 142, + 434, + 159 + ], + "spans": [ + { + "bbox": [ + 115, + 142, + 434, + 159 + ], + "score": 1.0, + "content": "Findings of the Association for Computational Linguistics: EACL 2023, 2023.", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 4, + "bbox_fs": [ + 106, + 121, + 505, + 159 + ] + }, + { + "type": "list", + "bbox": [ + 100, + 143, + 507, + 742 + ], + "lines": [ + { + "bbox": [ + 106, + 163, + 505, + 177 + ], + "spans": [ + { + "bbox": [ + 106, + 163, + 505, + 177 + ], + "score": 1.0, + "content": "Hyung Won Chung, Le Hou, S. 
Longpre, Barret Zoph, Yi Tay, William Fedus, Eric Li,", + "type": "text" + } + ], + "index": 6, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 172, + 506, + 189 + ], + "spans": [ + { + "bbox": [ + 114, + 172, + 506, + 189 + ], + "score": 1.0, + "content": "Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, Albert Webson, Shixiang Shane", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 115, + 185, + 506, + 200 + ], + "spans": [ + { + "bbox": [ + 115, + 185, + 506, + 200 + ], + "score": 1.0, + "content": "Gu, Zhuyun Dai, Mirac Suzgun, Xinyun Chen, Aakanksha Chowdhery, Dasha Valter,", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 115, + 196, + 505, + 210 + ], + "spans": [ + { + "bbox": [ + 115, + 196, + 505, + 210 + ], + "score": 1.0, + "content": "Sharan Narang, Gaurav Mishra, Adams Wei Yu, Vincent Zhao, Yanping Huang, An-", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 115, + 208, + 505, + 220 + ], + "spans": [ + { + "bbox": [ + 115, + 208, + 505, + 220 + ], + "score": 1.0, + "content": "drew M. Dai, Hongkun Yu, Slav Petrov, Ed Huai hsin Chi, Jeff Dean, Jacob Devlin, Adam", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 113, + 216, + 506, + 234 + ], + "spans": [ + { + "bbox": [ + 113, + 216, + 506, + 234 + ], + "score": 1.0, + "content": "Roberts, Denny Zhou, Quoc V. Le, and Jason Wei. Scaling instruction-finetuned language", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 115, + 228, + 245, + 243 + ], + "spans": [ + { + "bbox": [ + 115, + 228, + 245, + 243 + ], + "score": 1.0, + "content": "models. 
ArXiv preprint, 2022.", + "type": "text" + } + ], + "index": 12, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 245, + 504, + 262 + ], + "spans": [ + { + "bbox": [ + 105, + 247, + 141, + 261 + ], + "score": 1.0, + "content": "crumb.", + "type": "text" + }, + { + "bbox": [ + 177, + 245, + 222, + 262 + ], + "score": 1.0, + "content": "Llama-2,", + "type": "text" + }, + { + "bbox": [ + 232, + 245, + 331, + 262 + ], + "score": 1.0, + "content": "mixutre of lora.", + "type": "text" + }, + { + "bbox": [ + 367, + 248, + 504, + 261 + ], + "score": 1.0, + "content": "https://crumbly.medium.com/", + "type": "text" + } + ], + "index": 13, + "is_list_start_line": true + }, + { + "bbox": [ + 116, + 259, + 282, + 272 + ], + "spans": [ + { + "bbox": [ + 116, + 259, + 282, + 272 + ], + "score": 1.0, + "content": "llama-2-molora-f5f909434711, 2023.", + "type": "text" + } + ], + "index": 14, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 274, + 506, + 293 + ], + "spans": [ + { + "bbox": [ + 104, + 274, + 506, + 293 + ], + "score": 1.0, + "content": "Nan Du, Yanping Huang, Andrew M. Dai, Simon Tong, Dmitry Lepikhin, Yuanzhong", + "type": "text" + } + ], + "index": 15, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 288, + 506, + 302 + ], + "spans": [ + { + "bbox": [ + 115, + 288, + 506, + 302 + ], + "score": 1.0, + "content": "Xu, Maxim Krikun, Yanqi Zhou, Adams Wei Yu, Orhan Firat, Barret Zoph, Liam Fe-", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 115, + 299, + 506, + 313 + ], + "spans": [ + { + "bbox": [ + 115, + 299, + 506, + 313 + ], + "score": 1.0, + "content": "dus, Maarten P. Bosma, Zongwei Zhou, Tao Wang, Yu Emma Wang, Kellie Webster,", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 115, + 310, + 506, + 323 + ], + "spans": [ + { + "bbox": [ + 115, + 310, + 506, + 323 + ], + "score": 1.0, + "content": "Marie Pellat, Kevin Robinson, Kathleen S. 
Meier-Hellstern, Toju Duke, Lucas Dixon, Kun", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 113, + 319, + 506, + 336 + ], + "spans": [ + { + "bbox": [ + 113, + 319, + 506, + 336 + ], + "score": 1.0, + "content": "Zhang, Quoc V. Le, Yonghui Wu, Zhifeng Chen, and Claire Cui. Glam: Efficient scaling", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 114, + 331, + 506, + 347 + ], + "spans": [ + { + "bbox": [ + 114, + 331, + 506, + 347 + ], + "score": 1.0, + "content": "of language models with mixture-of-experts. In Kamalika Chaudhuri, Stefanie Jegelka,", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 115, + 343, + 506, + 357 + ], + "spans": [ + { + "bbox": [ + 115, + 343, + 506, + 357 + ], + "score": 1.0, + "content": "Le Song, Csaba Szepesvari, Gang Niu, and Sivan Sabato (eds.), Β΄ International Conference", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 113, + 352, + 506, + 370 + ], + "spans": [ + { + "bbox": [ + 113, + 352, + 506, + 370 + ], + "score": 1.0, + "content": "on Machine Learning, ICML 2022, 17-23 July 2022, Baltimore, Maryland, USA, Proceedings", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 114, + 365, + 279, + 379 + ], + "spans": [ + { + "bbox": [ + 114, + 365, + 279, + 379 + ], + "score": 1.0, + "content": "of Machine Learning Research, 2022.", + "type": "text" + } + ], + "index": 23, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 384, + 506, + 397 + ], + "spans": [ + { + "bbox": [ + 106, + 384, + 506, + 397 + ], + "score": 1.0, + "content": "Tao Ge, Jing Hu, Xun Wang, Si-Qing Chen, and Furu Wei. In-context autoencoder for", + "type": "text" + } + ], + "index": 24, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 394, + 506, + 408 + ], + "spans": [ + { + "bbox": [ + 114, + 394, + 506, + 408 + ], + "score": 1.0, + "content": "context compression in a large language model. CoRR, abs/2307.06945, 2023. 
doi: 10.", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 116, + 406, + 465, + 419 + ], + "spans": [ + { + "bbox": [ + 116, + 406, + 465, + 419 + ], + "score": 1.0, + "content": "48550/ARXIV.2307.06945. URL https://doi.org/10.48550/arXiv.2307.06945.", + "type": "text" + } + ], + "index": 26, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 425, + 504, + 437 + ], + "spans": [ + { + "bbox": [ + 106, + 425, + 504, + 437 + ], + "score": 1.0, + "content": "Aryo Pradipta Gema, Luke Daines, Pasquale Minervini, and Beatrice Alex. Parameter-", + "type": "text" + } + ], + "index": 27, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 436, + 441, + 448 + ], + "spans": [ + { + "bbox": [ + 115, + 436, + 441, + 448 + ], + "score": 1.0, + "content": "efficient fine-tuning of llama for the clinical domain. ArXiv preprint, 2023.", + "type": "text" + } + ], + "index": 28, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 452, + 505, + 468 + ], + "spans": [ + { + "bbox": [ + 104, + 452, + 505, + 468 + ], + "score": 1.0, + "content": "Nikolaus Hansen and Andreas Ostermeier. Adapting arbitrary normal mutation distri-", + "type": "text" + } + ], + "index": 29, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 464, + 505, + 479 + ], + "spans": [ + { + "bbox": [ + 115, + 464, + 505, + 479 + ], + "score": 1.0, + "content": "butions in evolution strategies: the covariance matrix adaptation. 
Proceedings of IEEE", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 115, + 475, + 370, + 489 + ], + "spans": [ + { + "bbox": [ + 115, + 475, + 370, + 489 + ], + "score": 1.0, + "content": "International Conference on Evolutionary Computation, 1996.", + "type": "text" + } + ], + "index": 31, + "is_list_end_line": true + }, + { + "bbox": [ + 103, + 493, + 505, + 510 + ], + "spans": [ + { + "bbox": [ + 103, + 493, + 505, + 510 + ], + "score": 1.0, + "content": "Junxian He, Chunting Zhou, Xuezhe Ma, Taylor Berg-Kirkpatrick, and Graham Neubig.", + "type": "text" + } + ], + "index": 32, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 505, + 496, + 520 + ], + "spans": [ + { + "bbox": [ + 115, + 505, + 496, + 520 + ], + "score": 1.0, + "content": "Towards a unified view of parameter-efficient transfer learning. In Proc. of ICLR, 2022.", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 104, + 520, + 507, + 541 + ], + "spans": [ + { + "bbox": [ + 104, + 520, + 507, + 541 + ], + "score": 1.0, + "content": "Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang,", + "type": "text" + } + ], + "index": 34, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 534, + 506, + 550 + ], + "spans": [ + { + "bbox": [ + 115, + 534, + 506, + 550 + ], + "score": 1.0, + "content": "Lu Wang, and Weizhu Chen. Lora: Low-rank adaptation of large language models. In", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 115, + 545, + 200, + 559 + ], + "spans": [ + { + "bbox": [ + 115, + 545, + 200, + 559 + ], + "score": 1.0, + "content": "Proc. 
of ICLR, 2022.", + "type": "text" + } + ], + "index": 36, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 564, + 505, + 578 + ], + "spans": [ + { + "bbox": [ + 105, + 564, + 505, + 578 + ], + "score": 1.0, + "content": "Gabriel Ilharco, Marco Tulio Ribeiro, Mitchell Wortsman, Ludwig Schmidt, Hannaneh Ha-", + "type": "text" + } + ], + "index": 37, + "is_list_start_line": true + }, + { + "bbox": [ + 116, + 576, + 504, + 590 + ], + "spans": [ + { + "bbox": [ + 116, + 576, + 504, + 590 + ], + "score": 1.0, + "content": "jishirzi, and Ali Farhadi. Editing models with task arithmetic. In The Eleventh Interna-", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 116, + 587, + 334, + 599 + ], + "spans": [ + { + "bbox": [ + 116, + 587, + 334, + 599 + ], + "score": 1.0, + "content": "tional Conference on Learning Representations, 2023.", + "type": "text" + } + ], + "index": 39, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 603, + 506, + 621 + ], + "spans": [ + { + "bbox": [ + 104, + 603, + 506, + 621 + ], + "score": 1.0, + "content": "Robert A. Jacobs, Michael I. Jordan, Steven J. Nowlan, and Geoffrey E. Hinton. Adaptive", + "type": "text" + } + ], + "index": 40, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 616, + 344, + 631 + ], + "spans": [ + { + "bbox": [ + 115, + 616, + 344, + 631 + ], + "score": 1.0, + "content": "mixtures of local experts. Neural Computation, 1991.", + "type": "text" + } + ], + "index": 41, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 635, + 506, + 650 + ], + "spans": [ + { + "bbox": [ + 105, + 635, + 506, + 650 + ], + "score": 1.0, + "content": "Joel Jang, Seungone Kim, Seonghyeon Ye, Doyoung Kim, Lajanugen Logeswaran, Moontae", + "type": "text" + } + ], + "index": 42, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 645, + 506, + 662 + ], + "spans": [ + { + "bbox": [ + 114, + 645, + 506, + 662 + ], + "score": 1.0, + "content": "Lee, Kyungjae Lee, and Minjoon Seo. 
Exploring the benefits of training expert language", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 115, + 657, + 505, + 672 + ], + "spans": [ + { + "bbox": [ + 115, + 657, + 505, + 672 + ], + "score": 1.0, + "content": "models over instruction tuning. In International Conference on Machine Learning, 2023.", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 115, + 667, + 394, + 683 + ], + "spans": [ + { + "bbox": [ + 115, + 667, + 394, + 683 + ], + "score": 1.0, + "content": "URL https://api.semanticscholar.org/CorpusID:256627673.", + "type": "text" + } + ], + "index": 45, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 685, + 506, + 702 + ], + "spans": [ + { + "bbox": [ + 104, + 685, + 506, + 702 + ], + "score": 1.0, + "content": "Huiqiang Jiang, Qianhui Wu, Chin-Yew Lin, Yuqing Yang, and Lili Qiu. Llmlingua: Com-", + "type": "text" + } + ], + "index": 46, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 698, + 506, + 713 + ], + "spans": [ + { + "bbox": [ + 115, + 698, + 506, + 713 + ], + "score": 1.0, + "content": "pressing prompts for accelerated inference of large language models. In Proceedings of", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 115, + 709, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 115, + 709, + 506, + 723 + ], + "score": 1.0, + "content": "the 2023 Conference on Empirical Methods in Natural Language Processing. Association for", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 115, + 720, + 504, + 734 + ], + "spans": [ + { + "bbox": [ + 115, + 720, + 504, + 734 + ], + "score": 1.0, + "content": "Computational Linguistics, December 2023a. 
URL https://arxiv.org/abs/2310.05736.", + "type": "text" + } + ], + "index": 49 + } + ], + "index": 27.5, + "bbox_fs": [ + 103, + 163, + 507, + 734 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 107, + 82, + 505, + 127 + ], + "lines": [ + { + "bbox": [ + 105, + 82, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 505, + 95 + ], + "score": 1.0, + "content": "Huiqiang Jiang, Qianhui Wu, Xufang Luo, Dongsheng Li, Chin-Yew Lin, Yuqing Yang, and", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 115, + 93, + 506, + 107 + ], + "spans": [ + { + "bbox": [ + 115, + 93, + 506, + 107 + ], + "score": 1.0, + "content": "Lili Qiu. Longllmlingua: Accelerating and enhancing llms in long context scenarios via", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 115, + 103, + 505, + 118 + ], + "spans": [ + { + "bbox": [ + 115, + 103, + 505, + 118 + ], + "score": 1.0, + "content": "prompt compression. CoRR, abs/2310.06839, 2023b. doi: 10.48550/ARXIV.2310.06839.", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 115, + 113, + 349, + 129 + ], + "spans": [ + { + "bbox": [ + 115, + 113, + 349, + 129 + ], + "score": 1.0, + "content": "URL https://doi.org/10.48550/arXiv.2310.06839.", + "type": "text" + } + ], + "index": 3 + } + ], + "index": 1.5 + }, + { + "type": "text", + "bbox": [ + 102, + 95, + 506, + 732 + ], + "lines": [ + { + "bbox": [ + 104, + 132, + 505, + 147 + ], + "spans": [ + { + "bbox": [ + 104, + 132, + 505, + 147 + ], + "score": 1.0, + "content": "Xisen Jin, Xiang Ren, Daniel Preotiuc-Pietro, and Pengxiang Cheng. Dataless knowledge", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 115, + 144, + 505, + 159 + ], + "spans": [ + { + "bbox": [ + 115, + 144, + 505, + 159 + ], + "score": 1.0, + "content": "fusion by merging weights of language models. 
In The Eleventh International Conference", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 115, + 155, + 262, + 169 + ], + "spans": [ + { + "bbox": [ + 115, + 155, + 262, + 169 + ], + "score": 1.0, + "content": "on Learning Representations, 2023.", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 174, + 504, + 187 + ], + "spans": [ + { + "bbox": [ + 105, + 174, + 504, + 187 + ], + "score": 1.0, + "content": "Hiroaki Kingetsu, Kenichi Kobayashi, and Taiji Suzuki. Neural network module decom-", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 114, + 185, + 336, + 199 + ], + "spans": [ + { + "bbox": [ + 114, + 185, + 336, + 199 + ], + "score": 1.0, + "content": "position and recomposition. ArXiv preprint, 2021.", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 202, + 506, + 218 + ], + "spans": [ + { + "bbox": [ + 104, + 202, + 506, + 218 + ], + "score": 1.0, + "content": "Brian Lester, Rami Al-Rfou, and Noah Constant. The power of scale for parameter-efficient", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 114, + 214, + 298, + 229 + ], + "spans": [ + { + "bbox": [ + 114, + 214, + 298, + 229 + ], + "score": 1.0, + "content": "prompt tuning. In Proc. of EMNLP, 2021.", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 231, + 506, + 247 + ], + "spans": [ + { + "bbox": [ + 104, + 231, + 506, + 247 + ], + "score": 1.0, + "content": "Yucheng Li, Bo Dong, Chenghua Lin, and Frank Guerin. Compressing context to enhance", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 114, + 244, + 505, + 259 + ], + "spans": [ + { + "bbox": [ + 114, + 244, + 505, + 259 + ], + "score": 1.0, + "content": "inference efficiency of large language models. 
In Proceedings of the 2023 Conference on Em-", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 114, + 254, + 506, + 270 + ], + "spans": [ + { + "bbox": [ + 114, + 254, + 506, + 270 + ], + "score": 1.0, + "content": "pirical Methods in Natural Language Processing. Association for Computational Linguistics,", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 115, + 266, + 376, + 280 + ], + "spans": [ + { + "bbox": [ + 115, + 266, + 376, + 280 + ], + "score": 1.0, + "content": "December 2023. URL https://arxiv.org/abs/2310.06201.", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 284, + 505, + 299 + ], + "spans": [ + { + "bbox": [ + 105, + 284, + 505, + 299 + ], + "score": 1.0, + "content": "Bill Yuchen Lin, Kangmin Tan, Chris Miller, Beiwen Tian, and Xiang Ren. Unsupervised", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 115, + 297, + 428, + 308 + ], + "spans": [ + { + "bbox": [ + 115, + 297, + 428, + 308 + ], + "score": 1.0, + "content": "cross-task generalization via retrieval augmentation. In NeurIPS, 2022.", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 312, + 505, + 329 + ], + "spans": [ + { + "bbox": [ + 104, + 312, + 505, + 329 + ], + "score": 1.0, + "content": "Haokun Liu, Derek Tam, Mohammed Muqeeth, Jay Mohta, Tenghao Huang, Mohit Bansal,", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 114, + 325, + 505, + 340 + ], + "spans": [ + { + "bbox": [ + 114, + 325, + 505, + 340 + ], + "score": 1.0, + "content": "and Colin Raffel. Few-shot parameter-efficient fine-tuning is better and cheaper than in-", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 115, + 336, + 504, + 351 + ], + "spans": [ + { + "bbox": [ + 115, + 336, + 504, + 351 + ], + "score": 1.0, + "content": "context learning. ArXiv, abs/2205.05638, 2022. 
URL https://api.semanticscholar.org/", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 116, + 348, + 211, + 360 + ], + "spans": [ + { + "bbox": [ + 116, + 348, + 211, + 360 + ], + "score": 1.0, + "content": "CorpusID:248693283.", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 366, + 505, + 379 + ], + "spans": [ + { + "bbox": [ + 105, + 366, + 505, + 379 + ], + "score": 1.0, + "content": "Jialin Liu, A. Moreau, Mike Preuss, Baptiste Roziere, J ` erΒ΄ emy Rapin, Fabien Teytaud, and Β΄", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 116, + 377, + 505, + 390 + ], + "spans": [ + { + "bbox": [ + 116, + 377, + 505, + 390 + ], + "score": 1.0, + "content": "Olivier Teytaud. Versatile black-box optimization. Proceedings of the 2020 Genetic and", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 116, + 388, + 301, + 402 + ], + "spans": [ + { + "bbox": [ + 116, + 388, + 301, + 402 + ], + "score": 1.0, + "content": "Evolutionary Computation Conference, 2020.", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 104, + 405, + 505, + 420 + ], + "spans": [ + { + "bbox": [ + 104, + 405, + 505, + 420 + ], + "score": 1.0, + "content": "Shayne Longpre, Le Hou, Tu Vu, Albert Webson, Hyung Won Chung, Yi Tay, Denny Zhou,", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 114, + 415, + 507, + 434 + ], + "spans": [ + { + "bbox": [ + 114, + 415, + 507, + 434 + ], + "score": 1.0, + "content": "Quoc V. Le, Barret Zoph, Jason Wei, and Adam Roberts. 
The flan collection: Designing", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 115, + 428, + 364, + 443 + ], + "spans": [ + { + "bbox": [ + 115, + 428, + 364, + 443 + ], + "score": 1.0, + "content": "data and methods for effective instruction tuning, 2023.", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 447, + 505, + 461 + ], + "spans": [ + { + "bbox": [ + 105, + 447, + 505, + 461 + ], + "score": 1.0, + "content": "Xingtai Lv, Ning Ding, Yujia Qin, Zhiyuan Liu, and Maosong Sun. Parameter-efficient", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 115, + 457, + 506, + 473 + ], + "spans": [ + { + "bbox": [ + 115, + 457, + 506, + 473 + ], + "score": 1.0, + "content": "weight ensembling facilitates task-level knowledge transfer. In Annual Meeting of the", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 115, + 469, + 316, + 482 + ], + "spans": [ + { + "bbox": [ + 115, + 469, + 316, + 482 + ], + "score": 1.0, + "content": "Association for Computational Linguistics, 2023.", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 104, + 487, + 505, + 501 + ], + "spans": [ + { + "bbox": [ + 104, + 487, + 505, + 501 + ], + "score": 1.0, + "content": "Sourab Mangrulkar, Sylvain Gugger, Lysandre Debut, Younes Belkada, and Sayak Paul.", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 114, + 497, + 505, + 513 + ], + "spans": [ + { + "bbox": [ + 114, + 497, + 505, + 513 + ], + "score": 1.0, + "content": "Peft: State-of-the-art parameter-efficient fine-tuning methods. 
https://github.com/", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 116, + 510, + 226, + 523 + ], + "spans": [ + { + "bbox": [ + 116, + 510, + 226, + 523 + ], + "score": 1.0, + "content": "huggingface/peft, 2022.", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 103, + 525, + 506, + 543 + ], + "spans": [ + { + "bbox": [ + 103, + 525, + 506, + 543 + ], + "score": 1.0, + "content": "Michael Matena and Colin Raffel. Merging models with fisher-weighted averaging. ArXiv", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 115, + 540, + 181, + 553 + ], + "spans": [ + { + "bbox": [ + 115, + 540, + 181, + 553 + ], + "score": 1.0, + "content": "preprint, 2021.", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 104, + 556, + 507, + 573 + ], + "spans": [ + { + "bbox": [ + 104, + 556, + 507, + 573 + ], + "score": 1.0, + "content": "Sewon Min, Mike Lewis, Luke Zettlemoyer, and Hannaneh Hajishirzi. MetaICL: Learning", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 113, + 566, + 507, + 585 + ], + "spans": [ + { + "bbox": [ + 113, + 566, + 507, + 585 + ], + "score": 1.0, + "content": "to learn in context. In Proceedings of the 2022 Conference of the North American Chapter of", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 115, + 579, + 462, + 595 + ], + "spans": [ + { + "bbox": [ + 115, + 579, + 462, + 595 + ], + "score": 1.0, + "content": "the Association for Computational Linguistics: Human Language Technologies, 2022.", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 104, + 596, + 505, + 613 + ], + "spans": [ + { + "bbox": [ + 104, + 596, + 505, + 613 + ], + "score": 1.0, + "content": "Swaroop Mishra, Daniel Khashabi, Chitta Baral, and Hannaneh Hajishirzi. 
Cross-task gen-", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 114, + 609, + 480, + 623 + ], + "spans": [ + { + "bbox": [ + 114, + 609, + 480, + 623 + ], + "score": 1.0, + "content": "eralization via natural language crowdsourcing instructions. In Proc. of ACL, 2022.", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 103, + 624, + 505, + 644 + ], + "spans": [ + { + "bbox": [ + 103, + 624, + 505, + 644 + ], + "score": 1.0, + "content": "Mohammed Muqeeth, Haokun Liu, and Colin Raffel. Soft merging of experts with adap-", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 114, + 638, + 267, + 653 + ], + "spans": [ + { + "bbox": [ + 114, + 638, + 267, + 653 + ], + "score": 1.0, + "content": "tive routing. ArXiv preprint, 2023.", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 104, + 656, + 403, + 672 + ], + "spans": [ + { + "bbox": [ + 104, + 656, + 403, + 672 + ], + "score": 1.0, + "content": "OpenAI. ChatGPT. 2022. URL https://openai.com/blog/chatgpt.", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 104, + 675, + 506, + 692 + ], + "spans": [ + { + "bbox": [ + 104, + 675, + 506, + 692 + ], + "score": 1.0, + "content": "Long Ouyang, Jeff Wu, Xu Jiang, Diogo Almeida, Carroll L. Wainwright, Pamela Mishkin,", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 115, + 686, + 505, + 702 + ], + "spans": [ + { + "bbox": [ + 115, + 686, + 505, + 702 + ], + "score": 1.0, + "content": "Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, John Schulman, Jacob", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 115, + 698, + 505, + 712 + ], + "spans": [ + { + "bbox": [ + 115, + 698, + 505, + 712 + ], + "score": 1.0, + "content": "Hilton, Fraser Kelton, Luke E. 
Miller, Maddie Simens, Amanda Askell, Peter Welinder,", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 114, + 707, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 114, + 707, + 505, + 723 + ], + "score": 1.0, + "content": "Paul Francis Christiano, Jan Leike, and Ryan J. Lowe. Training language models to fol-", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 115, + 720, + 384, + 733 + ], + "spans": [ + { + "bbox": [ + 115, + 720, + 384, + 733 + ], + "score": 1.0, + "content": "low instructions with human feedback. ArXiv preprint, 2022.", + "type": "text" + } + ], + "index": 47 + } + ], + "index": 25.5 + } + ], + "page_idx": 11, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 26, + 316, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "score": 1.0, + "content": "Published as a conference paper at COLM 2024", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 760 + ], + "lines": [ + { + "bbox": [ + 298, + 750, + 313, + 765 + ], + "spans": [ + { + "bbox": [ + 298, + 750, + 313, + 765 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 15, + "width": 15 + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 107, + 82, + 505, + 127 + ], + "lines": [ + { + "bbox": [ + 105, + 82, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 505, + 95 + ], + "score": 1.0, + "content": "Huiqiang Jiang, Qianhui Wu, Xufang Luo, Dongsheng Li, Chin-Yew Lin, Yuqing Yang, and", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 115, + 93, + 506, + 107 + ], + "spans": [ + { + "bbox": [ + 115, + 93, + 506, + 107 + ], + "score": 1.0, + "content": "Lili Qiu. 
Longllmlingua: Accelerating and enhancing llms in long context scenarios via", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 115, + 103, + 505, + 118 + ], + "spans": [ + { + "bbox": [ + 115, + 103, + 505, + 118 + ], + "score": 1.0, + "content": "prompt compression. CoRR, abs/2310.06839, 2023b. doi: 10.48550/ARXIV.2310.06839.", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 115, + 113, + 349, + 129 + ], + "spans": [ + { + "bbox": [ + 115, + 113, + 349, + 129 + ], + "score": 1.0, + "content": "URL https://doi.org/10.48550/arXiv.2310.06839.", + "type": "text" + } + ], + "index": 3 + } + ], + "index": 1.5, + "bbox_fs": [ + 105, + 82, + 506, + 129 + ] + }, + { + "type": "list", + "bbox": [ + 102, + 95, + 506, + 732 + ], + "lines": [ + { + "bbox": [ + 104, + 132, + 505, + 147 + ], + "spans": [ + { + "bbox": [ + 104, + 132, + 505, + 147 + ], + "score": 1.0, + "content": "Xisen Jin, Xiang Ren, Daniel Preotiuc-Pietro, and Pengxiang Cheng. Dataless knowledge", + "type": "text" + } + ], + "index": 4, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 144, + 505, + 159 + ], + "spans": [ + { + "bbox": [ + 115, + 144, + 505, + 159 + ], + "score": 1.0, + "content": "fusion by merging weights of language models. In The Eleventh International Conference", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 115, + 155, + 262, + 169 + ], + "spans": [ + { + "bbox": [ + 115, + 155, + 262, + 169 + ], + "score": 1.0, + "content": "on Learning Representations, 2023.", + "type": "text" + } + ], + "index": 6, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 174, + 504, + 187 + ], + "spans": [ + { + "bbox": [ + 105, + 174, + 504, + 187 + ], + "score": 1.0, + "content": "Hiroaki Kingetsu, Kenichi Kobayashi, and Taiji Suzuki. 
Neural network module decom-", + "type": "text" + } + ], + "index": 7, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 185, + 336, + 199 + ], + "spans": [ + { + "bbox": [ + 114, + 185, + 336, + 199 + ], + "score": 1.0, + "content": "position and recomposition. ArXiv preprint, 2021.", + "type": "text" + } + ], + "index": 8, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 202, + 506, + 218 + ], + "spans": [ + { + "bbox": [ + 104, + 202, + 506, + 218 + ], + "score": 1.0, + "content": "Brian Lester, Rami Al-Rfou, and Noah Constant. The power of scale for parameter-efficient", + "type": "text" + } + ], + "index": 9, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 214, + 298, + 229 + ], + "spans": [ + { + "bbox": [ + 114, + 214, + 298, + 229 + ], + "score": 1.0, + "content": "prompt tuning. In Proc. of EMNLP, 2021.", + "type": "text" + } + ], + "index": 10, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 231, + 506, + 247 + ], + "spans": [ + { + "bbox": [ + 104, + 231, + 506, + 247 + ], + "score": 1.0, + "content": "Yucheng Li, Bo Dong, Chenghua Lin, and Frank Guerin. Compressing context to enhance", + "type": "text" + } + ], + "index": 11, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 244, + 505, + 259 + ], + "spans": [ + { + "bbox": [ + 114, + 244, + 505, + 259 + ], + "score": 1.0, + "content": "inference efficiency of large language models. In Proceedings of the 2023 Conference on Em-", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 114, + 254, + 506, + 270 + ], + "spans": [ + { + "bbox": [ + 114, + 254, + 506, + 270 + ], + "score": 1.0, + "content": "pirical Methods in Natural Language Processing. Association for Computational Linguistics,", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 115, + 266, + 376, + 280 + ], + "spans": [ + { + "bbox": [ + 115, + 266, + 376, + 280 + ], + "score": 1.0, + "content": "December 2023. 
URL https://arxiv.org/abs/2310.06201.", + "type": "text" + } + ], + "index": 14, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 284, + 505, + 299 + ], + "spans": [ + { + "bbox": [ + 105, + 284, + 505, + 299 + ], + "score": 1.0, + "content": "Bill Yuchen Lin, Kangmin Tan, Chris Miller, Beiwen Tian, and Xiang Ren. Unsupervised", + "type": "text" + } + ], + "index": 15, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 297, + 428, + 308 + ], + "spans": [ + { + "bbox": [ + 115, + 297, + 428, + 308 + ], + "score": 1.0, + "content": "cross-task generalization via retrieval augmentation. In NeurIPS, 2022.", + "type": "text" + } + ], + "index": 16, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 312, + 505, + 329 + ], + "spans": [ + { + "bbox": [ + 104, + 312, + 505, + 329 + ], + "score": 1.0, + "content": "Haokun Liu, Derek Tam, Mohammed Muqeeth, Jay Mohta, Tenghao Huang, Mohit Bansal,", + "type": "text" + } + ], + "index": 17, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 325, + 505, + 340 + ], + "spans": [ + { + "bbox": [ + 114, + 325, + 505, + 340 + ], + "score": 1.0, + "content": "and Colin Raffel. Few-shot parameter-efficient fine-tuning is better and cheaper than in-", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 115, + 336, + 504, + 351 + ], + "spans": [ + { + "bbox": [ + 115, + 336, + 504, + 351 + ], + "score": 1.0, + "content": "context learning. ArXiv, abs/2205.05638, 2022. URL https://api.semanticscholar.org/", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 116, + 348, + 211, + 360 + ], + "spans": [ + { + "bbox": [ + 116, + 348, + 211, + 360 + ], + "score": 1.0, + "content": "CorpusID:248693283.", + "type": "text" + } + ], + "index": 20, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 366, + 505, + 379 + ], + "spans": [ + { + "bbox": [ + 105, + 366, + 505, + 379 + ], + "score": 1.0, + "content": "Jialin Liu, A. 
Moreau, Mike Preuss, Baptiste Roziere, J ` erΒ΄ emy Rapin, Fabien Teytaud, and Β΄", + "type": "text" + } + ], + "index": 21, + "is_list_start_line": true + }, + { + "bbox": [ + 116, + 377, + 505, + 390 + ], + "spans": [ + { + "bbox": [ + 116, + 377, + 505, + 390 + ], + "score": 1.0, + "content": "Olivier Teytaud. Versatile black-box optimization. Proceedings of the 2020 Genetic and", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 116, + 388, + 301, + 402 + ], + "spans": [ + { + "bbox": [ + 116, + 388, + 301, + 402 + ], + "score": 1.0, + "content": "Evolutionary Computation Conference, 2020.", + "type": "text" + } + ], + "index": 23, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 405, + 505, + 420 + ], + "spans": [ + { + "bbox": [ + 104, + 405, + 505, + 420 + ], + "score": 1.0, + "content": "Shayne Longpre, Le Hou, Tu Vu, Albert Webson, Hyung Won Chung, Yi Tay, Denny Zhou,", + "type": "text" + } + ], + "index": 24, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 415, + 507, + 434 + ], + "spans": [ + { + "bbox": [ + 114, + 415, + 507, + 434 + ], + "score": 1.0, + "content": "Quoc V. Le, Barret Zoph, Jason Wei, and Adam Roberts. The flan collection: Designing", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 115, + 428, + 364, + 443 + ], + "spans": [ + { + "bbox": [ + 115, + 428, + 364, + 443 + ], + "score": 1.0, + "content": "data and methods for effective instruction tuning, 2023.", + "type": "text" + } + ], + "index": 26, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 447, + 505, + 461 + ], + "spans": [ + { + "bbox": [ + 105, + 447, + 505, + 461 + ], + "score": 1.0, + "content": "Xingtai Lv, Ning Ding, Yujia Qin, Zhiyuan Liu, and Maosong Sun. 
Parameter-efficient", + "type": "text" + } + ], + "index": 27, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 457, + 506, + 473 + ], + "spans": [ + { + "bbox": [ + 115, + 457, + 506, + 473 + ], + "score": 1.0, + "content": "weight ensembling facilitates task-level knowledge transfer. In Annual Meeting of the", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 115, + 469, + 316, + 482 + ], + "spans": [ + { + "bbox": [ + 115, + 469, + 316, + 482 + ], + "score": 1.0, + "content": "Association for Computational Linguistics, 2023.", + "type": "text" + } + ], + "index": 29, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 487, + 505, + 501 + ], + "spans": [ + { + "bbox": [ + 104, + 487, + 505, + 501 + ], + "score": 1.0, + "content": "Sourab Mangrulkar, Sylvain Gugger, Lysandre Debut, Younes Belkada, and Sayak Paul.", + "type": "text" + } + ], + "index": 30, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 497, + 505, + 513 + ], + "spans": [ + { + "bbox": [ + 114, + 497, + 505, + 513 + ], + "score": 1.0, + "content": "Peft: State-of-the-art parameter-efficient fine-tuning methods. https://github.com/", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 116, + 510, + 226, + 523 + ], + "spans": [ + { + "bbox": [ + 116, + 510, + 226, + 523 + ], + "score": 1.0, + "content": "huggingface/peft, 2022.", + "type": "text" + } + ], + "index": 32, + "is_list_end_line": true + }, + { + "bbox": [ + 103, + 525, + 506, + 543 + ], + "spans": [ + { + "bbox": [ + 103, + 525, + 506, + 543 + ], + "score": 1.0, + "content": "Michael Matena and Colin Raffel. Merging models with fisher-weighted averaging. 
ArXiv", + "type": "text" + } + ], + "index": 33, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 540, + 181, + 553 + ], + "spans": [ + { + "bbox": [ + 115, + 540, + 181, + 553 + ], + "score": 1.0, + "content": "preprint, 2021.", + "type": "text" + } + ], + "index": 34, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 556, + 507, + 573 + ], + "spans": [ + { + "bbox": [ + 104, + 556, + 507, + 573 + ], + "score": 1.0, + "content": "Sewon Min, Mike Lewis, Luke Zettlemoyer, and Hannaneh Hajishirzi. MetaICL: Learning", + "type": "text" + } + ], + "index": 35, + "is_list_start_line": true + }, + { + "bbox": [ + 113, + 566, + 507, + 585 + ], + "spans": [ + { + "bbox": [ + 113, + 566, + 507, + 585 + ], + "score": 1.0, + "content": "to learn in context. In Proceedings of the 2022 Conference of the North American Chapter of", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 115, + 579, + 462, + 595 + ], + "spans": [ + { + "bbox": [ + 115, + 579, + 462, + 595 + ], + "score": 1.0, + "content": "the Association for Computational Linguistics: Human Language Technologies, 2022.", + "type": "text" + } + ], + "index": 37, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 596, + 505, + 613 + ], + "spans": [ + { + "bbox": [ + 104, + 596, + 505, + 613 + ], + "score": 1.0, + "content": "Swaroop Mishra, Daniel Khashabi, Chitta Baral, and Hannaneh Hajishirzi. Cross-task gen-", + "type": "text" + } + ], + "index": 38, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 609, + 480, + 623 + ], + "spans": [ + { + "bbox": [ + 114, + 609, + 480, + 623 + ], + "score": 1.0, + "content": "eralization via natural language crowdsourcing instructions. In Proc. of ACL, 2022.", + "type": "text" + } + ], + "index": 39, + "is_list_end_line": true + }, + { + "bbox": [ + 103, + 624, + 505, + 644 + ], + "spans": [ + { + "bbox": [ + 103, + 624, + 505, + 644 + ], + "score": 1.0, + "content": "Mohammed Muqeeth, Haokun Liu, and Colin Raffel. 
Soft merging of experts with adap-", + "type": "text" + } + ], + "index": 40, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 638, + 267, + 653 + ], + "spans": [ + { + "bbox": [ + 114, + 638, + 267, + 653 + ], + "score": 1.0, + "content": "tive routing. ArXiv preprint, 2023.", + "type": "text" + } + ], + "index": 41, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 656, + 403, + 672 + ], + "spans": [ + { + "bbox": [ + 104, + 656, + 403, + 672 + ], + "score": 1.0, + "content": "OpenAI. ChatGPT. 2022. URL https://openai.com/blog/chatgpt.", + "type": "text" + } + ], + "index": 42, + "is_list_start_line": true, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 675, + 506, + 692 + ], + "spans": [ + { + "bbox": [ + 104, + 675, + 506, + 692 + ], + "score": 1.0, + "content": "Long Ouyang, Jeff Wu, Xu Jiang, Diogo Almeida, Carroll L. Wainwright, Pamela Mishkin,", + "type": "text" + } + ], + "index": 43, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 686, + 505, + 702 + ], + "spans": [ + { + "bbox": [ + 115, + 686, + 505, + 702 + ], + "score": 1.0, + "content": "Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, John Schulman, Jacob", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 115, + 698, + 505, + 712 + ], + "spans": [ + { + "bbox": [ + 115, + 698, + 505, + 712 + ], + "score": 1.0, + "content": "Hilton, Fraser Kelton, Luke E. Miller, Maddie Simens, Amanda Askell, Peter Welinder,", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 114, + 707, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 114, + 707, + 505, + 723 + ], + "score": 1.0, + "content": "Paul Francis Christiano, Jan Leike, and Ryan J. Lowe. Training language models to fol-", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 115, + 720, + 384, + 733 + ], + "spans": [ + { + "bbox": [ + 115, + 720, + 384, + 733 + ], + "score": 1.0, + "content": "low instructions with human feedback. 
ArXiv preprint, 2022.", + "type": "text" + } + ], + "index": 47, + "is_list_end_line": true + } + ], + "index": 25.5, + "bbox_fs": [ + 103, + 132, + 507, + 733 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 105, + 82, + 504, + 105 + ], + "lines": [ + { + "bbox": [ + 105, + 82, + 505, + 96 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 505, + 96 + ], + "score": 1.0, + "content": "Panupong Pasupat and Percy Liang. Compositional semantic parsing on semi-structured", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 92, + 242, + 105 + ], + "spans": [ + { + "bbox": [ + 116, + 92, + 242, + 105 + ], + "score": 1.0, + "content": "tables. In Proc. of ACL, 2015.", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 0.5 + }, + { + "type": "text", + "bbox": [ + 107, + 113, + 505, + 147 + ], + "lines": [ + { + "bbox": [ + 105, + 111, + 506, + 128 + ], + "spans": [ + { + "bbox": [ + 105, + 111, + 506, + 128 + ], + "score": 1.0, + "content": "Edoardo Maria Ponti, Alessandro Sordoni, Yoshua Bengio, and Siva Reddy. Combining", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 115, + 124, + 505, + 137 + ], + "spans": [ + { + "bbox": [ + 115, + 124, + 505, + 137 + ], + "score": 1.0, + "content": "parameter-efficient modules for task-level generalisation. 
In Proceedings of the 17th Con-", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 115, + 135, + 472, + 149 + ], + "spans": [ + { + "bbox": [ + 115, + 135, + 472, + 149 + ], + "score": 1.0, + "content": "ference of the European Chapter of the Association for Computational Linguistics, 2023.", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 3 + }, + { + "type": "text", + "bbox": [ + 106, + 155, + 505, + 189 + ], + "lines": [ + { + "bbox": [ + 106, + 155, + 506, + 168 + ], + "spans": [ + { + "bbox": [ + 106, + 155, + 506, + 168 + ], + "score": 1.0, + "content": "Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 115, + 164, + 506, + 182 + ], + "spans": [ + { + "bbox": [ + 115, + 164, + 506, + 182 + ], + "score": 1.0, + "content": "Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. Exploring the limits of transfer learning", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 116, + 178, + 400, + 190 + ], + "spans": [ + { + "bbox": [ + 116, + 178, + 400, + 190 + ], + "score": 1.0, + "content": "with a unified text-to-text transformer. J. Mach. Learn. Res., 2020.", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 6 + }, + { + "type": "text", + "bbox": [ + 106, + 198, + 503, + 221 + ], + "lines": [ + { + "bbox": [ + 104, + 196, + 505, + 210 + ], + "spans": [ + { + "bbox": [ + 104, + 196, + 505, + 210 + ], + "score": 1.0, + "content": "J. Rapin and O. Teytaud. Nevergrad - A gradient-free optimization platform. 
https://", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 116, + 208, + 331, + 221 + ], + "spans": [ + { + "bbox": [ + 116, + 208, + 331, + 221 + ], + "score": 1.0, + "content": "GitHub.com/FacebookResearch/Nevergrad, 2018.", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 8.5 + }, + { + "type": "text", + "bbox": [ + 107, + 228, + 505, + 329 + ], + "lines": [ + { + "bbox": [ + 106, + 228, + 506, + 243 + ], + "spans": [ + { + "bbox": [ + 106, + 228, + 506, + 243 + ], + "score": 1.0, + "content": "Victor Sanh, Albert Webson, Colin Raffel, Stephen H. Bach, Lintang Sutawika, Zaid", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 115, + 239, + 505, + 254 + ], + "spans": [ + { + "bbox": [ + 115, + 239, + 505, + 254 + ], + "score": 1.0, + "content": "Alyafeai, Antoine Chaffin, Arnaud Stiegler, Arun Raja, Manan Dey, M Saiful Bari,", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 114, + 249, + 506, + 266 + ], + "spans": [ + { + "bbox": [ + 114, + 249, + 506, + 266 + ], + "score": 1.0, + "content": "Canwen Xu, Urmish Thakker, Shanya Sharma Sharma, Eliza Szczechla, Taewoon Kim,", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 114, + 261, + 505, + 276 + ], + "spans": [ + { + "bbox": [ + 114, + 261, + 505, + 276 + ], + "score": 1.0, + "content": "Gunjan Chhablani, Nihal V. 
Nayak, Debajyoti Datta, Jonathan Chang, Mike Tian-Jian", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 114, + 272, + 506, + 288 + ], + "spans": [ + { + "bbox": [ + 114, + 272, + 506, + 288 + ], + "score": 1.0, + "content": "Jiang, Han Wang, Matteo Manica, Sheng Shen, Zheng Xin Yong, Harshit Pandey, Rachel", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 115, + 282, + 506, + 297 + ], + "spans": [ + { + "bbox": [ + 115, + 282, + 506, + 297 + ], + "score": 1.0, + "content": "Bawden, Thomas Wang, Trishala Neeraj, Jos Rozen, Abheesht Sharma, Andrea Santilli,", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 114, + 293, + 506, + 309 + ], + "spans": [ + { + "bbox": [ + 114, + 293, + 506, + 309 + ], + "score": 1.0, + "content": "Thibault Fevry, Jason Alan Fries, Ryan Teehan, Teven Le Scao, Stella Biderman, Leo Gao, Β΄", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 115, + 305, + 506, + 319 + ], + "spans": [ + { + "bbox": [ + 115, + 305, + 506, + 319 + ], + "score": 1.0, + "content": "Thomas Wolf, and Alexander M. Rush. Multitask prompted training enables zero-shot", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 116, + 317, + 302, + 330 + ], + "spans": [ + { + "bbox": [ + 116, + 317, + 302, + 330 + ], + "score": 1.0, + "content": "task generalization. In Proc. of ICLR, 2022.", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 14 + }, + { + "type": "text", + "bbox": [ + 107, + 337, + 505, + 371 + ], + "lines": [ + { + "bbox": [ + 105, + 335, + 505, + 351 + ], + "spans": [ + { + "bbox": [ + 105, + 335, + 505, + 351 + ], + "score": 1.0, + "content": "Noam Shazeer, Azalia Mirhoseini, Krzysztof Maziarz, Andy Davis, Quoc V. Le, Geoffrey E.", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 115, + 348, + 505, + 362 + ], + "spans": [ + { + "bbox": [ + 115, + 348, + 505, + 362 + ], + "score": 1.0, + "content": "Hinton, and Jeff Dean. 
Outrageously large neural networks: The sparsely-gated mixture-", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 117, + 359, + 288, + 371 + ], + "spans": [ + { + "bbox": [ + 117, + 359, + 288, + 371 + ], + "score": 1.0, + "content": "of-experts layer. In Proc. of ICLR, 2017.", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 20 + }, + { + "type": "text", + "bbox": [ + 106, + 379, + 505, + 435 + ], + "lines": [ + { + "bbox": [ + 105, + 379, + 506, + 393 + ], + "spans": [ + { + "bbox": [ + 105, + 379, + 506, + 393 + ], + "score": 1.0, + "content": "Sheng Shen, Le Hou, Yanqi Zhou, Nan Du, Shayne Longpre, Jason Wei, Hyung Won", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 115, + 389, + 506, + 405 + ], + "spans": [ + { + "bbox": [ + 115, + 389, + 506, + 405 + ], + "score": 1.0, + "content": "Chung, Barret Zoph, William Fedus, Xinyun Chen, Tu Vu, Yuexin Wu, Wuyang Chen,", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 115, + 400, + 505, + 415 + ], + "spans": [ + { + "bbox": [ + 115, + 400, + 505, + 415 + ], + "score": 1.0, + "content": "Albert Webson, Yunxuan Li, Vincent Zhao, Hongkun Yu, Kurt Keutzer, Trevor Darrell,", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 115, + 411, + 506, + 426 + ], + "spans": [ + { + "bbox": [ + 115, + 411, + 506, + 426 + ], + "score": 1.0, + "content": "and Denny Zhou. 
Mixture-of-experts meets instruction tuning:a winning combination", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 115, + 422, + 262, + 438 + ], + "spans": [ + { + "bbox": [ + 115, + 422, + 262, + 438 + ], + "score": 1.0, + "content": "for large language models, 2023.", + "type": "text" + } + ], + "index": 26 + } + ], + "index": 24 + }, + { + "type": "text", + "bbox": [ + 106, + 443, + 503, + 466 + ], + "lines": [ + { + "bbox": [ + 104, + 439, + 505, + 459 + ], + "spans": [ + { + "bbox": [ + 104, + 439, + 505, + 459 + ], + "score": 1.0, + "content": "George Stoica, Daniel Bolya, Jakob Bjorner, Taylor Hearn, and Judy Hoffman. Zipit! merg-", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 115, + 453, + 389, + 468 + ], + "spans": [ + { + "bbox": [ + 115, + 453, + 389, + 468 + ], + "score": 1.0, + "content": "ing models from different tasks without training. arXiv, 2023.", + "type": "text" + } + ], + "index": 28 + } + ], + "index": 27.5 + }, + { + "type": "text", + "bbox": [ + 107, + 474, + 505, + 530 + ], + "lines": [ + { + "bbox": [ + 106, + 474, + 506, + 489 + ], + "spans": [ + { + "bbox": [ + 106, + 474, + 506, + 489 + ], + "score": 1.0, + "content": "Tianxiang Sun, Yunfan Shao, Hong Qian, Xuanjing Huang, and Xipeng Qiu. Black-box tun-", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 115, + 484, + 505, + 500 + ], + "spans": [ + { + "bbox": [ + 115, + 484, + 505, + 500 + ], + "score": 1.0, + "content": "ing for language-model-as-a-service. 
In Kamalika Chaudhuri, Stefanie Jegelka, Le Song,", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 115, + 495, + 505, + 511 + ], + "spans": [ + { + "bbox": [ + 115, + 495, + 505, + 511 + ], + "score": 1.0, + "content": "Csaba Szepesvari, Gang Niu, and Sivan Sabato (eds.), Β΄ International Conference on Machine", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 115, + 506, + 505, + 522 + ], + "spans": [ + { + "bbox": [ + 115, + 506, + 505, + 522 + ], + "score": 1.0, + "content": "Learning, ICML 2022, 17-23 July 2022, Baltimore, Maryland, USA, Proceedings of Machine", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 115, + 518, + 228, + 532 + ], + "spans": [ + { + "bbox": [ + 115, + 518, + 228, + 532 + ], + "score": 1.0, + "content": "Learning Research, 2022.", + "type": "text" + } + ], + "index": 33 + } + ], + "index": 31 + }, + { + "type": "text", + "bbox": [ + 106, + 538, + 504, + 562 + ], + "lines": [ + { + "bbox": [ + 106, + 538, + 505, + 552 + ], + "spans": [ + { + "bbox": [ + 106, + 538, + 505, + 552 + ], + "score": 1.0, + "content": "Tianxiang Sun, Zhengfu He, Qin Zhu, Xipeng Qiu, and Xuanjing Huang. Multitask pre-", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 115, + 549, + 472, + 563 + ], + "spans": [ + { + "bbox": [ + 115, + 549, + 472, + 563 + ], + "score": 1.0, + "content": "training of modular prompt for Chinese few-shot learning. In Proc. 
of ACL, 2023.", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 34.5 + }, + { + "type": "text", + "bbox": [ + 106, + 569, + 505, + 615 + ], + "lines": [ + { + "bbox": [ + 105, + 569, + 506, + 583 + ], + "spans": [ + { + "bbox": [ + 105, + 569, + 506, + 583 + ], + "score": 1.0, + "content": "Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux,", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 116, + 581, + 505, + 594 + ], + "spans": [ + { + "bbox": [ + 116, + 581, + 505, + 594 + ], + "score": 1.0, + "content": "Timothee Lacroix, Baptiste Rozi Β΄ ere, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien `", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 115, + 591, + 505, + 605 + ], + "spans": [ + { + "bbox": [ + 115, + 591, + 505, + 605 + ], + "score": 1.0, + "content": "Rodriguez, Armand Joulin, Edouard Grave, and Guillaume Lample. Llama: Open and", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 115, + 603, + 378, + 617 + ], + "spans": [ + { + "bbox": [ + 115, + 603, + 378, + 617 + ], + "score": 1.0, + "content": "efficient foundation language models. ArXiv preprint, 2023.", + "type": "text" + } + ], + "index": 39 + } + ], + "index": 37.5 + }, + { + "type": "text", + "bbox": [ + 107, + 623, + 505, + 690 + ], + "lines": [ + { + "bbox": [ + 106, + 623, + 505, + 636 + ], + "spans": [ + { + "bbox": [ + 106, + 623, + 505, + 636 + ], + "score": 1.0, + "content": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N.", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 115, + 634, + 505, + 647 + ], + "spans": [ + { + "bbox": [ + 115, + 634, + 505, + 647 + ], + "score": 1.0, + "content": "Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. 
In Isabelle", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 115, + 645, + 505, + 659 + ], + "spans": [ + { + "bbox": [ + 115, + 645, + 505, + 659 + ], + "score": 1.0, + "content": "Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vish-", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 115, + 655, + 505, + 670 + ], + "spans": [ + { + "bbox": [ + 115, + 655, + 505, + 670 + ], + "score": 1.0, + "content": "wanathan, and Roman Garnett (eds.), Advances in Neural Information Processing Systems", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 115, + 667, + 506, + 681 + ], + "spans": [ + { + "bbox": [ + 115, + 667, + 506, + 681 + ], + "score": 1.0, + "content": "30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017,", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 115, + 678, + 237, + 691 + ], + "spans": [ + { + "bbox": [ + 115, + 678, + 237, + 691 + ], + "score": 1.0, + "content": "Long Beach, CA, USA, 2017.", + "type": "text" + } + ], + "index": 45 + } + ], + "index": 42.5 + }, + { + "type": "text", + "bbox": [ + 108, + 699, + 504, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 697, + 505, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 697, + 505, + 712 + ], + "score": 1.0, + "content": "Yaqing Wang, Sahaj Agarwal, Subhabrata Mukherjee, Xiaodong Liu, Jing Gao,", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 116, + 708, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 116, + 708, + 505, + 722 + ], + "score": 1.0, + "content": "Ahmed Hassan Awadallah, and Jianfeng Gao. AdaMix: Mixture-of-adaptations for", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 115, + 720, + 378, + 733 + ], + "spans": [ + { + "bbox": [ + 115, + 720, + 378, + 733 + ], + "score": 1.0, + "content": "parameter-efficient model tuning. In Proc. 
of EMNLP, 2022.", + "type": "text" + } + ], + "index": 48 + } + ], + "index": 47 + } + ], + "page_idx": 12, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 26, + 316, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "score": 1.0, + "content": "Published as a conference paper at COLM 2024", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 760 + ], + "lines": [ + { + "bbox": [ + 297, + 750, + 312, + 763 + ], + "spans": [ + { + "bbox": [ + 297, + 750, + 312, + 763 + ], + "score": 1.0, + "content": "13", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 105, + 82, + 504, + 105 + ], + "lines": [ + { + "bbox": [ + 105, + 82, + 505, + 96 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 505, + 96 + ], + "score": 1.0, + "content": "Panupong Pasupat and Percy Liang. Compositional semantic parsing on semi-structured", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 92, + 242, + 105 + ], + "spans": [ + { + "bbox": [ + 116, + 92, + 242, + 105 + ], + "score": 1.0, + "content": "tables. In Proc. of ACL, 2015.", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 0.5, + "bbox_fs": [ + 105, + 82, + 505, + 105 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 113, + 505, + 147 + ], + "lines": [ + { + "bbox": [ + 105, + 111, + 506, + 128 + ], + "spans": [ + { + "bbox": [ + 105, + 111, + 506, + 128 + ], + "score": 1.0, + "content": "Edoardo Maria Ponti, Alessandro Sordoni, Yoshua Bengio, and Siva Reddy. Combining", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 115, + 124, + 505, + 137 + ], + "spans": [ + { + "bbox": [ + 115, + 124, + 505, + 137 + ], + "score": 1.0, + "content": "parameter-efficient modules for task-level generalisation. 
In Proceedings of the 17th Con-", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 115, + 135, + 472, + 149 + ], + "spans": [ + { + "bbox": [ + 115, + 135, + 472, + 149 + ], + "score": 1.0, + "content": "ference of the European Chapter of the Association for Computational Linguistics, 2023.", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 3, + "bbox_fs": [ + 105, + 111, + 506, + 149 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 155, + 505, + 189 + ], + "lines": [ + { + "bbox": [ + 106, + 155, + 506, + 168 + ], + "spans": [ + { + "bbox": [ + 106, + 155, + 506, + 168 + ], + "score": 1.0, + "content": "Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 115, + 164, + 506, + 182 + ], + "spans": [ + { + "bbox": [ + 115, + 164, + 506, + 182 + ], + "score": 1.0, + "content": "Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. Exploring the limits of transfer learning", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 116, + 178, + 400, + 190 + ], + "spans": [ + { + "bbox": [ + 116, + 178, + 400, + 190 + ], + "score": 1.0, + "content": "with a unified text-to-text transformer. J. Mach. Learn. Res., 2020.", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 6, + "bbox_fs": [ + 106, + 155, + 506, + 190 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 198, + 503, + 221 + ], + "lines": [ + { + "bbox": [ + 104, + 196, + 505, + 210 + ], + "spans": [ + { + "bbox": [ + 104, + 196, + 505, + 210 + ], + "score": 1.0, + "content": "J. Rapin and O. Teytaud. Nevergrad - A gradient-free optimization platform. 
https://", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 116, + 208, + 331, + 221 + ], + "spans": [ + { + "bbox": [ + 116, + 208, + 331, + 221 + ], + "score": 1.0, + "content": "GitHub.com/FacebookResearch/Nevergrad, 2018.", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 8.5, + "bbox_fs": [ + 104, + 196, + 505, + 221 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 228, + 505, + 329 + ], + "lines": [ + { + "bbox": [ + 106, + 228, + 506, + 243 + ], + "spans": [ + { + "bbox": [ + 106, + 228, + 506, + 243 + ], + "score": 1.0, + "content": "Victor Sanh, Albert Webson, Colin Raffel, Stephen H. Bach, Lintang Sutawika, Zaid", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 115, + 239, + 505, + 254 + ], + "spans": [ + { + "bbox": [ + 115, + 239, + 505, + 254 + ], + "score": 1.0, + "content": "Alyafeai, Antoine Chaffin, Arnaud Stiegler, Arun Raja, Manan Dey, M Saiful Bari,", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 114, + 249, + 506, + 266 + ], + "spans": [ + { + "bbox": [ + 114, + 249, + 506, + 266 + ], + "score": 1.0, + "content": "Canwen Xu, Urmish Thakker, Shanya Sharma Sharma, Eliza Szczechla, Taewoon Kim,", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 114, + 261, + 505, + 276 + ], + "spans": [ + { + "bbox": [ + 114, + 261, + 505, + 276 + ], + "score": 1.0, + "content": "Gunjan Chhablani, Nihal V. 
Nayak, Debajyoti Datta, Jonathan Chang, Mike Tian-Jian", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 114, + 272, + 506, + 288 + ], + "spans": [ + { + "bbox": [ + 114, + 272, + 506, + 288 + ], + "score": 1.0, + "content": "Jiang, Han Wang, Matteo Manica, Sheng Shen, Zheng Xin Yong, Harshit Pandey, Rachel", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 115, + 282, + 506, + 297 + ], + "spans": [ + { + "bbox": [ + 115, + 282, + 506, + 297 + ], + "score": 1.0, + "content": "Bawden, Thomas Wang, Trishala Neeraj, Jos Rozen, Abheesht Sharma, Andrea Santilli,", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 114, + 293, + 506, + 309 + ], + "spans": [ + { + "bbox": [ + 114, + 293, + 506, + 309 + ], + "score": 1.0, + "content": "Thibault Fevry, Jason Alan Fries, Ryan Teehan, Teven Le Scao, Stella Biderman, Leo Gao, Β΄", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 115, + 305, + 506, + 319 + ], + "spans": [ + { + "bbox": [ + 115, + 305, + 506, + 319 + ], + "score": 1.0, + "content": "Thomas Wolf, and Alexander M. Rush. Multitask prompted training enables zero-shot", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 116, + 317, + 302, + 330 + ], + "spans": [ + { + "bbox": [ + 116, + 317, + 302, + 330 + ], + "score": 1.0, + "content": "task generalization. In Proc. of ICLR, 2022.", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 14, + "bbox_fs": [ + 106, + 228, + 506, + 330 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 337, + 505, + 371 + ], + "lines": [ + { + "bbox": [ + 105, + 335, + 505, + 351 + ], + "spans": [ + { + "bbox": [ + 105, + 335, + 505, + 351 + ], + "score": 1.0, + "content": "Noam Shazeer, Azalia Mirhoseini, Krzysztof Maziarz, Andy Davis, Quoc V. 
Le, Geoffrey E.", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 115, + 348, + 505, + 362 + ], + "spans": [ + { + "bbox": [ + 115, + 348, + 505, + 362 + ], + "score": 1.0, + "content": "Hinton, and Jeff Dean. Outrageously large neural networks: The sparsely-gated mixture-", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 117, + 359, + 288, + 371 + ], + "spans": [ + { + "bbox": [ + 117, + 359, + 288, + 371 + ], + "score": 1.0, + "content": "of-experts layer. In Proc. of ICLR, 2017.", + "type": "text" + } + ], + "index": 21 + } + ], + "index": 20, + "bbox_fs": [ + 105, + 335, + 505, + 371 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 379, + 505, + 435 + ], + "lines": [ + { + "bbox": [ + 105, + 379, + 506, + 393 + ], + "spans": [ + { + "bbox": [ + 105, + 379, + 506, + 393 + ], + "score": 1.0, + "content": "Sheng Shen, Le Hou, Yanqi Zhou, Nan Du, Shayne Longpre, Jason Wei, Hyung Won", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 115, + 389, + 506, + 405 + ], + "spans": [ + { + "bbox": [ + 115, + 389, + 506, + 405 + ], + "score": 1.0, + "content": "Chung, Barret Zoph, William Fedus, Xinyun Chen, Tu Vu, Yuexin Wu, Wuyang Chen,", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 115, + 400, + 505, + 415 + ], + "spans": [ + { + "bbox": [ + 115, + 400, + 505, + 415 + ], + "score": 1.0, + "content": "Albert Webson, Yunxuan Li, Vincent Zhao, Hongkun Yu, Kurt Keutzer, Trevor Darrell,", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 115, + 411, + 506, + 426 + ], + "spans": [ + { + "bbox": [ + 115, + 411, + 506, + 426 + ], + "score": 1.0, + "content": "and Denny Zhou. 
Mixture-of-experts meets instruction tuning:a winning combination", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 115, + 422, + 262, + 438 + ], + "spans": [ + { + "bbox": [ + 115, + 422, + 262, + 438 + ], + "score": 1.0, + "content": "for large language models, 2023.", + "type": "text" + } + ], + "index": 26 + } + ], + "index": 24, + "bbox_fs": [ + 105, + 379, + 506, + 438 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 443, + 503, + 466 + ], + "lines": [ + { + "bbox": [ + 104, + 439, + 505, + 459 + ], + "spans": [ + { + "bbox": [ + 104, + 439, + 505, + 459 + ], + "score": 1.0, + "content": "George Stoica, Daniel Bolya, Jakob Bjorner, Taylor Hearn, and Judy Hoffman. Zipit! merg-", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 115, + 453, + 389, + 468 + ], + "spans": [ + { + "bbox": [ + 115, + 453, + 389, + 468 + ], + "score": 1.0, + "content": "ing models from different tasks without training. arXiv, 2023.", + "type": "text" + } + ], + "index": 28 + } + ], + "index": 27.5, + "bbox_fs": [ + 104, + 439, + 505, + 468 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 474, + 505, + 530 + ], + "lines": [ + { + "bbox": [ + 106, + 474, + 506, + 489 + ], + "spans": [ + { + "bbox": [ + 106, + 474, + 506, + 489 + ], + "score": 1.0, + "content": "Tianxiang Sun, Yunfan Shao, Hong Qian, Xuanjing Huang, and Xipeng Qiu. Black-box tun-", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 115, + 484, + 505, + 500 + ], + "spans": [ + { + "bbox": [ + 115, + 484, + 505, + 500 + ], + "score": 1.0, + "content": "ing for language-model-as-a-service. 
In Kamalika Chaudhuri, Stefanie Jegelka, Le Song,", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 115, + 495, + 505, + 511 + ], + "spans": [ + { + "bbox": [ + 115, + 495, + 505, + 511 + ], + "score": 1.0, + "content": "Csaba Szepesvari, Gang Niu, and Sivan Sabato (eds.), Β΄ International Conference on Machine", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 115, + 506, + 505, + 522 + ], + "spans": [ + { + "bbox": [ + 115, + 506, + 505, + 522 + ], + "score": 1.0, + "content": "Learning, ICML 2022, 17-23 July 2022, Baltimore, Maryland, USA, Proceedings of Machine", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 115, + 518, + 228, + 532 + ], + "spans": [ + { + "bbox": [ + 115, + 518, + 228, + 532 + ], + "score": 1.0, + "content": "Learning Research, 2022.", + "type": "text" + } + ], + "index": 33 + } + ], + "index": 31, + "bbox_fs": [ + 106, + 474, + 506, + 532 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 538, + 504, + 562 + ], + "lines": [ + { + "bbox": [ + 106, + 538, + 505, + 552 + ], + "spans": [ + { + "bbox": [ + 106, + 538, + 505, + 552 + ], + "score": 1.0, + "content": "Tianxiang Sun, Zhengfu He, Qin Zhu, Xipeng Qiu, and Xuanjing Huang. Multitask pre-", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 115, + 549, + 472, + 563 + ], + "spans": [ + { + "bbox": [ + 115, + 549, + 472, + 563 + ], + "score": 1.0, + "content": "training of modular prompt for Chinese few-shot learning. In Proc. 
of ACL, 2023.", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 34.5, + "bbox_fs": [ + 106, + 538, + 505, + 563 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 569, + 505, + 615 + ], + "lines": [ + { + "bbox": [ + 105, + 569, + 506, + 583 + ], + "spans": [ + { + "bbox": [ + 105, + 569, + 506, + 583 + ], + "score": 1.0, + "content": "Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux,", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 116, + 581, + 505, + 594 + ], + "spans": [ + { + "bbox": [ + 116, + 581, + 505, + 594 + ], + "score": 1.0, + "content": "Timothee Lacroix, Baptiste Rozi Β΄ ere, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien `", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 115, + 591, + 505, + 605 + ], + "spans": [ + { + "bbox": [ + 115, + 591, + 505, + 605 + ], + "score": 1.0, + "content": "Rodriguez, Armand Joulin, Edouard Grave, and Guillaume Lample. Llama: Open and", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 115, + 603, + 378, + 617 + ], + "spans": [ + { + "bbox": [ + 115, + 603, + 378, + 617 + ], + "score": 1.0, + "content": "efficient foundation language models. ArXiv preprint, 2023.", + "type": "text" + } + ], + "index": 39 + } + ], + "index": 37.5, + "bbox_fs": [ + 105, + 569, + 506, + 617 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 623, + 505, + 690 + ], + "lines": [ + { + "bbox": [ + 106, + 623, + 505, + 636 + ], + "spans": [ + { + "bbox": [ + 106, + 623, + 505, + 636 + ], + "score": 1.0, + "content": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N.", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 115, + 634, + 505, + 647 + ], + "spans": [ + { + "bbox": [ + 115, + 634, + 505, + 647 + ], + "score": 1.0, + "content": "Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. 
In Isabelle", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 115, + 645, + 505, + 659 + ], + "spans": [ + { + "bbox": [ + 115, + 645, + 505, + 659 + ], + "score": 1.0, + "content": "Guyon, Ulrike von Luxburg, Samy Bengio, Hanna M. Wallach, Rob Fergus, S. V. N. Vish-", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 115, + 655, + 505, + 670 + ], + "spans": [ + { + "bbox": [ + 115, + 655, + 505, + 670 + ], + "score": 1.0, + "content": "wanathan, and Roman Garnett (eds.), Advances in Neural Information Processing Systems", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 115, + 667, + 506, + 681 + ], + "spans": [ + { + "bbox": [ + 115, + 667, + 506, + 681 + ], + "score": 1.0, + "content": "30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017,", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 115, + 678, + 237, + 691 + ], + "spans": [ + { + "bbox": [ + 115, + 678, + 237, + 691 + ], + "score": 1.0, + "content": "Long Beach, CA, USA, 2017.", + "type": "text" + } + ], + "index": 45 + } + ], + "index": 42.5, + "bbox_fs": [ + 106, + 623, + 506, + 691 + ] + }, + { + "type": "text", + "bbox": [ + 108, + 699, + 504, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 697, + 505, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 697, + 505, + 712 + ], + "score": 1.0, + "content": "Yaqing Wang, Sahaj Agarwal, Subhabrata Mukherjee, Xiaodong Liu, Jing Gao,", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 116, + 708, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 116, + 708, + 505, + 722 + ], + "score": 1.0, + "content": "Ahmed Hassan Awadallah, and Jianfeng Gao. AdaMix: Mixture-of-adaptations for", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 115, + 720, + 378, + 733 + ], + "spans": [ + { + "bbox": [ + 115, + 720, + 378, + 733 + ], + "score": 1.0, + "content": "parameter-efficient model tuning. In Proc. 
of EMNLP, 2022.", + "type": "text" + } + ], + "index": 48 + } + ], + "index": 47, + "bbox_fs": [ + 105, + 697, + 505, + 733 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 104, + 68, + 506, + 527 + ], + "lines": [ + { + "bbox": [ + 104, + 80, + 506, + 96 + ], + "spans": [ + { + "bbox": [ + 104, + 80, + 506, + 96 + ], + "score": 1.0, + "content": "Jason Wei, Maarten Bosma, Vincent Y. Zhao, Kelvin Guu, Adams Wei Yu, Brian Lester, Nan", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 115, + 93, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 115, + 93, + 504, + 106 + ], + "score": 1.0, + "content": "Du, Andrew M. Dai, and Quoc V. Le. Finetuned language models are zero-shot learners.", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 115, + 104, + 212, + 117 + ], + "spans": [ + { + "bbox": [ + 115, + 104, + 212, + 117 + ], + "score": 1.0, + "content": "In Proc. of ICLR, 2022.", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 122, + 505, + 136 + ], + "spans": [ + { + "bbox": [ + 106, + 122, + 505, + 136 + ], + "score": 1.0, + "content": "Chengyue Wu, Teng Wang, Yixiao Ge, Zeyu Lu, Ruisong Zhou, Ying Shan, and Ping Luo.", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 117, + 133, + 505, + 148 + ], + "spans": [ + { + "bbox": [ + 117, + 136, + 125, + 145 + ], + "score": 0.4, + "content": "\\pi", + "type": "inline_equation" + }, + { + "bbox": [ + 125, + 133, + 505, + 148 + ], + "score": 1.0, + "content": "-tuning: Transferring multimodal foundation models with optimal multi-task inter-", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 114, + 144, + 505, + 158 + ], + "spans": [ + { + "bbox": [ + 114, + 144, + 505, + 158 + ], + "score": 1.0, + "content": "polation. 
In Andreas Krause, Emma Brunskill, Kyunghyun Cho, Barbara Engelhardt,", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 114, + 154, + 505, + 170 + ], + "spans": [ + { + "bbox": [ + 114, + 154, + 505, + 170 + ], + "score": 1.0, + "content": "Sivan Sabato, and Jonathan Scarlett (eds.), International Conference on Machine Learning,", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 115, + 166, + 504, + 179 + ], + "spans": [ + { + "bbox": [ + 115, + 166, + 504, + 179 + ], + "score": 1.0, + "content": "ICML 2023, 23-29 July 2023, Honolulu, Hawaii, USA, volume 202 of Proceedings of Ma-", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 115, + 178, + 503, + 191 + ], + "spans": [ + { + "bbox": [ + 115, + 178, + 503, + 191 + ], + "score": 1.0, + "content": "chine Learning Research, pp. 37713–37727. PMLR, 2023a. URL https://proceedings.mlr.", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 115, + 189, + 227, + 201 + ], + "spans": [ + { + "bbox": [ + 115, + 189, + 227, + 201 + ], + "score": 1.0, + "content": "press/v202/wu23t.html.", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 206, + 506, + 222 + ], + "spans": [ + { + "bbox": [ + 105, + 206, + 506, + 222 + ], + "score": 1.0, + "content": "Shijie Wu, Ozan Irsoy, Steven Lu, Vadim Dabravolski, Mark Dredze, Sebastian Gehrmann,", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 114, + 216, + 505, + 233 + ], + "spans": [ + { + "bbox": [ + 114, + 216, + 505, + 233 + ], + "score": 1.0, + "content": "Prabhanjan Kambadur, David S. Rosenberg, and Gideon Mann. Bloomberggpt: A large", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 115, + 230, + 505, + 243 + ], + "spans": [ + { + "bbox": [ + 115, + 230, + 505, + 243 + ], + "score": 1.0, + "content": "language model for finance. CoRR, abs/2303.17564, 2023b. 
doi: 10.48550/arXiv.2303.", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 115, + 240, + 380, + 253 + ], + "spans": [ + { + "bbox": [ + 115, + 240, + 380, + 253 + ], + "score": 1.0, + "content": "17564. URL https://doi.org/10.48550/arXiv.2303.17564.", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 259, + 505, + 272 + ], + "spans": [ + { + "bbox": [ + 106, + 259, + 505, + 272 + ], + "score": 1.0, + "content": "Prateek Yadav, Derek Tam, Leshem Choshen, Colin Raffel, and Mohit Bansal. TIES-", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 115, + 270, + 506, + 284 + ], + "spans": [ + { + "bbox": [ + 115, + 270, + 506, + 284 + ], + "score": 1.0, + "content": "merging: Resolving interference when merging models. In Thirty-seventh Conference on", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 115, + 281, + 505, + 294 + ], + "spans": [ + { + "bbox": [ + 115, + 281, + 505, + 294 + ], + "score": 1.0, + "content": "Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 116, + 291, + 171, + 306 + ], + "spans": [ + { + "bbox": [ + 116, + 291, + 171, + 306 + ], + "score": 1.0, + "content": "xtaX3WyCj1.", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 309, + 505, + 326 + ], + "spans": [ + { + "bbox": [ + 105, + 309, + 505, + 326 + ], + "score": 1.0, + "content": "Qinyuan Ye, Bill Yuchen Lin, and Xiang Ren. CrossFit: A few-shot learning challenge for", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 114, + 321, + 376, + 335 + ], + "spans": [ + { + "bbox": [ + 114, + 321, + 376, + 335 + ], + "score": 1.0, + "content": "cross-task generalization in NLP. In Proc. 
of EMNLP, 2021.", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 106, + 340, + 504, + 353 + ], + "spans": [ + { + "bbox": [ + 106, + 340, + 504, + 353 + ], + "score": 1.0, + "content": "Chris Zhang, Mengye Ren, and Raquel Urtasun. Graph hypernetworks for neural archi-", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 116, + 352, + 280, + 365 + ], + "spans": [ + { + "bbox": [ + 116, + 352, + 280, + 365 + ], + "score": 1.0, + "content": "tecture search. In Proc. of ICLR, 2019.", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 371, + 504, + 384 + ], + "spans": [ + { + "bbox": [ + 105, + 371, + 504, + 384 + ], + "score": 1.0, + "content": "Fan Zhang, Duyu Tang, Yong Dai, Cong Zhou, Shuangzhi Wu, and Shuming Shi. Skillnet-", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 114, + 380, + 506, + 397 + ], + "spans": [ + { + "bbox": [ + 114, + 380, + 506, + 397 + ], + "score": 1.0, + "content": "nlu: A sparsely activated model for general-purpose natural language understanding,", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 115, + 393, + 141, + 405 + ], + "spans": [ + { + "bbox": [ + 115, + 393, + 141, + 405 + ], + "score": 1.0, + "content": "2022.", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 411, + 505, + 425 + ], + "spans": [ + { + "bbox": [ + 105, + 411, + 505, + 425 + ], + "score": 1.0, + "content": "Jinghan Zhang, Shiqi Chen, Junteng Liu, and Junxian He. Composing parameter-efficient", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 115, + 423, + 376, + 436 + ], + "spans": [ + { + "bbox": [ + 115, + 423, + 376, + 436 + ], + "score": 1.0, + "content": "modules with arithmetic operations. 
ArXiv preprint, 2023a.", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 441, + 506, + 455 + ], + "spans": [ + { + "bbox": [ + 105, + 441, + 506, + 455 + ], + "score": 1.0, + "content": "Longteng Zhang, Lin Zhang, Shaohuai Shi, Xiaowen Chu, and Bo Li. Lora-fa:", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 115, + 453, + 505, + 466 + ], + "spans": [ + { + "bbox": [ + 115, + 453, + 505, + 466 + ], + "score": 1.0, + "content": "Memory-efficient low-rank adaptation for large language models fine-tuning. ArXiv,", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 115, + 464, + 498, + 477 + ], + "spans": [ + { + "bbox": [ + 115, + 464, + 498, + 477 + ], + "score": 1.0, + "content": "abs/2308.03303, 2023b. URL https://api.semanticscholar.org/CorpusID:260683267.", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 106, + 483, + 505, + 496 + ], + "spans": [ + { + "bbox": [ + 106, + 483, + 505, + 496 + ], + "score": 1.0, + "content": "Wangchunshu Zhou, Yuchen Eleanor Jiang, Ryan Cotterell, and Mrinmaya Sachan. Ef-", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 115, + 493, + 506, + 507 + ], + "spans": [ + { + "bbox": [ + 115, + 493, + 506, + 507 + ], + "score": 1.0, + "content": "ficient prompting via dynamic in-context learning. CoRR, abs/2305.11170, 2023. doi:", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 115, + 503, + 479, + 518 + ], + "spans": [ + { + "bbox": [ + 115, + 503, + 479, + 518 + ], + "score": 1.0, + "content": "10.48550/ARXIV.2305.11170. 
URL https://doi.org/10.48550/arXiv.2305.11170.", + "type": "text" + } + ], + "index": 32 + } + ], + "index": 16 + } + ], + "page_idx": 13, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 26, + 316, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "score": 1.0, + "content": "Published as a conference paper at COLM 2024", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 760 + ], + "lines": [ + { + "bbox": [ + 298, + 750, + 313, + 765 + ], + "spans": [ + { + "bbox": [ + 298, + 750, + 313, + 765 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 15, + "width": 15 + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "list", + "bbox": [ + 104, + 68, + 506, + 527 + ], + "lines": [ + { + "bbox": [ + 104, + 80, + 506, + 96 + ], + "spans": [ + { + "bbox": [ + 104, + 80, + 506, + 96 + ], + "score": 1.0, + "content": "Jason Wei, Maarten Bosma, Vincent Y. Zhao, Kelvin Guu, Adams Wei Yu, Brian Lester, Nan", + "type": "text" + } + ], + "index": 0, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 93, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 115, + 93, + 504, + 106 + ], + "score": 1.0, + "content": "Du, Andrew M. Dai, and Quoc V. Le. Finetuned language models are zero-shot learners.", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 115, + 104, + 212, + 117 + ], + "spans": [ + { + "bbox": [ + 115, + 104, + 212, + 117 + ], + "score": 1.0, + "content": "In Proc. 
of ICLR, 2022.", + "type": "text" + } + ], + "index": 2, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 122, + 505, + 136 + ], + "spans": [ + { + "bbox": [ + 106, + 122, + 505, + 136 + ], + "score": 1.0, + "content": "Chengyue Wu, Teng Wang, Yixiao Ge, Zeyu Lu, Ruisong Zhou, Ying Shan, and Ping Luo.", + "type": "text" + } + ], + "index": 3, + "is_list_start_line": true + }, + { + "bbox": [ + 117, + 133, + 505, + 148 + ], + "spans": [ + { + "bbox": [ + 117, + 136, + 125, + 145 + ], + "score": 0.4, + "content": "\\pi", + "type": "inline_equation" + }, + { + "bbox": [ + 125, + 133, + 505, + 148 + ], + "score": 1.0, + "content": "-tuning: Transferring multimodal foundation models with optimal multi-task inter-", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 114, + 144, + 505, + 158 + ], + "spans": [ + { + "bbox": [ + 114, + 144, + 505, + 158 + ], + "score": 1.0, + "content": "polation. In Andreas Krause, Emma Brunskill, Kyunghyun Cho, Barbara Engelhardt,", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 114, + 154, + 505, + 170 + ], + "spans": [ + { + "bbox": [ + 114, + 154, + 505, + 170 + ], + "score": 1.0, + "content": "Sivan Sabato, and Jonathan Scarlett (eds.), International Conference on Machine Learning,", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 115, + 166, + 504, + 179 + ], + "spans": [ + { + "bbox": [ + 115, + 166, + 504, + 179 + ], + "score": 1.0, + "content": "ICML 2023, 23-29 July 2023, Honolulu, Hawaii, USA, volume 202 of Proceedings of Ma-", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 115, + 178, + 503, + 191 + ], + "spans": [ + { + "bbox": [ + 115, + 178, + 503, + 191 + ], + "score": 1.0, + "content": "chine Learning Research, pp. 37713–37727. PMLR, 2023a. 
URL https://proceedings.mlr.", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 115, + 189, + 227, + 201 + ], + "spans": [ + { + "bbox": [ + 115, + 189, + 227, + 201 + ], + "score": 1.0, + "content": "press/v202/wu23t.html.", + "type": "text" + } + ], + "index": 9, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 206, + 506, + 222 + ], + "spans": [ + { + "bbox": [ + 105, + 206, + 506, + 222 + ], + "score": 1.0, + "content": "Shijie Wu, Ozan Irsoy, Steven Lu, Vadim Dabravolski, Mark Dredze, Sebastian Gehrmann,", + "type": "text" + } + ], + "index": 10, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 216, + 505, + 233 + ], + "spans": [ + { + "bbox": [ + 114, + 216, + 505, + 233 + ], + "score": 1.0, + "content": "Prabhanjan Kambadur, David S. Rosenberg, and Gideon Mann. Bloomberggpt: A large", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 115, + 230, + 505, + 243 + ], + "spans": [ + { + "bbox": [ + 115, + 230, + 505, + 243 + ], + "score": 1.0, + "content": "language model for finance. CoRR, abs/2303.17564, 2023b. doi: 10.48550/arXiv.2303.", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 115, + 240, + 380, + 253 + ], + "spans": [ + { + "bbox": [ + 115, + 240, + 380, + 253 + ], + "score": 1.0, + "content": "17564. URL https://doi.org/10.48550/arXiv.2303.17564.", + "type": "text" + } + ], + "index": 13, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 259, + 505, + 272 + ], + "spans": [ + { + "bbox": [ + 106, + 259, + 505, + 272 + ], + "score": 1.0, + "content": "Prateek Yadav, Derek Tam, Leshem Choshen, Colin Raffel, and Mohit Bansal. TIES-", + "type": "text" + } + ], + "index": 14, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 270, + 506, + 284 + ], + "spans": [ + { + "bbox": [ + 115, + 270, + 506, + 284 + ], + "score": 1.0, + "content": "merging: Resolving interference when merging models. 
In Thirty-seventh Conference on", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 115, + 281, + 505, + 294 + ], + "spans": [ + { + "bbox": [ + 115, + 281, + 505, + 294 + ], + "score": 1.0, + "content": "Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 116, + 291, + 171, + 306 + ], + "spans": [ + { + "bbox": [ + 116, + 291, + 171, + 306 + ], + "score": 1.0, + "content": "xtaX3WyCj1.", + "type": "text" + } + ], + "index": 17, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 309, + 505, + 326 + ], + "spans": [ + { + "bbox": [ + 105, + 309, + 505, + 326 + ], + "score": 1.0, + "content": "Qinyuan Ye, Bill Yuchen Lin, and Xiang Ren. CrossFit: A few-shot learning challenge for", + "type": "text" + } + ], + "index": 18, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 321, + 376, + 335 + ], + "spans": [ + { + "bbox": [ + 114, + 321, + 376, + 335 + ], + "score": 1.0, + "content": "cross-task generalization in NLP. In Proc. of EMNLP, 2021.", + "type": "text" + } + ], + "index": 19, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 340, + 504, + 353 + ], + "spans": [ + { + "bbox": [ + 106, + 340, + 504, + 353 + ], + "score": 1.0, + "content": "Chris Zhang, Mengye Ren, and Raquel Urtasun. Graph hypernetworks for neural archi-", + "type": "text" + } + ], + "index": 20, + "is_list_start_line": true + }, + { + "bbox": [ + 116, + 352, + 280, + 365 + ], + "spans": [ + { + "bbox": [ + 116, + 352, + 280, + 365 + ], + "score": 1.0, + "content": "tecture search. In Proc. of ICLR, 2019.", + "type": "text" + } + ], + "index": 21, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 371, + 504, + 384 + ], + "spans": [ + { + "bbox": [ + 105, + 371, + 504, + 384 + ], + "score": 1.0, + "content": "Fan Zhang, Duyu Tang, Yong Dai, Cong Zhou, Shuangzhi Wu, and Shuming Shi. 
Skillnet-", + "type": "text" + } + ], + "index": 22, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 380, + 506, + 397 + ], + "spans": [ + { + "bbox": [ + 114, + 380, + 506, + 397 + ], + "score": 1.0, + "content": "nlu: A sparsely activated model for general-purpose natural language understanding,", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 115, + 393, + 141, + 405 + ], + "spans": [ + { + "bbox": [ + 115, + 393, + 141, + 405 + ], + "score": 1.0, + "content": "2022.", + "type": "text" + } + ], + "index": 24, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 411, + 505, + 425 + ], + "spans": [ + { + "bbox": [ + 105, + 411, + 505, + 425 + ], + "score": 1.0, + "content": "Jinghan Zhang, Shiqi Chen, Junteng Liu, and Junxian He. Composing parameter-efficient", + "type": "text" + } + ], + "index": 25, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 423, + 376, + 436 + ], + "spans": [ + { + "bbox": [ + 115, + 423, + 376, + 436 + ], + "score": 1.0, + "content": "modules with arithmetic operations. ArXiv preprint, 2023a.", + "type": "text" + } + ], + "index": 26, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 441, + 506, + 455 + ], + "spans": [ + { + "bbox": [ + 105, + 441, + 506, + 455 + ], + "score": 1.0, + "content": "Longteng Zhang, Lin Zhang, Shaohuai Shi, Xiaowen Chu, and Bo Li. Lora-fa:", + "type": "text" + } + ], + "index": 27, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 453, + 505, + 466 + ], + "spans": [ + { + "bbox": [ + 115, + 453, + 505, + 466 + ], + "score": 1.0, + "content": "Memory-efficient low-rank adaptation for large language models fine-tuning. ArXiv,", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 115, + 464, + 498, + 477 + ], + "spans": [ + { + "bbox": [ + 115, + 464, + 498, + 477 + ], + "score": 1.0, + "content": "abs/2308.03303, 2023b. 
URL https://api.semanticscholar.org/CorpusID:260683267.", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 106, + 483, + 505, + 496 + ], + "spans": [ + { + "bbox": [ + 106, + 483, + 505, + 496 + ], + "score": 1.0, + "content": "Wangchunshu Zhou, Yuchen Eleanor Jiang, Ryan Cotterell, and Mrinmaya Sachan. Ef-", + "type": "text" + } + ], + "index": 30, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 493, + 506, + 507 + ], + "spans": [ + { + "bbox": [ + 115, + 493, + 506, + 507 + ], + "score": 1.0, + "content": "ficient prompting via dynamic in-context learning. CoRR, abs/2305.11170, 2023. doi:", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 115, + 503, + 479, + 518 + ], + "spans": [ + { + "bbox": [ + 115, + 503, + 479, + 518 + ], + "score": 1.0, + "content": "10.48550/ARXIV.2305.11170. URL https://doi.org/10.48550/arXiv.2305.11170.", + "type": "text" + } + ], + "index": 32, + "is_list_end_line": true + } + ], + "index": 16, + "bbox_fs": [ + 104, + 80, + 506, + 518 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "table", + "bbox": [ + 107, + 112, + 543, + 205 + ], + "blocks": [ + { + "type": "table_caption", + "bbox": [ + 106, + 79, + 505, + 103 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 106, + 79, + 505, + 93 + ], + "spans": [ + { + "bbox": [ + 106, + 79, + 505, + 93 + ], + "score": 1.0, + "content": "Table 3: The top five beneficial LoRA modules for BBH tasks and their associated upstream", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 90, + 460, + 104 + ], + "spans": [ + { + "bbox": [ + 105, + 90, + 460, + 104 + ], + "score": 1.0, + "content": "tasks, the average weight values and the average performance on all BBH tasks.", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 0.5 + }, + { + "type": "table_body", + "bbox": [ + 107, + 112, + 543, + 205 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 107, + 112, + 543, + 205 + ], + "spans": [ + { + "bbox": [ + 107, + 112, + 
543, + 205 + ], + "score": 0.98, + "html": "
RankDataset: TaskWeightPerfTask Description
1WIQA: Last Process0.7228.1 Identifying the last step of a given process.
2RACE: Is this the Right Answer0.6830.8Determining if given answer is correct.
3WIQA: First Process0.6328.1 Identifying the first step of a given process.
4AdversarialQA: BiDAF0.6125.1Aserialmode-in-the-eby an
5WebQuestions: What is the Answer0.5827.0 Asweringrqomesten based oninformation
", + "type": "table", + "image_path": "8025498f3c910dbeea940a1ac3d7d18dc6c8a3edff07b8bec4d71d0b1464157c.jpg" + } + ] + } + ], + "index": 3, + "virtual_lines": [ + { + "bbox": [ + 107, + 112, + 543, + 143.0 + ], + "spans": [], + "index": 2 + }, + { + "bbox": [ + 107, + 143.0, + 543, + 174.0 + ], + "spans": [], + "index": 3 + }, + { + "bbox": [ + 107, + 174.0, + 543, + 205.0 + ], + "spans": [], + "index": 4 + } + ] + } + ], + "index": 1.75 + }, + { + "type": "title", + "bbox": [ + 108, + 225, + 210, + 239 + ], + "lines": [ + { + "bbox": [ + 105, + 224, + 210, + 241 + ], + "spans": [ + { + "bbox": [ + 105, + 224, + 210, + 241 + ], + "score": 1.0, + "content": "A More Analysis", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 5 + }, + { + "type": "text", + "bbox": [ + 120, + 248, + 367, + 261 + ], + "lines": [ + { + "bbox": [ + 118, + 246, + 368, + 262 + ], + "spans": [ + { + "bbox": [ + 118, + 246, + 368, + 262 + ], + "score": 1.0, + "content": "Which LoRA modules are most effective for BBH tasks?", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 6 + }, + { + "type": "text", + "bbox": [ + 106, + 270, + 505, + 414 + ], + "lines": [ + { + "bbox": [ + 106, + 271, + 505, + 284 + ], + "spans": [ + { + "bbox": [ + 106, + 271, + 505, + 284 + ], + "score": 1.0, + "content": "We hypothesized that the amalgamation of LoRA modules could incorporate skills and", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 281, + 505, + 294 + ], + "spans": [ + { + "bbox": [ + 105, + 281, + 505, + 294 + ], + "score": 1.0, + "content": "insights from a variety of specific tasks. To evaluate this, we examined the extent of influ-", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 292, + 505, + 305 + ], + "spans": [ + { + "bbox": [ + 105, + 292, + 505, + 305 + ], + "score": 1.0, + "content": "ence a single LoRA module had amongst all tasks from the BBH benchmark. 
We measured", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 303, + 505, + 317 + ], + "spans": [ + { + "bbox": [ + 105, + 303, + 505, + 317 + ], + "score": 1.0, + "content": "the impact of each isolated task by calculating the average absolute weight. The top five", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 315, + 505, + 327 + ], + "spans": [ + { + "bbox": [ + 105, + 315, + 505, + 327 + ], + "score": 1.0, + "content": "modules, presented in Table 3, were found to have substantial influence, as indicated by", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 325, + 505, + 338 + ], + "spans": [ + { + "bbox": [ + 105, + 325, + 505, + 338 + ], + "score": 1.0, + "content": "their maximum average weights, which suggested that they were notably more effective in", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 336, + 505, + 349 + ], + "spans": [ + { + "bbox": [ + 106, + 336, + 505, + 349 + ], + "score": 1.0, + "content": "cross-task transfer. Remarkably, a common feature among these top five modules was their", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 348, + 505, + 361 + ], + "spans": [ + { + "bbox": [ + 106, + 348, + 505, + 361 + ], + "score": 1.0, + "content": "association with tasks requiring reading comprehension and reasoning skillsβ€”attributes", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 357, + 506, + 374 + ], + "spans": [ + { + "bbox": [ + 105, + 357, + 506, + 374 + ], + "score": 1.0, + "content": "indicative of higher cognitive complexity. 
However, it is worth noting that none of the", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 368, + 505, + 383 + ], + "spans": [ + { + "bbox": [ + 105, + 368, + 505, + 383 + ], + "score": 1.0, + "content": "modules exhibited consistent improvement across all BBH tasks, as reflected in their av-", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 379, + 505, + 394 + ], + "spans": [ + { + "bbox": [ + 105, + 379, + 505, + 394 + ], + "score": 1.0, + "content": "erage performance on all BBH tasks, which did not show a significant improvement com-", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 390, + 506, + 405 + ], + "spans": [ + { + "bbox": [ + 105, + 390, + 506, + 405 + ], + "score": 1.0, + "content": "pared to the original FLAN-T5-large, except for the Rank 2. The results underscore the", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 402, + 354, + 415 + ], + "spans": [ + { + "bbox": [ + 105, + 402, + 354, + 415 + ], + "score": 1.0, + "content": "advantages of composing diverse modules in LoraHub.", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 13 + }, + { + "type": "text", + "bbox": [ + 119, + 426, + 369, + 439 + ], + "lines": [ + { + "bbox": [ + 118, + 425, + 370, + 439 + ], + "spans": [ + { + "bbox": [ + 118, + 425, + 370, + 439 + ], + "score": 1.0, + "content": "How effective is the gradient-free optimization method?", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 20 + }, + { + "type": "text", + "bbox": [ + 106, + 451, + 505, + 584 + ], + "lines": [ + { + "bbox": [ + 105, + 451, + 506, + 465 + ], + "spans": [ + { + "bbox": [ + 105, + 451, + 506, + 465 + ], + "score": 1.0, + "content": "To assess the effectiveness of our gradient-free optimization method in correctly identi-", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 106, + 463, + 506, + 475 + ], + "spans": [ + { + "bbox": [ + 106, + 463, + 506, + 475 + ], + "score": 1.0, + 
"content": "fying the most suitable LoRA module for a given downstream task, we carried out an", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 106, + 473, + 506, + 487 + ], + "spans": [ + { + "bbox": [ + 106, + 473, + 506, + 487 + ], + "score": 1.0, + "content": "empirical study using the WikiTableQuestions (Pasupat & Liang, 2015) (WTQ) dataset. We", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 484, + 506, + 497 + ], + "spans": [ + { + "bbox": [ + 105, + 484, + 506, + 497 + ], + "score": 1.0, + "content": "strategically included a LoRA module that was specifically trained on the WTQ dataset", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 496, + 505, + 508 + ], + "spans": [ + { + "bbox": [ + 105, + 496, + 505, + 508 + ], + "score": 1.0, + "content": "into our pool of LoRA candidate modules, which originally stemmed from tasks exclusive", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 505, + 506, + 519 + ], + "spans": [ + { + "bbox": [ + 105, + 505, + 506, + 519 + ], + "score": 1.0, + "content": "to the Flan Collection. Subsequently, we designated WTQ as the targeted downstream task", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 104, + 516, + 506, + 532 + ], + "spans": [ + { + "bbox": [ + 104, + 516, + 506, + 532 + ], + "score": 1.0, + "content": "and computed the weights consistent with the methods employed in LoraHub learning.", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 528, + 505, + 541 + ], + "spans": [ + { + "bbox": [ + 105, + 528, + 505, + 541 + ], + "score": 1.0, + "content": "As an end result, the WTQ-specific LoRA module was awarded the highest weight, ex-", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 538, + 506, + 553 + ], + "spans": [ + { + "bbox": [ + 105, + 538, + 506, + 553 + ], + "score": 1.0, + "content": "emplifying the algorithm’s success in recognizing it as the most relevant. 
Moreover, the", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 550, + 505, + 564 + ], + "spans": [ + { + "bbox": [ + 105, + 550, + 505, + 564 + ], + "score": 1.0, + "content": "combined LoRA module demonstrated marginal superiority over the WTQ LoRA module.", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 560, + 505, + 574 + ], + "spans": [ + { + "bbox": [ + 105, + 560, + 505, + 574 + ], + "score": 1.0, + "content": "This underscores the claim that the gradient-free optimization method has the ability to", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 572, + 434, + 585 + ], + "spans": [ + { + "bbox": [ + 105, + 572, + 434, + 585 + ], + "score": 1.0, + "content": "proficiently select the optimal upstream LoRA module for an unseen task.", + "type": "text" + } + ], + "index": 32 + } + ], + "index": 26.5 + }, + { + "type": "title", + "bbox": [ + 108, + 599, + 247, + 613 + ], + "lines": [ + { + "bbox": [ + 105, + 599, + 248, + 615 + ], + "spans": [ + { + "bbox": [ + 105, + 599, + 248, + 615 + ], + "score": 1.0, + "content": "B Result of Best Results", + "type": "text" + } + ], + "index": 33 + } + ], + "index": 33 + }, + { + "type": "text", + "bbox": [ + 107, + 625, + 505, + 703 + ], + "lines": [ + { + "bbox": [ + 105, + 624, + 506, + 640 + ], + "spans": [ + { + "bbox": [ + 105, + 624, + 506, + 640 + ], + "score": 1.0, + "content": "As shown in Table 4, compared to gradient-based parameter-efficient training methods like", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 636, + 506, + 651 + ], + "spans": [ + { + "bbox": [ + 105, + 636, + 506, + 651 + ], + "score": 1.0, + "content": "LoRA and IA3, our approach demonstrates superior performance in terms of best results", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 646, + 505, + 662 + ], + "spans": [ + { + "bbox": [ + 105, + 646, + 505, + 662 + ], + "score": 1.0, + "content": "over experimental runs. 
While it exhibits a noticeable lag behind the fully fine-tuning (FFT)", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 658, + 506, + 672 + ], + "spans": [ + { + "bbox": [ + 105, + 658, + 506, + 672 + ], + "score": 1.0, + "content": "method, which updates all parameters during training, this observation suggests that our", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 104, + 668, + 506, + 682 + ], + "spans": [ + { + "bbox": [ + 104, + 668, + 506, + 682 + ], + "score": 1.0, + "content": "proposed method has a promising upper limit. We anticipate that future research efforts", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 680, + 506, + 695 + ], + "spans": [ + { + "bbox": [ + 105, + 680, + 506, + 695 + ], + "score": 1.0, + "content": "can contribute to accelerating the optimization speed and further enhancing the efficacy of", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 691, + 171, + 704 + ], + "spans": [ + { + "bbox": [ + 105, + 691, + 171, + 704 + ], + "score": 1.0, + "content": "our approach.", + "type": "text" + } + ], + "index": 40 + } + ], + "index": 37 + } + ], + "page_idx": 14, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 26, + 316, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "score": 1.0, + "content": "Published as a conference paper at COLM 2024", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 760 + ], + "lines": [ + { + "bbox": [ + 298, + 750, + 313, + 765 + ], + "spans": [ + { + "bbox": [ + 298, + 750, + 313, + 765 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 15, + "width": 15 + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "table", + "bbox": [ + 107, + 112, + 543, + 205 + ], + "blocks": [ + { + "type": "table_caption", + "bbox": [ + 106, 
+ 79, + 505, + 103 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 106, + 79, + 505, + 93 + ], + "spans": [ + { + "bbox": [ + 106, + 79, + 505, + 93 + ], + "score": 1.0, + "content": "Table 3: The top five beneficial LoRA modules for BBH tasks and their associated upstream", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 90, + 460, + 104 + ], + "spans": [ + { + "bbox": [ + 105, + 90, + 460, + 104 + ], + "score": 1.0, + "content": "tasks, the average weight values and the average performance on all BBH tasks.", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 0.5 + }, + { + "type": "table_body", + "bbox": [ + 107, + 112, + 543, + 205 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 107, + 112, + 543, + 205 + ], + "spans": [ + { + "bbox": [ + 107, + 112, + 543, + 205 + ], + "score": 0.98, + "html": "
RankDataset: TaskWeightPerfTask Description
1WIQA: Last Process0.7228.1 Identifying the last step of a given process.
2RACE: Is this the Right Answer0.6830.8Determining if given answer is correct.
3WIQA: First Process0.6328.1 Identifying the first step of a given process.
4AdversarialQA: BiDAF0.6125.1Aserialmode-in-the-eby an
5WebQuestions: What is the Answer0.5827.0 Asweringrqomesten based oninformation
", + "type": "table", + "image_path": "8025498f3c910dbeea940a1ac3d7d18dc6c8a3edff07b8bec4d71d0b1464157c.jpg" + } + ] + } + ], + "index": 3, + "virtual_lines": [ + { + "bbox": [ + 107, + 112, + 543, + 143.0 + ], + "spans": [], + "index": 2 + }, + { + "bbox": [ + 107, + 143.0, + 543, + 174.0 + ], + "spans": [], + "index": 3 + }, + { + "bbox": [ + 107, + 174.0, + 543, + 205.0 + ], + "spans": [], + "index": 4 + } + ] + } + ], + "index": 1.75 + }, + { + "type": "title", + "bbox": [ + 108, + 225, + 210, + 239 + ], + "lines": [ + { + "bbox": [ + 105, + 224, + 210, + 241 + ], + "spans": [ + { + "bbox": [ + 105, + 224, + 210, + 241 + ], + "score": 1.0, + "content": "A More Analysis", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 5 + }, + { + "type": "text", + "bbox": [ + 120, + 248, + 367, + 261 + ], + "lines": [ + { + "bbox": [ + 118, + 246, + 368, + 262 + ], + "spans": [ + { + "bbox": [ + 118, + 246, + 368, + 262 + ], + "score": 1.0, + "content": "Which LoRA modules are most effective for BBH tasks?", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 6, + "bbox_fs": [ + 118, + 246, + 368, + 262 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 270, + 505, + 414 + ], + "lines": [ + { + "bbox": [ + 106, + 271, + 505, + 284 + ], + "spans": [ + { + "bbox": [ + 106, + 271, + 505, + 284 + ], + "score": 1.0, + "content": "We hypothesized that the amalgamation of LoRA modules could incorporate skills and", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 281, + 505, + 294 + ], + "spans": [ + { + "bbox": [ + 105, + 281, + 505, + 294 + ], + "score": 1.0, + "content": "insights from a variety of specific tasks. To evaluate this, we examined the extent of influ-", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 292, + 505, + 305 + ], + "spans": [ + { + "bbox": [ + 105, + 292, + 505, + 305 + ], + "score": 1.0, + "content": "ence a single LoRA module had amongst all tasks from the BBH benchmark. 
We measured", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 303, + 505, + 317 + ], + "spans": [ + { + "bbox": [ + 105, + 303, + 505, + 317 + ], + "score": 1.0, + "content": "the impact of each isolated task by calculating the average absolute weight. The top five", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 315, + 505, + 327 + ], + "spans": [ + { + "bbox": [ + 105, + 315, + 505, + 327 + ], + "score": 1.0, + "content": "modules, presented in Table 3, were found to have substantial influence, as indicated by", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 325, + 505, + 338 + ], + "spans": [ + { + "bbox": [ + 105, + 325, + 505, + 338 + ], + "score": 1.0, + "content": "their maximum average weights, which suggested that they were notably more effective in", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 336, + 505, + 349 + ], + "spans": [ + { + "bbox": [ + 106, + 336, + 505, + 349 + ], + "score": 1.0, + "content": "cross-task transfer. Remarkably, a common feature among these top five modules was their", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 348, + 505, + 361 + ], + "spans": [ + { + "bbox": [ + 106, + 348, + 505, + 361 + ], + "score": 1.0, + "content": "association with tasks requiring reading comprehension and reasoning skillsβ€”attributes", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 357, + 506, + 374 + ], + "spans": [ + { + "bbox": [ + 105, + 357, + 506, + 374 + ], + "score": 1.0, + "content": "indicative of higher cognitive complexity. 
However, it is worth noting that none of the", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 368, + 505, + 383 + ], + "spans": [ + { + "bbox": [ + 105, + 368, + 505, + 383 + ], + "score": 1.0, + "content": "modules exhibited consistent improvement across all BBH tasks, as reflected in their av-", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 379, + 505, + 394 + ], + "spans": [ + { + "bbox": [ + 105, + 379, + 505, + 394 + ], + "score": 1.0, + "content": "erage performance on all BBH tasks, which did not show a significant improvement com-", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 390, + 506, + 405 + ], + "spans": [ + { + "bbox": [ + 105, + 390, + 506, + 405 + ], + "score": 1.0, + "content": "pared to the original FLAN-T5-large, except for the Rank 2. The results underscore the", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 402, + 354, + 415 + ], + "spans": [ + { + "bbox": [ + 105, + 402, + 354, + 415 + ], + "score": 1.0, + "content": "advantages of composing diverse modules in LoraHub.", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 13, + "bbox_fs": [ + 105, + 271, + 506, + 415 + ] + }, + { + "type": "text", + "bbox": [ + 119, + 426, + 369, + 439 + ], + "lines": [ + { + "bbox": [ + 118, + 425, + 370, + 439 + ], + "spans": [ + { + "bbox": [ + 118, + 425, + 370, + 439 + ], + "score": 1.0, + "content": "How effective is the gradient-free optimization method?", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 20, + "bbox_fs": [ + 118, + 425, + 370, + 439 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 451, + 505, + 584 + ], + "lines": [ + { + "bbox": [ + 105, + 451, + 506, + 465 + ], + "spans": [ + { + "bbox": [ + 105, + 451, + 506, + 465 + ], + "score": 1.0, + "content": "To assess the effectiveness of our gradient-free optimization method in correctly identi-", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 106, + 463, + 
506, + 475 + ], + "spans": [ + { + "bbox": [ + 106, + 463, + 506, + 475 + ], + "score": 1.0, + "content": "fying the most suitable LoRA module for a given downstream task, we carried out an", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 106, + 473, + 506, + 487 + ], + "spans": [ + { + "bbox": [ + 106, + 473, + 506, + 487 + ], + "score": 1.0, + "content": "empirical study using the WikiTableQuestions (Pasupat & Liang, 2015) (WTQ) dataset. We", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 484, + 506, + 497 + ], + "spans": [ + { + "bbox": [ + 105, + 484, + 506, + 497 + ], + "score": 1.0, + "content": "strategically included a LoRA module that was specifically trained on the WTQ dataset", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 496, + 505, + 508 + ], + "spans": [ + { + "bbox": [ + 105, + 496, + 505, + 508 + ], + "score": 1.0, + "content": "into our pool of LoRA candidate modules, which originally stemmed from tasks exclusive", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 505, + 506, + 519 + ], + "spans": [ + { + "bbox": [ + 105, + 505, + 506, + 519 + ], + "score": 1.0, + "content": "to the Flan Collection. 
Subsequently, we designated WTQ as the targeted downstream task", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 104, + 516, + 506, + 532 + ], + "spans": [ + { + "bbox": [ + 104, + 516, + 506, + 532 + ], + "score": 1.0, + "content": "and computed the weights consistent with the methods employed in LoraHub learning.", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 528, + 505, + 541 + ], + "spans": [ + { + "bbox": [ + 105, + 528, + 505, + 541 + ], + "score": 1.0, + "content": "As an end result, the WTQ-specific LoRA module was awarded the highest weight, ex-", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 538, + 506, + 553 + ], + "spans": [ + { + "bbox": [ + 105, + 538, + 506, + 553 + ], + "score": 1.0, + "content": "emplifying the algorithm’s success in recognizing it as the most relevant. Moreover, the", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 550, + 505, + 564 + ], + "spans": [ + { + "bbox": [ + 105, + 550, + 505, + 564 + ], + "score": 1.0, + "content": "combined LoRA module demonstrated marginal superiority over the WTQ LoRA module.", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 560, + 505, + 574 + ], + "spans": [ + { + "bbox": [ + 105, + 560, + 505, + 574 + ], + "score": 1.0, + "content": "This underscores the claim that the gradient-free optimization method has the ability to", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 572, + 434, + 585 + ], + "spans": [ + { + "bbox": [ + 105, + 572, + 434, + 585 + ], + "score": 1.0, + "content": "proficiently select the optimal upstream LoRA module for an unseen task.", + "type": "text" + } + ], + "index": 32 + } + ], + "index": 26.5, + "bbox_fs": [ + 104, + 451, + 506, + 585 + ] + }, + { + "type": "title", + "bbox": [ + 108, + 599, + 247, + 613 + ], + "lines": [ + { + "bbox": [ + 105, + 599, + 248, + 615 + ], + "spans": [ + { + "bbox": [ + 105, + 599, + 248, + 615 + ], + "score": 
1.0, + "content": "B Result of Best Results", + "type": "text" + } + ], + "index": 33 + } + ], + "index": 33 + }, + { + "type": "text", + "bbox": [ + 107, + 625, + 505, + 703 + ], + "lines": [ + { + "bbox": [ + 105, + 624, + 506, + 640 + ], + "spans": [ + { + "bbox": [ + 105, + 624, + 506, + 640 + ], + "score": 1.0, + "content": "As shown in Table 4, compared to gradient-based parameter-efficient training methods like", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 636, + 506, + 651 + ], + "spans": [ + { + "bbox": [ + 105, + 636, + 506, + 651 + ], + "score": 1.0, + "content": "LoRA and IA3, our approach demonstrates superior performance in terms of best results", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 646, + 505, + 662 + ], + "spans": [ + { + "bbox": [ + 105, + 646, + 505, + 662 + ], + "score": 1.0, + "content": "over experimental runs. While it exhibits a noticeable lag behind the fully fine-tuning (FFT)", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 658, + 506, + 672 + ], + "spans": [ + { + "bbox": [ + 105, + 658, + 506, + 672 + ], + "score": 1.0, + "content": "method, which updates all parameters during training, this observation suggests that our", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 104, + 668, + 506, + 682 + ], + "spans": [ + { + "bbox": [ + 104, + 668, + 506, + 682 + ], + "score": 1.0, + "content": "proposed method has a promising upper limit. 
We anticipate that future research efforts", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 680, + 506, + 695 + ], + "spans": [ + { + "bbox": [ + 105, + 680, + 506, + 695 + ], + "score": 1.0, + "content": "can contribute to accelerating the optimization speed and further enhancing the efficacy of", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 691, + 171, + 704 + ], + "spans": [ + { + "bbox": [ + 105, + 691, + 171, + 704 + ], + "score": 1.0, + "content": "our approach.", + "type": "text" + } + ], + "index": 40 + } + ], + "index": 37, + "bbox_fs": [ + 104, + 624, + 506, + 704 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "table", + "bbox": [ + 112, + 255, + 497, + 630 + ], + "blocks": [ + { + "type": "table_caption", + "bbox": [ + 106, + 180, + 505, + 247 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 178, + 506, + 194 + ], + "spans": [ + { + "bbox": [ + 105, + 178, + 506, + 194 + ], + "score": 1.0, + "content": "Table 4: Experimental results of several few-shot methods, including in-context learning", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 190, + 506, + 205 + ], + "spans": [ + { + "bbox": [ + 105, + 190, + 506, + 205 + ], + "score": 1.0, + "content": "(ICL), IA3 fine-tuning (IA3), LoRA tuning (LoRA), full fine-tuning (FFT) and our LoraHub", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 202, + 505, + 215 + ], + "spans": [ + { + "bbox": [ + 106, + 202, + 505, + 215 + ], + "score": 1.0, + "content": "learning (LoraHub) on the BBH benchmark with FLAN-T5-large as the base LLM. 
We de-", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 212, + 505, + 226 + ], + "spans": [ + { + "bbox": [ + 104, + 212, + 300, + 226 + ], + "score": 1.0, + "content": "note algorithmic tasks with the superscript", + "type": "text" + }, + { + "bbox": [ + 300, + 213, + 308, + 225 + ], + "score": 0.76, + "content": "\\ S", + "type": "inline_equation" + }, + { + "bbox": [ + 308, + 212, + 505, + 226 + ], + "score": 1.0, + "content": "following previous work (Wu et al., 2023b).", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 223, + 506, + 237 + ], + "spans": [ + { + "bbox": [ + 105, + 223, + 506, + 237 + ], + "score": 1.0, + "content": "Note that we use 5 examples per task as the demonstration for all methods. The best (best)", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 235, + 439, + 248 + ], + "spans": [ + { + "bbox": [ + 105, + 235, + 439, + 248 + ], + "score": 1.0, + "content": "performance is reported as the maximum value obtained across three runs.", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 2.5 + }, + { + "type": "table_body", + "bbox": [ + 112, + 255, + 497, + 630 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 112, + 255, + 497, + 630 + ], + "spans": [ + { + "bbox": [ + 112, + 255, + 497, + 630 + ], + "score": 0.983, + "html": "
TaskICLbestIA3bestLoRAbestFFTbestLoraHubbest
Boolean Expressions62.758.060.765.360.7
Causal Judgement59.862.157.560.963.2
Date Understanding21.320.740.767.345.3
Disambiguation69.30.068.770.768.0
Dyck Languages2.04.725.333.32.7
Formal Fallacies59.352.056.756.059.3
Geometric Shapes20.015.328.739.318.7
Hyperbaton72.749.357.382.072.7
Logical DeductionS (five objects)39.332.741.343.340.0
Logical DeductionS (seven objects)42.034.042.746.046.0
LogicalDrectjoets)52.78.756.760.752.7
Movie Recommendation56.762.064.570.762.0
Multistep Arithmetic0.70.70.70.01.3
Navigate46.747.350.750.051.3
Object Counting34.735.342.038.036.7
Penguins in a Table43.545.741.337.047.8
Reasoning about Colored Objects41.341.340.738.744.7
Ruin Names20.725.342.066.028.7
Salient Translation Error Detection48.037.317.321.342.7
Snarks55.156.459.069.261.5
Sports Understanding56.755.358.758.762.7
Temporal Sequences26.718.731.348.721.3
Tracking Shuffled ObjectsS (five objects)12.012.016.020.016.7
Tracking Shuffled ObjectsS (seven objects)6.76.712.010.015.3
Tracking Shuffled ObjectsS (three objects)31.330.732.036.031.3
Web of Lies54.054.755.354.057.3
Word Sorting0.71.35.36.01.3
Best Performance (Average)38.432.140.946.241.2
", + "type": "table", + "image_path": "95c66e826a893a9a4a0a9da3fa42a7b74e1324af47f01994c7ebe53ba5447ca0.jpg" + } + ] + } + ], + "index": 7, + "virtual_lines": [ + { + "bbox": [ + 112, + 255, + 497, + 380.0 + ], + "spans": [], + "index": 6 + }, + { + "bbox": [ + 112, + 380.0, + 497, + 505.0 + ], + "spans": [], + "index": 7 + }, + { + "bbox": [ + 112, + 505.0, + 497, + 630.0 + ], + "spans": [], + "index": 8 + } + ] + } + ], + "index": 4.75 + } + ], + "page_idx": 15, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 106, + 26, + 316, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "score": 1.0, + "content": "Published as a conference paper at COLM 2024", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 760 + ], + "lines": [ + { + "bbox": [ + 299, + 750, + 313, + 764 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 313, + 764 + ], + "score": 1.0, + "content": "16", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "table", + "bbox": [ + 112, + 255, + 497, + 630 + ], + "blocks": [ + { + "type": "table_caption", + "bbox": [ + 106, + 180, + 505, + 247 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 178, + 506, + 194 + ], + "spans": [ + { + "bbox": [ + 105, + 178, + 506, + 194 + ], + "score": 1.0, + "content": "Table 4: Experimental results of several few-shot methods, including in-context learning", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 190, + 506, + 205 + ], + "spans": [ + { + "bbox": [ + 105, + 190, + 506, + 205 + ], + "score": 1.0, + "content": "(ICL), IA3 fine-tuning (IA3), LoRA tuning (LoRA), full fine-tuning (FFT) and our LoraHub", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 202, + 505, + 215 + ], + "spans": [ + { + "bbox": [ + 106, + 202, + 505, + 215 + ], + "score": 1.0, + "content": 
"learning (LoraHub) on the BBH benchmark with FLAN-T5-large as the base LLM. We de-", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 212, + 505, + 226 + ], + "spans": [ + { + "bbox": [ + 104, + 212, + 300, + 226 + ], + "score": 1.0, + "content": "note algorithmic tasks with the superscript", + "type": "text" + }, + { + "bbox": [ + 300, + 213, + 308, + 225 + ], + "score": 0.76, + "content": "\\ S", + "type": "inline_equation" + }, + { + "bbox": [ + 308, + 212, + 505, + 226 + ], + "score": 1.0, + "content": "following previous work (Wu et al., 2023b).", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 223, + 506, + 237 + ], + "spans": [ + { + "bbox": [ + 105, + 223, + 506, + 237 + ], + "score": 1.0, + "content": "Note that we use 5 examples per task as the demonstration for all methods. The best (best)", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 235, + 439, + 248 + ], + "spans": [ + { + "bbox": [ + 105, + 235, + 439, + 248 + ], + "score": 1.0, + "content": "performance is reported as the maximum value obtained across three runs.", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 2.5 + }, + { + "type": "table_body", + "bbox": [ + 112, + 255, + 497, + 630 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 112, + 255, + 497, + 630 + ], + "spans": [ + { + "bbox": [ + 112, + 255, + 497, + 630 + ], + "score": 0.983, + "html": "
TaskICLbestIA3bestLoRAbestFFTbestLoraHubbest
Boolean Expressions62.758.060.765.360.7
Causal Judgement59.862.157.560.963.2
Date Understanding21.320.740.767.345.3
Disambiguation69.30.068.770.768.0
Dyck Languages2.04.725.333.32.7
Formal Fallacies59.352.056.756.059.3
Geometric Shapes20.015.328.739.318.7
Hyperbaton72.749.357.382.072.7
Logical DeductionS (five objects)39.332.741.343.340.0
Logical DeductionS (seven objects)42.034.042.746.046.0
LogicalDrectjoets)52.78.756.760.752.7
Movie Recommendation56.762.064.570.762.0
Multistep Arithmetic0.70.70.70.01.3
Navigate46.747.350.750.051.3
Object Counting34.735.342.038.036.7
Penguins in a Table43.545.741.337.047.8
Reasoning about Colored Objects41.341.340.738.744.7
Ruin Names20.725.342.066.028.7
Salient Translation Error Detection48.037.317.321.342.7
Snarks55.156.459.069.261.5
Sports Understanding56.755.358.758.762.7
Temporal Sequences26.718.731.348.721.3
Tracking Shuffled ObjectsS (five objects)12.012.016.020.016.7
Tracking Shuffled ObjectsS (seven objects)6.76.712.010.015.3
Tracking Shuffled ObjectsS (three objects)31.330.732.036.031.3
Web of Lies54.054.755.354.057.3
Word Sorting0.71.35.36.01.3
Best Performance (Average)38.432.140.946.241.2
", + "type": "table", + "image_path": "95c66e826a893a9a4a0a9da3fa42a7b74e1324af47f01994c7ebe53ba5447ca0.jpg" + } + ] + } + ], + "index": 7, + "virtual_lines": [ + { + "bbox": [ + 112, + 255, + 497, + 380.0 + ], + "spans": [], + "index": 6 + }, + { + "bbox": [ + 112, + 380.0, + 497, + 505.0 + ], + "spans": [], + "index": 7 + }, + { + "bbox": [ + 112, + 505.0, + 497, + 630.0 + ], + "spans": [], + "index": 8 + } + ] + } + ], + "index": 4.75 + } + ] + }, + { + "preproc_blocks": [ + { + "type": "title", + "bbox": [ + 106, + 80, + 345, + 95 + ], + "lines": [ + { + "bbox": [ + 106, + 80, + 345, + 96 + ], + "spans": [ + { + "bbox": [ + 106, + 80, + 345, + 96 + ], + "score": 1.0, + "content": "C Result of non-instrcution-tuned models", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 115, + 161, + 493, + 574 + ], + "blocks": [ + { + "type": "table_caption", + "bbox": [ + 106, + 109, + 506, + 153 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 106, + 109, + 505, + 122 + ], + "spans": [ + { + "bbox": [ + 106, + 109, + 505, + 122 + ], + "score": 1.0, + "content": "Table 5: Comparsion among different ranks for few-shot LoraHub learning with the back-", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 119, + 506, + 133 + ], + "spans": [ + { + "bbox": [ + 105, + 119, + 506, + 133 + ], + "score": 1.0, + "content": "bone T5-large (Raffel et al., 2020) on the BBH benchmark. Note that the T5-large model", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 131, + 506, + 144 + ], + "spans": [ + { + "bbox": [ + 105, + 131, + 149, + 144 + ], + "score": 1.0, + "content": "achieved", + "type": "text" + }, + { + "bbox": [ + 150, + 132, + 172, + 142 + ], + "score": 0.86, + "content": "0 . 
{ \\bar { 0 } } \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 173, + 131, + 506, + 144 + ], + "score": 1.0, + "content": "on all tasks under the zero-shot setting except Dyck Languages, where it", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 142, + 168, + 153 + ], + "spans": [ + { + "bbox": [ + 105, + 142, + 137, + 153 + ], + "score": 1.0, + "content": "scored", + "type": "text" + }, + { + "bbox": [ + 138, + 142, + 164, + 153 + ], + "score": 0.86, + "content": "0 . 6 7 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 165, + 142, + 168, + 153 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 2.5 + }, + { + "type": "table_body", + "bbox": [ + 115, + 161, + 493, + 574 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 115, + 161, + 493, + 574 + ], + "spans": [ + { + "bbox": [ + 115, + 161, + 493, + 574 + ], + "score": 0.984, + "html": "
Task ↓ Rank β†’4best 4avg16avg16best64avg64best
Boolean Expressions52.13 57.3350.6758.0047.4758.00
Causal Judgement52.4155.1749.6654.0250.8054.02
Date Understanding0.402.0014.4029.334.5310.00
Disambiguation10.0031.3326.9342.001.734.67
Dyck Languages0.400.670.400.670.402.00
Formal Fallacies48.4054.0046.9351.3346.9350.00
Geometric Shapes0.000.006.5332.671.477.33
Hyperbaton30.1350.0039.07 57.3332.9348.00
Logical DeductionS (five objects)5.2014.678.8019.331.336.67
Logical DeductionS (seven objects)6.4017.339.3319.333.4716.00
Logical DeductionS14.4032.0021.7334.676.9315.33
(three objects) Movie Recommendation7.0718.677.8722.001.206.00
Multistep Arithmetic two0.000.000.000.000.000.00
Navigate49.6054.6752.2756.6749.8752.00
Object Counting7.2018.0016.0021.3313.7326.67
Penguins ina Table6.5213.0410.4317.390.432.17
Reasoning about Colored Objects6.2710.005.0716.670.532.67
Ruin Names7.7313.3313.2028.005.7315.33
Salient Translation Error Detection0.000.001.738.670.000.00
Snarks21.2842.3149.4960.2616.1538.46
Sports Understanding46.5358.6746.8058.6746.5358.67
Temporal Sequences3.0713.336.5326.672.4012.00
Tracking Shuffled ObjectsS5.2014.004.139.330.130.67
(five objects) Tracking Shuffled ObjectsS (seven objects)2.6710.002.8014.003.208.00
Tracking Shuffled ObjectsS3.7317.3316.2734.675.8726.67
(three objects) Web of Lies48.5354.00 57.33
Word Sorting0.400.6754.00 0.1356.00 0.6754.67 0.000.00
20.78
Average Performance per Task16.1424.1730.7314.7621.43
", + "type": "table", + "image_path": "0a96643f4a27749431647c2c4c4281dfc58fdf816ad0790812621e1af017102a.jpg" + } + ] + } + ], + "index": 6, + "virtual_lines": [ + { + "bbox": [ + 115, + 161, + 493, + 298.66666666666663 + ], + "spans": [], + "index": 5 + }, + { + "bbox": [ + 115, + 298.66666666666663, + 493, + 436.33333333333326 + ], + "spans": [], + "index": 6 + }, + { + "bbox": [ + 115, + 436.33333333333326, + 493, + 573.9999999999999 + ], + "spans": [], + "index": 7 + } + ] + } + ], + "index": 4.25 + } + ], + "page_idx": 16, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 26, + 316, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "score": 1.0, + "content": "Published as a conference paper at COLM 2024", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 760 + ], + "lines": [ + { + "bbox": [ + 298, + 750, + 313, + 765 + ], + "spans": [ + { + "bbox": [ + 298, + 750, + 313, + 765 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 15, + "width": 15 + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "title", + "bbox": [ + 106, + 80, + 345, + 95 + ], + "lines": [ + { + "bbox": [ + 106, + 80, + 345, + 96 + ], + "spans": [ + { + "bbox": [ + 106, + 80, + 345, + 96 + ], + "score": 1.0, + "content": "C Result of non-instrcution-tuned models", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 115, + 161, + 493, + 574 + ], + "blocks": [ + { + "type": "table_caption", + "bbox": [ + 106, + 109, + 506, + 153 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 106, + 109, + 505, + 122 + ], + "spans": [ + { + "bbox": [ + 106, + 109, + 505, + 122 + ], + "score": 1.0, + "content": "Table 5: Comparsion among different ranks for few-shot LoraHub learning with the back-", + "type": "text" + } + ], + "index": 1 + }, + { + 
"bbox": [ + 105, + 119, + 506, + 133 + ], + "spans": [ + { + "bbox": [ + 105, + 119, + 506, + 133 + ], + "score": 1.0, + "content": "bone T5-large (Raffel et al., 2020) on the BBH benchmark. Note that the T5-large model", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 131, + 506, + 144 + ], + "spans": [ + { + "bbox": [ + 105, + 131, + 149, + 144 + ], + "score": 1.0, + "content": "achieved", + "type": "text" + }, + { + "bbox": [ + 150, + 132, + 172, + 142 + ], + "score": 0.86, + "content": "0 . { \\bar { 0 } } \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 173, + 131, + 506, + 144 + ], + "score": 1.0, + "content": "on all tasks under the zero-shot setting except Dyck Languages, where it", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 142, + 168, + 153 + ], + "spans": [ + { + "bbox": [ + 105, + 142, + 137, + 153 + ], + "score": 1.0, + "content": "scored", + "type": "text" + }, + { + "bbox": [ + 138, + 142, + 164, + 153 + ], + "score": 0.86, + "content": "0 . 6 7 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 165, + 142, + 168, + 153 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 2.5 + }, + { + "type": "table_body", + "bbox": [ + 115, + 161, + 493, + 574 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 115, + 161, + 493, + 574 + ], + "spans": [ + { + "bbox": [ + 115, + 161, + 493, + 574 + ], + "score": 0.984, + "html": "
Task ↓ Rank β†’4best 4avg16avg16best64avg64best
Boolean Expressions52.13 57.3350.6758.0047.4758.00
Causal Judgement52.4155.1749.6654.0250.8054.02
Date Understanding0.402.0014.4029.334.5310.00
Disambiguation10.0031.3326.9342.001.734.67
Dyck Languages0.400.670.400.670.402.00
Formal Fallacies48.4054.0046.9351.3346.9350.00
Geometric Shapes0.000.006.5332.671.477.33
Hyperbaton30.1350.0039.07 57.3332.9348.00
Logical DeductionS (five objects)5.2014.678.8019.331.336.67
Logical DeductionS (seven objects)6.4017.339.3319.333.4716.00
Logical DeductionS14.4032.0021.7334.676.9315.33
(three objects) Movie Recommendation7.0718.677.8722.001.206.00
Multistep Arithmetic two0.000.000.000.000.000.00
Navigate49.6054.6752.2756.6749.8752.00
Object Counting7.2018.0016.0021.3313.7326.67
Penguins ina Table6.5213.0410.4317.390.432.17
Reasoning about Colored Objects6.2710.005.0716.670.532.67
Ruin Names7.7313.3313.2028.005.7315.33
Salient Translation Error Detection0.000.001.738.670.000.00
Snarks21.2842.3149.4960.2616.1538.46
Sports Understanding46.5358.6746.8058.6746.5358.67
Temporal Sequences3.0713.336.5326.672.4012.00
Tracking Shuffled ObjectsS5.2014.004.139.330.130.67
(five objects) Tracking Shuffled ObjectsS (seven objects)2.6710.002.8014.003.208.00
Tracking Shuffled ObjectsS3.7317.3316.2734.675.8726.67
(three objects) Web of Lies48.5354.00 57.33
Word Sorting0.400.6754.00 0.1356.00 0.6754.67 0.000.00
20.78
Average Performance per Task16.1424.1730.7314.7621.43
", + "type": "table", + "image_path": "0a96643f4a27749431647c2c4c4281dfc58fdf816ad0790812621e1af017102a.jpg" + } + ] + } + ], + "index": 6, + "virtual_lines": [ + { + "bbox": [ + 115, + 161, + 493, + 298.66666666666663 + ], + "spans": [], + "index": 5 + }, + { + "bbox": [ + 115, + 298.66666666666663, + 493, + 436.33333333333326 + ], + "spans": [], + "index": 6 + }, + { + "bbox": [ + 115, + 436.33333333333326, + 493, + 573.9999999999999 + ], + "spans": [], + "index": 7 + } + ] + } + ], + "index": 4.25 + } + ] + }, + { + "preproc_blocks": [ + { + "type": "title", + "bbox": [ + 108, + 80, + 252, + 95 + ], + "lines": [ + { + "bbox": [ + 105, + 78, + 254, + 99 + ], + "spans": [ + { + "bbox": [ + 105, + 78, + 254, + 99 + ], + "score": 1.0, + "content": "D Result of larger model", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 106, + 111, + 505, + 179 + ], + "lines": [ + { + "bbox": [ + 106, + 111, + 505, + 124 + ], + "spans": [ + { + "bbox": [ + 106, + 111, + 505, + 124 + ], + "score": 1.0, + "content": "Table 6: Experimental results of zero-shot learning (Zero) and our few-shot LoraHub learn-", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 506, + 136 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 506, + 136 + ], + "score": 1.0, + "content": "ing (LoraHub) on the BBH benchmark with FLAN-T5-xl as the base LLM. Note that we use", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 133, + 506, + 148 + ], + "spans": [ + { + "bbox": [ + 104, + 133, + 462, + 148 + ], + "score": 1.0, + "content": "5 examples per task as the demonstration for both ICL and LoraHub. 
The average", + "type": "text" + }, + { + "bbox": [ + 462, + 135, + 484, + 146 + ], + "score": 0.27, + "content": "( a v g )", + "type": "inline_equation" + }, + { + "bbox": [ + 484, + 133, + 506, + 148 + ], + "score": 1.0, + "content": "per-", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 145, + 505, + 158 + ], + "spans": [ + { + "bbox": [ + 106, + 145, + 505, + 158 + ], + "score": 1.0, + "content": "formance of LoraHub is computed over 5 runs with different random seeds, while the best", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 155, + 506, + 169 + ], + "spans": [ + { + "bbox": [ + 105, + 155, + 506, + 169 + ], + "score": 1.0, + "content": "(best) performance is reported as the maximum value obtained across these runs. We can", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 165, + 357, + 181 + ], + "spans": [ + { + "bbox": [ + 104, + 165, + 357, + 181 + ], + "score": 1.0, + "content": "see the trend of the results are similar to FLAN-T5-large.", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 3.5 + }, + { + "type": "table", + "bbox": [ + 140, + 184, + 474, + 600 + ], + "blocks": [ + { + "type": "table_body", + "bbox": [ + 140, + 184, + 474, + 600 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 140, + 184, + 474, + 600 + ], + "spans": [ + { + "bbox": [ + 140, + 184, + 474, + 600 + ], + "score": 0.976, + "html": "
TaskZeroLoraHub avgLoraHub best
Boolean Expressions52.058.763.3
Causal Judgement62.153.859.8
Date Understanding38.037.638.0
Disambiguation Qa0.020.5 54.7
Dyck Languages1.30.92.0
Formal Fallacies56.056.056.0
Geometric Shapes8.717.528.0
Hyperbaton45.353.556.7
Logical DeductionS (five objects)1.342.748.7
Logical DeductionS (seven objects)8.744.350.0
Logical DeductionS (three objects)0.756.461.3
Movie Recommendation2.062.866.0
Multistep Arithmetic Two0.00.40.7
Navigate50.750.750.7
Object Counting39.340.748.0
Penguins In A Table17.440.945.7
Reasoning About Colored Objects46.747.350.7
Ruin Names18.035.644.7
Salient Translation Error Detection44.745.148.7
Snarks60.360.861.5
Sports Understanding56.751.353.3
Temporal Sequences21.321.522.0
Tracking Shuffled ObjectsS3.39.913.3
(five objects) Tracking Shuffled ObjectsS (seven objects)5.37.38.7
Tracking Shuffled ObjectsS7.321.731.3
(three objects) Web Of Lies54.747.148.7
Word Sorting1.31.52.0
Average Performance per Task25.836.541.3
", + "type": "table", + "image_path": "0ab8ee9f98ef9b7388aa22dd0aeb88c7626414f78e70ff4407066b0bb2b27a06.jpg" + } + ] + } + ], + "index": 8, + "virtual_lines": [ + { + "bbox": [ + 140, + 184, + 474, + 322.66666666666663 + ], + "spans": [], + "index": 7 + }, + { + "bbox": [ + 140, + 322.66666666666663, + 474, + 461.33333333333326 + ], + "spans": [], + "index": 8 + }, + { + "bbox": [ + 140, + 461.33333333333326, + 474, + 599.9999999999999 + ], + "spans": [], + "index": 9 + } + ] + } + ], + "index": 8 + } + ], + "page_idx": 17, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 26, + 316, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "score": 1.0, + "content": "Published as a conference paper at COLM 2024", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 760 + ], + "lines": [ + { + "bbox": [ + 299, + 750, + 313, + 763 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 313, + 763 + ], + "score": 1.0, + "content": "18", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "title", + "bbox": [ + 108, + 80, + 252, + 95 + ], + "lines": [ + { + "bbox": [ + 105, + 78, + 254, + 99 + ], + "spans": [ + { + "bbox": [ + 105, + 78, + 254, + 99 + ], + "score": 1.0, + "content": "D Result of larger model", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 106, + 111, + 505, + 179 + ], + "lines": [ + { + "bbox": [ + 106, + 111, + 505, + 124 + ], + "spans": [ + { + "bbox": [ + 106, + 111, + 505, + 124 + ], + "score": 1.0, + "content": "Table 6: Experimental results of zero-shot learning (Zero) and our few-shot LoraHub learn-", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 506, + 136 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 506, + 136 + ], + "score": 1.0, + "content": "ing (LoraHub) on 
the BBH benchmark with FLAN-T5-xl as the base LLM. Note that we use", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 133, + 506, + 148 + ], + "spans": [ + { + "bbox": [ + 104, + 133, + 462, + 148 + ], + "score": 1.0, + "content": "5 examples per task as the demonstration for both ICL and LoraHub. The average", + "type": "text" + }, + { + "bbox": [ + 462, + 135, + 484, + 146 + ], + "score": 0.27, + "content": "( a v g )", + "type": "inline_equation" + }, + { + "bbox": [ + 484, + 133, + 506, + 148 + ], + "score": 1.0, + "content": "per-", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 145, + 505, + 158 + ], + "spans": [ + { + "bbox": [ + 106, + 145, + 505, + 158 + ], + "score": 1.0, + "content": "formance of LoraHub is computed over 5 runs with different random seeds, while the best", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 155, + 506, + 169 + ], + "spans": [ + { + "bbox": [ + 105, + 155, + 506, + 169 + ], + "score": 1.0, + "content": "(best) performance is reported as the maximum value obtained across these runs. We can", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 165, + 357, + 181 + ], + "spans": [ + { + "bbox": [ + 104, + 165, + 357, + 181 + ], + "score": 1.0, + "content": "see the trend of the results are similar to FLAN-T5-large.", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 3.5, + "bbox_fs": [ + 104, + 111, + 506, + 181 + ] + }, + { + "type": "table", + "bbox": [ + 140, + 184, + 474, + 600 + ], + "blocks": [ + { + "type": "table_body", + "bbox": [ + 140, + 184, + 474, + 600 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 140, + 184, + 474, + 600 + ], + "spans": [ + { + "bbox": [ + 140, + 184, + 474, + 600 + ], + "score": 0.976, + "html": "
TaskZeroLoraHub avgLoraHub best
Boolean Expressions52.058.763.3
Causal Judgement62.153.859.8
Date Understanding38.037.638.0
Disambiguation Qa0.020.5 54.7
Dyck Languages1.30.92.0
Formal Fallacies56.056.056.0
Geometric Shapes8.717.528.0
Hyperbaton45.353.556.7
Logical DeductionS (five objects)1.342.748.7
Logical DeductionS (seven objects)8.744.350.0
Logical DeductionS (three objects)0.756.461.3
Movie Recommendation2.062.866.0
Multistep Arithmetic Two0.00.40.7
Navigate50.750.750.7
Object Counting39.340.748.0
Penguins In A Table17.440.945.7
Reasoning About Colored Objects46.747.350.7
Ruin Names18.035.644.7
Salient Translation Error Detection44.745.148.7
Snarks60.360.861.5
Sports Understanding56.751.353.3
Temporal Sequences21.321.522.0
Tracking Shuffled ObjectsS3.39.913.3
(five objects) Tracking Shuffled ObjectsS (seven objects)5.37.38.7
Tracking Shuffled ObjectsS7.321.731.3
(three objects) Web Of Lies54.747.148.7
Word Sorting1.31.52.0
Average Performance per Task25.836.541.3
", + "type": "table", + "image_path": "0ab8ee9f98ef9b7388aa22dd0aeb88c7626414f78e70ff4407066b0bb2b27a06.jpg" + } + ] + } + ], + "index": 8, + "virtual_lines": [ + { + "bbox": [ + 140, + 184, + 474, + 322.66666666666663 + ], + "spans": [], + "index": 7 + }, + { + "bbox": [ + 140, + 322.66666666666663, + 474, + 461.33333333333326 + ], + "spans": [], + "index": 8 + }, + { + "bbox": [ + 140, + 461.33333333333326, + 474, + 599.9999999999999 + ], + "spans": [], + "index": 9 + } + ] + } + ], + "index": 8 + } + ] + }, + { + "preproc_blocks": [ + { + "type": "title", + "bbox": [ + 107, + 80, + 339, + 95 + ], + "lines": [ + { + "bbox": [ + 104, + 79, + 339, + 97 + ], + "spans": [ + { + "bbox": [ + 104, + 79, + 339, + 97 + ], + "score": 1.0, + "content": "E Improving the Robustness of LoraHub", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 106, + 106, + 505, + 184 + ], + "lines": [ + { + "bbox": [ + 105, + 106, + 505, + 120 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 505, + 120 + ], + "score": 1.0, + "content": "In order to enhance the robustness of LoraHub, we explored a straightforward approach in", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 116, + 505, + 130 + ], + "spans": [ + { + "bbox": [ + 105, + 116, + 505, + 130 + ], + "score": 1.0, + "content": "the selection of LoRA module candidates. Specifically, we first identified 20 LoRA module", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 127, + 506, + 141 + ], + "spans": [ + { + "bbox": [ + 105, + 127, + 506, + 141 + ], + "score": 1.0, + "content": "candidates with the lowest loss on the few-shot examples. Our findings indicate a slight", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 139, + 506, + 153 + ], + "spans": [ + { + "bbox": [ + 105, + 139, + 506, + 153 + ], + "score": 1.0, + "content": "improvement in overall performance after applying the pre-filtering startegy. 
Since the", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 149, + 505, + 163 + ], + "spans": [ + { + "bbox": [ + 105, + 149, + 505, + 163 + ], + "score": 1.0, + "content": "primary instability in our approach arises from the selection of LoRA candidates. This", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 160, + 505, + 175 + ], + "spans": [ + { + "bbox": [ + 105, + 160, + 505, + 175 + ], + "score": 1.0, + "content": "method involves choosing a fixed set of LoRA candidates to ensure the stability of our", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 173, + 153, + 185 + ], + "spans": [ + { + "bbox": [ + 105, + 173, + 153, + 185 + ], + "score": 1.0, + "content": "approach.", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 154, + 214, + 456, + 628 + ], + "blocks": [ + { + "type": "table_caption", + "bbox": [ + 172, + 194, + 437, + 207 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 172, + 191, + 439, + 210 + ], + "spans": [ + { + "bbox": [ + 172, + 191, + 439, + 210 + ], + "score": 1.0, + "content": "Table 7: The experimental results of loss-based pre-filtering.", + "type": "text" + } + ], + "index": 8 + } + ], + "index": 8 + }, + { + "type": "table_body", + "bbox": [ + 154, + 214, + 456, + 628 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 154, + 214, + 456, + 628 + ], + "spans": [ + { + "bbox": [ + 154, + 214, + 456, + 628 + ], + "score": 0.982, + "html": "
TaskLoraHubavgLoraHubfilter
Boolean Expressions55.560.00
Causal Judgement54.352.9
Date Understanding32.933.3
Disambiguation45.262.7
Dyck Languages1.00.0
Formal Fallacies52.854.0
Geometric Shapes7.44.0
Hyperbaton62.864.0
Logical DeductionS (five objects)36.137.3
Logical DeductionS (seven objects)36.822.0
Logical DeductionS (three objects)45.756.0
Movie Recommendation55.368.0
Multistep Arithmetic0.40.7
Navigate47.149.3
Object Counting33.738.7
Penguins in a Table35.937.0
Reasoning about Colored Objects40.033.3
Ruin Names24.422.0
Salient Translation Error Detection36.024.0
Snarks56.952.66
Sports Understanding56.758.0
Temporal Sequences18.227.3
Tracking Shuffled ObjectsS12.311.3
(five objects) Tracking Shuffled ObjectsS7.78.0
(seven objects) Tracking Shuffled ObjectsS29.232.7
(three objects) Web of Lies50.146.0
Word Sorting1.11.3
34.735.4
Avg Performance Per Task
", + "type": "table", + "image_path": "d6f6dad37f46055044f3fa33031ace9323de2344f42ee9318d2484d1ab05748f.jpg" + } + ] + } + ], + "index": 23, + "virtual_lines": [ + { + "bbox": [ + 154, + 214, + 456, + 228.27586206896552 + ], + "spans": [], + "index": 9 + }, + { + "bbox": [ + 154, + 228.27586206896552, + 456, + 242.55172413793105 + ], + "spans": [], + "index": 10 + }, + { + "bbox": [ + 154, + 242.55172413793105, + 456, + 256.82758620689657 + ], + "spans": [], + "index": 11 + }, + { + "bbox": [ + 154, + 256.82758620689657, + 456, + 271.1034482758621 + ], + "spans": [], + "index": 12 + }, + { + "bbox": [ + 154, + 271.1034482758621, + 456, + 285.3793103448276 + ], + "spans": [], + "index": 13 + }, + { + "bbox": [ + 154, + 285.3793103448276, + 456, + 299.65517241379314 + ], + "spans": [], + "index": 14 + }, + { + "bbox": [ + 154, + 299.65517241379314, + 456, + 313.93103448275866 + ], + "spans": [], + "index": 15 + }, + { + "bbox": [ + 154, + 313.93103448275866, + 456, + 328.2068965517242 + ], + "spans": [], + "index": 16 + }, + { + "bbox": [ + 154, + 328.2068965517242, + 456, + 342.4827586206897 + ], + "spans": [], + "index": 17 + }, + { + "bbox": [ + 154, + 342.4827586206897, + 456, + 356.75862068965523 + ], + "spans": [], + "index": 18 + }, + { + "bbox": [ + 154, + 356.75862068965523, + 456, + 371.03448275862075 + ], + "spans": [], + "index": 19 + }, + { + "bbox": [ + 154, + 371.03448275862075, + 456, + 385.3103448275863 + ], + "spans": [], + "index": 20 + }, + { + "bbox": [ + 154, + 385.3103448275863, + 456, + 399.5862068965518 + ], + "spans": [], + "index": 21 + }, + { + "bbox": [ + 154, + 399.5862068965518, + 456, + 413.8620689655173 + ], + "spans": [], + "index": 22 + }, + { + "bbox": [ + 154, + 413.8620689655173, + 456, + 428.13793103448285 + ], + "spans": [], + "index": 23 + }, + { + "bbox": [ + 154, + 428.13793103448285, + 456, + 442.41379310344837 + ], + "spans": [], + "index": 24 + }, + { + "bbox": [ + 154, + 442.41379310344837, + 456, + 456.6896551724139 + 
], + "spans": [], + "index": 25 + }, + { + "bbox": [ + 154, + 456.6896551724139, + 456, + 470.9655172413794 + ], + "spans": [], + "index": 26 + }, + { + "bbox": [ + 154, + 470.9655172413794, + 456, + 485.24137931034494 + ], + "spans": [], + "index": 27 + }, + { + "bbox": [ + 154, + 485.24137931034494, + 456, + 499.51724137931046 + ], + "spans": [], + "index": 28 + }, + { + "bbox": [ + 154, + 499.51724137931046, + 456, + 513.7931034482759 + ], + "spans": [], + "index": 29 + }, + { + "bbox": [ + 154, + 513.7931034482759, + 456, + 528.0689655172414 + ], + "spans": [], + "index": 30 + }, + { + "bbox": [ + 154, + 528.0689655172414, + 456, + 542.3448275862069 + ], + "spans": [], + "index": 31 + }, + { + "bbox": [ + 154, + 542.3448275862069, + 456, + 556.6206896551723 + ], + "spans": [], + "index": 32 + }, + { + "bbox": [ + 154, + 556.6206896551723, + 456, + 570.8965517241378 + ], + "spans": [], + "index": 33 + }, + { + "bbox": [ + 154, + 570.8965517241378, + 456, + 585.1724137931033 + ], + "spans": [], + "index": 34 + }, + { + "bbox": [ + 154, + 585.1724137931033, + 456, + 599.4482758620687 + ], + "spans": [], + "index": 35 + }, + { + "bbox": [ + 154, + 599.4482758620687, + 456, + 613.7241379310342 + ], + "spans": [], + "index": 36 + }, + { + "bbox": [ + 154, + 613.7241379310342, + 456, + 627.9999999999997 + ], + "spans": [], + "index": 37 + } + ] + } + ], + "index": 15.5 + } + ], + "page_idx": 18, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 26, + 316, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "score": 1.0, + "content": "Published as a conference paper at COLM 2024", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 760 + ], + "lines": [ + { + "bbox": [ + 299, + 750, + 313, + 764 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 313, + 764 + ], + "score": 1.0, + "content": 
"19", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "title", + "bbox": [ + 107, + 80, + 339, + 95 + ], + "lines": [ + { + "bbox": [ + 104, + 79, + 339, + 97 + ], + "spans": [ + { + "bbox": [ + 104, + 79, + 339, + 97 + ], + "score": 1.0, + "content": "E Improving the Robustness of LoraHub", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 106, + 106, + 505, + 184 + ], + "lines": [ + { + "bbox": [ + 105, + 106, + 505, + 120 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 505, + 120 + ], + "score": 1.0, + "content": "In order to enhance the robustness of LoraHub, we explored a straightforward approach in", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 116, + 505, + 130 + ], + "spans": [ + { + "bbox": [ + 105, + 116, + 505, + 130 + ], + "score": 1.0, + "content": "the selection of LoRA module candidates. Specifically, we first identified 20 LoRA module", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 127, + 506, + 141 + ], + "spans": [ + { + "bbox": [ + 105, + 127, + 506, + 141 + ], + "score": 1.0, + "content": "candidates with the lowest loss on the few-shot examples. Our findings indicate a slight", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 139, + 506, + 153 + ], + "spans": [ + { + "bbox": [ + 105, + 139, + 506, + 153 + ], + "score": 1.0, + "content": "improvement in overall performance after applying the pre-filtering startegy. Since the", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 149, + 505, + 163 + ], + "spans": [ + { + "bbox": [ + 105, + 149, + 505, + 163 + ], + "score": 1.0, + "content": "primary instability in our approach arises from the selection of LoRA candidates. 
This", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 160, + 505, + 175 + ], + "spans": [ + { + "bbox": [ + 105, + 160, + 505, + 175 + ], + "score": 1.0, + "content": "method involves choosing a fixed set of LoRA candidates to ensure the stability of our", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 173, + 153, + 185 + ], + "spans": [ + { + "bbox": [ + 105, + 173, + 153, + 185 + ], + "score": 1.0, + "content": "approach.", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 4, + "bbox_fs": [ + 105, + 106, + 506, + 185 + ] + }, + { + "type": "table", + "bbox": [ + 154, + 214, + 456, + 628 + ], + "blocks": [ + { + "type": "table_caption", + "bbox": [ + 172, + 194, + 437, + 207 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 172, + 191, + 439, + 210 + ], + "spans": [ + { + "bbox": [ + 172, + 191, + 439, + 210 + ], + "score": 1.0, + "content": "Table 7: The experimental results of loss-based pre-filtering.", + "type": "text" + } + ], + "index": 8 + } + ], + "index": 8 + }, + { + "type": "table_body", + "bbox": [ + 154, + 214, + 456, + 628 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 154, + 214, + 456, + 628 + ], + "spans": [ + { + "bbox": [ + 154, + 214, + 456, + 628 + ], + "score": 0.982, + "html": "
TaskLoraHubavgLoraHubfilter
Boolean Expressions55.560.00
Causal Judgement54.352.9
Date Understanding32.933.3
Disambiguation45.262.7
Dyck Languages1.00.0
Formal Fallacies52.854.0
Geometric Shapes7.44.0
Hyperbaton62.864.0
Logical DeductionS (five objects)36.137.3
Logical DeductionS (seven objects)36.822.0
Logical DeductionS (three objects)45.756.0
Movie Recommendation55.368.0
Multistep Arithmetic0.40.7
Navigate47.149.3
Object Counting33.738.7
Penguins in a Table35.937.0
Reasoning about Colored Objects40.033.3
Ruin Names24.422.0
Salient Translation Error Detection36.024.0
Snarks56.952.66
Sports Understanding56.758.0
Temporal Sequences18.227.3
Tracking Shuffled ObjectsS12.311.3
(five objects) Tracking Shuffled ObjectsS7.78.0
(seven objects) Tracking Shuffled ObjectsS29.232.7
(three objects) Web of Lies50.146.0
Word Sorting1.11.3
34.735.4
Avg Performance Per Task
", + "type": "table", + "image_path": "d6f6dad37f46055044f3fa33031ace9323de2344f42ee9318d2484d1ab05748f.jpg" + } + ] + } + ], + "index": 23, + "virtual_lines": [ + { + "bbox": [ + 154, + 214, + 456, + 228.27586206896552 + ], + "spans": [], + "index": 9 + }, + { + "bbox": [ + 154, + 228.27586206896552, + 456, + 242.55172413793105 + ], + "spans": [], + "index": 10 + }, + { + "bbox": [ + 154, + 242.55172413793105, + 456, + 256.82758620689657 + ], + "spans": [], + "index": 11 + }, + { + "bbox": [ + 154, + 256.82758620689657, + 456, + 271.1034482758621 + ], + "spans": [], + "index": 12 + }, + { + "bbox": [ + 154, + 271.1034482758621, + 456, + 285.3793103448276 + ], + "spans": [], + "index": 13 + }, + { + "bbox": [ + 154, + 285.3793103448276, + 456, + 299.65517241379314 + ], + "spans": [], + "index": 14 + }, + { + "bbox": [ + 154, + 299.65517241379314, + 456, + 313.93103448275866 + ], + "spans": [], + "index": 15 + }, + { + "bbox": [ + 154, + 313.93103448275866, + 456, + 328.2068965517242 + ], + "spans": [], + "index": 16 + }, + { + "bbox": [ + 154, + 328.2068965517242, + 456, + 342.4827586206897 + ], + "spans": [], + "index": 17 + }, + { + "bbox": [ + 154, + 342.4827586206897, + 456, + 356.75862068965523 + ], + "spans": [], + "index": 18 + }, + { + "bbox": [ + 154, + 356.75862068965523, + 456, + 371.03448275862075 + ], + "spans": [], + "index": 19 + }, + { + "bbox": [ + 154, + 371.03448275862075, + 456, + 385.3103448275863 + ], + "spans": [], + "index": 20 + }, + { + "bbox": [ + 154, + 385.3103448275863, + 456, + 399.5862068965518 + ], + "spans": [], + "index": 21 + }, + { + "bbox": [ + 154, + 399.5862068965518, + 456, + 413.8620689655173 + ], + "spans": [], + "index": 22 + }, + { + "bbox": [ + 154, + 413.8620689655173, + 456, + 428.13793103448285 + ], + "spans": [], + "index": 23 + }, + { + "bbox": [ + 154, + 428.13793103448285, + 456, + 442.41379310344837 + ], + "spans": [], + "index": 24 + }, + { + "bbox": [ + 154, + 442.41379310344837, + 456, + 456.6896551724139 + 
], + "spans": [], + "index": 25 + }, + { + "bbox": [ + 154, + 456.6896551724139, + 456, + 470.9655172413794 + ], + "spans": [], + "index": 26 + }, + { + "bbox": [ + 154, + 470.9655172413794, + 456, + 485.24137931034494 + ], + "spans": [], + "index": 27 + }, + { + "bbox": [ + 154, + 485.24137931034494, + 456, + 499.51724137931046 + ], + "spans": [], + "index": 28 + }, + { + "bbox": [ + 154, + 499.51724137931046, + 456, + 513.7931034482759 + ], + "spans": [], + "index": 29 + }, + { + "bbox": [ + 154, + 513.7931034482759, + 456, + 528.0689655172414 + ], + "spans": [], + "index": 30 + }, + { + "bbox": [ + 154, + 528.0689655172414, + 456, + 542.3448275862069 + ], + "spans": [], + "index": 31 + }, + { + "bbox": [ + 154, + 542.3448275862069, + 456, + 556.6206896551723 + ], + "spans": [], + "index": 32 + }, + { + "bbox": [ + 154, + 556.6206896551723, + 456, + 570.8965517241378 + ], + "spans": [], + "index": 33 + }, + { + "bbox": [ + 154, + 570.8965517241378, + 456, + 585.1724137931033 + ], + "spans": [], + "index": 34 + }, + { + "bbox": [ + 154, + 585.1724137931033, + 456, + 599.4482758620687 + ], + "spans": [], + "index": 35 + }, + { + "bbox": [ + 154, + 599.4482758620687, + 456, + 613.7241379310342 + ], + "spans": [], + "index": 36 + }, + { + "bbox": [ + 154, + 613.7241379310342, + 456, + 627.9999999999997 + ], + "spans": [], + "index": 37 + } + ] + } + ], + "index": 15.5 + } + ] + }, + { + "preproc_blocks": [ + { + "type": "title", + "bbox": [ + 106, + 80, + 345, + 95 + ], + "lines": [ + { + "bbox": [ + 104, + 79, + 347, + 97 + ], + "spans": [ + { + "bbox": [ + 104, + 79, + 347, + 97 + ], + "score": 1.0, + "content": "F Performance on General Important Task", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 107, + 106, + 505, + 184 + ], + "lines": [ + { + "bbox": [ + 105, + 105, + 506, + 120 + ], + "spans": [ + { + "bbox": [ + 105, + 105, + 506, + 120 + ], + "score": 1.0, + "content": "In our research, we have 
identified specific LoRA modules that exhibit significant impact", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 116, + 505, + 131 + ], + "spans": [ + { + "bbox": [ + 105, + 116, + 505, + 131 + ], + "score": 1.0, + "content": "when integrated into merged LoRAs. Our focus lies in assessing the performance of the top", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 128, + 505, + 140 + ], + "spans": [ + { + "bbox": [ + 105, + 128, + 505, + 140 + ], + "score": 1.0, + "content": "five task-related LoRAs on the BBH benchmark. The results indicate that these top LoRAs", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 139, + 506, + 152 + ], + "spans": [ + { + "bbox": [ + 105, + 139, + 506, + 152 + ], + "score": 1.0, + "content": "perform similarly or even worse than zero-shot in most cases. Only one of them stands out", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 150, + 505, + 164 + ], + "spans": [ + { + "bbox": [ + 105, + 150, + 505, + 164 + ], + "score": 1.0, + "content": "as significantly better than zero-shot. However, it’s worth noting that this performance is", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 161, + 505, + 175 + ], + "spans": [ + { + "bbox": [ + 105, + 161, + 505, + 175 + ], + "score": 1.0, + "content": "not as impressive as Lorahub. 
These findings support the idea that the merging process", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 173, + 258, + 185 + ], + "spans": [ + { + "bbox": [ + 106, + 173, + 258, + 185 + ], + "score": 1.0, + "content": "can improve overall performance.", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 108, + 223, + 526, + 610 + ], + "blocks": [ + { + "type": "table_caption", + "bbox": [ + 107, + 194, + 504, + 217 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 192, + 506, + 208 + ], + "spans": [ + { + "bbox": [ + 105, + 192, + 506, + 208 + ], + "score": 1.0, + "content": "Table 8: Detailed experimental results of top five LoRA modules shown in Table 3 on BBH", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 204, + 135, + 218 + ], + "spans": [ + { + "bbox": [ + 105, + 204, + 135, + 218 + ], + "score": 1.0, + "content": "tasks.", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 8.5 + }, + { + "type": "table_body", + "bbox": [ + 108, + 223, + 526, + 610 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 108, + 223, + 526, + 610 + ], + "spans": [ + { + "bbox": [ + 108, + 223, + 526, + 610 + ], + "score": 0.984, + "html": "
TaskWIQA: LastRACE: RightWIQA: FirstADQAWebQA
Boolean Expressions52.6758.0052.6754.6753.33
Causal Judgement55.1763.2255.1757.4757.47
Date Understanding17.3319.3317.3316.6715.33
Disambiguation0.000.000.000.000.00
Dyck Languages0.670.670.671.331.33
Formal Fallacies51.3351.3351.3351.3351.33
Geometric Shapes8.0013.338.006.677.33
Hyperbaton16.6744.0016.671.336.00
Logical Ded uctionts)23.3328.0023.3319.3320.67
Logical DeductionS (seven objects)22.0026.0022.0010.6712.00
Logical DeductionS (three objects)0.679.330.670.000.00
Movie Recommendation63.3362.6763.3356.6763.33
Multistep Arithmetic0.670.670.670.670.67
Navigate47.3350.0047.3347.3347.33
Object Counting34.6734.0034.6735.3335.33
Penguins in a Table45.6541.3045.6539.1343.48
Reasoning about Colored Objects40.0037.3340.0031.3330.67
Ruin Names22.0021.3322.0017.3322.67
Salient Translation Error Detection36.6734.6736.6732.6737.33
Snarks52.5655.1352.5647.4452.56
Sports Understanding56.0058.6756.0055.33
Temporal Sequences16.6717.3316.6712.6755.33 17.33
Tracking Shuffled ObjectsS (five objects)12.0012.0012.0010.6712.00
Tracking Shuffled ObjectsS (seven objects)6.676.676.676.676.67
Tracking Shuffled ObjectsS20.6730.6720.6710.6725.33
(three objects) Web of Lies54.6754.0054.6754.00
Word Sorting1.331.331.331.3354.00 1.33
Avg Performance per Task β–³ FLAN-T5-large28.10 1.1030.78 3.7828.10 1.1025.14 -1.8627.04 0.04
", + "type": "table", + "image_path": "a5a978a9e175ac55d980958495a1a7e775eca792910cb318c11e008479f07afe.jpg" + } + ] + } + ], + "index": 11, + "virtual_lines": [ + { + "bbox": [ + 108, + 223, + 526, + 352.0 + ], + "spans": [], + "index": 10 + }, + { + "bbox": [ + 108, + 352.0, + 526, + 481.0 + ], + "spans": [], + "index": 11 + }, + { + "bbox": [ + 108, + 481.0, + 526, + 610.0 + ], + "spans": [], + "index": 12 + } + ] + } + ], + "index": 9.75 + } + ], + "page_idx": 19, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 26, + 316, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "score": 1.0, + "content": "Published as a conference paper at COLM 2024", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 760 + ], + "lines": [ + { + "bbox": [ + 298, + 750, + 313, + 764 + ], + "spans": [ + { + "bbox": [ + 298, + 750, + 313, + 764 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 14, + "width": 15 + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "title", + "bbox": [ + 106, + 80, + 345, + 95 + ], + "lines": [ + { + "bbox": [ + 104, + 79, + 347, + 97 + ], + "spans": [ + { + "bbox": [ + 104, + 79, + 347, + 97 + ], + "score": 1.0, + "content": "F Performance on General Important Task", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 107, + 106, + 505, + 184 + ], + "lines": [ + { + "bbox": [ + 105, + 105, + 506, + 120 + ], + "spans": [ + { + "bbox": [ + 105, + 105, + 506, + 120 + ], + "score": 1.0, + "content": "In our research, we have identified specific LoRA modules that exhibit significant impact", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 116, + 505, + 131 + ], + "spans": [ + { + "bbox": [ + 105, + 116, + 505, + 131 + ], + "score": 1.0, + "content": "when integrated into merged 
LoRAs. Our focus lies in assessing the performance of the top", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 128, + 505, + 140 + ], + "spans": [ + { + "bbox": [ + 105, + 128, + 505, + 140 + ], + "score": 1.0, + "content": "five task-related LoRAs on the BBH benchmark. The results indicate that these top LoRAs", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 139, + 506, + 152 + ], + "spans": [ + { + "bbox": [ + 105, + 139, + 506, + 152 + ], + "score": 1.0, + "content": "perform similarly or even worse than zero-shot in most cases. Only one of them stands out", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 150, + 505, + 164 + ], + "spans": [ + { + "bbox": [ + 105, + 150, + 505, + 164 + ], + "score": 1.0, + "content": "as significantly better than zero-shot. However, it’s worth noting that this performance is", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 161, + 505, + 175 + ], + "spans": [ + { + "bbox": [ + 105, + 161, + 505, + 175 + ], + "score": 1.0, + "content": "not as impressive as Lorahub. 
These findings support the idea that the merging process", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 173, + 258, + 185 + ], + "spans": [ + { + "bbox": [ + 106, + 173, + 258, + 185 + ], + "score": 1.0, + "content": "can improve overall performance.", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 4, + "bbox_fs": [ + 105, + 105, + 506, + 185 + ] + }, + { + "type": "table", + "bbox": [ + 108, + 223, + 526, + 610 + ], + "blocks": [ + { + "type": "table_caption", + "bbox": [ + 107, + 194, + 504, + 217 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 192, + 506, + 208 + ], + "spans": [ + { + "bbox": [ + 105, + 192, + 506, + 208 + ], + "score": 1.0, + "content": "Table 8: Detailed experimental results of top five LoRA modules shown in Table 3 on BBH", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 204, + 135, + 218 + ], + "spans": [ + { + "bbox": [ + 105, + 204, + 135, + 218 + ], + "score": 1.0, + "content": "tasks.", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 8.5 + }, + { + "type": "table_body", + "bbox": [ + 108, + 223, + 526, + 610 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 108, + 223, + 526, + 610 + ], + "spans": [ + { + "bbox": [ + 108, + 223, + 526, + 610 + ], + "score": 0.984, + "html": "
TaskWIQA: LastRACE: RightWIQA: FirstADQAWebQA
Boolean Expressions52.6758.0052.6754.6753.33
Causal Judgement55.1763.2255.1757.4757.47
Date Understanding17.3319.3317.3316.6715.33
Disambiguation0.000.000.000.000.00
Dyck Languages0.670.670.671.331.33
Formal Fallacies51.3351.3351.3351.3351.33
Geometric Shapes8.0013.338.006.677.33
Hyperbaton16.6744.0016.671.336.00
Logical Ded uctionts)23.3328.0023.3319.3320.67
Logical DeductionS (seven objects)22.0026.0022.0010.6712.00
Logical DeductionS (three objects)0.679.330.670.000.00
Movie Recommendation63.3362.6763.3356.6763.33
Multistep Arithmetic0.670.670.670.670.67
Navigate47.3350.0047.3347.3347.33
Object Counting34.6734.0034.6735.3335.33
Penguins in a Table45.6541.3045.6539.1343.48
Reasoning about Colored Objects40.0037.3340.0031.3330.67
Ruin Names22.0021.3322.0017.3322.67
Salient Translation Error Detection36.6734.6736.6732.6737.33
Snarks52.5655.1352.5647.4452.56
Sports Understanding56.0058.6756.0055.33
Temporal Sequences16.6717.3316.6712.6755.33 17.33
Tracking Shuffled ObjectsS (five objects)12.0012.0012.0010.6712.00
Tracking Shuffled ObjectsS (seven objects)6.676.676.676.676.67
Tracking Shuffled ObjectsS20.6730.6720.6710.6725.33
(three objects) Web of Lies54.6754.0054.6754.00
Word Sorting1.331.331.331.3354.00 1.33
Avg Performance per Task β–³ FLAN-T5-large28.10 1.1030.78 3.7828.10 1.1025.14 -1.8627.04 0.04
", + "type": "table", + "image_path": "a5a978a9e175ac55d980958495a1a7e775eca792910cb318c11e008479f07afe.jpg" + } + ] + } + ], + "index": 11, + "virtual_lines": [ + { + "bbox": [ + 108, + 223, + 526, + 352.0 + ], + "spans": [], + "index": 10 + }, + { + "bbox": [ + 108, + 352.0, + 526, + 481.0 + ], + "spans": [], + "index": 11 + }, + { + "bbox": [ + 108, + 481.0, + 526, + 610.0 + ], + "spans": [], + "index": 12 + } + ] + } + ], + "index": 9.75 + } + ] + }, + { + "preproc_blocks": [ + { + "type": "image", + "bbox": [ + 161, + 98, + 454, + 280 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 161, + 98, + 454, + 280 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 161, + 98, + 454, + 280 + ], + "spans": [ + { + "bbox": [ + 161, + 98, + 454, + 280 + ], + "score": 0.972, + "type": "image", + "image_path": "f08459cc633da4d25e332908058acffc5a55cf3fadee5264d074582bf20749f5.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 161, + 98, + 454, + 158.66666666666666 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 161, + 158.66666666666666, + 454, + 219.33333333333331 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 161, + 219.33333333333331, + 454, + 280.0 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 300, + 505, + 335 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 300, + 505, + 314 + ], + "spans": [ + { + "bbox": [ + 105, + 300, + 505, + 314 + ], + "score": 1.0, + "content": "Figure 3: The influence of number of LoRA modules on 15 tasks from BBH, and each box", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 312, + 505, + 324 + ], + "spans": [ + { + "bbox": [ + 105, + 312, + 505, + 324 + ], + "score": 1.0, + "content": "is obtained from 5 separate runs. 
The horizontal axis shows the number of LoRA modules", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 321, + 273, + 337 + ], + "spans": [ + { + "bbox": [ + 105, + 321, + 273, + 337 + ], + "score": 1.0, + "content": "to be composed in LoraHub learning.", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 4 + } + ], + "index": 2.5 + }, + { + "type": "title", + "bbox": [ + 108, + 353, + 258, + 367 + ], + "lines": [ + { + "bbox": [ + 105, + 352, + 259, + 369 + ], + "spans": [ + { + "bbox": [ + 105, + 352, + 259, + 369 + ], + "score": 1.0, + "content": "G Implementation details", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 6 + }, + { + "type": "text", + "bbox": [ + 107, + 378, + 505, + 434 + ], + "lines": [ + { + "bbox": [ + 106, + 378, + 505, + 392 + ], + "spans": [ + { + "bbox": [ + 106, + 378, + 505, + 392 + ], + "score": 1.0, + "content": "We implemented LoRA tuning using the Huggingface PEFT library (Mangrulkar et al.,", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 389, + 505, + 403 + ], + "spans": [ + { + "bbox": [ + 105, + 389, + 505, + 403 + ], + "score": 1.0, + "content": "2022), with the rank being set as 16. The gradient-free method was implemented using the", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 400, + 505, + 414 + ], + "spans": [ + { + "bbox": [ + 105, + 400, + 505, + 414 + ], + "score": 1.0, + "content": "open-source Nevergrad optimization library (Rapin & Teytaud, 2018), with a constraint", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 410, + 505, + 425 + ], + "spans": [ + { + "bbox": [ + 105, + 410, + 505, + 425 + ], + "score": 1.0, + "content": "that the absolute value of LoRA weights should not exceed 1.5. 
Originally, all coefficients", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 422, + 260, + 435 + ], + "spans": [ + { + "bbox": [ + 106, + 422, + 260, + 435 + ], + "score": 1.0, + "content": "of LoRA modules were set at zero.", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 9 + }, + { + "type": "text", + "bbox": [ + 107, + 439, + 505, + 496 + ], + "lines": [ + { + "bbox": [ + 106, + 439, + 506, + 452 + ], + "spans": [ + { + "bbox": [ + 106, + 439, + 411, + 452 + ], + "score": 1.0, + "content": "In our standard settings, we set the maximum number of iterations", + "type": "text" + }, + { + "bbox": [ + 411, + 440, + 420, + 450 + ], + "score": 0.49, + "content": "K", + "type": "inline_equation" + }, + { + "bbox": [ + 420, + 439, + 506, + 452 + ], + "score": 1.0, + "content": "as 40. The same 5", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 448, + 505, + 465 + ], + "spans": [ + { + "bbox": [ + 105, + 448, + 505, + 465 + ], + "score": 1.0, + "content": "examples were used during our LoraHub learning and the few-shot in-context learning.", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 459, + 506, + 476 + ], + "spans": [ + { + "bbox": [ + 105, + 459, + 199, + 476 + ], + "score": 1.0, + "content": "The hyperparameter", + "type": "text" + }, + { + "bbox": [ + 199, + 464, + 206, + 472 + ], + "score": 0.52, + "content": "\\alpha", + "type": "inline_equation" + }, + { + "bbox": [ + 206, + 459, + 506, + 476 + ], + "score": 1.0, + "content": "is set as 0.05. 
Regarding the hyperparameters for training candidate", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 471, + 506, + 486 + ], + "spans": [ + { + "bbox": [ + 105, + 471, + 506, + 486 + ], + "score": 1.0, + "content": "LoRA modules, we maintained consistency across all modules, setting the batch size at 64,", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 482, + 404, + 496 + ], + "spans": [ + { + "bbox": [ + 106, + 482, + 192, + 496 + ], + "score": 1.0, + "content": "the learning rate at", + "type": "text" + }, + { + "bbox": [ + 192, + 484, + 221, + 495 + ], + "score": 0.78, + "content": "1 e - 4 ,", + "type": "inline_equation" + }, + { + "bbox": [ + 222, + 482, + 404, + 496 + ], + "score": 1.0, + "content": "and the number of training epochs at 10.", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 14 + }, + { + "type": "title", + "bbox": [ + 106, + 510, + 344, + 524 + ], + "lines": [ + { + "bbox": [ + 104, + 509, + 345, + 527 + ], + "spans": [ + { + "bbox": [ + 104, + 509, + 345, + 527 + ], + "score": 1.0, + "content": "H Influence of Number of LoRA modules", + "type": "text" + } + ], + "index": 17 + } + ], + "index": 17 + }, + { + "type": "text", + "bbox": [ + 106, + 536, + 505, + 614 + ], + "lines": [ + { + "bbox": [ + 106, + 536, + 505, + 549 + ], + "spans": [ + { + "bbox": [ + 106, + 536, + 505, + 549 + ], + "score": 1.0, + "content": "As shown in Figure 3, with an increase in the number of LoRA module candidates, there", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 546, + 505, + 560 + ], + "spans": [ + { + "bbox": [ + 105, + 546, + 505, + 560 + ], + "score": 1.0, + "content": "is a corresponding increase in the performance variance. 
Based on our in-depth analysis,", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 558, + 505, + 570 + ], + "spans": [ + { + "bbox": [ + 105, + 558, + 505, + 570 + ], + "score": 1.0, + "content": "the primary source of variance is not related to gradient-free optimization algorithms but", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 569, + 505, + 581 + ], + "spans": [ + { + "bbox": [ + 105, + 569, + 505, + 581 + ], + "score": 1.0, + "content": "rather associated with the LoRA candidate modules. In other words, once the candidates", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 104, + 579, + 506, + 594 + ], + "spans": [ + { + "bbox": [ + 104, + 579, + 506, + 594 + ], + "score": 1.0, + "content": "are determined, random seeds have minimal impact on the final performance. Hence, we", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 104, + 589, + 506, + 606 + ], + "spans": [ + { + "bbox": [ + 104, + 589, + 506, + 606 + ], + "score": 1.0, + "content": "posit that the observed instability primarily arises from the inherent challenge of balancing", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 601, + 361, + 615 + ], + "spans": [ + { + "bbox": [ + 106, + 601, + 361, + 615 + ], + "score": 1.0, + "content": "the quantity and quality of the LoRA module candidates.", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 21 + }, + { + "type": "title", + "bbox": [ + 108, + 629, + 260, + 643 + ], + "lines": [ + { + "bbox": [ + 104, + 628, + 262, + 646 + ], + "spans": [ + { + "bbox": [ + 104, + 628, + 262, + 646 + ], + "score": 1.0, + "content": "I The Impact of Threshold", + "type": "text" + } + ], + "index": 25 + } + ], + "index": 25 + }, + { + "type": "text", + "bbox": [ + 107, + 654, + 505, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 654, + 505, + 667 + ], + "spans": [ + { + "bbox": [ + 105, + 654, + 505, + 667 + ], + "score": 1.0, + "content": "In this section, we 
omitted the threshold in our implementation, and the results are summa-", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 106, + 666, + 505, + 677 + ], + "spans": [ + { + "bbox": [ + 106, + 666, + 505, + 677 + ], + "score": 1.0, + "content": "rized in Table 9. Our observations indicate that the removal of the threshold had minimal", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 106, + 677, + 505, + 690 + ], + "spans": [ + { + "bbox": [ + 106, + 677, + 505, + 690 + ], + "score": 1.0, + "content": "impact on the majority of tasks, underscoring the robustness of the gradient-free optimiza-", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 687, + 505, + 701 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 505, + 701 + ], + "score": 1.0, + "content": "tion algorithm itself in most cases. The algorithm efficiently identified reasonable ranges", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 698, + 505, + 711 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 505, + 711 + ], + "score": 1.0, + "content": "even without specific upper and lower bounds. However, three tasks, namely Date Under-", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 709, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 505, + 722 + ], + "score": 1.0, + "content": "standing, Disambiguation and Hyperbaton, exhibited notable effects. The resulting perfor-", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 720, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 720, + 303, + 733 + ], + "score": 1.0, + "content": "mance decline led to an average decrease of", + "type": "text" + }, + { + "bbox": [ + 304, + 720, + 326, + 731 + ], + "score": 0.84, + "content": "1 . 
2 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 326, + 720, + 505, + 733 + ], + "score": 1.0, + "content": "compared to the setting with threshold.", + "type": "text" + } + ], + "index": 32 + } + ], + "index": 29 + } + ], + "page_idx": 20, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 106, + 27, + 316, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "score": 1.0, + "content": "Published as a conference paper at COLM 2024", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 310, + 760 + ], + "lines": [] + } + ], + "para_blocks": [ + { + "type": "image", + "bbox": [ + 161, + 98, + 454, + 280 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 161, + 98, + 454, + 280 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 161, + 98, + 454, + 280 + ], + "spans": [ + { + "bbox": [ + 161, + 98, + 454, + 280 + ], + "score": 0.972, + "type": "image", + "image_path": "f08459cc633da4d25e332908058acffc5a55cf3fadee5264d074582bf20749f5.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 161, + 98, + 454, + 158.66666666666666 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 161, + 158.66666666666666, + 454, + 219.33333333333331 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 161, + 219.33333333333331, + 454, + 280.0 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 300, + 505, + 335 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 300, + 505, + 314 + ], + "spans": [ + { + "bbox": [ + 105, + 300, + 505, + 314 + ], + "score": 1.0, + "content": "Figure 3: The influence of number of LoRA modules on 15 tasks from BBH, and each box", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 312, + 505, + 324 + ], + "spans": [ + { + "bbox": [ + 105, + 312, + 505, + 324 + ], + "score": 
1.0, + "content": "is obtained from 5 separate runs. The horizontal axis shows the number of LoRA modules", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 321, + 273, + 337 + ], + "spans": [ + { + "bbox": [ + 105, + 321, + 273, + 337 + ], + "score": 1.0, + "content": "to be composed in LoraHub learning.", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 4 + } + ], + "index": 2.5 + }, + { + "type": "title", + "bbox": [ + 108, + 353, + 258, + 367 + ], + "lines": [ + { + "bbox": [ + 105, + 352, + 259, + 369 + ], + "spans": [ + { + "bbox": [ + 105, + 352, + 259, + 369 + ], + "score": 1.0, + "content": "G Implementation details", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 6 + }, + { + "type": "text", + "bbox": [ + 107, + 378, + 505, + 434 + ], + "lines": [ + { + "bbox": [ + 106, + 378, + 505, + 392 + ], + "spans": [ + { + "bbox": [ + 106, + 378, + 505, + 392 + ], + "score": 1.0, + "content": "We implemented LoRA tuning using the Huggingface PEFT library (Mangrulkar et al.,", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 389, + 505, + 403 + ], + "spans": [ + { + "bbox": [ + 105, + 389, + 505, + 403 + ], + "score": 1.0, + "content": "2022), with the rank being set as 16. The gradient-free method was implemented using the", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 400, + 505, + 414 + ], + "spans": [ + { + "bbox": [ + 105, + 400, + 505, + 414 + ], + "score": 1.0, + "content": "open-source Nevergrad optimization library (Rapin & Teytaud, 2018), with a constraint", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 410, + 505, + 425 + ], + "spans": [ + { + "bbox": [ + 105, + 410, + 505, + 425 + ], + "score": 1.0, + "content": "that the absolute value of LoRA weights should not exceed 1.5. 
Originally, all coefficients", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 422, + 260, + 435 + ], + "spans": [ + { + "bbox": [ + 106, + 422, + 260, + 435 + ], + "score": 1.0, + "content": "of LoRA modules were set at zero.", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 9, + "bbox_fs": [ + 105, + 378, + 505, + 435 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 439, + 505, + 496 + ], + "lines": [ + { + "bbox": [ + 106, + 439, + 506, + 452 + ], + "spans": [ + { + "bbox": [ + 106, + 439, + 411, + 452 + ], + "score": 1.0, + "content": "In our standard settings, we set the maximum number of iterations", + "type": "text" + }, + { + "bbox": [ + 411, + 440, + 420, + 450 + ], + "score": 0.49, + "content": "K", + "type": "inline_equation" + }, + { + "bbox": [ + 420, + 439, + 506, + 452 + ], + "score": 1.0, + "content": "as 40. The same 5", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 448, + 505, + 465 + ], + "spans": [ + { + "bbox": [ + 105, + 448, + 505, + 465 + ], + "score": 1.0, + "content": "examples were used during our LoraHub learning and the few-shot in-context learning.", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 459, + 506, + 476 + ], + "spans": [ + { + "bbox": [ + 105, + 459, + 199, + 476 + ], + "score": 1.0, + "content": "The hyperparameter", + "type": "text" + }, + { + "bbox": [ + 199, + 464, + 206, + 472 + ], + "score": 0.52, + "content": "\\alpha", + "type": "inline_equation" + }, + { + "bbox": [ + 206, + 459, + 506, + 476 + ], + "score": 1.0, + "content": "is set as 0.05. 
Regarding the hyperparameters for training candidate", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 471, + 506, + 486 + ], + "spans": [ + { + "bbox": [ + 105, + 471, + 506, + 486 + ], + "score": 1.0, + "content": "LoRA modules, we maintained consistency across all modules, setting the batch size at 64,", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 482, + 404, + 496 + ], + "spans": [ + { + "bbox": [ + 106, + 482, + 192, + 496 + ], + "score": 1.0, + "content": "the learning rate at", + "type": "text" + }, + { + "bbox": [ + 192, + 484, + 221, + 495 + ], + "score": 0.78, + "content": "1 e - 4 ,", + "type": "inline_equation" + }, + { + "bbox": [ + 222, + 482, + 404, + 496 + ], + "score": 1.0, + "content": "and the number of training epochs at 10.", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 14, + "bbox_fs": [ + 105, + 439, + 506, + 496 + ] + }, + { + "type": "title", + "bbox": [ + 106, + 510, + 344, + 524 + ], + "lines": [ + { + "bbox": [ + 104, + 509, + 345, + 527 + ], + "spans": [ + { + "bbox": [ + 104, + 509, + 345, + 527 + ], + "score": 1.0, + "content": "H Influence of Number of LoRA modules", + "type": "text" + } + ], + "index": 17 + } + ], + "index": 17 + }, + { + "type": "text", + "bbox": [ + 106, + 536, + 505, + 614 + ], + "lines": [ + { + "bbox": [ + 106, + 536, + 505, + 549 + ], + "spans": [ + { + "bbox": [ + 106, + 536, + 505, + 549 + ], + "score": 1.0, + "content": "As shown in Figure 3, with an increase in the number of LoRA module candidates, there", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 546, + 505, + 560 + ], + "spans": [ + { + "bbox": [ + 105, + 546, + 505, + 560 + ], + "score": 1.0, + "content": "is a corresponding increase in the performance variance. 
Based on our in-depth analysis,", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 558, + 505, + 570 + ], + "spans": [ + { + "bbox": [ + 105, + 558, + 505, + 570 + ], + "score": 1.0, + "content": "the primary source of variance is not related to gradient-free optimization algorithms but", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 569, + 505, + 581 + ], + "spans": [ + { + "bbox": [ + 105, + 569, + 505, + 581 + ], + "score": 1.0, + "content": "rather associated with the LoRA candidate modules. In other words, once the candidates", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 104, + 579, + 506, + 594 + ], + "spans": [ + { + "bbox": [ + 104, + 579, + 506, + 594 + ], + "score": 1.0, + "content": "are determined, random seeds have minimal impact on the final performance. Hence, we", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 104, + 589, + 506, + 606 + ], + "spans": [ + { + "bbox": [ + 104, + 589, + 506, + 606 + ], + "score": 1.0, + "content": "posit that the observed instability primarily arises from the inherent challenge of balancing", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 601, + 361, + 615 + ], + "spans": [ + { + "bbox": [ + 106, + 601, + 361, + 615 + ], + "score": 1.0, + "content": "the quantity and quality of the LoRA module candidates.", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 21, + "bbox_fs": [ + 104, + 536, + 506, + 615 + ] + }, + { + "type": "title", + "bbox": [ + 108, + 629, + 260, + 643 + ], + "lines": [ + { + "bbox": [ + 104, + 628, + 262, + 646 + ], + "spans": [ + { + "bbox": [ + 104, + 628, + 262, + 646 + ], + "score": 1.0, + "content": "I The Impact of Threshold", + "type": "text" + } + ], + "index": 25 + } + ], + "index": 25 + }, + { + "type": "text", + "bbox": [ + 107, + 654, + 505, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 654, + 505, + 667 + ], + "spans": [ + { + "bbox": [ + 105, + 654, + 505, + 667 + ], + 
"score": 1.0, + "content": "In this section, we omitted the threshold in our implementation, and the results are summa-", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 106, + 666, + 505, + 677 + ], + "spans": [ + { + "bbox": [ + 106, + 666, + 505, + 677 + ], + "score": 1.0, + "content": "rized in Table 9. Our observations indicate that the removal of the threshold had minimal", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 106, + 677, + 505, + 690 + ], + "spans": [ + { + "bbox": [ + 106, + 677, + 505, + 690 + ], + "score": 1.0, + "content": "impact on the majority of tasks, underscoring the robustness of the gradient-free optimiza-", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 687, + 505, + 701 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 505, + 701 + ], + "score": 1.0, + "content": "tion algorithm itself in most cases. The algorithm efficiently identified reasonable ranges", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 698, + 505, + 711 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 505, + 711 + ], + "score": 1.0, + "content": "even without specific upper and lower bounds. However, three tasks, namely Date Under-", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 709, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 505, + 722 + ], + "score": 1.0, + "content": "standing, Disambiguation and Hyperbaton, exhibited notable effects. The resulting perfor-", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 720, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 720, + 303, + 733 + ], + "score": 1.0, + "content": "mance decline led to an average decrease of", + "type": "text" + }, + { + "bbox": [ + 304, + 720, + 326, + 731 + ], + "score": 0.84, + "content": "1 . 
2 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 326, + 720, + 505, + 733 + ], + "score": 1.0, + "content": "compared to the setting with threshold.", + "type": "text" + } + ], + "index": 32 + } + ], + "index": 29, + "bbox_fs": [ + 105, + 654, + 505, + 733 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 82, + 505, + 106 + ], + "lines": [ + { + "bbox": [ + 105, + 80, + 505, + 97 + ], + "spans": [ + { + "bbox": [ + 105, + 80, + 505, + 97 + ], + "score": 1.0, + "content": "This highlights the significance of establishing a reasonable threshold to mitigate extreme", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 94, + 153, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 94, + 153, + 106 + ], + "score": 1.0, + "content": "scenarios.", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 0.5 + }, + { + "type": "table", + "bbox": [ + 108, + 133, + 512, + 510 + ], + "blocks": [ + { + "type": "table_caption", + "bbox": [ + 136, + 113, + 475, + 125 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 136, + 112, + 475, + 126 + ], + "spans": [ + { + "bbox": [ + 136, + 112, + 475, + 126 + ], + "score": 1.0, + "content": "Table 9: The comparsion between LoraHub and LoraHub without threshold.", + "type": "text" + } + ], + "index": 2 + } + ], + "index": 2 + }, + { + "type": "table_body", + "bbox": [ + 108, + 133, + 512, + 510 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 108, + 133, + 512, + 510 + ], + "spans": [ + { + "bbox": [ + 108, + 133, + 512, + 510 + ], + "score": 0.984, + "html": "
TaskLoraHubavg with thresholdLoraHubavg without threshold
Boolean Expressions55.554.0
Causal Judgement54.354.8
Date Understanding32.917.7
Disambiguation45.240.6
Dyck Languages1.01.1
Formal Fallacies52.851.7
Geometric Shapes7.46.7
Hyperbaton62.855.5
Logical DeductionS (five objects)36.136.5
Logical DeductionS (seven objects)36.835.6
Logical DeductionS45.7
(three objects) Movie Recommendation49.9
Multistep Arithmetic55.359.3
Navigate0.40.7
Object Counting47.147.6
33.734.7
Penguins in a Table35.933.8
Reasoning about Colored Objects40.037.9
Ruin Names24.424.0
Salient Translation Error Detection36.037.1
Snarks56.951.6
Sports Understanding56.755.9
Temporal Sequences18.216.7
Tracking Shuffled ObjectsS (five objects)12.312.3
Tracking Shuffled ObjectsS (seven objects)7.78.5
Tracking Shuffled ObjectsS (three objects)29.229.8
Web of Lies50.150.3
Word Sorting1.11.3
Avg Performance Per Task34.733.5
", + "type": "table", + "image_path": "522e1fa23ba78543a5afefbbeddc87850fa222239fd245c96bbb6d9c91774129.jpg" + } + ] + } + ], + "index": 4, + "virtual_lines": [ + { + "bbox": [ + 108, + 133, + 512, + 258.6666666666667 + ], + "spans": [], + "index": 3 + }, + { + "bbox": [ + 108, + 258.6666666666667, + 512, + 384.33333333333337 + ], + "spans": [], + "index": 4 + }, + { + "bbox": [ + 108, + 384.33333333333337, + 512, + 510.00000000000006 + ], + "spans": [], + "index": 5 + } + ] + } + ], + "index": 3.0 + } + ], + "page_idx": 21, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 26, + 316, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "score": 1.0, + "content": "Published as a conference paper at COLM 2024", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 760 + ], + "lines": [ + { + "bbox": [ + 298, + 750, + 313, + 765 + ], + "spans": [ + { + "bbox": [ + 298, + 750, + 313, + 765 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 15, + "width": 15 + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 82, + 505, + 106 + ], + "lines": [ + { + "bbox": [ + 105, + 80, + 505, + 97 + ], + "spans": [ + { + "bbox": [ + 105, + 80, + 505, + 97 + ], + "score": 1.0, + "content": "This highlights the significance of establishing a reasonable threshold to mitigate extreme", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 94, + 153, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 94, + 153, + 106 + ], + "score": 1.0, + "content": "scenarios.", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 0.5, + "bbox_fs": [ + 105, + 80, + 505, + 106 + ] + }, + { + "type": "table", + "bbox": [ + 108, + 133, + 512, + 510 + ], + "blocks": [ + { + "type": "table_caption", + "bbox": [ + 136, + 113, + 475, + 125 + ], + "group_id": 0, + 
"lines": [ + { + "bbox": [ + 136, + 112, + 475, + 126 + ], + "spans": [ + { + "bbox": [ + 136, + 112, + 475, + 126 + ], + "score": 1.0, + "content": "Table 9: The comparsion between LoraHub and LoraHub without threshold.", + "type": "text" + } + ], + "index": 2 + } + ], + "index": 2 + }, + { + "type": "table_body", + "bbox": [ + 108, + 133, + 512, + 510 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 108, + 133, + 512, + 510 + ], + "spans": [ + { + "bbox": [ + 108, + 133, + 512, + 510 + ], + "score": 0.984, + "html": "
TaskLoraHubavg with thresholdLoraHubavg without threshold
Boolean Expressions55.554.0
Causal Judgement54.354.8
Date Understanding32.917.7
Disambiguation45.240.6
Dyck Languages1.01.1
Formal Fallacies52.851.7
Geometric Shapes7.46.7
Hyperbaton62.855.5
Logical DeductionS (five objects)36.136.5
Logical DeductionS (seven objects)36.835.6
Logical DeductionS45.7
(three objects) Movie Recommendation49.9
Multistep Arithmetic55.359.3
Navigate0.40.7
Object Counting47.147.6
33.734.7
Penguins in a Table35.933.8
Reasoning about Colored Objects40.037.9
Ruin Names24.424.0
Salient Translation Error Detection36.037.1
Snarks56.951.6
Sports Understanding56.755.9
Temporal Sequences18.216.7
Tracking Shuffled ObjectsS (five objects)12.312.3
Tracking Shuffled ObjectsS (seven objects)7.78.5
Tracking Shuffled ObjectsS (three objects)29.229.8
Web of Lies50.150.3
Word Sorting1.11.3
Avg Performance Per Task34.733.5
", + "type": "table", + "image_path": "522e1fa23ba78543a5afefbbeddc87850fa222239fd245c96bbb6d9c91774129.jpg" + } + ] + } + ], + "index": 4, + "virtual_lines": [ + { + "bbox": [ + 108, + 133, + 512, + 258.6666666666667 + ], + "spans": [], + "index": 3 + }, + { + "bbox": [ + 108, + 258.6666666666667, + 512, + 384.33333333333337 + ], + "spans": [], + "index": 4 + }, + { + "bbox": [ + 108, + 384.33333333333337, + 512, + 510.00000000000006 + ], + "spans": [], + "index": 5 + } + ] + } + ], + "index": 3.0 + } + ] + } + ], + "_backend": "pipeline", + "_version_name": "2.1.11" +} \ No newline at end of file diff --git a/parse/test/TrloAXEJ2B/TrloAXEJ2B_model.json b/parse/test/TrloAXEJ2B/TrloAXEJ2B_model.json new file mode 100644 index 0000000000000000000000000000000000000000..3c0629296728a8a27afd33e13c65bc9a330e60ba --- /dev/null +++ b/parse/test/TrloAXEJ2B/TrloAXEJ2B_model.json @@ -0,0 +1,19911 @@ +[ + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 398, + 665, + 1302, + 665, + 1302, + 1307, + 398, + 1307 + ], + "score": 0.983 + }, + { + "category_id": 1, + "poly": [ + 298, + 1860, + 1403, + 1860, + 1403, + 1953, + 298, + 1953 + ], + "score": 0.965 + }, + { + "category_id": 3, + "poly": [ + 305, + 1445, + 1397, + 1445, + 1397, + 1641, + 305, + 1641 + ], + "score": 0.96 + }, + { + "category_id": 0, + "poly": [ + 309, + 226, + 1398, + 226, + 1398, + 321, + 309, + 321 + ], + "score": 0.951 + }, + { + "category_id": 4, + "poly": [ + 297, + 1664, + 1404, + 1664, + 1404, + 1818, + 297, + 1818 + ], + "score": 0.935 + }, + { + "category_id": 2, + "poly": [ + 298, + 1977, + 1401, + 1977, + 1401, + 2033, + 298, + 2033 + ], + "score": 0.924 + }, + { + "category_id": 0, + "poly": [ + 298, + 1368, + 542, + 1368, + 542, + 1406, + 298, + 1406 + ], + "score": 0.907 + }, + { + "category_id": 2, + "poly": [ + 298, + 74, + 879, + 74, + 879, + 106, + 298, + 106 + ], + "score": 0.891 + }, + { + "category_id": 0, + "poly": [ + 786, + 592, + 914, + 592, + 914, + 630, + 786, + 630 + ], 
+ "score": 0.887 + }, + { + "category_id": 2, + "poly": [ + 841, + 2088, + 857, + 2088, + 857, + 2112, + 841, + 2112 + ], + "score": 0.762 + }, + { + "category_id": 1, + "poly": [ + 321, + 376, + 1441, + 376, + 1441, + 515, + 321, + 515 + ], + "score": 0.661 + }, + { + "category_id": 13, + "poly": [ + 911, + 377, + 985, + 377, + 985, + 413, + 911, + 413 + ], + "score": 0.7, + "latex": "\\mathbf { L i n } ^ { \\bigotimes * }" + }, + { + "category_id": 13, + "poly": [ + 1254, + 377, + 1310, + 377, + 1310, + 412, + 1254, + 412 + ], + "score": 0.49, + "latex": "{ { \\mathbf { D } } { { \\mathbf { u } } } ^ { \\dag } }" + }, + { + "category_id": 13, + "poly": [ + 559, + 377, + 593, + 377, + 593, + 410, + 559, + 410 + ], + "score": 0.29, + "latex": "\\mathbf { \\Delta } \\mathbf { \\dag \\ S \\mathrm { \\ s \\mathrm { \\ s } } }" + }, + { + "category_id": 15, + "poly": [ + 337.0, + 1445.0, + 533.0, + 1445.0, + 533.0, + 1474.0, + 337.0, + 1474.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 728.0, + 1445.0, + 770.0, + 1445.0, + 770.0, + 1482.0, + 728.0, + 1482.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 885.0, + 1443.0, + 934.0, + 1443.0, + 934.0, + 1484.0, + 885.0, + 1484.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1166.0, + 1445.0, + 1210.0, + 1445.0, + 1210.0, + 1481.0, + 1166.0, + 1481.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 337.0, + 1476.0, + 619.0, + 1476.0, + 619.0, + 1505.0, + 337.0, + 1505.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 336.0, + 1507.0, + 608.0, + 1507.0, + 608.0, + 1538.0, + 336.0, + 1538.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 681.0, + 1498.0, + 798.0, + 1498.0, + 798.0, + 1552.0, + 681.0, + 1552.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 843.0, + 1500.0, + 954.0, + 1500.0, + 954.0, + 1547.0, + 
843.0, + 1547.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1000.0, + 1493.0, + 1242.0, + 1493.0, + 1242.0, + 1547.0, + 1000.0, + 1547.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1246.0, + 1503.0, + 1384.0, + 1503.0, + 1384.0, + 1543.0, + 1246.0, + 1543.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1184.0, + 1528.0, + 1215.0, + 1528.0, + 1215.0, + 1552.0, + 1184.0, + 1552.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1173.0, + 1534.0, + 1181.0, + 1534.0, + 1181.0, + 1546.0, + 1173.0, + 1546.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 428.0, + 1552.0, + 480.0, + 1552.0, + 480.0, + 1585.0, + 428.0, + 1585.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1120.0, + 1547.0, + 1133.0, + 1547.0, + 1133.0, + 1568.0, + 1120.0, + 1568.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1359.0, + 1554.0, + 1379.0, + 1554.0, + 1379.0, + 1567.0, + 1359.0, + 1567.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 344.0, + 1569.0, + 394.0, + 1569.0, + 394.0, + 1597.0, + 344.0, + 1597.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 516.0, + 1560.0, + 560.0, + 1560.0, + 560.0, + 1588.0, + 516.0, + 1588.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 331.0, + 1606.0, + 604.0, + 1606.0, + 604.0, + 1638.0, + 331.0, + 1638.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 651.0, + 1605.0, + 880.0, + 1605.0, + 880.0, + 1641.0, + 651.0, + 1641.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 969.0, + 1602.0, + 1209.0, + 1602.0, + 1209.0, + 1642.0, + 969.0, + 1642.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 304.0, + 224.0, + 342.0, + 224.0, + 342.0, + 272.0, + 304.0, + 
272.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 346.0, + 219.0, + 1404.0, + 219.0, + 1404.0, + 285.0, + 346.0, + 285.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 300.0, + 271.0, + 781.0, + 271.0, + 781.0, + 325.0, + 300.0, + 325.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1662.0, + 1404.0, + 1662.0, + 1404.0, + 1699.0, + 295.0, + 1699.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1692.0, + 1405.0, + 1692.0, + 1405.0, + 1730.0, + 292.0, + 1730.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1724.0, + 1407.0, + 1724.0, + 1407.0, + 1765.0, + 294.0, + 1765.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1754.0, + 1405.0, + 1754.0, + 1405.0, + 1793.0, + 292.0, + 1793.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1789.0, + 447.0, + 1789.0, + 447.0, + 1819.0, + 296.0, + 1819.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 327.0, + 1970.0, + 1407.0, + 1970.0, + 1407.0, + 2012.0, + 327.0, + 2012.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1999.0, + 500.0, + 1999.0, + 500.0, + 2037.0, + 293.0, + 2037.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1367.0, + 545.0, + 1367.0, + 545.0, + 1412.0, + 292.0, + 1412.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 71.0, + 879.0, + 71.0, + 879.0, + 111.0, + 295.0, + 111.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 782.0, + 592.0, + 921.0, + 592.0, + 921.0, + 634.0, + 782.0, + 634.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 838.0, + 2085.0, + 862.0, + 2085.0, + 862.0, + 2121.0, + 838.0, + 2121.0 + ], + "score": 1.0, + "text": "" + 
}, + { + "category_id": 15, + "poly": [ + 393.0, + 665.0, + 1304.0, + 665.0, + 1304.0, + 702.0, + 393.0, + 702.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 390.0, + 694.0, + 1307.0, + 694.0, + 1307.0, + 734.0, + 390.0, + 734.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 391.0, + 727.0, + 1307.0, + 727.0, + 1307.0, + 764.0, + 391.0, + 764.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 392.0, + 757.0, + 1306.0, + 757.0, + 1306.0, + 793.0, + 392.0, + 793.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 394.0, + 788.0, + 1306.0, + 788.0, + 1306.0, + 824.0, + 394.0, + 824.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 392.0, + 819.0, + 1305.0, + 819.0, + 1305.0, + 854.0, + 392.0, + 854.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 848.0, + 1307.0, + 848.0, + 1307.0, + 887.0, + 393.0, + 887.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 878.0, + 1304.0, + 878.0, + 1304.0, + 918.0, + 393.0, + 918.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 910.0, + 1305.0, + 910.0, + 1305.0, + 946.0, + 393.0, + 946.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 939.0, + 1304.0, + 939.0, + 1304.0, + 978.0, + 393.0, + 978.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 970.0, + 1305.0, + 970.0, + 1305.0, + 1009.0, + 393.0, + 1009.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 392.0, + 999.0, + 1305.0, + 999.0, + 1305.0, + 1040.0, + 392.0, + 1040.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 1030.0, + 1305.0, + 1030.0, + 1305.0, + 1070.0, + 393.0, + 1070.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 
1061.0, + 1305.0, + 1061.0, + 1305.0, + 1099.0, + 393.0, + 1099.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 392.0, + 1091.0, + 1306.0, + 1091.0, + 1306.0, + 1129.0, + 392.0, + 1129.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 1122.0, + 1306.0, + 1122.0, + 1306.0, + 1158.0, + 393.0, + 1158.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 392.0, + 1152.0, + 1307.0, + 1152.0, + 1307.0, + 1189.0, + 392.0, + 1189.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 394.0, + 1183.0, + 1305.0, + 1183.0, + 1305.0, + 1219.0, + 394.0, + 1219.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 394.0, + 1213.0, + 1306.0, + 1213.0, + 1306.0, + 1250.0, + 394.0, + 1250.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 392.0, + 1241.0, + 1305.0, + 1241.0, + 1305.0, + 1283.0, + 392.0, + 1283.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 395.0, + 1276.0, + 1009.0, + 1276.0, + 1009.0, + 1309.0, + 395.0, + 1309.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1857.0, + 1404.0, + 1857.0, + 1404.0, + 1898.0, + 293.0, + 1898.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1888.0, + 1405.0, + 1888.0, + 1405.0, + 1931.0, + 291.0, + 1931.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1917.0, + 1405.0, + 1917.0, + 1405.0, + 1961.0, + 292.0, + 1961.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 373.0, + 558.0, + 373.0, + 558.0, + 417.0, + 321.0, + 417.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 594.0, + 373.0, + 910.0, + 373.0, + 910.0, + 417.0, + 594.0, + 417.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 986.0, + 373.0, + 1253.0, + 
373.0, + 1253.0, + 417.0, + 986.0, + 417.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1311.0, + 373.0, + 1441.0, + 373.0, + 1441.0, + 417.0, + 1311.0, + 417.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 723.0, + 409.0, + 1013.0, + 409.0, + 1013.0, + 452.0, + 723.0, + 452.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 572.0, + 440.0, + 1161.0, + 440.0, + 1161.0, + 488.0, + 572.0, + 488.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 609.0, + 475.0, + 1120.0, + 475.0, + 1120.0, + 520.0, + 609.0, + 520.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 0, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 298, + 1466, + 1404, + 1466, + 1404, + 1835, + 298, + 1835 + ], + "score": 0.983 + }, + { + "category_id": 1, + "poly": [ + 298, + 460, + 1404, + 460, + 1404, + 918, + 298, + 918 + ], + "score": 0.982 + }, + { + "category_id": 1, + "poly": [ + 298, + 934, + 1404, + 934, + 1404, + 1452, + 298, + 1452 + ], + "score": 0.982 + }, + { + "category_id": 1, + "poly": [ + 298, + 229, + 1402, + 229, + 1402, + 446, + 298, + 446 + ], + "score": 0.977 + }, + { + "category_id": 1, + "poly": [ + 298, + 1971, + 1399, + 1971, + 1399, + 2034, + 298, + 2034 + ], + "score": 0.941 + }, + { + "category_id": 0, + "poly": [ + 300, + 1892, + 642, + 1892, + 642, + 1929, + 300, + 1929 + ], + "score": 0.906 + }, + { + "category_id": 2, + "poly": [ + 298, + 75, + 878, + 75, + 878, + 106, + 298, + 106 + ], + "score": 0.898 + }, + { + "category_id": 2, + "poly": [ + 841, + 2088, + 858, + 2088, + 858, + 2112, + 841, + 2112 + ], + "score": 0.749 + }, + { + "category_id": 13, + "poly": [ + 1128, + 1973, + 1171, + 1973, + 1171, + 2004, + 1128, + 2004 + ], + "score": 0.88, + "latex": "M _ { \\theta }" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1888.0, + 646.0, + 1888.0, + 646.0, + 
1935.0, + 291.0, + 1935.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 71.0, + 880.0, + 71.0, + 880.0, + 110.0, + 295.0, + 110.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 839.0, + 2085.0, + 862.0, + 2085.0, + 862.0, + 2121.0, + 839.0, + 2121.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1466.0, + 1405.0, + 1466.0, + 1405.0, + 1504.0, + 296.0, + 1504.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1497.0, + 1406.0, + 1497.0, + 1406.0, + 1536.0, + 292.0, + 1536.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1527.0, + 1404.0, + 1527.0, + 1404.0, + 1565.0, + 294.0, + 1565.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1556.0, + 1406.0, + 1556.0, + 1406.0, + 1598.0, + 292.0, + 1598.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1588.0, + 1405.0, + 1588.0, + 1405.0, + 1626.0, + 293.0, + 1626.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1618.0, + 1407.0, + 1618.0, + 1407.0, + 1657.0, + 291.0, + 1657.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1649.0, + 1406.0, + 1649.0, + 1406.0, + 1687.0, + 292.0, + 1687.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1679.0, + 1405.0, + 1679.0, + 1405.0, + 1716.0, + 293.0, + 1716.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1709.0, + 1407.0, + 1709.0, + 1407.0, + 1748.0, + 292.0, + 1748.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1738.0, + 1406.0, + 1738.0, + 1406.0, + 1780.0, + 291.0, + 1780.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1770.0, + 1406.0, + 1770.0, + 1406.0, + 1812.0, + 292.0, + 
1812.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1802.0, + 464.0, + 1802.0, + 464.0, + 1841.0, + 293.0, + 1841.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 456.0, + 1406.0, + 456.0, + 1406.0, + 497.0, + 293.0, + 497.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 487.0, + 1405.0, + 487.0, + 1405.0, + 530.0, + 292.0, + 530.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 520.0, + 1405.0, + 520.0, + 1405.0, + 558.0, + 293.0, + 558.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 550.0, + 1406.0, + 550.0, + 1406.0, + 588.0, + 292.0, + 588.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 581.0, + 1406.0, + 581.0, + 1406.0, + 619.0, + 293.0, + 619.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 608.0, + 1408.0, + 608.0, + 1408.0, + 652.0, + 291.0, + 652.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 641.0, + 1407.0, + 641.0, + 1407.0, + 678.0, + 292.0, + 678.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 673.0, + 1407.0, + 673.0, + 1407.0, + 709.0, + 291.0, + 709.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 701.0, + 1408.0, + 701.0, + 1408.0, + 740.0, + 293.0, + 740.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 731.0, + 1406.0, + 731.0, + 1406.0, + 772.0, + 291.0, + 772.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 762.0, + 1408.0, + 762.0, + 1408.0, + 801.0, + 293.0, + 801.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 789.0, + 1406.0, + 789.0, + 1406.0, + 835.0, + 292.0, + 835.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 293.0, + 824.0, + 1404.0, + 824.0, + 1404.0, + 862.0, + 293.0, + 862.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 855.0, + 1406.0, + 855.0, + 1406.0, + 893.0, + 293.0, + 893.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 882.0, + 1094.0, + 882.0, + 1094.0, + 926.0, + 293.0, + 926.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 929.0, + 1405.0, + 929.0, + 1405.0, + 970.0, + 293.0, + 970.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 960.0, + 1408.0, + 960.0, + 1408.0, + 1001.0, + 293.0, + 1001.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 991.0, + 1405.0, + 991.0, + 1405.0, + 1030.0, + 293.0, + 1030.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1017.0, + 1405.0, + 1017.0, + 1405.0, + 1066.0, + 292.0, + 1066.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1050.0, + 1408.0, + 1050.0, + 1408.0, + 1096.0, + 291.0, + 1096.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1084.0, + 1405.0, + 1084.0, + 1405.0, + 1123.0, + 294.0, + 1123.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1116.0, + 1404.0, + 1116.0, + 1404.0, + 1152.0, + 294.0, + 1152.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1144.0, + 1405.0, + 1144.0, + 1405.0, + 1184.0, + 293.0, + 1184.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1172.0, + 1408.0, + 1172.0, + 1408.0, + 1219.0, + 292.0, + 1219.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1205.0, + 1405.0, + 1205.0, + 1405.0, + 1243.0, + 292.0, + 1243.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ 
+ 293.0, + 1235.0, + 1405.0, + 1235.0, + 1405.0, + 1275.0, + 293.0, + 1275.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1264.0, + 1406.0, + 1264.0, + 1406.0, + 1308.0, + 291.0, + 1308.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1297.0, + 1406.0, + 1297.0, + 1406.0, + 1337.0, + 293.0, + 1337.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1326.0, + 1408.0, + 1326.0, + 1408.0, + 1369.0, + 292.0, + 1369.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1358.0, + 1405.0, + 1358.0, + 1405.0, + 1396.0, + 292.0, + 1396.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1387.0, + 1409.0, + 1387.0, + 1409.0, + 1431.0, + 292.0, + 1431.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1421.0, + 704.0, + 1421.0, + 704.0, + 1456.0, + 293.0, + 1456.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 227.0, + 1404.0, + 227.0, + 1404.0, + 268.0, + 292.0, + 268.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 260.0, + 1407.0, + 260.0, + 1407.0, + 300.0, + 292.0, + 300.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 288.0, + 1404.0, + 288.0, + 1404.0, + 329.0, + 293.0, + 329.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 318.0, + 1404.0, + 318.0, + 1404.0, + 359.0, + 293.0, + 359.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 349.0, + 1407.0, + 349.0, + 1407.0, + 391.0, + 292.0, + 391.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 378.0, + 1407.0, + 378.0, + 1407.0, + 423.0, + 292.0, + 423.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 411.0, + 1131.0, + 411.0, 
+ 1131.0, + 453.0, + 293.0, + 453.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1969.0, + 1127.0, + 1969.0, + 1127.0, + 2009.0, + 294.0, + 2009.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1172.0, + 1969.0, + 1404.0, + 1969.0, + 1404.0, + 2009.0, + 1172.0, + 2009.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1999.0, + 1403.0, + 1999.0, + 1403.0, + 2038.0, + 294.0, + 2038.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 1, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 298, + 1406, + 1404, + 1406, + 1404, + 1870, + 298, + 1870 + ], + "score": 0.984 + }, + { + "category_id": 1, + "poly": [ + 297, + 387, + 1404, + 387, + 1404, + 759, + 297, + 759 + ], + "score": 0.982 + }, + { + "category_id": 1, + "poly": [ + 298, + 794, + 1403, + 794, + 1403, + 1101, + 298, + 1101 + ], + "score": 0.982 + }, + { + "category_id": 1, + "poly": [ + 299, + 228, + 1402, + 228, + 1402, + 353, + 299, + 353 + ], + "score": 0.973 + }, + { + "category_id": 1, + "poly": [ + 299, + 1217, + 1398, + 1217, + 1398, + 1311, + 299, + 1311 + ], + "score": 0.962 + }, + { + "category_id": 1, + "poly": [ + 300, + 1965, + 1402, + 1965, + 1402, + 2035, + 300, + 2035 + ], + "score": 0.95 + }, + { + "category_id": 0, + "poly": [ + 299, + 1348, + 593, + 1348, + 593, + 1381, + 299, + 1381 + ], + "score": 0.912 + }, + { + "category_id": 0, + "poly": [ + 301, + 1908, + 763, + 1908, + 763, + 1942, + 301, + 1942 + ], + "score": 0.911 + }, + { + "category_id": 0, + "poly": [ + 298, + 1145, + 556, + 1145, + 556, + 1185, + 298, + 1185 + ], + "score": 0.906 + }, + { + "category_id": 2, + "poly": [ + 298, + 75, + 878, + 75, + 878, + 106, + 298, + 106 + ], + "score": 0.901 + }, + { + "category_id": 2, + "poly": [ + 841, + 2088, + 858, + 2088, + 858, + 2112, + 841, + 2112 + ], + "score": 0.728 + }, + { + "category_id": 
13, + "poly": [ + 759, + 1590, + 968, + 1590, + 968, + 1624, + 759, + 1624 + ], + "score": 0.92, + "latex": "\\left\\{ w _ { 1 } , w _ { 2 } , \\dots , w _ { N } \\right\\}" + }, + { + "category_id": 13, + "poly": [ + 968, + 694, + 1013, + 694, + 1013, + 729, + 968, + 729 + ], + "score": 0.92, + "latex": "M _ { \\phi }" + }, + { + "category_id": 13, + "poly": [ + 1201, + 1998, + 1340, + 1998, + 1340, + 2034, + 1201, + 2034 + ], + "score": 0.92, + "latex": "W _ { 0 } \\in \\bar { R } ^ { d \\times k } ," + }, + { + "category_id": 13, + "poly": [ + 582, + 1468, + 669, + 1468, + 669, + 1499, + 582, + 1499 + ], + "score": 0.92, + "latex": "\\mathscr { T } _ { i } \\in \\mathbf { \\hat { T } }" + }, + { + "category_id": 13, + "poly": [ + 365, + 1803, + 616, + 1803, + 616, + 1839, + 365, + 1839 + ], + "score": 0.91, + "latex": "M _ { \\phi } = \\mathrm { L o R A } ( \\hat { M } _ { \\theta } , \\hat { m } )" + }, + { + "category_id": 13, + "poly": [ + 1185, + 570, + 1393, + 570, + 1393, + 604, + 1185, + 604 + ], + "score": 0.9, + "latex": "\\mathbb { T } = \\{ \\mathcal { T } _ { 1 } , . . . 
, \\mathcal { T } _ { N } \\}" + }, + { + "category_id": 13, + "poly": [ + 1046, + 1468, + 1143, + 1468, + 1143, + 1499, + 1046, + 1499 + ], + "score": 0.89, + "latex": "\\mathcal { T } ^ { \\prime } \\notin \\mathbb { T } ," + }, + { + "category_id": 13, + "poly": [ + 432, + 694, + 474, + 694, + 474, + 725, + 432, + 725 + ], + "score": 0.89, + "latex": "{ \\mathrm { { \\dot { M } } } } _ { \\theta }" + }, + { + "category_id": 13, + "poly": [ + 297, + 663, + 340, + 663, + 340, + 695, + 297, + 695 + ], + "score": 0.89, + "latex": "M _ { \\theta }" + }, + { + "category_id": 13, + "poly": [ + 476, + 1682, + 519, + 1682, + 519, + 1714, + 476, + 1714 + ], + "score": 0.88, + "latex": "M _ { \\theta }" + }, + { + "category_id": 13, + "poly": [ + 1295, + 1681, + 1330, + 1681, + 1330, + 1710, + 1295, + 1710 + ], + "score": 0.87, + "latex": "\\mathbf { \\breve { { \\mathbf { \\nabla } } } } _ { \\mathbf { \\mathbf { \\mathbf { \\mathbf { \\mathcal { T } } } } } ^ { \\prime } }" + }, + { + "category_id": 13, + "poly": [ + 1137, + 1773, + 1179, + 1773, + 1179, + 1805, + 1137, + 1805 + ], + "score": 0.87, + "latex": "M _ { \\theta }" + }, + { + "category_id": 13, + "poly": [ + 442, + 1472, + 477, + 1472, + 477, + 1500, + 442, + 1500 + ], + "score": 0.87, + "latex": "m _ { i }" + }, + { + "category_id": 13, + "poly": [ + 705, + 262, + 748, + 262, + 748, + 293, + 705, + 293 + ], + "score": 0.86, + "latex": "M _ { \\theta }" + }, + { + "category_id": 13, + "poly": [ + 581, + 662, + 616, + 662, + 616, + 692, + 581, + 692 + ], + "score": 0.86, + "latex": "\\tau ^ { \\prime }" + }, + { + "category_id": 13, + "poly": [ + 1236, + 1593, + 1269, + 1593, + 1269, + 1622, + 1236, + 1622 + ], + "score": 0.85, + "latex": "w _ { i }" + }, + { + "category_id": 13, + "poly": [ + 1303, + 603, + 1396, + 603, + 1396, + 633, + 1303, + 633 + ], + "score": 0.84, + "latex": "\\mathcal { T } ^ { \\prime } \\notin \\mathbb { T } ," + }, + { + "category_id": 13, + "poly": [ + 297, + 727, + 331, + 727, + 
331, + 757, + 297, + 757 + ], + "score": 0.83, + "latex": "\\tau ^ { \\prime }" + }, + { + "category_id": 13, + "poly": [ + 297, + 1837, + 332, + 1837, + 332, + 1869, + 297, + 1869 + ], + "score": 0.82, + "latex": "\\tau ^ { \\prime }" + }, + { + "category_id": 13, + "poly": [ + 1056, + 1439, + 1084, + 1439, + 1084, + 1466, + 1056, + 1466 + ], + "score": 0.75, + "latex": "N" + }, + { + "category_id": 13, + "poly": [ + 489, + 1439, + 517, + 1439, + 517, + 1466, + 489, + 1466 + ], + "score": 0.72, + "latex": "N" + }, + { + "category_id": 13, + "poly": [ + 1173, + 1652, + 1200, + 1652, + 1200, + 1680, + 1173, + 1680 + ], + "score": 0.7, + "latex": "\\hat { m }" + }, + { + "category_id": 13, + "poly": [ + 641, + 1591, + 668, + 1591, + 668, + 1620, + 641, + 1620 + ], + "score": 0.66, + "latex": "\\hat { m } _ { - }" + }, + { + "category_id": 13, + "poly": [ + 593, + 694, + 625, + 694, + 625, + 726, + 593, + 726 + ], + "score": 0.64, + "latex": "Q ," + }, + { + "category_id": 13, + "poly": [ + 1181, + 1717, + 1207, + 1717, + 1207, + 1742, + 1181, + 1742 + ], + "score": 0.61, + "latex": "w _ { . 
}" + }, + { + "category_id": 13, + "poly": [ + 868, + 1500, + 895, + 1500, + 895, + 1530, + 868, + 1530 + ], + "score": 0.61, + "latex": "Q" + }, + { + "category_id": 13, + "poly": [ + 297, + 572, + 326, + 572, + 326, + 600, + 297, + 600 + ], + "score": 0.59, + "latex": "N" + }, + { + "category_id": 13, + "poly": [ + 936, + 1743, + 962, + 1743, + 962, + 1775, + 936, + 1775 + ], + "score": 0.58, + "latex": "Q" + }, + { + "category_id": 13, + "poly": [ + 1377, + 1743, + 1402, + 1743, + 1402, + 1771, + 1377, + 1771 + ], + "score": 0.55, + "latex": "K" + }, + { + "category_id": 13, + "poly": [ + 1260, + 728, + 1287, + 728, + 1287, + 760, + 1260, + 760 + ], + "score": 0.53, + "latex": "Q" + }, + { + "category_id": 13, + "poly": [ + 764, + 663, + 791, + 663, + 791, + 695, + 764, + 695 + ], + "score": 0.36, + "latex": "Q" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1346.0, + 597.0, + 1346.0, + 597.0, + 1385.0, + 293.0, + 1385.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1904.0, + 767.0, + 1904.0, + 767.0, + 1949.0, + 292.0, + 1949.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 288.0, + 1138.0, + 560.0, + 1138.0, + 560.0, + 1197.0, + 288.0, + 1197.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 71.0, + 880.0, + 71.0, + 880.0, + 110.0, + 295.0, + 110.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 838.0, + 2084.0, + 861.0, + 2084.0, + 861.0, + 2117.0, + 838.0, + 2117.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1405.0, + 1402.0, + 1405.0, + 1402.0, + 1444.0, + 294.0, + 1444.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1437.0, + 488.0, + 1437.0, + 488.0, + 1476.0, + 294.0, + 1476.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 518.0, + 1437.0, + 1055.0, + 1437.0, + 1055.0, + 1476.0, + 518.0, + 1476.0 
+ ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1085.0, + 1437.0, + 1404.0, + 1437.0, + 1404.0, + 1476.0, + 1085.0, + 1476.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1466.0, + 441.0, + 1466.0, + 441.0, + 1503.0, + 292.0, + 1503.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 478.0, + 1466.0, + 581.0, + 1466.0, + 581.0, + 1503.0, + 478.0, + 1503.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 670.0, + 1466.0, + 1045.0, + 1466.0, + 1045.0, + 1503.0, + 670.0, + 1503.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1144.0, + 1466.0, + 1405.0, + 1466.0, + 1405.0, + 1503.0, + 1144.0, + 1503.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1496.0, + 867.0, + 1496.0, + 867.0, + 1539.0, + 291.0, + 1539.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 896.0, + 1496.0, + 1406.0, + 1496.0, + 1406.0, + 1539.0, + 896.0, + 1539.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1528.0, + 1406.0, + 1528.0, + 1406.0, + 1568.0, + 293.0, + 1568.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1556.0, + 1406.0, + 1556.0, + 1406.0, + 1599.0, + 292.0, + 1599.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 289.0, + 1588.0, + 640.0, + 1588.0, + 640.0, + 1628.0, + 289.0, + 1628.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 669.0, + 1588.0, + 758.0, + 1588.0, + 758.0, + 1628.0, + 669.0, + 1628.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 969.0, + 1588.0, + 1235.0, + 1588.0, + 1235.0, + 1628.0, + 969.0, + 1628.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1270.0, + 1588.0, + 1407.0, + 1588.0, + 1407.0, + 1628.0, + 1270.0, + 1628.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1620.0, + 1405.0, + 1620.0, + 1405.0, + 1656.0, + 294.0, + 1656.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1651.0, + 1172.0, + 1651.0, + 1172.0, + 1686.0, + 294.0, + 1686.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1201.0, + 1651.0, + 1405.0, + 1651.0, + 1405.0, + 1686.0, + 1201.0, + 1686.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1678.0, + 475.0, + 1678.0, + 475.0, + 1717.0, + 292.0, + 1717.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 520.0, + 1678.0, + 1294.0, + 1678.0, + 1294.0, + 1717.0, + 520.0, + 1717.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1331.0, + 1678.0, + 1407.0, + 1678.0, + 1407.0, + 1717.0, + 1331.0, + 1717.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1711.0, + 1180.0, + 1711.0, + 1180.0, + 1749.0, + 292.0, + 1749.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1208.0, + 1711.0, + 1406.0, + 1711.0, + 1406.0, + 1749.0, + 1208.0, + 1749.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1741.0, + 935.0, + 1741.0, + 935.0, + 1780.0, + 292.0, + 1780.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 963.0, + 1741.0, + 1376.0, + 1741.0, + 1376.0, + 1780.0, + 963.0, + 1780.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1403.0, + 1741.0, + 1406.0, + 1741.0, + 1406.0, + 1780.0, + 1403.0, + 1780.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1769.0, + 1136.0, + 1769.0, + 1136.0, + 1812.0, + 292.0, + 1812.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1180.0, + 1769.0, + 1408.0, + 1769.0, + 1408.0, + 1812.0, + 1180.0, + 1812.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1798.0, + 364.0, + 1798.0, + 364.0, + 1842.0, + 291.0, + 1842.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 617.0, + 1798.0, + 1408.0, + 1798.0, + 1408.0, + 1842.0, + 617.0, + 1842.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 333.0, + 1834.0, + 1033.0, + 1834.0, + 1033.0, + 1876.0, + 333.0, + 1876.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 385.0, + 1404.0, + 385.0, + 1404.0, + 423.0, + 296.0, + 423.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 419.0, + 1405.0, + 419.0, + 1405.0, + 454.0, + 295.0, + 454.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 449.0, + 1405.0, + 449.0, + 1405.0, + 486.0, + 291.0, + 486.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 478.0, + 1405.0, + 478.0, + 1405.0, + 516.0, + 292.0, + 516.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 508.0, + 1404.0, + 508.0, + 1404.0, + 545.0, + 294.0, + 545.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 537.0, + 1408.0, + 537.0, + 1408.0, + 578.0, + 292.0, + 578.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 568.0, + 296.0, + 568.0, + 296.0, + 610.0, + 292.0, + 610.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 327.0, + 568.0, + 1184.0, + 568.0, + 1184.0, + 610.0, + 327.0, + 610.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1394.0, + 568.0, + 1406.0, + 568.0, + 1406.0, + 610.0, + 1394.0, + 610.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 600.0, + 1302.0, + 600.0, + 1302.0, + 638.0, + 292.0, + 638.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 
15, + "poly": [ + 1397.0, + 600.0, + 1405.0, + 600.0, + 1405.0, + 638.0, + 1397.0, + 638.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 631.0, + 1404.0, + 631.0, + 1404.0, + 668.0, + 294.0, + 668.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 341.0, + 659.0, + 580.0, + 659.0, + 580.0, + 701.0, + 341.0, + 701.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 617.0, + 659.0, + 763.0, + 659.0, + 763.0, + 701.0, + 617.0, + 701.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 792.0, + 659.0, + 1406.0, + 659.0, + 1406.0, + 701.0, + 792.0, + 701.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 690.0, + 431.0, + 690.0, + 431.0, + 732.0, + 292.0, + 732.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 475.0, + 690.0, + 592.0, + 690.0, + 592.0, + 732.0, + 475.0, + 732.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 626.0, + 690.0, + 967.0, + 690.0, + 967.0, + 732.0, + 626.0, + 732.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1014.0, + 690.0, + 1405.0, + 690.0, + 1405.0, + 732.0, + 1014.0, + 732.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 332.0, + 724.0, + 1259.0, + 724.0, + 1259.0, + 765.0, + 332.0, + 765.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1288.0, + 724.0, + 1399.0, + 724.0, + 1399.0, + 765.0, + 1288.0, + 765.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 792.0, + 1403.0, + 792.0, + 1403.0, + 828.0, + 294.0, + 828.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 825.0, + 1404.0, + 825.0, + 1404.0, + 861.0, + 294.0, + 861.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 855.0, + 1407.0, + 855.0, + 1407.0, + 
894.0, + 293.0, + 894.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 883.0, + 1407.0, + 883.0, + 1407.0, + 923.0, + 292.0, + 923.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 913.0, + 1407.0, + 913.0, + 1407.0, + 955.0, + 292.0, + 955.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 946.0, + 1407.0, + 946.0, + 1407.0, + 986.0, + 293.0, + 986.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 977.0, + 1407.0, + 977.0, + 1407.0, + 1016.0, + 293.0, + 1016.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1003.0, + 1404.0, + 1003.0, + 1404.0, + 1045.0, + 292.0, + 1045.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1037.0, + 1407.0, + 1037.0, + 1407.0, + 1076.0, + 293.0, + 1076.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1070.0, + 972.0, + 1070.0, + 972.0, + 1105.0, + 294.0, + 1105.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 227.0, + 1407.0, + 227.0, + 1407.0, + 265.0, + 292.0, + 265.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 260.0, + 704.0, + 260.0, + 704.0, + 296.0, + 294.0, + 296.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 749.0, + 260.0, + 1407.0, + 260.0, + 1407.0, + 296.0, + 749.0, + 296.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 289.0, + 1406.0, + 289.0, + 1406.0, + 328.0, + 293.0, + 328.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 320.0, + 610.0, + 320.0, + 610.0, + 354.0, + 295.0, + 354.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1217.0, + 1404.0, + 1217.0, + 1404.0, + 1254.0, + 293.0, + 1254.0 + ], + "score": 1.0, + 
"text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1247.0, + 1404.0, + 1247.0, + 1404.0, + 1285.0, + 294.0, + 1285.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1275.0, + 1087.0, + 1275.0, + 1087.0, + 1320.0, + 293.0, + 1320.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1961.0, + 1406.0, + 1961.0, + 1406.0, + 2004.0, + 294.0, + 2004.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1996.0, + 1200.0, + 1996.0, + 1200.0, + 2038.0, + 293.0, + 2038.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1341.0, + 1996.0, + 1407.0, + 1996.0, + 1407.0, + 2038.0, + 1341.0, + 2038.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 2, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 297, + 839, + 1405, + 839, + 1405, + 1033, + 297, + 1033 + ], + "score": 0.981 + }, + { + "category_id": 1, + "poly": [ + 298, + 1582, + 1404, + 1582, + 1404, + 1830, + 298, + 1830 + ], + "score": 0.981 + }, + { + "category_id": 1, + "poly": [ + 298, + 1126, + 1402, + 1126, + 1402, + 1250, + 298, + 1250 + ], + "score": 0.98 + }, + { + "category_id": 1, + "poly": [ + 298, + 1843, + 1404, + 1843, + 1404, + 2038, + 298, + 2038 + ], + "score": 0.98 + }, + { + "category_id": 1, + "poly": [ + 298, + 1333, + 1403, + 1333, + 1403, + 1488, + 298, + 1488 + ], + "score": 0.978 + }, + { + "category_id": 3, + "poly": [ + 307, + 218, + 1393, + 218, + 1393, + 545, + 307, + 545 + ], + "score": 0.967 + }, + { + "category_id": 4, + "poly": [ + 296, + 566, + 1405, + 566, + 1405, + 782, + 296, + 782 + ], + "score": 0.96 + }, + { + "category_id": 0, + "poly": [ + 299, + 1526, + 1065, + 1526, + 1065, + 1560, + 299, + 1560 + ], + "score": 0.939 + }, + { + "category_id": 8, + "poly": [ + 441, + 1265, + 1255, + 1265, + 1255, + 1304, + 441, + 1304 + ], + "score": 0.93 + }, + { + 
"category_id": 0, + "poly": [ + 298, + 1068, + 1074, + 1068, + 1074, + 1102, + 298, + 1102 + ], + "score": 0.927 + }, + { + "category_id": 2, + "poly": [ + 297, + 75, + 878, + 75, + 878, + 106, + 297, + 106 + ], + "score": 0.898 + }, + { + "category_id": 9, + "poly": [ + 1366, + 1269, + 1399, + 1269, + 1399, + 1300, + 1366, + 1300 + ], + "score": 0.871 + }, + { + "category_id": 2, + "poly": [ + 841, + 2088, + 858, + 2088, + 858, + 2112, + 841, + 2112 + ], + "score": 0.759 + }, + { + "category_id": 13, + "poly": [ + 744, + 1998, + 943, + 1998, + 943, + 2038, + 744, + 2038 + ], + "score": 0.93, + "latex": "\\begin{array} { r } { L + \\alpha \\cdot \\sum _ { i = 1 } ^ { N } | \\dot { w } _ { i } | , } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 510, + 1219, + 644, + 1219, + 644, + 1251, + 510, + 1251 + ], + "score": 0.92, + "latex": "m _ { i } = A _ { i } B _ { i } ," + }, + { + "category_id": 13, + "poly": [ + 684, + 1905, + 892, + 1905, + 892, + 1940, + 684, + 1940 + ], + "score": 0.92, + "latex": "\\left\\{ w _ { 1 } , w _ { 2 } , \\ldots , w _ { N } \\right\\}" + }, + { + "category_id": 13, + "poly": [ + 793, + 874, + 925, + 874, + 925, + 907, + 793, + 907 + ], + "score": 0.91, + "latex": "A \\in \\mathbb { R } ^ { d \\times r }" + }, + { + "category_id": 13, + "poly": [ + 987, + 873, + 1115, + 873, + 1115, + 907, + 987, + 907 + ], + "score": 0.91, + "latex": "B \\in \\mathbb { R } ^ { r \\times k }" + }, + { + "category_id": 13, + "poly": [ + 415, + 877, + 697, + 877, + 697, + 909, + 415, + 909 + ], + "score": 0.9, + "latex": "W _ { 0 } + \\delta W = W _ { 0 } + A B ," + }, + { + "category_id": 14, + "poly": [ + 443, + 1264, + 1256, + 1264, + 1256, + 1304, + 443, + 1304 + ], + "score": 0.88, + "latex": "\\hat { m } = ( w _ { 1 } A _ { 1 } + w _ { 2 } A _ { 2 } + \\cdot \\cdot \\cdot + w _ { N } A _ { N } ) ( w _ { 1 } B _ { 1 } + w _ { 2 } B _ { 2 } + \\cdot \\cdot \\cdot + w _ { N } B _ { N } ) ." 
+ }, + { + "category_id": 13, + "poly": [ + 1052, + 1193, + 1069, + 1193, + 1069, + 1216, + 1052, + 1216 + ], + "score": 0.78, + "latex": "r" + }, + { + "category_id": 13, + "poly": [ + 1121, + 909, + 1139, + 909, + 1139, + 937, + 1121, + 937 + ], + "score": 0.72, + "latex": "d" + }, + { + "category_id": 13, + "poly": [ + 529, + 913, + 550, + 913, + 550, + 938, + 529, + 938 + ], + "score": 0.7, + "latex": "r ," + }, + { + "category_id": 13, + "poly": [ + 1191, + 909, + 1209, + 909, + 1209, + 936, + 1191, + 936 + ], + "score": 0.67, + "latex": "k" + }, + { + "category_id": 13, + "poly": [ + 1034, + 2008, + 1052, + 2008, + 1052, + 2030, + 1034, + 2030 + ], + "score": 0.67, + "latex": "\\alpha" + }, + { + "category_id": 13, + "poly": [ + 525, + 1971, + 550, + 1971, + 550, + 1996, + 525, + 1996 + ], + "score": 0.66, + "latex": "w _ { . }" + }, + { + "category_id": 13, + "poly": [ + 444, + 1649, + 472, + 1649, + 472, + 1676, + 444, + 1676 + ], + "score": 0.63, + "latex": "w ," + }, + { + "category_id": 13, + "poly": [ + 1171, + 1907, + 1193, + 1907, + 1193, + 1933, + 1171, + 1933 + ], + "score": 0.62, + "latex": "L" + }, + { + "category_id": 13, + "poly": [ + 530, + 1879, + 554, + 1879, + 554, + 1903, + 530, + 1903 + ], + "score": 0.59, + "latex": "w" + }, + { + "category_id": 13, + "poly": [ + 821, + 943, + 851, + 943, + 851, + 969, + 821, + 969 + ], + "score": 0.56, + "latex": "m ," + }, + { + "category_id": 13, + "poly": [ + 1001, + 1220, + 1027, + 1220, + 1027, + 1247, + 1001, + 1247 + ], + "score": 0.56, + "latex": "\\hat { m }" + }, + { + "category_id": 13, + "poly": [ + 664, + 695, + 689, + 695, + 689, + 718, + 664, + 718 + ], + "score": 0.55, + "latex": "w" + }, + { + "category_id": 13, + "poly": [ + 1066, + 1590, + 1090, + 1590, + 1090, + 1613, + 1066, + 1613 + ], + "score": 0.52, + "latex": "w" + }, + { + "category_id": 13, + "poly": [ + 449, + 939, + 493, + 939, + 493, + 968, + 449, + 968 + ], + "score": 0.48, + "latex": "A B" + }, + { + "category_id": 13, + 
"poly": [ + 901, + 690, + 925, + 690, + 925, + 718, + 901, + 718 + ], + "score": 0.48, + "latex": "K" + }, + { + "category_id": 13, + "poly": [ + 1170, + 1741, + 1195, + 1741, + 1195, + 1765, + 1170, + 1765 + ], + "score": 0.47, + "latex": "w" + }, + { + "category_id": 13, + "poly": [ + 422, + 1937, + 449, + 1937, + 449, + 1968, + 422, + 1968 + ], + "score": 0.41, + "latex": "Q" + }, + { + "category_id": 13, + "poly": [ + 867, + 633, + 892, + 633, + 892, + 656, + 867, + 656 + ], + "score": 0.39, + "latex": "w" + }, + { + "category_id": 15, + "poly": [ + 392.0, + 227.0, + 515.0, + 227.0, + 515.0, + 253.0, + 392.0, + 253.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 975.0, + 232.0, + 1011.0, + 232.0, + 1011.0, + 260.0, + 975.0, + 260.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1086.0, + 231.0, + 1124.0, + 231.0, + 1124.0, + 259.0, + 1086.0, + 259.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 406.0, + 248.0, + 501.0, + 248.0, + 501.0, + 280.0, + 406.0, + 280.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 767.0, + 257.0, + 794.0, + 257.0, + 794.0, + 281.0, + 767.0, + 281.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 839.0, + 255.0, + 866.0, + 255.0, + 866.0, + 281.0, + 839.0, + 281.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 907.0, + 257.0, + 933.0, + 257.0, + 933.0, + 281.0, + 907.0, + 281.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 980.0, + 257.0, + 1006.0, + 257.0, + 1006.0, + 281.0, + 980.0, + 281.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1092.0, + 254.0, + 1119.0, + 254.0, + 1119.0, + 280.0, + 1092.0, + 280.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1197.0, + 277.0, + 1232.0, + 277.0, + 1232.0, + 306.0, + 1197.0, + 306.0 + ], + "score": 1.0, + "text": 
"" + }, + { + "category_id": 15, + "poly": [ + 949.0, + 294.0, + 969.0, + 294.0, + 969.0, + 317.0, + 949.0, + 317.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 411.0, + 316.0, + 497.0, + 316.0, + 497.0, + 347.0, + 411.0, + 347.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 411.0, + 360.0, + 495.0, + 360.0, + 495.0, + 388.0, + 411.0, + 388.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 565.0, + 350.0, + 719.0, + 350.0, + 719.0, + 385.0, + 565.0, + 385.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 992.0, + 355.0, + 1020.0, + 355.0, + 1020.0, + 389.0, + 992.0, + 389.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1274.0, + 351.0, + 1376.0, + 351.0, + 1376.0, + 383.0, + 1274.0, + 383.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1028.0, + 389.0, + 1206.0, + 389.0, + 1206.0, + 415.0, + 1028.0, + 415.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 411.0, + 426.0, + 495.0, + 426.0, + 495.0, + 454.0, + 411.0, + 454.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 749.0, + 408.0, + 885.0, + 408.0, + 885.0, + 458.0, + 749.0, + 458.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 899.0, + 420.0, + 1022.0, + 420.0, + 1022.0, + 459.0, + 899.0, + 459.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1028.0, + 412.0, + 1187.0, + 412.0, + 1187.0, + 460.0, + 1028.0, + 460.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 405.0, + 452.0, + 502.0, + 452.0, + 502.0, + 476.0, + 405.0, + 476.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1029.0, + 453.0, + 1155.0, + 453.0, + 1155.0, + 481.0, + 1029.0, + 481.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 437.0, + 489.0, + 
471.0, + 489.0, + 471.0, + 511.0, + 437.0, + 511.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1034.0, + 484.0, + 1205.0, + 484.0, + 1205.0, + 515.0, + 1034.0, + 515.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 303.0, + 516.0, + 589.0, + 516.0, + 589.0, + 543.0, + 303.0, + 543.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 871.0, + 519.0, + 1190.0, + 519.0, + 1190.0, + 545.0, + 871.0, + 545.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 765.0, + 231.0, + 795.0, + 231.0, + 795.0, + 259.0, + 765.0, + 259.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 837.0, + 231.5, + 868.0, + 231.5, + 868.0, + 259.5, + 837.0, + 259.5 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 905.75, + 231.0, + 933.75, + 231.0, + 933.75, + 259.0, + 905.75, + 259.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 380.0, + 293.0, + 527.0, + 293.0, + 527.0, + 321.0, + 380.0, + 321.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 408.0, + 384.0, + 500.0, + 384.0, + 500.0, + 413.0, + 408.0, + 413.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 563.0, + 1405.0, + 563.0, + 1405.0, + 607.0, + 293.0, + 607.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 595.0, + 1406.0, + 595.0, + 1406.0, + 637.0, + 291.0, + 637.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 628.0, + 866.0, + 628.0, + 866.0, + 663.0, + 295.0, + 663.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 893.0, + 628.0, + 1403.0, + 628.0, + 1403.0, + 663.0, + 893.0, + 663.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 657.0, + 1403.0, + 657.0, + 1403.0, + 694.0, + 293.0, + 694.0 + ], + "score": 
1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 688.0, + 663.0, + 688.0, + 663.0, + 725.0, + 291.0, + 725.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 690.0, + 688.0, + 900.0, + 688.0, + 900.0, + 725.0, + 690.0, + 725.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 926.0, + 688.0, + 1405.0, + 688.0, + 1405.0, + 725.0, + 926.0, + 725.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 716.0, + 1405.0, + 716.0, + 1405.0, + 757.0, + 291.0, + 757.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 748.0, + 455.0, + 748.0, + 455.0, + 784.0, + 294.0, + 784.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1523.0, + 1068.0, + 1523.0, + 1068.0, + 1565.0, + 292.0, + 1565.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1067.0, + 1077.0, + 1067.0, + 1077.0, + 1107.0, + 293.0, + 1107.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 71.0, + 880.0, + 71.0, + 880.0, + 110.0, + 295.0, + 110.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 838.0, + 2085.0, + 862.0, + 2085.0, + 862.0, + 2119.0, + 838.0, + 2119.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 840.0, + 1405.0, + 840.0, + 1405.0, + 877.0, + 295.0, + 877.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 869.0, + 414.0, + 869.0, + 414.0, + 914.0, + 292.0, + 914.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 698.0, + 869.0, + 792.0, + 869.0, + 792.0, + 914.0, + 698.0, + 914.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 926.0, + 869.0, + 986.0, + 869.0, + 986.0, + 914.0, + 926.0, + 914.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 
1116.0, + 869.0, + 1406.0, + 869.0, + 1406.0, + 914.0, + 1116.0, + 914.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 907.0, + 528.0, + 907.0, + 528.0, + 944.0, + 294.0, + 944.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 551.0, + 907.0, + 1120.0, + 907.0, + 1120.0, + 944.0, + 551.0, + 944.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1140.0, + 907.0, + 1190.0, + 907.0, + 1190.0, + 944.0, + 1140.0, + 944.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1210.0, + 907.0, + 1405.0, + 907.0, + 1405.0, + 944.0, + 1210.0, + 944.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 936.0, + 448.0, + 936.0, + 448.0, + 977.0, + 294.0, + 977.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 494.0, + 936.0, + 820.0, + 936.0, + 820.0, + 977.0, + 494.0, + 977.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 852.0, + 936.0, + 1406.0, + 936.0, + 1406.0, + 977.0, + 852.0, + 977.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 965.0, + 1405.0, + 965.0, + 1405.0, + 1007.0, + 292.0, + 1007.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 995.0, + 1011.0, + 995.0, + 1011.0, + 1040.0, + 291.0, + 1040.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1583.0, + 1065.0, + 1583.0, + 1065.0, + 1620.0, + 294.0, + 1620.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1091.0, + 1583.0, + 1404.0, + 1583.0, + 1404.0, + 1620.0, + 1091.0, + 1620.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1614.0, + 1405.0, + 1614.0, + 1405.0, + 1652.0, + 294.0, + 1652.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1644.0, + 443.0, + 1644.0, + 443.0, 
+ 1682.0, + 294.0, + 1682.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 473.0, + 1644.0, + 1405.0, + 1644.0, + 1405.0, + 1682.0, + 473.0, + 1682.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1675.0, + 1405.0, + 1675.0, + 1405.0, + 1708.0, + 292.0, + 1708.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1705.0, + 1405.0, + 1705.0, + 1405.0, + 1742.0, + 292.0, + 1742.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1733.0, + 1169.0, + 1733.0, + 1169.0, + 1774.0, + 292.0, + 1774.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1196.0, + 1733.0, + 1405.0, + 1733.0, + 1405.0, + 1774.0, + 1196.0, + 1774.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1765.0, + 1405.0, + 1765.0, + 1405.0, + 1806.0, + 292.0, + 1806.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1798.0, + 640.0, + 1798.0, + 640.0, + 1832.0, + 294.0, + 1832.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1124.0, + 1406.0, + 1124.0, + 1406.0, + 1162.0, + 292.0, + 1162.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1155.0, + 1406.0, + 1155.0, + 1406.0, + 1194.0, + 292.0, + 1194.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1188.0, + 1051.0, + 1188.0, + 1051.0, + 1224.0, + 293.0, + 1224.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1070.0, + 1188.0, + 1404.0, + 1188.0, + 1404.0, + 1224.0, + 1070.0, + 1224.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1216.0, + 509.0, + 1216.0, + 509.0, + 1255.0, + 292.0, + 1255.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 645.0, + 1216.0, + 1000.0, + 1216.0, + 1000.0, + 1255.0, 
+ 645.0, + 1255.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1028.0, + 1216.0, + 1275.0, + 1216.0, + 1275.0, + 1255.0, + 1028.0, + 1255.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1839.0, + 1406.0, + 1839.0, + 1406.0, + 1883.0, + 293.0, + 1883.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1867.0, + 529.0, + 1867.0, + 529.0, + 1917.0, + 291.0, + 1917.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 555.0, + 1867.0, + 1408.0, + 1867.0, + 1408.0, + 1917.0, + 555.0, + 1917.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1902.0, + 683.0, + 1902.0, + 683.0, + 1942.0, + 293.0, + 1942.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 893.0, + 1902.0, + 1170.0, + 1902.0, + 1170.0, + 1942.0, + 893.0, + 1942.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1194.0, + 1902.0, + 1406.0, + 1902.0, + 1406.0, + 1942.0, + 1194.0, + 1942.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1935.0, + 421.0, + 1935.0, + 421.0, + 1972.0, + 294.0, + 1972.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 450.0, + 1935.0, + 1404.0, + 1935.0, + 1404.0, + 1972.0, + 450.0, + 1972.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1962.0, + 524.0, + 1962.0, + 524.0, + 2005.0, + 292.0, + 2005.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 551.0, + 1962.0, + 1406.0, + 1962.0, + 1406.0, + 2005.0, + 551.0, + 2005.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 289.0, + 1989.0, + 743.0, + 1989.0, + 743.0, + 2047.0, + 289.0, + 2047.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 944.0, + 1989.0, + 1033.0, + 1989.0, + 1033.0, + 2047.0, + 944.0, + 
2047.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1053.0, + 1989.0, + 1409.0, + 1989.0, + 1409.0, + 2047.0, + 1053.0, + 2047.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1333.0, + 1404.0, + 1333.0, + 1404.0, + 1371.0, + 294.0, + 1371.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1364.0, + 1405.0, + 1364.0, + 1405.0, + 1401.0, + 294.0, + 1401.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1392.0, + 1408.0, + 1392.0, + 1408.0, + 1434.0, + 291.0, + 1434.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1422.0, + 1405.0, + 1422.0, + 1405.0, + 1465.0, + 292.0, + 1465.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1454.0, + 363.0, + 1454.0, + 363.0, + 1493.0, + 293.0, + 1493.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 3, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 298, + 229, + 1404, + 229, + 1404, + 566, + 298, + 566 + ], + "score": 0.982 + }, + { + "category_id": 1, + "poly": [ + 297, + 1021, + 1403, + 1021, + 1403, + 1207, + 297, + 1207 + ], + "score": 0.979 + }, + { + "category_id": 1, + "poly": [ + 298, + 1819, + 1402, + 1819, + 1402, + 2035, + 298, + 2035 + ], + "score": 0.979 + }, + { + "category_id": 1, + "poly": [ + 298, + 1439, + 1404, + 1439, + 1404, + 1717, + 298, + 1717 + ], + "score": 0.977 + }, + { + "category_id": 1, + "poly": [ + 298, + 1245, + 1402, + 1245, + 1402, + 1399, + 298, + 1399 + ], + "score": 0.977 + }, + { + "category_id": 1, + "poly": [ + 299, + 691, + 1403, + 691, + 1403, + 783, + 299, + 783 + ], + "score": 0.973 + }, + { + "category_id": 1, + "poly": [ + 300, + 887, + 1398, + 887, + 1398, + 982, + 300, + 982 + ], + "score": 0.963 + }, + { + "category_id": 0, + "poly": [ + 298, + 1759, + 523, + 1759, + 523, + 
1792, + 298, + 1792 + ], + "score": 0.912 + }, + { + "category_id": 0, + "poly": [ + 299, + 829, + 608, + 829, + 608, + 862, + 299, + 862 + ], + "score": 0.908 + }, + { + "category_id": 0, + "poly": [ + 299, + 616, + 675, + 616, + 675, + 655, + 299, + 655 + ], + "score": 0.905 + }, + { + "category_id": 2, + "poly": [ + 298, + 75, + 878, + 75, + 878, + 106, + 298, + 106 + ], + "score": 0.903 + }, + { + "category_id": 2, + "poly": [ + 840, + 2088, + 858, + 2088, + 858, + 2112, + 840, + 2112 + ], + "score": 0.783 + }, + { + "category_id": 13, + "poly": [ + 1340, + 509, + 1364, + 509, + 1364, + 532, + 1340, + 532 + ], + "score": 0.57, + "latex": "w" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1758.0, + 525.0, + 1758.0, + 525.0, + 1794.0, + 295.0, + 1794.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 819.0, + 613.0, + 819.0, + 613.0, + 875.0, + 291.0, + 875.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 289.0, + 613.0, + 678.0, + 613.0, + 678.0, + 661.0, + 289.0, + 661.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 71.0, + 880.0, + 71.0, + 880.0, + 110.0, + 295.0, + 110.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 838.0, + 2085.0, + 862.0, + 2085.0, + 862.0, + 2118.0, + 838.0, + 2118.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 227.0, + 1405.0, + 227.0, + 1405.0, + 270.0, + 292.0, + 270.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 258.0, + 1408.0, + 258.0, + 1408.0, + 301.0, + 293.0, + 301.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 288.0, + 1406.0, + 288.0, + 1406.0, + 329.0, + 292.0, + 329.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 321.0, + 1405.0, + 321.0, + 1405.0, + 359.0, + 292.0, + 359.0 + ], + "score": 1.0, + "text": "" + }, + { 
+ "category_id": 15, + "poly": [ + 294.0, + 350.0, + 1405.0, + 350.0, + 1405.0, + 389.0, + 294.0, + 389.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 381.0, + 1405.0, + 381.0, + 1405.0, + 420.0, + 293.0, + 420.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 411.0, + 1405.0, + 411.0, + 1405.0, + 450.0, + 293.0, + 450.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 443.0, + 1404.0, + 443.0, + 1404.0, + 478.0, + 294.0, + 478.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 473.0, + 1405.0, + 473.0, + 1405.0, + 512.0, + 293.0, + 512.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 501.0, + 1339.0, + 501.0, + 1339.0, + 544.0, + 292.0, + 544.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1365.0, + 501.0, + 1405.0, + 501.0, + 1405.0, + 544.0, + 1365.0, + 544.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 534.0, + 1242.0, + 534.0, + 1242.0, + 569.0, + 293.0, + 569.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1020.0, + 1407.0, + 1020.0, + 1407.0, + 1059.0, + 295.0, + 1059.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1050.0, + 1407.0, + 1050.0, + 1407.0, + 1090.0, + 293.0, + 1090.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1080.0, + 1407.0, + 1080.0, + 1407.0, + 1120.0, + 291.0, + 1120.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1109.0, + 1407.0, + 1109.0, + 1407.0, + 1152.0, + 292.0, + 1152.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1142.0, + 1407.0, + 1142.0, + 1407.0, + 1182.0, + 292.0, + 1182.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 
294.0, + 1169.0, + 707.0, + 1169.0, + 707.0, + 1213.0, + 294.0, + 1213.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1818.0, + 1404.0, + 1818.0, + 1404.0, + 1855.0, + 294.0, + 1855.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1849.0, + 1406.0, + 1849.0, + 1406.0, + 1887.0, + 293.0, + 1887.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1880.0, + 1406.0, + 1880.0, + 1406.0, + 1918.0, + 294.0, + 1918.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1910.0, + 1407.0, + 1910.0, + 1407.0, + 1952.0, + 292.0, + 1952.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1941.0, + 1407.0, + 1941.0, + 1407.0, + 1978.0, + 292.0, + 1978.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1970.0, + 1407.0, + 1970.0, + 1407.0, + 2007.0, + 293.0, + 2007.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 2001.0, + 1406.0, + 2001.0, + 1406.0, + 2039.0, + 294.0, + 2039.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1436.0, + 1404.0, + 1436.0, + 1404.0, + 1478.0, + 295.0, + 1478.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1469.0, + 1405.0, + 1469.0, + 1405.0, + 1508.0, + 292.0, + 1508.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1493.0, + 1408.0, + 1493.0, + 1408.0, + 1544.0, + 291.0, + 1544.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1529.0, + 1404.0, + 1529.0, + 1404.0, + 1569.0, + 293.0, + 1569.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1558.0, + 1408.0, + 1558.0, + 1408.0, + 1602.0, + 292.0, + 1602.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 
1591.0, + 1406.0, + 1591.0, + 1406.0, + 1628.0, + 294.0, + 1628.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1619.0, + 1406.0, + 1619.0, + 1406.0, + 1661.0, + 292.0, + 1661.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1649.0, + 1405.0, + 1649.0, + 1405.0, + 1692.0, + 292.0, + 1692.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1684.0, + 1124.0, + 1684.0, + 1124.0, + 1721.0, + 294.0, + 1721.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1243.0, + 1404.0, + 1243.0, + 1404.0, + 1281.0, + 293.0, + 1281.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1274.0, + 1404.0, + 1274.0, + 1404.0, + 1313.0, + 293.0, + 1313.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1303.0, + 1404.0, + 1303.0, + 1404.0, + 1342.0, + 293.0, + 1342.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1337.0, + 1406.0, + 1337.0, + 1406.0, + 1374.0, + 293.0, + 1374.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1368.0, + 457.0, + 1368.0, + 457.0, + 1401.0, + 293.0, + 1401.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 690.0, + 1407.0, + 690.0, + 1407.0, + 727.0, + 293.0, + 727.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 717.0, + 1407.0, + 717.0, + 1407.0, + 762.0, + 292.0, + 762.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 753.0, + 499.0, + 753.0, + 499.0, + 784.0, + 297.0, + 784.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 885.0, + 1403.0, + 885.0, + 1403.0, + 926.0, + 294.0, + 926.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 916.0, + 1402.0, + 916.0, + 
1402.0, + 954.0, + 295.0, + 954.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 944.0, + 897.0, + 944.0, + 897.0, + 989.0, + 294.0, + 989.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 4, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 5, + "poly": [ + 302, + 491, + 1430, + 491, + 1430, + 1590, + 302, + 1590 + ], + "score": 0.983, + "html": "
TaskZeroICLavgIA3avgLoRAavgFFTavgLoraHubavg
Boolean Expressions54.059.656.256.062.255.5
Causal Judgement57.559.460.255.657.554.3
Date Understanding15.320.420.035.859.332.9
Disambiguation0.069.10.068.068.245.2
Dyck Languages1.30.94.222.219.51.0
Formal Fallacies51.355.351.553.654.052.8
Geometric Shapes6.719.614.72431.17.4
Hyperbaton6.771.849.355.377.362.8
Logical DeductionS (five objects)21.339.132.740.042.236.1
Logical DeductionS (seven objects)12.740.733.837.344.936.8
Logical DeductionS (three objects)0.051.68.553.652.945.7
Movie Recommendation62.755.861.851.566.055.3
Multistep Arithmetic0.70.70.70.20.00.4
Navigate47.345.346.248.048.047.1
Object Counting34.732.435.138.735.633.7
Penguins in a Table43.541.345.036.231.935.9
Reasoning about Colored Objects32.040.240.739.637.640.0
Ruin Names23.319.324.437.861.324.4
Salient Translation Error Detection37.347.337.116.016.236.0
Snarks50.054.253.955.666.756.9
Sports Understanding56.054.755.156.554.056.7
Temporal Sequences16.725.118.225.137.818.2
Tracking Shuffled ObjectsS (five objects)12.012.012.013.816.912.3
Tracking Shuffled Objects (seven objects)6.76.76.710.09.87.7
Tracking Shuffled ObjectsS (three objects)24.731.130.730.932.029.2
Web of Lies54.053.854.252.748.250.1
Word Sorting1.30.51.34.94.91.1
Avg Performance Per Task27.037.331.637.742.134.7
Avg Tokens Per Example111.6597.8111.6111.6111.6111.6
Gradient-based TrainingNoNoYesYesYesNo
" + }, + { + "category_id": 1, + "poly": [ + 298, + 1849, + 1403, + 1849, + 1403, + 2035, + 298, + 2035 + ], + "score": 0.979 + }, + { + "category_id": 1, + "poly": [ + 298, + 1680, + 1403, + 1680, + 1403, + 1836, + 298, + 1836 + ], + "score": 0.971 + }, + { + "category_id": 6, + "poly": [ + 296, + 221, + 1405, + 221, + 1405, + 471, + 296, + 471 + ], + "score": 0.918 + }, + { + "category_id": 2, + "poly": [ + 298, + 74, + 878, + 74, + 878, + 106, + 298, + 106 + ], + "score": 0.905 + }, + { + "category_id": 2, + "poly": [ + 840, + 2089, + 858, + 2089, + 858, + 2112, + 840, + 2112 + ], + "score": 0.787 + }, + { + "category_id": 1, + "poly": [ + 296, + 221, + 1405, + 221, + 1405, + 471, + 296, + 471 + ], + "score": 0.144 + }, + { + "category_id": 13, + "poly": [ + 837, + 1911, + 900, + 1911, + 900, + 1941, + 837, + 1941 + ], + "score": 0.86, + "latex": "3 . 1 \\%" + }, + { + "category_id": 13, + "poly": [ + 1138, + 316, + 1159, + 316, + 1159, + 348, + 1138, + 348 + ], + "score": 0.69, + "latex": "\\ S" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 217.0, + 1406.0, + 217.0, + 1406.0, + 263.0, + 293.0, + 263.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 251.0, + 1405.0, + 251.0, + 1405.0, + 291.0, + 294.0, + 291.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 282.0, + 1406.0, + 282.0, + 1406.0, + 325.0, + 290.0, + 325.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 314.0, + 1137.0, + 314.0, + 1137.0, + 352.0, + 293.0, + 352.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1160.0, + 314.0, + 1405.0, + 314.0, + 1405.0, + 352.0, + 1160.0, + 352.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 345.0, + 1406.0, + 345.0, + 1406.0, + 382.0, + 294.0, + 382.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 375.0, + 1406.0, + 375.0, + 1406.0, 
+ 413.0, + 294.0, + 413.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 406.0, + 1406.0, + 406.0, + 1406.0, + 443.0, + 293.0, + 443.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 436.0, + 651.0, + 436.0, + 651.0, + 474.0, + 291.0, + 474.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 71.0, + 880.0, + 71.0, + 880.0, + 111.0, + 295.0, + 111.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 840.0, + 2087.0, + 861.0, + 2087.0, + 861.0, + 2118.0, + 840.0, + 2118.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1846.0, + 1405.0, + 1846.0, + 1405.0, + 1887.0, + 294.0, + 1887.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1881.0, + 1405.0, + 1881.0, + 1405.0, + 1916.0, + 294.0, + 1916.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1905.0, + 836.0, + 1905.0, + 836.0, + 1951.0, + 292.0, + 1951.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 901.0, + 1905.0, + 1407.0, + 1905.0, + 1407.0, + 1951.0, + 901.0, + 1951.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1938.0, + 1408.0, + 1938.0, + 1408.0, + 1982.0, + 292.0, + 1982.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1970.0, + 1407.0, + 1970.0, + 1407.0, + 2010.0, + 293.0, + 2010.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 2001.0, + 1407.0, + 2001.0, + 1407.0, + 2039.0, + 294.0, + 2039.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1679.0, + 1404.0, + 1679.0, + 1404.0, + 1717.0, + 294.0, + 1717.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1706.0, + 1404.0, + 1706.0, + 1404.0, + 1753.0, + 292.0, + 1753.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1738.0, + 1407.0, + 1738.0, + 1407.0, + 1782.0, + 292.0, + 1782.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1769.0, + 1405.0, + 1769.0, + 1405.0, + 1813.0, + 292.0, + 1813.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1801.0, + 528.0, + 1801.0, + 528.0, + 1842.0, + 293.0, + 1842.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 217.0, + 1406.0, + 217.0, + 1406.0, + 263.0, + 293.0, + 263.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 251.0, + 1405.0, + 251.0, + 1405.0, + 291.0, + 294.0, + 291.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 282.0, + 1406.0, + 282.0, + 1406.0, + 325.0, + 290.0, + 325.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 314.0, + 1137.0, + 314.0, + 1137.0, + 352.0, + 293.0, + 352.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1160.0, + 314.0, + 1405.0, + 314.0, + 1405.0, + 352.0, + 1160.0, + 352.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 345.0, + 1406.0, + 345.0, + 1406.0, + 382.0, + 294.0, + 382.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 375.0, + 1406.0, + 375.0, + 1406.0, + 413.0, + 294.0, + 413.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 406.0, + 1406.0, + 406.0, + 1406.0, + 443.0, + 293.0, + 443.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 436.0, + 651.0, + 436.0, + 651.0, + 474.0, + 291.0, + 474.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 5, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 298, + 588, + 1403, + 588, + 
1403, + 805, + 298, + 805 + ], + "score": 0.979 + }, + { + "category_id": 1, + "poly": [ + 298, + 387, + 1403, + 387, + 1403, + 574, + 298, + 574 + ], + "score": 0.978 + }, + { + "category_id": 1, + "poly": [ + 299, + 817, + 1403, + 817, + 1403, + 972, + 299, + 972 + ], + "score": 0.977 + }, + { + "category_id": 1, + "poly": [ + 298, + 1788, + 1403, + 1788, + 1403, + 2035, + 298, + 2035 + ], + "score": 0.976 + }, + { + "category_id": 1, + "poly": [ + 298, + 1256, + 715, + 1256, + 715, + 1530, + 298, + 1530 + ], + "score": 0.974 + }, + { + "category_id": 1, + "poly": [ + 298, + 1531, + 1404, + 1531, + 1404, + 1686, + 298, + 1686 + ], + "score": 0.968 + }, + { + "category_id": 5, + "poly": [ + 762, + 1381, + 1370, + 1381, + 1370, + 1480, + 762, + 1480 + ], + "score": 0.967, + "html": "
LoRA RetrievalLoraHub avgLoraHub best
31.734.741.2
" + }, + { + "category_id": 1, + "poly": [ + 297, + 228, + 1402, + 228, + 1402, + 294, + 297, + 294 + ], + "score": 0.949 + }, + { + "category_id": 6, + "poly": [ + 734, + 1293, + 1400, + 1293, + 1400, + 1355, + 734, + 1355 + ], + "score": 0.933 + }, + { + "category_id": 1, + "poly": [ + 296, + 1088, + 1405, + 1088, + 1405, + 1152, + 296, + 1152 + ], + "score": 0.924 + }, + { + "category_id": 2, + "poly": [ + 298, + 74, + 878, + 74, + 878, + 106, + 298, + 106 + ], + "score": 0.906 + }, + { + "category_id": 0, + "poly": [ + 300, + 1016, + 695, + 1016, + 695, + 1056, + 300, + 1056 + ], + "score": 0.905 + }, + { + "category_id": 1, + "poly": [ + 334, + 1185, + 1283, + 1185, + 1283, + 1222, + 334, + 1222 + ], + "score": 0.903 + }, + { + "category_id": 0, + "poly": [ + 298, + 331, + 503, + 331, + 503, + 364, + 298, + 364 + ], + "score": 0.903 + }, + { + "category_id": 1, + "poly": [ + 333, + 1720, + 1026, + 1720, + 1026, + 1755, + 333, + 1755 + ], + "score": 0.896 + }, + { + "category_id": 2, + "poly": [ + 841, + 2088, + 858, + 2088, + 858, + 2111, + 841, + 2111 + ], + "score": 0.766 + }, + { + "category_id": 13, + "poly": [ + 778, + 230, + 840, + 230, + 840, + 261, + 778, + 261 + ], + "score": 0.87, + "latex": "1 . 
0 \\%" + }, + { + "category_id": 13, + "poly": [ + 830, + 818, + 883, + 818, + 883, + 849, + 830, + 849 + ], + "score": 0.38, + "latex": "\\scriptstyle { \\mathrm { I C L } } ," + }, + { + "category_id": 15, + "poly": [ + 735.0, + 1291.0, + 1403.0, + 1291.0, + 1403.0, + 1327.0, + 735.0, + 1327.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 733.0, + 1320.0, + 1217.0, + 1320.0, + 1217.0, + 1355.0, + 733.0, + 1355.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 71.0, + 880.0, + 71.0, + 880.0, + 111.0, + 295.0, + 111.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1013.0, + 700.0, + 1013.0, + 700.0, + 1066.0, + 291.0, + 1066.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 324.0, + 507.0, + 324.0, + 507.0, + 371.0, + 292.0, + 371.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 839.0, + 2085.0, + 862.0, + 2085.0, + 862.0, + 2122.0, + 839.0, + 2122.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 588.0, + 1407.0, + 588.0, + 1407.0, + 626.0, + 293.0, + 626.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 620.0, + 1404.0, + 620.0, + 1404.0, + 654.0, + 294.0, + 654.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 648.0, + 1405.0, + 648.0, + 1405.0, + 685.0, + 293.0, + 685.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 679.0, + 1404.0, + 679.0, + 1404.0, + 718.0, + 294.0, + 718.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 709.0, + 1406.0, + 709.0, + 1406.0, + 746.0, + 292.0, + 746.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 739.0, + 1405.0, + 739.0, + 1405.0, + 781.0, + 293.0, + 781.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 
15, + "poly": [ + 294.0, + 771.0, + 1317.0, + 771.0, + 1317.0, + 809.0, + 294.0, + 809.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 384.0, + 1407.0, + 384.0, + 1407.0, + 425.0, + 292.0, + 425.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 419.0, + 1407.0, + 419.0, + 1407.0, + 457.0, + 292.0, + 457.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 289.0, + 446.0, + 1408.0, + 446.0, + 1408.0, + 492.0, + 289.0, + 492.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 481.0, + 1405.0, + 481.0, + 1405.0, + 516.0, + 294.0, + 516.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 509.0, + 1405.0, + 509.0, + 1405.0, + 548.0, + 292.0, + 548.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 541.0, + 765.0, + 541.0, + 765.0, + 577.0, + 294.0, + 577.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 816.0, + 829.0, + 816.0, + 829.0, + 853.0, + 294.0, + 853.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 884.0, + 816.0, + 1404.0, + 816.0, + 1404.0, + 853.0, + 884.0, + 853.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 847.0, + 1407.0, + 847.0, + 1407.0, + 885.0, + 293.0, + 885.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 877.0, + 1404.0, + 877.0, + 1404.0, + 914.0, + 293.0, + 914.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 908.0, + 1407.0, + 908.0, + 1407.0, + 948.0, + 292.0, + 948.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 938.0, + 471.0, + 938.0, + 471.0, + 972.0, + 294.0, + 972.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1785.0, + 1404.0, + 1785.0, + 1404.0, + 
1825.0, + 293.0, + 1825.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1819.0, + 1407.0, + 1819.0, + 1407.0, + 1856.0, + 294.0, + 1856.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1849.0, + 1408.0, + 1849.0, + 1408.0, + 1887.0, + 292.0, + 1887.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1880.0, + 1407.0, + 1880.0, + 1407.0, + 1918.0, + 293.0, + 1918.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1910.0, + 1407.0, + 1910.0, + 1407.0, + 1948.0, + 293.0, + 1948.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1940.0, + 1407.0, + 1940.0, + 1407.0, + 1978.0, + 293.0, + 1978.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1969.0, + 1404.0, + 1969.0, + 1404.0, + 2010.0, + 292.0, + 2010.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1999.0, + 1403.0, + 1999.0, + 1403.0, + 2039.0, + 292.0, + 2039.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1252.0, + 716.0, + 1252.0, + 716.0, + 1293.0, + 295.0, + 1293.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1288.0, + 716.0, + 1288.0, + 716.0, + 1319.0, + 297.0, + 1319.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1316.0, + 716.0, + 1316.0, + 716.0, + 1351.0, + 292.0, + 1351.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1345.0, + 717.0, + 1345.0, + 717.0, + 1381.0, + 295.0, + 1381.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1374.0, + 720.0, + 1374.0, + 720.0, + 1414.0, + 293.0, + 1414.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1408.0, + 717.0, + 1408.0, + 717.0, + 1440.0, + 293.0, + 
1440.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1435.0, + 718.0, + 1435.0, + 718.0, + 1474.0, + 294.0, + 1474.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1467.0, + 718.0, + 1467.0, + 718.0, + 1506.0, + 294.0, + 1506.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1498.0, + 718.0, + 1498.0, + 718.0, + 1534.0, + 296.0, + 1534.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1530.0, + 1401.0, + 1530.0, + 1401.0, + 1563.0, + 297.0, + 1563.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1559.0, + 1404.0, + 1559.0, + 1404.0, + 1595.0, + 293.0, + 1595.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1587.0, + 1405.0, + 1587.0, + 1405.0, + 1627.0, + 293.0, + 1627.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1619.0, + 1406.0, + 1619.0, + 1406.0, + 1660.0, + 293.0, + 1660.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1651.0, + 1357.0, + 1651.0, + 1357.0, + 1689.0, + 292.0, + 1689.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 227.0, + 777.0, + 227.0, + 777.0, + 264.0, + 295.0, + 264.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 841.0, + 227.0, + 1404.0, + 227.0, + 1404.0, + 264.0, + 841.0, + 264.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 261.0, + 922.0, + 261.0, + 922.0, + 297.0, + 295.0, + 297.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1085.0, + 1405.0, + 1085.0, + 1405.0, + 1125.0, + 294.0, + 1125.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1117.0, + 1405.0, + 1117.0, + 1405.0, + 1156.0, + 293.0, + 1156.0 + ], + "score": 1.0, + 
"text": "" + }, + { + "category_id": 15, + "poly": [ + 328.0, + 1180.0, + 1289.0, + 1180.0, + 1289.0, + 1225.0, + 328.0, + 1225.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 330.0, + 1716.0, + 1030.0, + 1716.0, + 1030.0, + 1756.0, + 330.0, + 1756.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 6, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 297, + 1757, + 1403, + 1757, + 1403, + 2035, + 297, + 2035 + ], + "score": 0.981 + }, + { + "category_id": 1, + "poly": [ + 298, + 1379, + 1403, + 1379, + 1403, + 1534, + 298, + 1534 + ], + "score": 0.978 + }, + { + "category_id": 1, + "poly": [ + 298, + 746, + 1404, + 746, + 1404, + 994, + 298, + 994 + ], + "score": 0.978 + }, + { + "category_id": 1, + "poly": [ + 298, + 1089, + 1403, + 1089, + 1403, + 1276, + 298, + 1276 + ], + "score": 0.977 + }, + { + "category_id": 1, + "poly": [ + 299, + 456, + 1403, + 456, + 1403, + 644, + 299, + 644 + ], + "score": 0.977 + }, + { + "category_id": 1, + "poly": [ + 299, + 1650, + 1404, + 1650, + 1404, + 1744, + 299, + 1744 + ], + "score": 0.973 + }, + { + "category_id": 1, + "poly": [ + 298, + 228, + 1403, + 228, + 1403, + 353, + 298, + 353 + ], + "score": 0.973 + }, + { + "category_id": 0, + "poly": [ + 299, + 1579, + 555, + 1579, + 555, + 1616, + 299, + 1616 + ], + "score": 0.919 + }, + { + "category_id": 1, + "poly": [ + 335, + 1311, + 949, + 1311, + 949, + 1345, + 335, + 1345 + ], + "score": 0.908 + }, + { + "category_id": 2, + "poly": [ + 298, + 74, + 878, + 74, + 878, + 106, + 298, + 106 + ], + "score": 0.905 + }, + { + "category_id": 1, + "poly": [ + 318, + 678, + 1294, + 678, + 1294, + 714, + 318, + 714 + ], + "score": 0.888 + }, + { + "category_id": 1, + "poly": [ + 332, + 388, + 1072, + 388, + 1072, + 423, + 332, + 423 + ], + "score": 0.888 + }, + { + "category_id": 1, + "poly": [ + 341, + 1029, + 928, + 1029, + 928, + 1061, + 341, + 1061 + ], + "score": 0.874 + 
}, + { + "category_id": 2, + "poly": [ + 840, + 2088, + 858, + 2088, + 858, + 2112, + 840, + 2112 + ], + "score": 0.795 + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1576.0, + 558.0, + 1576.0, + 558.0, + 1619.0, + 291.0, + 1619.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 71.0, + 880.0, + 71.0, + 880.0, + 111.0, + 295.0, + 111.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 838.0, + 2084.0, + 862.0, + 2084.0, + 862.0, + 2117.0, + 838.0, + 2117.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1756.0, + 1405.0, + 1756.0, + 1405.0, + 1796.0, + 295.0, + 1796.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1788.0, + 1405.0, + 1788.0, + 1405.0, + 1825.0, + 296.0, + 1825.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1819.0, + 1407.0, + 1819.0, + 1407.0, + 1855.0, + 295.0, + 1855.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1845.0, + 1407.0, + 1845.0, + 1407.0, + 1888.0, + 292.0, + 1888.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1876.0, + 1407.0, + 1876.0, + 1407.0, + 1920.0, + 292.0, + 1920.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1908.0, + 1405.0, + 1908.0, + 1405.0, + 1947.0, + 294.0, + 1947.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1939.0, + 1406.0, + 1939.0, + 1406.0, + 1979.0, + 292.0, + 1979.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1971.0, + 1404.0, + 1971.0, + 1404.0, + 2008.0, + 295.0, + 2008.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 1998.0, + 1407.0, + 1998.0, + 1407.0, + 2044.0, + 290.0, + 2044.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 
1377.0, + 1407.0, + 1377.0, + 1407.0, + 1418.0, + 294.0, + 1418.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1405.0, + 1407.0, + 1405.0, + 1407.0, + 1450.0, + 292.0, + 1450.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1439.0, + 1405.0, + 1439.0, + 1405.0, + 1476.0, + 296.0, + 1476.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1466.0, + 1407.0, + 1466.0, + 1407.0, + 1510.0, + 292.0, + 1510.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1499.0, + 966.0, + 1499.0, + 966.0, + 1540.0, + 295.0, + 1540.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 747.0, + 1405.0, + 747.0, + 1405.0, + 784.0, + 296.0, + 784.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 772.0, + 1408.0, + 772.0, + 1408.0, + 819.0, + 291.0, + 819.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 808.0, + 1405.0, + 808.0, + 1405.0, + 843.0, + 293.0, + 843.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 840.0, + 1404.0, + 840.0, + 1404.0, + 874.0, + 294.0, + 874.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 869.0, + 1405.0, + 869.0, + 1405.0, + 905.0, + 293.0, + 905.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 898.0, + 1406.0, + 898.0, + 1406.0, + 939.0, + 293.0, + 939.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 929.0, + 1406.0, + 929.0, + 1406.0, + 967.0, + 292.0, + 967.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 962.0, + 740.0, + 962.0, + 740.0, + 996.0, + 294.0, + 996.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1083.0, + 1408.0, + 1083.0, + 1408.0, + 
1131.0, + 291.0, + 1131.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1118.0, + 1405.0, + 1118.0, + 1405.0, + 1157.0, + 293.0, + 1157.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1148.0, + 1407.0, + 1148.0, + 1407.0, + 1188.0, + 293.0, + 1188.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1181.0, + 1405.0, + 1181.0, + 1405.0, + 1217.0, + 294.0, + 1217.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1213.0, + 1405.0, + 1213.0, + 1405.0, + 1245.0, + 296.0, + 1245.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1243.0, + 577.0, + 1243.0, + 577.0, + 1279.0, + 294.0, + 1279.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 456.0, + 1407.0, + 456.0, + 1407.0, + 492.0, + 293.0, + 492.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 487.0, + 1404.0, + 487.0, + 1404.0, + 523.0, + 293.0, + 523.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 518.0, + 1405.0, + 518.0, + 1405.0, + 554.0, + 294.0, + 554.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 546.0, + 1405.0, + 546.0, + 1405.0, + 590.0, + 293.0, + 590.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 580.0, + 1403.0, + 580.0, + 1403.0, + 616.0, + 295.0, + 616.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 612.0, + 1030.0, + 612.0, + 1030.0, + 645.0, + 294.0, + 645.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1647.0, + 1404.0, + 1647.0, + 1404.0, + 1687.0, + 293.0, + 1687.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1679.0, + 1405.0, + 1679.0, + 1405.0, + 1719.0, + 293.0, + 1719.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1707.0, + 1386.0, + 1707.0, + 1386.0, + 1753.0, + 292.0, + 1753.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 228.0, + 1407.0, + 228.0, + 1407.0, + 268.0, + 293.0, + 268.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 259.0, + 1405.0, + 259.0, + 1405.0, + 299.0, + 293.0, + 299.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 288.0, + 1404.0, + 288.0, + 1404.0, + 328.0, + 293.0, + 328.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 322.0, + 1207.0, + 322.0, + 1207.0, + 355.0, + 293.0, + 355.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 332.0, + 1307.0, + 952.0, + 1307.0, + 952.0, + 1347.0, + 332.0, + 1347.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 670.0, + 1299.0, + 670.0, + 1299.0, + 719.0, + 322.0, + 719.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 328.0, + 382.0, + 1078.0, + 382.0, + 1078.0, + 426.0, + 328.0, + 426.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 337.0, + 1026.0, + 932.0, + 1026.0, + 932.0, + 1063.0, + 337.0, + 1063.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 7, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 298, + 339, + 1404, + 339, + 1404, + 856, + 298, + 856 + ], + "score": 0.983 + }, + { + "category_id": 1, + "poly": [ + 298, + 942, + 1404, + 942, + 1404, + 1461, + 298, + 1461 + ], + "score": 0.982 + }, + { + "category_id": 1, + "poly": [ + 298, + 1546, + 1403, + 1546, + 1403, + 2034, + 298, + 2034 + ], + "score": 0.981 + }, + { + "category_id": 1, + "poly": [ + 301, + 229, + 1398, + 229, + 1398, + 323, + 301, + 323 + ], + "score": 0.956 + }, + { + "category_id": 2, + 
"poly": [ + 298, + 75, + 878, + 75, + 878, + 106, + 298, + 106 + ], + "score": 0.897 + }, + { + "category_id": 2, + "poly": [ + 840, + 2088, + 858, + 2088, + 858, + 2111, + 840, + 2111 + ], + "score": 0.788 + }, + { + "category_id": 15, + "poly": [ + 295.0, + 71.0, + 880.0, + 71.0, + 880.0, + 110.0, + 295.0, + 110.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 839.0, + 2087.0, + 860.0, + 2087.0, + 860.0, + 2117.0, + 839.0, + 2117.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 338.0, + 1404.0, + 338.0, + 1404.0, + 374.0, + 296.0, + 374.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 366.0, + 1402.0, + 366.0, + 1402.0, + 402.0, + 296.0, + 402.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 397.0, + 1406.0, + 397.0, + 1406.0, + 437.0, + 293.0, + 437.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 425.0, + 1409.0, + 425.0, + 1409.0, + 469.0, + 292.0, + 469.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 459.0, + 1405.0, + 459.0, + 1405.0, + 499.0, + 293.0, + 499.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 488.0, + 1406.0, + 488.0, + 1406.0, + 528.0, + 293.0, + 528.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 518.0, + 1408.0, + 518.0, + 1408.0, + 559.0, + 292.0, + 559.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 550.0, + 1406.0, + 550.0, + 1406.0, + 587.0, + 292.0, + 587.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 580.0, + 1406.0, + 580.0, + 1406.0, + 619.0, + 291.0, + 619.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 610.0, + 1406.0, + 610.0, + 1406.0, + 650.0, + 293.0, + 650.0 + ], + "score": 1.0, + "text": "" + }, + 
{ + "category_id": 15, + "poly": [ + 293.0, + 641.0, + 1405.0, + 641.0, + 1405.0, + 678.0, + 293.0, + 678.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 672.0, + 1406.0, + 672.0, + 1406.0, + 709.0, + 292.0, + 709.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 702.0, + 1405.0, + 702.0, + 1405.0, + 741.0, + 293.0, + 741.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 734.0, + 1404.0, + 734.0, + 1404.0, + 770.0, + 296.0, + 770.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 761.0, + 1406.0, + 761.0, + 1406.0, + 802.0, + 292.0, + 802.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 794.0, + 1405.0, + 794.0, + 1405.0, + 831.0, + 292.0, + 831.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 825.0, + 431.0, + 825.0, + 431.0, + 858.0, + 296.0, + 858.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 940.0, + 1404.0, + 940.0, + 1404.0, + 980.0, + 293.0, + 980.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 968.0, + 1405.0, + 968.0, + 1405.0, + 1011.0, + 292.0, + 1011.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1000.0, + 1406.0, + 1000.0, + 1406.0, + 1043.0, + 291.0, + 1043.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1029.0, + 1404.0, + 1029.0, + 1404.0, + 1073.0, + 292.0, + 1073.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1064.0, + 1405.0, + 1064.0, + 1405.0, + 1103.0, + 293.0, + 1103.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1092.0, + 1405.0, + 1092.0, + 1405.0, + 1131.0, + 292.0, + 1131.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 
1124.0, + 1408.0, + 1124.0, + 1408.0, + 1164.0, + 293.0, + 1164.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1153.0, + 1407.0, + 1153.0, + 1407.0, + 1192.0, + 291.0, + 1192.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1182.0, + 1408.0, + 1182.0, + 1408.0, + 1226.0, + 292.0, + 1226.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1214.0, + 1405.0, + 1214.0, + 1405.0, + 1254.0, + 294.0, + 1254.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1240.0, + 1406.0, + 1240.0, + 1406.0, + 1287.0, + 292.0, + 1287.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1274.0, + 1408.0, + 1274.0, + 1408.0, + 1318.0, + 292.0, + 1318.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1306.0, + 1405.0, + 1306.0, + 1405.0, + 1346.0, + 293.0, + 1346.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1338.0, + 1405.0, + 1338.0, + 1405.0, + 1374.0, + 294.0, + 1374.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1365.0, + 1408.0, + 1365.0, + 1408.0, + 1409.0, + 292.0, + 1409.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1395.0, + 1406.0, + 1395.0, + 1406.0, + 1438.0, + 291.0, + 1438.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1428.0, + 913.0, + 1428.0, + 913.0, + 1465.0, + 294.0, + 1465.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1544.0, + 1404.0, + 1544.0, + 1404.0, + 1582.0, + 294.0, + 1582.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1574.0, + 1405.0, + 1574.0, + 1405.0, + 1612.0, + 293.0, + 1612.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1603.0, + 
1408.0, + 1603.0, + 1408.0, + 1647.0, + 291.0, + 1647.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1638.0, + 1405.0, + 1638.0, + 1405.0, + 1672.0, + 294.0, + 1672.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1665.0, + 1405.0, + 1665.0, + 1405.0, + 1703.0, + 293.0, + 1703.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1698.0, + 1405.0, + 1698.0, + 1405.0, + 1732.0, + 294.0, + 1732.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1721.0, + 1405.0, + 1721.0, + 1405.0, + 1769.0, + 291.0, + 1769.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1756.0, + 1405.0, + 1756.0, + 1405.0, + 1796.0, + 292.0, + 1796.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1788.0, + 1405.0, + 1788.0, + 1405.0, + 1828.0, + 291.0, + 1828.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1817.0, + 1404.0, + 1817.0, + 1404.0, + 1859.0, + 292.0, + 1859.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1848.0, + 1404.0, + 1848.0, + 1404.0, + 1889.0, + 293.0, + 1889.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1877.0, + 1405.0, + 1877.0, + 1405.0, + 1920.0, + 291.0, + 1920.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1909.0, + 1407.0, + 1909.0, + 1407.0, + 1949.0, + 292.0, + 1949.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1938.0, + 1407.0, + 1938.0, + 1407.0, + 1979.0, + 292.0, + 1979.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1970.0, + 1405.0, + 1970.0, + 1405.0, + 2012.0, + 292.0, + 2012.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 2006.0, + 517.0, + 
2006.0, + 517.0, + 2037.0, + 296.0, + 2037.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 225.0, + 1404.0, + 225.0, + 1404.0, + 270.0, + 292.0, + 270.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 258.0, + 1403.0, + 258.0, + 1403.0, + 298.0, + 293.0, + 298.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 289.0, + 363.0, + 289.0, + 363.0, + 326.0, + 295.0, + 326.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 8, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 298, + 730, + 1404, + 730, + 1404, + 977, + 298, + 977 + ], + "score": 0.981 + }, + { + "category_id": 1, + "poly": [ + 298, + 299, + 1403, + 299, + 1403, + 606, + 298, + 606 + ], + "score": 0.98 + }, + { + "category_id": 0, + "poly": [ + 301, + 655, + 704, + 655, + 704, + 695, + 301, + 695 + ], + "score": 0.91 + }, + { + "category_id": 0, + "poly": [ + 298, + 224, + 523, + 224, + 523, + 263, + 298, + 263 + ], + "score": 0.901 + }, + { + "category_id": 2, + "poly": [ + 298, + 74, + 878, + 74, + 878, + 106, + 298, + 106 + ], + "score": 0.9 + }, + { + "category_id": 0, + "poly": [ + 299, + 1026, + 466, + 1026, + 466, + 1063, + 299, + 1063 + ], + "score": 0.898 + }, + { + "category_id": 2, + "poly": [ + 836, + 2088, + 865, + 2088, + 865, + 2112, + 836, + 2112 + ], + "score": 0.845 + }, + { + "category_id": 1, + "poly": [ + 285, + 1077, + 1405, + 1077, + 1405, + 2034, + 285, + 2034 + ], + "score": 0.383 + }, + { + "category_id": 15, + "poly": [ + 293.0, + 651.0, + 709.0, + 651.0, + 709.0, + 706.0, + 293.0, + 706.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 289.0, + 217.0, + 529.0, + 217.0, + 529.0, + 272.0, + 289.0, + 272.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 71.0, + 880.0, + 71.0, + 880.0, + 111.0, + 295.0, + 111.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1023.0, + 470.0, + 1023.0, + 470.0, + 1069.0, + 294.0, + 1069.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 830.0, + 2084.0, + 871.0, + 2084.0, + 871.0, + 2125.0, + 830.0, + 2125.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 727.0, + 1405.0, + 727.0, + 1405.0, + 767.0, + 293.0, + 767.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 761.0, + 1405.0, + 761.0, + 1405.0, + 798.0, + 293.0, + 798.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 791.0, + 1405.0, + 791.0, + 1405.0, + 828.0, + 293.0, + 828.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 817.0, + 1406.0, + 817.0, + 1406.0, + 862.0, + 291.0, + 862.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 851.0, + 1406.0, + 851.0, + 1406.0, + 890.0, + 291.0, + 890.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 881.0, + 1406.0, + 881.0, + 1406.0, + 921.0, + 291.0, + 921.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 913.0, + 1405.0, + 913.0, + 1405.0, + 950.0, + 293.0, + 950.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 944.0, + 900.0, + 944.0, + 900.0, + 981.0, + 294.0, + 981.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 297.0, + 1404.0, + 297.0, + 1404.0, + 334.0, + 293.0, + 334.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 329.0, + 1403.0, + 329.0, + 1403.0, + 363.0, + 294.0, + 363.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 361.0, + 1403.0, + 361.0, + 1403.0, + 397.0, + 296.0, + 397.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + 
"poly": [ + 294.0, + 391.0, + 1404.0, + 391.0, + 1404.0, + 427.0, + 294.0, + 427.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 421.0, + 1407.0, + 421.0, + 1407.0, + 458.0, + 293.0, + 458.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 449.0, + 1407.0, + 449.0, + 1407.0, + 492.0, + 292.0, + 492.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 479.0, + 1407.0, + 479.0, + 1407.0, + 521.0, + 292.0, + 521.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 506.0, + 1408.0, + 506.0, + 1408.0, + 556.0, + 291.0, + 556.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 542.0, + 1405.0, + 542.0, + 1405.0, + 580.0, + 292.0, + 580.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 572.0, + 837.0, + 572.0, + 837.0, + 612.0, + 293.0, + 612.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1076.0, + 1405.0, + 1076.0, + 1405.0, + 1125.0, + 292.0, + 1125.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 1115.0, + 1403.0, + 1115.0, + 1403.0, + 1150.0, + 323.0, + 1150.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 1145.0, + 695.0, + 1145.0, + 695.0, + 1181.0, + 324.0, + 1181.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1204.0, + 1404.0, + 1204.0, + 1404.0, + 1247.0, + 294.0, + 1247.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1235.0, + 1404.0, + 1235.0, + 1404.0, + 1279.0, + 321.0, + 1279.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 1268.0, + 779.0, + 1268.0, + 779.0, + 1304.0, + 324.0, + 1304.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1327.0, + 1404.0, + 
1327.0, + 1404.0, + 1367.0, + 293.0, + 1367.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 1361.0, + 1404.0, + 1361.0, + 1404.0, + 1397.0, + 323.0, + 1397.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1390.0, + 1405.0, + 1390.0, + 1405.0, + 1430.0, + 321.0, + 1430.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 1422.0, + 812.0, + 1422.0, + 812.0, + 1458.0, + 324.0, + 1458.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1482.0, + 1405.0, + 1482.0, + 1405.0, + 1521.0, + 293.0, + 1521.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1512.0, + 1405.0, + 1512.0, + 1405.0, + 1550.0, + 321.0, + 1550.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1542.0, + 1405.0, + 1542.0, + 1405.0, + 1582.0, + 320.0, + 1582.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1573.0, + 1405.0, + 1573.0, + 1405.0, + 1613.0, + 320.0, + 1613.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 1604.0, + 1404.0, + 1604.0, + 1404.0, + 1643.0, + 323.0, + 1643.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 319.0, + 1633.0, + 1407.0, + 1633.0, + 1407.0, + 1680.0, + 319.0, + 1680.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1667.0, + 789.0, + 1667.0, + 789.0, + 1704.0, + 320.0, + 1704.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1724.0, + 1404.0, + 1724.0, + 1404.0, + 1765.0, + 295.0, + 1765.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 1759.0, + 1404.0, + 1759.0, + 1404.0, + 1794.0, + 323.0, + 1794.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1787.0, + 1405.0, + 1787.0, + 
1405.0, + 1827.0, + 320.0, + 1827.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1818.0, + 1405.0, + 1818.0, + 1405.0, + 1858.0, + 321.0, + 1858.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1846.0, + 1405.0, + 1846.0, + 1405.0, + 1888.0, + 320.0, + 1888.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1879.0, + 1405.0, + 1879.0, + 1405.0, + 1919.0, + 321.0, + 1919.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1907.0, + 1404.0, + 1907.0, + 1404.0, + 1947.0, + 321.0, + 1947.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1939.0, + 1403.0, + 1939.0, + 1403.0, + 1979.0, + 321.0, + 1979.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1970.0, + 1405.0, + 1970.0, + 1405.0, + 2009.0, + 320.0, + 2009.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 2001.0, + 734.0, + 2001.0, + 734.0, + 2037.0, + 323.0, + 2037.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 9, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 2, + "poly": [ + 298, + 74, + 878, + 74, + 878, + 107, + 298, + 107 + ], + "score": 0.89 + }, + { + "category_id": 2, + "poly": [ + 835, + 2088, + 863, + 2088, + 863, + 2113, + 835, + 2113 + ], + "score": 0.819 + }, + { + "category_id": 1, + "poly": [ + 299, + 228, + 1401, + 228, + 1401, + 323, + 299, + 323 + ], + "score": 0.709 + }, + { + "category_id": 1, + "poly": [ + 305, + 342, + 1396, + 342, + 1396, + 437, + 305, + 437 + ], + "score": 0.618 + }, + { + "category_id": 1, + "poly": [ + 278, + 398, + 1409, + 398, + 1409, + 2063, + 278, + 2063 + ], + "score": 0.104 + }, + { + "category_id": 15, + "poly": [ + 295.0, + 71.0, + 880.0, + 71.0, + 880.0, + 110.0, + 295.0, + 110.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 831.0, + 2085.0, + 869.0, + 2085.0, + 869.0, + 2124.0, + 831.0, + 2124.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 221.0, + 1407.0, + 221.0, + 1407.0, + 272.0, + 292.0, + 272.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 258.0, + 1406.0, + 258.0, + 1406.0, + 295.0, + 320.0, + 295.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 289.0, + 1057.0, + 289.0, + 1057.0, + 327.0, + 323.0, + 327.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 338.0, + 1401.0, + 338.0, + 1401.0, + 382.0, + 297.0, + 382.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 372.0, + 1403.0, + 372.0, + 1403.0, + 410.0, + 322.0, + 410.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 402.0, + 1207.0, + 402.0, + 1207.0, + 443.0, + 322.0, + 443.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 396.0, + 1206.0, + 396.0, + 1206.0, + 442.0, + 321.0, + 442.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1221.0, + 394.0, + 1243.0, + 394.0, + 1243.0, + 409.0, + 1221.0, + 409.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 453.0, + 1404.0, + 453.0, + 1404.0, + 493.0, + 296.0, + 493.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 480.0, + 1406.0, + 480.0, + 1406.0, + 525.0, + 318.0, + 525.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 515.0, + 1406.0, + 515.0, + 1406.0, + 556.0, + 321.0, + 556.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 545.0, + 1404.0, + 545.0, + 1404.0, + 585.0, + 321.0, + 585.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 578.0, + 1404.0, 
+ 578.0, + 1404.0, + 613.0, + 321.0, + 613.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 316.0, + 600.0, + 1406.0, + 600.0, + 1406.0, + 651.0, + 316.0, + 651.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 635.0, + 682.0, + 635.0, + 682.0, + 675.0, + 320.0, + 675.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 688.0, + 393.0, + 688.0, + 393.0, + 725.0, + 292.0, + 725.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 493.0, + 682.0, + 617.0, + 682.0, + 617.0, + 729.0, + 493.0, + 729.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 646.0, + 682.0, + 921.0, + 682.0, + 921.0, + 729.0, + 646.0, + 729.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1021.0, + 690.0, + 1400.0, + 690.0, + 1400.0, + 725.0, + 1021.0, + 725.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 721.0, + 785.0, + 721.0, + 785.0, + 756.0, + 323.0, + 756.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 289.0, + 763.0, + 1408.0, + 763.0, + 1408.0, + 815.0, + 289.0, + 815.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 800.0, + 1406.0, + 800.0, + 1406.0, + 841.0, + 320.0, + 841.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 832.0, + 1406.0, + 832.0, + 1406.0, + 872.0, + 321.0, + 872.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 863.0, + 1406.0, + 863.0, + 1406.0, + 898.0, + 321.0, + 898.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 316.0, + 887.0, + 1408.0, + 887.0, + 1408.0, + 936.0, + 316.0, + 936.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 920.0, + 1408.0, + 920.0, + 1408.0, + 966.0, + 318.0, + 966.0 + ], + "score": 1.0, + 
"text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 953.0, + 1406.0, + 953.0, + 1406.0, + 993.0, + 320.0, + 993.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 316.0, + 979.0, + 1406.0, + 979.0, + 1406.0, + 1028.0, + 316.0, + 1028.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 1014.0, + 777.0, + 1014.0, + 777.0, + 1054.0, + 318.0, + 1054.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1069.0, + 1406.0, + 1069.0, + 1406.0, + 1104.0, + 296.0, + 1104.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 1096.0, + 1406.0, + 1096.0, + 1406.0, + 1135.0, + 318.0, + 1135.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 1129.0, + 1294.0, + 1129.0, + 1294.0, + 1164.0, + 323.0, + 1164.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1181.0, + 1402.0, + 1181.0, + 1402.0, + 1216.0, + 296.0, + 1216.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1212.0, + 1225.0, + 1212.0, + 1225.0, + 1247.0, + 321.0, + 1247.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 1258.0, + 1404.0, + 1258.0, + 1404.0, + 1300.0, + 290.0, + 1300.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1291.0, + 1404.0, + 1291.0, + 1404.0, + 1332.0, + 320.0, + 1332.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1322.0, + 1029.0, + 1322.0, + 1029.0, + 1361.0, + 321.0, + 1361.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 288.0, + 1370.0, + 1404.0, + 1370.0, + 1404.0, + 1418.0, + 288.0, + 1418.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1405.0, + 1378.0, + 1405.0, + 1378.0, + 1446.0, + 321.0, + 1446.0 + ], + "score": 1.0, + "text": "" + }, + 
{ + "category_id": 15, + "poly": [ + 289.0, + 1447.0, + 1410.0, + 1447.0, + 1410.0, + 1505.0, + 289.0, + 1505.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1486.0, + 1406.0, + 1486.0, + 1406.0, + 1528.0, + 320.0, + 1528.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1514.0, + 558.0, + 1514.0, + 558.0, + 1554.0, + 321.0, + 1554.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1567.0, + 1404.0, + 1567.0, + 1404.0, + 1607.0, + 294.0, + 1607.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 1600.0, + 1402.0, + 1600.0, + 1402.0, + 1640.0, + 323.0, + 1640.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 1631.0, + 930.0, + 1631.0, + 930.0, + 1666.0, + 323.0, + 1666.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 1677.0, + 1406.0, + 1677.0, + 1406.0, + 1725.0, + 290.0, + 1725.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1712.0, + 957.0, + 1712.0, + 957.0, + 1753.0, + 320.0, + 1753.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1766.0, + 1408.0, + 1766.0, + 1408.0, + 1806.0, + 292.0, + 1806.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 1793.0, + 1408.0, + 1793.0, + 1408.0, + 1839.0, + 318.0, + 1839.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1826.0, + 1404.0, + 1826.0, + 1404.0, + 1867.0, + 320.0, + 1867.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1854.0, + 1096.0, + 1854.0, + 1096.0, + 1898.0, + 320.0, + 1898.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 1905.0, + 1406.0, + 1905.0, + 1406.0, + 1951.0, + 290.0, + 1951.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 320.0, + 1940.0, + 1408.0, + 1940.0, + 1408.0, + 1981.0, + 320.0, + 1981.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1970.0, + 1408.0, + 1970.0, + 1408.0, + 2010.0, + 321.0, + 2010.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 2001.0, + 1402.0, + 2001.0, + 1402.0, + 2041.0, + 321.0, + 2041.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 10, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 2, + "poly": [ + 298, + 74, + 878, + 74, + 878, + 107, + 298, + 107 + ], + "score": 0.889 + }, + { + "category_id": 2, + "poly": [ + 835, + 2088, + 865, + 2088, + 865, + 2113, + 835, + 2113 + ], + "score": 0.819 + }, + { + "category_id": 1, + "poly": [ + 298, + 228, + 1404, + 228, + 1404, + 353, + 298, + 353 + ], + "score": 0.737 + }, + { + "category_id": 1, + "poly": [ + 284, + 265, + 1407, + 265, + 1407, + 2035, + 284, + 2035 + ], + "score": 0.255 + }, + { + "category_id": 15, + "poly": [ + 295.0, + 71.0, + 880.0, + 71.0, + 880.0, + 110.0, + 295.0, + 110.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 830.0, + 2084.0, + 871.0, + 2084.0, + 871.0, + 2125.0, + 830.0, + 2125.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 230.0, + 1405.0, + 230.0, + 1405.0, + 266.0, + 294.0, + 266.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 259.0, + 1406.0, + 259.0, + 1406.0, + 298.0, + 321.0, + 298.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 292.0, + 1406.0, + 292.0, + 1406.0, + 323.0, + 320.0, + 323.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 325.0, + 321.0, + 969.0, + 321.0, + 969.0, + 354.0, + 325.0, + 354.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 256.0, + 1405.0, + 
256.0, + 1405.0, + 299.0, + 322.0, + 299.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 287.0, + 1405.0, + 287.0, + 1405.0, + 328.0, + 320.0, + 328.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 314.0, + 972.0, + 314.0, + 972.0, + 359.0, + 320.0, + 359.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 367.0, + 1403.0, + 367.0, + 1403.0, + 410.0, + 291.0, + 410.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 400.0, + 1405.0, + 400.0, + 1405.0, + 443.0, + 320.0, + 443.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 433.0, + 729.0, + 433.0, + 729.0, + 470.0, + 320.0, + 470.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 484.0, + 1402.0, + 484.0, + 1402.0, + 521.0, + 293.0, + 521.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 515.0, + 935.0, + 515.0, + 935.0, + 554.0, + 318.0, + 554.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 562.0, + 1407.0, + 562.0, + 1407.0, + 607.0, + 291.0, + 607.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 597.0, + 830.0, + 597.0, + 830.0, + 638.0, + 318.0, + 638.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 644.0, + 1407.0, + 644.0, + 1407.0, + 688.0, + 291.0, + 688.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 679.0, + 1405.0, + 679.0, + 1405.0, + 721.0, + 318.0, + 721.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 706.0, + 1407.0, + 706.0, + 1407.0, + 751.0, + 318.0, + 751.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 741.0, + 1046.0, + 741.0, + 1046.0, + 778.0, + 322.0, + 778.0 + ], + "score": 1.0, + 
"text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 790.0, + 1405.0, + 790.0, + 1405.0, + 832.0, + 293.0, + 832.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 825.0, + 1189.0, + 825.0, + 1189.0, + 858.0, + 320.0, + 858.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 869.0, + 1405.0, + 869.0, + 1405.0, + 914.0, + 291.0, + 914.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 903.0, + 1405.0, + 903.0, + 1405.0, + 945.0, + 318.0, + 945.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 934.0, + 1402.0, + 934.0, + 1402.0, + 977.0, + 320.0, + 977.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 969.0, + 588.0, + 969.0, + 588.0, + 1000.0, + 324.0, + 1000.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1018.0, + 1405.0, + 1018.0, + 1405.0, + 1055.0, + 293.0, + 1055.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 1049.0, + 1403.0, + 1049.0, + 1403.0, + 1086.0, + 324.0, + 1086.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 1080.0, + 838.0, + 1080.0, + 838.0, + 1117.0, + 324.0, + 1117.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1127.0, + 1405.0, + 1127.0, + 1405.0, + 1169.0, + 291.0, + 1169.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 1154.0, + 1409.0, + 1154.0, + 1409.0, + 1207.0, + 318.0, + 1207.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1189.0, + 1012.0, + 1189.0, + 1012.0, + 1232.0, + 320.0, + 1232.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1244.0, + 1405.0, + 1244.0, + 1405.0, + 1281.0, + 293.0, + 1281.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 
15, + "poly": [ + 320.0, + 1271.0, + 1407.0, + 1271.0, + 1407.0, + 1314.0, + 320.0, + 1314.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1304.0, + 880.0, + 1304.0, + 880.0, + 1341.0, + 322.0, + 1341.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 289.0, + 1353.0, + 1403.0, + 1353.0, + 1403.0, + 1393.0, + 289.0, + 1393.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 1382.0, + 1404.0, + 1382.0, + 1404.0, + 1427.0, + 318.0, + 1427.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 1417.0, + 628.0, + 1417.0, + 628.0, + 1454.0, + 324.0, + 1454.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 288.0, + 1460.0, + 1407.0, + 1460.0, + 1407.0, + 1510.0, + 288.0, + 1510.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1501.0, + 503.0, + 1501.0, + 503.0, + 1538.0, + 320.0, + 1538.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 1545.0, + 1409.0, + 1545.0, + 1409.0, + 1592.0, + 290.0, + 1592.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 314.0, + 1573.0, + 1411.0, + 1573.0, + 1411.0, + 1625.0, + 314.0, + 1625.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1610.0, + 1285.0, + 1610.0, + 1285.0, + 1653.0, + 320.0, + 1653.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 289.0, + 1658.0, + 1405.0, + 1658.0, + 1405.0, + 1703.0, + 289.0, + 1703.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 1692.0, + 1335.0, + 1692.0, + 1335.0, + 1732.0, + 318.0, + 1732.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 288.0, + 1736.0, + 1405.0, + 1736.0, + 1405.0, + 1791.0, + 288.0, + 1791.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ 
+ 318.0, + 1773.0, + 744.0, + 1773.0, + 744.0, + 1816.0, + 318.0, + 1816.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1824.0, + 1121.0, + 1824.0, + 1121.0, + 1869.0, + 291.0, + 1869.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 289.0, + 1875.0, + 1407.0, + 1875.0, + 1407.0, + 1923.0, + 289.0, + 1923.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1908.0, + 1405.0, + 1908.0, + 1405.0, + 1951.0, + 320.0, + 1951.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1941.0, + 1405.0, + 1941.0, + 1405.0, + 1978.0, + 322.0, + 1978.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 1966.0, + 1405.0, + 1966.0, + 1405.0, + 2011.0, + 318.0, + 2011.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 2001.0, + 1067.0, + 2001.0, + 1067.0, + 2038.0, + 322.0, + 2038.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 11, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 298, + 1318, + 1404, + 1318, + 1404, + 1474, + 298, + 1474 + ], + "score": 0.953 + }, + { + "category_id": 1, + "poly": [ + 299, + 636, + 1405, + 636, + 1405, + 915, + 299, + 915 + ], + "score": 0.95 + }, + { + "category_id": 1, + "poly": [ + 296, + 1053, + 1404, + 1053, + 1404, + 1210, + 296, + 1210 + ], + "score": 0.948 + }, + { + "category_id": 1, + "poly": [ + 297, + 433, + 1405, + 433, + 1405, + 527, + 297, + 527 + ], + "score": 0.936 + }, + { + "category_id": 1, + "poly": [ + 297, + 1583, + 1403, + 1583, + 1403, + 1710, + 297, + 1710 + ], + "score": 0.936 + }, + { + "category_id": 1, + "poly": [ + 298, + 315, + 1405, + 315, + 1405, + 411, + 298, + 411 + ], + "score": 0.93 + }, + { + "category_id": 1, + "poly": [ + 298, + 1731, + 1404, + 1731, + 1404, + 1917, + 298, + 1917 + ], + "score": 0.927 + }, + { + 
"category_id": 1, + "poly": [ + 300, + 1942, + 1402, + 1942, + 1402, + 2035, + 300, + 2035 + ], + "score": 0.925 + }, + { + "category_id": 1, + "poly": [ + 298, + 937, + 1405, + 937, + 1405, + 1031, + 298, + 1031 + ], + "score": 0.925 + }, + { + "category_id": 1, + "poly": [ + 297, + 1231, + 1399, + 1231, + 1399, + 1297, + 297, + 1297 + ], + "score": 0.923 + }, + { + "category_id": 1, + "poly": [ + 294, + 229, + 1402, + 229, + 1402, + 293, + 294, + 293 + ], + "score": 0.914 + }, + { + "category_id": 1, + "poly": [ + 297, + 550, + 1399, + 550, + 1399, + 614, + 297, + 614 + ], + "score": 0.91 + }, + { + "category_id": 1, + "poly": [ + 296, + 1496, + 1400, + 1496, + 1400, + 1562, + 296, + 1562 + ], + "score": 0.902 + }, + { + "category_id": 2, + "poly": [ + 298, + 74, + 878, + 74, + 878, + 106, + 298, + 106 + ], + "score": 0.897 + }, + { + "category_id": 2, + "poly": [ + 836, + 2088, + 865, + 2088, + 865, + 2112, + 836, + 2112 + ], + "score": 0.826 + }, + { + "category_id": 15, + "poly": [ + 295.0, + 71.0, + 880.0, + 71.0, + 880.0, + 111.0, + 295.0, + 111.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 827.0, + 2085.0, + 869.0, + 2085.0, + 869.0, + 2122.0, + 827.0, + 2122.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1317.0, + 1406.0, + 1317.0, + 1406.0, + 1359.0, + 296.0, + 1359.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1347.0, + 1405.0, + 1347.0, + 1405.0, + 1391.0, + 321.0, + 1391.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1376.0, + 1405.0, + 1376.0, + 1405.0, + 1420.0, + 321.0, + 1420.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1407.0, + 1405.0, + 1407.0, + 1405.0, + 1451.0, + 320.0, + 1451.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1441.0, + 634.0, + 1441.0, + 634.0, + 1480.0, + 322.0, + 1480.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 635.0, + 1406.0, + 635.0, + 1406.0, + 675.0, + 295.0, + 675.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 666.0, + 1405.0, + 666.0, + 1405.0, + 706.0, + 322.0, + 706.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 319.0, + 694.0, + 1406.0, + 694.0, + 1406.0, + 739.0, + 319.0, + 739.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 319.0, + 725.0, + 1405.0, + 725.0, + 1405.0, + 769.0, + 319.0, + 769.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 756.0, + 1408.0, + 756.0, + 1408.0, + 800.0, + 318.0, + 800.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 786.0, + 1406.0, + 786.0, + 1406.0, + 827.0, + 321.0, + 827.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 319.0, + 816.0, + 1406.0, + 816.0, + 1406.0, + 860.0, + 319.0, + 860.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 848.0, + 1406.0, + 848.0, + 1406.0, + 887.0, + 322.0, + 887.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 881.0, + 841.0, + 881.0, + 841.0, + 918.0, + 323.0, + 918.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1053.0, + 1406.0, + 1053.0, + 1406.0, + 1094.0, + 294.0, + 1094.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1082.0, + 1407.0, + 1082.0, + 1407.0, + 1125.0, + 320.0, + 1125.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1113.0, + 1404.0, + 1113.0, + 1404.0, + 1153.0, + 322.0, + 1153.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1142.0, + 1406.0, + 1142.0, + 1406.0, + 1186.0, + 320.0, + 1186.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 322.0, + 1174.0, + 728.0, + 1174.0, + 728.0, + 1217.0, + 322.0, + 1217.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 431.0, + 1406.0, + 431.0, + 1406.0, + 469.0, + 296.0, + 469.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 458.0, + 1407.0, + 458.0, + 1407.0, + 506.0, + 320.0, + 506.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 495.0, + 1113.0, + 495.0, + 1113.0, + 529.0, + 324.0, + 529.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1581.0, + 1407.0, + 1581.0, + 1407.0, + 1622.0, + 294.0, + 1622.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 1615.0, + 1404.0, + 1615.0, + 1404.0, + 1651.0, + 324.0, + 1651.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1644.0, + 1404.0, + 1644.0, + 1404.0, + 1683.0, + 321.0, + 1683.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1675.0, + 1052.0, + 1675.0, + 1052.0, + 1714.0, + 321.0, + 1714.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 309.0, + 1406.0, + 309.0, + 1406.0, + 357.0, + 292.0, + 357.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 346.0, + 1405.0, + 346.0, + 1405.0, + 383.0, + 320.0, + 383.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 376.0, + 1312.0, + 376.0, + 1312.0, + 414.0, + 320.0, + 414.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1732.0, + 1404.0, + 1732.0, + 1404.0, + 1768.0, + 296.0, + 1768.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1763.0, + 1404.0, + 1763.0, + 1404.0, + 1798.0, + 322.0, + 1798.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 
322.0, + 1792.0, + 1405.0, + 1792.0, + 1405.0, + 1832.0, + 322.0, + 1832.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1821.0, + 1405.0, + 1821.0, + 1405.0, + 1862.0, + 320.0, + 1862.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1853.0, + 1406.0, + 1853.0, + 1406.0, + 1893.0, + 321.0, + 1893.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1884.0, + 660.0, + 1884.0, + 660.0, + 1920.0, + 320.0, + 1920.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1937.0, + 1404.0, + 1937.0, + 1404.0, + 1980.0, + 294.0, + 1980.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 1968.0, + 1403.0, + 1968.0, + 1403.0, + 2008.0, + 323.0, + 2008.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 2002.0, + 1051.0, + 2002.0, + 1051.0, + 2038.0, + 321.0, + 2038.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 932.0, + 1403.0, + 932.0, + 1403.0, + 976.0, + 292.0, + 976.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 968.0, + 1405.0, + 968.0, + 1405.0, + 1006.0, + 322.0, + 1006.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 325.0, + 999.0, + 800.0, + 999.0, + 800.0, + 1032.0, + 325.0, + 1032.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1222.0, + 1405.0, + 1222.0, + 1405.0, + 1277.0, + 291.0, + 1277.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1261.0, + 1081.0, + 1261.0, + 1081.0, + 1300.0, + 322.0, + 1300.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 228.0, + 1405.0, + 228.0, + 1405.0, + 267.0, + 294.0, + 267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 258.0, + 673.0, + 
258.0, + 673.0, + 294.0, + 323.0, + 294.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 547.0, + 1404.0, + 547.0, + 1404.0, + 586.0, + 291.0, + 586.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 579.0, + 922.0, + 579.0, + 922.0, + 615.0, + 323.0, + 615.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1495.0, + 1404.0, + 1495.0, + 1404.0, + 1535.0, + 296.0, + 1535.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1526.0, + 1313.0, + 1526.0, + 1313.0, + 1565.0, + 321.0, + 1565.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 12, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 2, + "poly": [ + 298, + 74, + 878, + 74, + 878, + 106, + 298, + 106 + ], + "score": 0.888 + }, + { + "category_id": 2, + "poly": [ + 835, + 2088, + 865, + 2088, + 865, + 2113, + 835, + 2113 + ], + "score": 0.834 + }, + { + "category_id": 1, + "poly": [ + 289, + 190, + 1407, + 190, + 1407, + 1464, + 289, + 1464 + ], + "score": 0.133 + }, + { + "category_id": 13, + "poly": [ + 326, + 379, + 348, + 379, + 348, + 403, + 326, + 403 + ], + "score": 0.4, + "latex": "\\pi" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 71.0, + 880.0, + 71.0, + 880.0, + 111.0, + 295.0, + 111.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 830.0, + 2084.0, + 871.0, + 2084.0, + 871.0, + 2125.0, + 830.0, + 2125.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 223.0, + 1406.0, + 223.0, + 1406.0, + 267.0, + 291.0, + 267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 260.0, + 1402.0, + 260.0, + 1402.0, + 296.0, + 322.0, + 296.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 289.0, + 590.0, + 289.0, + 590.0, + 325.0, + 322.0, + 325.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 340.0, + 1403.0, + 340.0, + 1403.0, + 380.0, + 295.0, + 380.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 372.0, + 325.0, + 372.0, + 325.0, + 412.0, + 321.0, + 412.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 349.0, + 372.0, + 1405.0, + 372.0, + 1405.0, + 412.0, + 349.0, + 412.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 319.0, + 402.0, + 1405.0, + 402.0, + 1405.0, + 441.0, + 319.0, + 441.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 319.0, + 429.0, + 1405.0, + 429.0, + 1405.0, + 474.0, + 319.0, + 474.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 463.0, + 1402.0, + 463.0, + 1402.0, + 499.0, + 322.0, + 499.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 495.0, + 1399.0, + 495.0, + 1399.0, + 531.0, + 322.0, + 531.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 525.0, + 631.0, + 525.0, + 631.0, + 559.0, + 321.0, + 559.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 574.0, + 1406.0, + 574.0, + 1406.0, + 617.0, + 293.0, + 617.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 319.0, + 602.0, + 1405.0, + 602.0, + 1405.0, + 648.0, + 319.0, + 648.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 640.0, + 1403.0, + 640.0, + 1403.0, + 675.0, + 322.0, + 675.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 667.0, + 1058.0, + 667.0, + 1058.0, + 705.0, + 322.0, + 705.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 721.0, + 1403.0, + 721.0, + 1403.0, + 757.0, + 295.0, + 757.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": 
[ + 321.0, + 751.0, + 1406.0, + 751.0, + 1406.0, + 791.0, + 321.0, + 791.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 783.0, + 1403.0, + 783.0, + 1403.0, + 818.0, + 322.0, + 818.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 811.0, + 476.0, + 811.0, + 476.0, + 850.0, + 323.0, + 850.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 860.0, + 1405.0, + 860.0, + 1405.0, + 906.0, + 293.0, + 906.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 319.0, + 894.0, + 1047.0, + 894.0, + 1047.0, + 933.0, + 319.0, + 933.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 947.0, + 1401.0, + 947.0, + 1401.0, + 983.0, + 297.0, + 983.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 979.0, + 779.0, + 979.0, + 779.0, + 1014.0, + 324.0, + 1014.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1032.0, + 1402.0, + 1032.0, + 1402.0, + 1067.0, + 294.0, + 1067.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 317.0, + 1056.0, + 1406.0, + 1056.0, + 1406.0, + 1105.0, + 317.0, + 1105.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1092.0, + 394.0, + 1092.0, + 394.0, + 1126.0, + 321.0, + 1126.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1143.0, + 1405.0, + 1143.0, + 1405.0, + 1183.0, + 293.0, + 1183.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1176.0, + 1047.0, + 1176.0, + 1047.0, + 1212.0, + 322.0, + 1212.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1225.0, + 1406.0, + 1225.0, + 1406.0, + 1265.0, + 293.0, + 1265.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1259.0, + 1405.0, + 1259.0, + 
1405.0, + 1295.0, + 322.0, + 1295.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1289.0, + 1385.0, + 1289.0, + 1385.0, + 1325.0, + 322.0, + 1325.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1342.0, + 1403.0, + 1342.0, + 1403.0, + 1378.0, + 295.0, + 1378.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1371.0, + 1408.0, + 1371.0, + 1408.0, + 1411.0, + 321.0, + 1411.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1398.0, + 1333.0, + 1398.0, + 1333.0, + 1440.0, + 322.0, + 1440.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 13, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 297, + 752, + 1405, + 752, + 1405, + 1151, + 297, + 1151 + ], + "score": 0.982 + }, + { + "category_id": 1, + "poly": [ + 298, + 1738, + 1404, + 1738, + 1404, + 1955, + 298, + 1955 + ], + "score": 0.982 + }, + { + "category_id": 1, + "poly": [ + 297, + 1255, + 1405, + 1255, + 1405, + 1624, + 297, + 1624 + ], + "score": 0.981 + }, + { + "category_id": 5, + "poly": [ + 298, + 312, + 1511, + 312, + 1511, + 572, + 298, + 572 + ], + "score": 0.98, + "html": "
RankDataset: TaskWeightPerfTask Description
1WIQA: Last Process0.7228.1 Identifying the last step of a given process.
2RACE: Is this the Right Answer0.6830.8Determining if given answer is correct.
3WIQA: First Process0.6328.1 Identifying the first step of a given process.
4AdversarialQA: BiDAF0.6125.1Aserialmode-in-the-eby an
5WebQuestions: What is the Answer0.5827.0 Asweringrqomesten based oninformation
" + }, + { + "category_id": 6, + "poly": [ + 297, + 222, + 1405, + 222, + 1405, + 288, + 297, + 288 + ], + "score": 0.933 + }, + { + "category_id": 0, + "poly": [ + 300, + 1666, + 688, + 1666, + 688, + 1705, + 300, + 1705 + ], + "score": 0.919 + }, + { + "category_id": 2, + "poly": [ + 298, + 74, + 878, + 74, + 878, + 106, + 298, + 106 + ], + "score": 0.908 + }, + { + "category_id": 0, + "poly": [ + 300, + 625, + 584, + 625, + 584, + 665, + 300, + 665 + ], + "score": 0.907 + }, + { + "category_id": 2, + "poly": [ + 836, + 2088, + 865, + 2088, + 865, + 2112, + 836, + 2112 + ], + "score": 0.856 + }, + { + "category_id": 1, + "poly": [ + 333, + 1186, + 1027, + 1186, + 1027, + 1221, + 333, + 1221 + ], + "score": 0.841 + }, + { + "category_id": 1, + "poly": [ + 334, + 690, + 1020, + 690, + 1020, + 725, + 334, + 725 + ], + "score": 0.711 + }, + { + "category_id": 0, + "poly": [ + 334, + 690, + 1020, + 690, + 1020, + 725, + 334, + 725 + ], + "score": 0.255 + }, + { + "category_id": 15, + "poly": [ + 295.0, + 221.0, + 1403.0, + 221.0, + 1403.0, + 259.0, + 295.0, + 259.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 252.0, + 1278.0, + 252.0, + 1278.0, + 289.0, + 294.0, + 289.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1665.0, + 690.0, + 1665.0, + 690.0, + 1709.0, + 294.0, + 1709.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 71.0, + 880.0, + 71.0, + 880.0, + 111.0, + 295.0, + 111.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 623.0, + 585.0, + 623.0, + 585.0, + 672.0, + 294.0, + 672.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 830.0, + 2084.0, + 871.0, + 2084.0, + 871.0, + 2125.0, + 830.0, + 2125.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 330.0, + 686.0, + 1023.0, + 686.0, + 1023.0, + 728.0, + 330.0, + 728.0 + ], + "score": 1.0, + 
"text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 753.0, + 1405.0, + 753.0, + 1405.0, + 789.0, + 296.0, + 789.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 783.0, + 1405.0, + 783.0, + 1405.0, + 819.0, + 294.0, + 819.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 812.0, + 1405.0, + 812.0, + 1405.0, + 849.0, + 292.0, + 849.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 842.0, + 1405.0, + 842.0, + 1405.0, + 881.0, + 294.0, + 881.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 875.0, + 1403.0, + 875.0, + 1403.0, + 911.0, + 294.0, + 911.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 905.0, + 1405.0, + 905.0, + 1405.0, + 941.0, + 294.0, + 941.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 936.0, + 1405.0, + 936.0, + 1405.0, + 972.0, + 295.0, + 972.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 967.0, + 1405.0, + 967.0, + 1405.0, + 1003.0, + 295.0, + 1003.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 994.0, + 1407.0, + 994.0, + 1407.0, + 1039.0, + 292.0, + 1039.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1024.0, + 1405.0, + 1024.0, + 1405.0, + 1066.0, + 292.0, + 1066.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1055.0, + 1405.0, + 1055.0, + 1405.0, + 1096.0, + 292.0, + 1096.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1085.0, + 1407.0, + 1085.0, + 1407.0, + 1126.0, + 292.0, + 1126.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1118.0, + 984.0, + 1118.0, + 984.0, + 1154.0, + 294.0, + 1154.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + 
"poly": [ + 293.0, + 1736.0, + 1408.0, + 1736.0, + 1408.0, + 1778.0, + 293.0, + 1778.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1767.0, + 1406.0, + 1767.0, + 1406.0, + 1809.0, + 292.0, + 1809.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1797.0, + 1405.0, + 1797.0, + 1405.0, + 1839.0, + 292.0, + 1839.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1828.0, + 1406.0, + 1828.0, + 1406.0, + 1869.0, + 292.0, + 1869.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1858.0, + 1406.0, + 1858.0, + 1406.0, + 1897.0, + 291.0, + 1897.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1890.0, + 1408.0, + 1890.0, + 1408.0, + 1932.0, + 292.0, + 1932.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1922.0, + 475.0, + 1922.0, + 475.0, + 1958.0, + 294.0, + 1958.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1254.0, + 1406.0, + 1254.0, + 1406.0, + 1292.0, + 294.0, + 1292.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1287.0, + 1406.0, + 1287.0, + 1406.0, + 1321.0, + 296.0, + 1321.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1316.0, + 1406.0, + 1316.0, + 1406.0, + 1354.0, + 295.0, + 1354.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1347.0, + 1407.0, + 1347.0, + 1407.0, + 1383.0, + 292.0, + 1383.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1378.0, + 1405.0, + 1378.0, + 1405.0, + 1413.0, + 294.0, + 1413.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1405.0, + 1407.0, + 1405.0, + 1407.0, + 1444.0, + 292.0, + 1444.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 
291.0, + 1435.0, + 1406.0, + 1435.0, + 1406.0, + 1480.0, + 291.0, + 1480.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1468.0, + 1403.0, + 1468.0, + 1403.0, + 1505.0, + 294.0, + 1505.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1497.0, + 1407.0, + 1497.0, + 1407.0, + 1538.0, + 294.0, + 1538.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1529.0, + 1405.0, + 1529.0, + 1405.0, + 1567.0, + 294.0, + 1567.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1557.0, + 1405.0, + 1557.0, + 1405.0, + 1597.0, + 294.0, + 1597.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1591.0, + 1208.0, + 1591.0, + 1208.0, + 1625.0, + 293.0, + 1625.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 329.0, + 1182.0, + 1030.0, + 1182.0, + 1030.0, + 1221.0, + 329.0, + 1221.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 330.0, + 686.0, + 1023.0, + 686.0, + 1023.0, + 728.0, + 330.0, + 728.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 14, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 5, + "poly": [ + 312, + 710, + 1382, + 710, + 1382, + 1751, + 312, + 1751 + ], + "score": 0.983, + "html": "
TaskICLbestIA3bestLoRAbestFFTbestLoraHubbest
Boolean Expressions62.758.060.765.360.7
Causal Judgement59.862.157.560.963.2
Date Understanding21.320.740.767.345.3
Disambiguation69.30.068.770.768.0
Dyck Languages2.04.725.333.32.7
Formal Fallacies59.352.056.756.059.3
Geometric Shapes20.015.328.739.318.7
Hyperbaton72.749.357.382.072.7
Logical DeductionS (five objects)39.332.741.343.340.0
Logical DeductionS (seven objects)42.034.042.746.046.0
LogicalDrectjoets)52.78.756.760.752.7
Movie Recommendation56.762.064.570.762.0
Multistep Arithmetic0.70.70.70.01.3
Navigate46.747.350.750.051.3
Object Counting34.735.342.038.036.7
Penguins in a Table43.545.741.337.047.8
Reasoning about Colored Objects41.341.340.738.744.7
Ruin Names20.725.342.066.028.7
Salient Translation Error Detection48.037.317.321.342.7
Snarks55.156.459.069.261.5
Sports Understanding56.755.358.758.762.7
Temporal Sequences26.718.731.348.721.3
Tracking Shuffled ObjectsS (five objects)12.012.016.020.016.7
Tracking Shuffled ObjectsS (seven objects)6.76.712.010.015.3
Tracking Shuffled ObjectsS (three objects)31.330.732.036.031.3
Web of Lies54.054.755.354.057.3
Word Sorting0.71.35.36.01.3
Best Performance (Average)38.432.140.946.241.2
" + }, + { + "category_id": 6, + "poly": [ + 297, + 500, + 1405, + 500, + 1405, + 688, + 297, + 688 + ], + "score": 0.973 + }, + { + "category_id": 2, + "poly": [ + 297, + 74, + 878, + 74, + 878, + 107, + 297, + 107 + ], + "score": 0.911 + }, + { + "category_id": 2, + "poly": [ + 835, + 2088, + 865, + 2088, + 865, + 2113, + 835, + 2113 + ], + "score": 0.849 + }, + { + "category_id": 13, + "poly": [ + 835, + 594, + 857, + 594, + 857, + 626, + 835, + 626 + ], + "score": 0.76, + "latex": "\\ S" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 496.0, + 1406.0, + 496.0, + 1406.0, + 541.0, + 292.0, + 541.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 530.0, + 1406.0, + 530.0, + 1406.0, + 570.0, + 294.0, + 570.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 562.0, + 1405.0, + 562.0, + 1405.0, + 598.0, + 295.0, + 598.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 591.0, + 834.0, + 591.0, + 834.0, + 630.0, + 291.0, + 630.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 858.0, + 591.0, + 1405.0, + 591.0, + 1405.0, + 630.0, + 858.0, + 630.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 621.0, + 1406.0, + 621.0, + 1406.0, + 661.0, + 292.0, + 661.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 653.0, + 1222.0, + 653.0, + 1222.0, + 690.0, + 292.0, + 690.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 71.0, + 880.0, + 71.0, + 880.0, + 110.0, + 295.0, + 110.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 831.0, + 2085.0, + 870.0, + 2085.0, + 870.0, + 2123.0, + 831.0, + 2123.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 15, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 5, + "poly": [ + 320, + 449, + 
1371, + 449, + 1371, + 1597, + 320, + 1597 + ], + "score": 0.984, + "html": "
Task ↓ Rank β†’4best 4avg16avg16best64avg64best
Boolean Expressions52.13 57.3350.6758.0047.4758.00
Causal Judgement52.4155.1749.6654.0250.8054.02
Date Understanding0.402.0014.4029.334.5310.00
Disambiguation10.0031.3326.9342.001.734.67
Dyck Languages0.400.670.400.670.402.00
Formal Fallacies48.4054.0046.9351.3346.9350.00
Geometric Shapes0.000.006.5332.671.477.33
Hyperbaton30.1350.0039.07 57.3332.9348.00
Logical DeductionS (five objects)5.2014.678.8019.331.336.67
Logical DeductionS (seven objects)6.4017.339.3319.333.4716.00
Logical DeductionS14.4032.0021.7334.676.9315.33
(three objects) Movie Recommendation7.0718.677.8722.001.206.00
Multistep Arithmetic two0.000.000.000.000.000.00
Navigate49.6054.6752.2756.6749.8752.00
Object Counting7.2018.0016.0021.3313.7326.67
Penguins ina Table6.5213.0410.4317.390.432.17
Reasoning about Colored Objects6.2710.005.0716.670.532.67
Ruin Names7.7313.3313.2028.005.7315.33
Salient Translation Error Detection0.000.001.738.670.000.00
Snarks21.2842.3149.4960.2616.1538.46
Sports Understanding46.5358.6746.8058.6746.5358.67
Temporal Sequences3.0713.336.5326.672.4012.00
Tracking Shuffled ObjectsS5.2014.004.139.330.130.67
(five objects) Tracking Shuffled ObjectsS (seven objects)2.6710.002.8014.003.208.00
Tracking Shuffled ObjectsS3.7317.3316.2734.675.8726.67
(three objects) Web of Lies48.5354.00 57.33
Word Sorting0.400.6754.00 0.1356.00 0.6754.67 0.000.00
20.78
Average Performance per Task16.1424.1730.7314.7621.43
" + }, + { + "category_id": 0, + "poly": [ + 297, + 224, + 959, + 224, + 959, + 264, + 297, + 264 + ], + "score": 0.936 + }, + { + "category_id": 2, + "poly": [ + 298, + 74, + 878, + 74, + 878, + 107, + 298, + 107 + ], + "score": 0.901 + }, + { + "category_id": 2, + "poly": [ + 835, + 2088, + 865, + 2088, + 865, + 2112, + 835, + 2112 + ], + "score": 0.847 + }, + { + "category_id": 6, + "poly": [ + 297, + 303, + 1406, + 303, + 1406, + 427, + 297, + 427 + ], + "score": 0.716 + }, + { + "category_id": 1, + "poly": [ + 297, + 303, + 1406, + 303, + 1406, + 427, + 297, + 427 + ], + "score": 0.439 + }, + { + "category_id": 13, + "poly": [ + 384, + 397, + 458, + 397, + 458, + 426, + 384, + 426 + ], + "score": 0.86, + "latex": "0 . 6 7 \\%" + }, + { + "category_id": 13, + "poly": [ + 417, + 367, + 480, + 367, + 480, + 396, + 417, + 396 + ], + "score": 0.86, + "latex": "0 . { \\bar { 0 } } \\%" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 223.0, + 961.0, + 223.0, + 961.0, + 268.0, + 296.0, + 268.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 71.0, + 880.0, + 71.0, + 880.0, + 110.0, + 295.0, + 110.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 830.0, + 2084.0, + 870.0, + 2084.0, + 870.0, + 2125.0, + 830.0, + 2125.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 303.0, + 1404.0, + 303.0, + 1404.0, + 339.0, + 295.0, + 339.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 333.0, + 1406.0, + 333.0, + 1406.0, + 370.0, + 292.0, + 370.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 364.0, + 416.0, + 364.0, + 416.0, + 402.0, + 292.0, + 402.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 481.0, + 364.0, + 1407.0, + 364.0, + 1407.0, + 402.0, + 481.0, + 402.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 397.0, + 
383.0, + 397.0, + 383.0, + 427.0, + 292.0, + 427.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 459.0, + 397.0, + 469.0, + 397.0, + 469.0, + 427.0, + 459.0, + 427.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 303.0, + 1404.0, + 303.0, + 1404.0, + 339.0, + 295.0, + 339.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 333.0, + 1406.0, + 333.0, + 1406.0, + 370.0, + 292.0, + 370.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 364.0, + 416.0, + 364.0, + 416.0, + 402.0, + 292.0, + 402.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 481.0, + 364.0, + 1407.0, + 364.0, + 1407.0, + 402.0, + 481.0, + 402.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 397.0, + 383.0, + 397.0, + 383.0, + 427.0, + 292.0, + 427.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 459.0, + 397.0, + 469.0, + 397.0, + 469.0, + 427.0, + 459.0, + 427.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 16, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 297, + 311, + 1405, + 311, + 1405, + 498, + 297, + 498 + ], + "score": 0.957 + }, + { + "category_id": 0, + "poly": [ + 301, + 224, + 700, + 224, + 700, + 265, + 301, + 265 + ], + "score": 0.915 + }, + { + "category_id": 2, + "poly": [ + 298, + 74, + 878, + 74, + 878, + 107, + 298, + 107 + ], + "score": 0.904 + }, + { + "category_id": 2, + "poly": [ + 835, + 2088, + 865, + 2088, + 865, + 2112, + 835, + 2112 + ], + "score": 0.849 + }, + { + "category_id": 13, + "poly": [ + 1285, + 375, + 1345, + 375, + 1345, + 408, + 1285, + 408 + ], + "score": 0.27, + "latex": "( a v g )" + }, + { + "category_id": 5, + "poly": [ + 389, + 512, + 1317, + 512, + 1317, + 1668, + 389, + 1668 + ], + "score": 0.976, + "html": "
TaskZeroLoraHub avgLoraHub best
Boolean Expressions52.058.763.3
Causal Judgement62.153.859.8
Date Understanding38.037.638.0
Disambiguation Qa0.020.5 54.7
Dyck Languages1.30.92.0
Formal Fallacies56.056.056.0
Geometric Shapes8.717.528.0
Hyperbaton45.353.556.7
Logical DeductionS (five objects)1.342.748.7
Logical DeductionS (seven objects)8.744.350.0
Logical DeductionS (three objects)0.756.461.3
Movie Recommendation2.062.866.0
Multistep Arithmetic Two0.00.40.7
Navigate50.750.750.7
Object Counting39.340.748.0
Penguins In A Table17.440.945.7
Reasoning About Colored Objects46.747.350.7
Ruin Names18.035.644.7
Salient Translation Error Detection44.745.148.7
Snarks60.360.861.5
Sports Understanding56.751.353.3
Temporal Sequences21.321.522.0
Tracking Shuffled ObjectsS3.39.913.3
(five objects) Tracking Shuffled ObjectsS (seven objects)5.37.38.7
Tracking Shuffled ObjectsS7.321.731.3
(three objects) Web Of Lies54.747.148.7
Word Sorting1.31.52.0
Average Performance per Task25.836.541.3
" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 218.0, + 706.0, + 218.0, + 706.0, + 275.0, + 292.0, + 275.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 71.0, + 880.0, + 71.0, + 880.0, + 110.0, + 295.0, + 110.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 831.0, + 2084.0, + 870.0, + 2084.0, + 870.0, + 2121.0, + 831.0, + 2121.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 310.0, + 1403.0, + 310.0, + 1403.0, + 346.0, + 295.0, + 346.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 340.0, + 1406.0, + 340.0, + 1406.0, + 380.0, + 292.0, + 380.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 370.0, + 1284.0, + 370.0, + 1284.0, + 412.0, + 291.0, + 412.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1346.0, + 370.0, + 1406.0, + 370.0, + 1406.0, + 412.0, + 1346.0, + 412.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 403.0, + 1405.0, + 403.0, + 1405.0, + 439.0, + 295.0, + 439.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 431.0, + 1406.0, + 431.0, + 1406.0, + 472.0, + 294.0, + 472.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 461.0, + 993.0, + 461.0, + 993.0, + 504.0, + 291.0, + 504.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 17, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 5, + "poly": [ + 429, + 595, + 1269, + 595, + 1269, + 1746, + 429, + 1746 + ], + "score": 0.982, + "html": "
TaskLoraHubavgLoraHubfilter
Boolean Expressions55.560.00
Causal Judgement54.352.9
Date Understanding32.933.3
Disambiguation45.262.7
Dyck Languages1.00.0
Formal Fallacies52.854.0
Geometric Shapes7.44.0
Hyperbaton62.864.0
Logical DeductionS (five objects)36.137.3
Logical DeductionS (seven objects)36.822.0
Logical DeductionS (three objects)45.756.0
Movie Recommendation55.368.0
Multistep Arithmetic0.40.7
Navigate47.149.3
Object Counting33.738.7
Penguins in a Table35.937.0
Reasoning about Colored Objects40.033.3
Ruin Names24.422.0
Salient Translation Error Detection36.024.0
Snarks56.952.66
Sports Understanding56.758.0
Temporal Sequences18.227.3
Tracking Shuffled ObjectsS12.311.3
(five objects) Tracking Shuffled ObjectsS7.78.0
(seven objects) Tracking Shuffled ObjectsS29.232.7
(three objects) Web of Lies50.146.0
Word Sorting1.11.3
34.735.4
Avg Performance Per Task
" + }, + { + "category_id": 1, + "poly": [ + 297, + 296, + 1405, + 296, + 1405, + 512, + 297, + 512 + ], + "score": 0.98 + }, + { + "category_id": 0, + "poly": [ + 298, + 223, + 942, + 223, + 942, + 265, + 298, + 265 + ], + "score": 0.932 + }, + { + "category_id": 6, + "poly": [ + 479, + 541, + 1215, + 541, + 1215, + 577, + 479, + 577 + ], + "score": 0.916 + }, + { + "category_id": 2, + "poly": [ + 298, + 74, + 878, + 74, + 878, + 107, + 298, + 107 + ], + "score": 0.9 + }, + { + "category_id": 2, + "poly": [ + 836, + 2088, + 865, + 2088, + 865, + 2112, + 836, + 2112 + ], + "score": 0.847 + }, + { + "category_id": 15, + "poly": [ + 291.0, + 222.0, + 944.0, + 222.0, + 944.0, + 272.0, + 291.0, + 272.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 479.0, + 533.0, + 1220.0, + 533.0, + 1220.0, + 585.0, + 479.0, + 585.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 71.0, + 880.0, + 71.0, + 880.0, + 110.0, + 295.0, + 110.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 831.0, + 2085.0, + 870.0, + 2085.0, + 870.0, + 2124.0, + 831.0, + 2124.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 295.0, + 1405.0, + 295.0, + 1405.0, + 334.0, + 294.0, + 334.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 324.0, + 1405.0, + 324.0, + 1405.0, + 362.0, + 294.0, + 362.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 353.0, + 1406.0, + 353.0, + 1406.0, + 394.0, + 294.0, + 394.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 387.0, + 1406.0, + 387.0, + 1406.0, + 426.0, + 292.0, + 426.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 416.0, + 1405.0, + 416.0, + 1405.0, + 455.0, + 292.0, + 455.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 446.0, + 
1405.0, + 446.0, + 1405.0, + 487.0, + 292.0, + 487.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 481.0, + 426.0, + 481.0, + 426.0, + 514.0, + 292.0, + 514.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 18, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 5, + "poly": [ + 302, + 622, + 1462, + 622, + 1462, + 1697, + 302, + 1697 + ], + "score": 0.984, + "html": "
TaskWIQA: LastRACE: RightWIQA: FirstADQAWebQA
Boolean Expressions52.6758.0052.6754.6753.33
Causal Judgement55.1763.2255.1757.4757.47
Date Understanding17.3319.3317.3316.6715.33
Disambiguation0.000.000.000.000.00
Dyck Languages0.670.670.671.331.33
Formal Fallacies51.3351.3351.3351.3351.33
Geometric Shapes8.0013.338.006.677.33
Hyperbaton16.6744.0016.671.336.00
Logical Ded uctionts)23.3328.0023.3319.3320.67
Logical DeductionS (seven objects)22.0026.0022.0010.6712.00
Logical DeductionS (three objects)0.679.330.670.000.00
Movie Recommendation63.3362.6763.3356.6763.33
Multistep Arithmetic0.670.670.670.670.67
Navigate47.3350.0047.3347.3347.33
Object Counting34.6734.0034.6735.3335.33
Penguins in a Table45.6541.3045.6539.1343.48
Reasoning about Colored Objects40.0037.3340.0031.3330.67
Ruin Names22.0021.3322.0017.3322.67
Salient Translation Error Detection36.6734.6736.6732.6737.33
Snarks52.5655.1352.5647.4452.56
Sports Understanding56.0058.6756.0055.33
Temporal Sequences16.6717.3316.6712.6755.33 17.33
Tracking Shuffled ObjectsS (five objects)12.0012.0012.0010.6712.00
Tracking Shuffled ObjectsS (seven objects)6.676.676.676.676.67
Tracking Shuffled ObjectsS20.6730.6720.6710.6725.33
(three objects) Web of Lies54.6754.0054.6754.00
Word Sorting1.331.331.331.3354.00 1.33
Avg Performance per Task β–³ FLAN-T5-large28.10 1.1030.78 3.7828.10 1.1025.14 -1.8627.04 0.04
" + }, + { + "category_id": 1, + "poly": [ + 298, + 295, + 1404, + 295, + 1404, + 512, + 298, + 512 + ], + "score": 0.98 + }, + { + "category_id": 6, + "poly": [ + 299, + 540, + 1401, + 540, + 1401, + 605, + 299, + 605 + ], + "score": 0.934 + }, + { + "category_id": 0, + "poly": [ + 297, + 223, + 961, + 223, + 961, + 265, + 297, + 265 + ], + "score": 0.934 + }, + { + "category_id": 2, + "poly": [ + 298, + 74, + 878, + 74, + 878, + 107, + 298, + 107 + ], + "score": 0.9 + }, + { + "category_id": 2, + "poly": [ + 834, + 2088, + 866, + 2088, + 866, + 2113, + 834, + 2113 + ], + "score": 0.858 + }, + { + "category_id": 15, + "poly": [ + 293.0, + 536.0, + 1406.0, + 536.0, + 1406.0, + 579.0, + 293.0, + 579.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 569.0, + 375.0, + 569.0, + 375.0, + 607.0, + 293.0, + 607.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 221.0, + 965.0, + 221.0, + 965.0, + 272.0, + 290.0, + 272.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 71.0, + 880.0, + 71.0, + 880.0, + 110.0, + 295.0, + 110.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 828.0, + 2084.0, + 872.0, + 2084.0, + 872.0, + 2124.0, + 828.0, + 2124.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 293.0, + 1406.0, + 293.0, + 1406.0, + 334.0, + 293.0, + 334.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 324.0, + 1405.0, + 324.0, + 1405.0, + 365.0, + 293.0, + 365.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 356.0, + 1404.0, + 356.0, + 1404.0, + 391.0, + 294.0, + 391.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 387.0, + 1406.0, + 387.0, + 1406.0, + 423.0, + 292.0, + 423.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 417.0, + 1405.0, 
+ 417.0, + 1405.0, + 456.0, + 293.0, + 456.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 449.0, + 1405.0, + 449.0, + 1405.0, + 488.0, + 293.0, + 488.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 482.0, + 718.0, + 482.0, + 718.0, + 516.0, + 296.0, + 516.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 19, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 297, + 1489, + 1404, + 1489, + 1404, + 1706, + 297, + 1706 + ], + "score": 0.981 + }, + { + "category_id": 1, + "poly": [ + 298, + 1818, + 1403, + 1818, + 1403, + 2035, + 298, + 2035 + ], + "score": 0.979 + }, + { + "category_id": 1, + "poly": [ + 298, + 1052, + 1404, + 1052, + 1404, + 1206, + 298, + 1206 + ], + "score": 0.977 + }, + { + "category_id": 1, + "poly": [ + 299, + 1221, + 1404, + 1221, + 1404, + 1378, + 299, + 1378 + ], + "score": 0.977 + }, + { + "category_id": 3, + "poly": [ + 449, + 273, + 1263, + 273, + 1263, + 780, + 449, + 780 + ], + "score": 0.972 + }, + { + "category_id": 4, + "poly": [ + 297, + 836, + 1404, + 836, + 1404, + 932, + 297, + 932 + ], + "score": 0.952 + }, + { + "category_id": 0, + "poly": [ + 297, + 1419, + 956, + 1419, + 956, + 1458, + 297, + 1458 + ], + "score": 0.934 + }, + { + "category_id": 0, + "poly": [ + 300, + 1748, + 724, + 1748, + 724, + 1788, + 300, + 1788 + ], + "score": 0.916 + }, + { + "category_id": 0, + "poly": [ + 302, + 982, + 717, + 982, + 717, + 1021, + 302, + 1021 + ], + "score": 0.898 + }, + { + "category_id": 2, + "poly": [ + 297, + 75, + 878, + 75, + 878, + 106, + 297, + 106 + ], + "score": 0.898 + }, + { + "category_id": 2, + "poly": [ + 834, + 2088, + 863, + 2088, + 863, + 2112, + 834, + 2112 + ], + "score": 0.847 + }, + { + "category_id": 13, + "poly": [ + 845, + 2002, + 907, + 2002, + 907, + 2032, + 845, + 2032 + ], + "score": 0.84, + "latex": "1 . 
2 \\%" + }, + { + "category_id": 13, + "poly": [ + 535, + 1346, + 616, + 1346, + 616, + 1375, + 535, + 1375 + ], + "score": 0.78, + "latex": "1 e - 4 ," + }, + { + "category_id": 13, + "poly": [ + 554, + 1289, + 573, + 1289, + 573, + 1312, + 554, + 1312 + ], + "score": 0.52, + "latex": "\\alpha" + }, + { + "category_id": 13, + "poly": [ + 1144, + 1223, + 1168, + 1223, + 1168, + 1250, + 1144, + 1250 + ], + "score": 0.49, + "latex": "K" + }, + { + "category_id": 15, + "poly": [ + 469.0, + 274.0, + 612.0, + 274.0, + 612.0, + 298.0, + 469.0, + 298.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 638.0, + 276.0, + 764.0, + 276.0, + 764.0, + 298.0, + 638.0, + 298.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 793.0, + 274.0, + 930.0, + 274.0, + 930.0, + 298.0, + 793.0, + 298.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 958.0, + 276.0, + 1088.0, + 276.0, + 1088.0, + 298.0, + 958.0, + 298.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1128.0, + 273.0, + 1240.0, + 273.0, + 1240.0, + 299.0, + 1128.0, + 299.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 453.0, + 304.0, + 480.0, + 304.0, + 480.0, + 323.0, + 453.0, + 323.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 582.0, + 307.0, + 591.0, + 307.0, + 591.0, + 316.0, + 582.0, + 316.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 674.0, + 300.0, + 694.0, + 300.0, + 694.0, + 319.0, + 674.0, + 319.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 773.0, + 305.0, + 801.0, + 305.0, + 801.0, + 327.0, + 773.0, + 327.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 936.0, + 305.0, + 960.0, + 305.0, + 960.0, + 325.0, + 936.0, + 325.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1000.0, + 304.0, + 1017.0, + 304.0, + 
1017.0, + 323.0, + 1000.0, + 323.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1027.0, + 298.0, + 1051.0, + 298.0, + 1051.0, + 318.0, + 1027.0, + 318.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1056.0, + 299.0, + 1082.0, + 299.0, + 1082.0, + 325.0, + 1056.0, + 325.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1095.0, + 290.0, + 1127.0, + 290.0, + 1127.0, + 311.0, + 1095.0, + 311.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 453.0, + 331.0, + 478.0, + 331.0, + 478.0, + 351.0, + 453.0, + 351.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 543.0, + 338.0, + 571.0, + 338.0, + 571.0, + 362.0, + 543.0, + 362.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 614.0, + 313.0, + 639.0, + 313.0, + 639.0, + 360.0, + 614.0, + 360.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 774.0, + 332.0, + 799.0, + 332.0, + 799.0, + 352.0, + 774.0, + 352.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 865.0, + 317.0, + 891.0, + 317.0, + 891.0, + 351.0, + 865.0, + 351.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 935.0, + 336.0, + 960.0, + 336.0, + 960.0, + 355.0, + 935.0, + 355.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1095.0, + 324.0, + 1123.0, + 324.0, + 1123.0, + 346.0, + 1095.0, + 346.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 452.0, + 358.0, + 481.0, + 358.0, + 481.0, + 380.0, + 452.0, + 380.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 549.0, + 357.0, + 564.0, + 357.0, + 564.0, + 374.0, + 549.0, + 374.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 614.0, + 369.0, + 638.0, + 369.0, + 638.0, + 389.0, + 614.0, + 389.0 + ], + "score": 1.0, + "text": "" + 
}, + { + "category_id": 15, + "poly": [ + 775.0, + 358.0, + 800.0, + 358.0, + 800.0, + 377.0, + 775.0, + 377.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 934.0, + 364.0, + 962.0, + 364.0, + 962.0, + 387.0, + 934.0, + 387.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1104.0, + 361.0, + 1121.0, + 361.0, + 1121.0, + 379.0, + 1104.0, + 379.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 453.0, + 383.0, + 482.0, + 383.0, + 482.0, + 408.0, + 453.0, + 408.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 582.0, + 399.0, + 593.0, + 399.0, + 593.0, + 407.0, + 582.0, + 407.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 614.0, + 397.0, + 642.0, + 397.0, + 642.0, + 416.0, + 614.0, + 416.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 774.0, + 384.0, + 829.0, + 384.0, + 829.0, + 411.0, + 774.0, + 411.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 943.0, + 397.0, + 967.0, + 397.0, + 967.0, + 414.0, + 943.0, + 414.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1103.0, + 396.0, + 1127.0, + 396.0, + 1127.0, + 415.0, + 1103.0, + 415.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1154.0, + 393.0, + 1179.0, + 393.0, + 1179.0, + 408.0, + 1154.0, + 408.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1222.0, + 390.0, + 1238.0, + 390.0, + 1238.0, + 406.0, + 1222.0, + 406.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 487.0, + 412.0, + 502.0, + 412.0, + 502.0, + 431.0, + 487.0, + 431.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 511.0, + 411.0, + 603.0, + 411.0, + 603.0, + 433.0, + 511.0, + 433.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 648.0, + 412.0, + 663.0, + 
412.0, + 663.0, + 431.0, + 648.0, + 431.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 674.0, + 411.0, + 763.0, + 411.0, + 763.0, + 432.0, + 674.0, + 432.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 809.0, + 412.0, + 823.0, + 412.0, + 823.0, + 431.0, + 809.0, + 431.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 835.0, + 412.0, + 860.0, + 412.0, + 860.0, + 431.0, + 835.0, + 431.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 865.0, + 412.0, + 924.0, + 412.0, + 924.0, + 432.0, + 865.0, + 432.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 968.0, + 408.0, + 986.0, + 408.0, + 986.0, + 432.0, + 968.0, + 432.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 997.0, + 413.0, + 1021.0, + 413.0, + 1021.0, + 431.0, + 997.0, + 431.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1025.0, + 412.0, + 1085.0, + 412.0, + 1085.0, + 432.0, + 1025.0, + 432.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1130.0, + 412.0, + 1146.0, + 412.0, + 1146.0, + 431.0, + 1130.0, + 431.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1156.0, + 412.0, + 1181.0, + 412.0, + 1181.0, + 431.0, + 1156.0, + 431.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1188.0, + 412.0, + 1246.0, + 412.0, + 1246.0, + 432.0, + 1188.0, + 432.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 486.0, + 442.0, + 595.0, + 442.0, + 595.0, + 468.0, + 486.0, + 468.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 637.0, + 446.0, + 764.0, + 446.0, + 764.0, + 465.0, + 637.0, + 465.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 822.0, + 446.0, + 903.0, + 446.0, + 903.0, + 466.0, + 822.0, + 466.0 + ], + "score": 1.0, + 
"text": "" + }, + { + "category_id": 15, + "poly": [ + 950.0, + 446.0, + 1099.0, + 446.0, + 1099.0, + 465.0, + 950.0, + 465.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1100.0, + 445.0, + 1265.0, + 445.0, + 1265.0, + 467.0, + 1100.0, + 467.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 936.0, + 459.0, + 961.0, + 459.0, + 961.0, + 478.0, + 936.0, + 478.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 452.0, + 470.0, + 482.0, + 470.0, + 482.0, + 492.0, + 452.0, + 492.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 874.0, + 470.0, + 883.0, + 470.0, + 883.0, + 479.0, + 874.0, + 479.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 613.0, + 482.0, + 642.0, + 482.0, + 642.0, + 504.0, + 613.0, + 504.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 774.0, + 484.0, + 800.0, + 484.0, + 800.0, + 503.0, + 774.0, + 503.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 903.0, + 476.0, + 916.0, + 476.0, + 916.0, + 489.0, + 903.0, + 489.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 936.0, + 481.0, + 960.0, + 481.0, + 960.0, + 500.0, + 936.0, + 500.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1095.0, + 475.0, + 1151.0, + 475.0, + 1151.0, + 499.0, + 1095.0, + 499.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1157.0, + 474.0, + 1180.0, + 474.0, + 1180.0, + 488.0, + 1157.0, + 488.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 452.0, + 495.0, + 479.0, + 495.0, + 479.0, + 518.0, + 452.0, + 518.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 613.0, + 517.0, + 640.0, + 517.0, + 640.0, + 538.0, + 613.0, + 538.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 773.0, + 513.0, + 
801.0, + 513.0, + 801.0, + 534.0, + 773.0, + 534.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 842.0, + 508.0, + 852.0, + 508.0, + 852.0, + 517.0, + 842.0, + 517.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 904.0, + 501.0, + 914.0, + 501.0, + 914.0, + 508.0, + 904.0, + 508.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 936.0, + 503.0, + 960.0, + 503.0, + 960.0, + 523.0, + 936.0, + 523.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1063.0, + 518.0, + 1075.0, + 518.0, + 1075.0, + 526.0, + 1063.0, + 526.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1095.0, + 507.0, + 1124.0, + 507.0, + 1124.0, + 528.0, + 1095.0, + 528.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1217.0, + 508.0, + 1244.0, + 508.0, + 1244.0, + 539.0, + 1217.0, + 539.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 453.0, + 522.0, + 477.0, + 522.0, + 477.0, + 541.0, + 453.0, + 541.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 710.0, + 523.0, + 715.0, + 523.0, + 715.0, + 527.0, + 710.0, + 527.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 452.0, + 546.0, + 483.0, + 546.0, + 483.0, + 568.0, + 452.0, + 568.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 613.0, + 551.0, + 642.0, + 551.0, + 642.0, + 573.0, + 613.0, + 573.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 773.0, + 542.0, + 801.0, + 542.0, + 801.0, + 564.0, + 773.0, + 564.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 935.0, + 526.0, + 960.0, + 526.0, + 960.0, + 569.0, + 935.0, + 569.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1033.0, + 535.0, + 1045.0, + 535.0, + 1045.0, + 547.0, + 1033.0, + 547.0 + ], + "score": 1.0, 
+ "text": "" + }, + { + "category_id": 15, + "poly": [ + 1095.0, + 539.0, + 1124.0, + 539.0, + 1124.0, + 561.0, + 1095.0, + 561.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 488.0, + 581.0, + 502.0, + 581.0, + 502.0, + 599.0, + 488.0, + 599.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 514.0, + 579.0, + 603.0, + 579.0, + 603.0, + 602.0, + 514.0, + 602.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 648.0, + 581.0, + 663.0, + 581.0, + 663.0, + 599.0, + 648.0, + 599.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 673.0, + 579.0, + 764.0, + 579.0, + 764.0, + 602.0, + 673.0, + 602.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 809.0, + 581.0, + 824.0, + 581.0, + 824.0, + 600.0, + 809.0, + 600.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 835.0, + 581.0, + 861.0, + 581.0, + 861.0, + 600.0, + 835.0, + 600.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 864.0, + 579.0, + 925.0, + 579.0, + 925.0, + 602.0, + 864.0, + 602.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 969.0, + 582.0, + 984.0, + 582.0, + 984.0, + 600.0, + 969.0, + 600.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 997.0, + 581.0, + 1021.0, + 581.0, + 1021.0, + 600.0, + 997.0, + 600.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1026.0, + 580.0, + 1085.0, + 580.0, + 1085.0, + 601.0, + 1026.0, + 601.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1130.0, + 581.0, + 1146.0, + 581.0, + 1146.0, + 600.0, + 1130.0, + 600.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1157.0, + 581.0, + 1182.0, + 581.0, + 1182.0, + 600.0, + 1157.0, + 600.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1188.0, + 
579.0, + 1247.0, + 579.0, + 1247.0, + 603.0, + 1188.0, + 603.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 463.0, + 615.0, + 946.0, + 615.0, + 946.0, + 634.0, + 463.0, + 634.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 991.0, + 615.0, + 1055.0, + 615.0, + 1055.0, + 635.0, + 991.0, + 635.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1130.0, + 614.0, + 1239.0, + 614.0, + 1239.0, + 636.0, + 1130.0, + 636.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 452.0, + 632.0, + 481.0, + 632.0, + 481.0, + 654.0, + 452.0, + 654.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 549.0, + 637.0, + 565.0, + 637.0, + 565.0, + 654.0, + 549.0, + 654.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 573.0, + 647.0, + 601.0, + 647.0, + 601.0, + 677.0, + 573.0, + 677.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 613.0, + 641.0, + 639.0, + 641.0, + 639.0, + 664.0, + 613.0, + 664.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 770.0, + 640.0, + 800.0, + 640.0, + 800.0, + 662.0, + 770.0, + 662.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 925.0, + 637.0, + 960.0, + 637.0, + 960.0, + 659.0, + 925.0, + 659.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1034.0, + 651.0, + 1045.0, + 651.0, + 1045.0, + 665.0, + 1034.0, + 665.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1095.0, + 630.0, + 1128.0, + 630.0, + 1128.0, + 652.0, + 1095.0, + 652.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 453.0, + 667.0, + 480.0, + 667.0, + 480.0, + 686.0, + 453.0, + 686.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 614.0, + 668.0, + 638.0, + 668.0, + 638.0, + 687.0, + 614.0, + 687.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 707.0, + 660.0, + 723.0, + 660.0, + 723.0, + 670.0, + 707.0, + 670.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 770.0, + 671.0, + 802.0, + 671.0, + 802.0, + 692.0, + 770.0, + 692.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 925.0, + 665.0, + 959.0, + 665.0, + 959.0, + 686.0, + 925.0, + 686.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1096.0, + 662.0, + 1120.0, + 662.0, + 1120.0, + 681.0, + 1096.0, + 681.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1188.0, + 681.0, + 1211.0, + 681.0, + 1211.0, + 704.0, + 1188.0, + 704.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 453.0, + 700.0, + 478.0, + 700.0, + 478.0, + 720.0, + 453.0, + 720.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 614.0, + 692.0, + 638.0, + 692.0, + 638.0, + 712.0, + 614.0, + 712.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 770.0, + 702.0, + 802.0, + 702.0, + 802.0, + 724.0, + 770.0, + 724.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 925.0, + 693.0, + 960.0, + 693.0, + 960.0, + 714.0, + 925.0, + 714.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1095.0, + 691.0, + 1123.0, + 691.0, + 1123.0, + 713.0, + 1095.0, + 713.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 613.0, + 716.0, + 640.0, + 716.0, + 640.0, + 738.0, + 613.0, + 738.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 925.0, + 720.0, + 959.0, + 720.0, + 959.0, + 741.0, + 925.0, + 741.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1096.0, + 723.0, + 1121.0, + 723.0, + 1121.0, + 743.0, + 1096.0, + 743.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 
459.0, + 734.0, + 485.0, + 734.0, + 485.0, + 753.0, + 459.0, + 753.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 770.0, + 733.0, + 806.0, + 733.0, + 806.0, + 754.0, + 770.0, + 754.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 487.0, + 750.0, + 501.0, + 750.0, + 501.0, + 769.0, + 487.0, + 769.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 514.0, + 751.0, + 538.0, + 751.0, + 538.0, + 769.0, + 514.0, + 769.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 545.0, + 750.0, + 568.0, + 750.0, + 568.0, + 769.0, + 545.0, + 769.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 571.0, + 751.0, + 602.0, + 751.0, + 602.0, + 769.0, + 571.0, + 769.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 647.0, + 750.0, + 663.0, + 750.0, + 663.0, + 769.0, + 647.0, + 769.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 675.0, + 750.0, + 698.0, + 750.0, + 698.0, + 769.0, + 675.0, + 769.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 705.0, + 751.0, + 729.0, + 751.0, + 729.0, + 769.0, + 705.0, + 769.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 732.0, + 751.0, + 762.0, + 751.0, + 762.0, + 769.0, + 732.0, + 769.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 809.0, + 750.0, + 823.0, + 750.0, + 823.0, + 769.0, + 809.0, + 769.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 835.0, + 750.0, + 859.0, + 750.0, + 859.0, + 769.0, + 835.0, + 769.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 865.0, + 750.0, + 924.0, + 750.0, + 924.0, + 770.0, + 865.0, + 770.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 969.0, + 750.0, + 984.0, + 750.0, + 984.0, + 769.0, + 969.0, + 769.0 + ], + "score": 
1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 997.0, + 751.0, + 1020.0, + 751.0, + 1020.0, + 769.0, + 997.0, + 769.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1027.0, + 751.0, + 1051.0, + 751.0, + 1051.0, + 769.0, + 1027.0, + 769.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1054.0, + 751.0, + 1085.0, + 751.0, + 1085.0, + 769.0, + 1054.0, + 769.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1130.0, + 751.0, + 1146.0, + 751.0, + 1146.0, + 768.0, + 1130.0, + 768.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1158.0, + 752.0, + 1179.0, + 752.0, + 1179.0, + 768.0, + 1158.0, + 768.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1188.0, + 751.0, + 1212.0, + 751.0, + 1212.0, + 769.0, + 1188.0, + 769.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1215.0, + 751.0, + 1245.0, + 751.0, + 1245.0, + 769.0, + 1215.0, + 769.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 843.25, + 482.0, + 849.25, + 482.0, + 849.25, + 495.0, + 843.25, + 495.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 967.0, + 499.5, + 986.0, + 499.5, + 986.0, + 510.5, + 967.0, + 510.5 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 742.0, + 649.5, + 753.0, + 649.5, + 753.0, + 667.5, + 742.0, + 667.5 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 835.0, + 1405.0, + 835.0, + 1405.0, + 873.0, + 294.0, + 873.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 867.0, + 1404.0, + 867.0, + 1404.0, + 901.0, + 292.0, + 901.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 894.0, + 761.0, + 894.0, + 761.0, + 937.0, + 294.0, + 937.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + 
"poly": [ + 291.0, + 1416.0, + 959.0, + 1416.0, + 959.0, + 1465.0, + 291.0, + 1465.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1745.0, + 730.0, + 1745.0, + 730.0, + 1796.0, + 291.0, + 1796.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 980.0, + 721.0, + 980.0, + 721.0, + 1026.0, + 294.0, + 1026.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 71.0, + 880.0, + 71.0, + 880.0, + 110.0, + 295.0, + 110.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 829.0, + 2084.0, + 870.0, + 2084.0, + 870.0, + 2125.0, + 829.0, + 2125.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1491.0, + 1404.0, + 1491.0, + 1404.0, + 1526.0, + 296.0, + 1526.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1519.0, + 1404.0, + 1519.0, + 1404.0, + 1558.0, + 292.0, + 1558.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1550.0, + 1405.0, + 1550.0, + 1405.0, + 1585.0, + 294.0, + 1585.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1581.0, + 1404.0, + 1581.0, + 1404.0, + 1616.0, + 294.0, + 1616.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1609.0, + 1406.0, + 1609.0, + 1406.0, + 1650.0, + 291.0, + 1650.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1638.0, + 1406.0, + 1638.0, + 1406.0, + 1684.0, + 291.0, + 1684.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1672.0, + 1005.0, + 1672.0, + 1005.0, + 1710.0, + 295.0, + 1710.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1819.0, + 1405.0, + 1819.0, + 1405.0, + 1853.0, + 294.0, + 1853.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1851.0, 
+ 1403.0, + 1851.0, + 1403.0, + 1882.0, + 296.0, + 1882.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1882.0, + 1403.0, + 1882.0, + 1403.0, + 1917.0, + 296.0, + 1917.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1909.0, + 1405.0, + 1909.0, + 1405.0, + 1949.0, + 293.0, + 1949.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1939.0, + 1403.0, + 1939.0, + 1403.0, + 1977.0, + 293.0, + 1977.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1970.0, + 1403.0, + 1970.0, + 1403.0, + 2008.0, + 293.0, + 2008.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 2002.0, + 844.0, + 2002.0, + 844.0, + 2037.0, + 293.0, + 2037.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 908.0, + 2002.0, + 1403.0, + 2002.0, + 1403.0, + 2037.0, + 908.0, + 2037.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1052.0, + 1404.0, + 1052.0, + 1404.0, + 1089.0, + 296.0, + 1089.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1083.0, + 1404.0, + 1083.0, + 1404.0, + 1120.0, + 294.0, + 1120.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1113.0, + 1405.0, + 1113.0, + 1405.0, + 1150.0, + 293.0, + 1150.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1141.0, + 1404.0, + 1141.0, + 1404.0, + 1182.0, + 293.0, + 1182.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1174.0, + 724.0, + 1174.0, + 724.0, + 1210.0, + 295.0, + 1210.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1221.0, + 1143.0, + 1221.0, + 1143.0, + 1258.0, + 295.0, + 1258.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1169.0, + 1221.0, + 1406.0, + 
1221.0, + 1406.0, + 1258.0, + 1169.0, + 1258.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1246.0, + 1405.0, + 1246.0, + 1405.0, + 1294.0, + 293.0, + 1294.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1275.0, + 553.0, + 1275.0, + 553.0, + 1324.0, + 292.0, + 1324.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 574.0, + 1275.0, + 1408.0, + 1275.0, + 1408.0, + 1324.0, + 574.0, + 1324.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1310.0, + 1406.0, + 1310.0, + 1406.0, + 1350.0, + 293.0, + 1350.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1341.0, + 534.0, + 1341.0, + 534.0, + 1378.0, + 295.0, + 1378.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 617.0, + 1341.0, + 1123.0, + 1341.0, + 1123.0, + 1378.0, + 617.0, + 1378.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 20, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 5, + "poly": [ + 301, + 371, + 1423, + 371, + 1423, + 1417, + 301, + 1417 + ], + "score": 0.984, + "html": "
TaskLoraHubavg with thresholdLoraHubavg without threshold
Boolean Expressions55.554.0
Causal Judgement54.354.8
Date Understanding32.917.7
Disambiguation45.240.6
Dyck Languages1.01.1
Formal Fallacies52.851.7
Geometric Shapes7.46.7
Hyperbaton62.855.5
Logical DeductionS (five objects)36.136.5
Logical DeductionS (seven objects)36.835.6
Logical DeductionS45.7
(three objects) Movie Recommendation49.9
Multistep Arithmetic55.359.3
Navigate0.40.7
Object Counting47.147.6
33.734.7
Penguins in a Table35.933.8
Reasoning about Colored Objects40.037.9
Ruin Names24.424.0
Salient Translation Error Detection36.037.1
Snarks56.951.6
Sports Understanding56.755.9
Temporal Sequences18.216.7
Tracking Shuffled ObjectsS (five objects)12.312.3
Tracking Shuffled ObjectsS (seven objects)7.78.5
Tracking Shuffled ObjectsS (three objects)29.229.8
Web of Lies50.150.3
Word Sorting1.11.3
Avg Performance Per Task34.733.5
" + }, + { + "category_id": 2, + "poly": [ + 298, + 74, + 878, + 74, + 878, + 106, + 298, + 106 + ], + "score": 0.91 + }, + { + "category_id": 1, + "poly": [ + 296, + 228, + 1403, + 228, + 1403, + 295, + 296, + 295 + ], + "score": 0.906 + }, + { + "category_id": 6, + "poly": [ + 378, + 314, + 1320, + 314, + 1320, + 349, + 378, + 349 + ], + "score": 0.899 + }, + { + "category_id": 2, + "poly": [ + 834, + 2088, + 865, + 2088, + 865, + 2112, + 834, + 2112 + ], + "score": 0.849 + }, + { + "category_id": 15, + "poly": [ + 295.0, + 71.0, + 880.0, + 71.0, + 880.0, + 111.0, + 295.0, + 111.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 378.0, + 313.0, + 1322.0, + 313.0, + 1322.0, + 352.0, + 378.0, + 352.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 829.0, + 2084.0, + 871.0, + 2084.0, + 871.0, + 2125.0, + 829.0, + 2125.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 224.0, + 1405.0, + 224.0, + 1405.0, + 271.0, + 293.0, + 271.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 263.0, + 425.0, + 263.0, + 425.0, + 297.0, + 294.0, + 297.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 21, + "width": 1700, + "height": 2200 + } + } +] \ No newline at end of file diff --git a/parse/test/rzQGHXNReU/rzQGHXNReU.md b/parse/test/rzQGHXNReU/rzQGHXNReU.md new file mode 100644 index 0000000000000000000000000000000000000000..58f20823a82596d380f3afb6d592f4f45df796e3 --- /dev/null +++ b/parse/test/rzQGHXNReU/rzQGHXNReU.md @@ -0,0 +1,203 @@ +# RAFT: Adapting Language Model to Domain Specific RAG + +Tianjun Zhang \* +Department of Computer Science +UC Berkeley +Berkeley, CA 94720, USA +{tianjunz}@berkeley.edu +Shishir G. Patil, Naman Jain, Sheng Shen +Department of Computer Science +UC Berkeley +Berkeley, CA 94720, USA +{shishirpatil,naman_jain,sheng.s}@berkeley.edu +Matei Zaharia, Ion Stoica, Joseph E. 
Gonzalez +Department of Computer Science +UC Berkeley +Berkeley, CA 94720, USA +{matei,istoica,jegonzal}@berkeley.edu + +# Abstract + +Pretraining Large Language Models (LLMs) on large corpora of textual data is now a standard paradigm. When using these LLMs for many downstream applications, it is common to additionally incorporate new information into the pretrained model either through RAG-based-prompting, or finetuning. However, the best methodology to incorporate information remains an open question. In this paper, we present Retrieval Augmented Fine Tuning (RAFT), a training recipe which improves the model’s ability to answer questions in "open-book" in-domain settings. In training RAFT, given a question, and a set of retrieved documents, we train the model to ignore those documents that don’t help in answering the question, which we call, distractor documents. RAFT accomplishes this by citing verbatim the right sequence from the relevant document to help answer the question. This coupled with RAFT’s chain-of-thought-style response helps improve the model’s ability to reason. In domain specific RAG, RAFT consistently improves the model’s performance across PubMed, HotpotQA, and Gorilla datasets, presenting a post-training recipe to improve pre-trained LLMs to in-domain RAG. + +# 1 Introduction + +Trained on vast quantities of public data, Large Language Models LLMs have achieved significant advances in a wide range of general knowledge reasoning tasks Brown et al. (2020); Wei et al. (2022). However, increasingly LLMs are being employed in specialized domains to support tasks ranging from code completion for specific software frameworks to question answering on specific document collections (e.g., legal or medical documents). In these settings, general knowledge reasoning is less critical and instead the primary goal is to maximize accuracy based on a given set of documents. 
Indeed, adapting LLMs to the specialized domains (e.g., recent news, enterprise private documents, or program resources constructed after the training cutoff) is essential to many emerging applications (Vu et al., 2023; Lazaridou et al., 2022) and is the focus of this work. + +This paper studies the following question – How do we adapt pre-trained LLMs for Retrieval Augmented Generation (RAG) in specialized domains? + +When it comes to adapting LLMs to specialized domains, we consider the following two candidates: in-context learning through Retrieval-Augmented Generation (RAG) and supervised fine-tuning. RAG based methods allow the LLM to reference the documents when answering questions. However, RAG based in-context learning methods fail to leverage the learning opportunity afforded by the fixed domain setting and early access to the test documents. Alternatively, supervised fine-tuning offers the opportunity to learn more general patterns in the documents and better align to end tasks and user preferences Zhou et al. (2023). However, existing fine-tuning based approaches either fail to leverage the documents at test time (don’t incorporate RAG) or fail to account for the imperfections in retrieval process during training. + +![](images/f0db1ef1b057bc1bc9295a8166a6d296edcd745858acff236281a68693676d87.jpg) +Figure 1: How best to prepare for an Exam?(a) Fine-tuning based approaches implement "studying" by either directly "memorizing" the input documents or answering practice QA without referencing the documents. (b) Alternatively, in-context retrieval methods fail to leverage the learning opportunity afforded by the fixed domain and are equivalent to taking an open-book exam without studying. In contrast, our approach (c) RAFT leverages fine-tuning with question-answer pairs while referencing the documents in a simulated imperfect retrieval setting β€” thereby effectively preparing for the open-book exam setting. + +We can draw an analogy to an open-book exam. 
Existing in-context retrieval methods are equivalent to taking an open-book exam without studying. Alternatively, existing finetuning based approaches implement β€œstudying" by either directly β€œmemorizing" Xiong et al. (2023) the input documents or answering practice questions Wang et al. (2022) without referencing the documents. While these approaches leverage in-domain learning they fail to prepare for the open-book nature of the test setting. + +In this paper, we study how to combine instruction fine-tuning (IFT) with retrieval augmented generation (RAG). We propose a novel adaptation strategy – Retrieval-Augmented Fine Tuning (RAFT). RAFT specifically addresses the challenge of fine-tuning LLMs to both incorporate domain knowledge while also improving in-domain RAG performance. RAFT aims to not only enable models to learn domain-specific knowledge through fine-tuning, but also to ensure robustness against distracting retrieved information. This is achieved by training the models to understand the dynamics between the question (prompt), the domain-specific documents retrieved, and the right answer. Going back to our analogy to the open book exam, our approach is analogous to studying for an open-book exam by recognizing relevant, and irrelevant retrieved documents. + +In RAFT, we train the model to answer the question (Q) from Document(s) $( \mathrm { D ^ { * } } )$ to generate answer $( \mathrm { A } ^ { * } )$ , where $\mathsf { A } ^ { * }$ includes chain-of-thought reasoning Wei et al. (2022); Anthropic (2023), and in the presence of distractor documents $( D _ { k } )$ . We explain the methodology in Section 3 and analyze the sensitivity to the number of distractor documents $( k )$ at train- and test- time in Section 5. RAFT consistently outperforms Supervised-finetuning both withand without- RAG across PubMed Dernoncourt & Lee (2017), HotPot QA Yang et al. (2018), and HuggingFace Hub, Torch Hub, and Tensorflow Hub Gorilla datasets Patil et al. 
(2023), presenting a novel, yet simple technique to improve pre-trained LLMs for in-domain RAG. Our code is available at https://github.com/ShishirPatil/gorilla. + 
+ # 2 LLMs for Open-Book Exam + 
+ To understand our goal better, we expand on our analogy between training an LLM and the real-world setting of preparing for an exam. + 
+ Closed-Book Exam A closed book exam often refers to the scenario where the LLMs do not have access to any additional documents or references to answer the questions during the exam. For LLMs, this is equivalent to the scenario, for example, in which the LLM is used as a chatbot. In this scenario the LLM draws from the knowledge baked in during pre-training and supervised-finetuning to respond to the users’ prompt. + 
+ ![](images/d54b49279897f74e204c908fc173727448ff6a0c168d92f2063727d1a38456d9.jpg) +
Figure 2: Overview of our RAFT method. The top-left figure depicts our approach of adapting LLMs to reading solutions from a set of positive and distractor documents, in contrast to the standard RAG setup where models are trained based on the retriever outputs, which is a mixture of both memorization and reading. At test time, all methods follow the standard RAG setting, provided with the top-k retrieved documents in the context. + 
+ Open Book Exam In contrast, we liken the open-book exam setting to the scenario in which the LLM can refer to external sources of information (e.g., a website or a book chapter). In such scenarios, typically, the LLM is paired with a retriever which retrieves $k$ documents (or specific segments of the document) which are appended to the users’ prompt. It is only through these documents retrieved that the LLM gains access to β€œdomain-specific information”. 
As a result, we argue that the LLM’s performance in these settings, where it is trained as a general-purpose LLM, is largely dependent on the quality of the retriever and how accurately the retriever can identify the most relevant piece of information. + 
+ Domain-Specific Open-Book Exam In this paper, we focus on a narrower but increasingly popular setting than the general open-book exam, which we call the domain-specific open-book exam. Here, we know a priori the domain in which the LLM will be tested. The LLM can respond to the users’ prompt using any and all information from this specific domain, which it has been fine-tuned on. Examples of domain-specific settings include enterprise documents, code repositories belonging to an organization, etc. In all these scenarios, the LLM will be used to respond to the questions, whose answers can be found within a collection of documents. The retrieval technique itself has little to no impact on the mechanism (though it may impact the accuracy). This paper studies the domain-specific open-book setting and how to adapt a pretrained LLM to this specific domain, including how to make it more robust to a varying number of retrieved documents and distractors. + 
+ # 3 RAFT + 
+ In this section, we present RAFT, a novel way of training LLMs for domain-specific open-book exams. We first introduce the classical technique of supervised fine-tuning, followed by the key takeaways from our experiments. Then, we introduce RAFT, a modified version of general instruction tuning. Lastly, we provide an overview of the experiments to expect in the later sections. + 
+ # Supervised Finetuning + 
+ Consider the supervised fine-tuning (SFT) setting for a Question-Answer dataset. The formulation consists of the Dataset $( D )$ from which a set of Question (Q) and corresponding answer $( A )$ pairs are derived or already available. 
In the classical SFT setting, the model is trained to improve its ability to answer the questions based on its knowledge - obtained either during pre-training, or during the SFT training phase. The model so trained can also + 
+ Figure 3: RAFT prompt to help LLM evaluate its own generated reasoning and answers, contrasting them with the correct reasoning and answers. The LLM is prompted to identify errors in its reasoning and extract key insights for improvement. This figure specifically represents the β€˜GenerateExplanation’ step in the RAFT algorithm (Section 3). + 
+ be used at test-time with Retrieval Augmented Generation (RAG) setting, where additional documents can be introduced in the prompt to help the model answer the question. This can be represented as follows: + 
+ {Train: $\mathbf{Q} \to \mathbf{A}\}$ , {0-shot Inference: $\mathbf{Q} \to \mathbf{A}\}$ , {RAG Inference: $\mathbf{Q} + \mathbf{D} \to \mathbf{A}\}$ + 
+ RAFT: Retrieval Augmented Fine-Tuning (RAFT) presents a novel recipe to prepare finetuning data to tailor the models for the domain-specific open-book setting, equivalent to in-domain RAG. In RAFT, we prepare the training data such that each data point contains a question $( Q )$ , a set of documents $( D _ { k } )$ , and a corresponding Chain-of-thought style answer $( A ^ { * } )$ generated from one of the documents $( D ^ { * } )$ . We differentiate between two types of documents: β€˜golden’ documents $( D ^ { * } )$ , i.e., the documents from which the answer to the question can be deduced, and β€˜distractor’ documents $( D _ { i } )$ that do not contain answer-relevant information. As an implementation detail, the β€˜golden’ document doesn’t need to be a single document, but can be more than one document, as is the case in HotpotQA Yang et al. (2018). 
Then, for $P$ fraction of the questions $( q _ { i } )$ in the dataset, we retain the golden document $( d _ { i } ^ { * } )$ along with distractor documents $( d _ { k - 1 } )$ . For $( 1 - P )$ fraction of the questions $( q _ { i } )$ in the dataset, we include no golden document and only include distractor documents $( d _ { k } )$ . We then fine-tune the language model using the standard supervised training (SFT) technique, training it to generate answers from the provided documents and question. Fig. 2 illustrates the high-level design principle for RAFT. + 
+ We demonstrate that our RAG approach trains the model to perform better RAG on the set of documents it is trained on, i.e., in-domain. By removing the golden documents in some instances, we are compelling the model to memorize answers instead of deriving them from the context. The training data for RAFT is as follows, and an example training data can be seen in Fig. 3: + 
+ $\mathbf { P } \%$ of data: $\mathbf { Q } + \mathbf { D } ^ { * } + \mathbf { D } _ { 1 } + \mathbf { D } _ { 2 } + \ldots + \mathbf { D } _ { k } \to \mathbf { A } ^ { * }$ $( 1 - \mathbf { P } ) \%$ of data: $\mathbf { Q } + \mathbf { D } _ { 1 } + \mathbf { D } _ { 2 } + \ldots + \mathbf { D } _ { k } \to \mathbf { A } ^ { * }$ + 
+ Subsequently, for the test scenario, the model is provided with the Q and top-k documents retrieved by the RAG pipeline. Note that RAFT is independent of the retriever used. + 
+ A key factor in enhancing training quality is the generation of a reasoning process, such as Chain-of-Thought, to explain the provided answers. The RAFT approach is similar: we demonstrate that creating a full reasoning chain and, in addition, clearly citing sources enhances the model’s accuracy in answering questions. In Fig. 3, we illustrate this setup. 
Generating the training data in this fashion involves presenting the model with a question, context, and verified answers, and then requesting it to form a reasoning chain that appropriately references the original context. + 
+ For all the datasets in our experiments, we generate the answers using the technique described above. Note that the Gorilla APIBench dataset already includes reasoning in the answers. We provide an example of the generation step in Fig. 3; the detailed reasoning answer includes a citation from the original context inside ##begin_quote## and ##end_quote## as well as the detailed explanation on how to reach the conclusion based on the citations. We demonstrate that adding detailed reasoning paragraphs can help boost the model’s performance in our experiment section. + 
+ Table 1: RAFT improves RAG performance for all specialized domains: Across PubMed, HotPot, HuggingFace, Torch Hub, and Tensorflow Hub, we see that Domain-specific Finetuning significantly improves the performance of the base model, and RAFT consistently outperforms the existing domain-specific finetuning method with or without RAG. This suggests the need to train the model with context. We compare our model with LLaMA finetuning recipes, and provide GPT-3.5 for reference. + 
+ 
| | PubMed | HotPot | HuggingFace | Torch Hub | TensorFlow |
|---|---|---|---|---|---|
| GPT-3.5 + RAG | 71.60 | 41.5 | 29.08 | 60.21 | 65.59 |
| LLaMA2-7B | 56.5 | 0.54 | 0.22 | 0 | 0 |
| LLaMA2-7B + RAG | 58.8 | 0.03 | 26.43 | 08.60 | 43.06 |
| DSF | 59.7 | 6.38 | 61.06 | 84.94 | 86.56 |
| DSF + RAG | 71.6 | 4.41 | 42.59 | 82.80 | 60.29 |
| RAFT (LLaMA2-7B) | 73.30 | 35.28 | 74.00 | 84.95 | 86.86 |
+ +# 4 Evaluation + +We design our experiments to study how well RAFT performs compared to various baselines. We find that the RAFT-7B model (a finetuned version of LlaMA-2) is better at reading and extracting information from in-domain documents, than domain-specific finetuned model, and general-purpose model with RAG. As an ablation, we also demonstrate how important it is for the model to learn with Chain-of-Thought responses. In this section, we will first introduce all the datasets we used in the experiments, then all the baseline model/fine-tuning techniques that we benchmark against. + +Datasets In our experiments, we use the following datasets to evaluate our model and all baselines. We selected these datasets to represent both popular and diverse domains including Wikipedia, Coding/API documents, and question-answering on medical documents. Natural Questions (NQ) Kwiatkowski et al. (2019), Trivia QA Joshi et al. (2017) and HotpotQA Yang et al. (2018) are the open-domain question-answers based on Wikipedia, mainly focused on common knowledge (e.g., movies, sports, etc). HuggingFace, Torch Hub, and TensorFlow Hub are from the APIBench Patil et al. (2023) proposed in the Gorilla paper. These benchmarks measure how to generate the correct, functional, and executable API calls based on the documentation. PubMed QA Jin et al. (2019) is a question-answering dataset tailored only for biomedical-research question-answering. It mainly focuses on answering medical and biology questions based on a given set of documents. We would like to highlight that $( \mathrm { N Q } ,$ Trivia $\{ \hat { \mathrm { Q A } } ,$ and HotpotQA) are relatively general domain whereas the latter two domains are on domain-specific documents. 
+ +Baselines We consider the following baselines for our experiments: + +β€’ LlaMA2-7B-chat model with 0-shot prompting: this is the commonly used instruction-finetuned model for QA tasks, where we provide clearly written instructions, but no reference documentation. +β€’ LlaMA2-7B-chat model with RAG (Llama2 $^ +$ RAG): similar to the previous setting, except here we include reference documents. This is a popular technique when dealing with domain-specific QA tasks. Domain-Specific Finetuning with 0-shot prompting (DSF): Standard supervisedfinetuning, without documents in context. We find that its mostly useful to align the answering style of the model as well as get familiar with the domain context. +Domain-Specific Finetuning with RAG $( \mathrm { D S F } + \mathrm { R A G } )$ ): Equip a domain-specific finetuned-model with external knowledge using RAG. So, for the β€œknowledge” the model does not know, it can still refer to the context. + +Table 2: Ablation on Chain-of-Thought: The numbers of RAFT and RAFT without CoT. Results on various datasets show that adding CoT can significantly improve the performance of the finetuned model. With a gains of $9 . 6 6 \%$ and $1 \bar { 4 } . 9 3 \%$ in the Hotpot QA and HuggingFace datasets respectively. + +
| | PubMed | HotpotQA | HuggingFace | Torch Hub | TensorFlow |
|---|---|---|---|---|---|
| RAFT w.o. CoT | 68.30 | 25.62 | 59.07 | 86.56 | 83.21 |
| RAFT | 73.30 | 35.28 | 74.00 | 84.95 | 86.86 |
+ +# 4.1 Results + +Using the above datasets and baselines, we evaluate our model RAFT and demonstrate the effectiveness of RAFT in Tab. 1. We see that RAFT consistently and significantly outperforms the baselines. Compared with the base Llama-2 instruction-tuned model, RAFT with RAG does much better in terms of extracting information as well as being robust towards distractors. The gain can be as big as $3 5 . 2 5 \%$ on Hotpot QA and $7 6 . 3 5 \%$ on Torch Hub evaluation. Compared with DSF on the specific dataset, our model does better at relying on the provided context to solve the problem. RAFT does much better on the tasks like Hotpot and HuggingFace datasets $( 3 0 . { \bar { 8 } } 7 \%$ on Hotpot and $3 1 . 4 1 \%$ on HuggingFace). Note that for PubMed QA, since it is a binary yes/no question, we don’t observe significant gains when we compare our model with $\mathrm { D } \mathbf { \dot { S } } \mathbf { \dot { F } } + \mathbf { R } \mathbf { A } \mathbf { G }$ . Even compared with a much larger and better model GPT-3.5, RAFT demonstrates significant advantages. + +Overall, the LLaMA-7B model, both with and without the RAG, performs poorly due to its answering style not aligning with the ground truth. By applying domain-specific tuning, we significantly enhance its performance. This process enables the model to learn and adopt the appropriate style of answering. However, introducing RAG to a domain-specifically fine-tuned (DSF) model doesn’t invariably lead to better outcomes. This might indicate that the model lacks training in context processing and extracting useful information from it. By incorporating our method, RAFT , we train the model not only to match its answering style with that required but also to improve its document processing capabilities. Consequently, our approach outperforms all others. 
+ 
+ # 4.2 Effect of CoT + 
+ We also conduct an analysis to evaluate the effectiveness of the Chain-of-Thought approach in enhancing the model’s performance. As indicated in Table 2, simply providing the answer to a question may not always be adequate. This approach can lead to a rapid decrease in loss, resulting in the model beginning to overfit. Incorporating a reasoning chain that not only guides the model to the answer but also enriches the model’s understanding can improve the overall accuracy and prevent overfitting to concise answers. In our experiments, integrating the Chain-of-Thought significantly enhances training robustness. We employ GPT-4-1106 to generate our Chain-of-Thought prompts and include an example of the prompt we used in Figure 3. + 
+ # 4.3 Qualitative Analysis + 
+ To illustrate the potential advantages of RAFT over the domain-specifically fine-tuned (DSF) approach, we present a comparative example in Figure 4. This example qualitatively demonstrates a scenario where the DSF model becomes confused by a question asking for the identity of a screenwriter. Instead of providing the correct name, it mistakenly cites one of the films written by the screenwriter. In contrast, the RAFT model accurately answers the question. This discrepancy suggests that training a model solely with question-answer pairs may impair its ability to derive relevant context from provided documents. The comparison underscores the importance of incorporating both standard instructional tuning and context comprehension into the training dataset to preserve and enhance the model’s ability to process text effectively. + 
+ # HotPot QA + 
+ Question: What screenwriter with credits for β€œEvolution” co-wrote a film starring Nicolas Cage and TΓ©a Leoni? +
Documents: . . . David Weissman is a screenwriter and director. His film credits include β€œThe Family Man” (2000), β€œEvolution” (2001), and β€œWhen in Rome” (2010). 
The Family Man is a 2000 American romantic comedy-drama film directed by Brett Ratner, written by David Diamond and David Weissman, and starring Nicolas Cage and TΓ©a Leoni. + +![](images/0bcc3d38e5c0488526bd52fb267fd171f556aebd1617289e7668d2a9205a03f4.jpg) +Figure 4: Comparison of RAFT and DSF: On the HotPot QA dataset, we can see that DSF model extracts the wrong information from the context when the question is asking who is the screen writer and it answers a film name. RAFT manages to get the accurate results . + +4.4 Should we train the LLM always with the golden context for RAG? + +In our exploration of whether large language models (LLMs) should always be trained with the golden context for Retrieval-Augmented Generation (RAG), we address a key question: what proportion $( \mathrm { p \% ) }$ of the training data should include golden documents? Intuitively, one might assume that for effective training in reading and extracting information from context (e.g., RAG tasks), the golden document should always be included during training $\mathrm { ( P = 1 0 0 \% }$ ). However, our findings challenge this assumption: incorporating a portion of the training data without the golden document in the context $\mathrm { ( P = 8 0 \% }$ ) appears to enhance the model’s performance on RAG tasks. + +Figure 5 presents our investigation into the hyperparameter $\mathrm { P \% }$ , which represents the percentage of training instances that should include golden documents. We find that the optimal proportion varies across datasets, with $\mathrm { P \% }$ ranging from $4 0 \%$ , $6 0 \% ,$ and $1 0 0 \%$ . This indicates that training your LLM without the correct corresponding context at times can be beneficial for the downstream task of answering questions related to the documents. In our training setup, we include four distractor documents alongside the golden document, and at test time, we maintain this format by providing the golden document with four distractors. 
Our findings suggest that, for domain-specific RAG tasks, including a certain percentage of training data without the golden documents in the context proves to be advantageous. + +# 5 RAFT Generalizes to Top-K RAG + +We now study another important problem: How does the number of distractor documents in RAFT affect the model’s performance when augmented with top-k RAG results during evaluation? Previous research has highlighted the vulnerability of LLMs to irrelevant text (see studies (Shi et al., 2023a; Weston & Sukhbaatar, 2023; Liu et al., 2023)). This issue is particularly critical for LLMs $^ +$ RAG since top-k RAG is frequently employed at test time to ensure high recall. Such a scenario necessitates the model to have the ability to discern and disregard irrelevant content, focusing solely on pertinent information. + +![](images/8e086fb8cb885d22dfb048538cf47a1ac09dfb1873a60ba60c02530cd07d066d.jpg) +Figure 5: How many golden documents to involve? We study the hyperparameter $\mathrm { P \% }$ where it indicates how much portion of training data is with golden document. Results on NQ, TQA and HotpotQA suggest that mixing some amount of data that the golden document is not put in the context is helpful for in-domain RAG. + +# 5.1 Making Model Robust to top-K RAG + +To tackle the challenge of enhancing large language models’ (LLMs) ability to sift through irrelevant text within the retrieval pipeline, our analysis revealed that training solely with golden (highly relevant) documents can inadvertently diminish the model’s ability to discern and disregard irrelevant information. To address this, our algorithm, RAFT , adopts a strategy that integrates golden documents with a mix of irrelevant ones. 
This methodology prompts us to investigate the ideal fraction of distractor (irrelevant) documents to incorporate throughout the training process and to assess how well this training approach adapts to different volumes of documents encountered by the Retrieval-Augmented Generation (RAG) during the test phase. Our aim is to refine the balance between relevant and irrelevant information to strengthen the model’s efficiency in identifying and utilizing pertinent content. Notice that Sec 4.4 looked at what $\mathrm { P \% }$ of training data should include distractors, while in this section, we study test-time scenarios. + 
+ Training with Distractor Documents To enhance the robustness of LLMs against irrelevant text in retrieved documents, we adopted a finetuning approach that incorporates both golden (highly relevant) documents and distractor (irrelevant) documents. The model was trained with varying numbers of distractor documents, but consistently evaluated using the top-3 documents obtained from the retriever - not to be confused with $p$ . Our findings, detailed in Fig. 6, reveal that finetuning with only the golden document frequently results in inferior performance compared to configurations that include a greater number of distractor documents. As we can see in the figure, the best performance for Natural Questions is obtained by training with $D ^ { * } + 3 D$ and it is $D ^ { * } + 1 D$ documents with Hotpot QA. This insight has been particularly beneficial for our algorithm, RAFT. In our experiments, we consistently employ a training setup consisting of one golden document alongside four distractor documents. + 
+ Generalization to a variable number of test-time documents. We extended our research to examine the impact of different quantities of test-time documents on the model’s performance. 
Specifically, our experiments focused on assessing how models, trained with varying numbers of distractor documents, respond to changes in the number of documents presented at test time. The results, illustrated in Fig. 6, confirm that the inclusion of distractor documents during training indeed makes the model more resilient to fluctuations in the number of documents encountered during testing. This ability to maintain consistent performance despite variations in test-time document numbers further validates the robustness of our approach, RAFT . This finding underscores the importance of a well-calibrated training environment to prepare the model for a range of scenarios it may encounter in real-world. + +# 6 Related Works + +Retrieval-Augmented Language Models Retrieval-Augmented Language Models (RALMs) enhance LLMs by integrating a retrieval module that sources relevant information from external knowledge bases, significantly improving performance across various NLP tasks, including language modeling (Guu et al., 2020; Borgeaud et al., 2022; Khandelwal et al., + +![](images/12b8e955ae9a0307c0a7f13890daa53d74edc9d6d0f2d3be9e950103c883cfdd.jpg) +Figure 6: Test-Time Documents Varying: To analyze how robust RAFT is to varying number of test-time documents, we study three domains – NQ, Trivia QA and HotPot QA. In ${ \mathrm { N Q } } ,$ we find that training with 4 documents leads to optimal performance, and this changes to 3 and 2 for for Trivia QA and HotPot QA respectively. However, we see that training with only golden documents leads to poor performance. + +2019; Shi et al., 2023d; Lin et al., 2023b; Shi et al., 2023c; Asai et al., 2023; Xu et al., 2023; Wang et al., 2023) and open-domain question answering (Izacard et al., 2023; Lewis et al., 2020). 
For instance, Atlas (Izacard et al., 2023) fine-tunes T5 models with the retriever, treating documents as latent variables, while RETRO (Borgeaud et al., 2022) modifies the decoder-only architecture to include retrieved texts and conducts pre-training from scratch. kNN-LM (Khandelwal et al., 2019) interpolates between the LM’s next token distribution and distributions computed from retrieved tokens at inference. (Shi et al., 2023d; Ram et al., 2023) assume black-box access to an LLM, combining it with either off-the-shelf or fine-tuned retriever. + +Memorization A key question around large neural language models is whether they truly β€œunderstand” text (Feldman, 2020; Power et al., 2022) or simply rely on surface pattern memorization (Carlini et al., 2019; TΓ€nzer et al., 2022). (Feldman, 2020; Carlini et al., 2019; 2022) develop methodologies to quantify the extent of memorization in neural models. (Brown et al., 2020; Power et al., 2022; Liu et al., 2022) further explored how memorization impacts the models’ generalization capabilities. (Carlini et al., 2021; Shi et al., 2023b) demonstrated the ability of language models to memorize and regurgitate training data, raising significant privacy concerns (Kandpal et al., 2022; Pan et al., 2020). + +Finetuning for RAG More recently, several papers have been exploring the idea of finetuning a pretrained LLM to be better at RAG tasks (Lin et al., 2023a; Wang et al., 2023; Xu et al., 2023; Liu et al., 2024). These works focus on constructing a combination of finetuning dataset for RAG and train a model to perform well on these tasks. In particular, in their settings, at test time, the domain or documents can be different than the training time; whereas our paper studies a slightly opposite scenario where we only care about testing the LLM on the same set of documents. 
+ +# 7 Conclusion + +RAFT is a training strategy designed to enhance the model’s performance in answering questions within a specific domain, in "open-book" settings. We highlight several crucial design decisions, such as training the model alongside distractor documents, organizing the dataset so a portion lacks golden documents in their context, and formulating answers in a chain-of-thought manner with direct quotations from the relevant text. Our evaluations on PubMed, HotpotQA, and Gorilla API Bench underline RAFT’s significant potential. + +# References + +Anthropic. Prompt engineering for claude’s long context window. 2023. + +Asai, A., Wu, Z., Wang, Y., Sil, A., and Hajishirzi, H. Self-rag: Learning to retrieve, generate, and critique through self-reflection. arXiv preprint arXiv:2310.11511, 2023. + +Borgeaud, S., Mensch, A., Hoffmann, J., Cai, T., Rutherford, E., Millican, K., Van Den Driessche, G. B., Lespiau, J.-B., Damoc, B., Clark, A., et al. Improving language models by retrieving from trillions of tokens. In International conference on machine learning, pp. 2206–2240. PMLR, 2022. +Brown, T., Mann, B., Ryder, N., Subbiah, M., Kaplan, J. D., Dhariwal, P., Neelakantan, A., Shyam, P., Sastry, G., Askell, A., et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877–1901, 2020. +Carlini, N., Liu, C., Erlingsson, Ú., Kos, J., and Song, D. The secret sharer: Evaluating and testing unintended memorization in neural networks. In 28th USENIX Security Symposium (USENIX Security 19), pp. 267–284, 2019. +Carlini, N., Tramer, F., Wallace, E., Jagielski, M., Herbert-Voss, A., Lee, K., Roberts, A., Brown, T., Song, D., Erlingsson, U., et al. Extracting training data from large language models. In 30th USENIX Security Symposium (USENIX Security 21), pp. 2633–2650, 2021. +Carlini, N., Ippolito, D., Jagielski, M., Lee, K., Tramer, F., and Zhang, C. Quantifying memorization across neural language models. 
In The Eleventh International Conference on Learning Representations, 2022. +Dernoncourt, F. and Lee, J. Y. Pubmed 200k rct: a dataset for sequential sentence classification in medical abstracts. arXiv preprint arXiv:1710.06071, 2017. +Feldman, V. Does learning require memorization? a short tale about a long tail. In Proceedings of the 52nd Annual ACM SIGACT Symposium on Theory of Computing, pp. 954–959, 2020. +Guu, K., Lee, K., Tung, Z., Pasupat, P., and Chang, M. Retrieval augmented language model pre-training. In International conference on machine learning, pp. 3929–3938. PMLR, 2020. +Izacard, G., Lewis, P., Lomeli, M., Hosseini, L., Petroni, F., Schick, T., Dwivedi-Yu, J., Joulin, A., Riedel, S., and Grave, E. Atlas: Few-shot learning with retrieval augmented language models. Journal of Machine Learning Research, 24(251):1–43, 2023. URL http: //jmlr.org/papers/v24/23-0037.html. +Jin, Q., Dhingra, B., Liu, Z., Cohen, W. W., and Lu, X. Pubmedqa: A dataset for biomedical research question answering. arXiv preprint arXiv:1909.06146, 2019. +Joshi, M., Choi, E., Weld, D. S., and Zettlemoyer, L. Triviaqa: A large scale distantly supervised challenge dataset for reading comprehension. arXiv preprint arXiv:1705.03551, 2017. +Kandpal, N., Wallace, E., and Raffel, C. Deduplicating training data mitigates privacy risks in language models. In International Conference on Machine Learning, pp. 10697–10707. PMLR, 2022. +Khandelwal, U., Levy, O., Jurafsky, D., Zettlemoyer, L., and Lewis, M. Generalization through memorization: Nearest neighbor language models. arXiv preprint arXiv:1911.00172, 2019. +Kwiatkowski, T., Palomaki, J., Redfield, O., Collins, M., Parikh, A., Alberti, C., Epstein, D., Polosukhin, I., Devlin, J., Lee, K., et al. Natural questions: a benchmark for question answering research. Transactions of the Association for Computational Linguistics, 7:453–466, 2019. +Lazaridou, A., Gribovskaya, E., Stokowiec, W., and Grigorev, N. 
Internet-augmented language models through few-shot prompting for open-domain question answering. arXiv preprint arXiv:2203.05115, 2022. +Lewis, P., Perez, E., Piktus, A., Petroni, F., Karpukhin, V., Goyal, N., KΓΌttler, H., Lewis, M., Yih, W.-t., RocktΓ€schel, T., et al. Retrieval-augmented generation for knowledge-intensive nlp tasks. Advances in Neural Information Processing Systems, 33:9459–9474, 2020. +Lin, X. V., Chen, X., Chen, M., Shi, W., Lomeli, M., James, R., Rodriguez, P., Kahn, J., Szilvasy, G., Lewis, M., et al. Ra-dit: Retrieval-augmented dual instruction tuning. arXiv preprint arXiv:2310.01352, 2023a. +Lin, X. V., Chen, X., Chen, M., Shi, W., Lomeli, M., James, R., Rodriguez, P., Kahn, J., Szilvasy, G., Lewis, M., et al. Ra-dit: Retrieval-augmented dual instruction tuning. arXiv preprint arXiv:2310.01352, 2023b. +Liu, N. F., Lin, K., Hewitt, J., Paranjape, A., Bevilacqua, M., Petroni, F., and Liang, P. Lost in the middle: How language models use long contexts. arXiv preprint arXiv:2307.03172, 2023. +Liu, Z., Kitouni, O., Nolte, N. S., Michaud, E., Tegmark, M., and Williams, M. Towards understanding grokking: An effective theory of representation learning. Advances in Neural Information Processing Systems, 35:34651–34663, 2022. +Liu, Z., Ping, W., Roy, R., Xu, P., Shoeybi, M., and Catanzaro, B. Chatqa: Building gpt-4 level conversational qa models. arXiv preprint arXiv:2401.10225, 2024. +Pan, X., Zhang, M., Ji, S., and Yang, M. Privacy risks of general-purpose language models. In 2020 IEEE Symposium on Security and Privacy (SP), pp. 1314–1331. IEEE, 2020. +Patil, S. G., Zhang, T., Wang, X., and Gonzalez, J. E. Gorilla: Large language model connected with massive apis. arXiv preprint arXiv:2305.15334, 2023. +Power, A., Burda, Y., Edwards, H., Babuschkin, I., and Misra, V. Grokking: Generalization beyond overfitting on small algorithmic datasets. arXiv preprint arXiv:2201.02177, 2022. 
+Ram, O., Levine, Y., Dalmedigos, I., Muhlgay, D., Shashua, A., Leyton-Brown, K., and Shoham, Y. In-context retrieval-augmented language models. arXiv preprint arXiv:2302.00083, 2023. +Shi, F., Chen, X., Misra, K., Scales, N., Dohan, D., Chi, E. H., SchΓ€rli, N., and Zhou, D. Large language models can be easily distracted by irrelevant context. In International Conference on Machine Learning, pp. 31210–31227. PMLR, 2023a. +Shi, W., Ajith, A., Xia, M., Huang, Y., Liu, D., Blevins, T., Chen, D., and Zettlemoyer, L. Detecting pretraining data from large language models. arXiv preprint arXiv:2310.16789, 2023b. +Shi, W., Min, S., Lomeli, M., Zhou, C., Li, M., Lin, V., Smith, N. A., Zettlemoyer, L., Yih, S., and Lewis, M. In-context pretraining: Language modeling beyond document boundaries. arXiv preprint arXiv:2310.10638, 2023c. +Shi, W., Min, S., Yasunaga, M., Seo, M., James, R., Lewis, M., Zettlemoyer, L., and Yih, W.-t. Replug: Retrieval-augmented black-box language models. arXiv preprint arXiv:2301.12652, 2023d. +TΓ€nzer, M., Ruder, S., and Rei, M. Memorisation versus generalisation in pre-trained language models. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 7564–7578, 2022. +Vu, T., Iyyer, M., Wang, X., Constant, N., Wei, J., Wei, J., Tar, C., Sung, Y.-H., Zhou, D., Le, Q., et al. Freshllms: Refreshing large language models with search engine augmentation. arXiv preprint arXiv:2310.03214, 2023. +Wang, B., Ping, W., McAfee, L., Xu, P., Li, B., Shoeybi, M., and Catanzaro, B. Instructretro: Instruction tuning post retrieval-augmented pretraining. arXiv preprint arXiv:2310.07713, 2023. +Wang, Y., Kordi, Y., Mishra, S., Liu, A., Smith, N. A., Khashabi, D., and Hajishirzi, H. Self-instruct: Aligning language models with self-generated instructions. arXiv preprint arXiv:2212.10560, 2022. +Wei, J., Wang, X., Schuurmans, D., Bosma, M., Xia, F., Chi, E., Le, Q. V., Zhou, D., et al. 
Chain-of-thought prompting elicits reasoning in large language models. Advances in Neural Information Processing Systems, 35:24824–24837, 2022. +Weston, J. and Sukhbaatar, S. System 2 attention (is something you might need too). arXiv preprint arXiv:2311.11829, 2023. +Xiong, W., Liu, J., Molybog, I., Zhang, H., Bhargava, P., Hou, R., Martin, L., Rungta, R., Sankararaman, K. A., Oguz, B., et al. Effective long-context scaling of foundation models. arXiv preprint arXiv:2309.16039, 2023. +Xu, P., Ping, W., Wu, X., McAfee, L., Zhu, C., Liu, Z., Subramanian, S., Bakhturina, E., Shoeybi, M., and Catanzaro, B. Retrieval meets long context large language models. arXiv preprint arXiv:2310.03025, 2023. +Yang, Z., Qi, P., Zhang, S., Bengio, Y., Cohen, W. W., Salakhutdinov, R., and Manning, C. D. Hotpotqa: A dataset for diverse, explainable multi-hop question answering. arXiv preprint arXiv:1809.09600, 2018. +Zhou, C., Liu, P., Xu, P., Iyer, S., Sun, J., Mao, Y., Ma, X., Efrat, A., Yu, P., Yu, L., et al. Lima: Less is more for alignment. arXiv preprint arXiv:2305.11206, 2023. \ No newline at end of file diff --git a/parse/test/rzQGHXNReU/rzQGHXNReU_content_list.json b/parse/test/rzQGHXNReU/rzQGHXNReU_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..d215bf4ca6031a49773c8bffdf55deaf6d5cd3d5 --- /dev/null +++ b/parse/test/rzQGHXNReU/rzQGHXNReU_content_list.json @@ -0,0 +1,428 @@ +[ + { + "type": "text", + "text": "RAFT: Adapting Language Model to Domain Specific RAG ", + "text_level": 1, + "page_idx": 0 + }, + { + "type": "text", + "text": "Tianjun Zhang \\* \nDepartment of Computer Science \nUC Berkeley \nBerkeley, CA 94720, USA \n{tianjunz}@berkeley.edu \nShishir G. Patil, Naman Jain, Sheng Shen \nDepartment of Computer Science \nUC Berkeley \nBerkeley, CA 94720, USA \n{shishirpatil,naman_jain,sheng.s}@berkeley.edu \nMatei Zaharia, Ion Stoica, Joseph E. 
Gonzalez \nDepartment of Computer Science \nUC Berkeley \nBerkeley, CA 94720, USA \n{matei,istoica,jegonzal}@berkeley.edu ", + "page_idx": 0 + }, + { + "type": "text", + "text": "", + "page_idx": 0 + }, + { + "type": "text", + "text": "", + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract ", + "text_level": 1, + "page_idx": 0 + }, + { + "type": "text", + "text": "Pretraining Large Language Models (LLMs) on large corpora of textual data is now a standard paradigm. When using these LLMs for many downstream applications, it is common to additionally incorporate new information into the pretrained model either through RAG-based-prompting, or finetuning. However, the best methodology to incorporate information remains an open question. In this paper, we present Retrieval Augmented Fine Tuning (RAFT), a training recipe which improves the model’s ability to answer questions in \"open-book\" in-domain settings. In training RAFT, given a question, and a set of retrieved documents, we train the model to ignore those documents that don’t help in answering the question, which we call, distractor documents. RAFT accomplishes this by citing verbatim the right sequence from the relevant document to help answer the question. This coupled with RAFT’s chain-of-thought-style response helps improve the model’s ability to reason. In domain specific RAG, RAFT consistently improves the model’s performance across PubMed, HotpotQA, and Gorilla datasets, presenting a post-training recipe to improve pre-trained LLMs to in-domain RAG. ", + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction ", + "text_level": 1, + "page_idx": 0 + }, + { + "type": "text", + "text": "Trained on vast quantities of public data, Large Language Models LLMs have achieved significant advances in a wide range of general knowledge reasoning tasks Brown et al. (2020); Wei et al. (2022). 
However, increasingly LLMs are being employed in specialized domains to support tasks ranging from code completion for specific software frameworks to question answering on specific document collections (e.g., legal or medical documents). In these settings, general knowledge reasoning is less critical and instead the primary goal is to maximize accuracy based on a given set of documents. Indeed, adapting LLMs to the specialized domains (e.g., recent news, enterprise private documents, or program resources constructed after the training cutoff) is essential to many emerging applications (Vu et al., 2023; Lazaridou et al., 2022) and is the focus of this work. ", + "page_idx": 0 + }, + { + "type": "text", + "text": "This paper studies the following question – How do we adapt pre-trained LLMs for Retrieval Augmented Generation (RAG) in specialized domains? ", + "page_idx": 0 + }, + { + "type": "text", + "text": "When it comes to adapting LLMs to specialized domains, we consider the following two candidates: in-context learning through Retrieval-Augmented Generation (RAG) and supervised fine-tuning. RAG based methods allow the LLM to reference the documents when answering questions. However, RAG based in-context learning methods fail to leverage the learning opportunity afforded by the fixed domain setting and early access to the test documents. Alternatively, supervised fine-tuning offers the opportunity to learn more general patterns in the documents and better align to end tasks and user preferences Zhou et al. (2023). However, existing fine-tuning based approaches either fail to leverage the documents at test time (don’t incorporate RAG) or fail to account for the imperfections in retrieval process during training. 
", + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/f0db1ef1b057bc1bc9295a8166a6d296edcd745858acff236281a68693676d87.jpg", + "image_caption": [ + "Figure 1: How best to prepare for an Exam?(a) Fine-tuning based approaches implement \"studying\" by either directly \"memorizing\" the input documents or answering practice QA without referencing the documents. (b) Alternatively, in-context retrieval methods fail to leverage the learning opportunity afforded by the fixed domain and are equivalent to taking an open-book exam without studying. In contrast, our approach (c) RAFT leverages fine-tuning with question-answer pairs while referencing the documents in a simulated imperfect retrieval setting β€” thereby effectively preparing for the open-book exam setting. " + ], + "image_footnote": [], + "page_idx": 1 + }, + { + "type": "text", + "text": "", + "page_idx": 1 + }, + { + "type": "text", + "text": "We can draw an analogy to an open-book exam. Existing in-context retrieval methods are equivalent to taking an open-book exam without studying. Alternatively, existing finetuning based approaches implement β€œstudying\" by either directly β€œmemorizing\" Xiong et al. (2023) the input documents or answering practice questions Wang et al. (2022) without referencing the documents. While these approaches leverage in-domain learning they fail to prepare for the open-book nature of the test setting. ", + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper, we study how to combine instruction fine-tuning (IFT) with retrieval augmented generation (RAG). We propose a novel adaptation strategy – Retrieval-Augmented Fine Tuning (RAFT). RAFT specifically addresses the challenge of fine-tuning LLMs to both incorporate domain knowledge while also improving in-domain RAG performance. RAFT aims to not only enable models to learn domain-specific knowledge through fine-tuning, but also to ensure robustness against distracting retrieved information. 
This is achieved by training the models to understand the dynamics between the question (prompt), the domain-specific documents retrieved, and the right answer. Going back to our analogy to the open book exam, our approach is analogous to studying for an open-book exam by recognizing relevant, and irrelevant retrieved documents. ", + "page_idx": 1 + }, + { + "type": "text", + "text": "In RAFT, we train the model to answer the question (Q) from Document(s) $( \\mathrm { D ^ { * } } )$ to generate answer $( \\mathrm { A } ^ { * } )$ , where $\\mathsf { A } ^ { * }$ includes chain-of-thought reasoning Wei et al. (2022); Anthropic (2023), and in the presence of distractor documents $( D _ { k } )$ . We explain the methodology in Section 3 and analyze the sensitivity to the number of distractor documents $( k )$ at train- and test- time in Section 5. RAFT consistently outperforms Supervised-finetuning both withand without- RAG across PubMed Dernoncourt & Lee (2017), HotPot QA Yang et al. (2018), and HuggingFace Hub, Torch Hub, and Tensorflow Hub Gorilla datasets Patil et al. (2023), presenting a novel, yet simple technique to improve pre-trained LLMs for in-domain RAG. Our code is available at https://github.com/ShishirPatil/gorilla. ", + "page_idx": 1 + }, + { + "type": "text", + "text": "2 LLMs for Open-Book Exam ", + "text_level": 1, + "page_idx": 1 + }, + { + "type": "text", + "text": "To understand our goal better, we expand on our analogy between training an LLM with the real-world setting of prepararing for an exam. ", + "page_idx": 1 + }, + { + "type": "text", + "text": "Closed-Book Exam A closed book exam often refers to the scenario where the LLMs do not have access to any additional documents or references to answer the questions during the exam. For LLMs, this is equivalent to the scenario, for example, in which the LLM is used as a chatbot. 
In this scenario the LLM draws from the knowledge baked in during pre-training and supervised-finetuning to respond to the users’ prompt. ", + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/d54b49279897f74e204c908fc173727448ff6a0c168d92f2063727d1a38456d9.jpg", + "image_caption": [ + "Figure 2: Overview of our RAFT method. The top-left figure depicts our approach of adapting LLMs to reading solution from a set of positive and distractor documents in contrast to standard RAG setup where models are trained based on the retriever outputs, which is a mixture of both memorization and reading. At test time, all methods follow the standard RAG setting, provided with a top-k retrieved documents in the context. " + ], + "image_footnote": [], + "page_idx": 2 + }, + { + "type": "text", + "text": "", + "page_idx": 2 + }, + { + "type": "text", + "text": "Open Book Exam In contrast, we liken the open-book exam setting to the scenario in which the LLM can refer to external sources of information (e.g., a website or a book chapter). In such scenarios, typically, the LLM is paired with retriever which retrieves $^ { \\prime } \\mathbf { k } ^ { \\prime }$ documents (or specific segments of the document) which are appended to the users’ prompt. It is only through these documents retrieved that the LLM gains access to β€œdomain-specific information”. As a result, we argue that the LLM’s performance in these settings, where it is trained as a general-purpose LLM is largely dependent on the quality of the retriever and how accurately the retriever can identify the most relevant piece of information. ", + "page_idx": 2 + }, + { + "type": "text", + "text": "Domain-Specific Open-Book Exam In this paper, we focus on the narrower but increasingly popular domain than the general open book exam, which we call the domain-specific open-book exam. Here, we know apriori the domain in which the LLM will be tested. 
The LLM can respond to the users’ prompt using use any and all information from this specific domain, which it has been fine-tuned on. Examples of domain specific examples include enterprise documents, code repositories belonging to an organization, etc. In all these scenarios, the LLM will be used to respond to the questions, whose answers can be found within a collection of documents. The retrieval technique itself has little to no-impact on the mechanism (though it may impact the accuracy). This paper studies the domain-specific open-book setting and how to adapt a pretrained LLM to this specific domain, including how to make it more robust to a varying number of retrieved documents and distractors. ", + "page_idx": 2 + }, + { + "type": "text", + "text": "3 RAFT ", + "text_level": 1, + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, we present RAFT, a novel way of training LLMs for domain-specific openbook exams. We first introduce the classical technique of supervised fine-tuning, followed with the key takeaways from our experiments. Then, we introduce RAFT , a modified version of general instruction tuning. Lastly, we provide an overview of the experiments to expect in the later sections. ", + "page_idx": 2 + }, + { + "type": "text", + "text": "Supervised Finetuning ", + "text_level": 1, + "page_idx": 2 + }, + { + "type": "text", + "text": "Consider the supervised fine-tuning (SFT) setting for a Question-Answer dataset. The formulation consists of the Dataset $( \\bar { D } )$ from which a set of Question (Q) and corresponding answer $( A )$ pairs are derived or already available. In the classical SFT setting, the model is trained to improve it’s ability to answer the questions based on it’s knowledge - obtained either during pre-training, or during the SFT training phase. 
The model so trained can also ", + "page_idx": 2 + }, + { + "type": "text", + "text": "Figure 3: RAFT prompt to help LLM evaluate its own generated reasoning and answers, contrasting them with the correct reasoning and answers. The LLM is prompted to identify errors in its reasoning and extract key insights for improvement. This figure specifically represents the β€˜GenerateExplanationβ€˜ step in the RAFT algorithm (Section 3). ", + "page_idx": 3 + }, + { + "type": "text", + "text": "be used at test-time with Retrieval Augmented Generation (RAG) setting, where additional documents can be introduced in the prompt to help the model answer the question. This can be represented as follows: ", + "page_idx": 3 + }, + { + "type": "text", + "text": "{Train: $\\mathbf Q \\to \\mathbf A _ { \\mathrm { j } } ^ { \\prime }$ , {0-shot Inference: $\\mathbf Q \\to \\mathbf A \\}$ , {RAG Inference: $\\mathbf { Q } + \\mathbf { D } \\mathbf { A } \\}$ ", + "page_idx": 3 + }, + { + "type": "text", + "text": "RAFT: Retrieval Augmented Fine-Tuning (RAFT), presents a novel recipe to prepare finetuning data to tailor the models for domain-specific open-book setting, equivalent to indomain RAG In RAFT, we prepare the training data such that each data point contains a question $( Q )$ , a set of documents $( D _ { k } )$ , and a corresponding Chain-of-though style answer $( \\hat { \\boldsymbol { A } } ^ { * } )$ generated from one of the document $( D ^ { * } )$ . We differentiate between two types of documents: β€˜golden’ documents $( D * )$ i.e. the documents from which the answer to the question can be deduced, and β€˜distractor’ documents $( D _ { i } )$ that do not contain answerrelevant information. As an implementation detail, the β€˜golden’ document doesn’t need to be a single document, but can be more than one document, as is the case in HotpotQA Yang et al. (2018). 
Then, for $P$ fraction of the questions $( q _ { i } )$ in the dataset, we retain the golden document $( d _ { i } ^ { * } )$ along with distractor documents $( d _ { k - 1 } )$ . For $( 1 - P )$ fraction of the questions $( q _ { i } )$ in the dataset, we include no golden document and only include distractor documents $( d _ { k } )$ . We then fine-tune the language model using standard supervised training (SFT) technique, training it to generate answers from the provided documents and question. Fig. 2 illustrates the high-level design principal for RAFT . ", + "page_idx": 3 + }, + { + "type": "text", + "text": "We demonstrate that our RAG approach trains the model to perform better RAG on the set of documents it is trained on i.e., in-domain. By removing the golden documents in some instances, we are compelling the model to memorize answers instead of deriving them from the context. The training data for RAFT is as follows, and an example training data can be seen in Fig. 3: ", + "page_idx": 3 + }, + { + "type": "text", + "text": "$\\mathbf { P } \\%$ of data: $\\mathbf { Q } + \\mathbf { D } ^ { * } + \\mathbf { D } _ { 1 } + \\mathbf { D } _ { 2 } + \\ldots + \\mathbf { D } _ { k } \\mathbf { A } *$ $( 1 - \\mathbf { P } ) \\%$ of data: $\\mathbf { Q } + \\mathbf { D } _ { 1 } + \\mathbf { D } _ { 2 } + \\ldots + \\mathbf { D } _ { k } \\mathbf { A } *$ ", + "page_idx": 3 + }, + { + "type": "text", + "text": "Subsequently, for the test scenario, the model is provided with the Q and top-k documents retrieved by the RAG pipeline. Note that RAFT is independent of the retriever used. ", + "page_idx": 3 + }, + { + "type": "text", + "text": "A key factor in enhancing training quality is the generation of a reasoning process, such as Chain-of-Thought, to explain the provided answers. RAFT approach is similar: we demonstrate that creating a full reasoning chain and in-addition, clearly citing sources enhances the model’s accuracy in answering questions. In Fig. 
3, we illustrate this setup. Generating the training data in this fashion, involves presenting the model with a question, context, and verified answers, and then requesting it to form a reasoning chain that appropriately references the original context. ", + "page_idx": 3 + }, + { + "type": "text", + "text": "For all the datasets in our experiments, we generate the answers using the technique described above. Note that the Gorilla APIBench dataset, already includes reasoning in the answers. We provide an example of the generation step in Fig. 3, the detailed reasoning answer includes a citation from the original context inside ##begin_quote## and ##end_quote## as well as the detailed explanation on how to reach the conclusion based on the citations. We demonstrate that adding detailed reasoning paragraphs can help boost the model’s performance in our experiment section. ", + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/eabf5ecf2f67950e464b9edcd9f8c5d50f934120774dd57d884d10a27971c5e1.jpg", + "table_caption": [ + "Table 1: RAFT improves RAG performance for all specialized domains: Across PubMed, HotPot, HuggingFace, Torch Hub, and Tensorflow Hub, we see that Domain-specific Finetuning improves significantly of the performance of the base model, RAFT consistently outperforms the existing domain-specific finetuning method with or without RAG. This suggests the need to train the model with context. We compare our model with LLaMA finetuning receipes, and provide GPT-3.5 for reference. " + ], + "table_footnote": [], + "table_body": "
PubMedHotPotHuggingFaceTorch HubTensorFlow
GPT-3.5 + RAG71.6041.529.0860.2165.59
LLaMA2-7B56.50.540.2200
LLaMA2-7B + RAG58.80.0326.4308.6043.06
DSF59.76.3861.0684.9486.56
DSF + RAG71.64.4142.5982.8060.29
RAFT (LLaMA2-7B)73.3035.2874.0084.9586.86
", + "page_idx": 4 + }, + { + "type": "text", + "text": "4 Evaluation ", + "text_level": 1, + "page_idx": 4 + }, + { + "type": "text", + "text": "We design our experiments to study how well RAFT performs compared to various baselines. We find that the RAFT-7B model (a finetuned version of LlaMA-2) is better at reading and extracting information from in-domain documents, than domain-specific finetuned model, and general-purpose model with RAG. As an ablation, we also demonstrate how important it is for the model to learn with Chain-of-Thought responses. In this section, we will first introduce all the datasets we used in the experiments, then all the baseline model/fine-tuning techniques that we benchmark against. ", + "page_idx": 4 + }, + { + "type": "text", + "text": "Datasets In our experiments, we use the following datasets to evaluate our model and all baselines. We selected these datasets to represent both popular and diverse domains including Wikipedia, Coding/API documents, and question-answering on medical documents. Natural Questions (NQ) Kwiatkowski et al. (2019), Trivia QA Joshi et al. (2017) and HotpotQA Yang et al. (2018) are the open-domain question-answers based on Wikipedia, mainly focused on common knowledge (e.g., movies, sports, etc). HuggingFace, Torch Hub, and TensorFlow Hub are from the APIBench Patil et al. (2023) proposed in the Gorilla paper. These benchmarks measure how to generate the correct, functional, and executable API calls based on the documentation. PubMed QA Jin et al. (2019) is a question-answering dataset tailored only for biomedical-research question-answering. It mainly focuses on answering medical and biology questions based on a given set of documents. We would like to highlight that $( \\mathrm { N Q } ,$ Trivia $\\{ \\hat { \\mathrm { Q A } } ,$ and HotpotQA) are relatively general domain whereas the latter two domains are on domain-specific documents. 
", + "page_idx": 4 + }, + { + "type": "text", + "text": "Baselines We consider the following baselines for our experiments: ", + "page_idx": 4 + }, + { + "type": "text", + "text": "β€’ LlaMA2-7B-chat model with 0-shot prompting: this is the commonly used instruction-finetuned model for QA tasks, where we provide clearly written instructions, but no reference documentation. \nβ€’ LlaMA2-7B-chat model with RAG (Llama2 $^ +$ RAG): similar to the previous setting, except here we include reference documents. This is a popular technique when dealing with domain-specific QA tasks. Domain-Specific Finetuning with 0-shot prompting (DSF): Standard supervisedfinetuning, without documents in context. We find that its mostly useful to align the answering style of the model as well as get familiar with the domain context. \nDomain-Specific Finetuning with RAG $( \\mathrm { D S F } + \\mathrm { R A G } )$ ): Equip a domain-specific finetuned-model with external knowledge using RAG. So, for the β€œknowledge” the model does not know, it can still refer to the context. ", + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/19bf288f1875c260ddfbb6355555262c6edf9186b41d826d401d956d5d148e14.jpg", + "table_caption": [ + "Table 2: Ablation on Chain-of-Thought: The numbers of RAFT and RAFT without CoT. Results on various datasets show that adding CoT can significantly improve the performance of the finetuned model. With a gains of $9 . 6 6 \\%$ and $1 \\bar { 4 } . 9 3 \\%$ in the Hotpot QA and HuggingFace datasets respectively. " + ], + "table_footnote": [], + "table_body": "
PubMedHotpotQAHuggingFaceTorch HubTensorFlow
RAFT w.0 CoT68.3025.6259.0786.5683.21
RAFT73.3035.2874.0084.9586.86
", + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1 Results ", + "text_level": 1, + "page_idx": 5 + }, + { + "type": "text", + "text": "Using the above datasets and baselines, we evaluate our model RAFT and demonstrate the effectiveness of RAFT in Tab. 1. We see that RAFT consistently and significantly outperforms the baselines. Compared with the base Llama-2 instruction-tuned model, RAFT with RAG does much better in terms of extracting information as well as being robust towards distractors. The gain can be as big as $3 5 . 2 5 \\%$ on Hotpot QA and $7 6 . 3 5 \\%$ on Torch Hub evaluation. Compared with DSF on the specific dataset, our model does better at relying on the provided context to solve the problem. RAFT does much better on the tasks like Hotpot and HuggingFace datasets $( 3 0 . { \\bar { 8 } } 7 \\%$ on Hotpot and $3 1 . 4 1 \\%$ on HuggingFace). Note that for PubMed QA, since it is a binary yes/no question, we don’t observe significant gains when we compare our model with $\\mathrm { D } \\mathbf { \\dot { S } } \\mathbf { \\dot { F } } + \\mathbf { R } \\mathbf { A } \\mathbf { G }$ . Even compared with a much larger and better model GPT-3.5, RAFT demonstrates significant advantages. ", + "page_idx": 5 + }, + { + "type": "text", + "text": "Overall, the LLaMA-7B model, both with and without the RAG, performs poorly due to its answering style not aligning with the ground truth. By applying domain-specific tuning, we significantly enhance its performance. This process enables the model to learn and adopt the appropriate style of answering. However, introducing RAG to a domain-specifically fine-tuned (DSF) model doesn’t invariably lead to better outcomes. This might indicate that the model lacks training in context processing and extracting useful information from it. By incorporating our method, RAFT , we train the model not only to match its answering style with that required but also to improve its document processing capabilities. 
Consequently, our approach outperforms all others. ", + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2 Effect of CoT ", + "text_level": 1, + "page_idx": 5 + }, + { + "type": "text", + "text": "We also conduct an analysis to evaluate the effectiveness of the Chain-of-Thought approach in enhancing the model’s performance. As indicated in Table 2, simply providing the answer to a question may not always be adequate. This approach can lead to a rapid decrease in loss, resulting in the model beginning to overfit. Incorporating a reasoning chain that not only guides the model to the answer but also enriches the model’s understanding can improve the overall accuracy and prevent overfitting to concise answers. In our experiments, integrating the Chain-of-Thought significantly enhances training robustness. We employ GPT-4-1106 to generate our Chain-of-Thought prompts and include an example of the prompt we used in Figure 3. ", + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3 Qualitative Analysis ", + "text_level": 1, + "page_idx": 5 + }, + { + "type": "text", + "text": "To illustrate the potential advantages of RAFT over the domain-specifically fine-tuned (DSF) approach, we present a comparative example in Figure 4. This example qualitatively demonstrates a scenario where the DSF model becomes confused by a question asking for the identity of a screenwriter. Instead of providing the correct name, it mistakenly cites one of the films written by the screenwriter. In contrast, the RAFT model accurately answers the question. This discrepancy suggests that training a model solely with question-answer pairs may impair its ability to derive relevant context from provided documents. The comparison underscores the importance of incorporating both standard instructional tuning and context comprehension into the training dataset to preserve and enhance the model’s ability to process text effectively. 
", + "page_idx": 5 + }, + { + "type": "text", + "text": "HotPot QA ", + "text_level": 1, + "page_idx": 6 + }, + { + "type": "text", + "text": "Question: What screenwriter with credits for β€œEvolution”[0/1879]e a film starring Nicolas Cage and TΓ©a Leoni? \nDocuments: . . . David Weissman is a screenwriter and director. His film credits include β€œThe Family Man” (2000), β€œEvolution” (2001), and β€œWhen in Rome” (2010). The Family Man is a 2000 American romantic comedy-drama film directed by Brett Ratner, written by David Diamond and David Weissman, and starring Nicolas Cage and TΓ©a Leoni. ", + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/0bcc3d38e5c0488526bd52fb267fd171f556aebd1617289e7668d2a9205a03f4.jpg", + "image_caption": [ + "Figure 4: Comparison of RAFT and DSF: On the HotPot QA dataset, we can see that DSF model extracts the wrong information from the context when the question is asking who is the screen writer and it answers a film name. RAFT manages to get the accurate results . " + ], + "image_footnote": [], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.4 Should we train the LLM always with the golden context for RAG? ", + "page_idx": 6 + }, + { + "type": "text", + "text": "In our exploration of whether large language models (LLMs) should always be trained with the golden context for Retrieval-Augmented Generation (RAG), we address a key question: what proportion $( \\mathrm { p \\% ) }$ of the training data should include golden documents? Intuitively, one might assume that for effective training in reading and extracting information from context (e.g., RAG tasks), the golden document should always be included during training $\\mathrm { ( P = 1 0 0 \\% }$ ). However, our findings challenge this assumption: incorporating a portion of the training data without the golden document in the context $\\mathrm { ( P = 8 0 \\% }$ ) appears to enhance the model’s performance on RAG tasks. 
", + "page_idx": 6 + }, + { + "type": "text", + "text": "Figure 5 presents our investigation into the hyperparameter $\\mathrm { P \\% }$ , which represents the percentage of training instances that should include golden documents. We find that the optimal proportion varies across datasets, with $\\mathrm { P \\% }$ ranging from $4 0 \\%$ , $6 0 \\% ,$ and $1 0 0 \\%$ . This indicates that training your LLM without the correct corresponding context at times can be beneficial for the downstream task of answering questions related to the documents. In our training setup, we include four distractor documents alongside the golden document, and at test time, we maintain this format by providing the golden document with four distractors. Our findings suggest that, for domain-specific RAG tasks, including a certain percentage of training data without the golden documents in the context proves to be advantageous. ", + "page_idx": 6 + }, + { + "type": "text", + "text": "5 RAFT Generalizes to Top-K RAG ", + "text_level": 1, + "page_idx": 6 + }, + { + "type": "text", + "text": "We now study another important problem: How does the number of distractor documents in RAFT affect the model’s performance when augmented with top-k RAG results during evaluation? Previous research has highlighted the vulnerability of LLMs to irrelevant text (see studies (Shi et al., 2023a; Weston & Sukhbaatar, 2023; Liu et al., 2023)). This issue is particularly critical for LLMs $^ +$ RAG since top-k RAG is frequently employed at test time to ensure high recall. Such a scenario necessitates the model to have the ability to discern and disregard irrelevant content, focusing solely on pertinent information. ", + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/8e086fb8cb885d22dfb048538cf47a1ac09dfb1873a60ba60c02530cd07d066d.jpg", + "image_caption": [ + "Figure 5: How many golden documents to involve? 
We study the hyperparameter $\\mathrm { P \\% }$ where it indicates how much portion of training data is with golden document. Results on NQ, TQA and HotpotQA suggest that mixing some amount of data that the golden document is not put in the context is helpful for in-domain RAG. " + ], + "image_footnote": [], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.1 Making Model Robust to top-K RAG ", + "text_level": 1, + "page_idx": 7 + }, + { + "type": "text", + "text": "To tackle the challenge of enhancing large language models’ (LLMs) ability to sift through irrelevant text within the retrieval pipeline, our analysis revealed that training solely with golden (highly relevant) documents can inadvertently diminish the model’s ability to discern and disregard irrelevant information. To address this, our algorithm, RAFT , adopts a strategy that integrates golden documents with a mix of irrelevant ones. This methodology prompts us to investigate the ideal fraction of distractor (irrelevant) documents to incorporate throughout the training process and to assess how well this training approach adapts to different volumes of documents encountered by the Retrieval-Augmented Generation (RAG) during the test phase. Our aim is to refine the balance between relevant and irrelevant information to strenghten the model’s efficiency in identifying and utilizing pertinent content. Notice that Sec 4.4 looked what what $\\mathrm { P \\% }$ of training data should include distractors, while in this section, we study test-time scenarios. ", + "page_idx": 7 + }, + { + "type": "text", + "text": "Training with Distractor Documents To enhance the robustness of LLMs against irrelevant text in retrieved documents, we adopted a finetuning approach that incorporates both golden (highly relevant) documents and distractor (irrelevant) documents. 
The model was trained with varying numbers of distractor documents, but consistently evaluated using the top-3 documents obtained from the retriever - not to be confused with $p$ . Our findings, detailed in Fig. 6, reveal that finetuning with only the golden document frequently results in inferior performance compared to configurations that include a greater number of distractor documents. As we can see in the figure, the better performance for Natural Questions is training with $D ^ { * } + 3 D$ and it is $D ^ { * } + 1 D$ documents with Hotpot QA. This insight has been particularly beneficial for our algorithm, RAFT . In our experiments, we consistently employ a training setup consisting of one golden document alongside four distractor documents. ", + "page_idx": 7 + }, + { + "type": "text", + "text": "Generalization to a variable number of test-time documents. We extended our research to examine the impact of different quantities of test-time documents on the model’s performance. Specifically, our experiments focused on assessing how models, trained with varying numbers of distractor documents, respond to changes in the number of documents presented at test time. The results, illustrated in Fig. 6, confirm that the inclusion of distractor documents during training indeed makes the model more resilient to fluctuations in the number of documents encountered during testing. This ability to maintain consistent performance despite variations in test-time document numbers further validates the robustness of our approach, RAFT . This finding underscores the importance of a well-calibrated training environment to prepare the model for a range of scenarios it may encounter in real-world. 
", + "page_idx": 7 + }, + { + "type": "text", + "text": "6 Related Works ", + "text_level": 1, + "page_idx": 7 + }, + { + "type": "text", + "text": "Retrieval-Augmented Language Models Retrieval-Augmented Language Models (RALMs) enhance LLMs by integrating a retrieval module that sources relevant information from external knowledge bases, significantly improving performance across various NLP tasks, including language modeling (Guu et al., 2020; Borgeaud et al., 2022; Khandelwal et al., ", + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/12b8e955ae9a0307c0a7f13890daa53d74edc9d6d0f2d3be9e950103c883cfdd.jpg", + "image_caption": [ + "Figure 6: Test-Time Documents Varying: To analyze how robust RAFT is to varying number of test-time documents, we study three domains – NQ, Trivia QA and HotPot QA. In ${ \\mathrm { N Q } } ,$ we find that training with 4 documents leads to optimal performance, and this changes to 3 and 2 for for Trivia QA and HotPot QA respectively. However, we see that training with only golden documents leads to poor performance. " + ], + "image_footnote": [], + "page_idx": 8 + }, + { + "type": "text", + "text": "2019; Shi et al., 2023d; Lin et al., 2023b; Shi et al., 2023c; Asai et al., 2023; Xu et al., 2023; Wang et al., 2023) and open-domain question answering (Izacard et al., 2023; Lewis et al., 2020). For instance, Atlas (Izacard et al., 2023) fine-tunes T5 models with the retriever, treating documents as latent variables, while RETRO (Borgeaud et al., 2022) modifies the decoder-only architecture to include retrieved texts and conducts pre-training from scratch. kNN-LM (Khandelwal et al., 2019) interpolates between the LM’s next token distribution and distributions computed from retrieved tokens at inference. (Shi et al., 2023d; Ram et al., 2023) assume black-box access to an LLM, combining it with either off-the-shelf or fine-tuned retriever. 
", + "page_idx": 8 + }, + { + "type": "text", + "text": "Memorization A key question around large neural language models is whether they truly β€œunderstand” text (Feldman, 2020; Power et al., 2022) or simply rely on surface pattern memorization (Carlini et al., 2019; TΓ€nzer et al., 2022). (Feldman, 2020; Carlini et al., 2019; 2022) develop methodologies to quantify the extent of memorization in neural models. (Brown et al., 2020; Power et al., 2022; Liu et al., 2022) further explored how memorization impacts the models’ generalization capabilities. (Carlini et al., 2021; Shi et al., 2023b) demonstrated the ability of language models to memorize and regurgitate training data, raising significant privacy concerns (Kandpal et al., 2022; Pan et al., 2020). ", + "page_idx": 8 + }, + { + "type": "text", + "text": "Finetuning for RAG More recently, several papers have been exploring the idea of finetuning a pretrained LLM to be better at RAG tasks (Lin et al., 2023a; Wang et al., 2023; Xu et al., 2023; Liu et al., 2024). These works focus on constructing a combination of finetuning dataset for RAG and train a model to perform well on these tasks. In particular, in their settings, at test time, the domain or documents can be different than the training time; whereas our paper studies a slightly opposite scenario where we only care about testing the LLM on the same set of documents. ", + "page_idx": 8 + }, + { + "type": "text", + "text": "7 Conclusion ", + "text_level": 1, + "page_idx": 8 + }, + { + "type": "text", + "text": "RAFT is a training strategy designed to enhance the model’s performance in answering questions within a specific domain, in \"open-book\" settings. We highlight several crucial design decisions, such as training the model alongside distractor documents, organizing the dataset so a portion lacks golden documents in their context, and formulating answers in a chain-of-thought manner with direct quotations from the relevant text. 
Our evaluations on PubMed, HotpotQA, and Gorilla API Bench underline RAFT’s significant potential. ", + "page_idx": 8 + }, + { + "type": "text", + "text": "References ", + "text_level": 1, + "page_idx": 8 + }, + { + "type": "text", + "text": "Anthropic. Prompt engineering for claude’s long context window. 2023. ", + "page_idx": 8 + }, + { + "type": "text", + "text": "Asai, A., Wu, Z., Wang, Y., Sil, A., and Hajishirzi, H. Self-rag: Learning to retrieve, generate, and critique through self-reflection. arXiv preprint arXiv:2310.11511, 2023. ", + "page_idx": 9 + }, + { + "type": "text", + "text": "Borgeaud, S., Mensch, A., Hoffmann, J., Cai, T., Rutherford, E., Millican, K., Van Den Driessche, G. B., Lespiau, J.-B., Damoc, B., Clark, A., et al. Improving language models by retrieving from trillions of tokens. In International conference on machine learning, pp. 2206–2240. PMLR, 2022. \nBrown, T., Mann, B., Ryder, N., Subbiah, M., Kaplan, J. D., Dhariwal, P., Neelakantan, A., Shyam, P., Sastry, G., Askell, A., et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877–1901, 2020. \nCarlini, N., Liu, C., Erlingsson, Ú., Kos, J., and Song, D. The secret sharer: Evaluating and testing unintended memorization in neural networks. In 28th USENIX Security Symposium (USENIX Security 19), pp. 267–284, 2019. \nCarlini, N., Tramer, F., Wallace, E., Jagielski, M., Herbert-Voss, A., Lee, K., Roberts, A., Brown, T., Song, D., Erlingsson, U., et al. Extracting training data from large language models. In 30th USENIX Security Symposium (USENIX Security 21), pp. 2633–2650, 2021. \nCarlini, N., Ippolito, D., Jagielski, M., Lee, K., Tramer, F., and Zhang, C. Quantifying memorization across neural language models. In The Eleventh International Conference on Learning Representations, 2022. \nDernoncourt, F. and Lee, J. Y. Pubmed 200k rct: a dataset for sequential sentence classification in medical abstracts. 
arXiv preprint arXiv:1710.06071, 2017. \nFeldman, V. Does learning require memorization? a short tale about a long tail. In Proceedings of the 52nd Annual ACM SIGACT Symposium on Theory of Computing, pp. 954–959, 2020. \nGuu, K., Lee, K., Tung, Z., Pasupat, P., and Chang, M. Retrieval augmented language model pre-training. In International conference on machine learning, pp. 3929–3938. PMLR, 2020. \nIzacard, G., Lewis, P., Lomeli, M., Hosseini, L., Petroni, F., Schick, T., Dwivedi-Yu, J., Joulin, A., Riedel, S., and Grave, E. Atlas: Few-shot learning with retrieval augmented language models. Journal of Machine Learning Research, 24(251):1–43, 2023. URL http: //jmlr.org/papers/v24/23-0037.html. \nJin, Q., Dhingra, B., Liu, Z., Cohen, W. W., and Lu, X. Pubmedqa: A dataset for biomedical research question answering. arXiv preprint arXiv:1909.06146, 2019. \nJoshi, M., Choi, E., Weld, D. S., and Zettlemoyer, L. Triviaqa: A large scale distantly supervised challenge dataset for reading comprehension. arXiv preprint arXiv:1705.03551, 2017. \nKandpal, N., Wallace, E., and Raffel, C. Deduplicating training data mitigates privacy risks in language models. In International Conference on Machine Learning, pp. 10697–10707. PMLR, 2022. \nKhandelwal, U., Levy, O., Jurafsky, D., Zettlemoyer, L., and Lewis, M. Generalization through memorization: Nearest neighbor language models. arXiv preprint arXiv:1911.00172, 2019. \nKwiatkowski, T., Palomaki, J., Redfield, O., Collins, M., Parikh, A., Alberti, C., Epstein, D., Polosukhin, I., Devlin, J., Lee, K., et al. Natural questions: a benchmark for question answering research. Transactions of the Association for Computational Linguistics, 7:453–466, 2019. \nLazaridou, A., Gribovskaya, E., Stokowiec, W., and Grigorev, N. Internet-augmented language models through few-shot prompting for open-domain question answering. arXiv preprint arXiv:2203.05115, 2022. 
\nLewis, P., Perez, E., Piktus, A., Petroni, F., Karpukhin, V., Goyal, N., KΓΌttler, H., Lewis, M., Yih, W.-t., RocktΓ€schel, T., et al. Retrieval-augmented generation for knowledge-intensive nlp tasks. Advances in Neural Information Processing Systems, 33:9459–9474, 2020. \nLin, X. V., Chen, X., Chen, M., Shi, W., Lomeli, M., James, R., Rodriguez, P., Kahn, J., Szilvasy, G., Lewis, M., et al. Ra-dit: Retrieval-augmented dual instruction tuning. arXiv preprint arXiv:2310.01352, 2023a. \nLin, X. V., Chen, X., Chen, M., Shi, W., Lomeli, M., James, R., Rodriguez, P., Kahn, J., Szilvasy, G., Lewis, M., et al. Ra-dit: Retrieval-augmented dual instruction tuning. arXiv preprint arXiv:2310.01352, 2023b. \nLiu, N. F., Lin, K., Hewitt, J., Paranjape, A., Bevilacqua, M., Petroni, F., and Liang, P. Lost in the middle: How language models use long contexts. arXiv preprint arXiv:2307.03172, 2023. \nLiu, Z., Kitouni, O., Nolte, N. S., Michaud, E., Tegmark, M., and Williams, M. Towards understanding grokking: An effective theory of representation learning. Advances in Neural Information Processing Systems, 35:34651–34663, 2022. \nLiu, Z., Ping, W., Roy, R., Xu, P., Shoeybi, M., and Catanzaro, B. Chatqa: Building gpt-4 level conversational qa models. arXiv preprint arXiv:2401.10225, 2024. \nPan, X., Zhang, M., Ji, S., and Yang, M. Privacy risks of general-purpose language models. In 2020 IEEE Symposium on Security and Privacy (SP), pp. 1314–1331. IEEE, 2020. \nPatil, S. G., Zhang, T., Wang, X., and Gonzalez, J. E. Gorilla: Large language model connected with massive apis. arXiv preprint arXiv:2305.15334, 2023. \nPower, A., Burda, Y., Edwards, H., Babuschkin, I., and Misra, V. Grokking: Generalization beyond overfitting on small algorithmic datasets. arXiv preprint arXiv:2201.02177, 2022. \nRam, O., Levine, Y., Dalmedigos, I., Muhlgay, D., Shashua, A., Leyton-Brown, K., and Shoham, Y. In-context retrieval-augmented language models. arXiv preprint arXiv:2302.00083, 2023. 
\nShi, F., Chen, X., Misra, K., Scales, N., Dohan, D., Chi, E. H., SchΓ€rli, N., and Zhou, D. Large language models can be easily distracted by irrelevant context. In International Conference on Machine Learning, pp. 31210–31227. PMLR, 2023a. \nShi, W., Ajith, A., Xia, M., Huang, Y., Liu, D., Blevins, T., Chen, D., and Zettlemoyer, L. Detecting pretraining data from large language models. arXiv preprint arXiv:2310.16789, 2023b. \nShi, W., Min, S., Lomeli, M., Zhou, C., Li, M., Lin, V., Smith, N. A., Zettlemoyer, L., Yih, S., and Lewis, M. In-context pretraining: Language modeling beyond document boundaries. arXiv preprint arXiv:2310.10638, 2023c. \nShi, W., Min, S., Yasunaga, M., Seo, M., James, R., Lewis, M., Zettlemoyer, L., and Yih, W.-t. Replug: Retrieval-augmented black-box language models. arXiv preprint arXiv:2301.12652, 2023d. \nTΓ€nzer, M., Ruder, S., and Rei, M. Memorisation versus generalisation in pre-trained language models. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 7564–7578, 2022. \nVu, T., Iyyer, M., Wang, X., Constant, N., Wei, J., Wei, J., Tar, C., Sung, Y.-H., Zhou, D., Le, Q., et al. Freshllms: Refreshing large language models with search engine augmentation. arXiv preprint arXiv:2310.03214, 2023. \nWang, B., Ping, W., McAfee, L., Xu, P., Li, B., Shoeybi, M., and Catanzaro, B. Instructretro: Instruction tuning post retrieval-augmented pretraining. arXiv preprint arXiv:2310.07713, 2023. \nWang, Y., Kordi, Y., Mishra, S., Liu, A., Smith, N. A., Khashabi, D., and Hajishirzi, H. Self-instruct: Aligning language models with self-generated instructions. arXiv preprint arXiv:2212.10560, 2022. \nWei, J., Wang, X., Schuurmans, D., Bosma, M., Xia, F., Chi, E., Le, Q. V., Zhou, D., et al. Chain-of-thought prompting elicits reasoning in large language models. Advances in Neural Information Processing Systems, 35:24824–24837, 2022. \nWeston, J. and Sukhbaatar, S. 
System 2 attention (is something you might need too). arXiv preprint arXiv:2311.11829, 2023. \nXiong, W., Liu, J., Molybog, I., Zhang, H., Bhargava, P., Hou, R., Martin, L., Rungta, R., Sankararaman, K. A., Oguz, B., et al. Effective long-context scaling of foundation models. arXiv preprint arXiv:2309.16039, 2023. \nXu, P., Ping, W., Wu, X., McAfee, L., Zhu, C., Liu, Z., Subramanian, S., Bakhturina, E., Shoeybi, M., and Catanzaro, B. Retrieval meets long context large language models. arXiv preprint arXiv:2310.03025, 2023. \nYang, Z., Qi, P., Zhang, S., Bengio, Y., Cohen, W. W., Salakhutdinov, R., and Manning, C. D. Hotpotqa: A dataset for diverse, explainable multi-hop question answering. arXiv preprint arXiv:1809.09600, 2018. \nZhou, C., Liu, P., Xu, P., Iyer, S., Sun, J., Mao, Y., Ma, X., Efrat, A., Yu, P., Yu, L., et al. Lima: Less is more for alignment. arXiv preprint arXiv:2305.11206, 2023. ", + "page_idx": 9 + }, + { + "type": "text", + "text": "", + "page_idx": 10 + }, + { + "type": "text", + "text": "", + "page_idx": 11 + } +] \ No newline at end of file diff --git a/parse/test/rzQGHXNReU/rzQGHXNReU_middle.json b/parse/test/rzQGHXNReU/rzQGHXNReU_middle.json new file mode 100644 index 0000000000000000000000000000000000000000..51d2c86112ff80edeb614f28147d2c32907bcfa6 --- /dev/null +++ b/parse/test/rzQGHXNReU/rzQGHXNReU_middle.json @@ -0,0 +1,28338 @@ +{ + "pdf_info": [ + { + "preproc_blocks": [ + { + "type": "title", + "bbox": [ + 105, + 78, + 493, + 96 + ], + "lines": [ + { + "bbox": [ + 104, + 77, + 496, + 99 + ], + "spans": [ + { + "bbox": [ + 104, + 77, + 496, + 99 + ], + "score": 1.0, + "content": "RAFT: Adapting Language Model to Domain Specific RAG", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 113, + 117, + 261, + 172 + ], + "lines": [ + { + "bbox": [ + 112, + 115, + 189, + 131 + ], + "spans": [ + { + "bbox": [ + 112, + 115, + 183, + 131 + ], + "score": 1.0, + "content": "Tianjun Zhang", + 
"type": "text" + }, + { + "bbox": [ + 182, + 119, + 189, + 122 + ], + "score": 0.873, + "content": "*", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 127, + 262, + 143 + ], + "spans": [ + { + "bbox": [ + 111, + 127, + 262, + 143 + ], + "score": 1.0, + "content": "Department of Computer Science", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 137, + 171, + 153 + ], + "spans": [ + { + "bbox": [ + 111, + 137, + 171, + 153 + ], + "score": 1.0, + "content": "UC Berkeley", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 112, + 150, + 224, + 162 + ], + "spans": [ + { + "bbox": [ + 112, + 150, + 224, + 162 + ], + "score": 1.0, + "content": "Berkeley, CA 94720, USA", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 112, + 161, + 235, + 174 + ], + "spans": [ + { + "bbox": [ + 112, + 161, + 235, + 174 + ], + "score": 1.0, + "content": "{tianjunz}@berkeley.edu", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 5 + }, + { + "type": "text", + "bbox": [ + 272, + 117, + 514, + 173 + ], + "lines": [ + { + "bbox": [ + 271, + 116, + 462, + 131 + ], + "spans": [ + { + "bbox": [ + 271, + 116, + 462, + 131 + ], + "score": 1.0, + "content": "Shishir G. 
Patil, Naman Jain, Sheng Shen", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 270, + 128, + 421, + 141 + ], + "spans": [ + { + "bbox": [ + 270, + 128, + 421, + 141 + ], + "score": 1.0, + "content": "Department of Computer Science", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 270, + 137, + 330, + 152 + ], + "spans": [ + { + "bbox": [ + 270, + 137, + 330, + 152 + ], + "score": 1.0, + "content": "UC Berkeley", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 271, + 150, + 384, + 162 + ], + "spans": [ + { + "bbox": [ + 271, + 150, + 384, + 162 + ], + "score": 1.0, + "content": "Berkeley, CA 94720, USA", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 271, + 161, + 514, + 174 + ], + "spans": [ + { + "bbox": [ + 271, + 161, + 514, + 174 + ], + "score": 1.0, + "content": "{shishirpatil,naman_jain,sheng.s}@berkeley.edu", + "type": "text" + } + ], + "index": 10 + } + ], + "index": 6 + }, + { + "type": "text", + "bbox": [ + 113, + 189, + 321, + 244 + ], + "lines": [ + { + "bbox": [ + 111, + 188, + 322, + 203 + ], + "spans": [ + { + "bbox": [ + 111, + 188, + 322, + 203 + ], + "score": 1.0, + "content": "Matei Zaharia, Ion Stoica, Joseph E. 
Gonzalez", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 199, + 263, + 214 + ], + "spans": [ + { + "bbox": [ + 111, + 199, + 263, + 214 + ], + "score": 1.0, + "content": "Department of Computer Science", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 111, + 209, + 171, + 225 + ], + "spans": [ + { + "bbox": [ + 111, + 209, + 171, + 225 + ], + "score": 1.0, + "content": "UC Berkeley", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 112, + 222, + 225, + 235 + ], + "spans": [ + { + "bbox": [ + 112, + 222, + 225, + 235 + ], + "score": 1.0, + "content": "Berkeley, CA 94720, USA", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 112, + 233, + 308, + 246 + ], + "spans": [ + { + "bbox": [ + 112, + 233, + 308, + 246 + ], + "score": 1.0, + "content": "{matei,istoica,jegonzal}@berkeley.edu", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 13 + }, + { + "type": "title", + "bbox": [ + 283, + 273, + 329, + 286 + ], + "lines": [ + { + "bbox": [ + 281, + 271, + 331, + 288 + ], + "spans": [ + { + "bbox": [ + 281, + 271, + 331, + 288 + ], + "score": 1.0, + "content": "Abstract", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 16 + }, + { + "type": "text", + "bbox": [ + 143, + 299, + 469, + 486 + ], + "lines": [ + { + "bbox": [ + 141, + 299, + 469, + 313 + ], + "spans": [ + { + "bbox": [ + 141, + 299, + 469, + 313 + ], + "score": 1.0, + "content": "Pretraining Large Language Models (LLMs) on large corpora of textual", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 140, + 308, + 470, + 325 + ], + "spans": [ + { + "bbox": [ + 140, + 308, + 470, + 325 + ], + "score": 1.0, + "content": "data is now a standard paradigm. 
When using these LLMs for many", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 141, + 321, + 470, + 335 + ], + "spans": [ + { + "bbox": [ + 141, + 321, + 470, + 335 + ], + "score": 1.0, + "content": "downstream applications, it is common to additionally incorporate new in-", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 140, + 330, + 471, + 347 + ], + "spans": [ + { + "bbox": [ + 140, + 330, + 471, + 347 + ], + "score": 1.0, + "content": "formation into the pretrained model either through RAG-based-prompting,", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 141, + 343, + 470, + 357 + ], + "spans": [ + { + "bbox": [ + 141, + 343, + 470, + 357 + ], + "score": 1.0, + "content": "or finetuning. However, the best methodology to incorporate information", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 141, + 354, + 470, + 367 + ], + "spans": [ + { + "bbox": [ + 141, + 354, + 470, + 367 + ], + "score": 1.0, + "content": "remains an open question. In this paper, we present Retrieval Augmented", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 141, + 365, + 469, + 378 + ], + "spans": [ + { + "bbox": [ + 141, + 365, + 469, + 378 + ], + "score": 1.0, + "content": "Fine Tuning (RAFT), a training recipe which improves the model’s ability", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 141, + 376, + 470, + 389 + ], + "spans": [ + { + "bbox": [ + 141, + 376, + 470, + 389 + ], + "score": 1.0, + "content": "to answer questions in \"open-book\" in-domain settings. 
In training RAFT,", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 141, + 387, + 470, + 400 + ], + "spans": [ + { + "bbox": [ + 141, + 387, + 470, + 400 + ], + "score": 1.0, + "content": "given a question, and a set of retrieved documents, we train the model to", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 141, + 398, + 470, + 412 + ], + "spans": [ + { + "bbox": [ + 141, + 398, + 470, + 412 + ], + "score": 1.0, + "content": "ignore those documents that don’t help in answering the question, which", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 141, + 408, + 469, + 423 + ], + "spans": [ + { + "bbox": [ + 141, + 408, + 469, + 423 + ], + "score": 1.0, + "content": "we call, distractor documents. RAFT accomplishes this by citing verbatim", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 141, + 419, + 470, + 433 + ], + "spans": [ + { + "bbox": [ + 141, + 419, + 470, + 433 + ], + "score": 1.0, + "content": "the right sequence from the relevant document to help answer the question.", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 141, + 430, + 470, + 444 + ], + "spans": [ + { + "bbox": [ + 141, + 430, + 470, + 444 + ], + "score": 1.0, + "content": "This coupled with RAFT’s chain-of-thought-style response helps improve", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 141, + 441, + 469, + 455 + ], + "spans": [ + { + "bbox": [ + 141, + 441, + 469, + 455 + ], + "score": 1.0, + "content": "the model’s ability to reason. 
In domain specific RAG, RAFT consistently", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 141, + 453, + 470, + 467 + ], + "spans": [ + { + "bbox": [ + 141, + 453, + 470, + 467 + ], + "score": 1.0, + "content": "improves the model’s performance across PubMed, HotpotQA, and Gorilla", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 141, + 462, + 470, + 478 + ], + "spans": [ + { + "bbox": [ + 141, + 462, + 470, + 478 + ], + "score": 1.0, + "content": "datasets, presenting a post-training recipe to improve pre-trained LLMs to", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 142, + 475, + 217, + 487 + ], + "spans": [ + { + "bbox": [ + 142, + 475, + 217, + 487 + ], + "score": 1.0, + "content": "in-domain RAG.", + "type": "text" + } + ], + "index": 33 + } + ], + "index": 25 + }, + { + "type": "title", + "bbox": [ + 107, + 508, + 194, + 522 + ], + "lines": [ + { + "bbox": [ + 105, + 508, + 196, + 524 + ], + "spans": [ + { + "bbox": [ + 105, + 508, + 196, + 524 + ], + "score": 1.0, + "content": "1 Introduction", + "type": "text" + } + ], + "index": 34 + } + ], + "index": 34 + }, + { + "type": "text", + "bbox": [ + 107, + 534, + 505, + 645 + ], + "lines": [ + { + "bbox": [ + 105, + 533, + 506, + 549 + ], + "spans": [ + { + "bbox": [ + 105, + 533, + 506, + 549 + ], + "score": 1.0, + "content": "Trained on vast quantities of public data, Large Language Models LLMs have achieved", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 546, + 506, + 560 + ], + "spans": [ + { + "bbox": [ + 105, + 546, + 506, + 560 + ], + "score": 1.0, + "content": "significant advances in a wide range of general knowledge reasoning tasks Brown et al.", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 556, + 505, + 570 + ], + "spans": [ + { + "bbox": [ + 105, + 556, + 505, + 570 + ], + "score": 1.0, + "content": "(2020); Wei et al. (2022). 
However, increasingly LLMs are being employed in specialized", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 566, + 506, + 582 + ], + "spans": [ + { + "bbox": [ + 105, + 566, + 506, + 582 + ], + "score": 1.0, + "content": "domains to support tasks ranging from code completion for specific software frameworks", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 578, + 507, + 593 + ], + "spans": [ + { + "bbox": [ + 105, + 578, + 507, + 593 + ], + "score": 1.0, + "content": "to question answering on specific document collections (e.g., legal or medical documents).", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 590, + 506, + 604 + ], + "spans": [ + { + "bbox": [ + 105, + 590, + 506, + 604 + ], + "score": 1.0, + "content": "In these settings, general knowledge reasoning is less critical and instead the primary goal", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 104, + 600, + 506, + 614 + ], + "spans": [ + { + "bbox": [ + 104, + 600, + 506, + 614 + ], + "score": 1.0, + "content": "is to maximize accuracy based on a given set of documents. 
Indeed, adapting LLMs to the", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 610, + 506, + 626 + ], + "spans": [ + { + "bbox": [ + 105, + 610, + 506, + 626 + ], + "score": 1.0, + "content": "specialized domains (e.g., recent news, enterprise private documents, or program resources", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 622, + 507, + 637 + ], + "spans": [ + { + "bbox": [ + 105, + 622, + 507, + 637 + ], + "score": 1.0, + "content": "constructed after the training cutoff) is essential to many emerging applications (Vu et al.,", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 106, + 634, + 360, + 645 + ], + "spans": [ + { + "bbox": [ + 106, + 634, + 360, + 645 + ], + "score": 1.0, + "content": "2023; Lazaridou et al., 2022) and is the focus of this work.", + "type": "text" + } + ], + "index": 44 + } + ], + "index": 39.5 + }, + { + "type": "text", + "bbox": [ + 108, + 650, + 503, + 673 + ], + "lines": [ + { + "bbox": [ + 106, + 650, + 505, + 663 + ], + "spans": [ + { + "bbox": [ + 106, + 650, + 505, + 663 + ], + "score": 1.0, + "content": "This paper studies the following question – How do we adapt pre-trained LLMs for Retrieval", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 106, + 661, + 330, + 674 + ], + "spans": [ + { + "bbox": [ + 106, + 661, + 330, + 674 + ], + "score": 1.0, + "content": "Augmented Generation (RAG) in specialized domains?", + "type": "text" + } + ], + "index": 46 + } + ], + "index": 45.5 + }, + { + "type": "text", + "bbox": [ + 108, + 678, + 505, + 712 + ], + "lines": [ + { + "bbox": [ + 105, + 678, + 505, + 692 + ], + "spans": [ + { + "bbox": [ + 105, + 678, + 505, + 692 + ], + "score": 1.0, + "content": "When it comes to adapting LLMs to specialized domains, we consider the following two", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 106, + 689, + 506, + 703 + ], + "spans": [ + { + "bbox": [ + 106, + 689, + 506, + 703 + ], + "score": 1.0, + 
"content": "candidates: in-context learning through Retrieval-Augmented Generation (RAG) and super-", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 105, + 700, + 505, + 713 + ], + "spans": [ + { + "bbox": [ + 105, + 700, + 505, + 713 + ], + "score": 1.0, + "content": "vised fine-tuning. RAG based methods allow the LLM to reference the documents when", + "type": "text" + } + ], + "index": 49 + } + ], + "index": 48 + } + ], + "page_idx": 0, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 118, + 721, + 373, + 732 + ], + "lines": [ + { + "bbox": [ + 118, + 720, + 374, + 734 + ], + "spans": [ + { + "bbox": [ + 118, + 720, + 374, + 734 + ], + "score": 1.0, + "content": "βˆ—Corresponding author, personal website: tianjunz.github.io", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 107, + 26, + 316, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "score": 1.0, + "content": "Published as a conference paper at COLM 2024", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 302, + 752, + 308, + 760 + ], + "lines": [ + { + "bbox": [ + 301, + 750, + 309, + 763 + ], + "spans": [ + { + "bbox": [ + 301, + 750, + 309, + 763 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 13, + "width": 8 + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "title", + "bbox": [ + 105, + 78, + 493, + 96 + ], + "lines": [ + { + "bbox": [ + 104, + 77, + 496, + 99 + ], + "spans": [ + { + "bbox": [ + 104, + 77, + 496, + 99 + ], + "score": 1.0, + "content": "RAFT: Adapting Language Model to Domain Specific RAG", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "list", + "bbox": [ + 113, + 117, + 261, + 172 + ], + "lines": [ + { + "bbox": [ + 112, + 115, + 189, + 131 + ], + "spans": [ + { + "bbox": [ + 112, + 115, + 183, + 131 + ], + "score": 
1.0, + "content": "Tianjun Zhang", + "type": "text" + }, + { + "bbox": [ + 182, + 119, + 189, + 122 + ], + "score": 0.873, + "content": "*", + "type": "text" + } + ], + "index": 1, + "is_list_start_line": true + }, + { + "bbox": [ + 111, + 127, + 262, + 143 + ], + "spans": [ + { + "bbox": [ + 111, + 127, + 262, + 143 + ], + "score": 1.0, + "content": "Department of Computer Science", + "type": "text" + } + ], + "index": 3, + "is_list_start_line": true + }, + { + "bbox": [ + 111, + 137, + 171, + 153 + ], + "spans": [ + { + "bbox": [ + 111, + 137, + 171, + 153 + ], + "score": 1.0, + "content": "UC Berkeley", + "type": "text" + } + ], + "index": 5, + "is_list_start_line": true + }, + { + "bbox": [ + 112, + 150, + 224, + 162 + ], + "spans": [ + { + "bbox": [ + 112, + 150, + 224, + 162 + ], + "score": 1.0, + "content": "Berkeley, CA 94720, USA", + "type": "text" + } + ], + "index": 7, + "is_list_start_line": true + }, + { + "bbox": [ + 112, + 161, + 235, + 174 + ], + "spans": [ + { + "bbox": [ + 112, + 161, + 235, + 174 + ], + "score": 1.0, + "content": "{tianjunz}@berkeley.edu", + "type": "text" + } + ], + "index": 9, + "is_list_start_line": true + }, + { + "bbox": [ + 271, + 116, + 462, + 131 + ], + "spans": [ + { + "bbox": [ + 271, + 116, + 462, + 131 + ], + "score": 1.0, + "content": "Shishir G. 
Patil, Naman Jain, Sheng Shen", + "type": "text" + } + ], + "index": 2, + "is_list_start_line": true + }, + { + "bbox": [ + 270, + 128, + 421, + 141 + ], + "spans": [ + { + "bbox": [ + 270, + 128, + 421, + 141 + ], + "score": 1.0, + "content": "Department of Computer Science", + "type": "text" + } + ], + "index": 4, + "is_list_start_line": true + }, + { + "bbox": [ + 270, + 137, + 330, + 152 + ], + "spans": [ + { + "bbox": [ + 270, + 137, + 330, + 152 + ], + "score": 1.0, + "content": "UC Berkeley", + "type": "text" + } + ], + "index": 6, + "is_list_start_line": true + }, + { + "bbox": [ + 271, + 150, + 384, + 162 + ], + "spans": [ + { + "bbox": [ + 271, + 150, + 384, + 162 + ], + "score": 1.0, + "content": "Berkeley, CA 94720, USA", + "type": "text" + } + ], + "index": 8, + "is_list_start_line": true + }, + { + "bbox": [ + 271, + 161, + 514, + 174 + ], + "spans": [ + { + "bbox": [ + 271, + 161, + 514, + 174 + ], + "score": 1.0, + "content": "{shishirpatil,naman_jain,sheng.s}@berkeley.edu", + "type": "text" + } + ], + "index": 10, + "is_list_start_line": true + }, + { + "bbox": [ + 111, + 188, + 322, + 203 + ], + "spans": [ + { + "bbox": [ + 111, + 188, + 322, + 203 + ], + "score": 1.0, + "content": "Matei Zaharia, Ion Stoica, Joseph E. 
Gonzalez", + "type": "text" + } + ], + "index": 11, + "is_list_start_line": true + }, + { + "bbox": [ + 111, + 199, + 263, + 214 + ], + "spans": [ + { + "bbox": [ + 111, + 199, + 263, + 214 + ], + "score": 1.0, + "content": "Department of Computer Science", + "type": "text" + } + ], + "index": 12, + "is_list_start_line": true + }, + { + "bbox": [ + 111, + 209, + 171, + 225 + ], + "spans": [ + { + "bbox": [ + 111, + 209, + 171, + 225 + ], + "score": 1.0, + "content": "UC Berkeley", + "type": "text" + } + ], + "index": 13, + "is_list_start_line": true + }, + { + "bbox": [ + 112, + 222, + 225, + 235 + ], + "spans": [ + { + "bbox": [ + 112, + 222, + 225, + 235 + ], + "score": 1.0, + "content": "Berkeley, CA 94720, USA", + "type": "text" + } + ], + "index": 14, + "is_list_start_line": true + }, + { + "bbox": [ + 112, + 233, + 308, + 246 + ], + "spans": [ + { + "bbox": [ + 112, + 233, + 308, + 246 + ], + "score": 1.0, + "content": "{matei,istoica,jegonzal}@berkeley.edu", + "type": "text" + } + ], + "index": 15, + "is_list_start_line": true + } + ], + "index": 5, + "bbox_fs": [ + 111, + 115, + 262, + 174 + ] + }, + { + "type": "list", + "bbox": [ + 272, + 117, + 514, + 173 + ], + "lines": [], + "index": 6, + "bbox_fs": [ + 270, + 116, + 514, + 174 + ], + "lines_deleted": true + }, + { + "type": "list", + "bbox": [ + 113, + 189, + 321, + 244 + ], + "lines": [], + "index": 13, + "bbox_fs": [ + 111, + 188, + 322, + 246 + ], + "lines_deleted": true + }, + { + "type": "title", + "bbox": [ + 283, + 273, + 329, + 286 + ], + "lines": [ + { + "bbox": [ + 281, + 271, + 331, + 288 + ], + "spans": [ + { + "bbox": [ + 281, + 271, + 331, + 288 + ], + "score": 1.0, + "content": "Abstract", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 16 + }, + { + "type": "text", + "bbox": [ + 143, + 299, + 469, + 486 + ], + "lines": [ + { + "bbox": [ + 141, + 299, + 469, + 313 + ], + "spans": [ + { + "bbox": [ + 141, + 299, + 469, + 313 + ], + "score": 1.0, + "content": "Pretraining 
Large Language Models (LLMs) on large corpora of textual", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 140, + 308, + 470, + 325 + ], + "spans": [ + { + "bbox": [ + 140, + 308, + 470, + 325 + ], + "score": 1.0, + "content": "data is now a standard paradigm. When using these LLMs for many", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 141, + 321, + 470, + 335 + ], + "spans": [ + { + "bbox": [ + 141, + 321, + 470, + 335 + ], + "score": 1.0, + "content": "downstream applications, it is common to additionally incorporate new in-", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 140, + 330, + 471, + 347 + ], + "spans": [ + { + "bbox": [ + 140, + 330, + 471, + 347 + ], + "score": 1.0, + "content": "formation into the pretrained model either through RAG-based-prompting,", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 141, + 343, + 470, + 357 + ], + "spans": [ + { + "bbox": [ + 141, + 343, + 470, + 357 + ], + "score": 1.0, + "content": "or finetuning. However, the best methodology to incorporate information", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 141, + 354, + 470, + 367 + ], + "spans": [ + { + "bbox": [ + 141, + 354, + 470, + 367 + ], + "score": 1.0, + "content": "remains an open question. In this paper, we present Retrieval Augmented", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 141, + 365, + 469, + 378 + ], + "spans": [ + { + "bbox": [ + 141, + 365, + 469, + 378 + ], + "score": 1.0, + "content": "Fine Tuning (RAFT), a training recipe which improves the model’s ability", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 141, + 376, + 470, + 389 + ], + "spans": [ + { + "bbox": [ + 141, + 376, + 470, + 389 + ], + "score": 1.0, + "content": "to answer questions in \"open-book\" in-domain settings. 
In training RAFT,", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 141, + 387, + 470, + 400 + ], + "spans": [ + { + "bbox": [ + 141, + 387, + 470, + 400 + ], + "score": 1.0, + "content": "given a question, and a set of retrieved documents, we train the model to", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 141, + 398, + 470, + 412 + ], + "spans": [ + { + "bbox": [ + 141, + 398, + 470, + 412 + ], + "score": 1.0, + "content": "ignore those documents that don’t help in answering the question, which", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 141, + 408, + 469, + 423 + ], + "spans": [ + { + "bbox": [ + 141, + 408, + 469, + 423 + ], + "score": 1.0, + "content": "we call, distractor documents. RAFT accomplishes this by citing verbatim", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 141, + 419, + 470, + 433 + ], + "spans": [ + { + "bbox": [ + 141, + 419, + 470, + 433 + ], + "score": 1.0, + "content": "the right sequence from the relevant document to help answer the question.", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 141, + 430, + 470, + 444 + ], + "spans": [ + { + "bbox": [ + 141, + 430, + 470, + 444 + ], + "score": 1.0, + "content": "This coupled with RAFT’s chain-of-thought-style response helps improve", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 141, + 441, + 469, + 455 + ], + "spans": [ + { + "bbox": [ + 141, + 441, + 469, + 455 + ], + "score": 1.0, + "content": "the model’s ability to reason. 
In domain specific RAG, RAFT consistently", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 141, + 453, + 470, + 467 + ], + "spans": [ + { + "bbox": [ + 141, + 453, + 470, + 467 + ], + "score": 1.0, + "content": "improves the model’s performance across PubMed, HotpotQA, and Gorilla", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 141, + 462, + 470, + 478 + ], + "spans": [ + { + "bbox": [ + 141, + 462, + 470, + 478 + ], + "score": 1.0, + "content": "datasets, presenting a post-training recipe to improve pre-trained LLMs to", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 142, + 475, + 217, + 487 + ], + "spans": [ + { + "bbox": [ + 142, + 475, + 217, + 487 + ], + "score": 1.0, + "content": "in-domain RAG.", + "type": "text" + } + ], + "index": 33 + } + ], + "index": 25, + "bbox_fs": [ + 140, + 299, + 471, + 487 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 508, + 194, + 522 + ], + "lines": [ + { + "bbox": [ + 105, + 508, + 196, + 524 + ], + "spans": [ + { + "bbox": [ + 105, + 508, + 196, + 524 + ], + "score": 1.0, + "content": "1 Introduction", + "type": "text" + } + ], + "index": 34 + } + ], + "index": 34 + }, + { + "type": "text", + "bbox": [ + 107, + 534, + 505, + 645 + ], + "lines": [ + { + "bbox": [ + 105, + 533, + 506, + 549 + ], + "spans": [ + { + "bbox": [ + 105, + 533, + 506, + 549 + ], + "score": 1.0, + "content": "Trained on vast quantities of public data, Large Language Models LLMs have achieved", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 546, + 506, + 560 + ], + "spans": [ + { + "bbox": [ + 105, + 546, + 506, + 560 + ], + "score": 1.0, + "content": "significant advances in a wide range of general knowledge reasoning tasks Brown et al.", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 556, + 505, + 570 + ], + "spans": [ + { + "bbox": [ + 105, + 556, + 505, + 570 + ], + "score": 1.0, + "content": "(2020); Wei et al. (2022). 
However, increasingly LLMs are being employed in specialized", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 566, + 506, + 582 + ], + "spans": [ + { + "bbox": [ + 105, + 566, + 506, + 582 + ], + "score": 1.0, + "content": "domains to support tasks ranging from code completion for specific software frameworks", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 578, + 507, + 593 + ], + "spans": [ + { + "bbox": [ + 105, + 578, + 507, + 593 + ], + "score": 1.0, + "content": "to question answering on specific document collections (e.g., legal or medical documents).", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 590, + 506, + 604 + ], + "spans": [ + { + "bbox": [ + 105, + 590, + 506, + 604 + ], + "score": 1.0, + "content": "In these settings, general knowledge reasoning is less critical and instead the primary goal", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 104, + 600, + 506, + 614 + ], + "spans": [ + { + "bbox": [ + 104, + 600, + 506, + 614 + ], + "score": 1.0, + "content": "is to maximize accuracy based on a given set of documents. 
Indeed, adapting LLMs to the", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 610, + 506, + 626 + ], + "spans": [ + { + "bbox": [ + 105, + 610, + 506, + 626 + ], + "score": 1.0, + "content": "specialized domains (e.g., recent news, enterprise private documents, or program resources", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 622, + 507, + 637 + ], + "spans": [ + { + "bbox": [ + 105, + 622, + 507, + 637 + ], + "score": 1.0, + "content": "constructed after the training cutoff) is essential to many emerging applications (Vu et al.,", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 106, + 634, + 360, + 645 + ], + "spans": [ + { + "bbox": [ + 106, + 634, + 360, + 645 + ], + "score": 1.0, + "content": "2023; Lazaridou et al., 2022) and is the focus of this work.", + "type": "text" + } + ], + "index": 44 + } + ], + "index": 39.5, + "bbox_fs": [ + 104, + 533, + 507, + 645 + ] + }, + { + "type": "text", + "bbox": [ + 108, + 650, + 503, + 673 + ], + "lines": [ + { + "bbox": [ + 106, + 650, + 505, + 663 + ], + "spans": [ + { + "bbox": [ + 106, + 650, + 505, + 663 + ], + "score": 1.0, + "content": "This paper studies the following question – How do we adapt pre-trained LLMs for Retrieval", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 106, + 661, + 330, + 674 + ], + "spans": [ + { + "bbox": [ + 106, + 661, + 330, + 674 + ], + "score": 1.0, + "content": "Augmented Generation (RAG) in specialized domains?", + "type": "text" + } + ], + "index": 46 + } + ], + "index": 45.5, + "bbox_fs": [ + 106, + 650, + 505, + 674 + ] + }, + { + "type": "text", + "bbox": [ + 108, + 678, + 505, + 712 + ], + "lines": [ + { + "bbox": [ + 105, + 678, + 505, + 692 + ], + "spans": [ + { + "bbox": [ + 105, + 678, + 505, + 692 + ], + "score": 1.0, + "content": "When it comes to adapting LLMs to specialized domains, we consider the following two", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 106, + 689, + 
506, + 703 + ], + "spans": [ + { + "bbox": [ + 106, + 689, + 506, + 703 + ], + "score": 1.0, + "content": "candidates: in-context learning through Retrieval-Augmented Generation (RAG) and super-", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 105, + 700, + 505, + 713 + ], + "spans": [ + { + "bbox": [ + 105, + 700, + 505, + 713 + ], + "score": 1.0, + "content": "vised fine-tuning. RAG based methods allow the LLM to reference the documents when", + "type": "text" + } + ], + "index": 49 + }, + { + "bbox": [ + 104, + 260, + 505, + 276 + ], + "spans": [ + { + "bbox": [ + 104, + 260, + 505, + 276 + ], + "score": 1.0, + "content": "answering questions. However, RAG based in-context learning methods fail to leverage", + "type": "text", + "cross_page": true + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 271, + 506, + 286 + ], + "spans": [ + { + "bbox": [ + 105, + 271, + 506, + 286 + ], + "score": 1.0, + "content": "the learning opportunity afforded by the fixed domain setting and early access to the test", + "type": "text", + "cross_page": true + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 282, + 505, + 297 + ], + "spans": [ + { + "bbox": [ + 105, + 282, + 505, + 297 + ], + "score": 1.0, + "content": "documents. Alternatively, supervised fine-tuning offers the opportunity to learn more", + "type": "text", + "cross_page": true + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 293, + 506, + 308 + ], + "spans": [ + { + "bbox": [ + 104, + 293, + 506, + 308 + ], + "score": 1.0, + "content": "general patterns in the documents and better align to end tasks and user preferences Zhou", + "type": "text", + "cross_page": true + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 304, + 506, + 319 + ], + "spans": [ + { + "bbox": [ + 104, + 304, + 506, + 319 + ], + "score": 1.0, + "content": "et al. (2023). 
However, existing fine-tuning based approaches either fail to leverage the", + "type": "text", + "cross_page": true + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 315, + 506, + 330 + ], + "spans": [ + { + "bbox": [ + 105, + 315, + 506, + 330 + ], + "score": 1.0, + "content": "documents at test time (don’t incorporate RAG) or fail to account for the imperfections in", + "type": "text", + "cross_page": true + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 325, + 254, + 342 + ], + "spans": [ + { + "bbox": [ + 105, + 325, + 254, + 342 + ], + "score": 1.0, + "content": "retrieval process during training.", + "type": "text", + "cross_page": true + } + ], + "index": 16 + } + ], + "index": 48, + "bbox_fs": [ + 105, + 678, + 506, + 713 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 81, + 502, + 152 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 107, + 81, + 502, + 152 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 502, + 152 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 502, + 152 + ], + "score": 0.96, + "type": "image", + "image_path": "f0db1ef1b057bc1bc9295a8166a6d296edcd745858acff236281a68693676d87.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 107, + 81, + 502, + 104.66666666666667 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 107, + 104.66666666666667, + 502, + 128.33333333333334 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 107, + 128.33333333333334, + 502, + 152.0 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 161, + 505, + 240 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 160, + 506, + 176 + ], + "spans": [ + { + "bbox": [ + 105, + 160, + 506, + 176 + ], + "score": 1.0, + "content": "Figure 1: How best to prepare for an Exam?(a) Fine-tuning based approaches implement", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 172, + 506, + 186 + ], + "spans": 
[ + { + "bbox": [ + 106, + 172, + 506, + 186 + ], + "score": 1.0, + "content": "\"studying\" by either directly \"memorizing\" the input documents or answering practice", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 183, + 506, + 196 + ], + "spans": [ + { + "bbox": [ + 105, + 183, + 506, + 196 + ], + "score": 1.0, + "content": "QA without referencing the documents. (b) Alternatively, in-context retrieval methods fail", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 194, + 506, + 209 + ], + "spans": [ + { + "bbox": [ + 105, + 194, + 506, + 209 + ], + "score": 1.0, + "content": "to leverage the learning opportunity afforded by the fixed domain and are equivalent to", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 205, + 505, + 219 + ], + "spans": [ + { + "bbox": [ + 105, + 205, + 505, + 219 + ], + "score": 1.0, + "content": "taking an open-book exam without studying. In contrast, our approach (c) RAFT leverages", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 216, + 505, + 230 + ], + "spans": [ + { + "bbox": [ + 105, + 216, + 505, + 230 + ], + "score": 1.0, + "content": "fine-tuning with question-answer pairs while referencing the documents in a simulated", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 226, + 505, + 243 + ], + "spans": [ + { + "bbox": [ + 105, + 226, + 505, + 243 + ], + "score": 1.0, + "content": "imperfect retrieval setting β€” thereby effectively preparing for the open-book exam setting.", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 6 + } + ], + "index": 3.5 + }, + { + "type": "text", + "bbox": [ + 107, + 261, + 505, + 339 + ], + "lines": [ + { + "bbox": [ + 104, + 260, + 505, + 276 + ], + "spans": [ + { + "bbox": [ + 104, + 260, + 505, + 276 + ], + "score": 1.0, + "content": "answering questions. 
However, RAG based in-context learning methods fail to leverage", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 271, + 506, + 286 + ], + "spans": [ + { + "bbox": [ + 105, + 271, + 506, + 286 + ], + "score": 1.0, + "content": "the learning opportunity afforded by the fixed domain setting and early access to the test", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 282, + 505, + 297 + ], + "spans": [ + { + "bbox": [ + 105, + 282, + 505, + 297 + ], + "score": 1.0, + "content": "documents. Alternatively, supervised fine-tuning offers the opportunity to learn more", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 293, + 506, + 308 + ], + "spans": [ + { + "bbox": [ + 104, + 293, + 506, + 308 + ], + "score": 1.0, + "content": "general patterns in the documents and better align to end tasks and user preferences Zhou", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 304, + 506, + 319 + ], + "spans": [ + { + "bbox": [ + 104, + 304, + 506, + 319 + ], + "score": 1.0, + "content": "et al. (2023). However, existing fine-tuning based approaches either fail to leverage the", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 315, + 506, + 330 + ], + "spans": [ + { + "bbox": [ + 105, + 315, + 506, + 330 + ], + "score": 1.0, + "content": "documents at test time (don’t incorporate RAG) or fail to account for the imperfections in", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 325, + 254, + 342 + ], + "spans": [ + { + "bbox": [ + 105, + 325, + 254, + 342 + ], + "score": 1.0, + "content": "retrieval process during training.", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 13 + }, + { + "type": "text", + "bbox": [ + 107, + 344, + 505, + 411 + ], + "lines": [ + { + "bbox": [ + 106, + 344, + 505, + 357 + ], + "spans": [ + { + "bbox": [ + 106, + 344, + 505, + 357 + ], + "score": 1.0, + "content": "We can draw an analogy to an open-book exam. 
Existing in-context retrieval methods are", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 106, + 354, + 506, + 369 + ], + "spans": [ + { + "bbox": [ + 106, + 354, + 506, + 369 + ], + "score": 1.0, + "content": "equivalent to taking an open-book exam without studying. Alternatively, existing fine-", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 363, + 507, + 382 + ], + "spans": [ + { + "bbox": [ + 104, + 363, + 507, + 382 + ], + "score": 1.0, + "content": "tuning based approaches implement β€œstudying\" by either directly β€œmemorizing\" Xiong", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 376, + 505, + 390 + ], + "spans": [ + { + "bbox": [ + 105, + 376, + 505, + 390 + ], + "score": 1.0, + "content": "et al. (2023) the input documents or answering practice questions Wang et al. (2022) without", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 388, + 506, + 402 + ], + "spans": [ + { + "bbox": [ + 105, + 388, + 506, + 402 + ], + "score": 1.0, + "content": "referencing the documents. While these approaches leverage in-domain learning they fail to", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 399, + 335, + 413 + ], + "spans": [ + { + "bbox": [ + 105, + 399, + 335, + 413 + ], + "score": 1.0, + "content": "prepare for the open-book nature of the test setting.", + "type": "text" + } + ], + "index": 22 + } + ], + "index": 19.5 + }, + { + "type": "text", + "bbox": [ + 107, + 415, + 505, + 526 + ], + "lines": [ + { + "bbox": [ + 104, + 413, + 507, + 431 + ], + "spans": [ + { + "bbox": [ + 104, + 413, + 507, + 431 + ], + "score": 1.0, + "content": "In this paper, we study how to combine instruction fine-tuning (IFT) with retrieval aug-", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 426, + 505, + 440 + ], + "spans": [ + { + "bbox": [ + 105, + 426, + 505, + 440 + ], + "score": 1.0, + "content": "mented generation (RAG). 
We propose a novel adaptation strategy – Retrieval-Augmented", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 437, + 506, + 451 + ], + "spans": [ + { + "bbox": [ + 105, + 437, + 506, + 451 + ], + "score": 1.0, + "content": "Fine Tuning (RAFT). RAFT specifically addresses the challenge of fine-tuning LLMs to both", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 104, + 447, + 507, + 463 + ], + "spans": [ + { + "bbox": [ + 104, + 447, + 507, + 463 + ], + "score": 1.0, + "content": "incorporate domain knowledge while also improving in-domain RAG performance. RAFT", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 104, + 457, + 507, + 475 + ], + "spans": [ + { + "bbox": [ + 104, + 457, + 507, + 475 + ], + "score": 1.0, + "content": "aims to not only enable models to learn domain-specific knowledge through fine-tuning,", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 470, + 506, + 484 + ], + "spans": [ + { + "bbox": [ + 105, + 470, + 506, + 484 + ], + "score": 1.0, + "content": "but also to ensure robustness against distracting retrieved information. This is achieved", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 481, + 506, + 495 + ], + "spans": [ + { + "bbox": [ + 105, + 481, + 506, + 495 + ], + "score": 1.0, + "content": "by training the models to understand the dynamics between the question (prompt), the", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 492, + 506, + 506 + ], + "spans": [ + { + "bbox": [ + 105, + 492, + 506, + 506 + ], + "score": 1.0, + "content": "domain-specific documents retrieved, and the right answer. 
Going back to our analogy to", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 503, + 505, + 518 + ], + "spans": [ + { + "bbox": [ + 105, + 503, + 505, + 518 + ], + "score": 1.0, + "content": "the open book exam, our approach is analogous to studying for an open-book exam by", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 515, + 363, + 527 + ], + "spans": [ + { + "bbox": [ + 105, + 515, + 363, + 527 + ], + "score": 1.0, + "content": "recognizing relevant, and irrelevant retrieved documents.", + "type": "text" + } + ], + "index": 32 + } + ], + "index": 27.5 + }, + { + "type": "text", + "bbox": [ + 106, + 531, + 506, + 631 + ], + "lines": [ + { + "bbox": [ + 105, + 530, + 506, + 545 + ], + "spans": [ + { + "bbox": [ + 105, + 530, + 434, + 545 + ], + "score": 1.0, + "content": "In RAFT, we train the model to answer the question (Q) from Document(s)", + "type": "text" + }, + { + "bbox": [ + 434, + 532, + 453, + 542 + ], + "score": 0.79, + "content": "( \\mathrm { D ^ { * } } )", + "type": "inline_equation" + }, + { + "bbox": [ + 453, + 530, + 506, + 545 + ], + "score": 1.0, + "content": "to generate", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 541, + 505, + 555 + ], + "spans": [ + { + "bbox": [ + 105, + 541, + 142, + 555 + ], + "score": 1.0, + "content": "answer", + "type": "text" + }, + { + "bbox": [ + 142, + 542, + 162, + 554 + ], + "score": 0.8, + "content": "( \\mathrm { A } ^ { * } )", + "type": "inline_equation" + }, + { + "bbox": [ + 163, + 541, + 197, + 555 + ], + "score": 1.0, + "content": ", where", + "type": "text" + }, + { + "bbox": [ + 197, + 542, + 211, + 553 + ], + "score": 0.8, + "content": "\\mathsf { A } ^ { * }", + "type": "inline_equation" + }, + { + "bbox": [ + 212, + 541, + 505, + 555 + ], + "score": 1.0, + "content": "includes chain-of-thought reasoning Wei et al. 
(2022); Anthropic", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 552, + 506, + 567 + ], + "spans": [ + { + "bbox": [ + 105, + 552, + 338, + 567 + ], + "score": 1.0, + "content": "(2023), and in the presence of distractor documents", + "type": "text" + }, + { + "bbox": [ + 338, + 554, + 357, + 565 + ], + "score": 0.88, + "content": "( D _ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 357, + 552, + 506, + 567 + ], + "score": 1.0, + "content": ". We explain the methodology in", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 104, + 562, + 506, + 578 + ], + "spans": [ + { + "bbox": [ + 104, + 562, + 435, + 578 + ], + "score": 1.0, + "content": "Section 3 and analyze the sensitivity to the number of distractor documents", + "type": "text" + }, + { + "bbox": [ + 436, + 564, + 448, + 576 + ], + "score": 0.64, + "content": "( k )", + "type": "inline_equation" + }, + { + "bbox": [ + 448, + 562, + 506, + 578 + ], + "score": 1.0, + "content": "at train- and", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 574, + 507, + 589 + ], + "spans": [ + { + "bbox": [ + 105, + 574, + 507, + 589 + ], + "score": 1.0, + "content": "test- time in Section 5. RAFT consistently outperforms Supervised-finetuning both with-", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 585, + 507, + 600 + ], + "spans": [ + { + "bbox": [ + 105, + 585, + 507, + 600 + ], + "score": 1.0, + "content": "and without- RAG across PubMed Dernoncourt & Lee (2017), HotPot QA Yang et al. (2018),", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 596, + 507, + 611 + ], + "spans": [ + { + "bbox": [ + 105, + 596, + 507, + 611 + ], + "score": 1.0, + "content": "and HuggingFace Hub, Torch Hub, and Tensorflow Hub Gorilla datasets Patil et al. 
(2023),", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 104, + 608, + 507, + 621 + ], + "spans": [ + { + "bbox": [ + 104, + 608, + 507, + 621 + ], + "score": 1.0, + "content": "presenting a novel, yet simple technique to improve pre-trained LLMs for in-domain RAG.", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 618, + 421, + 632 + ], + "spans": [ + { + "bbox": [ + 105, + 618, + 421, + 632 + ], + "score": 1.0, + "content": "Our code is available at https://github.com/ShishirPatil/gorilla.", + "type": "text" + } + ], + "index": 41 + } + ], + "index": 37 + }, + { + "type": "title", + "bbox": [ + 108, + 647, + 275, + 661 + ], + "lines": [ + { + "bbox": [ + 104, + 645, + 276, + 664 + ], + "spans": [ + { + "bbox": [ + 104, + 645, + 276, + 664 + ], + "score": 1.0, + "content": "2 LLMs for Open-Book Exam", + "type": "text" + } + ], + "index": 42 + } + ], + "index": 42 + }, + { + "type": "text", + "bbox": [ + 107, + 673, + 504, + 696 + ], + "lines": [ + { + "bbox": [ + 105, + 672, + 505, + 687 + ], + "spans": [ + { + "bbox": [ + 105, + 672, + 505, + 687 + ], + "score": 1.0, + "content": "To understand our goal better, we expand on our analogy between training an LLM with", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 106, + 684, + 327, + 698 + ], + "spans": [ + { + "bbox": [ + 106, + 684, + 327, + 698 + ], + "score": 1.0, + "content": "the real-world setting of prepararing for an exam.", + "type": "text" + } + ], + "index": 44 + } + ], + "index": 43.5 + }, + { + "type": "text", + "bbox": [ + 107, + 709, + 504, + 732 + ], + "lines": [ + { + "bbox": [ + 106, + 709, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 106, + 709, + 505, + 722 + ], + "score": 1.0, + "content": "Closed-Book Exam A closed book exam often refers to the scenario where the LLMs do", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 718, + 506, + 735 + ], + "spans": [ + { + "bbox": [ + 105, + 718, + 506, + 735 + ], + "score": 
1.0, + "content": "not have access to any additional documents or references to answer the questions during", + "type": "text" + } + ], + "index": 46 + } + ], + "index": 45.5 + } + ], + "page_idx": 1, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 106, + 27, + 316, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "score": 1.0, + "content": "Published as a conference paper at COLM 2024", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 302, + 751, + 308, + 760 + ], + "lines": [ + { + "bbox": [ + 302, + 750, + 310, + 763 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 310, + 763 + ], + "score": 1.0, + "content": "2", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 81, + 502, + 152 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 107, + 81, + 502, + 152 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 502, + 152 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 502, + 152 + ], + "score": 0.96, + "type": "image", + "image_path": "f0db1ef1b057bc1bc9295a8166a6d296edcd745858acff236281a68693676d87.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 107, + 81, + 502, + 104.66666666666667 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 107, + 104.66666666666667, + 502, + 128.33333333333334 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 107, + 128.33333333333334, + 502, + 152.0 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 161, + 505, + 240 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 160, + 506, + 176 + ], + "spans": [ + { + "bbox": [ + 105, + 160, + 506, + 176 + ], + "score": 1.0, + "content": "Figure 1: How best to prepare for an Exam?(a) Fine-tuning based approaches implement", + "type": "text" + } + ], + "index": 3 + }, 
+ { + "bbox": [ + 106, + 172, + 506, + 186 + ], + "spans": [ + { + "bbox": [ + 106, + 172, + 506, + 186 + ], + "score": 1.0, + "content": "\"studying\" by either directly \"memorizing\" the input documents or answering practice", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 183, + 506, + 196 + ], + "spans": [ + { + "bbox": [ + 105, + 183, + 506, + 196 + ], + "score": 1.0, + "content": "QA without referencing the documents. (b) Alternatively, in-context retrieval methods fail", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 194, + 506, + 209 + ], + "spans": [ + { + "bbox": [ + 105, + 194, + 506, + 209 + ], + "score": 1.0, + "content": "to leverage the learning opportunity afforded by the fixed domain and are equivalent to", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 205, + 505, + 219 + ], + "spans": [ + { + "bbox": [ + 105, + 205, + 505, + 219 + ], + "score": 1.0, + "content": "taking an open-book exam without studying. 
In contrast, our approach (c) RAFT leverages", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 216, + 505, + 230 + ], + "spans": [ + { + "bbox": [ + 105, + 216, + 505, + 230 + ], + "score": 1.0, + "content": "fine-tuning with question-answer pairs while referencing the documents in a simulated", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 226, + 505, + 243 + ], + "spans": [ + { + "bbox": [ + 105, + 226, + 505, + 243 + ], + "score": 1.0, + "content": "imperfect retrieval setting β€” thereby effectively preparing for the open-book exam setting.", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 6 + } + ], + "index": 3.5 + }, + { + "type": "text", + "bbox": [ + 107, + 261, + 505, + 339 + ], + "lines": [], + "index": 13, + "bbox_fs": [ + 104, + 260, + 506, + 342 + ], + "lines_deleted": true + }, + { + "type": "text", + "bbox": [ + 107, + 344, + 505, + 411 + ], + "lines": [ + { + "bbox": [ + 106, + 344, + 505, + 357 + ], + "spans": [ + { + "bbox": [ + 106, + 344, + 505, + 357 + ], + "score": 1.0, + "content": "We can draw an analogy to an open-book exam. Existing in-context retrieval methods are", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 106, + 354, + 506, + 369 + ], + "spans": [ + { + "bbox": [ + 106, + 354, + 506, + 369 + ], + "score": 1.0, + "content": "equivalent to taking an open-book exam without studying. Alternatively, existing fine-", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 363, + 507, + 382 + ], + "spans": [ + { + "bbox": [ + 104, + 363, + 507, + 382 + ], + "score": 1.0, + "content": "tuning based approaches implement β€œstudying\" by either directly β€œmemorizing\" Xiong", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 376, + 505, + 390 + ], + "spans": [ + { + "bbox": [ + 105, + 376, + 505, + 390 + ], + "score": 1.0, + "content": "et al. (2023) the input documents or answering practice questions Wang et al. 
(2022) without", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 388, + 506, + 402 + ], + "spans": [ + { + "bbox": [ + 105, + 388, + 506, + 402 + ], + "score": 1.0, + "content": "referencing the documents. While these approaches leverage in-domain learning they fail to", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 399, + 335, + 413 + ], + "spans": [ + { + "bbox": [ + 105, + 399, + 335, + 413 + ], + "score": 1.0, + "content": "prepare for the open-book nature of the test setting.", + "type": "text" + } + ], + "index": 22 + } + ], + "index": 19.5, + "bbox_fs": [ + 104, + 344, + 507, + 413 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 415, + 505, + 526 + ], + "lines": [ + { + "bbox": [ + 104, + 413, + 507, + 431 + ], + "spans": [ + { + "bbox": [ + 104, + 413, + 507, + 431 + ], + "score": 1.0, + "content": "In this paper, we study how to combine instruction fine-tuning (IFT) with retrieval aug-", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 426, + 505, + 440 + ], + "spans": [ + { + "bbox": [ + 105, + 426, + 505, + 440 + ], + "score": 1.0, + "content": "mented generation (RAG). We propose a novel adaptation strategy – Retrieval-Augmented", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 437, + 506, + 451 + ], + "spans": [ + { + "bbox": [ + 105, + 437, + 506, + 451 + ], + "score": 1.0, + "content": "Fine Tuning (RAFT). RAFT specifically addresses the challenge of fine-tuning LLMs to both", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 104, + 447, + 507, + 463 + ], + "spans": [ + { + "bbox": [ + 104, + 447, + 507, + 463 + ], + "score": 1.0, + "content": "incorporate domain knowledge while also improving in-domain RAG performance. 
RAFT", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 104, + 457, + 507, + 475 + ], + "spans": [ + { + "bbox": [ + 104, + 457, + 507, + 475 + ], + "score": 1.0, + "content": "aims to not only enable models to learn domain-specific knowledge through fine-tuning,", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 470, + 506, + 484 + ], + "spans": [ + { + "bbox": [ + 105, + 470, + 506, + 484 + ], + "score": 1.0, + "content": "but also to ensure robustness against distracting retrieved information. This is achieved", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 481, + 506, + 495 + ], + "spans": [ + { + "bbox": [ + 105, + 481, + 506, + 495 + ], + "score": 1.0, + "content": "by training the models to understand the dynamics between the question (prompt), the", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 492, + 506, + 506 + ], + "spans": [ + { + "bbox": [ + 105, + 492, + 506, + 506 + ], + "score": 1.0, + "content": "domain-specific documents retrieved, and the right answer. 
Going back to our analogy to", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 503, + 505, + 518 + ], + "spans": [ + { + "bbox": [ + 105, + 503, + 505, + 518 + ], + "score": 1.0, + "content": "the open book exam, our approach is analogous to studying for an open-book exam by", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 515, + 363, + 527 + ], + "spans": [ + { + "bbox": [ + 105, + 515, + 363, + 527 + ], + "score": 1.0, + "content": "recognizing relevant, and irrelevant retrieved documents.", + "type": "text" + } + ], + "index": 32 + } + ], + "index": 27.5, + "bbox_fs": [ + 104, + 413, + 507, + 527 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 531, + 506, + 631 + ], + "lines": [ + { + "bbox": [ + 105, + 530, + 506, + 545 + ], + "spans": [ + { + "bbox": [ + 105, + 530, + 434, + 545 + ], + "score": 1.0, + "content": "In RAFT, we train the model to answer the question (Q) from Document(s)", + "type": "text" + }, + { + "bbox": [ + 434, + 532, + 453, + 542 + ], + "score": 0.79, + "content": "( \\mathrm { D ^ { * } } )", + "type": "inline_equation" + }, + { + "bbox": [ + 453, + 530, + 506, + 545 + ], + "score": 1.0, + "content": "to generate", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 541, + 505, + 555 + ], + "spans": [ + { + "bbox": [ + 105, + 541, + 142, + 555 + ], + "score": 1.0, + "content": "answer", + "type": "text" + }, + { + "bbox": [ + 142, + 542, + 162, + 554 + ], + "score": 0.8, + "content": "( \\mathrm { A } ^ { * } )", + "type": "inline_equation" + }, + { + "bbox": [ + 163, + 541, + 197, + 555 + ], + "score": 1.0, + "content": ", where", + "type": "text" + }, + { + "bbox": [ + 197, + 542, + 211, + 553 + ], + "score": 0.8, + "content": "\\mathsf { A } ^ { * }", + "type": "inline_equation" + }, + { + "bbox": [ + 212, + 541, + 505, + 555 + ], + "score": 1.0, + "content": "includes chain-of-thought reasoning Wei et al. 
(2022); Anthropic", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 552, + 506, + 567 + ], + "spans": [ + { + "bbox": [ + 105, + 552, + 338, + 567 + ], + "score": 1.0, + "content": "(2023), and in the presence of distractor documents", + "type": "text" + }, + { + "bbox": [ + 338, + 554, + 357, + 565 + ], + "score": 0.88, + "content": "( D _ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 357, + 552, + 506, + 567 + ], + "score": 1.0, + "content": ". We explain the methodology in", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 104, + 562, + 506, + 578 + ], + "spans": [ + { + "bbox": [ + 104, + 562, + 435, + 578 + ], + "score": 1.0, + "content": "Section 3 and analyze the sensitivity to the number of distractor documents", + "type": "text" + }, + { + "bbox": [ + 436, + 564, + 448, + 576 + ], + "score": 0.64, + "content": "( k )", + "type": "inline_equation" + }, + { + "bbox": [ + 448, + 562, + 506, + 578 + ], + "score": 1.0, + "content": "at train- and", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 574, + 507, + 589 + ], + "spans": [ + { + "bbox": [ + 105, + 574, + 507, + 589 + ], + "score": 1.0, + "content": "test- time in Section 5. RAFT consistently outperforms Supervised-finetuning both with-", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 585, + 507, + 600 + ], + "spans": [ + { + "bbox": [ + 105, + 585, + 507, + 600 + ], + "score": 1.0, + "content": "and without- RAG across PubMed Dernoncourt & Lee (2017), HotPot QA Yang et al. (2018),", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 596, + 507, + 611 + ], + "spans": [ + { + "bbox": [ + 105, + 596, + 507, + 611 + ], + "score": 1.0, + "content": "and HuggingFace Hub, Torch Hub, and Tensorflow Hub Gorilla datasets Patil et al. 
(2023),", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 104, + 608, + 507, + 621 + ], + "spans": [ + { + "bbox": [ + 104, + 608, + 507, + 621 + ], + "score": 1.0, + "content": "presenting a novel, yet simple technique to improve pre-trained LLMs for in-domain RAG.", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 618, + 421, + 632 + ], + "spans": [ + { + "bbox": [ + 105, + 618, + 421, + 632 + ], + "score": 1.0, + "content": "Our code is available at https://github.com/ShishirPatil/gorilla.", + "type": "text" + } + ], + "index": 41 + } + ], + "index": 37, + "bbox_fs": [ + 104, + 530, + 507, + 632 + ] + }, + { + "type": "title", + "bbox": [ + 108, + 647, + 275, + 661 + ], + "lines": [ + { + "bbox": [ + 104, + 645, + 276, + 664 + ], + "spans": [ + { + "bbox": [ + 104, + 645, + 276, + 664 + ], + "score": 1.0, + "content": "2 LLMs for Open-Book Exam", + "type": "text" + } + ], + "index": 42 + } + ], + "index": 42 + }, + { + "type": "text", + "bbox": [ + 107, + 673, + 504, + 696 + ], + "lines": [ + { + "bbox": [ + 105, + 672, + 505, + 687 + ], + "spans": [ + { + "bbox": [ + 105, + 672, + 505, + 687 + ], + "score": 1.0, + "content": "To understand our goal better, we expand on our analogy between training an LLM with", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 106, + 684, + 327, + 698 + ], + "spans": [ + { + "bbox": [ + 106, + 684, + 327, + 698 + ], + "score": 1.0, + "content": "the real-world setting of prepararing for an exam.", + "type": "text" + } + ], + "index": 44 + } + ], + "index": 43.5, + "bbox_fs": [ + 105, + 672, + 505, + 698 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 709, + 504, + 732 + ], + "lines": [ + { + "bbox": [ + 106, + 709, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 106, + 709, + 505, + 722 + ], + "score": 1.0, + "content": "Closed-Book Exam A closed book exam often refers to the scenario where the LLMs do", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 
718, + 506, + 735 + ], + "spans": [ + { + "bbox": [ + 105, + 718, + 506, + 735 + ], + "score": 1.0, + "content": "not have access to any additional documents or references to answer the questions during", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 106, + 284, + 506, + 298 + ], + "spans": [ + { + "bbox": [ + 106, + 284, + 506, + 298 + ], + "score": 1.0, + "content": "the exam. For LLMs, this is equivalent to the scenario, for example, in which the LLM is", + "type": "text", + "cross_page": true + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 294, + 505, + 309 + ], + "spans": [ + { + "bbox": [ + 105, + 294, + 505, + 309 + ], + "score": 1.0, + "content": "used as a chatbot. In this scenario the LLM draws from the knowledge baked in during", + "type": "text", + "cross_page": true + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 307, + 425, + 320 + ], + "spans": [ + { + "bbox": [ + 105, + 307, + 425, + 320 + ], + "score": 1.0, + "content": "pre-training and supervised-finetuning to respond to the users’ prompt.", + "type": "text", + "cross_page": true + } + ], + "index": 10 + } + ], + "index": 45.5, + "bbox_fs": [ + 105, + 709, + 506, + 735 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "image", + "bbox": [ + 146, + 81, + 465, + 198 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 146, + 81, + 465, + 198 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 146, + 81, + 465, + 198 + ], + "spans": [ + { + "bbox": [ + 146, + 81, + 465, + 198 + ], + "score": 0.966, + "type": "image", + "image_path": "d54b49279897f74e204c908fc173727448ff6a0c168d92f2063727d1a38456d9.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 146, + 81, + 465, + 120.0 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 146, + 120.0, + 465, + 159.0 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 146, + 159.0, + 465, + 198.0 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 
206, + 506, + 262 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 106, + 205, + 506, + 219 + ], + "spans": [ + { + "bbox": [ + 106, + 205, + 506, + 219 + ], + "score": 1.0, + "content": "Figure 2: Overview of our RAFT method. The top-left figure depicts our approach of", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 217, + 506, + 231 + ], + "spans": [ + { + "bbox": [ + 106, + 217, + 506, + 231 + ], + "score": 1.0, + "content": "adapting LLMs to reading solution from a set of positive and distractor documents in", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 227, + 506, + 242 + ], + "spans": [ + { + "bbox": [ + 105, + 227, + 506, + 242 + ], + "score": 1.0, + "content": "contrast to standard RAG setup where models are trained based on the retriever outputs,", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 239, + 505, + 251 + ], + "spans": [ + { + "bbox": [ + 106, + 239, + 505, + 251 + ], + "score": 1.0, + "content": "which is a mixture of both memorization and reading. At test time, all methods follow the", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 249, + 465, + 264 + ], + "spans": [ + { + "bbox": [ + 105, + 249, + 465, + 264 + ], + "score": 1.0, + "content": "standard RAG setting, provided with a top-k retrieved documents in the context.", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 5 + } + ], + "index": 3.0 + }, + { + "type": "text", + "bbox": [ + 108, + 284, + 505, + 318 + ], + "lines": [ + { + "bbox": [ + 106, + 284, + 506, + 298 + ], + "spans": [ + { + "bbox": [ + 106, + 284, + 506, + 298 + ], + "score": 1.0, + "content": "the exam. For LLMs, this is equivalent to the scenario, for example, in which the LLM is", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 294, + 505, + 309 + ], + "spans": [ + { + "bbox": [ + 105, + 294, + 505, + 309 + ], + "score": 1.0, + "content": "used as a chatbot. 
In this scenario the LLM draws from the knowledge baked in during", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 307, + 425, + 320 + ], + "spans": [ + { + "bbox": [ + 105, + 307, + 425, + 320 + ], + "score": 1.0, + "content": "pre-training and supervised-finetuning to respond to the users’ prompt.", + "type": "text" + } + ], + "index": 10 + } + ], + "index": 9 + }, + { + "type": "text", + "bbox": [ + 107, + 331, + 505, + 420 + ], + "lines": [ + { + "bbox": [ + 106, + 331, + 505, + 345 + ], + "spans": [ + { + "bbox": [ + 106, + 331, + 505, + 345 + ], + "score": 1.0, + "content": "Open Book Exam In contrast, we liken the open-book exam setting to the scenario in", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 342, + 506, + 355 + ], + "spans": [ + { + "bbox": [ + 105, + 342, + 506, + 355 + ], + "score": 1.0, + "content": "which the LLM can refer to external sources of information (e.g., a website or a book chapter).", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 353, + 506, + 367 + ], + "spans": [ + { + "bbox": [ + 105, + 353, + 441, + 367 + ], + "score": 1.0, + "content": "In such scenarios, typically, the LLM is paired with retriever which retrieves", + "type": "text" + }, + { + "bbox": [ + 441, + 354, + 453, + 364 + ], + "score": 0.45, + "content": "^ { \\prime } \\mathbf { k } ^ { \\prime }", + "type": "inline_equation" + }, + { + "bbox": [ + 454, + 353, + 506, + 367 + ], + "score": 1.0, + "content": "documents", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 365, + 506, + 378 + ], + "spans": [ + { + "bbox": [ + 105, + 365, + 506, + 378 + ], + "score": 1.0, + "content": "(or specific segments of the document) which are appended to the users’ prompt. 
It is", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 375, + 506, + 389 + ], + "spans": [ + { + "bbox": [ + 106, + 375, + 506, + 389 + ], + "score": 1.0, + "content": "only through these documents retrieved that the LLM gains access to β€œdomain-specific", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 385, + 506, + 400 + ], + "spans": [ + { + "bbox": [ + 105, + 385, + 506, + 400 + ], + "score": 1.0, + "content": "information”. As a result, we argue that the LLM’s performance in these settings, where it", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 397, + 506, + 411 + ], + "spans": [ + { + "bbox": [ + 105, + 397, + 506, + 411 + ], + "score": 1.0, + "content": "is trained as a general-purpose LLM is largely dependent on the quality of the retriever and", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 409, + 460, + 421 + ], + "spans": [ + { + "bbox": [ + 105, + 409, + 460, + 421 + ], + "score": 1.0, + "content": "how accurately the retriever can identify the most relevant piece of information.", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 14.5 + }, + { + "type": "text", + "bbox": [ + 107, + 433, + 505, + 555 + ], + "lines": [ + { + "bbox": [ + 106, + 433, + 506, + 447 + ], + "spans": [ + { + "bbox": [ + 106, + 433, + 506, + 447 + ], + "score": 1.0, + "content": "Domain-Specific Open-Book Exam In this paper, we focus on the narrower but increas-", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 444, + 506, + 458 + ], + "spans": [ + { + "bbox": [ + 105, + 444, + 506, + 458 + ], + "score": 1.0, + "content": "ingly popular domain than the general open book exam, which we call the domain-specific", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 456, + 506, + 468 + ], + "spans": [ + { + "bbox": [ + 105, + 456, + 506, + 468 + ], + "score": 1.0, + "content": "open-book exam. 
Here, we know apriori the domain in which the LLM will be tested. The", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 466, + 506, + 481 + ], + "spans": [ + { + "bbox": [ + 105, + 466, + 506, + 481 + ], + "score": 1.0, + "content": "LLM can respond to the users’ prompt using use any and all information from this specific", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 106, + 477, + 505, + 491 + ], + "spans": [ + { + "bbox": [ + 106, + 477, + 505, + 491 + ], + "score": 1.0, + "content": "domain, which it has been fine-tuned on. Examples of domain specific examples include", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 488, + 506, + 502 + ], + "spans": [ + { + "bbox": [ + 105, + 488, + 506, + 502 + ], + "score": 1.0, + "content": "enterprise documents, code repositories belonging to an organization, etc. In all these", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 499, + 505, + 513 + ], + "spans": [ + { + "bbox": [ + 105, + 499, + 505, + 513 + ], + "score": 1.0, + "content": "scenarios, the LLM will be used to respond to the questions, whose answers can be found", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 106, + 510, + 505, + 523 + ], + "spans": [ + { + "bbox": [ + 106, + 510, + 505, + 523 + ], + "score": 1.0, + "content": "within a collection of documents. The retrieval technique itself has little to no-impact on the", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 521, + 506, + 535 + ], + "spans": [ + { + "bbox": [ + 105, + 521, + 506, + 535 + ], + "score": 1.0, + "content": "mechanism (though it may impact the accuracy). 
This paper studies the domain-specific", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 106, + 532, + 506, + 546 + ], + "spans": [ + { + "bbox": [ + 106, + 532, + 506, + 546 + ], + "score": 1.0, + "content": "open-book setting and how to adapt a pretrained LLM to this specific domain, including", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 543, + 501, + 557 + ], + "spans": [ + { + "bbox": [ + 105, + 543, + 501, + 557 + ], + "score": 1.0, + "content": "how to make it more robust to a varying number of retrieved documents and distractors.", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 24 + }, + { + "type": "title", + "bbox": [ + 107, + 572, + 158, + 585 + ], + "lines": [ + { + "bbox": [ + 104, + 570, + 160, + 589 + ], + "spans": [ + { + "bbox": [ + 104, + 570, + 160, + 589 + ], + "score": 1.0, + "content": "3 RAFT", + "type": "text" + } + ], + "index": 30 + } + ], + "index": 30 + }, + { + "type": "text", + "bbox": [ + 107, + 598, + 505, + 654 + ], + "lines": [ + { + "bbox": [ + 105, + 598, + 507, + 612 + ], + "spans": [ + { + "bbox": [ + 105, + 598, + 507, + 612 + ], + "score": 1.0, + "content": "In this section, we present RAFT, a novel way of training LLMs for domain-specific open-", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 609, + 505, + 623 + ], + "spans": [ + { + "bbox": [ + 105, + 609, + 505, + 623 + ], + "score": 1.0, + "content": "book exams. We first introduce the classical technique of supervised fine-tuning, followed", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 621, + 505, + 634 + ], + "spans": [ + { + "bbox": [ + 105, + 621, + 505, + 634 + ], + "score": 1.0, + "content": "with the key takeaways from our experiments. 
Then, we introduce RAFT , a modified", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 631, + 506, + 646 + ], + "spans": [ + { + "bbox": [ + 105, + 631, + 506, + 646 + ], + "score": 1.0, + "content": "version of general instruction tuning. Lastly, we provide an overview of the experiments to", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 643, + 228, + 656 + ], + "spans": [ + { + "bbox": [ + 105, + 643, + 228, + 656 + ], + "score": 1.0, + "content": "expect in the later sections.", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 33 + }, + { + "type": "title", + "bbox": [ + 107, + 659, + 213, + 672 + ], + "lines": [ + { + "bbox": [ + 105, + 656, + 215, + 676 + ], + "spans": [ + { + "bbox": [ + 105, + 656, + 215, + 676 + ], + "score": 1.0, + "content": "Supervised Finetuning", + "type": "text" + } + ], + "index": 36 + } + ], + "index": 36 + }, + { + "type": "text", + "bbox": [ + 107, + 676, + 504, + 732 + ], + "lines": [ + { + "bbox": [ + 106, + 676, + 506, + 689 + ], + "spans": [ + { + "bbox": [ + 106, + 676, + 506, + 689 + ], + "score": 1.0, + "content": "Consider the supervised fine-tuning (SFT) setting for a Question-Answer dataset. 
The", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 685, + 506, + 702 + ], + "spans": [ + { + "bbox": [ + 105, + 685, + 259, + 702 + ], + "score": 1.0, + "content": "formulation consists of the Dataset", + "type": "text" + }, + { + "bbox": [ + 259, + 688, + 275, + 699 + ], + "score": 0.44, + "content": "( \\bar { D } )", + "type": "inline_equation" + }, + { + "bbox": [ + 275, + 685, + 506, + 702 + ], + "score": 1.0, + "content": "from which a set of Question (Q) and corresponding", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 698, + 505, + 711 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 141, + 711 + ], + "score": 1.0, + "content": "answer", + "type": "text" + }, + { + "bbox": [ + 141, + 699, + 157, + 710 + ], + "score": 0.49, + "content": "( A )", + "type": "inline_equation" + }, + { + "bbox": [ + 157, + 698, + 505, + 711 + ], + "score": 1.0, + "content": "pairs are derived or already available. In the classical SFT setting, the model is", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 106, + 710, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 106, + 710, + 505, + 722 + ], + "score": 1.0, + "content": "trained to improve it’s ability to answer the questions based on it’s knowledge - obtained", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 719, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 105, + 719, + 506, + 734 + ], + "score": 1.0, + "content": "either during pre-training, or during the SFT training phase. 
The model so trained can also", + "type": "text" + } + ], + "index": 41 + } + ], + "index": 39 + } + ], + "page_idx": 2, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 106, + 27, + 316, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "score": 1.0, + "content": "Published as a conference paper at COLM 2024", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 302, + 751, + 309, + 760 + ], + "lines": [ + { + "bbox": [ + 301, + 750, + 310, + 762 + ], + "spans": [ + { + "bbox": [ + 301, + 750, + 310, + 762 + ], + "score": 1.0, + "content": "3", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "image", + "bbox": [ + 146, + 81, + 465, + 198 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 146, + 81, + 465, + 198 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 146, + 81, + 465, + 198 + ], + "spans": [ + { + "bbox": [ + 146, + 81, + 465, + 198 + ], + "score": 0.966, + "type": "image", + "image_path": "d54b49279897f74e204c908fc173727448ff6a0c168d92f2063727d1a38456d9.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 146, + 81, + 465, + 120.0 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 146, + 120.0, + 465, + 159.0 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 146, + 159.0, + 465, + 198.0 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 206, + 506, + 262 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 106, + 205, + 506, + 219 + ], + "spans": [ + { + "bbox": [ + 106, + 205, + 506, + 219 + ], + "score": 1.0, + "content": "Figure 2: Overview of our RAFT method. 
The top-left figure depicts our approach of", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 217, + 506, + 231 + ], + "spans": [ + { + "bbox": [ + 106, + 217, + 506, + 231 + ], + "score": 1.0, + "content": "adapting LLMs to reading solution from a set of positive and distractor documents in", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 227, + 506, + 242 + ], + "spans": [ + { + "bbox": [ + 105, + 227, + 506, + 242 + ], + "score": 1.0, + "content": "contrast to standard RAG setup where models are trained based on the retriever outputs,", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 239, + 505, + 251 + ], + "spans": [ + { + "bbox": [ + 106, + 239, + 505, + 251 + ], + "score": 1.0, + "content": "which is a mixture of both memorization and reading. At test time, all methods follow the", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 249, + 465, + 264 + ], + "spans": [ + { + "bbox": [ + 105, + 249, + 465, + 264 + ], + "score": 1.0, + "content": "standard RAG setting, provided with a top-k retrieved documents in the context.", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 5 + } + ], + "index": 3.0 + }, + { + "type": "text", + "bbox": [ + 108, + 284, + 505, + 318 + ], + "lines": [], + "index": 9, + "bbox_fs": [ + 105, + 284, + 506, + 320 + ], + "lines_deleted": true + }, + { + "type": "text", + "bbox": [ + 107, + 331, + 505, + 420 + ], + "lines": [ + { + "bbox": [ + 106, + 331, + 505, + 345 + ], + "spans": [ + { + "bbox": [ + 106, + 331, + 505, + 345 + ], + "score": 1.0, + "content": "Open Book Exam In contrast, we liken the open-book exam setting to the scenario in", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 342, + 506, + 355 + ], + "spans": [ + { + "bbox": [ + 105, + 342, + 506, + 355 + ], + "score": 1.0, + "content": "which the LLM can refer to external sources of information (e.g., a website or a book chapter).", + "type": "text" + } + 
], + "index": 12 + }, + { + "bbox": [ + 105, + 353, + 506, + 367 + ], + "spans": [ + { + "bbox": [ + 105, + 353, + 441, + 367 + ], + "score": 1.0, + "content": "In such scenarios, typically, the LLM is paired with retriever which retrieves", + "type": "text" + }, + { + "bbox": [ + 441, + 354, + 453, + 364 + ], + "score": 0.45, + "content": "^ { \\prime } \\mathbf { k } ^ { \\prime }", + "type": "inline_equation" + }, + { + "bbox": [ + 454, + 353, + 506, + 367 + ], + "score": 1.0, + "content": "documents", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 365, + 506, + 378 + ], + "spans": [ + { + "bbox": [ + 105, + 365, + 506, + 378 + ], + "score": 1.0, + "content": "(or specific segments of the document) which are appended to the users’ prompt. It is", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 375, + 506, + 389 + ], + "spans": [ + { + "bbox": [ + 106, + 375, + 506, + 389 + ], + "score": 1.0, + "content": "only through these documents retrieved that the LLM gains access to β€œdomain-specific", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 385, + 506, + 400 + ], + "spans": [ + { + "bbox": [ + 105, + 385, + 506, + 400 + ], + "score": 1.0, + "content": "information”. 
As a result, we argue that the LLM’s performance in these settings, where it", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 397, + 506, + 411 + ], + "spans": [ + { + "bbox": [ + 105, + 397, + 506, + 411 + ], + "score": 1.0, + "content": "is trained as a general-purpose LLM is largely dependent on the quality of the retriever and", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 409, + 460, + 421 + ], + "spans": [ + { + "bbox": [ + 105, + 409, + 460, + 421 + ], + "score": 1.0, + "content": "how accurately the retriever can identify the most relevant piece of information.", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 14.5, + "bbox_fs": [ + 105, + 331, + 506, + 421 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 433, + 505, + 555 + ], + "lines": [ + { + "bbox": [ + 106, + 433, + 506, + 447 + ], + "spans": [ + { + "bbox": [ + 106, + 433, + 506, + 447 + ], + "score": 1.0, + "content": "Domain-Specific Open-Book Exam In this paper, we focus on the narrower but increas-", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 444, + 506, + 458 + ], + "spans": [ + { + "bbox": [ + 105, + 444, + 506, + 458 + ], + "score": 1.0, + "content": "ingly popular domain than the general open book exam, which we call the domain-specific", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 456, + 506, + 468 + ], + "spans": [ + { + "bbox": [ + 105, + 456, + 506, + 468 + ], + "score": 1.0, + "content": "open-book exam. Here, we know apriori the domain in which the LLM will be tested. 
The", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 466, + 506, + 481 + ], + "spans": [ + { + "bbox": [ + 105, + 466, + 506, + 481 + ], + "score": 1.0, + "content": "LLM can respond to the users’ prompt using use any and all information from this specific", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 106, + 477, + 505, + 491 + ], + "spans": [ + { + "bbox": [ + 106, + 477, + 505, + 491 + ], + "score": 1.0, + "content": "domain, which it has been fine-tuned on. Examples of domain specific examples include", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 488, + 506, + 502 + ], + "spans": [ + { + "bbox": [ + 105, + 488, + 506, + 502 + ], + "score": 1.0, + "content": "enterprise documents, code repositories belonging to an organization, etc. In all these", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 499, + 505, + 513 + ], + "spans": [ + { + "bbox": [ + 105, + 499, + 505, + 513 + ], + "score": 1.0, + "content": "scenarios, the LLM will be used to respond to the questions, whose answers can be found", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 106, + 510, + 505, + 523 + ], + "spans": [ + { + "bbox": [ + 106, + 510, + 505, + 523 + ], + "score": 1.0, + "content": "within a collection of documents. The retrieval technique itself has little to no-impact on the", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 521, + 506, + 535 + ], + "spans": [ + { + "bbox": [ + 105, + 521, + 506, + 535 + ], + "score": 1.0, + "content": "mechanism (though it may impact the accuracy). 
This paper studies the domain-specific", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 106, + 532, + 506, + 546 + ], + "spans": [ + { + "bbox": [ + 106, + 532, + 506, + 546 + ], + "score": 1.0, + "content": "open-book setting and how to adapt a pretrained LLM to this specific domain, including", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 543, + 501, + 557 + ], + "spans": [ + { + "bbox": [ + 105, + 543, + 501, + 557 + ], + "score": 1.0, + "content": "how to make it more robust to a varying number of retrieved documents and distractors.", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 24, + "bbox_fs": [ + 105, + 433, + 506, + 557 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 572, + 158, + 585 + ], + "lines": [ + { + "bbox": [ + 104, + 570, + 160, + 589 + ], + "spans": [ + { + "bbox": [ + 104, + 570, + 160, + 589 + ], + "score": 1.0, + "content": "3 RAFT", + "type": "text" + } + ], + "index": 30 + } + ], + "index": 30 + }, + { + "type": "text", + "bbox": [ + 107, + 598, + 505, + 654 + ], + "lines": [ + { + "bbox": [ + 105, + 598, + 507, + 612 + ], + "spans": [ + { + "bbox": [ + 105, + 598, + 507, + 612 + ], + "score": 1.0, + "content": "In this section, we present RAFT, a novel way of training LLMs for domain-specific open-", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 609, + 505, + 623 + ], + "spans": [ + { + "bbox": [ + 105, + 609, + 505, + 623 + ], + "score": 1.0, + "content": "book exams. We first introduce the classical technique of supervised fine-tuning, followed", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 621, + 505, + 634 + ], + "spans": [ + { + "bbox": [ + 105, + 621, + 505, + 634 + ], + "score": 1.0, + "content": "with the key takeaways from our experiments. 
Then, we introduce RAFT , a modified", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 631, + 506, + 646 + ], + "spans": [ + { + "bbox": [ + 105, + 631, + 506, + 646 + ], + "score": 1.0, + "content": "version of general instruction tuning. Lastly, we provide an overview of the experiments to", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 643, + 228, + 656 + ], + "spans": [ + { + "bbox": [ + 105, + 643, + 228, + 656 + ], + "score": 1.0, + "content": "expect in the later sections.", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 33, + "bbox_fs": [ + 105, + 598, + 507, + 656 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 659, + 213, + 672 + ], + "lines": [ + { + "bbox": [ + 105, + 656, + 215, + 676 + ], + "spans": [ + { + "bbox": [ + 105, + 656, + 215, + 676 + ], + "score": 1.0, + "content": "Supervised Finetuning", + "type": "text" + } + ], + "index": 36 + } + ], + "index": 36 + }, + { + "type": "text", + "bbox": [ + 107, + 676, + 504, + 732 + ], + "lines": [ + { + "bbox": [ + 106, + 676, + 506, + 689 + ], + "spans": [ + { + "bbox": [ + 106, + 676, + 506, + 689 + ], + "score": 1.0, + "content": "Consider the supervised fine-tuning (SFT) setting for a Question-Answer dataset. 
The", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 685, + 506, + 702 + ], + "spans": [ + { + "bbox": [ + 105, + 685, + 259, + 702 + ], + "score": 1.0, + "content": "formulation consists of the Dataset", + "type": "text" + }, + { + "bbox": [ + 259, + 688, + 275, + 699 + ], + "score": 0.44, + "content": "( \\bar { D } )", + "type": "inline_equation" + }, + { + "bbox": [ + 275, + 685, + 506, + 702 + ], + "score": 1.0, + "content": "from which a set of Question (Q) and corresponding", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 698, + 505, + 711 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 141, + 711 + ], + "score": 1.0, + "content": "answer", + "type": "text" + }, + { + "bbox": [ + 141, + 699, + 157, + 710 + ], + "score": 0.49, + "content": "( A )", + "type": "inline_equation" + }, + { + "bbox": [ + 157, + 698, + 505, + 711 + ], + "score": 1.0, + "content": "pairs are derived or already available. In the classical SFT setting, the model is", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 106, + 710, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 106, + 710, + 505, + 722 + ], + "score": 1.0, + "content": "trained to improve it’s ability to answer the questions based on it’s knowledge - obtained", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 719, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 105, + 719, + 506, + 734 + ], + "score": 1.0, + "content": "either during pre-training, or during the SFT training phase. 
The model so trained can also", + "type": "text" + } + ], + "index": 41 + } + ], + "index": 39, + "bbox_fs": [ + 105, + 676, + 506, + 734 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 107, + 505, + 153 + ], + "lines": [ + { + "bbox": [ + 105, + 107, + 507, + 121 + ], + "spans": [ + { + "bbox": [ + 105, + 107, + 507, + 121 + ], + "score": 1.0, + "content": "Figure 3: RAFT prompt to help LLM evaluate its own generated reasoning and answers,", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 119, + 506, + 132 + ], + "spans": [ + { + "bbox": [ + 106, + 119, + 506, + 132 + ], + "score": 1.0, + "content": "contrasting them with the correct reasoning and answers. The LLM is prompted to identify", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 130, + 505, + 143 + ], + "spans": [ + { + "bbox": [ + 105, + 130, + 505, + 143 + ], + "score": 1.0, + "content": "errors in its reasoning and extract key insights for improvement. This figure specifically", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 141, + 445, + 154 + ], + "spans": [ + { + "bbox": [ + 105, + 141, + 445, + 154 + ], + "score": 1.0, + "content": "represents the β€˜GenerateExplanationβ€˜ step in the RAFT algorithm (Section 3).", + "type": "text" + } + ], + "index": 3 + } + ], + "index": 1.5 + }, + { + "type": "text", + "bbox": [ + 107, + 222, + 505, + 257 + ], + "lines": [ + { + "bbox": [ + 105, + 222, + 505, + 236 + ], + "spans": [ + { + "bbox": [ + 105, + 222, + 505, + 236 + ], + "score": 1.0, + "content": "be used at test-time with Retrieval Augmented Generation (RAG) setting, where additional", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 233, + 505, + 248 + ], + "spans": [ + { + "bbox": [ + 105, + 233, + 505, + 248 + ], + "score": 1.0, + "content": "documents can be introduced in the prompt to help the model answer the question. 
This", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 245, + 241, + 258 + ], + "spans": [ + { + "bbox": [ + 105, + 245, + 241, + 258 + ], + "score": 1.0, + "content": "can be represented as follows:", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 5 + }, + { + "type": "text", + "bbox": [ + 106, + 261, + 426, + 274 + ], + "lines": [ + { + "bbox": [ + 104, + 260, + 426, + 276 + ], + "spans": [ + { + "bbox": [ + 104, + 260, + 136, + 276 + ], + "score": 1.0, + "content": "{Train:", + "type": "text" + }, + { + "bbox": [ + 137, + 262, + 171, + 274 + ], + "score": 0.85, + "content": "\\mathbf Q \\to \\mathbf A _ { \\mathrm { j } } ^ { \\prime }", + "type": "inline_equation" + }, + { + "bbox": [ + 172, + 260, + 255, + 276 + ], + "score": 1.0, + "content": ", {0-shot Inference:", + "type": "text" + }, + { + "bbox": [ + 255, + 262, + 291, + 274 + ], + "score": 0.86, + "content": "\\mathbf Q \\to \\mathbf A \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 292, + 260, + 370, + 276 + ], + "score": 1.0, + "content": ", {RAG Inference:", + "type": "text" + }, + { + "bbox": [ + 370, + 262, + 426, + 274 + ], + "score": 0.85, + "content": "\\mathbf { Q } + \\mathbf { D } \\mathbf { A } \\}", + "type": "inline_equation" + } + ], + "index": 7 + } + ], + "index": 7 + }, + { + "type": "text", + "bbox": [ + 107, + 278, + 506, + 445 + ], + "lines": [ + { + "bbox": [ + 105, + 278, + 507, + 293 + ], + "spans": [ + { + "bbox": [ + 105, + 278, + 507, + 293 + ], + "score": 1.0, + "content": "RAFT: Retrieval Augmented Fine-Tuning (RAFT), presents a novel recipe to prepare fine-", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 289, + 507, + 304 + ], + "spans": [ + { + "bbox": [ + 105, + 289, + 507, + 304 + ], + "score": 1.0, + "content": "tuning data to tailor the models for domain-specific open-book setting, equivalent to in-", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 299, + 506, + 315 + ], + "spans": [ 
+ { + "bbox": [ + 105, + 299, + 506, + 315 + ], + "score": 1.0, + "content": "domain RAG In RAFT, we prepare the training data such that each data point contains a", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 312, + 506, + 325 + ], + "spans": [ + { + "bbox": [ + 104, + 312, + 147, + 325 + ], + "score": 1.0, + "content": "question", + "type": "text" + }, + { + "bbox": [ + 147, + 312, + 163, + 324 + ], + "score": 0.48, + "content": "( Q )", + "type": "inline_equation" + }, + { + "bbox": [ + 163, + 312, + 252, + 325 + ], + "score": 1.0, + "content": ", a set of documents", + "type": "text" + }, + { + "bbox": [ + 253, + 312, + 272, + 324 + ], + "score": 0.87, + "content": "( D _ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 272, + 312, + 506, + 325 + ], + "score": 1.0, + "content": ", and a corresponding Chain-of-though style answer", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 321, + 506, + 336 + ], + "spans": [ + { + "bbox": [ + 106, + 323, + 127, + 335 + ], + "score": 0.86, + "content": "( \\hat { \\boldsymbol { A } } ^ { * } )", + "type": "inline_equation" + }, + { + "bbox": [ + 127, + 321, + 300, + 336 + ], + "score": 1.0, + "content": "generated from one of the document", + "type": "text" + }, + { + "bbox": [ + 301, + 323, + 321, + 334 + ], + "score": 0.89, + "content": "( D ^ { * } )", + "type": "inline_equation" + }, + { + "bbox": [ + 321, + 321, + 506, + 336 + ], + "score": 1.0, + "content": ". We differentiate between two types of", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 334, + 505, + 346 + ], + "spans": [ + { + "bbox": [ + 106, + 334, + 256, + 346 + ], + "score": 1.0, + "content": "documents: β€˜golden’ documents", + "type": "text" + }, + { + "bbox": [ + 256, + 334, + 278, + 345 + ], + "score": 0.88, + "content": "( D * )", + "type": "inline_equation" + }, + { + "bbox": [ + 278, + 334, + 505, + 346 + ], + "score": 1.0, + "content": "i.e. 
the documents from which the answer to the", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 344, + 507, + 359 + ], + "spans": [ + { + "bbox": [ + 104, + 344, + 355, + 359 + ], + "score": 1.0, + "content": "question can be deduced, and β€˜distractor’ documents", + "type": "text" + }, + { + "bbox": [ + 355, + 345, + 374, + 357 + ], + "score": 0.87, + "content": "( D _ { i } )", + "type": "inline_equation" + }, + { + "bbox": [ + 374, + 344, + 507, + 359 + ], + "score": 1.0, + "content": "that do not contain answer-", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 356, + 506, + 369 + ], + "spans": [ + { + "bbox": [ + 105, + 356, + 506, + 369 + ], + "score": 1.0, + "content": "relevant information. As an implementation detail, the β€˜golden’ document doesn’t need to", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 366, + 506, + 380 + ], + "spans": [ + { + "bbox": [ + 105, + 366, + 506, + 380 + ], + "score": 1.0, + "content": "be a single document, but can be more than one document, as is the case in HotpotQA Yang", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 376, + 506, + 391 + ], + "spans": [ + { + "bbox": [ + 105, + 376, + 207, + 391 + ], + "score": 1.0, + "content": "et al. (2018). 
Then, for", + "type": "text" + }, + { + "bbox": [ + 207, + 378, + 215, + 388 + ], + "score": 0.79, + "content": "P", + "type": "inline_equation" + }, + { + "bbox": [ + 216, + 376, + 327, + 391 + ], + "score": 1.0, + "content": "fraction of the questions", + "type": "text" + }, + { + "bbox": [ + 328, + 378, + 343, + 389 + ], + "score": 0.87, + "content": "( q _ { i } )", + "type": "inline_equation" + }, + { + "bbox": [ + 344, + 376, + 506, + 391 + ], + "score": 1.0, + "content": "in the dataset, we retain the golden", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 388, + 506, + 402 + ], + "spans": [ + { + "bbox": [ + 105, + 388, + 153, + 402 + ], + "score": 1.0, + "content": "document", + "type": "text" + }, + { + "bbox": [ + 153, + 389, + 170, + 401 + ], + "score": 0.88, + "content": "( d _ { i } ^ { * } )", + "type": "inline_equation" + }, + { + "bbox": [ + 171, + 388, + 315, + 402 + ], + "score": 1.0, + "content": "along with distractor documents", + "type": "text" + }, + { + "bbox": [ + 315, + 389, + 342, + 401 + ], + "score": 0.91, + "content": "( d _ { k - 1 } )", + "type": "inline_equation" + }, + { + "bbox": [ + 342, + 388, + 364, + 402 + ], + "score": 1.0, + "content": ". 
For", + "type": "text" + }, + { + "bbox": [ + 364, + 389, + 397, + 401 + ], + "score": 0.92, + "content": "( 1 - P )", + "type": "inline_equation" + }, + { + "bbox": [ + 397, + 388, + 506, + 402 + ], + "score": 1.0, + "content": "fraction of the questions", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 106, + 399, + 506, + 412 + ], + "spans": [ + { + "bbox": [ + 106, + 400, + 122, + 411 + ], + "score": 0.85, + "content": "( q _ { i } )", + "type": "inline_equation" + }, + { + "bbox": [ + 122, + 399, + 506, + 412 + ], + "score": 1.0, + "content": "in the dataset, we include no golden document and only include distractor documents", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 107, + 410, + 506, + 425 + ], + "spans": [ + { + "bbox": [ + 107, + 411, + 123, + 423 + ], + "score": 0.87, + "content": "( d _ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 123, + 410, + 506, + 425 + ], + "score": 1.0, + "content": ". We then fine-tune the language model using standard supervised training (SFT)", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 421, + 506, + 435 + ], + "spans": [ + { + "bbox": [ + 105, + 421, + 506, + 435 + ], + "score": 1.0, + "content": "technique, training it to generate answers from the provided documents and question. Fig. 
2", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 106, + 432, + 338, + 446 + ], + "spans": [ + { + "bbox": [ + 106, + 432, + 338, + 446 + ], + "score": 1.0, + "content": "illustrates the high-level design principal for RAFT .", + "type": "text" + } + ], + "index": 22 + } + ], + "index": 15 + }, + { + "type": "text", + "bbox": [ + 106, + 449, + 505, + 506 + ], + "lines": [ + { + "bbox": [ + 106, + 449, + 506, + 462 + ], + "spans": [ + { + "bbox": [ + 106, + 449, + 506, + 462 + ], + "score": 1.0, + "content": "We demonstrate that our RAG approach trains the model to perform better RAG on the set", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 459, + 506, + 474 + ], + "spans": [ + { + "bbox": [ + 105, + 459, + 506, + 474 + ], + "score": 1.0, + "content": "of documents it is trained on i.e., in-domain. By removing the golden documents in some", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 471, + 505, + 485 + ], + "spans": [ + { + "bbox": [ + 105, + 471, + 505, + 485 + ], + "score": 1.0, + "content": "instances, we are compelling the model to memorize answers instead of deriving them from", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 481, + 505, + 496 + ], + "spans": [ + { + "bbox": [ + 105, + 481, + 505, + 496 + ], + "score": 1.0, + "content": "the context. The training data for RAFT is as follows, and an example training data can be", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 493, + 169, + 506 + ], + "spans": [ + { + "bbox": [ + 105, + 493, + 169, + 506 + ], + "score": 1.0, + "content": "seen in Fig. 
3:", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 25 + }, + { + "type": "text", + "bbox": [ + 106, + 509, + 320, + 540 + ], + "lines": [ + { + "bbox": [ + 106, + 508, + 320, + 523 + ], + "spans": [ + { + "bbox": [ + 106, + 510, + 125, + 521 + ], + "score": 0.83, + "content": "\\mathbf { P } \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 126, + 508, + 162, + 523 + ], + "score": 1.0, + "content": "of data:", + "type": "text" + }, + { + "bbox": [ + 162, + 510, + 320, + 523 + ], + "score": 0.84, + "content": "\\mathbf { Q } + \\mathbf { D } ^ { * } + \\mathbf { D } _ { 1 } + \\mathbf { D } _ { 2 } + \\ldots + \\mathbf { D } _ { k } \\mathbf { A } *", + "type": "inline_equation" + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 527, + 319, + 540 + ], + "spans": [ + { + "bbox": [ + 106, + 527, + 149, + 539 + ], + "score": 0.89, + "content": "( 1 - \\mathbf { P } ) \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 149, + 527, + 185, + 540 + ], + "score": 1.0, + "content": "of data:", + "type": "text" + }, + { + "bbox": [ + 185, + 527, + 319, + 540 + ], + "score": 0.87, + "content": "\\mathbf { Q } + \\mathbf { D } _ { 1 } + \\mathbf { D } _ { 2 } + \\ldots + \\mathbf { D } _ { k } \\mathbf { A } *", + "type": "inline_equation" + } + ], + "index": 29 + } + ], + "index": 28.5 + }, + { + "type": "text", + "bbox": [ + 106, + 544, + 504, + 567 + ], + "lines": [ + { + "bbox": [ + 106, + 544, + 505, + 557 + ], + "spans": [ + { + "bbox": [ + 106, + 544, + 505, + 557 + ], + "score": 1.0, + "content": "Subsequently, for the test scenario, the model is provided with the Q and top-k documents", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 554, + 480, + 568 + ], + "spans": [ + { + "bbox": [ + 105, + 554, + 480, + 568 + ], + "score": 1.0, + "content": "retrieved by the RAG pipeline. 
Note that RAFT is independent of the retriever used.", + "type": "text" + } + ], + "index": 31 + } + ], + "index": 30.5 + }, + { + "type": "text", + "bbox": [ + 106, + 572, + 505, + 650 + ], + "lines": [ + { + "bbox": [ + 104, + 571, + 506, + 586 + ], + "spans": [ + { + "bbox": [ + 104, + 571, + 506, + 586 + ], + "score": 1.0, + "content": "A key factor in enhancing training quality is the generation of a reasoning process, such", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 582, + 506, + 596 + ], + "spans": [ + { + "bbox": [ + 105, + 582, + 506, + 596 + ], + "score": 1.0, + "content": "as Chain-of-Thought, to explain the provided answers. RAFT approach is similar: we", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 592, + 506, + 608 + ], + "spans": [ + { + "bbox": [ + 105, + 592, + 506, + 608 + ], + "score": 1.0, + "content": "demonstrate that creating a full reasoning chain and in-addition, clearly citing sources", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 604, + 507, + 619 + ], + "spans": [ + { + "bbox": [ + 105, + 604, + 507, + 619 + ], + "score": 1.0, + "content": "enhances the model’s accuracy in answering questions. In Fig. 3, we illustrate this set-", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 106, + 615, + 506, + 630 + ], + "spans": [ + { + "bbox": [ + 106, + 615, + 506, + 630 + ], + "score": 1.0, + "content": "up. 
Generating the training data in this fashion, involves presenting the model with a", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 104, + 626, + 506, + 641 + ], + "spans": [ + { + "bbox": [ + 104, + 626, + 506, + 641 + ], + "score": 1.0, + "content": "question, context, and verified answers, and then requesting it to form a reasoning chain", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 106, + 638, + 326, + 651 + ], + "spans": [ + { + "bbox": [ + 106, + 638, + 326, + 651 + ], + "score": 1.0, + "content": "that appropriately references the original context.", + "type": "text" + } + ], + "index": 38 + } + ], + "index": 35 + }, + { + "type": "text", + "bbox": [ + 107, + 655, + 505, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 654, + 505, + 668 + ], + "spans": [ + { + "bbox": [ + 105, + 654, + 505, + 668 + ], + "score": 1.0, + "content": "For all the datasets in our experiments, we generate the answers using the technique", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 663, + 506, + 681 + ], + "spans": [ + { + "bbox": [ + 105, + 663, + 506, + 681 + ], + "score": 1.0, + "content": "described above. Note that the Gorilla APIBench dataset, already includes reasoning", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 676, + 505, + 690 + ], + "spans": [ + { + "bbox": [ + 105, + 676, + 505, + 690 + ], + "score": 1.0, + "content": "in the answers. We provide an example of the generation step in Fig. 
3, the detailed", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 687, + 506, + 700 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 506, + 700 + ], + "score": 1.0, + "content": "reasoning answer includes a citation from the original context inside ##begin_quote## and", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 698, + 505, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 505, + 712 + ], + "score": 1.0, + "content": "##end_quote## as well as the detailed explanation on how to reach the conclusion based on", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 708, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 505, + 723 + ], + "score": 1.0, + "content": "the citations. We demonstrate that adding detailed reasoning paragraphs can help boost the", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 720, + 320, + 734 + ], + "spans": [ + { + "bbox": [ + 105, + 720, + 320, + 734 + ], + "score": 1.0, + "content": "model’s performance in our experiment section.", + "type": "text" + } + ], + "index": 45 + } + ], + "index": 42 + } + ], + "page_idx": 3, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 26, + 316, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "score": 1.0, + "content": "Published as a conference paper at COLM 2024", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 302, + 752, + 308, + 760 + ], + "lines": [ + { + "bbox": [ + 301, + 750, + 310, + 762 + ], + "spans": [ + { + "bbox": [ + 301, + 750, + 310, + 762 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 12, + "width": 9 + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 107, + 505, + 153 + ], + "lines": [ + { + "bbox": [ + 105, + 107, + 507, + 121 + ], + "spans": [ + { + 
"bbox": [ + 105, + 107, + 507, + 121 + ], + "score": 1.0, + "content": "Figure 3: RAFT prompt to help LLM evaluate its own generated reasoning and answers,", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 119, + 506, + 132 + ], + "spans": [ + { + "bbox": [ + 106, + 119, + 506, + 132 + ], + "score": 1.0, + "content": "contrasting them with the correct reasoning and answers. The LLM is prompted to identify", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 130, + 505, + 143 + ], + "spans": [ + { + "bbox": [ + 105, + 130, + 505, + 143 + ], + "score": 1.0, + "content": "errors in its reasoning and extract key insights for improvement. This figure specifically", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 141, + 445, + 154 + ], + "spans": [ + { + "bbox": [ + 105, + 141, + 445, + 154 + ], + "score": 1.0, + "content": "represents the β€˜GenerateExplanationβ€˜ step in the RAFT algorithm (Section 3).", + "type": "text" + } + ], + "index": 3 + } + ], + "index": 1.5, + "bbox_fs": [ + 105, + 107, + 507, + 154 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 222, + 505, + 257 + ], + "lines": [ + { + "bbox": [ + 105, + 222, + 505, + 236 + ], + "spans": [ + { + "bbox": [ + 105, + 222, + 505, + 236 + ], + "score": 1.0, + "content": "be used at test-time with Retrieval Augmented Generation (RAG) setting, where additional", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 233, + 505, + 248 + ], + "spans": [ + { + "bbox": [ + 105, + 233, + 505, + 248 + ], + "score": 1.0, + "content": "documents can be introduced in the prompt to help the model answer the question. 
This", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 245, + 241, + 258 + ], + "spans": [ + { + "bbox": [ + 105, + 245, + 241, + 258 + ], + "score": 1.0, + "content": "can be represented as follows:", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 5, + "bbox_fs": [ + 105, + 222, + 505, + 258 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 261, + 426, + 274 + ], + "lines": [ + { + "bbox": [ + 104, + 260, + 426, + 276 + ], + "spans": [ + { + "bbox": [ + 104, + 260, + 136, + 276 + ], + "score": 1.0, + "content": "{Train:", + "type": "text" + }, + { + "bbox": [ + 137, + 262, + 171, + 274 + ], + "score": 0.85, + "content": "\\mathbf Q \\to \\mathbf A _ { \\mathrm { j } } ^ { \\prime }", + "type": "inline_equation" + }, + { + "bbox": [ + 172, + 260, + 255, + 276 + ], + "score": 1.0, + "content": ", {0-shot Inference:", + "type": "text" + }, + { + "bbox": [ + 255, + 262, + 291, + 274 + ], + "score": 0.86, + "content": "\\mathbf Q \\to \\mathbf A \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 292, + 260, + 370, + 276 + ], + "score": 1.0, + "content": ", {RAG Inference:", + "type": "text" + }, + { + "bbox": [ + 370, + 262, + 426, + 274 + ], + "score": 0.85, + "content": "\\mathbf { Q } + \\mathbf { D } \\mathbf { A } \\}", + "type": "inline_equation" + } + ], + "index": 7 + } + ], + "index": 7, + "bbox_fs": [ + 104, + 260, + 426, + 276 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 278, + 506, + 445 + ], + "lines": [ + { + "bbox": [ + 105, + 278, + 507, + 293 + ], + "spans": [ + { + "bbox": [ + 105, + 278, + 507, + 293 + ], + "score": 1.0, + "content": "RAFT: Retrieval Augmented Fine-Tuning (RAFT), presents a novel recipe to prepare fine-", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 289, + 507, + 304 + ], + "spans": [ + { + "bbox": [ + 105, + 289, + 507, + 304 + ], + "score": 1.0, + "content": "tuning data to tailor the models for domain-specific open-book setting, equivalent to in-", + "type": 
"text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 299, + 506, + 315 + ], + "spans": [ + { + "bbox": [ + 105, + 299, + 506, + 315 + ], + "score": 1.0, + "content": "domain RAG In RAFT, we prepare the training data such that each data point contains a", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 312, + 506, + 325 + ], + "spans": [ + { + "bbox": [ + 104, + 312, + 147, + 325 + ], + "score": 1.0, + "content": "question", + "type": "text" + }, + { + "bbox": [ + 147, + 312, + 163, + 324 + ], + "score": 0.48, + "content": "( Q )", + "type": "inline_equation" + }, + { + "bbox": [ + 163, + 312, + 252, + 325 + ], + "score": 1.0, + "content": ", a set of documents", + "type": "text" + }, + { + "bbox": [ + 253, + 312, + 272, + 324 + ], + "score": 0.87, + "content": "( D _ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 272, + 312, + 506, + 325 + ], + "score": 1.0, + "content": ", and a corresponding Chain-of-though style answer", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 321, + 506, + 336 + ], + "spans": [ + { + "bbox": [ + 106, + 323, + 127, + 335 + ], + "score": 0.86, + "content": "( \\hat { \\boldsymbol { A } } ^ { * } )", + "type": "inline_equation" + }, + { + "bbox": [ + 127, + 321, + 300, + 336 + ], + "score": 1.0, + "content": "generated from one of the document", + "type": "text" + }, + { + "bbox": [ + 301, + 323, + 321, + 334 + ], + "score": 0.89, + "content": "( D ^ { * } )", + "type": "inline_equation" + }, + { + "bbox": [ + 321, + 321, + 506, + 336 + ], + "score": 1.0, + "content": ". 
We differentiate between two types of", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 334, + 505, + 346 + ], + "spans": [ + { + "bbox": [ + 106, + 334, + 256, + 346 + ], + "score": 1.0, + "content": "documents: β€˜golden’ documents", + "type": "text" + }, + { + "bbox": [ + 256, + 334, + 278, + 345 + ], + "score": 0.88, + "content": "( D * )", + "type": "inline_equation" + }, + { + "bbox": [ + 278, + 334, + 505, + 346 + ], + "score": 1.0, + "content": "i.e. the documents from which the answer to the", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 344, + 507, + 359 + ], + "spans": [ + { + "bbox": [ + 104, + 344, + 355, + 359 + ], + "score": 1.0, + "content": "question can be deduced, and β€˜distractor’ documents", + "type": "text" + }, + { + "bbox": [ + 355, + 345, + 374, + 357 + ], + "score": 0.87, + "content": "( D _ { i } )", + "type": "inline_equation" + }, + { + "bbox": [ + 374, + 344, + 507, + 359 + ], + "score": 1.0, + "content": "that do not contain answer-", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 356, + 506, + 369 + ], + "spans": [ + { + "bbox": [ + 105, + 356, + 506, + 369 + ], + "score": 1.0, + "content": "relevant information. As an implementation detail, the β€˜golden’ document doesn’t need to", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 366, + 506, + 380 + ], + "spans": [ + { + "bbox": [ + 105, + 366, + 506, + 380 + ], + "score": 1.0, + "content": "be a single document, but can be more than one document, as is the case in HotpotQA Yang", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 376, + 506, + 391 + ], + "spans": [ + { + "bbox": [ + 105, + 376, + 207, + 391 + ], + "score": 1.0, + "content": "et al. (2018). 
Then, for", + "type": "text" + }, + { + "bbox": [ + 207, + 378, + 215, + 388 + ], + "score": 0.79, + "content": "P", + "type": "inline_equation" + }, + { + "bbox": [ + 216, + 376, + 327, + 391 + ], + "score": 1.0, + "content": "fraction of the questions", + "type": "text" + }, + { + "bbox": [ + 328, + 378, + 343, + 389 + ], + "score": 0.87, + "content": "( q _ { i } )", + "type": "inline_equation" + }, + { + "bbox": [ + 344, + 376, + 506, + 391 + ], + "score": 1.0, + "content": "in the dataset, we retain the golden", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 388, + 506, + 402 + ], + "spans": [ + { + "bbox": [ + 105, + 388, + 153, + 402 + ], + "score": 1.0, + "content": "document", + "type": "text" + }, + { + "bbox": [ + 153, + 389, + 170, + 401 + ], + "score": 0.88, + "content": "( d _ { i } ^ { * } )", + "type": "inline_equation" + }, + { + "bbox": [ + 171, + 388, + 315, + 402 + ], + "score": 1.0, + "content": "along with distractor documents", + "type": "text" + }, + { + "bbox": [ + 315, + 389, + 342, + 401 + ], + "score": 0.91, + "content": "( d _ { k - 1 } )", + "type": "inline_equation" + }, + { + "bbox": [ + 342, + 388, + 364, + 402 + ], + "score": 1.0, + "content": ". 
For", + "type": "text" + }, + { + "bbox": [ + 364, + 389, + 397, + 401 + ], + "score": 0.92, + "content": "( 1 - P )", + "type": "inline_equation" + }, + { + "bbox": [ + 397, + 388, + 506, + 402 + ], + "score": 1.0, + "content": "fraction of the questions", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 106, + 399, + 506, + 412 + ], + "spans": [ + { + "bbox": [ + 106, + 400, + 122, + 411 + ], + "score": 0.85, + "content": "( q _ { i } )", + "type": "inline_equation" + }, + { + "bbox": [ + 122, + 399, + 506, + 412 + ], + "score": 1.0, + "content": "in the dataset, we include no golden document and only include distractor documents", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 107, + 410, + 506, + 425 + ], + "spans": [ + { + "bbox": [ + 107, + 411, + 123, + 423 + ], + "score": 0.87, + "content": "( d _ { k } )", + "type": "inline_equation" + }, + { + "bbox": [ + 123, + 410, + 506, + 425 + ], + "score": 1.0, + "content": ". We then fine-tune the language model using standard supervised training (SFT)", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 421, + 506, + 435 + ], + "spans": [ + { + "bbox": [ + 105, + 421, + 506, + 435 + ], + "score": 1.0, + "content": "technique, training it to generate answers from the provided documents and question. Fig. 
2", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 106, + 432, + 338, + 446 + ], + "spans": [ + { + "bbox": [ + 106, + 432, + 338, + 446 + ], + "score": 1.0, + "content": "illustrates the high-level design principal for RAFT .", + "type": "text" + } + ], + "index": 22 + } + ], + "index": 15, + "bbox_fs": [ + 104, + 278, + 507, + 446 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 449, + 505, + 506 + ], + "lines": [ + { + "bbox": [ + 106, + 449, + 506, + 462 + ], + "spans": [ + { + "bbox": [ + 106, + 449, + 506, + 462 + ], + "score": 1.0, + "content": "We demonstrate that our RAG approach trains the model to perform better RAG on the set", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 459, + 506, + 474 + ], + "spans": [ + { + "bbox": [ + 105, + 459, + 506, + 474 + ], + "score": 1.0, + "content": "of documents it is trained on i.e., in-domain. By removing the golden documents in some", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 471, + 505, + 485 + ], + "spans": [ + { + "bbox": [ + 105, + 471, + 505, + 485 + ], + "score": 1.0, + "content": "instances, we are compelling the model to memorize answers instead of deriving them from", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 481, + 505, + 496 + ], + "spans": [ + { + "bbox": [ + 105, + 481, + 505, + 496 + ], + "score": 1.0, + "content": "the context. The training data for RAFT is as follows, and an example training data can be", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 493, + 169, + 506 + ], + "spans": [ + { + "bbox": [ + 105, + 493, + 169, + 506 + ], + "score": 1.0, + "content": "seen in Fig. 
3:", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 25, + "bbox_fs": [ + 105, + 449, + 506, + 506 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 509, + 320, + 540 + ], + "lines": [ + { + "bbox": [ + 106, + 508, + 320, + 523 + ], + "spans": [ + { + "bbox": [ + 106, + 510, + 125, + 521 + ], + "score": 0.83, + "content": "\\mathbf { P } \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 126, + 508, + 162, + 523 + ], + "score": 1.0, + "content": "of data:", + "type": "text" + }, + { + "bbox": [ + 162, + 510, + 320, + 523 + ], + "score": 0.84, + "content": "\\mathbf { Q } + \\mathbf { D } ^ { * } + \\mathbf { D } _ { 1 } + \\mathbf { D } _ { 2 } + \\ldots + \\mathbf { D } _ { k } \\mathbf { A } *", + "type": "inline_equation" + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 527, + 319, + 540 + ], + "spans": [ + { + "bbox": [ + 106, + 527, + 149, + 539 + ], + "score": 0.89, + "content": "( 1 - \\mathbf { P } ) \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 149, + 527, + 185, + 540 + ], + "score": 1.0, + "content": "of data:", + "type": "text" + }, + { + "bbox": [ + 185, + 527, + 319, + 540 + ], + "score": 0.87, + "content": "\\mathbf { Q } + \\mathbf { D } _ { 1 } + \\mathbf { D } _ { 2 } + \\ldots + \\mathbf { D } _ { k } \\mathbf { A } *", + "type": "inline_equation" + } + ], + "index": 29 + } + ], + "index": 28.5, + "bbox_fs": [ + 106, + 508, + 320, + 540 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 544, + 504, + 567 + ], + "lines": [ + { + "bbox": [ + 106, + 544, + 505, + 557 + ], + "spans": [ + { + "bbox": [ + 106, + 544, + 505, + 557 + ], + "score": 1.0, + "content": "Subsequently, for the test scenario, the model is provided with the Q and top-k documents", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 554, + 480, + 568 + ], + "spans": [ + { + "bbox": [ + 105, + 554, + 480, + 568 + ], + "score": 1.0, + "content": "retrieved by the RAG pipeline. 
Note that RAFT is independent of the retriever used.", + "type": "text" + } + ], + "index": 31 + } + ], + "index": 30.5, + "bbox_fs": [ + 105, + 544, + 505, + 568 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 572, + 505, + 650 + ], + "lines": [ + { + "bbox": [ + 104, + 571, + 506, + 586 + ], + "spans": [ + { + "bbox": [ + 104, + 571, + 506, + 586 + ], + "score": 1.0, + "content": "A key factor in enhancing training quality is the generation of a reasoning process, such", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 582, + 506, + 596 + ], + "spans": [ + { + "bbox": [ + 105, + 582, + 506, + 596 + ], + "score": 1.0, + "content": "as Chain-of-Thought, to explain the provided answers. RAFT approach is similar: we", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 592, + 506, + 608 + ], + "spans": [ + { + "bbox": [ + 105, + 592, + 506, + 608 + ], + "score": 1.0, + "content": "demonstrate that creating a full reasoning chain and in-addition, clearly citing sources", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 604, + 507, + 619 + ], + "spans": [ + { + "bbox": [ + 105, + 604, + 507, + 619 + ], + "score": 1.0, + "content": "enhances the model’s accuracy in answering questions. In Fig. 3, we illustrate this set-", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 106, + 615, + 506, + 630 + ], + "spans": [ + { + "bbox": [ + 106, + 615, + 506, + 630 + ], + "score": 1.0, + "content": "up. 
Generating the training data in this fashion, involves presenting the model with a", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 104, + 626, + 506, + 641 + ], + "spans": [ + { + "bbox": [ + 104, + 626, + 506, + 641 + ], + "score": 1.0, + "content": "question, context, and verified answers, and then requesting it to form a reasoning chain", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 106, + 638, + 326, + 651 + ], + "spans": [ + { + "bbox": [ + 106, + 638, + 326, + 651 + ], + "score": 1.0, + "content": "that appropriately references the original context.", + "type": "text" + } + ], + "index": 38 + } + ], + "index": 35, + "bbox_fs": [ + 104, + 571, + 507, + 651 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 655, + 505, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 654, + 505, + 668 + ], + "spans": [ + { + "bbox": [ + 105, + 654, + 505, + 668 + ], + "score": 1.0, + "content": "For all the datasets in our experiments, we generate the answers using the technique", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 663, + 506, + 681 + ], + "spans": [ + { + "bbox": [ + 105, + 663, + 506, + 681 + ], + "score": 1.0, + "content": "described above. Note that the Gorilla APIBench dataset, already includes reasoning", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 676, + 505, + 690 + ], + "spans": [ + { + "bbox": [ + 105, + 676, + 505, + 690 + ], + "score": 1.0, + "content": "in the answers. We provide an example of the generation step in Fig. 
3, the detailed", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 687, + 506, + 700 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 506, + 700 + ], + "score": 1.0, + "content": "reasoning answer includes a citation from the original context inside ##begin_quote## and", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 698, + 505, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 505, + 712 + ], + "score": 1.0, + "content": "##end_quote## as well as the detailed explanation on how to reach the conclusion based on", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 708, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 505, + 723 + ], + "score": 1.0, + "content": "the citations. We demonstrate that adding detailed reasoning paragraphs can help boost the", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 720, + 320, + 734 + ], + "spans": [ + { + "bbox": [ + 105, + 720, + 320, + 734 + ], + "score": 1.0, + "content": "model’s performance in our experiment section.", + "type": "text" + } + ], + "index": 45 + } + ], + "index": 42, + "bbox_fs": [ + 105, + 654, + 506, + 734 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 155, + 505, + 249 + ], + "blocks": [ + { + "type": "table_caption", + "bbox": [ + 106, + 79, + 506, + 147 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 507, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 507, + 94 + ], + "score": 1.0, + "content": "Table 1: RAFT improves RAG performance for all specialized domains: Across PubMed,", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 91, + 507, + 105 + ], + "spans": [ + { + "bbox": [ + 105, + 91, + 507, + 105 + ], + "score": 1.0, + "content": "HotPot, HuggingFace, Torch Hub, and Tensorflow Hub, we see that Domain-specific Fine-", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 101, + 506, + 115 + ], + 
"spans": [ + { + "bbox": [ + 105, + 101, + 506, + 115 + ], + "score": 1.0, + "content": "tuning improves significantly of the performance of the base model, RAFT consistently", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 112, + 506, + 127 + ], + "spans": [ + { + "bbox": [ + 105, + 112, + 506, + 127 + ], + "score": 1.0, + "content": "outperforms the existing domain-specific finetuning method with or without RAG. This", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 124, + 506, + 137 + ], + "spans": [ + { + "bbox": [ + 105, + 124, + 506, + 137 + ], + "score": 1.0, + "content": "suggests the need to train the model with context. We compare our model with LLaMA", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 135, + 350, + 148 + ], + "spans": [ + { + "bbox": [ + 106, + 135, + 350, + 148 + ], + "score": 1.0, + "content": "finetuning receipes, and provide GPT-3.5 for reference.", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 2.5 + }, + { + "type": "table_body", + "bbox": [ + 106, + 155, + 505, + 249 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 106, + 155, + 505, + 249 + ], + "spans": [ + { + "bbox": [ + 106, + 155, + 505, + 249 + ], + "score": 0.983, + "html": "
PubMedHotPotHuggingFaceTorch HubTensorFlow
GPT-3.5 + RAG71.6041.529.0860.2165.59
LLaMA2-7B56.50.540.2200
LLaMA2-7B + RAG58.80.0326.4308.6043.06
DSF59.76.3861.0684.9486.56
DSF + RAG71.64.4142.5982.8060.29
RAFT (LLaMA2-7B)73.3035.2874.0084.9586.86
", + "type": "table", + "image_path": "eabf5ecf2f67950e464b9edcd9f8c5d50f934120774dd57d884d10a27971c5e1.jpg" + } + ] + } + ], + "index": 7, + "virtual_lines": [ + { + "bbox": [ + 106, + 155, + 505, + 186.33333333333334 + ], + "spans": [], + "index": 6 + }, + { + "bbox": [ + 106, + 186.33333333333334, + 505, + 217.66666666666669 + ], + "spans": [], + "index": 7 + }, + { + "bbox": [ + 106, + 217.66666666666669, + 505, + 249.00000000000003 + ], + "spans": [], + "index": 8 + } + ] + } + ], + "index": 4.75 + }, + { + "type": "title", + "bbox": [ + 107, + 272, + 185, + 286 + ], + "lines": [ + { + "bbox": [ + 104, + 271, + 187, + 288 + ], + "spans": [ + { + "bbox": [ + 104, + 271, + 187, + 288 + ], + "score": 1.0, + "content": "4 Evaluation", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 9 + }, + { + "type": "text", + "bbox": [ + 106, + 299, + 505, + 378 + ], + "lines": [ + { + "bbox": [ + 105, + 300, + 506, + 313 + ], + "spans": [ + { + "bbox": [ + 105, + 300, + 506, + 313 + ], + "score": 1.0, + "content": "We design our experiments to study how well RAFT performs compared to various base-", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 309, + 506, + 325 + ], + "spans": [ + { + "bbox": [ + 105, + 309, + 506, + 325 + ], + "score": 1.0, + "content": "lines. We find that the RAFT-7B model (a finetuned version of LlaMA-2) is better at reading", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 322, + 505, + 335 + ], + "spans": [ + { + "bbox": [ + 105, + 322, + 505, + 335 + ], + "score": 1.0, + "content": "and extracting information from in-domain documents, than domain-specific finetuned", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 333, + 505, + 346 + ], + "spans": [ + { + "bbox": [ + 105, + 333, + 505, + 346 + ], + "score": 1.0, + "content": "model, and general-purpose model with RAG. 
As an ablation, we also demonstrate how", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 344, + 506, + 357 + ], + "spans": [ + { + "bbox": [ + 105, + 344, + 506, + 357 + ], + "score": 1.0, + "content": "important it is for the model to learn with Chain-of-Thought responses. In this section,", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 355, + 506, + 369 + ], + "spans": [ + { + "bbox": [ + 105, + 355, + 506, + 369 + ], + "score": 1.0, + "content": "we will first introduce all the datasets we used in the experiments, then all the baseline", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 366, + 367, + 380 + ], + "spans": [ + { + "bbox": [ + 105, + 366, + 367, + 380 + ], + "score": 1.0, + "content": "model/fine-tuning techniques that we benchmark against.", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 13 + }, + { + "type": "text", + "bbox": [ + 107, + 394, + 506, + 537 + ], + "lines": [ + { + "bbox": [ + 106, + 394, + 506, + 407 + ], + "spans": [ + { + "bbox": [ + 106, + 394, + 506, + 407 + ], + "score": 1.0, + "content": "Datasets In our experiments, we use the following datasets to evaluate our model and", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 106, + 405, + 506, + 418 + ], + "spans": [ + { + "bbox": [ + 106, + 405, + 506, + 418 + ], + "score": 1.0, + "content": "all baselines. We selected these datasets to represent both popular and diverse domains", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 416, + 507, + 430 + ], + "spans": [ + { + "bbox": [ + 105, + 416, + 507, + 430 + ], + "score": 1.0, + "content": "including Wikipedia, Coding/API documents, and question-answering on medical docu-", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 106, + 427, + 506, + 440 + ], + "spans": [ + { + "bbox": [ + 106, + 427, + 506, + 440 + ], + "score": 1.0, + "content": "ments. Natural Questions (NQ) Kwiatkowski et al. 
(2019), Trivia QA Joshi et al. (2017) and", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 438, + 506, + 451 + ], + "spans": [ + { + "bbox": [ + 106, + 438, + 506, + 451 + ], + "score": 1.0, + "content": "HotpotQA Yang et al. (2018) are the open-domain question-answers based on Wikipedia,", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 448, + 507, + 463 + ], + "spans": [ + { + "bbox": [ + 105, + 448, + 507, + 463 + ], + "score": 1.0, + "content": "mainly focused on common knowledge (e.g., movies, sports, etc). HuggingFace, Torch Hub,", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 104, + 458, + 507, + 474 + ], + "spans": [ + { + "bbox": [ + 104, + 458, + 507, + 474 + ], + "score": 1.0, + "content": "and TensorFlow Hub are from the APIBench Patil et al. (2023) proposed in the Gorilla paper.", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 471, + 506, + 484 + ], + "spans": [ + { + "bbox": [ + 106, + 471, + 506, + 484 + ], + "score": 1.0, + "content": "These benchmarks measure how to generate the correct, functional, and executable API", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 480, + 506, + 496 + ], + "spans": [ + { + "bbox": [ + 105, + 480, + 506, + 496 + ], + "score": 1.0, + "content": "calls based on the documentation. PubMed QA Jin et al. (2019) is a question-answering", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 492, + 506, + 507 + ], + "spans": [ + { + "bbox": [ + 105, + 492, + 506, + 507 + ], + "score": 1.0, + "content": "dataset tailored only for biomedical-research question-answering. It mainly focuses on", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 503, + 506, + 518 + ], + "spans": [ + { + "bbox": [ + 105, + 503, + 506, + 518 + ], + "score": 1.0, + "content": "answering medical and biology questions based on a given set of documents. 
We would", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 514, + 505, + 528 + ], + "spans": [ + { + "bbox": [ + 105, + 514, + 198, + 528 + ], + "score": 1.0, + "content": "like to highlight that", + "type": "text" + }, + { + "bbox": [ + 199, + 515, + 219, + 526 + ], + "score": 0.25, + "content": "( \\mathrm { N Q } ,", + "type": "inline_equation" + }, + { + "bbox": [ + 219, + 514, + 247, + 528 + ], + "score": 1.0, + "content": "Trivia", + "type": "text" + }, + { + "bbox": [ + 247, + 515, + 267, + 526 + ], + "score": 0.29, + "content": "\\{ \\hat { \\mathrm { Q A } } ,", + "type": "inline_equation" + }, + { + "bbox": [ + 267, + 514, + 505, + 528 + ], + "score": 1.0, + "content": "and HotpotQA) are relatively general domain whereas", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 525, + 364, + 540 + ], + "spans": [ + { + "bbox": [ + 105, + 525, + 364, + 540 + ], + "score": 1.0, + "content": "the latter two domains are on domain-specific documents.", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 23 + }, + { + "type": "text", + "bbox": [ + 107, + 554, + 411, + 565 + ], + "lines": [ + { + "bbox": [ + 105, + 551, + 413, + 569 + ], + "spans": [ + { + "bbox": [ + 105, + 551, + 413, + 569 + ], + "score": 1.0, + "content": "Baselines We consider the following baselines for our experiments:", + "type": "text" + } + ], + "index": 30 + } + ], + "index": 30 + }, + { + "type": "text", + "bbox": [ + 131, + 577, + 506, + 732 + ], + "lines": [ + { + "bbox": [ + 132, + 575, + 505, + 591 + ], + "spans": [ + { + "bbox": [ + 132, + 575, + 505, + 591 + ], + "score": 1.0, + "content": "β€’ LlaMA2-7B-chat model with 0-shot prompting: this is the commonly used", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 141, + 587, + 506, + 601 + ], + "spans": [ + { + "bbox": [ + 141, + 587, + 506, + 601 + ], + "score": 1.0, + "content": "instruction-finetuned model for QA tasks, where we provide clearly written instruc-", + 
"type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 142, + 599, + 314, + 612 + ], + "spans": [ + { + "bbox": [ + 142, + 599, + 314, + 612 + ], + "score": 1.0, + "content": "tions, but no reference documentation.", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 135, + 615, + 506, + 631 + ], + "spans": [ + { + "bbox": [ + 135, + 615, + 331, + 631 + ], + "score": 1.0, + "content": "β€’ LlaMA2-7B-chat model with RAG (Llama2", + "type": "text" + }, + { + "bbox": [ + 331, + 618, + 340, + 628 + ], + "score": 0.29, + "content": "^ +", + "type": "inline_equation" + }, + { + "bbox": [ + 341, + 615, + 506, + 631 + ], + "score": 1.0, + "content": "RAG): similar to the previous setting,", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 142, + 628, + 505, + 642 + ], + "spans": [ + { + "bbox": [ + 142, + 628, + 505, + 642 + ], + "score": 1.0, + "content": "except here we include reference documents. This is a popular technique when", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 142, + 639, + 318, + 653 + ], + "spans": [ + { + "bbox": [ + 142, + 639, + 318, + 653 + ], + "score": 1.0, + "content": "dealing with domain-specific QA tasks.", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 140, + 657, + 507, + 672 + ], + "spans": [ + { + "bbox": [ + 140, + 657, + 507, + 672 + ], + "score": 1.0, + "content": "Domain-Specific Finetuning with 0-shot prompting (DSF): Standard supervised-", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 141, + 669, + 505, + 682 + ], + "spans": [ + { + "bbox": [ + 141, + 669, + 505, + 682 + ], + "score": 1.0, + "content": "finetuning, without documents in context. 
We find that its mostly useful to align", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 141, + 679, + 500, + 694 + ], + "spans": [ + { + "bbox": [ + 141, + 679, + 500, + 694 + ], + "score": 1.0, + "content": "the answering style of the model as well as get familiar with the domain context.", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 138, + 697, + 506, + 713 + ], + "spans": [ + { + "bbox": [ + 138, + 697, + 325, + 713 + ], + "score": 1.0, + "content": "Domain-Specific Finetuning with RAG", + "type": "text" + }, + { + "bbox": [ + 326, + 699, + 384, + 710 + ], + "score": 0.31, + "content": "( \\mathrm { D S F } + \\mathrm { R A G } )", + "type": "inline_equation" + }, + { + "bbox": [ + 385, + 697, + 506, + 713 + ], + "score": 1.0, + "content": "): Equip a domain-specific", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 141, + 709, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 141, + 709, + 505, + 723 + ], + "score": 1.0, + "content": "finetuned-model with external knowledge using RAG. 
So, for the β€œknowledge” the", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 142, + 721, + 375, + 733 + ], + "spans": [ + { + "bbox": [ + 142, + 721, + 375, + 733 + ], + "score": 1.0, + "content": "model does not know, it can still refer to the context.", + "type": "text" + } + ], + "index": 42 + } + ], + "index": 36.5 + } + ], + "page_idx": 4, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 27, + 316, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "score": 1.0, + "content": "Published as a conference paper at COLM 2024", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 302, + 751, + 308, + 760 + ], + "lines": [ + { + "bbox": [ + 301, + 750, + 310, + 762 + ], + "spans": [ + { + "bbox": [ + 301, + 750, + 310, + 762 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 12, + "width": 9 + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 155, + 505, + 249 + ], + "blocks": [ + { + "type": "table_caption", + "bbox": [ + 106, + 79, + 506, + 147 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 507, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 507, + 94 + ], + "score": 1.0, + "content": "Table 1: RAFT improves RAG performance for all specialized domains: Across PubMed,", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 91, + 507, + 105 + ], + "spans": [ + { + "bbox": [ + 105, + 91, + 507, + 105 + ], + "score": 1.0, + "content": "HotPot, HuggingFace, Torch Hub, and Tensorflow Hub, we see that Domain-specific Fine-", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 101, + 506, + 115 + ], + "spans": [ + { + "bbox": [ + 105, + 101, + 506, + 115 + ], + "score": 1.0, + "content": "tuning improves significantly of the performance of the base model, RAFT consistently", + "type": 
"text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 112, + 506, + 127 + ], + "spans": [ + { + "bbox": [ + 105, + 112, + 506, + 127 + ], + "score": 1.0, + "content": "outperforms the existing domain-specific finetuning method with or without RAG. This", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 124, + 506, + 137 + ], + "spans": [ + { + "bbox": [ + 105, + 124, + 506, + 137 + ], + "score": 1.0, + "content": "suggests the need to train the model with context. We compare our model with LLaMA", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 135, + 350, + 148 + ], + "spans": [ + { + "bbox": [ + 106, + 135, + 350, + 148 + ], + "score": 1.0, + "content": "finetuning receipes, and provide GPT-3.5 for reference.", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 2.5 + }, + { + "type": "table_body", + "bbox": [ + 106, + 155, + 505, + 249 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 106, + 155, + 505, + 249 + ], + "spans": [ + { + "bbox": [ + 106, + 155, + 505, + 249 + ], + "score": 0.983, + "html": "
PubMedHotPotHuggingFaceTorch HubTensorFlow
GPT-3.5 + RAG71.6041.529.0860.2165.59
LLaMA2-7B56.50.540.2200
LLaMA2-7B + RAG58.80.0326.4308.6043.06
DSF59.76.3861.0684.9486.56
DSF + RAG71.64.4142.5982.8060.29
RAFT (LLaMA2-7B)73.3035.2874.0084.9586.86
", + "type": "table", + "image_path": "eabf5ecf2f67950e464b9edcd9f8c5d50f934120774dd57d884d10a27971c5e1.jpg" + } + ] + } + ], + "index": 7, + "virtual_lines": [ + { + "bbox": [ + 106, + 155, + 505, + 186.33333333333334 + ], + "spans": [], + "index": 6 + }, + { + "bbox": [ + 106, + 186.33333333333334, + 505, + 217.66666666666669 + ], + "spans": [], + "index": 7 + }, + { + "bbox": [ + 106, + 217.66666666666669, + 505, + 249.00000000000003 + ], + "spans": [], + "index": 8 + } + ] + } + ], + "index": 4.75 + }, + { + "type": "title", + "bbox": [ + 107, + 272, + 185, + 286 + ], + "lines": [ + { + "bbox": [ + 104, + 271, + 187, + 288 + ], + "spans": [ + { + "bbox": [ + 104, + 271, + 187, + 288 + ], + "score": 1.0, + "content": "4 Evaluation", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 9 + }, + { + "type": "text", + "bbox": [ + 106, + 299, + 505, + 378 + ], + "lines": [ + { + "bbox": [ + 105, + 300, + 506, + 313 + ], + "spans": [ + { + "bbox": [ + 105, + 300, + 506, + 313 + ], + "score": 1.0, + "content": "We design our experiments to study how well RAFT performs compared to various base-", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 309, + 506, + 325 + ], + "spans": [ + { + "bbox": [ + 105, + 309, + 506, + 325 + ], + "score": 1.0, + "content": "lines. We find that the RAFT-7B model (a finetuned version of LlaMA-2) is better at reading", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 322, + 505, + 335 + ], + "spans": [ + { + "bbox": [ + 105, + 322, + 505, + 335 + ], + "score": 1.0, + "content": "and extracting information from in-domain documents, than domain-specific finetuned", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 333, + 505, + 346 + ], + "spans": [ + { + "bbox": [ + 105, + 333, + 505, + 346 + ], + "score": 1.0, + "content": "model, and general-purpose model with RAG. 
As an ablation, we also demonstrate how", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 344, + 506, + 357 + ], + "spans": [ + { + "bbox": [ + 105, + 344, + 506, + 357 + ], + "score": 1.0, + "content": "important it is for the model to learn with Chain-of-Thought responses. In this section,", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 355, + 506, + 369 + ], + "spans": [ + { + "bbox": [ + 105, + 355, + 506, + 369 + ], + "score": 1.0, + "content": "we will first introduce all the datasets we used in the experiments, then all the baseline", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 366, + 367, + 380 + ], + "spans": [ + { + "bbox": [ + 105, + 366, + 367, + 380 + ], + "score": 1.0, + "content": "model/fine-tuning techniques that we benchmark against.", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 13, + "bbox_fs": [ + 105, + 300, + 506, + 380 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 394, + 506, + 537 + ], + "lines": [ + { + "bbox": [ + 106, + 394, + 506, + 407 + ], + "spans": [ + { + "bbox": [ + 106, + 394, + 506, + 407 + ], + "score": 1.0, + "content": "Datasets In our experiments, we use the following datasets to evaluate our model and", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 106, + 405, + 506, + 418 + ], + "spans": [ + { + "bbox": [ + 106, + 405, + 506, + 418 + ], + "score": 1.0, + "content": "all baselines. We selected these datasets to represent both popular and diverse domains", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 416, + 507, + 430 + ], + "spans": [ + { + "bbox": [ + 105, + 416, + 507, + 430 + ], + "score": 1.0, + "content": "including Wikipedia, Coding/API documents, and question-answering on medical docu-", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 106, + 427, + 506, + 440 + ], + "spans": [ + { + "bbox": [ + 106, + 427, + 506, + 440 + ], + "score": 1.0, + "content": "ments. 
Natural Questions (NQ) Kwiatkowski et al. (2019), Trivia QA Joshi et al. (2017) and", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 438, + 506, + 451 + ], + "spans": [ + { + "bbox": [ + 106, + 438, + 506, + 451 + ], + "score": 1.0, + "content": "HotpotQA Yang et al. (2018) are the open-domain question-answers based on Wikipedia,", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 448, + 507, + 463 + ], + "spans": [ + { + "bbox": [ + 105, + 448, + 507, + 463 + ], + "score": 1.0, + "content": "mainly focused on common knowledge (e.g., movies, sports, etc). HuggingFace, Torch Hub,", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 104, + 458, + 507, + 474 + ], + "spans": [ + { + "bbox": [ + 104, + 458, + 507, + 474 + ], + "score": 1.0, + "content": "and TensorFlow Hub are from the APIBench Patil et al. (2023) proposed in the Gorilla paper.", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 471, + 506, + 484 + ], + "spans": [ + { + "bbox": [ + 106, + 471, + 506, + 484 + ], + "score": 1.0, + "content": "These benchmarks measure how to generate the correct, functional, and executable API", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 480, + 506, + 496 + ], + "spans": [ + { + "bbox": [ + 105, + 480, + 506, + 496 + ], + "score": 1.0, + "content": "calls based on the documentation. PubMed QA Jin et al. (2019) is a question-answering", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 492, + 506, + 507 + ], + "spans": [ + { + "bbox": [ + 105, + 492, + 506, + 507 + ], + "score": 1.0, + "content": "dataset tailored only for biomedical-research question-answering. It mainly focuses on", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 503, + 506, + 518 + ], + "spans": [ + { + "bbox": [ + 105, + 503, + 506, + 518 + ], + "score": 1.0, + "content": "answering medical and biology questions based on a given set of documents. 
We would", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 514, + 505, + 528 + ], + "spans": [ + { + "bbox": [ + 105, + 514, + 198, + 528 + ], + "score": 1.0, + "content": "like to highlight that", + "type": "text" + }, + { + "bbox": [ + 199, + 515, + 219, + 526 + ], + "score": 0.25, + "content": "( \\mathrm { N Q } ,", + "type": "inline_equation" + }, + { + "bbox": [ + 219, + 514, + 247, + 528 + ], + "score": 1.0, + "content": "Trivia", + "type": "text" + }, + { + "bbox": [ + 247, + 515, + 267, + 526 + ], + "score": 0.29, + "content": "\\{ \\hat { \\mathrm { Q A } } ,", + "type": "inline_equation" + }, + { + "bbox": [ + 267, + 514, + 505, + 528 + ], + "score": 1.0, + "content": "and HotpotQA) are relatively general domain whereas", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 525, + 364, + 540 + ], + "spans": [ + { + "bbox": [ + 105, + 525, + 364, + 540 + ], + "score": 1.0, + "content": "the latter two domains are on domain-specific documents.", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 23, + "bbox_fs": [ + 104, + 394, + 507, + 540 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 554, + 411, + 565 + ], + "lines": [ + { + "bbox": [ + 105, + 551, + 413, + 569 + ], + "spans": [ + { + "bbox": [ + 105, + 551, + 413, + 569 + ], + "score": 1.0, + "content": "Baselines We consider the following baselines for our experiments:", + "type": "text" + } + ], + "index": 30 + } + ], + "index": 30, + "bbox_fs": [ + 105, + 551, + 413, + 569 + ] + }, + { + "type": "list", + "bbox": [ + 131, + 577, + 506, + 732 + ], + "lines": [ + { + "bbox": [ + 132, + 575, + 505, + 591 + ], + "spans": [ + { + "bbox": [ + 132, + 575, + 505, + 591 + ], + "score": 1.0, + "content": "β€’ LlaMA2-7B-chat model with 0-shot prompting: this is the commonly used", + "type": "text" + } + ], + "index": 31, + "is_list_start_line": true + }, + { + "bbox": [ + 141, + 587, + 506, + 601 + ], + "spans": [ + { + "bbox": [ + 141, + 587, + 506, + 601 + 
], + "score": 1.0, + "content": "instruction-finetuned model for QA tasks, where we provide clearly written instruc-", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 142, + 599, + 314, + 612 + ], + "spans": [ + { + "bbox": [ + 142, + 599, + 314, + 612 + ], + "score": 1.0, + "content": "tions, but no reference documentation.", + "type": "text" + } + ], + "index": 33, + "is_list_end_line": true + }, + { + "bbox": [ + 135, + 615, + 506, + 631 + ], + "spans": [ + { + "bbox": [ + 135, + 615, + 331, + 631 + ], + "score": 1.0, + "content": "β€’ LlaMA2-7B-chat model with RAG (Llama2", + "type": "text" + }, + { + "bbox": [ + 331, + 618, + 340, + 628 + ], + "score": 0.29, + "content": "^ +", + "type": "inline_equation" + }, + { + "bbox": [ + 341, + 615, + 506, + 631 + ], + "score": 1.0, + "content": "RAG): similar to the previous setting,", + "type": "text" + } + ], + "index": 34, + "is_list_start_line": true + }, + { + "bbox": [ + 142, + 628, + 505, + 642 + ], + "spans": [ + { + "bbox": [ + 142, + 628, + 505, + 642 + ], + "score": 1.0, + "content": "except here we include reference documents. This is a popular technique when", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 142, + 639, + 318, + 653 + ], + "spans": [ + { + "bbox": [ + 142, + 639, + 318, + 653 + ], + "score": 1.0, + "content": "dealing with domain-specific QA tasks.", + "type": "text" + } + ], + "index": 36, + "is_list_end_line": true + }, + { + "bbox": [ + 140, + 657, + 507, + 672 + ], + "spans": [ + { + "bbox": [ + 140, + 657, + 507, + 672 + ], + "score": 1.0, + "content": "Domain-Specific Finetuning with 0-shot prompting (DSF): Standard supervised-", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 141, + 669, + 505, + 682 + ], + "spans": [ + { + "bbox": [ + 141, + 669, + 505, + 682 + ], + "score": 1.0, + "content": "finetuning, without documents in context. 
We find that its mostly useful to align", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 141, + 679, + 500, + 694 + ], + "spans": [ + { + "bbox": [ + 141, + 679, + 500, + 694 + ], + "score": 1.0, + "content": "the answering style of the model as well as get familiar with the domain context.", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 138, + 697, + 506, + 713 + ], + "spans": [ + { + "bbox": [ + 138, + 697, + 325, + 713 + ], + "score": 1.0, + "content": "Domain-Specific Finetuning with RAG", + "type": "text" + }, + { + "bbox": [ + 326, + 699, + 384, + 710 + ], + "score": 0.31, + "content": "( \\mathrm { D S F } + \\mathrm { R A G } )", + "type": "inline_equation" + }, + { + "bbox": [ + 385, + 697, + 506, + 713 + ], + "score": 1.0, + "content": "): Equip a domain-specific", + "type": "text" + } + ], + "index": 40, + "is_list_start_line": true + }, + { + "bbox": [ + 141, + 709, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 141, + 709, + 505, + 723 + ], + "score": 1.0, + "content": "finetuned-model with external knowledge using RAG. 
So, for the β€œknowledge” the", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 142, + 721, + 375, + 733 + ], + "spans": [ + { + "bbox": [ + 142, + 721, + 375, + 733 + ], + "score": 1.0, + "content": "model does not know, it can still refer to the context.", + "type": "text" + } + ], + "index": 42, + "is_list_end_line": true + } + ], + "index": 36.5, + "bbox_fs": [ + 132, + 575, + 507, + 733 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 133, + 506, + 177 + ], + "blocks": [ + { + "type": "table_caption", + "bbox": [ + 106, + 79, + 505, + 125 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 506, + 92 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 506, + 92 + ], + "score": 1.0, + "content": "Table 2: Ablation on Chain-of-Thought: The numbers of RAFT and RAFT without", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 90, + 505, + 104 + ], + "spans": [ + { + "bbox": [ + 105, + 90, + 505, + 104 + ], + "score": 1.0, + "content": "CoT. Results on various datasets show that adding CoT can significantly improve the", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 102, + 505, + 115 + ], + "spans": [ + { + "bbox": [ + 105, + 102, + 342, + 115 + ], + "score": 1.0, + "content": "performance of the finetuned model. With a gains of", + "type": "text" + }, + { + "bbox": [ + 343, + 102, + 370, + 113 + ], + "score": 0.87, + "content": "9 . 6 6 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 371, + 102, + 390, + 115 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 391, + 102, + 424, + 113 + ], + "score": 0.88, + "content": "1 \\bar { 4 } . 
9 3 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 424, + 102, + 505, + 115 + ], + "score": 1.0, + "content": "in the Hotpot QA", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 113, + 282, + 126 + ], + "spans": [ + { + "bbox": [ + 106, + 113, + 282, + 126 + ], + "score": 1.0, + "content": "and HuggingFace datasets respectively.", + "type": "text" + } + ], + "index": 3 + } + ], + "index": 1.5 + }, + { + "type": "table_body", + "bbox": [ + 106, + 133, + 506, + 177 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 106, + 133, + 506, + 177 + ], + "spans": [ + { + "bbox": [ + 106, + 133, + 506, + 177 + ], + "score": 0.974, + "html": "
PubMedHotpotQAHuggingFaceTorch HubTensorFlow
RAFT w.0 CoT68.3025.6259.0786.5683.21
RAFT73.3035.2874.0084.9586.86
", + "type": "table", + "image_path": "19bf288f1875c260ddfbb6355555262c6edf9186b41d826d401d956d5d148e14.jpg" + } + ] + } + ], + "index": 5, + "virtual_lines": [ + { + "bbox": [ + 106, + 133, + 506, + 147.66666666666666 + ], + "spans": [], + "index": 4 + }, + { + "bbox": [ + 106, + 147.66666666666666, + 506, + 162.33333333333331 + ], + "spans": [], + "index": 5 + }, + { + "bbox": [ + 106, + 162.33333333333331, + 506, + 176.99999999999997 + ], + "spans": [], + "index": 6 + } + ] + } + ], + "index": 3.25 + }, + { + "type": "title", + "bbox": [ + 107, + 200, + 163, + 211 + ], + "lines": [ + { + "bbox": [ + 105, + 199, + 163, + 213 + ], + "spans": [ + { + "bbox": [ + 105, + 199, + 163, + 213 + ], + "score": 1.0, + "content": "4.1 Results", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 7 + }, + { + "type": "text", + "bbox": [ + 107, + 221, + 505, + 343 + ], + "lines": [ + { + "bbox": [ + 106, + 221, + 505, + 234 + ], + "spans": [ + { + "bbox": [ + 106, + 221, + 505, + 234 + ], + "score": 1.0, + "content": "Using the above datasets and baselines, we evaluate our model RAFT and demonstrate", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 232, + 505, + 245 + ], + "spans": [ + { + "bbox": [ + 106, + 232, + 505, + 245 + ], + "score": 1.0, + "content": "the effectiveness of RAFT in Tab. 1. We see that RAFT consistently and significantly", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 244, + 506, + 255 + ], + "spans": [ + { + "bbox": [ + 105, + 244, + 506, + 255 + ], + "score": 1.0, + "content": "outperforms the baselines. 
Compared with the base Llama-2 instruction-tuned model,", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 253, + 506, + 269 + ], + "spans": [ + { + "bbox": [ + 105, + 253, + 506, + 269 + ], + "score": 1.0, + "content": "RAFT with RAG does much better in terms of extracting information as well as being", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 264, + 506, + 279 + ], + "spans": [ + { + "bbox": [ + 105, + 264, + 339, + 279 + ], + "score": 1.0, + "content": "robust towards distractors. The gain can be as big as", + "type": "text" + }, + { + "bbox": [ + 339, + 266, + 372, + 276 + ], + "score": 0.88, + "content": "3 5 . 2 5 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 372, + 264, + 457, + 279 + ], + "score": 1.0, + "content": "on Hotpot QA and", + "type": "text" + }, + { + "bbox": [ + 458, + 266, + 490, + 276 + ], + "score": 0.87, + "content": "7 6 . 3 5 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 491, + 264, + 506, + 279 + ], + "score": 1.0, + "content": "on", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 276, + 506, + 290 + ], + "spans": [ + { + "bbox": [ + 105, + 276, + 506, + 290 + ], + "score": 1.0, + "content": "Torch Hub evaluation. Compared with DSF on the specific dataset, our model does better at", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 287, + 506, + 301 + ], + "spans": [ + { + "bbox": [ + 105, + 287, + 506, + 301 + ], + "score": 1.0, + "content": "relying on the provided context to solve the problem. RAFT does much better on the tasks", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 298, + 506, + 312 + ], + "spans": [ + { + "bbox": [ + 105, + 298, + 285, + 312 + ], + "score": 1.0, + "content": "like Hotpot and HuggingFace datasets", + "type": "text" + }, + { + "bbox": [ + 285, + 299, + 319, + 309 + ], + "score": 0.83, + "content": "( 3 0 . 
{ \\bar { 8 } } 7 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 320, + 298, + 389, + 312 + ], + "score": 1.0, + "content": "on Hotpot and", + "type": "text" + }, + { + "bbox": [ + 389, + 298, + 423, + 309 + ], + "score": 0.87, + "content": "3 1 . 4 1 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 423, + 298, + 506, + 312 + ], + "score": 1.0, + "content": "on HuggingFace).", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 308, + 506, + 323 + ], + "spans": [ + { + "bbox": [ + 104, + 308, + 506, + 323 + ], + "score": 1.0, + "content": "Note that for PubMed QA, since it is a binary yes/no question, we don’t observe significant", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 319, + 506, + 335 + ], + "spans": [ + { + "bbox": [ + 104, + 319, + 289, + 335 + ], + "score": 1.0, + "content": "gains when we compare our model with", + "type": "text" + }, + { + "bbox": [ + 289, + 321, + 342, + 331 + ], + "score": 0.42, + "content": "\\mathrm { D } \\mathbf { \\dot { S } } \\mathbf { \\dot { F } } + \\mathbf { R } \\mathbf { A } \\mathbf { G }", + "type": "inline_equation" + }, + { + "bbox": [ + 343, + 319, + 506, + 335 + ], + "score": 1.0, + "content": ". 
Even compared with a much larger", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 330, + 421, + 345 + ], + "spans": [ + { + "bbox": [ + 105, + 330, + 421, + 345 + ], + "score": 1.0, + "content": "and better model GPT-3.5, RAFT demonstrates significant advantages.", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 13 + }, + { + "type": "text", + "bbox": [ + 107, + 348, + 505, + 447 + ], + "lines": [ + { + "bbox": [ + 106, + 348, + 506, + 361 + ], + "spans": [ + { + "bbox": [ + 106, + 348, + 506, + 361 + ], + "score": 1.0, + "content": "Overall, the LLaMA-7B model, both with and without the RAG, performs poorly due to its", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 358, + 507, + 374 + ], + "spans": [ + { + "bbox": [ + 104, + 358, + 507, + 374 + ], + "score": 1.0, + "content": "answering style not aligning with the ground truth. By applying domain-specific tuning,", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 371, + 506, + 383 + ], + "spans": [ + { + "bbox": [ + 105, + 371, + 506, + 383 + ], + "score": 1.0, + "content": "we significantly enhance its performance. This process enables the model to learn and adopt", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 380, + 506, + 395 + ], + "spans": [ + { + "bbox": [ + 105, + 380, + 506, + 395 + ], + "score": 1.0, + "content": "the appropriate style of answering. However, introducing RAG to a domain-specifically", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 106, + 392, + 506, + 405 + ], + "spans": [ + { + "bbox": [ + 106, + 392, + 506, + 405 + ], + "score": 1.0, + "content": "fine-tuned (DSF) model doesn’t invariably lead to better outcomes. 
This might indicate that", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 402, + 506, + 417 + ], + "spans": [ + { + "bbox": [ + 105, + 402, + 506, + 417 + ], + "score": 1.0, + "content": "the model lacks training in context processing and extracting useful information from it. By", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 414, + 506, + 428 + ], + "spans": [ + { + "bbox": [ + 105, + 414, + 506, + 428 + ], + "score": 1.0, + "content": "incorporating our method, RAFT , we train the model not only to match its answering style", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 424, + 507, + 439 + ], + "spans": [ + { + "bbox": [ + 105, + 424, + 507, + 439 + ], + "score": 1.0, + "content": "with that required but also to improve its document processing capabilities. Consequently,", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 436, + 272, + 449 + ], + "spans": [ + { + "bbox": [ + 105, + 436, + 272, + 449 + ], + "score": 1.0, + "content": "our approach outperforms all others.", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 23 + }, + { + "type": "title", + "bbox": [ + 107, + 463, + 187, + 475 + ], + "lines": [ + { + "bbox": [ + 105, + 462, + 189, + 476 + ], + "spans": [ + { + "bbox": [ + 105, + 462, + 189, + 476 + ], + "score": 1.0, + "content": "4.2 Effect of CoT", + "type": "text" + } + ], + "index": 28 + } + ], + "index": 28 + }, + { + "type": "text", + "bbox": [ + 107, + 485, + 505, + 585 + ], + "lines": [ + { + "bbox": [ + 105, + 483, + 506, + 499 + ], + "spans": [ + { + "bbox": [ + 105, + 483, + 506, + 499 + ], + "score": 1.0, + "content": "We also conduct an analysis to evaluate the effectiveness of the Chain-of-Thought approach", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 494, + 506, + 510 + ], + "spans": [ + { + "bbox": [ + 105, + 494, + 506, + 510 + ], + "score": 1.0, + "content": "in enhancing the model’s performance. 
As indicated in Table 2, simply providing the answer", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 506, + 505, + 520 + ], + "spans": [ + { + "bbox": [ + 105, + 506, + 505, + 520 + ], + "score": 1.0, + "content": "to a question may not always be adequate. This approach can lead to a rapid decrease", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 517, + 506, + 532 + ], + "spans": [ + { + "bbox": [ + 105, + 517, + 506, + 532 + ], + "score": 1.0, + "content": "in loss, resulting in the model beginning to overfit. Incorporating a reasoning chain that", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 104, + 528, + 506, + 543 + ], + "spans": [ + { + "bbox": [ + 104, + 528, + 506, + 543 + ], + "score": 1.0, + "content": "not only guides the model to the answer but also enriches the model’s understanding can", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 106, + 540, + 506, + 553 + ], + "spans": [ + { + "bbox": [ + 106, + 540, + 506, + 553 + ], + "score": 1.0, + "content": "improve the overall accuracy and prevent overfitting to concise answers. In our experiments,", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 549, + 506, + 565 + ], + "spans": [ + { + "bbox": [ + 105, + 549, + 506, + 565 + ], + "score": 1.0, + "content": "integrating the Chain-of-Thought significantly enhances training robustness. 
We employ", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 560, + 506, + 576 + ], + "spans": [ + { + "bbox": [ + 105, + 560, + 506, + 576 + ], + "score": 1.0, + "content": "GPT-4-1106 to generate our Chain-of-Thought prompts and include an example of the", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 573, + 234, + 586 + ], + "spans": [ + { + "bbox": [ + 105, + 573, + 234, + 586 + ], + "score": 1.0, + "content": "prompt we used in Figure 3.", + "type": "text" + } + ], + "index": 37 + } + ], + "index": 33 + }, + { + "type": "title", + "bbox": [ + 107, + 600, + 221, + 612 + ], + "lines": [ + { + "bbox": [ + 105, + 599, + 222, + 614 + ], + "spans": [ + { + "bbox": [ + 105, + 599, + 222, + 614 + ], + "score": 1.0, + "content": "4.3 Qualitative Analysis", + "type": "text" + } + ], + "index": 38 + } + ], + "index": 38 + }, + { + "type": "text", + "bbox": [ + 107, + 621, + 505, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 621, + 505, + 635 + ], + "spans": [ + { + "bbox": [ + 105, + 621, + 505, + 635 + ], + "score": 1.0, + "content": "To illustrate the potential advantages of RAFT over the domain-specifically fine-tuned", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 632, + 505, + 646 + ], + "spans": [ + { + "bbox": [ + 105, + 632, + 505, + 646 + ], + "score": 1.0, + "content": "(DSF) approach, we present a comparative example in Figure 4. This example qualitatively", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 642, + 506, + 657 + ], + "spans": [ + { + "bbox": [ + 105, + 642, + 506, + 657 + ], + "score": 1.0, + "content": "demonstrates a scenario where the DSF model becomes confused by a question asking for", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 653, + 505, + 668 + ], + "spans": [ + { + "bbox": [ + 105, + 653, + 505, + 668 + ], + "score": 1.0, + "content": "the identity of a screenwriter. 
Instead of providing the correct name, it mistakenly cites one", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 664, + 505, + 679 + ], + "spans": [ + { + "bbox": [ + 105, + 664, + 505, + 679 + ], + "score": 1.0, + "content": "of the films written by the screenwriter. In contrast, the RAFT model accurately answers the", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 104, + 676, + 506, + 691 + ], + "spans": [ + { + "bbox": [ + 104, + 676, + 506, + 691 + ], + "score": 1.0, + "content": "question. This discrepancy suggests that training a model solely with question-answer pairs", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 688, + 505, + 700 + ], + "spans": [ + { + "bbox": [ + 105, + 688, + 505, + 700 + ], + "score": 1.0, + "content": "may impair its ability to derive relevant context from provided documents. The comparison", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 699, + 505, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 699, + 505, + 712 + ], + "score": 1.0, + "content": "underscores the importance of incorporating both standard instructional tuning and context", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 105, + 709, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 505, + 723 + ], + "score": 1.0, + "content": "comprehension into the training dataset to preserve and enhance the model’s ability to", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 105, + 720, + 209, + 734 + ], + "spans": [ + { + "bbox": [ + 105, + 720, + 209, + 734 + ], + "score": 1.0, + "content": "process text effectively.", + "type": "text" + } + ], + "index": 48 + } + ], + "index": 43.5 + } + ], + "page_idx": 5, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 26, + 316, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + 
"score": 1.0, + "content": "Published as a conference paper at COLM 2024", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 302, + 752, + 308, + 760 + ], + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 762 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 762 + ], + "score": 1.0, + "content": "6", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 133, + 506, + 177 + ], + "blocks": [ + { + "type": "table_caption", + "bbox": [ + 106, + 79, + 505, + 125 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 506, + 92 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 506, + 92 + ], + "score": 1.0, + "content": "Table 2: Ablation on Chain-of-Thought: The numbers of RAFT and RAFT without", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 90, + 505, + 104 + ], + "spans": [ + { + "bbox": [ + 105, + 90, + 505, + 104 + ], + "score": 1.0, + "content": "CoT. Results on various datasets show that adding CoT can significantly improve the", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 102, + 505, + 115 + ], + "spans": [ + { + "bbox": [ + 105, + 102, + 342, + 115 + ], + "score": 1.0, + "content": "performance of the finetuned model. With a gains of", + "type": "text" + }, + { + "bbox": [ + 343, + 102, + 370, + 113 + ], + "score": 0.87, + "content": "9 . 6 6 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 371, + 102, + 390, + 115 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 391, + 102, + 424, + 113 + ], + "score": 0.88, + "content": "1 \\bar { 4 } . 
9 3 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 424, + 102, + 505, + 115 + ], + "score": 1.0, + "content": "in the Hotpot QA", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 113, + 282, + 126 + ], + "spans": [ + { + "bbox": [ + 106, + 113, + 282, + 126 + ], + "score": 1.0, + "content": "and HuggingFace datasets respectively.", + "type": "text" + } + ], + "index": 3 + } + ], + "index": 1.5 + }, + { + "type": "table_body", + "bbox": [ + 106, + 133, + 506, + 177 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 106, + 133, + 506, + 177 + ], + "spans": [ + { + "bbox": [ + 106, + 133, + 506, + 177 + ], + "score": 0.974, + "html": "
PubMedHotpotQAHuggingFaceTorch HubTensorFlow
RAFT w.0 CoT68.3025.6259.0786.5683.21
RAFT73.3035.2874.0084.9586.86
", + "type": "table", + "image_path": "19bf288f1875c260ddfbb6355555262c6edf9186b41d826d401d956d5d148e14.jpg" + } + ] + } + ], + "index": 5, + "virtual_lines": [ + { + "bbox": [ + 106, + 133, + 506, + 147.66666666666666 + ], + "spans": [], + "index": 4 + }, + { + "bbox": [ + 106, + 147.66666666666666, + 506, + 162.33333333333331 + ], + "spans": [], + "index": 5 + }, + { + "bbox": [ + 106, + 162.33333333333331, + 506, + 176.99999999999997 + ], + "spans": [], + "index": 6 + } + ] + } + ], + "index": 3.25 + }, + { + "type": "title", + "bbox": [ + 107, + 200, + 163, + 211 + ], + "lines": [ + { + "bbox": [ + 105, + 199, + 163, + 213 + ], + "spans": [ + { + "bbox": [ + 105, + 199, + 163, + 213 + ], + "score": 1.0, + "content": "4.1 Results", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 7 + }, + { + "type": "text", + "bbox": [ + 107, + 221, + 505, + 343 + ], + "lines": [ + { + "bbox": [ + 106, + 221, + 505, + 234 + ], + "spans": [ + { + "bbox": [ + 106, + 221, + 505, + 234 + ], + "score": 1.0, + "content": "Using the above datasets and baselines, we evaluate our model RAFT and demonstrate", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 232, + 505, + 245 + ], + "spans": [ + { + "bbox": [ + 106, + 232, + 505, + 245 + ], + "score": 1.0, + "content": "the effectiveness of RAFT in Tab. 1. We see that RAFT consistently and significantly", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 244, + 506, + 255 + ], + "spans": [ + { + "bbox": [ + 105, + 244, + 506, + 255 + ], + "score": 1.0, + "content": "outperforms the baselines. 
Compared with the base Llama-2 instruction-tuned model,", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 253, + 506, + 269 + ], + "spans": [ + { + "bbox": [ + 105, + 253, + 506, + 269 + ], + "score": 1.0, + "content": "RAFT with RAG does much better in terms of extracting information as well as being", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 264, + 506, + 279 + ], + "spans": [ + { + "bbox": [ + 105, + 264, + 339, + 279 + ], + "score": 1.0, + "content": "robust towards distractors. The gain can be as big as", + "type": "text" + }, + { + "bbox": [ + 339, + 266, + 372, + 276 + ], + "score": 0.88, + "content": "3 5 . 2 5 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 372, + 264, + 457, + 279 + ], + "score": 1.0, + "content": "on Hotpot QA and", + "type": "text" + }, + { + "bbox": [ + 458, + 266, + 490, + 276 + ], + "score": 0.87, + "content": "7 6 . 3 5 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 491, + 264, + 506, + 279 + ], + "score": 1.0, + "content": "on", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 276, + 506, + 290 + ], + "spans": [ + { + "bbox": [ + 105, + 276, + 506, + 290 + ], + "score": 1.0, + "content": "Torch Hub evaluation. Compared with DSF on the specific dataset, our model does better at", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 287, + 506, + 301 + ], + "spans": [ + { + "bbox": [ + 105, + 287, + 506, + 301 + ], + "score": 1.0, + "content": "relying on the provided context to solve the problem. RAFT does much better on the tasks", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 298, + 506, + 312 + ], + "spans": [ + { + "bbox": [ + 105, + 298, + 285, + 312 + ], + "score": 1.0, + "content": "like Hotpot and HuggingFace datasets", + "type": "text" + }, + { + "bbox": [ + 285, + 299, + 319, + 309 + ], + "score": 0.83, + "content": "( 3 0 . 
{ \\bar { 8 } } 7 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 320, + 298, + 389, + 312 + ], + "score": 1.0, + "content": "on Hotpot and", + "type": "text" + }, + { + "bbox": [ + 389, + 298, + 423, + 309 + ], + "score": 0.87, + "content": "3 1 . 4 1 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 423, + 298, + 506, + 312 + ], + "score": 1.0, + "content": "on HuggingFace).", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 308, + 506, + 323 + ], + "spans": [ + { + "bbox": [ + 104, + 308, + 506, + 323 + ], + "score": 1.0, + "content": "Note that for PubMed QA, since it is a binary yes/no question, we don’t observe significant", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 319, + 506, + 335 + ], + "spans": [ + { + "bbox": [ + 104, + 319, + 289, + 335 + ], + "score": 1.0, + "content": "gains when we compare our model with", + "type": "text" + }, + { + "bbox": [ + 289, + 321, + 342, + 331 + ], + "score": 0.42, + "content": "\\mathrm { D } \\mathbf { \\dot { S } } \\mathbf { \\dot { F } } + \\mathbf { R } \\mathbf { A } \\mathbf { G }", + "type": "inline_equation" + }, + { + "bbox": [ + 343, + 319, + 506, + 335 + ], + "score": 1.0, + "content": ". 
Even compared with a much larger", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 330, + 421, + 345 + ], + "spans": [ + { + "bbox": [ + 105, + 330, + 421, + 345 + ], + "score": 1.0, + "content": "and better model GPT-3.5, RAFT demonstrates significant advantages.", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 13, + "bbox_fs": [ + 104, + 221, + 506, + 345 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 348, + 505, + 447 + ], + "lines": [ + { + "bbox": [ + 106, + 348, + 506, + 361 + ], + "spans": [ + { + "bbox": [ + 106, + 348, + 506, + 361 + ], + "score": 1.0, + "content": "Overall, the LLaMA-7B model, both with and without the RAG, performs poorly due to its", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 358, + 507, + 374 + ], + "spans": [ + { + "bbox": [ + 104, + 358, + 507, + 374 + ], + "score": 1.0, + "content": "answering style not aligning with the ground truth. By applying domain-specific tuning,", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 371, + 506, + 383 + ], + "spans": [ + { + "bbox": [ + 105, + 371, + 506, + 383 + ], + "score": 1.0, + "content": "we significantly enhance its performance. This process enables the model to learn and adopt", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 380, + 506, + 395 + ], + "spans": [ + { + "bbox": [ + 105, + 380, + 506, + 395 + ], + "score": 1.0, + "content": "the appropriate style of answering. However, introducing RAG to a domain-specifically", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 106, + 392, + 506, + 405 + ], + "spans": [ + { + "bbox": [ + 106, + 392, + 506, + 405 + ], + "score": 1.0, + "content": "fine-tuned (DSF) model doesn’t invariably lead to better outcomes. 
This might indicate that", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 402, + 506, + 417 + ], + "spans": [ + { + "bbox": [ + 105, + 402, + 506, + 417 + ], + "score": 1.0, + "content": "the model lacks training in context processing and extracting useful information from it. By", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 414, + 506, + 428 + ], + "spans": [ + { + "bbox": [ + 105, + 414, + 506, + 428 + ], + "score": 1.0, + "content": "incorporating our method, RAFT , we train the model not only to match its answering style", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 424, + 507, + 439 + ], + "spans": [ + { + "bbox": [ + 105, + 424, + 507, + 439 + ], + "score": 1.0, + "content": "with that required but also to improve its document processing capabilities. Consequently,", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 436, + 272, + 449 + ], + "spans": [ + { + "bbox": [ + 105, + 436, + 272, + 449 + ], + "score": 1.0, + "content": "our approach outperforms all others.", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 23, + "bbox_fs": [ + 104, + 348, + 507, + 449 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 463, + 187, + 475 + ], + "lines": [ + { + "bbox": [ + 105, + 462, + 189, + 476 + ], + "spans": [ + { + "bbox": [ + 105, + 462, + 189, + 476 + ], + "score": 1.0, + "content": "4.2 Effect of CoT", + "type": "text" + } + ], + "index": 28 + } + ], + "index": 28 + }, + { + "type": "text", + "bbox": [ + 107, + 485, + 505, + 585 + ], + "lines": [ + { + "bbox": [ + 105, + 483, + 506, + 499 + ], + "spans": [ + { + "bbox": [ + 105, + 483, + 506, + 499 + ], + "score": 1.0, + "content": "We also conduct an analysis to evaluate the effectiveness of the Chain-of-Thought approach", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 494, + 506, + 510 + ], + "spans": [ + { + "bbox": [ + 105, + 494, + 506, + 510 + ], + "score": 1.0, + "content": 
"in enhancing the model’s performance. As indicated in Table 2, simply providing the answer", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 506, + 505, + 520 + ], + "spans": [ + { + "bbox": [ + 105, + 506, + 505, + 520 + ], + "score": 1.0, + "content": "to a question may not always be adequate. This approach can lead to a rapid decrease", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 517, + 506, + 532 + ], + "spans": [ + { + "bbox": [ + 105, + 517, + 506, + 532 + ], + "score": 1.0, + "content": "in loss, resulting in the model beginning to overfit. Incorporating a reasoning chain that", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 104, + 528, + 506, + 543 + ], + "spans": [ + { + "bbox": [ + 104, + 528, + 506, + 543 + ], + "score": 1.0, + "content": "not only guides the model to the answer but also enriches the model’s understanding can", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 106, + 540, + 506, + 553 + ], + "spans": [ + { + "bbox": [ + 106, + 540, + 506, + 553 + ], + "score": 1.0, + "content": "improve the overall accuracy and prevent overfitting to concise answers. In our experiments,", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 549, + 506, + 565 + ], + "spans": [ + { + "bbox": [ + 105, + 549, + 506, + 565 + ], + "score": 1.0, + "content": "integrating the Chain-of-Thought significantly enhances training robustness. 
We employ", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 560, + 506, + 576 + ], + "spans": [ + { + "bbox": [ + 105, + 560, + 506, + 576 + ], + "score": 1.0, + "content": "GPT-4-1106 to generate our Chain-of-Thought prompts and include an example of the", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 573, + 234, + 586 + ], + "spans": [ + { + "bbox": [ + 105, + 573, + 234, + 586 + ], + "score": 1.0, + "content": "prompt we used in Figure 3.", + "type": "text" + } + ], + "index": 37 + } + ], + "index": 33, + "bbox_fs": [ + 104, + 483, + 506, + 586 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 600, + 221, + 612 + ], + "lines": [ + { + "bbox": [ + 105, + 599, + 222, + 614 + ], + "spans": [ + { + "bbox": [ + 105, + 599, + 222, + 614 + ], + "score": 1.0, + "content": "4.3 Qualitative Analysis", + "type": "text" + } + ], + "index": 38 + } + ], + "index": 38 + }, + { + "type": "text", + "bbox": [ + 107, + 621, + 505, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 621, + 505, + 635 + ], + "spans": [ + { + "bbox": [ + 105, + 621, + 505, + 635 + ], + "score": 1.0, + "content": "To illustrate the potential advantages of RAFT over the domain-specifically fine-tuned", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 632, + 505, + 646 + ], + "spans": [ + { + "bbox": [ + 105, + 632, + 505, + 646 + ], + "score": 1.0, + "content": "(DSF) approach, we present a comparative example in Figure 4. This example qualitatively", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 642, + 506, + 657 + ], + "spans": [ + { + "bbox": [ + 105, + 642, + 506, + 657 + ], + "score": 1.0, + "content": "demonstrates a scenario where the DSF model becomes confused by a question asking for", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 653, + 505, + 668 + ], + "spans": [ + { + "bbox": [ + 105, + 653, + 505, + 668 + ], + "score": 1.0, + "content": "the identity of a screenwriter. 
Instead of providing the correct name, it mistakenly cites one", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 664, + 505, + 679 + ], + "spans": [ + { + "bbox": [ + 105, + 664, + 505, + 679 + ], + "score": 1.0, + "content": "of the films written by the screenwriter. In contrast, the RAFT model accurately answers the", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 104, + 676, + 506, + 691 + ], + "spans": [ + { + "bbox": [ + 104, + 676, + 506, + 691 + ], + "score": 1.0, + "content": "question. This discrepancy suggests that training a model solely with question-answer pairs", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 688, + 505, + 700 + ], + "spans": [ + { + "bbox": [ + 105, + 688, + 505, + 700 + ], + "score": 1.0, + "content": "may impair its ability to derive relevant context from provided documents. The comparison", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 699, + 505, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 699, + 505, + 712 + ], + "score": 1.0, + "content": "underscores the importance of incorporating both standard instructional tuning and context", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 105, + 709, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 505, + 723 + ], + "score": 1.0, + "content": "comprehension into the training dataset to preserve and enhance the model’s ability to", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 105, + 720, + 209, + 734 + ], + "spans": [ + { + "bbox": [ + 105, + 720, + 209, + 734 + ], + "score": 1.0, + "content": "process text effectively.", + "type": "text" + } + ], + "index": 48 + } + ], + "index": 43.5, + "bbox_fs": [ + 104, + 621, + 506, + 734 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "title", + "bbox": [ + 118, + 86, + 170, + 97 + ], + "lines": [ + { + "bbox": [ + 116, + 84, + 172, + 99 + ], + "spans": [ + { + "bbox": [ + 116, + 84, + 172, + 99 + ], + 
"score": 1.0, + "content": "HotPot QA", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 114, + 110, + 494, + 207 + ], + "lines": [ + { + "bbox": [ + 115, + 108, + 495, + 126 + ], + "spans": [ + { + "bbox": [ + 115, + 108, + 495, + 126 + ], + "score": 1.0, + "content": "Question: What screenwriter with credits for β€œEvolution”[0/1879]e a film starring", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 117, + 122, + 249, + 135 + ], + "spans": [ + { + "bbox": [ + 117, + 122, + 249, + 135 + ], + "score": 1.0, + "content": "Nicolas Cage and TΓ©a Leoni?", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 117, + 133, + 495, + 147 + ], + "spans": [ + { + "bbox": [ + 117, + 133, + 495, + 147 + ], + "score": 1.0, + "content": "Documents: . . . David Weissman is a screenwriter and director. His film credits", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 116, + 145, + 487, + 160 + ], + "spans": [ + { + "bbox": [ + 116, + 145, + 487, + 160 + ], + "score": 1.0, + "content": "include β€œThe Family Man” (2000), β€œEvolution” (2001), and β€œWhen in Rome” (2010).", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 117, + 167, + 495, + 181 + ], + "spans": [ + { + "bbox": [ + 117, + 167, + 495, + 181 + ], + "score": 1.0, + "content": "The Family Man is a 2000 American romantic comedy-drama film directed by Brett", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 116, + 178, + 494, + 195 + ], + "spans": [ + { + "bbox": [ + 116, + 178, + 494, + 195 + ], + "score": 1.0, + "content": "Ratner, written by David Diamond and David Weissman, and starring Nicolas Cage", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 117, + 192, + 185, + 208 + ], + "spans": [ + { + "bbox": [ + 117, + 192, + 185, + 208 + ], + "score": 1.0, + "content": "and TΓ©a Leoni.", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 115, + 
225, + 497, + 318 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 115, + 225, + 497, + 318 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 115, + 225, + 497, + 318 + ], + "spans": [ + { + "bbox": [ + 115, + 225, + 497, + 318 + ], + "score": 0.395, + "type": "image", + "image_path": "0bcc3d38e5c0488526bd52fb267fd171f556aebd1617289e7668d2a9205a03f4.jpg" + } + ] + } + ], + "index": 9, + "virtual_lines": [ + { + "bbox": [ + 115, + 225, + 497, + 256.0 + ], + "spans": [], + "index": 8 + }, + { + "bbox": [ + 115, + 256.0, + 497, + 287.0 + ], + "spans": [], + "index": 9 + }, + { + "bbox": [ + 115, + 287.0, + 497, + 318.0 + ], + "spans": [], + "index": 10 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 331, + 505, + 366 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 331, + 506, + 345 + ], + "spans": [ + { + "bbox": [ + 105, + 331, + 506, + 345 + ], + "score": 1.0, + "content": "Figure 4: Comparison of RAFT and DSF: On the HotPot QA dataset, we can see that DSF", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 342, + 506, + 356 + ], + "spans": [ + { + "bbox": [ + 105, + 342, + 506, + 356 + ], + "score": 1.0, + "content": "model extracts the wrong information from the context when the question is asking who is", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 352, + 500, + 367 + ], + "spans": [ + { + "bbox": [ + 105, + 352, + 500, + 367 + ], + "score": 1.0, + "content": "the screen writer and it answers a film name. 
RAFT manages to get the accurate results .", + "type": "text" + } + ], + "index": 13 + } + ], + "index": 12 + } + ], + "index": 10.5 + }, + { + "type": "text", + "bbox": [ + 108, + 390, + 425, + 402 + ], + "lines": [ + { + "bbox": [ + 105, + 390, + 426, + 404 + ], + "spans": [ + { + "bbox": [ + 105, + 390, + 426, + 404 + ], + "score": 1.0, + "content": "4.4 Should we train the LLM always with the golden context for RAG?", + "type": "text" + } + ], + "index": 14 + } + ], + "index": 14 + }, + { + "type": "text", + "bbox": [ + 107, + 412, + 505, + 501 + ], + "lines": [ + { + "bbox": [ + 105, + 412, + 506, + 427 + ], + "spans": [ + { + "bbox": [ + 105, + 412, + 506, + 427 + ], + "score": 1.0, + "content": "In our exploration of whether large language models (LLMs) should always be trained with", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 423, + 507, + 437 + ], + "spans": [ + { + "bbox": [ + 105, + 423, + 507, + 437 + ], + "score": 1.0, + "content": "the golden context for Retrieval-Augmented Generation (RAG), we address a key question:", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 434, + 506, + 449 + ], + "spans": [ + { + "bbox": [ + 105, + 434, + 183, + 449 + ], + "score": 1.0, + "content": "what proportion", + "type": "text" + }, + { + "bbox": [ + 183, + 435, + 205, + 447 + ], + "score": 0.87, + "content": "( \\mathrm { p \\% ) }", + "type": "inline_equation" + }, + { + "bbox": [ + 205, + 434, + 506, + 449 + ], + "score": 1.0, + "content": "of the training data should include golden documents? 
Intuitively,", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 446, + 505, + 459 + ], + "spans": [ + { + "bbox": [ + 105, + 446, + 505, + 459 + ], + "score": 1.0, + "content": "one might assume that for effective training in reading and extracting information from", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 455, + 506, + 472 + ], + "spans": [ + { + "bbox": [ + 104, + 455, + 506, + 472 + ], + "score": 1.0, + "content": "context (e.g., RAG tasks), the golden document should always be included during training", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 108, + 466, + 506, + 482 + ], + "spans": [ + { + "bbox": [ + 108, + 468, + 153, + 479 + ], + "score": 0.88, + "content": "\\mathrm { ( P = 1 0 0 \\% }", + "type": "inline_equation" + }, + { + "bbox": [ + 153, + 466, + 506, + 482 + ], + "score": 1.0, + "content": "). However, our findings challenge this assumption: incorporating a portion of", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 478, + 506, + 492 + ], + "spans": [ + { + "bbox": [ + 105, + 478, + 377, + 492 + ], + "score": 1.0, + "content": "the training data without the golden document in the context", + "type": "text" + }, + { + "bbox": [ + 377, + 479, + 415, + 490 + ], + "score": 0.88, + "content": "\\mathrm { ( P = 8 0 \\% }", + "type": "inline_equation" + }, + { + "bbox": [ + 415, + 478, + 506, + 492 + ], + "score": 1.0, + "content": ") appears to enhance", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 106, + 489, + 284, + 503 + ], + "spans": [ + { + "bbox": [ + 106, + 489, + 284, + 503 + ], + "score": 1.0, + "content": "the model’s performance on RAG tasks.", + "type": "text" + } + ], + "index": 22 + } + ], + "index": 18.5 + }, + { + "type": "text", + "bbox": [ + 106, + 506, + 505, + 606 + ], + "lines": [ + { + "bbox": [ + 105, + 506, + 506, + 520 + ], + "spans": [ + { + "bbox": [ + 105, + 506, + 385, + 520 + ], + "score": 1.0, + "content": 
"Figure 5 presents our investigation into the hyperparameter", + "type": "text" + }, + { + "bbox": [ + 386, + 507, + 403, + 518 + ], + "score": 0.85, + "content": "\\mathrm { P \\% }", + "type": "inline_equation" + }, + { + "bbox": [ + 403, + 506, + 506, + 520 + ], + "score": 1.0, + "content": ", which represents the", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 517, + 505, + 531 + ], + "spans": [ + { + "bbox": [ + 105, + 517, + 505, + 531 + ], + "score": 1.0, + "content": "percentage of training instances that should include golden documents. We find that the", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 528, + 506, + 542 + ], + "spans": [ + { + "bbox": [ + 105, + 528, + 312, + 542 + ], + "score": 1.0, + "content": "optimal proportion varies across datasets, with", + "type": "text" + }, + { + "bbox": [ + 313, + 529, + 329, + 540 + ], + "score": 0.82, + "content": "\\mathrm { P \\% }", + "type": "inline_equation" + }, + { + "bbox": [ + 329, + 528, + 389, + 542 + ], + "score": 1.0, + "content": "ranging from", + "type": "text" + }, + { + "bbox": [ + 390, + 529, + 409, + 540 + ], + "score": 0.85, + "content": "4 0 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 410, + 528, + 412, + 542 + ], + "score": 1.0, + "content": ",", + "type": "text" + }, + { + "bbox": [ + 413, + 529, + 433, + 540 + ], + "score": 0.86, + "content": "6 0 \\% ,", + "type": "inline_equation" + }, + { + "bbox": [ + 434, + 528, + 455, + 542 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 455, + 529, + 479, + 540 + ], + "score": 0.88, + "content": "1 0 0 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 480, + 528, + 506, + 542 + ], + "score": 1.0, + "content": ". 
This", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 104, + 538, + 506, + 553 + ], + "spans": [ + { + "bbox": [ + 104, + 538, + 506, + 553 + ], + "score": 1.0, + "content": "indicates that training your LLM without the correct corresponding context at times can be", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 104, + 549, + 506, + 565 + ], + "spans": [ + { + "bbox": [ + 104, + 549, + 506, + 565 + ], + "score": 1.0, + "content": "beneficial for the downstream task of answering questions related to the documents. In our", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 561, + 506, + 574 + ], + "spans": [ + { + "bbox": [ + 105, + 561, + 506, + 574 + ], + "score": 1.0, + "content": "training setup, we include four distractor documents alongside the golden document, and at", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 572, + 507, + 586 + ], + "spans": [ + { + "bbox": [ + 105, + 572, + 507, + 586 + ], + "score": 1.0, + "content": "test time, we maintain this format by providing the golden document with four distractors.", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 582, + 506, + 597 + ], + "spans": [ + { + "bbox": [ + 105, + 582, + 506, + 597 + ], + "score": 1.0, + "content": "Our findings suggest that, for domain-specific RAG tasks, including a certain percentage of", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 593, + 489, + 608 + ], + "spans": [ + { + "bbox": [ + 105, + 593, + 489, + 608 + ], + "score": 1.0, + "content": "training data without the golden documents in the context proves to be advantageous.", + "type": "text" + } + ], + "index": 31 + } + ], + "index": 27 + }, + { + "type": "title", + "bbox": [ + 107, + 626, + 308, + 640 + ], + "lines": [ + { + "bbox": [ + 104, + 623, + 309, + 645 + ], + "spans": [ + { + "bbox": [ + 104, + 623, + 309, + 645 + ], + "score": 1.0, + "content": "5 RAFT Generalizes to Top-K RAG", + "type": 
"text" + } + ], + "index": 32 + } + ], + "index": 32 + }, + { + "type": "text", + "bbox": [ + 106, + 654, + 505, + 732 + ], + "lines": [ + { + "bbox": [ + 106, + 655, + 505, + 667 + ], + "spans": [ + { + "bbox": [ + 106, + 655, + 505, + 667 + ], + "score": 1.0, + "content": "We now study another important problem: How does the number of distractor documents", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 104, + 664, + 505, + 679 + ], + "spans": [ + { + "bbox": [ + 104, + 664, + 505, + 679 + ], + "score": 1.0, + "content": "in RAFT affect the model’s performance when augmented with top-k RAG results during", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 106, + 677, + 505, + 689 + ], + "spans": [ + { + "bbox": [ + 106, + 677, + 505, + 689 + ], + "score": 1.0, + "content": "evaluation? Previous research has highlighted the vulnerability of LLMs to irrelevant text", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 687, + 505, + 701 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 505, + 701 + ], + "score": 1.0, + "content": "(see studies (Shi et al., 2023a; Weston & Sukhbaatar, 2023; Liu et al., 2023)). This issue is", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 104, + 698, + 505, + 712 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 233, + 712 + ], + "score": 1.0, + "content": "particularly critical for LLMs", + "type": "text" + }, + { + "bbox": [ + 234, + 700, + 243, + 709 + ], + "score": 0.6, + "content": "^ +", + "type": "inline_equation" + }, + { + "bbox": [ + 243, + 698, + 505, + 712 + ], + "score": 1.0, + "content": "RAG since top-k RAG is frequently employed at test time to", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 106, + 709, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 106, + 709, + 505, + 722 + ], + "score": 1.0, + "content": "ensure high recall. 
Such a scenario necessitates the model to have the ability to discern and", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 719, + 417, + 735 + ], + "spans": [ + { + "bbox": [ + 105, + 719, + 417, + 735 + ], + "score": 1.0, + "content": "disregard irrelevant content, focusing solely on pertinent information.", + "type": "text" + } + ], + "index": 39 + } + ], + "index": 36 + } + ], + "page_idx": 6, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 26, + 316, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "score": 1.0, + "content": "Published as a conference paper at COLM 2024", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 302, + 751, + 308, + 759 + ], + "lines": [ + { + "bbox": [ + 302, + 750, + 310, + 763 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 310, + 763 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 13, + "width": 8 + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "title", + "bbox": [ + 118, + 86, + 170, + 97 + ], + "lines": [ + { + "bbox": [ + 116, + 84, + 172, + 99 + ], + "spans": [ + { + "bbox": [ + 116, + 84, + 172, + 99 + ], + "score": 1.0, + "content": "HotPot QA", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "list", + "bbox": [ + 114, + 110, + 494, + 207 + ], + "lines": [ + { + "bbox": [ + 115, + 108, + 495, + 126 + ], + "spans": [ + { + "bbox": [ + 115, + 108, + 495, + 126 + ], + "score": 1.0, + "content": "Question: What screenwriter with credits for β€œEvolution”[0/1879]e a film starring", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 117, + 122, + 249, + 135 + ], + "spans": [ + { + "bbox": [ + 117, + 122, + 249, + 135 + ], + "score": 1.0, + "content": "Nicolas Cage and TΓ©a Leoni?", + "type": "text" + } + ], + "index": 2, + "is_list_end_line": true + }, + { + "bbox": [ + 
117, + 133, + 495, + 147 + ], + "spans": [ + { + "bbox": [ + 117, + 133, + 495, + 147 + ], + "score": 1.0, + "content": "Documents: . . . David Weissman is a screenwriter and director. His film credits", + "type": "text" + } + ], + "index": 3, + "is_list_start_line": true + }, + { + "bbox": [ + 116, + 145, + 487, + 160 + ], + "spans": [ + { + "bbox": [ + 116, + 145, + 487, + 160 + ], + "score": 1.0, + "content": "include β€œThe Family Man” (2000), β€œEvolution” (2001), and β€œWhen in Rome” (2010).", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 117, + 167, + 495, + 181 + ], + "spans": [ + { + "bbox": [ + 117, + 167, + 495, + 181 + ], + "score": 1.0, + "content": "The Family Man is a 2000 American romantic comedy-drama film directed by Brett", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 116, + 178, + 494, + 195 + ], + "spans": [ + { + "bbox": [ + 116, + 178, + 494, + 195 + ], + "score": 1.0, + "content": "Ratner, written by David Diamond and David Weissman, and starring Nicolas Cage", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 117, + 192, + 185, + 208 + ], + "spans": [ + { + "bbox": [ + 117, + 192, + 185, + 208 + ], + "score": 1.0, + "content": "and TΓ©a Leoni.", + "type": "text" + } + ], + "index": 7, + "is_list_end_line": true + } + ], + "index": 4, + "bbox_fs": [ + 115, + 108, + 495, + 208 + ] + }, + { + "type": "image", + "bbox": [ + 115, + 225, + 497, + 318 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 115, + 225, + 497, + 318 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 115, + 225, + 497, + 318 + ], + "spans": [ + { + "bbox": [ + 115, + 225, + 497, + 318 + ], + "score": 0.395, + "type": "image", + "image_path": "0bcc3d38e5c0488526bd52fb267fd171f556aebd1617289e7668d2a9205a03f4.jpg" + } + ] + } + ], + "index": 9, + "virtual_lines": [ + { + "bbox": [ + 115, + 225, + 497, + 256.0 + ], + "spans": [], + "index": 8 + }, + { + "bbox": [ + 115, + 256.0, + 497, + 287.0 + ], + "spans": [], + 
"index": 9 + }, + { + "bbox": [ + 115, + 287.0, + 497, + 318.0 + ], + "spans": [], + "index": 10 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 331, + 505, + 366 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 331, + 506, + 345 + ], + "spans": [ + { + "bbox": [ + 105, + 331, + 506, + 345 + ], + "score": 1.0, + "content": "Figure 4: Comparison of RAFT and DSF: On the HotPot QA dataset, we can see that DSF", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 342, + 506, + 356 + ], + "spans": [ + { + "bbox": [ + 105, + 342, + 506, + 356 + ], + "score": 1.0, + "content": "model extracts the wrong information from the context when the question is asking who is", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 352, + 500, + 367 + ], + "spans": [ + { + "bbox": [ + 105, + 352, + 500, + 367 + ], + "score": 1.0, + "content": "the screen writer and it answers a film name. RAFT manages to get the accurate results .", + "type": "text" + } + ], + "index": 13 + } + ], + "index": 12 + } + ], + "index": 10.5 + }, + { + "type": "text", + "bbox": [ + 108, + 390, + 425, + 402 + ], + "lines": [ + { + "bbox": [ + 105, + 390, + 426, + 404 + ], + "spans": [ + { + "bbox": [ + 105, + 390, + 426, + 404 + ], + "score": 1.0, + "content": "4.4 Should we train the LLM always with the golden context for RAG?", + "type": "text" + } + ], + "index": 14 + } + ], + "index": 14, + "bbox_fs": [ + 105, + 390, + 426, + 404 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 412, + 505, + 501 + ], + "lines": [ + { + "bbox": [ + 105, + 412, + 506, + 427 + ], + "spans": [ + { + "bbox": [ + 105, + 412, + 506, + 427 + ], + "score": 1.0, + "content": "In our exploration of whether large language models (LLMs) should always be trained with", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 423, + 507, + 437 + ], + "spans": [ + { + "bbox": [ + 105, + 423, + 507, + 437 + ], + "score": 1.0, + "content": "the golden context 
for Retrieval-Augmented Generation (RAG), we address a key question:", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 434, + 506, + 449 + ], + "spans": [ + { + "bbox": [ + 105, + 434, + 183, + 449 + ], + "score": 1.0, + "content": "what proportion", + "type": "text" + }, + { + "bbox": [ + 183, + 435, + 205, + 447 + ], + "score": 0.87, + "content": "( \\mathrm { p \\% ) }", + "type": "inline_equation" + }, + { + "bbox": [ + 205, + 434, + 506, + 449 + ], + "score": 1.0, + "content": "of the training data should include golden documents? Intuitively,", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 446, + 505, + 459 + ], + "spans": [ + { + "bbox": [ + 105, + 446, + 505, + 459 + ], + "score": 1.0, + "content": "one might assume that for effective training in reading and extracting information from", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 455, + 506, + 472 + ], + "spans": [ + { + "bbox": [ + 104, + 455, + 506, + 472 + ], + "score": 1.0, + "content": "context (e.g., RAG tasks), the golden document should always be included during training", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 108, + 466, + 506, + 482 + ], + "spans": [ + { + "bbox": [ + 108, + 468, + 153, + 479 + ], + "score": 0.88, + "content": "\\mathrm { ( P = 1 0 0 \\% }", + "type": "inline_equation" + }, + { + "bbox": [ + 153, + 466, + 506, + 482 + ], + "score": 1.0, + "content": "). 
However, our findings challenge this assumption: incorporating a portion of", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 478, + 506, + 492 + ], + "spans": [ + { + "bbox": [ + 105, + 478, + 377, + 492 + ], + "score": 1.0, + "content": "the training data without the golden document in the context", + "type": "text" + }, + { + "bbox": [ + 377, + 479, + 415, + 490 + ], + "score": 0.88, + "content": "\\mathrm { ( P = 8 0 \\% }", + "type": "inline_equation" + }, + { + "bbox": [ + 415, + 478, + 506, + 492 + ], + "score": 1.0, + "content": ") appears to enhance", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 106, + 489, + 284, + 503 + ], + "spans": [ + { + "bbox": [ + 106, + 489, + 284, + 503 + ], + "score": 1.0, + "content": "the model’s performance on RAG tasks.", + "type": "text" + } + ], + "index": 22 + } + ], + "index": 18.5, + "bbox_fs": [ + 104, + 412, + 507, + 503 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 506, + 505, + 606 + ], + "lines": [ + { + "bbox": [ + 105, + 506, + 506, + 520 + ], + "spans": [ + { + "bbox": [ + 105, + 506, + 385, + 520 + ], + "score": 1.0, + "content": "Figure 5 presents our investigation into the hyperparameter", + "type": "text" + }, + { + "bbox": [ + 386, + 507, + 403, + 518 + ], + "score": 0.85, + "content": "\\mathrm { P \\% }", + "type": "inline_equation" + }, + { + "bbox": [ + 403, + 506, + 506, + 520 + ], + "score": 1.0, + "content": ", which represents the", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 517, + 505, + 531 + ], + "spans": [ + { + "bbox": [ + 105, + 517, + 505, + 531 + ], + "score": 1.0, + "content": "percentage of training instances that should include golden documents. 
We find that the", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 528, + 506, + 542 + ], + "spans": [ + { + "bbox": [ + 105, + 528, + 312, + 542 + ], + "score": 1.0, + "content": "optimal proportion varies across datasets, with", + "type": "text" + }, + { + "bbox": [ + 313, + 529, + 329, + 540 + ], + "score": 0.82, + "content": "\\mathrm { P \\% }", + "type": "inline_equation" + }, + { + "bbox": [ + 329, + 528, + 389, + 542 + ], + "score": 1.0, + "content": "ranging from", + "type": "text" + }, + { + "bbox": [ + 390, + 529, + 409, + 540 + ], + "score": 0.85, + "content": "4 0 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 410, + 528, + 412, + 542 + ], + "score": 1.0, + "content": ",", + "type": "text" + }, + { + "bbox": [ + 413, + 529, + 433, + 540 + ], + "score": 0.86, + "content": "6 0 \\% ,", + "type": "inline_equation" + }, + { + "bbox": [ + 434, + 528, + 455, + 542 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 455, + 529, + 479, + 540 + ], + "score": 0.88, + "content": "1 0 0 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 480, + 528, + 506, + 542 + ], + "score": 1.0, + "content": ". This", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 104, + 538, + 506, + 553 + ], + "spans": [ + { + "bbox": [ + 104, + 538, + 506, + 553 + ], + "score": 1.0, + "content": "indicates that training your LLM without the correct corresponding context at times can be", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 104, + 549, + 506, + 565 + ], + "spans": [ + { + "bbox": [ + 104, + 549, + 506, + 565 + ], + "score": 1.0, + "content": "beneficial for the downstream task of answering questions related to the documents. 
In our", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 561, + 506, + 574 + ], + "spans": [ + { + "bbox": [ + 105, + 561, + 506, + 574 + ], + "score": 1.0, + "content": "training setup, we include four distractor documents alongside the golden document, and at", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 572, + 507, + 586 + ], + "spans": [ + { + "bbox": [ + 105, + 572, + 507, + 586 + ], + "score": 1.0, + "content": "test time, we maintain this format by providing the golden document with four distractors.", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 582, + 506, + 597 + ], + "spans": [ + { + "bbox": [ + 105, + 582, + 506, + 597 + ], + "score": 1.0, + "content": "Our findings suggest that, for domain-specific RAG tasks, including a certain percentage of", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 593, + 489, + 608 + ], + "spans": [ + { + "bbox": [ + 105, + 593, + 489, + 608 + ], + "score": 1.0, + "content": "training data without the golden documents in the context proves to be advantageous.", + "type": "text" + } + ], + "index": 31 + } + ], + "index": 27, + "bbox_fs": [ + 104, + 506, + 507, + 608 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 626, + 308, + 640 + ], + "lines": [ + { + "bbox": [ + 104, + 623, + 309, + 645 + ], + "spans": [ + { + "bbox": [ + 104, + 623, + 309, + 645 + ], + "score": 1.0, + "content": "5 RAFT Generalizes to Top-K RAG", + "type": "text" + } + ], + "index": 32 + } + ], + "index": 32 + }, + { + "type": "text", + "bbox": [ + 106, + 654, + 505, + 732 + ], + "lines": [ + { + "bbox": [ + 106, + 655, + 505, + 667 + ], + "spans": [ + { + "bbox": [ + 106, + 655, + 505, + 667 + ], + "score": 1.0, + "content": "We now study another important problem: How does the number of distractor documents", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 104, + 664, + 505, + 679 + ], + "spans": [ + { + "bbox": [ + 104, + 664, + 505, + 
679 + ], + "score": 1.0, + "content": "in RAFT affect the model’s performance when augmented with top-k RAG results during", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 106, + 677, + 505, + 689 + ], + "spans": [ + { + "bbox": [ + 106, + 677, + 505, + 689 + ], + "score": 1.0, + "content": "evaluation? Previous research has highlighted the vulnerability of LLMs to irrelevant text", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 687, + 505, + 701 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 505, + 701 + ], + "score": 1.0, + "content": "(see studies (Shi et al., 2023a; Weston & Sukhbaatar, 2023; Liu et al., 2023)). This issue is", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 104, + 698, + 505, + 712 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 233, + 712 + ], + "score": 1.0, + "content": "particularly critical for LLMs", + "type": "text" + }, + { + "bbox": [ + 234, + 700, + 243, + 709 + ], + "score": 0.6, + "content": "^ +", + "type": "inline_equation" + }, + { + "bbox": [ + 243, + 698, + 505, + 712 + ], + "score": 1.0, + "content": "RAG since top-k RAG is frequently employed at test time to", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 106, + 709, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 106, + 709, + 505, + 722 + ], + "score": 1.0, + "content": "ensure high recall. 
Such a scenario necessitates the model to have the ability to discern and", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 719, + 417, + 735 + ], + "spans": [ + { + "bbox": [ + 105, + 719, + 417, + 735 + ], + "score": 1.0, + "content": "disregard irrelevant content, focusing solely on pertinent information.", + "type": "text" + } + ], + "index": 39 + } + ], + "index": 36, + "bbox_fs": [ + 104, + 655, + 505, + 735 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 80, + 504, + 172 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 107, + 80, + 504, + 172 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 107, + 80, + 504, + 172 + ], + "spans": [ + { + "bbox": [ + 107, + 80, + 504, + 172 + ], + "score": 0.964, + "type": "image", + "image_path": "8e086fb8cb885d22dfb048538cf47a1ac09dfb1873a60ba60c02530cd07d066d.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 107, + 80, + 504, + 110.66666666666667 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 107, + 110.66666666666667, + 504, + 141.33333333333334 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 107, + 141.33333333333334, + 504, + 172.0 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 181, + 505, + 226 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 181, + 505, + 194 + ], + "spans": [ + { + "bbox": [ + 105, + 181, + 488, + 194 + ], + "score": 1.0, + "content": "Figure 5: How many golden documents to involve? We study the hyperparameter", + "type": "text" + }, + { + "bbox": [ + 488, + 181, + 505, + 192 + ], + "score": 0.85, + "content": "\\mathrm { P \\% }", + "type": "inline_equation" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 192, + 505, + 204 + ], + "spans": [ + { + "bbox": [ + 106, + 192, + 505, + 204 + ], + "score": 1.0, + "content": "where it indicates how much portion of training data is with golden document. 
Results", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 203, + 505, + 216 + ], + "spans": [ + { + "bbox": [ + 105, + 203, + 505, + 216 + ], + "score": 1.0, + "content": "on NQ, TQA and HotpotQA suggest that mixing some amount of data that the golden", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 213, + 394, + 226 + ], + "spans": [ + { + "bbox": [ + 106, + 213, + 394, + 226 + ], + "score": 1.0, + "content": "document is not put in the context is helpful for in-domain RAG.", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 4.5 + } + ], + "index": 2.75 + }, + { + "type": "title", + "bbox": [ + 108, + 248, + 293, + 260 + ], + "lines": [ + { + "bbox": [ + 104, + 247, + 294, + 262 + ], + "spans": [ + { + "bbox": [ + 104, + 247, + 294, + 262 + ], + "score": 1.0, + "content": "5.1 Making Model Robust to top-K RAG", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 7 + }, + { + "type": "text", + "bbox": [ + 107, + 269, + 505, + 402 + ], + "lines": [ + { + "bbox": [ + 105, + 268, + 506, + 283 + ], + "spans": [ + { + "bbox": [ + 105, + 268, + 506, + 283 + ], + "score": 1.0, + "content": "To tackle the challenge of enhancing large language models’ (LLMs) ability to sift through", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 280, + 505, + 294 + ], + "spans": [ + { + "bbox": [ + 105, + 280, + 505, + 294 + ], + "score": 1.0, + "content": "irrelevant text within the retrieval pipeline, our analysis revealed that training solely with", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 291, + 506, + 305 + ], + "spans": [ + { + "bbox": [ + 105, + 291, + 506, + 305 + ], + "score": 1.0, + "content": "golden (highly relevant) documents can inadvertently diminish the model’s ability to dis-", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 302, + 506, + 316 + ], + "spans": [ + { + "bbox": [ + 105, + 302, + 506, + 316 + ], + "score": 1.0, + "content": "cern 
and disregard irrelevant information. To address this, our algorithm, RAFT , adopts", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 314, + 506, + 326 + ], + "spans": [ + { + "bbox": [ + 105, + 314, + 506, + 326 + ], + "score": 1.0, + "content": "a strategy that integrates golden documents with a mix of irrelevant ones. This method-", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 324, + 505, + 338 + ], + "spans": [ + { + "bbox": [ + 105, + 324, + 505, + 338 + ], + "score": 1.0, + "content": "ology prompts us to investigate the ideal fraction of distractor (irrelevant) documents to", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 335, + 506, + 349 + ], + "spans": [ + { + "bbox": [ + 105, + 335, + 506, + 349 + ], + "score": 1.0, + "content": "incorporate throughout the training process and to assess how well this training approach", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 345, + 507, + 360 + ], + "spans": [ + { + "bbox": [ + 105, + 345, + 507, + 360 + ], + "score": 1.0, + "content": "adapts to different volumes of documents encountered by the Retrieval-Augmented Gen-", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 358, + 506, + 370 + ], + "spans": [ + { + "bbox": [ + 106, + 358, + 506, + 370 + ], + "score": 1.0, + "content": "eration (RAG) during the test phase. Our aim is to refine the balance between relevant", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 366, + 506, + 383 + ], + "spans": [ + { + "bbox": [ + 105, + 366, + 506, + 383 + ], + "score": 1.0, + "content": "and irrelevant information to strenghten the model’s efficiency in identifying and utilizing", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 379, + 506, + 392 + ], + "spans": [ + { + "bbox": [ + 105, + 379, + 350, + 392 + ], + "score": 1.0, + "content": "pertinent content. 
Notice that Sec 4.4 looked what what", + "type": "text" + }, + { + "bbox": [ + 350, + 379, + 366, + 390 + ], + "score": 0.85, + "content": "\\mathrm { P \\% }", + "type": "inline_equation" + }, + { + "bbox": [ + 367, + 379, + 506, + 392 + ], + "score": 1.0, + "content": "of training data should include", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 390, + 380, + 403 + ], + "spans": [ + { + "bbox": [ + 105, + 390, + 380, + 403 + ], + "score": 1.0, + "content": "distractors, while in this section, we study test-time scenarios.", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 13.5 + }, + { + "type": "text", + "bbox": [ + 107, + 407, + 505, + 528 + ], + "lines": [ + { + "bbox": [ + 106, + 407, + 505, + 420 + ], + "spans": [ + { + "bbox": [ + 106, + 407, + 505, + 420 + ], + "score": 1.0, + "content": "Training with Distractor Documents To enhance the robustness of LLMs against irrelevant", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 417, + 506, + 432 + ], + "spans": [ + { + "bbox": [ + 105, + 417, + 506, + 432 + ], + "score": 1.0, + "content": "text in retrieved documents, we adopted a finetuning approach that incorporates both", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 106, + 429, + 506, + 442 + ], + "spans": [ + { + "bbox": [ + 106, + 429, + 506, + 442 + ], + "score": 1.0, + "content": "golden (highly relevant) documents and distractor (irrelevant) documents. 
The model was", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 438, + 506, + 455 + ], + "spans": [ + { + "bbox": [ + 105, + 438, + 506, + 455 + ], + "score": 1.0, + "content": "trained with varying numbers of distractor documents, but consistently evaluated using", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 104, + 449, + 507, + 466 + ], + "spans": [ + { + "bbox": [ + 104, + 449, + 433, + 466 + ], + "score": 1.0, + "content": "the top-3 documents obtained from the retriever - not to be confused with", + "type": "text" + }, + { + "bbox": [ + 433, + 452, + 440, + 463 + ], + "score": 0.62, + "content": "p", + "type": "inline_equation" + }, + { + "bbox": [ + 441, + 449, + 507, + 466 + ], + "score": 1.0, + "content": ". Our findings,", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 461, + 506, + 475 + ], + "spans": [ + { + "bbox": [ + 105, + 461, + 506, + 475 + ], + "score": 1.0, + "content": "detailed in Fig. 6, reveal that finetuning with only the golden document frequently results in", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 106, + 473, + 505, + 486 + ], + "spans": [ + { + "bbox": [ + 106, + 473, + 505, + 486 + ], + "score": 1.0, + "content": "inferior performance compared to configurations that include a greater number of distractor", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 484, + 505, + 496 + ], + "spans": [ + { + "bbox": [ + 105, + 484, + 505, + 496 + ], + "score": 1.0, + "content": "documents. 
As we can see in the figure, the better performance for Natural Questions is", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 106, + 495, + 505, + 507 + ], + "spans": [ + { + "bbox": [ + 106, + 495, + 165, + 507 + ], + "score": 1.0, + "content": "training with", + "type": "text" + }, + { + "bbox": [ + 166, + 495, + 205, + 506 + ], + "score": 0.91, + "content": "D ^ { * } + 3 D", + "type": "inline_equation" + }, + { + "bbox": [ + 206, + 495, + 244, + 507 + ], + "score": 1.0, + "content": "and it is", + "type": "text" + }, + { + "bbox": [ + 244, + 495, + 283, + 506 + ], + "score": 0.91, + "content": "D ^ { * } + 1 D", + "type": "inline_equation" + }, + { + "bbox": [ + 284, + 495, + 505, + 507 + ], + "score": 1.0, + "content": "documents with Hotpot QA. This insight has been", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 104, + 505, + 506, + 520 + ], + "spans": [ + { + "bbox": [ + 104, + 505, + 506, + 520 + ], + "score": 1.0, + "content": "particularly beneficial for our algorithm, RAFT . In our experiments, we consistently employ", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 104, + 517, + 502, + 530 + ], + "spans": [ + { + "bbox": [ + 104, + 517, + 502, + 530 + ], + "score": 1.0, + "content": "a training setup consisting of one golden document alongside four distractor documents.", + "type": "text" + } + ], + "index": 30 + } + ], + "index": 25 + }, + { + "type": "text", + "bbox": [ + 107, + 533, + 505, + 644 + ], + "lines": [ + { + "bbox": [ + 106, + 534, + 505, + 545 + ], + "spans": [ + { + "bbox": [ + 106, + 534, + 505, + 545 + ], + "score": 1.0, + "content": "Generalization to a variable number of test-time documents. 
We extended our research", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 106, + 544, + 506, + 557 + ], + "spans": [ + { + "bbox": [ + 106, + 544, + 506, + 557 + ], + "score": 1.0, + "content": "to examine the impact of different quantities of test-time documents on the model’s per-", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 106, + 555, + 506, + 568 + ], + "spans": [ + { + "bbox": [ + 106, + 555, + 506, + 568 + ], + "score": 1.0, + "content": "formance. Specifically, our experiments focused on assessing how models, trained with", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 104, + 566, + 506, + 579 + ], + "spans": [ + { + "bbox": [ + 104, + 566, + 506, + 579 + ], + "score": 1.0, + "content": "varying numbers of distractor documents, respond to changes in the number of documents", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 577, + 506, + 590 + ], + "spans": [ + { + "bbox": [ + 105, + 577, + 506, + 590 + ], + "score": 1.0, + "content": "presented at test time. The results, illustrated in Fig. 6, confirm that the inclusion of distrac-", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 106, + 588, + 505, + 601 + ], + "spans": [ + { + "bbox": [ + 106, + 588, + 505, + 601 + ], + "score": 1.0, + "content": "tor documents during training indeed makes the model more resilient to fluctuations in the", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 599, + 506, + 612 + ], + "spans": [ + { + "bbox": [ + 105, + 599, + 506, + 612 + ], + "score": 1.0, + "content": "number of documents encountered during testing. 
This ability to maintain consistent perfor-", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 106, + 610, + 506, + 623 + ], + "spans": [ + { + "bbox": [ + 106, + 610, + 506, + 623 + ], + "score": 1.0, + "content": "mance despite variations in test-time document numbers further validates the robustness of", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 620, + 506, + 636 + ], + "spans": [ + { + "bbox": [ + 105, + 620, + 506, + 636 + ], + "score": 1.0, + "content": "our approach, RAFT . This finding underscores the importance of a well-calibrated training", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 632, + 505, + 645 + ], + "spans": [ + { + "bbox": [ + 105, + 632, + 505, + 645 + ], + "score": 1.0, + "content": "environment to prepare the model for a range of scenarios it may encounter in real-world.", + "type": "text" + } + ], + "index": 40 + } + ], + "index": 35.5 + }, + { + "type": "title", + "bbox": [ + 108, + 660, + 205, + 674 + ], + "lines": [ + { + "bbox": [ + 105, + 660, + 207, + 676 + ], + "spans": [ + { + "bbox": [ + 105, + 660, + 207, + 676 + ], + "score": 1.0, + "content": "6 Related Works", + "type": "text" + } + ], + "index": 41 + } + ], + "index": 41 + }, + { + "type": "text", + "bbox": [ + 108, + 687, + 505, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 687, + 506, + 701 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 506, + 701 + ], + "score": 1.0, + "content": "Retrieval-Augmented Language Models Retrieval-Augmented Language Models (RALMs)", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 106, + 698, + 506, + 712 + ], + "spans": [ + { + "bbox": [ + 106, + 698, + 506, + 712 + ], + "score": 1.0, + "content": "enhance LLMs by integrating a retrieval module that sources relevant information from", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 709, + 507, + 722 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 507, + 722 + ], + "score": 1.0, + 
"content": "external knowledge bases, significantly improving performance across various NLP tasks,", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 720, + 507, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 720, + 507, + 733 + ], + "score": 1.0, + "content": "including language modeling (Guu et al., 2020; Borgeaud et al., 2022; Khandelwal et al.,", + "type": "text" + } + ], + "index": 45 + } + ], + "index": 43.5 + } + ], + "page_idx": 7, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 106, + 27, + 316, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "score": 1.0, + "content": "Published as a conference paper at COLM 2024", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 302, + 751, + 308, + 759 + ], + "lines": [ + { + "bbox": [ + 301, + 750, + 310, + 762 + ], + "spans": [ + { + "bbox": [ + 301, + 750, + 310, + 762 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 12, + "width": 9 + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 80, + 504, + 172 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 107, + 80, + 504, + 172 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 107, + 80, + 504, + 172 + ], + "spans": [ + { + "bbox": [ + 107, + 80, + 504, + 172 + ], + "score": 0.964, + "type": "image", + "image_path": "8e086fb8cb885d22dfb048538cf47a1ac09dfb1873a60ba60c02530cd07d066d.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 107, + 80, + 504, + 110.66666666666667 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 107, + 110.66666666666667, + 504, + 141.33333333333334 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 107, + 141.33333333333334, + 504, + 172.0 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 181, + 505, + 226 + ], + 
"group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 181, + 505, + 194 + ], + "spans": [ + { + "bbox": [ + 105, + 181, + 488, + 194 + ], + "score": 1.0, + "content": "Figure 5: How many golden documents to involve? We study the hyperparameter", + "type": "text" + }, + { + "bbox": [ + 488, + 181, + 505, + 192 + ], + "score": 0.85, + "content": "\\mathrm { P \\% }", + "type": "inline_equation" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 192, + 505, + 204 + ], + "spans": [ + { + "bbox": [ + 106, + 192, + 505, + 204 + ], + "score": 1.0, + "content": "where it indicates how much portion of training data is with golden document. Results", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 203, + 505, + 216 + ], + "spans": [ + { + "bbox": [ + 105, + 203, + 505, + 216 + ], + "score": 1.0, + "content": "on NQ, TQA and HotpotQA suggest that mixing some amount of data that the golden", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 213, + 394, + 226 + ], + "spans": [ + { + "bbox": [ + 106, + 213, + 394, + 226 + ], + "score": 1.0, + "content": "document is not put in the context is helpful for in-domain RAG.", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 4.5 + } + ], + "index": 2.75 + }, + { + "type": "title", + "bbox": [ + 108, + 248, + 293, + 260 + ], + "lines": [ + { + "bbox": [ + 104, + 247, + 294, + 262 + ], + "spans": [ + { + "bbox": [ + 104, + 247, + 294, + 262 + ], + "score": 1.0, + "content": "5.1 Making Model Robust to top-K RAG", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 7 + }, + { + "type": "text", + "bbox": [ + 107, + 269, + 505, + 402 + ], + "lines": [ + { + "bbox": [ + 105, + 268, + 506, + 283 + ], + "spans": [ + { + "bbox": [ + 105, + 268, + 506, + 283 + ], + "score": 1.0, + "content": "To tackle the challenge of enhancing large language models’ (LLMs) ability to sift through", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 280, + 505, + 294 + ], + "spans": [ + 
{ + "bbox": [ + 105, + 280, + 505, + 294 + ], + "score": 1.0, + "content": "irrelevant text within the retrieval pipeline, our analysis revealed that training solely with", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 291, + 506, + 305 + ], + "spans": [ + { + "bbox": [ + 105, + 291, + 506, + 305 + ], + "score": 1.0, + "content": "golden (highly relevant) documents can inadvertently diminish the model’s ability to dis-", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 302, + 506, + 316 + ], + "spans": [ + { + "bbox": [ + 105, + 302, + 506, + 316 + ], + "score": 1.0, + "content": "cern and disregard irrelevant information. To address this, our algorithm, RAFT , adopts", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 314, + 506, + 326 + ], + "spans": [ + { + "bbox": [ + 105, + 314, + 506, + 326 + ], + "score": 1.0, + "content": "a strategy that integrates golden documents with a mix of irrelevant ones. This method-", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 324, + 505, + 338 + ], + "spans": [ + { + "bbox": [ + 105, + 324, + 505, + 338 + ], + "score": 1.0, + "content": "ology prompts us to investigate the ideal fraction of distractor (irrelevant) documents to", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 335, + 506, + 349 + ], + "spans": [ + { + "bbox": [ + 105, + 335, + 506, + 349 + ], + "score": 1.0, + "content": "incorporate throughout the training process and to assess how well this training approach", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 345, + 507, + 360 + ], + "spans": [ + { + "bbox": [ + 105, + 345, + 507, + 360 + ], + "score": 1.0, + "content": "adapts to different volumes of documents encountered by the Retrieval-Augmented Gen-", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 358, + 506, + 370 + ], + "spans": [ + { + "bbox": [ + 106, + 358, + 506, + 370 + ], + "score": 1.0, + 
"content": "eration (RAG) during the test phase. Our aim is to refine the balance between relevant", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 366, + 506, + 383 + ], + "spans": [ + { + "bbox": [ + 105, + 366, + 506, + 383 + ], + "score": 1.0, + "content": "and irrelevant information to strenghten the model’s efficiency in identifying and utilizing", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 379, + 506, + 392 + ], + "spans": [ + { + "bbox": [ + 105, + 379, + 350, + 392 + ], + "score": 1.0, + "content": "pertinent content. Notice that Sec 4.4 looked what what", + "type": "text" + }, + { + "bbox": [ + 350, + 379, + 366, + 390 + ], + "score": 0.85, + "content": "\\mathrm { P \\% }", + "type": "inline_equation" + }, + { + "bbox": [ + 367, + 379, + 506, + 392 + ], + "score": 1.0, + "content": "of training data should include", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 390, + 380, + 403 + ], + "spans": [ + { + "bbox": [ + 105, + 390, + 380, + 403 + ], + "score": 1.0, + "content": "distractors, while in this section, we study test-time scenarios.", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 13.5, + "bbox_fs": [ + 105, + 268, + 507, + 403 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 407, + 505, + 528 + ], + "lines": [ + { + "bbox": [ + 106, + 407, + 505, + 420 + ], + "spans": [ + { + "bbox": [ + 106, + 407, + 505, + 420 + ], + "score": 1.0, + "content": "Training with Distractor Documents To enhance the robustness of LLMs against irrelevant", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 417, + 506, + 432 + ], + "spans": [ + { + "bbox": [ + 105, + 417, + 506, + 432 + ], + "score": 1.0, + "content": "text in retrieved documents, we adopted a finetuning approach that incorporates both", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 106, + 429, + 506, + 442 + ], + "spans": [ + { + "bbox": [ + 106, + 429, + 506, + 442 + ], + 
"score": 1.0, + "content": "golden (highly relevant) documents and distractor (irrelevant) documents. The model was", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 438, + 506, + 455 + ], + "spans": [ + { + "bbox": [ + 105, + 438, + 506, + 455 + ], + "score": 1.0, + "content": "trained with varying numbers of distractor documents, but consistently evaluated using", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 104, + 449, + 507, + 466 + ], + "spans": [ + { + "bbox": [ + 104, + 449, + 433, + 466 + ], + "score": 1.0, + "content": "the top-3 documents obtained from the retriever - not to be confused with", + "type": "text" + }, + { + "bbox": [ + 433, + 452, + 440, + 463 + ], + "score": 0.62, + "content": "p", + "type": "inline_equation" + }, + { + "bbox": [ + 441, + 449, + 507, + 466 + ], + "score": 1.0, + "content": ". Our findings,", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 461, + 506, + 475 + ], + "spans": [ + { + "bbox": [ + 105, + 461, + 506, + 475 + ], + "score": 1.0, + "content": "detailed in Fig. 6, reveal that finetuning with only the golden document frequently results in", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 106, + 473, + 505, + 486 + ], + "spans": [ + { + "bbox": [ + 106, + 473, + 505, + 486 + ], + "score": 1.0, + "content": "inferior performance compared to configurations that include a greater number of distractor", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 484, + 505, + 496 + ], + "spans": [ + { + "bbox": [ + 105, + 484, + 505, + 496 + ], + "score": 1.0, + "content": "documents. 
As we can see in the figure, the better performance for Natural Questions is", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 106, + 495, + 505, + 507 + ], + "spans": [ + { + "bbox": [ + 106, + 495, + 165, + 507 + ], + "score": 1.0, + "content": "training with", + "type": "text" + }, + { + "bbox": [ + 166, + 495, + 205, + 506 + ], + "score": 0.91, + "content": "D ^ { * } + 3 D", + "type": "inline_equation" + }, + { + "bbox": [ + 206, + 495, + 244, + 507 + ], + "score": 1.0, + "content": "and it is", + "type": "text" + }, + { + "bbox": [ + 244, + 495, + 283, + 506 + ], + "score": 0.91, + "content": "D ^ { * } + 1 D", + "type": "inline_equation" + }, + { + "bbox": [ + 284, + 495, + 505, + 507 + ], + "score": 1.0, + "content": "documents with Hotpot QA. This insight has been", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 104, + 505, + 506, + 520 + ], + "spans": [ + { + "bbox": [ + 104, + 505, + 506, + 520 + ], + "score": 1.0, + "content": "particularly beneficial for our algorithm, RAFT . In our experiments, we consistently employ", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 104, + 517, + 502, + 530 + ], + "spans": [ + { + "bbox": [ + 104, + 517, + 502, + 530 + ], + "score": 1.0, + "content": "a training setup consisting of one golden document alongside four distractor documents.", + "type": "text" + } + ], + "index": 30 + } + ], + "index": 25, + "bbox_fs": [ + 104, + 407, + 507, + 530 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 533, + 505, + 644 + ], + "lines": [ + { + "bbox": [ + 106, + 534, + 505, + 545 + ], + "spans": [ + { + "bbox": [ + 106, + 534, + 505, + 545 + ], + "score": 1.0, + "content": "Generalization to a variable number of test-time documents. 
We extended our research", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 106, + 544, + 506, + 557 + ], + "spans": [ + { + "bbox": [ + 106, + 544, + 506, + 557 + ], + "score": 1.0, + "content": "to examine the impact of different quantities of test-time documents on the model’s per-", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 106, + 555, + 506, + 568 + ], + "spans": [ + { + "bbox": [ + 106, + 555, + 506, + 568 + ], + "score": 1.0, + "content": "formance. Specifically, our experiments focused on assessing how models, trained with", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 104, + 566, + 506, + 579 + ], + "spans": [ + { + "bbox": [ + 104, + 566, + 506, + 579 + ], + "score": 1.0, + "content": "varying numbers of distractor documents, respond to changes in the number of documents", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 577, + 506, + 590 + ], + "spans": [ + { + "bbox": [ + 105, + 577, + 506, + 590 + ], + "score": 1.0, + "content": "presented at test time. The results, illustrated in Fig. 6, confirm that the inclusion of distrac-", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 106, + 588, + 505, + 601 + ], + "spans": [ + { + "bbox": [ + 106, + 588, + 505, + 601 + ], + "score": 1.0, + "content": "tor documents during training indeed makes the model more resilient to fluctuations in the", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 599, + 506, + 612 + ], + "spans": [ + { + "bbox": [ + 105, + 599, + 506, + 612 + ], + "score": 1.0, + "content": "number of documents encountered during testing. 
This ability to maintain consistent perfor-", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 106, + 610, + 506, + 623 + ], + "spans": [ + { + "bbox": [ + 106, + 610, + 506, + 623 + ], + "score": 1.0, + "content": "mance despite variations in test-time document numbers further validates the robustness of", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 620, + 506, + 636 + ], + "spans": [ + { + "bbox": [ + 105, + 620, + 506, + 636 + ], + "score": 1.0, + "content": "our approach, RAFT . This finding underscores the importance of a well-calibrated training", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 632, + 505, + 645 + ], + "spans": [ + { + "bbox": [ + 105, + 632, + 505, + 645 + ], + "score": 1.0, + "content": "environment to prepare the model for a range of scenarios it may encounter in real-world.", + "type": "text" + } + ], + "index": 40 + } + ], + "index": 35.5, + "bbox_fs": [ + 104, + 534, + 506, + 645 + ] + }, + { + "type": "title", + "bbox": [ + 108, + 660, + 205, + 674 + ], + "lines": [ + { + "bbox": [ + 105, + 660, + 207, + 676 + ], + "spans": [ + { + "bbox": [ + 105, + 660, + 207, + 676 + ], + "score": 1.0, + "content": "6 Related Works", + "type": "text" + } + ], + "index": 41 + } + ], + "index": 41 + }, + { + "type": "text", + "bbox": [ + 108, + 687, + 505, + 732 + ], + "lines": [ + { + "bbox": [ + 105, + 687, + 506, + 701 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 506, + 701 + ], + "score": 1.0, + "content": "Retrieval-Augmented Language Models Retrieval-Augmented Language Models (RALMs)", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 106, + 698, + 506, + 712 + ], + "spans": [ + { + "bbox": [ + 106, + 698, + 506, + 712 + ], + "score": 1.0, + "content": "enhance LLMs by integrating a retrieval module that sources relevant information from", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 709, + 507, + 722 + ], + "spans": [ + { + "bbox": [ + 105, 
+ 709, + 507, + 722 + ], + "score": 1.0, + "content": "external knowledge bases, significantly improving performance across various NLP tasks,", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 720, + 507, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 720, + 507, + 733 + ], + "score": 1.0, + "content": "including language modeling (Guu et al., 2020; Borgeaud et al., 2022; Khandelwal et al.,", + "type": "text" + } + ], + "index": 45 + } + ], + "index": 43.5, + "bbox_fs": [ + 105, + 687, + 507, + 733 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "image", + "bbox": [ + 144, + 81, + 467, + 191 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 144, + 81, + 467, + 191 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 144, + 81, + 467, + 191 + ], + "spans": [ + { + "bbox": [ + 144, + 81, + 467, + 191 + ], + "score": 0.972, + "type": "image", + "image_path": "12b8e955ae9a0307c0a7f13890daa53d74edc9d6d0f2d3be9e950103c883cfdd.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 144, + 81, + 467, + 117.66666666666666 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 144, + 117.66666666666666, + 467, + 154.33333333333331 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 144, + 154.33333333333331, + 467, + 190.99999999999997 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 201, + 506, + 257 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 200, + 506, + 214 + ], + "spans": [ + { + "bbox": [ + 105, + 200, + 506, + 214 + ], + "score": 1.0, + "content": "Figure 6: Test-Time Documents Varying: To analyze how robust RAFT is to varying number", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 212, + 505, + 225 + ], + "spans": [ + { + "bbox": [ + 106, + 213, + 484, + 225 + ], + "score": 1.0, + "content": "of test-time documents, we study three domains – NQ, Trivia QA and HotPot QA. 
In", + "type": "text" + }, + { + "bbox": [ + 485, + 212, + 505, + 224 + ], + "score": 0.51, + "content": "{ \\mathrm { N Q } } ,", + "type": "inline_equation" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 222, + 506, + 237 + ], + "spans": [ + { + "bbox": [ + 104, + 222, + 506, + 237 + ], + "score": 1.0, + "content": "we find that training with 4 documents leads to optimal performance, and this changes to 3", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 233, + 506, + 249 + ], + "spans": [ + { + "bbox": [ + 105, + 233, + 506, + 249 + ], + "score": 1.0, + "content": "and 2 for for Trivia QA and HotPot QA respectively. However, we see that training with", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 244, + 330, + 260 + ], + "spans": [ + { + "bbox": [ + 106, + 244, + 330, + 260 + ], + "score": 1.0, + "content": "only golden documents leads to poor performance.", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 5 + } + ], + "index": 3.0 + }, + { + "type": "text", + "bbox": [ + 107, + 282, + 506, + 382 + ], + "lines": [ + { + "bbox": [ + 106, + 283, + 506, + 295 + ], + "spans": [ + { + "bbox": [ + 106, + 283, + 506, + 295 + ], + "score": 1.0, + "content": "2019; Shi et al., 2023d; Lin et al., 2023b; Shi et al., 2023c; Asai et al., 2023; Xu et al., 2023;", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 293, + 506, + 307 + ], + "spans": [ + { + "bbox": [ + 106, + 293, + 506, + 307 + ], + "score": 1.0, + "content": "Wang et al., 2023) and open-domain question answering (Izacard et al., 2023; Lewis et al.,", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 304, + 506, + 318 + ], + "spans": [ + { + "bbox": [ + 105, + 304, + 506, + 318 + ], + "score": 1.0, + "content": "2020). 
For instance, Atlas (Izacard et al., 2023) fine-tunes T5 models with the retriever,", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 315, + 506, + 329 + ], + "spans": [ + { + "bbox": [ + 106, + 315, + 506, + 329 + ], + "score": 1.0, + "content": "treating documents as latent variables, while RETRO (Borgeaud et al., 2022) modifies the", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 326, + 506, + 340 + ], + "spans": [ + { + "bbox": [ + 105, + 326, + 506, + 340 + ], + "score": 1.0, + "content": "decoder-only architecture to include retrieved texts and conducts pre-training from scratch.", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 336, + 506, + 351 + ], + "spans": [ + { + "bbox": [ + 105, + 336, + 506, + 351 + ], + "score": 1.0, + "content": "kNN-LM (Khandelwal et al., 2019) interpolates between the LM’s next token distribution", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 348, + 505, + 362 + ], + "spans": [ + { + "bbox": [ + 106, + 348, + 505, + 362 + ], + "score": 1.0, + "content": "and distributions computed from retrieved tokens at inference. 
(Shi et al., 2023d; Ram", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 360, + 505, + 373 + ], + "spans": [ + { + "bbox": [ + 106, + 360, + 505, + 373 + ], + "score": 1.0, + "content": "et al., 2023) assume black-box access to an LLM, combining it with either off-the-shelf or", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 370, + 196, + 383 + ], + "spans": [ + { + "bbox": [ + 106, + 370, + 196, + 383 + ], + "score": 1.0, + "content": "fine-tuned retriever.", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 12 + }, + { + "type": "text", + "bbox": [ + 106, + 387, + 506, + 476 + ], + "lines": [ + { + "bbox": [ + 105, + 387, + 506, + 401 + ], + "spans": [ + { + "bbox": [ + 105, + 387, + 506, + 401 + ], + "score": 1.0, + "content": "Memorization A key question around large neural language models is whether they truly", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 397, + 506, + 412 + ], + "spans": [ + { + "bbox": [ + 104, + 397, + 506, + 412 + ], + "score": 1.0, + "content": "β€œunderstand” text (Feldman, 2020; Power et al., 2022) or simply rely on surface pattern", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 409, + 506, + 422 + ], + "spans": [ + { + "bbox": [ + 105, + 409, + 506, + 422 + ], + "score": 1.0, + "content": "memorization (Carlini et al., 2019; TΓ€nzer et al., 2022). 
(Feldman, 2020; Carlini et al., 2019;", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 420, + 507, + 434 + ], + "spans": [ + { + "bbox": [ + 105, + 420, + 507, + 434 + ], + "score": 1.0, + "content": "2022) develop methodologies to quantify the extent of memorization in neural models.", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 431, + 506, + 444 + ], + "spans": [ + { + "bbox": [ + 105, + 431, + 506, + 444 + ], + "score": 1.0, + "content": "(Brown et al., 2020; Power et al., 2022; Liu et al., 2022) further explored how memorization", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 442, + 506, + 456 + ], + "spans": [ + { + "bbox": [ + 105, + 442, + 506, + 456 + ], + "score": 1.0, + "content": "impacts the models’ generalization capabilities. (Carlini et al., 2021; Shi et al., 2023b)", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 453, + 507, + 467 + ], + "spans": [ + { + "bbox": [ + 105, + 453, + 507, + 467 + ], + "score": 1.0, + "content": "demonstrated the ability of language models to memorize and regurgitate training data,", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 464, + 433, + 477 + ], + "spans": [ + { + "bbox": [ + 105, + 464, + 433, + 477 + ], + "score": 1.0, + "content": "raising significant privacy concerns (Kandpal et al., 2022; Pan et al., 2020).", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 20.5 + }, + { + "type": "text", + "bbox": [ + 107, + 481, + 506, + 558 + ], + "lines": [ + { + "bbox": [ + 106, + 480, + 507, + 494 + ], + "spans": [ + { + "bbox": [ + 106, + 480, + 507, + 494 + ], + "score": 1.0, + "content": "Finetuning for RAG More recently, several papers have been exploring the idea of fine-", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 492, + 506, + 505 + ], + "spans": [ + { + "bbox": [ + 105, + 492, + 506, + 505 + ], + "score": 1.0, + "content": "tuning a pretrained LLM to be 
better at RAG tasks (Lin et al., 2023a; Wang et al., 2023; Xu", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 502, + 506, + 517 + ], + "spans": [ + { + "bbox": [ + 105, + 502, + 506, + 517 + ], + "score": 1.0, + "content": "et al., 2023; Liu et al., 2024). These works focus on constructing a combination of finetuning", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 514, + 506, + 527 + ], + "spans": [ + { + "bbox": [ + 105, + 514, + 506, + 527 + ], + "score": 1.0, + "content": "dataset for RAG and train a model to perform well on these tasks. In particular, in their", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 524, + 507, + 538 + ], + "spans": [ + { + "bbox": [ + 105, + 524, + 507, + 538 + ], + "score": 1.0, + "content": "settings, at test time, the domain or documents can be different than the training time;", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 536, + 506, + 550 + ], + "spans": [ + { + "bbox": [ + 105, + 536, + 506, + 550 + ], + "score": 1.0, + "content": "whereas our paper studies a slightly opposite scenario where we only care about testing the", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 546, + 266, + 559 + ], + "spans": [ + { + "bbox": [ + 105, + 546, + 266, + 559 + ], + "score": 1.0, + "content": "LLM on the same set of documents.", + "type": "text" + } + ], + "index": 31 + } + ], + "index": 28 + }, + { + "type": "title", + "bbox": [ + 107, + 580, + 188, + 594 + ], + "lines": [ + { + "bbox": [ + 104, + 577, + 190, + 597 + ], + "spans": [ + { + "bbox": [ + 104, + 577, + 190, + 597 + ], + "score": 1.0, + "content": "7 Conclusion", + "type": "text" + } + ], + "index": 32 + } + ], + "index": 32 + }, + { + "type": "text", + "bbox": [ + 107, + 609, + 505, + 676 + ], + "lines": [ + { + "bbox": [ + 104, + 607, + 506, + 624 + ], + "spans": [ + { + "bbox": [ + 104, + 607, + 506, + 624 + ], + "score": 1.0, + "content": "RAFT is a 
training strategy designed to enhance the model’s performance in answering", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 104, + 619, + 505, + 633 + ], + "spans": [ + { + "bbox": [ + 104, + 619, + 505, + 633 + ], + "score": 1.0, + "content": "questions within a specific domain, in \"open-book\" settings. We highlight several crucial", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 630, + 505, + 645 + ], + "spans": [ + { + "bbox": [ + 105, + 630, + 505, + 645 + ], + "score": 1.0, + "content": "design decisions, such as training the model alongside distractor documents, organizing the", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 641, + 506, + 655 + ], + "spans": [ + { + "bbox": [ + 105, + 641, + 506, + 655 + ], + "score": 1.0, + "content": "dataset so a portion lacks golden documents in their context, and formulating answers in a", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 653, + 506, + 666 + ], + "spans": [ + { + "bbox": [ + 105, + 653, + 506, + 666 + ], + "score": 1.0, + "content": "chain-of-thought manner with direct quotations from the relevant text. 
Our evaluations on", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 663, + 477, + 677 + ], + "spans": [ + { + "bbox": [ + 105, + 663, + 477, + 677 + ], + "score": 1.0, + "content": "PubMed, HotpotQA, and Gorilla API Bench underline RAFT’s significant potential.", + "type": "text" + } + ], + "index": 38 + } + ], + "index": 35.5 + }, + { + "type": "title", + "bbox": [ + 107, + 697, + 167, + 711 + ], + "lines": [ + { + "bbox": [ + 105, + 696, + 169, + 713 + ], + "spans": [ + { + "bbox": [ + 105, + 696, + 169, + 713 + ], + "score": 1.0, + "content": "References", + "type": "text" + } + ], + "index": 39 + } + ], + "index": 39 + }, + { + "type": "text", + "bbox": [ + 107, + 720, + 424, + 732 + ], + "lines": [ + { + "bbox": [ + 106, + 719, + 427, + 735 + ], + "spans": [ + { + "bbox": [ + 106, + 719, + 427, + 735 + ], + "score": 1.0, + "content": "Anthropic. Prompt engineering for claude’s long context window. 2023.", + "type": "text" + } + ], + "index": 40 + } + ], + "index": 40 + } + ], + "page_idx": 8, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 106, + 27, + 316, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "score": 1.0, + "content": "Published as a conference paper at COLM 2024", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 302, + 751, + 308, + 759 + ], + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 761 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 761 + ], + "score": 1.0, + "content": "9", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "image", + "bbox": [ + 144, + 81, + 467, + 191 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 144, + 81, + 467, + 191 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 144, + 81, + 467, + 191 + ], + "spans": [ + { + "bbox": [ + 144, + 81, + 467, + 191 + ], + "score": 0.972, + 
"type": "image", + "image_path": "12b8e955ae9a0307c0a7f13890daa53d74edc9d6d0f2d3be9e950103c883cfdd.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 144, + 81, + 467, + 117.66666666666666 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 144, + 117.66666666666666, + 467, + 154.33333333333331 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 144, + 154.33333333333331, + 467, + 190.99999999999997 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 201, + 506, + 257 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 200, + 506, + 214 + ], + "spans": [ + { + "bbox": [ + 105, + 200, + 506, + 214 + ], + "score": 1.0, + "content": "Figure 6: Test-Time Documents Varying: To analyze how robust RAFT is to varying number", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 212, + 505, + 225 + ], + "spans": [ + { + "bbox": [ + 106, + 213, + 484, + 225 + ], + "score": 1.0, + "content": "of test-time documents, we study three domains – NQ, Trivia QA and HotPot QA. In", + "type": "text" + }, + { + "bbox": [ + 485, + 212, + 505, + 224 + ], + "score": 0.51, + "content": "{ \\mathrm { N Q } } ,", + "type": "inline_equation" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 222, + 506, + 237 + ], + "spans": [ + { + "bbox": [ + 104, + 222, + 506, + 237 + ], + "score": 1.0, + "content": "we find that training with 4 documents leads to optimal performance, and this changes to 3", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 233, + 506, + 249 + ], + "spans": [ + { + "bbox": [ + 105, + 233, + 506, + 249 + ], + "score": 1.0, + "content": "and 2 for for Trivia QA and HotPot QA respectively. 
However, we see that training with", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 244, + 330, + 260 + ], + "spans": [ + { + "bbox": [ + 106, + 244, + 330, + 260 + ], + "score": 1.0, + "content": "only golden documents leads to poor performance.", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 5 + } + ], + "index": 3.0 + }, + { + "type": "text", + "bbox": [ + 107, + 282, + 506, + 382 + ], + "lines": [ + { + "bbox": [ + 106, + 283, + 506, + 295 + ], + "spans": [ + { + "bbox": [ + 106, + 283, + 506, + 295 + ], + "score": 1.0, + "content": "2019; Shi et al., 2023d; Lin et al., 2023b; Shi et al., 2023c; Asai et al., 2023; Xu et al., 2023;", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 293, + 506, + 307 + ], + "spans": [ + { + "bbox": [ + 106, + 293, + 506, + 307 + ], + "score": 1.0, + "content": "Wang et al., 2023) and open-domain question answering (Izacard et al., 2023; Lewis et al.,", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 304, + 506, + 318 + ], + "spans": [ + { + "bbox": [ + 105, + 304, + 506, + 318 + ], + "score": 1.0, + "content": "2020). 
For instance, Atlas (Izacard et al., 2023) fine-tunes T5 models with the retriever,", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 315, + 506, + 329 + ], + "spans": [ + { + "bbox": [ + 106, + 315, + 506, + 329 + ], + "score": 1.0, + "content": "treating documents as latent variables, while RETRO (Borgeaud et al., 2022) modifies the", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 326, + 506, + 340 + ], + "spans": [ + { + "bbox": [ + 105, + 326, + 506, + 340 + ], + "score": 1.0, + "content": "decoder-only architecture to include retrieved texts and conducts pre-training from scratch.", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 336, + 506, + 351 + ], + "spans": [ + { + "bbox": [ + 105, + 336, + 506, + 351 + ], + "score": 1.0, + "content": "kNN-LM (Khandelwal et al., 2019) interpolates between the LM’s next token distribution", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 348, + 505, + 362 + ], + "spans": [ + { + "bbox": [ + 106, + 348, + 505, + 362 + ], + "score": 1.0, + "content": "and distributions computed from retrieved tokens at inference. 
(Shi et al., 2023d; Ram", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 360, + 505, + 373 + ], + "spans": [ + { + "bbox": [ + 106, + 360, + 505, + 373 + ], + "score": 1.0, + "content": "et al., 2023) assume black-box access to an LLM, combining it with either off-the-shelf or", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 370, + 196, + 383 + ], + "spans": [ + { + "bbox": [ + 106, + 370, + 196, + 383 + ], + "score": 1.0, + "content": "fine-tuned retriever.", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 12, + "bbox_fs": [ + 105, + 283, + 506, + 383 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 387, + 506, + 476 + ], + "lines": [ + { + "bbox": [ + 105, + 387, + 506, + 401 + ], + "spans": [ + { + "bbox": [ + 105, + 387, + 506, + 401 + ], + "score": 1.0, + "content": "Memorization A key question around large neural language models is whether they truly", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 397, + 506, + 412 + ], + "spans": [ + { + "bbox": [ + 104, + 397, + 506, + 412 + ], + "score": 1.0, + "content": "β€œunderstand” text (Feldman, 2020; Power et al., 2022) or simply rely on surface pattern", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 409, + 506, + 422 + ], + "spans": [ + { + "bbox": [ + 105, + 409, + 506, + 422 + ], + "score": 1.0, + "content": "memorization (Carlini et al., 2019; TΓ€nzer et al., 2022). 
(Feldman, 2020; Carlini et al., 2019;", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 420, + 507, + 434 + ], + "spans": [ + { + "bbox": [ + 105, + 420, + 507, + 434 + ], + "score": 1.0, + "content": "2022) develop methodologies to quantify the extent of memorization in neural models.", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 431, + 506, + 444 + ], + "spans": [ + { + "bbox": [ + 105, + 431, + 506, + 444 + ], + "score": 1.0, + "content": "(Brown et al., 2020; Power et al., 2022; Liu et al., 2022) further explored how memorization", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 442, + 506, + 456 + ], + "spans": [ + { + "bbox": [ + 105, + 442, + 506, + 456 + ], + "score": 1.0, + "content": "impacts the models’ generalization capabilities. (Carlini et al., 2021; Shi et al., 2023b)", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 453, + 507, + 467 + ], + "spans": [ + { + "bbox": [ + 105, + 453, + 507, + 467 + ], + "score": 1.0, + "content": "demonstrated the ability of language models to memorize and regurgitate training data,", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 464, + 433, + 477 + ], + "spans": [ + { + "bbox": [ + 105, + 464, + 433, + 477 + ], + "score": 1.0, + "content": "raising significant privacy concerns (Kandpal et al., 2022; Pan et al., 2020).", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 20.5, + "bbox_fs": [ + 104, + 387, + 507, + 477 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 481, + 506, + 558 + ], + "lines": [ + { + "bbox": [ + 106, + 480, + 507, + 494 + ], + "spans": [ + { + "bbox": [ + 106, + 480, + 507, + 494 + ], + "score": 1.0, + "content": "Finetuning for RAG More recently, several papers have been exploring the idea of fine-", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 492, + 506, + 505 + ], + "spans": [ + { + "bbox": [ + 105, + 492, + 506, + 505 + ], + "score": 1.0, 
+ "content": "tuning a pretrained LLM to be better at RAG tasks (Lin et al., 2023a; Wang et al., 2023; Xu", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 502, + 506, + 517 + ], + "spans": [ + { + "bbox": [ + 105, + 502, + 506, + 517 + ], + "score": 1.0, + "content": "et al., 2023; Liu et al., 2024). These works focus on constructing a combination of finetuning", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 514, + 506, + 527 + ], + "spans": [ + { + "bbox": [ + 105, + 514, + 506, + 527 + ], + "score": 1.0, + "content": "dataset for RAG and train a model to perform well on these tasks. In particular, in their", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 524, + 507, + 538 + ], + "spans": [ + { + "bbox": [ + 105, + 524, + 507, + 538 + ], + "score": 1.0, + "content": "settings, at test time, the domain or documents can be different than the training time;", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 536, + 506, + 550 + ], + "spans": [ + { + "bbox": [ + 105, + 536, + 506, + 550 + ], + "score": 1.0, + "content": "whereas our paper studies a slightly opposite scenario where we only care about testing the", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 546, + 266, + 559 + ], + "spans": [ + { + "bbox": [ + 105, + 546, + 266, + 559 + ], + "score": 1.0, + "content": "LLM on the same set of documents.", + "type": "text" + } + ], + "index": 31 + } + ], + "index": 28, + "bbox_fs": [ + 105, + 480, + 507, + 559 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 580, + 188, + 594 + ], + "lines": [ + { + "bbox": [ + 104, + 577, + 190, + 597 + ], + "spans": [ + { + "bbox": [ + 104, + 577, + 190, + 597 + ], + "score": 1.0, + "content": "7 Conclusion", + "type": "text" + } + ], + "index": 32 + } + ], + "index": 32 + }, + { + "type": "text", + "bbox": [ + 107, + 609, + 505, + 676 + ], + "lines": [ + { + "bbox": [ + 104, + 607, + 506, + 624 + ], + "spans": [ + { 
+ "bbox": [ + 104, + 607, + 506, + 624 + ], + "score": 1.0, + "content": "RAFT is a training strategy designed to enhance the model’s performance in answering", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 104, + 619, + 505, + 633 + ], + "spans": [ + { + "bbox": [ + 104, + 619, + 505, + 633 + ], + "score": 1.0, + "content": "questions within a specific domain, in \"open-book\" settings. We highlight several crucial", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 630, + 505, + 645 + ], + "spans": [ + { + "bbox": [ + 105, + 630, + 505, + 645 + ], + "score": 1.0, + "content": "design decisions, such as training the model alongside distractor documents, organizing the", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 641, + 506, + 655 + ], + "spans": [ + { + "bbox": [ + 105, + 641, + 506, + 655 + ], + "score": 1.0, + "content": "dataset so a portion lacks golden documents in their context, and formulating answers in a", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 653, + 506, + 666 + ], + "spans": [ + { + "bbox": [ + 105, + 653, + 506, + 666 + ], + "score": 1.0, + "content": "chain-of-thought manner with direct quotations from the relevant text. 
Our evaluations on", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 663, + 477, + 677 + ], + "spans": [ + { + "bbox": [ + 105, + 663, + 477, + 677 + ], + "score": 1.0, + "content": "PubMed, HotpotQA, and Gorilla API Bench underline RAFT’s significant potential.", + "type": "text" + } + ], + "index": 38 + } + ], + "index": 35.5, + "bbox_fs": [ + 104, + 607, + 506, + 677 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 697, + 167, + 711 + ], + "lines": [ + { + "bbox": [ + 105, + 696, + 169, + 713 + ], + "spans": [ + { + "bbox": [ + 105, + 696, + 169, + 713 + ], + "score": 1.0, + "content": "References", + "type": "text" + } + ], + "index": 39 + } + ], + "index": 39 + }, + { + "type": "text", + "bbox": [ + 107, + 720, + 424, + 732 + ], + "lines": [ + { + "bbox": [ + 106, + 719, + 427, + 735 + ], + "spans": [ + { + "bbox": [ + 106, + 719, + 427, + 735 + ], + "score": 1.0, + "content": "Anthropic. Prompt engineering for claude’s long context window. 2023.", + "type": "text" + } + ], + "index": 40 + } + ], + "index": 40, + "bbox_fs": [ + 106, + 719, + 427, + 735 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 105, + 81, + 505, + 106 + ], + "lines": [ + { + "bbox": [ + 105, + 80, + 506, + 96 + ], + "spans": [ + { + "bbox": [ + 105, + 80, + 506, + 96 + ], + "score": 1.0, + "content": "Asai, A., Wu, Z., Wang, Y., Sil, A., and Hajishirzi, H. Self-rag: Learning to retrieve, generate,", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 93, + 441, + 106 + ], + "spans": [ + { + "bbox": [ + 116, + 93, + 441, + 106 + ], + "score": 1.0, + "content": "and critique through self-reflection. 
arXiv preprint arXiv:2310.11511, 2023.", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 0.5 + }, + { + "type": "text", + "bbox": [ + 100, + 113, + 506, + 739 + ], + "lines": [ + { + "bbox": [ + 106, + 113, + 507, + 128 + ], + "spans": [ + { + "bbox": [ + 106, + 113, + 507, + 128 + ], + "score": 1.0, + "content": "Borgeaud, S., Mensch, A., Hoffmann, J., Cai, T., Rutherford, E., Millican, K., Van Den Driess-", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 114, + 123, + 506, + 141 + ], + "spans": [ + { + "bbox": [ + 114, + 123, + 506, + 141 + ], + "score": 1.0, + "content": "che, G. B., Lespiau, J.-B., Damoc, B., Clark, A., et al. Improving language models by", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 114, + 132, + 507, + 153 + ], + "spans": [ + { + "bbox": [ + 114, + 132, + 507, + 153 + ], + "score": 1.0, + "content": "retrieving from trillions of tokens. In International conference on machine learning, pp.", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 115, + 146, + 225, + 162 + ], + "spans": [ + { + "bbox": [ + 115, + 146, + 225, + 162 + ], + "score": 1.0, + "content": "2206–2240. PMLR, 2022.", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 165, + 507, + 182 + ], + "spans": [ + { + "bbox": [ + 104, + 165, + 507, + 182 + ], + "score": 1.0, + "content": "Brown, T., Mann, B., Ryder, N., Subbiah, M., Kaplan, J. D., Dhariwal, P., Neelakantan, A.,", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 115, + 178, + 507, + 193 + ], + "spans": [ + { + "bbox": [ + 115, + 178, + 507, + 193 + ], + "score": 1.0, + "content": "Shyam, P., Sastry, G., Askell, A., et al. Language models are few-shot learners. 
Advances", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 114, + 188, + 375, + 205 + ], + "spans": [ + { + "bbox": [ + 114, + 188, + 375, + 205 + ], + "score": 1.0, + "content": "in neural information processing systems, 33:1877–1901, 2020.", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 102, + 208, + 507, + 228 + ], + "spans": [ + { + "bbox": [ + 102, + 208, + 507, + 228 + ], + "score": 1.0, + "content": "Carlini, N., Liu, C., Erlingsson, Ú., Kos, J., and Song, D. The secret sharer: Evaluating and", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 115, + 222, + 506, + 237 + ], + "spans": [ + { + "bbox": [ + 115, + 222, + 506, + 237 + ], + "score": 1.0, + "content": "testing unintended memorization in neural networks. In 28th USENIX Security Symposium", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 114, + 233, + 297, + 249 + ], + "spans": [ + { + "bbox": [ + 114, + 233, + 297, + 249 + ], + "score": 1.0, + "content": "(USENIX Security 19), pp. 267–284, 2019.", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 252, + 507, + 270 + ], + "spans": [ + { + "bbox": [ + 104, + 252, + 507, + 270 + ], + "score": 1.0, + "content": "Carlini, N., Tramer, F., Wallace, E., Jagielski, M., Herbert-Voss, A., Lee, K., Roberts, A.,", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 114, + 264, + 505, + 280 + ], + "spans": [ + { + "bbox": [ + 114, + 264, + 505, + 280 + ], + "score": 1.0, + "content": "Brown, T., Song, D., Erlingsson, U., et al. Extracting training data from large language", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 114, + 275, + 505, + 292 + ], + "spans": [ + { + "bbox": [ + 114, + 275, + 505, + 292 + ], + "score": 1.0, + "content": "models. In 30th USENIX Security Symposium (USENIX Security 21), pp. 
2633–2650, 2021.", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 294, + 506, + 313 + ], + "spans": [ + { + "bbox": [ + 104, + 294, + 506, + 313 + ], + "score": 1.0, + "content": "Carlini, N., Ippolito, D., Jagielski, M., Lee, K., Tramer, F., and Zhang, C. Quantifying", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 114, + 307, + 505, + 322 + ], + "spans": [ + { + "bbox": [ + 114, + 307, + 505, + 322 + ], + "score": 1.0, + "content": "memorization across neural language models. In The Eleventh International Conference on", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 114, + 317, + 250, + 333 + ], + "spans": [ + { + "bbox": [ + 114, + 317, + 250, + 333 + ], + "score": 1.0, + "content": "Learning Representations, 2022.", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 338, + 505, + 353 + ], + "spans": [ + { + "bbox": [ + 105, + 338, + 505, + 353 + ], + "score": 1.0, + "content": "Dernoncourt, F. and Lee, J. Y. Pubmed 200k rct: a dataset for sequential sentence classification", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 113, + 349, + 375, + 364 + ], + "spans": [ + { + "bbox": [ + 113, + 349, + 375, + 364 + ], + "score": 1.0, + "content": "in medical abstracts. arXiv preprint arXiv:1710.06071, 2017.", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 370, + 505, + 385 + ], + "spans": [ + { + "bbox": [ + 105, + 370, + 505, + 385 + ], + "score": 1.0, + "content": "Feldman, V. Does learning require memorization? a short tale about a long tail. In Proceedings", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 115, + 381, + 498, + 396 + ], + "spans": [ + { + "bbox": [ + 115, + 381, + 498, + 396 + ], + "score": 1.0, + "content": "of the 52nd Annual ACM SIGACT Symposium on Theory of Computing, pp. 
954–959, 2020.", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 401, + 506, + 416 + ], + "spans": [ + { + "bbox": [ + 105, + 401, + 506, + 416 + ], + "score": 1.0, + "content": "Guu, K., Lee, K., Tung, Z., Pasupat, P., and Chang, M. Retrieval augmented language model", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 113, + 411, + 501, + 428 + ], + "spans": [ + { + "bbox": [ + 113, + 411, + 501, + 428 + ], + "score": 1.0, + "content": "pre-training. In International conference on machine learning, pp. 3929–3938. PMLR, 2020.", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 432, + 507, + 447 + ], + "spans": [ + { + "bbox": [ + 105, + 432, + 507, + 447 + ], + "score": 1.0, + "content": "Izacard, G., Lewis, P., Lomeli, M., Hosseini, L., Petroni, F., Schick, T., Dwivedi-Yu, J.,", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 115, + 444, + 506, + 459 + ], + "spans": [ + { + "bbox": [ + 115, + 444, + 506, + 459 + ], + "score": 1.0, + "content": "Joulin, A., Riedel, S., and Grave, E. Atlas: Few-shot learning with retrieval augmented", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 114, + 453, + 507, + 471 + ], + "spans": [ + { + "bbox": [ + 114, + 453, + 507, + 471 + ], + "score": 1.0, + "content": "language models. Journal of Machine Learning Research, 24(251):1–43, 2023. URL http:", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 114, + 466, + 300, + 480 + ], + "spans": [ + { + "bbox": [ + 114, + 466, + 300, + 480 + ], + "score": 1.0, + "content": "//jmlr.org/papers/v24/23-0037.html.", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 104, + 486, + 505, + 501 + ], + "spans": [ + { + "bbox": [ + 104, + 486, + 505, + 501 + ], + "score": 1.0, + "content": "Jin, Q., Dhingra, B., Liu, Z., Cohen, W. W., and Lu, X. 
Pubmedqa: A dataset for biomedical", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 114, + 497, + 413, + 512 + ], + "spans": [ + { + "bbox": [ + 114, + 497, + 413, + 512 + ], + "score": 1.0, + "content": "research question answering. arXiv preprint arXiv:1909.06146, 2019.", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 104, + 517, + 505, + 532 + ], + "spans": [ + { + "bbox": [ + 104, + 517, + 505, + 532 + ], + "score": 1.0, + "content": "Joshi, M., Choi, E., Weld, D. S., and Zettlemoyer, L. Triviaqa: A large scale distantly", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 114, + 529, + 507, + 544 + ], + "spans": [ + { + "bbox": [ + 114, + 529, + 507, + 544 + ], + "score": 1.0, + "content": "supervised challenge dataset for reading comprehension. arXiv preprint arXiv:1705.03551,", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 115, + 539, + 143, + 554 + ], + "spans": [ + { + "bbox": [ + 115, + 539, + 143, + 554 + ], + "score": 1.0, + "content": "2017.", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 560, + 506, + 575 + ], + "spans": [ + { + "bbox": [ + 105, + 560, + 506, + 575 + ], + "score": 1.0, + "content": "Kandpal, N., Wallace, E., and Raffel, C. Deduplicating training data mitigates privacy risks", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 113, + 570, + 507, + 587 + ], + "spans": [ + { + "bbox": [ + 113, + 570, + 507, + 587 + ], + "score": 1.0, + "content": "in language models. In International Conference on Machine Learning, pp. 
10697–10707.", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 114, + 581, + 175, + 597 + ], + "spans": [ + { + "bbox": [ + 114, + 581, + 175, + 597 + ], + "score": 1.0, + "content": "PMLR, 2022.", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 104, + 602, + 506, + 617 + ], + "spans": [ + { + "bbox": [ + 104, + 602, + 506, + 617 + ], + "score": 1.0, + "content": "Khandelwal, U., Levy, O., Jurafsky, D., Zettlemoyer, L., and Lewis, M. General-", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 115, + 614, + 506, + 629 + ], + "spans": [ + { + "bbox": [ + 115, + 614, + 506, + 629 + ], + "score": 1.0, + "content": "ization through memorization: Nearest neighbor language models. arXiv preprint", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 115, + 624, + 221, + 639 + ], + "spans": [ + { + "bbox": [ + 115, + 624, + 221, + 639 + ], + "score": 1.0, + "content": "arXiv:1911.00172, 2019.", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 104, + 644, + 507, + 659 + ], + "spans": [ + { + "bbox": [ + 104, + 644, + 507, + 659 + ], + "score": 1.0, + "content": "Kwiatkowski, T., Palomaki, J., Redfield, O., Collins, M., Parikh, A., Alberti, C., Epstein, D.,", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 115, + 656, + 507, + 671 + ], + "spans": [ + { + "bbox": [ + 115, + 656, + 507, + 671 + ], + "score": 1.0, + "content": "Polosukhin, I., Devlin, J., Lee, K., et al. Natural questions: a benchmark for question", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 115, + 667, + 507, + 682 + ], + "spans": [ + { + "bbox": [ + 115, + 667, + 507, + 682 + ], + "score": 1.0, + "content": "answering research. 
Transactions of the Association for Computational Linguistics, 7:453–466,", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 115, + 676, + 143, + 693 + ], + "spans": [ + { + "bbox": [ + 115, + 676, + 143, + 693 + ], + "score": 1.0, + "content": "2019.", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 104, + 697, + 506, + 714 + ], + "spans": [ + { + "bbox": [ + 104, + 697, + 506, + 714 + ], + "score": 1.0, + "content": "Lazaridou, A., Gribovskaya, E., Stokowiec, W., and Grigorev, N. Internet-augmented", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 113, + 707, + 509, + 728 + ], + "spans": [ + { + "bbox": [ + 113, + 707, + 509, + 728 + ], + "score": 1.0, + "content": "language models through few-shot prompting for open-domain question answering.", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 115, + 720, + 281, + 735 + ], + "spans": [ + { + "bbox": [ + 115, + 720, + 281, + 735 + ], + "score": 1.0, + "content": "arXiv preprint arXiv:2203.05115, 2022.", + "type": "text" + } + ], + "index": 45 + } + ], + "index": 23.5 + } + ], + "page_idx": 9, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 26, + 316, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "score": 1.0, + "content": "Published as a conference paper at COLM 2024", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 760 + ], + "lines": [ + { + "bbox": [ + 298, + 750, + 313, + 765 + ], + "spans": [ + { + "bbox": [ + 298, + 750, + 313, + 765 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 15, + "width": 15 + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 105, + 81, + 505, + 106 + ], + "lines": [ + { + "bbox": [ + 105, + 80, + 506, + 96 + ], + "spans": [ + { + "bbox": [ + 105, + 80, + 506, + 96 + ], + "score": 1.0, + 
"content": "Asai, A., Wu, Z., Wang, Y., Sil, A., and Hajishirzi, H. Self-rag: Learning to retrieve, generate,", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 93, + 441, + 106 + ], + "spans": [ + { + "bbox": [ + 116, + 93, + 441, + 106 + ], + "score": 1.0, + "content": "and critique through self-reflection. arXiv preprint arXiv:2310.11511, 2023.", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 0.5, + "bbox_fs": [ + 105, + 80, + 506, + 106 + ] + }, + { + "type": "list", + "bbox": [ + 100, + 113, + 506, + 739 + ], + "lines": [ + { + "bbox": [ + 106, + 113, + 507, + 128 + ], + "spans": [ + { + "bbox": [ + 106, + 113, + 507, + 128 + ], + "score": 1.0, + "content": "Borgeaud, S., Mensch, A., Hoffmann, J., Cai, T., Rutherford, E., Millican, K., Van Den Driess-", + "type": "text" + } + ], + "index": 2, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 123, + 506, + 141 + ], + "spans": [ + { + "bbox": [ + 114, + 123, + 506, + 141 + ], + "score": 1.0, + "content": "che, G. B., Lespiau, J.-B., Damoc, B., Clark, A., et al. Improving language models by", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 114, + 132, + 507, + 153 + ], + "spans": [ + { + "bbox": [ + 114, + 132, + 507, + 153 + ], + "score": 1.0, + "content": "retrieving from trillions of tokens. In International conference on machine learning, pp.", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 115, + 146, + 225, + 162 + ], + "spans": [ + { + "bbox": [ + 115, + 146, + 225, + 162 + ], + "score": 1.0, + "content": "2206–2240. PMLR, 2022.", + "type": "text" + } + ], + "index": 5, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 165, + 507, + 182 + ], + "spans": [ + { + "bbox": [ + 104, + 165, + 507, + 182 + ], + "score": 1.0, + "content": "Brown, T., Mann, B., Ryder, N., Subbiah, M., Kaplan, J. 
D., Dhariwal, P., Neelakantan, A.,", + "type": "text" + } + ], + "index": 6, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 178, + 507, + 193 + ], + "spans": [ + { + "bbox": [ + 115, + 178, + 507, + 193 + ], + "score": 1.0, + "content": "Shyam, P., Sastry, G., Askell, A., et al. Language models are few-shot learners. Advances", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 114, + 188, + 375, + 205 + ], + "spans": [ + { + "bbox": [ + 114, + 188, + 375, + 205 + ], + "score": 1.0, + "content": "in neural information processing systems, 33:1877–1901, 2020.", + "type": "text" + } + ], + "index": 8, + "is_list_end_line": true + }, + { + "bbox": [ + 102, + 208, + 507, + 228 + ], + "spans": [ + { + "bbox": [ + 102, + 208, + 507, + 228 + ], + "score": 1.0, + "content": "Carlini, N., Liu, C., Erlingsson, Ú., Kos, J., and Song, D. The secret sharer: Evaluating and", + "type": "text" + } + ], + "index": 9, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 222, + 506, + 237 + ], + "spans": [ + { + "bbox": [ + 115, + 222, + 506, + 237 + ], + "score": 1.0, + "content": "testing unintended memorization in neural networks. In 28th USENIX Security Symposium", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 114, + 233, + 297, + 249 + ], + "spans": [ + { + "bbox": [ + 114, + 233, + 297, + 249 + ], + "score": 1.0, + "content": "(USENIX Security 19), pp. 267–284, 2019.", + "type": "text" + } + ], + "index": 11, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 252, + 507, + 270 + ], + "spans": [ + { + "bbox": [ + 104, + 252, + 507, + 270 + ], + "score": 1.0, + "content": "Carlini, N., Tramer, F., Wallace, E., Jagielski, M., Herbert-Voss, A., Lee, K., Roberts, A.,", + "type": "text" + } + ], + "index": 12, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 264, + 505, + 280 + ], + "spans": [ + { + "bbox": [ + 114, + 264, + 505, + 280 + ], + "score": 1.0, + "content": "Brown, T., Song, D., Erlingsson, U., et al. 
Extracting training data from large language", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 114, + 275, + 505, + 292 + ], + "spans": [ + { + "bbox": [ + 114, + 275, + 505, + 292 + ], + "score": 1.0, + "content": "models. In 30th USENIX Security Symposium (USENIX Security 21), pp. 2633–2650, 2021.", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 294, + 506, + 313 + ], + "spans": [ + { + "bbox": [ + 104, + 294, + 506, + 313 + ], + "score": 1.0, + "content": "Carlini, N., Ippolito, D., Jagielski, M., Lee, K., Tramer, F., and Zhang, C. Quantifying", + "type": "text" + } + ], + "index": 15, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 307, + 505, + 322 + ], + "spans": [ + { + "bbox": [ + 114, + 307, + 505, + 322 + ], + "score": 1.0, + "content": "memorization across neural language models. In The Eleventh International Conference on", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 114, + 317, + 250, + 333 + ], + "spans": [ + { + "bbox": [ + 114, + 317, + 250, + 333 + ], + "score": 1.0, + "content": "Learning Representations, 2022.", + "type": "text" + } + ], + "index": 17, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 338, + 505, + 353 + ], + "spans": [ + { + "bbox": [ + 105, + 338, + 505, + 353 + ], + "score": 1.0, + "content": "Dernoncourt, F. and Lee, J. Y. Pubmed 200k rct: a dataset for sequential sentence classification", + "type": "text" + } + ], + "index": 18, + "is_list_start_line": true + }, + { + "bbox": [ + 113, + 349, + 375, + 364 + ], + "spans": [ + { + "bbox": [ + 113, + 349, + 375, + 364 + ], + "score": 1.0, + "content": "in medical abstracts. arXiv preprint arXiv:1710.06071, 2017.", + "type": "text" + } + ], + "index": 19, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 370, + 505, + 385 + ], + "spans": [ + { + "bbox": [ + 105, + 370, + 505, + 385 + ], + "score": 1.0, + "content": "Feldman, V. Does learning require memorization? a short tale about a long tail. 
In Proceedings", + "type": "text" + } + ], + "index": 20, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 381, + 498, + 396 + ], + "spans": [ + { + "bbox": [ + 115, + 381, + 498, + 396 + ], + "score": 1.0, + "content": "of the 52nd Annual ACM SIGACT Symposium on Theory of Computing, pp. 954–959, 2020.", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 401, + 506, + 416 + ], + "spans": [ + { + "bbox": [ + 105, + 401, + 506, + 416 + ], + "score": 1.0, + "content": "Guu, K., Lee, K., Tung, Z., Pasupat, P., and Chang, M. Retrieval augmented language model", + "type": "text" + } + ], + "index": 22, + "is_list_start_line": true + }, + { + "bbox": [ + 113, + 411, + 501, + 428 + ], + "spans": [ + { + "bbox": [ + 113, + 411, + 501, + 428 + ], + "score": 1.0, + "content": "pre-training. In International conference on machine learning, pp. 3929–3938. PMLR, 2020.", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 432, + 507, + 447 + ], + "spans": [ + { + "bbox": [ + 105, + 432, + 507, + 447 + ], + "score": 1.0, + "content": "Izacard, G., Lewis, P., Lomeli, M., Hosseini, L., Petroni, F., Schick, T., Dwivedi-Yu, J.,", + "type": "text" + } + ], + "index": 24, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 444, + 506, + 459 + ], + "spans": [ + { + "bbox": [ + 115, + 444, + 506, + 459 + ], + "score": 1.0, + "content": "Joulin, A., Riedel, S., and Grave, E. Atlas: Few-shot learning with retrieval augmented", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 114, + 453, + 507, + 471 + ], + "spans": [ + { + "bbox": [ + 114, + 453, + 507, + 471 + ], + "score": 1.0, + "content": "language models. Journal of Machine Learning Research, 24(251):1–43, 2023. 
URL http:", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 114, + 466, + 300, + 480 + ], + "spans": [ + { + "bbox": [ + 114, + 466, + 300, + 480 + ], + "score": 1.0, + "content": "//jmlr.org/papers/v24/23-0037.html.", + "type": "text" + } + ], + "index": 27, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 486, + 505, + 501 + ], + "spans": [ + { + "bbox": [ + 104, + 486, + 505, + 501 + ], + "score": 1.0, + "content": "Jin, Q., Dhingra, B., Liu, Z., Cohen, W. W., and Lu, X. Pubmedqa: A dataset for biomedical", + "type": "text" + } + ], + "index": 28, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 497, + 413, + 512 + ], + "spans": [ + { + "bbox": [ + 114, + 497, + 413, + 512 + ], + "score": 1.0, + "content": "research question answering. arXiv preprint arXiv:1909.06146, 2019.", + "type": "text" + } + ], + "index": 29, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 517, + 505, + 532 + ], + "spans": [ + { + "bbox": [ + 104, + 517, + 505, + 532 + ], + "score": 1.0, + "content": "Joshi, M., Choi, E., Weld, D. S., and Zettlemoyer, L. Triviaqa: A large scale distantly", + "type": "text" + } + ], + "index": 30, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 529, + 507, + 544 + ], + "spans": [ + { + "bbox": [ + 114, + 529, + 507, + 544 + ], + "score": 1.0, + "content": "supervised challenge dataset for reading comprehension. arXiv preprint arXiv:1705.03551,", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 115, + 539, + 143, + 554 + ], + "spans": [ + { + "bbox": [ + 115, + 539, + 143, + 554 + ], + "score": 1.0, + "content": "2017.", + "type": "text" + } + ], + "index": 32, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 560, + 506, + 575 + ], + "spans": [ + { + "bbox": [ + 105, + 560, + 506, + 575 + ], + "score": 1.0, + "content": "Kandpal, N., Wallace, E., and Raffel, C. 
Deduplicating training data mitigates privacy risks", + "type": "text" + } + ], + "index": 33, + "is_list_start_line": true + }, + { + "bbox": [ + 113, + 570, + 507, + 587 + ], + "spans": [ + { + "bbox": [ + 113, + 570, + 507, + 587 + ], + "score": 1.0, + "content": "in language models. In International Conference on Machine Learning, pp. 10697–10707.", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 114, + 581, + 175, + 597 + ], + "spans": [ + { + "bbox": [ + 114, + 581, + 175, + 597 + ], + "score": 1.0, + "content": "PMLR, 2022.", + "type": "text" + } + ], + "index": 35, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 602, + 506, + 617 + ], + "spans": [ + { + "bbox": [ + 104, + 602, + 506, + 617 + ], + "score": 1.0, + "content": "Khandelwal, U., Levy, O., Jurafsky, D., Zettlemoyer, L., and Lewis, M. General-", + "type": "text" + } + ], + "index": 36, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 614, + 506, + 629 + ], + "spans": [ + { + "bbox": [ + 115, + 614, + 506, + 629 + ], + "score": 1.0, + "content": "ization through memorization: Nearest neighbor language models. arXiv preprint", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 115, + 624, + 221, + 639 + ], + "spans": [ + { + "bbox": [ + 115, + 624, + 221, + 639 + ], + "score": 1.0, + "content": "arXiv:1911.00172, 2019.", + "type": "text" + } + ], + "index": 38, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 644, + 507, + 659 + ], + "spans": [ + { + "bbox": [ + 104, + 644, + 507, + 659 + ], + "score": 1.0, + "content": "Kwiatkowski, T., Palomaki, J., Redfield, O., Collins, M., Parikh, A., Alberti, C., Epstein, D.,", + "type": "text" + } + ], + "index": 39, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 656, + 507, + 671 + ], + "spans": [ + { + "bbox": [ + 115, + 656, + 507, + 671 + ], + "score": 1.0, + "content": "Polosukhin, I., Devlin, J., Lee, K., et al. 
Natural questions: a benchmark for question", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 115, + 667, + 507, + 682 + ], + "spans": [ + { + "bbox": [ + 115, + 667, + 507, + 682 + ], + "score": 1.0, + "content": "answering research. Transactions of the Association for Computational Linguistics, 7:453–466,", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 115, + 676, + 143, + 693 + ], + "spans": [ + { + "bbox": [ + 115, + 676, + 143, + 693 + ], + "score": 1.0, + "content": "2019.", + "type": "text" + } + ], + "index": 42, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 697, + 506, + 714 + ], + "spans": [ + { + "bbox": [ + 104, + 697, + 506, + 714 + ], + "score": 1.0, + "content": "Lazaridou, A., Gribovskaya, E., Stokowiec, W., and Grigorev, N. Internet-augmented", + "type": "text" + } + ], + "index": 43, + "is_list_start_line": true + }, + { + "bbox": [ + 113, + 707, + 509, + 728 + ], + "spans": [ + { + "bbox": [ + 113, + 707, + 509, + 728 + ], + "score": 1.0, + "content": "language models through few-shot prompting for open-domain question answering.", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 115, + 720, + 281, + 735 + ], + "spans": [ + { + "bbox": [ + 115, + 720, + 281, + 735 + ], + "score": 1.0, + "content": "arXiv preprint arXiv:2203.05115, 2022.", + "type": "text" + } + ], + "index": 45, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 80, + 507, + 97 + ], + "spans": [ + { + "bbox": [ + 104, + 80, + 507, + 97 + ], + "score": 1.0, + "content": "Lewis, P., Perez, E., Piktus, A., Petroni, F., Karpukhin, V., Goyal, N., KΓΌttler, H., Lewis, M.,", + "type": "text", + "cross_page": true + } + ], + "index": 0, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 91, + 506, + 108 + ], + "spans": [ + { + "bbox": [ + 114, + 91, + 506, + 108 + ], + "score": 1.0, + "content": "Yih, W.-t., RocktΓ€schel, T., et al. 
Retrieval-augmented generation for knowledge-intensive", + "type": "text", + "cross_page": true + } + ], + "index": 1 + }, + { + "bbox": [ + 114, + 103, + 468, + 119 + ], + "spans": [ + { + "bbox": [ + 114, + 103, + 468, + 119 + ], + "score": 1.0, + "content": "nlp tasks. Advances in Neural Information Processing Systems, 33:9459–9474, 2020.", + "type": "text", + "cross_page": true + } + ], + "index": 2, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 121, + 506, + 138 + ], + "spans": [ + { + "bbox": [ + 105, + 121, + 506, + 138 + ], + "score": 1.0, + "content": "Lin, X. V., Chen, X., Chen, M., Shi, W., Lomeli, M., James, R., Rodriguez, P., Kahn, J., Szilvasy,", + "type": "text", + "cross_page": true + } + ], + "index": 3, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 133, + 505, + 150 + ], + "spans": [ + { + "bbox": [ + 114, + 133, + 505, + 150 + ], + "score": 1.0, + "content": "G., Lewis, M., et al. Ra-dit: Retrieval-augmented dual instruction tuning. arXiv preprint", + "type": "text", + "cross_page": true + } + ], + "index": 4 + }, + { + "bbox": [ + 115, + 146, + 224, + 158 + ], + "spans": [ + { + "bbox": [ + 115, + 146, + 224, + 158 + ], + "score": 1.0, + "content": "arXiv:2310.01352, 2023a.", + "type": "text", + "cross_page": true + } + ], + "index": 5, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 163, + 507, + 180 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 507, + 180 + ], + "score": 1.0, + "content": "Lin, X. V., Chen, X., Chen, M., Shi, W., Lomeli, M., James, R., Rodriguez, P., Kahn, J., Szilvasy,", + "type": "text", + "cross_page": true + } + ], + "index": 6, + "is_list_start_line": true + }, + { + "bbox": [ + 113, + 174, + 506, + 191 + ], + "spans": [ + { + "bbox": [ + 113, + 174, + 506, + 191 + ], + "score": 1.0, + "content": "G., Lewis, M., et al. Ra-dit: Retrieval-augmented dual instruction tuning. 
arXiv preprint", + "type": "text", + "cross_page": true + } + ], + "index": 7 + }, + { + "bbox": [ + 115, + 187, + 225, + 200 + ], + "spans": [ + { + "bbox": [ + 115, + 187, + 225, + 200 + ], + "score": 1.0, + "content": "arXiv:2310.01352, 2023b.", + "type": "text", + "cross_page": true + } + ], + "index": 8, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 205, + 506, + 220 + ], + "spans": [ + { + "bbox": [ + 105, + 205, + 506, + 220 + ], + "score": 1.0, + "content": "Liu, N. F., Lin, K., Hewitt, J., Paranjape, A., Bevilacqua, M., Petroni, F., and Liang, P. Lost", + "type": "text", + "cross_page": true + } + ], + "index": 9, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 217, + 506, + 231 + ], + "spans": [ + { + "bbox": [ + 114, + 217, + 506, + 231 + ], + "score": 1.0, + "content": "in the middle: How language models use long contexts. arXiv preprint arXiv:2307.03172,", + "type": "text", + "cross_page": true + } + ], + "index": 10 + }, + { + "bbox": [ + 114, + 227, + 143, + 243 + ], + "spans": [ + { + "bbox": [ + 114, + 227, + 143, + 243 + ], + "score": 1.0, + "content": "2023.", + "type": "text", + "cross_page": true + } + ], + "index": 11, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 246, + 505, + 261 + ], + "spans": [ + { + "bbox": [ + 105, + 246, + 505, + 261 + ], + "score": 1.0, + "content": "Liu, Z., Kitouni, O., Nolte, N. S., Michaud, E., Tegmark, M., and Williams, M. Towards", + "type": "text", + "cross_page": true + } + ], + "index": 12, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 258, + 507, + 274 + ], + "spans": [ + { + "bbox": [ + 114, + 258, + 507, + 274 + ], + "score": 1.0, + "content": "understanding grokking: An effective theory of representation learning. 
Advances in", + "type": "text", + "cross_page": true + } + ], + "index": 13 + }, + { + "bbox": [ + 115, + 269, + 378, + 283 + ], + "spans": [ + { + "bbox": [ + 115, + 269, + 378, + 283 + ], + "score": 1.0, + "content": "Neural Information Processing Systems, 35:34651–34663, 2022.", + "type": "text", + "cross_page": true + } + ], + "index": 14, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 287, + 506, + 303 + ], + "spans": [ + { + "bbox": [ + 105, + 287, + 506, + 303 + ], + "score": 1.0, + "content": "Liu, Z., Ping, W., Roy, R., Xu, P., Shoeybi, M., and Catanzaro, B. Chatqa: Building gpt-4 level", + "type": "text", + "cross_page": true + } + ], + "index": 15, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 300, + 399, + 312 + ], + "spans": [ + { + "bbox": [ + 114, + 300, + 399, + 312 + ], + "score": 1.0, + "content": "conversational qa models. arXiv preprint arXiv:2401.10225, 2024.", + "type": "text", + "cross_page": true + } + ], + "index": 16, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 318, + 507, + 334 + ], + "spans": [ + { + "bbox": [ + 105, + 318, + 507, + 334 + ], + "score": 1.0, + "content": "Pan, X., Zhang, M., Ji, S., and Yang, M. Privacy risks of general-purpose language models.", + "type": "text", + "cross_page": true + } + ], + "index": 17, + "is_list_start_line": true + }, + { + "bbox": [ + 113, + 329, + 468, + 345 + ], + "spans": [ + { + "bbox": [ + 113, + 329, + 468, + 345 + ], + "score": 1.0, + "content": "In 2020 IEEE Symposium on Security and Privacy (SP), pp. 1314–1331. IEEE, 2020.", + "type": "text", + "cross_page": true + } + ], + "index": 18, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 346, + 506, + 365 + ], + "spans": [ + { + "bbox": [ + 104, + 346, + 506, + 365 + ], + "score": 1.0, + "content": "Patil, S. G., Zhang, T., Wang, X., and Gonzalez, J. E. 
Gorilla: Large language model connected", + "type": "text", + "cross_page": true + } + ], + "index": 19, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 360, + 366, + 374 + ], + "spans": [ + { + "bbox": [ + 114, + 360, + 366, + 374 + ], + "score": 1.0, + "content": "with massive apis. arXiv preprint arXiv:2305.15334, 2023.", + "type": "text", + "cross_page": true + } + ], + "index": 20, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 379, + 505, + 393 + ], + "spans": [ + { + "bbox": [ + 105, + 379, + 505, + 393 + ], + "score": 1.0, + "content": "Power, A., Burda, Y., Edwards, H., Babuschkin, I., and Misra, V. Grokking: Generalization", + "type": "text", + "cross_page": true + } + ], + "index": 21, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 391, + 501, + 405 + ], + "spans": [ + { + "bbox": [ + 114, + 391, + 501, + 405 + ], + "score": 1.0, + "content": "beyond overfitting on small algorithmic datasets. arXiv preprint arXiv:2201.02177, 2022.", + "type": "text", + "cross_page": true + } + ], + "index": 22 + }, + { + "bbox": [ + 106, + 410, + 505, + 423 + ], + "spans": [ + { + "bbox": [ + 106, + 410, + 505, + 423 + ], + "score": 1.0, + "content": "Ram, O., Levine, Y., Dalmedigos, I., Muhlgay, D., Shashua, A., Leyton-Brown, K.,", + "type": "text", + "cross_page": true + } + ], + "index": 23, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 419, + 506, + 436 + ], + "spans": [ + { + "bbox": [ + 114, + 419, + 506, + 436 + ], + "score": 1.0, + "content": "and Shoham, Y. In-context retrieval-augmented language models. 
arXiv preprint", + "type": "text", + "cross_page": true + } + ], + "index": 24 + }, + { + "bbox": [ + 115, + 432, + 219, + 444 + ], + "spans": [ + { + "bbox": [ + 115, + 432, + 219, + 444 + ], + "score": 1.0, + "content": "arXiv:2302.00083, 2023.", + "type": "text", + "cross_page": true + } + ], + "index": 25, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 449, + 506, + 466 + ], + "spans": [ + { + "bbox": [ + 104, + 449, + 506, + 466 + ], + "score": 1.0, + "content": "Shi, F., Chen, X., Misra, K., Scales, N., Dohan, D., Chi, E. H., SchΓ€rli, N., and Zhou, D. Large", + "type": "text", + "cross_page": true + } + ], + "index": 26, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 462, + 506, + 477 + ], + "spans": [ + { + "bbox": [ + 114, + 462, + 506, + 477 + ], + "score": 1.0, + "content": "language models can be easily distracted by irrelevant context. In International Conference", + "type": "text", + "cross_page": true + } + ], + "index": 27 + }, + { + "bbox": [ + 114, + 473, + 348, + 487 + ], + "spans": [ + { + "bbox": [ + 114, + 473, + 348, + 487 + ], + "score": 1.0, + "content": "on Machine Learning, pp. 31210–31227. PMLR, 2023a.", + "type": "text", + "cross_page": true + } + ], + "index": 28, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 491, + 507, + 507 + ], + "spans": [ + { + "bbox": [ + 105, + 491, + 507, + 507 + ], + "score": 1.0, + "content": "Shi, W., Ajith, A., Xia, M., Huang, Y., Liu, D., Blevins, T., Chen, D., and Zettlemoyer, L.", + "type": "text", + "cross_page": true + } + ], + "index": 29, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 502, + 507, + 519 + ], + "spans": [ + { + "bbox": [ + 114, + 502, + 507, + 519 + ], + "score": 1.0, + "content": "Detecting pretraining data from large language models. 
arXiv preprint arXiv:2310.16789,", + "type": "text", + "cross_page": true + } + ], + "index": 30 + }, + { + "bbox": [ + 114, + 514, + 149, + 528 + ], + "spans": [ + { + "bbox": [ + 114, + 514, + 149, + 528 + ], + "score": 1.0, + "content": "2023b.", + "type": "text", + "cross_page": true + } + ], + "index": 31, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 533, + 507, + 547 + ], + "spans": [ + { + "bbox": [ + 105, + 533, + 507, + 547 + ], + "score": 1.0, + "content": "Shi, W., Min, S., Lomeli, M., Zhou, C., Li, M., Lin, V., Smith, N. A., Zettlemoyer, L., Yih, S.,", + "type": "text", + "cross_page": true + } + ], + "index": 32, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 543, + 507, + 560 + ], + "spans": [ + { + "bbox": [ + 114, + 543, + 507, + 560 + ], + "score": 1.0, + "content": "and Lewis, M. In-context pretraining: Language modeling beyond document boundaries.", + "type": "text", + "cross_page": true + } + ], + "index": 33 + }, + { + "bbox": [ + 113, + 554, + 286, + 570 + ], + "spans": [ + { + "bbox": [ + 113, + 554, + 286, + 570 + ], + "score": 1.0, + "content": "arXiv preprint arXiv:2310.10638, 2023c.", + "type": "text", + "cross_page": true + } + ], + "index": 34, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 574, + 507, + 590 + ], + "spans": [ + { + "bbox": [ + 105, + 574, + 507, + 590 + ], + "score": 1.0, + "content": "Shi, W., Min, S., Yasunaga, M., Seo, M., James, R., Lewis, M., Zettlemoyer, L., and Yih, W.-t.", + "type": "text", + "cross_page": true + } + ], + "index": 35, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 585, + 506, + 599 + ], + "spans": [ + { + "bbox": [ + 114, + 585, + 506, + 599 + ], + "score": 1.0, + "content": "Replug: Retrieval-augmented black-box language models. 
arXiv preprint arXiv:2301.12652,", + "type": "text", + "cross_page": true + } + ], + "index": 36 + }, + { + "bbox": [ + 114, + 595, + 150, + 610 + ], + "spans": [ + { + "bbox": [ + 114, + 595, + 150, + 610 + ], + "score": 1.0, + "content": "2023d.", + "type": "text", + "cross_page": true + } + ], + "index": 37, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 614, + 507, + 631 + ], + "spans": [ + { + "bbox": [ + 105, + 614, + 507, + 631 + ], + "score": 1.0, + "content": "TΓ€nzer, M., Ruder, S., and Rei, M. Memorisation versus generalisation in pre-trained lan-", + "type": "text", + "cross_page": true + } + ], + "index": 38, + "is_list_start_line": true + }, + { + "bbox": [ + 113, + 625, + 507, + 642 + ], + "spans": [ + { + "bbox": [ + 113, + 625, + 507, + 642 + ], + "score": 1.0, + "content": "guage models. In Proceedings of the 60th Annual Meeting of the Association for Computational", + "type": "text", + "cross_page": true + } + ], + "index": 39 + }, + { + "bbox": [ + 114, + 637, + 361, + 653 + ], + "spans": [ + { + "bbox": [ + 114, + 637, + 361, + 653 + ], + "score": 1.0, + "content": "Linguistics (Volume 1: Long Papers), pp. 7564–7578, 2022.", + "type": "text", + "cross_page": true + } + ], + "index": 40, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 657, + 506, + 672 + ], + "spans": [ + { + "bbox": [ + 106, + 657, + 506, + 672 + ], + "score": 1.0, + "content": "Vu, T., Iyyer, M., Wang, X., Constant, N., Wei, J., Wei, J., Tar, C., Sung, Y.-H., Zhou, D., Le,", + "type": "text", + "cross_page": true + } + ], + "index": 41, + "is_list_start_line": true + }, + { + "bbox": [ + 113, + 666, + 508, + 685 + ], + "spans": [ + { + "bbox": [ + 113, + 666, + 508, + 685 + ], + "score": 1.0, + "content": "Q., et al. 
Freshllms: Refreshing large language models with search engine augmentation.", + "type": "text", + "cross_page": true + } + ], + "index": 42 + }, + { + "bbox": [ + 114, + 679, + 282, + 693 + ], + "spans": [ + { + "bbox": [ + 114, + 679, + 282, + 693 + ], + "score": 1.0, + "content": "arXiv preprint arXiv:2310.03214, 2023.", + "type": "text", + "cross_page": true + } + ], + "index": 43, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 697, + 507, + 714 + ], + "spans": [ + { + "bbox": [ + 105, + 697, + 507, + 714 + ], + "score": 1.0, + "content": "Wang, B., Ping, W., McAfee, L., Xu, P., Li, B., Shoeybi, M., and Catanzaro, B. Instructretro:", + "type": "text", + "cross_page": true + } + ], + "index": 44, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 708, + 507, + 725 + ], + "spans": [ + { + "bbox": [ + 114, + 708, + 507, + 725 + ], + "score": 1.0, + "content": "Instruction tuning post retrieval-augmented pretraining. arXiv preprint arXiv:2310.07713,", + "type": "text", + "cross_page": true + } + ], + "index": 45 + }, + { + "bbox": [ + 115, + 720, + 142, + 733 + ], + "spans": [ + { + "bbox": [ + 115, + 720, + 142, + 733 + ], + "score": 1.0, + "content": "2023.", + "type": "text", + "cross_page": true + } + ], + "index": 46, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 82, + 506, + 96 + ], + "spans": [ + { + "bbox": [ + 106, + 82, + 506, + 96 + ], + "score": 1.0, + "content": "Wang, Y., Kordi, Y., Mishra, S., Liu, A., Smith, N. A., Khashabi, D., and Hajishirzi, H.", + "type": "text", + "cross_page": true + } + ], + "index": 0, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 92, + 506, + 108 + ], + "spans": [ + { + "bbox": [ + 114, + 92, + 506, + 108 + ], + "score": 1.0, + "content": "Self-instruct: Aligning language models with self-generated instructions. 
arXiv preprint", + "type": "text", + "cross_page": true + } + ], + "index": 1 + }, + { + "bbox": [ + 116, + 104, + 219, + 117 + ], + "spans": [ + { + "bbox": [ + 116, + 104, + 219, + 117 + ], + "score": 1.0, + "content": "arXiv:2212.10560, 2022.", + "type": "text", + "cross_page": true + } + ], + "index": 2, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 122, + 506, + 137 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 506, + 137 + ], + "score": 1.0, + "content": "Wei, J., Wang, X., Schuurmans, D., Bosma, M., Xia, F., Chi, E., Le, Q. V., Zhou, D., et al.", + "type": "text", + "cross_page": true + } + ], + "index": 3, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 132, + 506, + 149 + ], + "spans": [ + { + "bbox": [ + 115, + 132, + 506, + 149 + ], + "score": 1.0, + "content": "Chain-of-thought prompting elicits reasoning in large language models. Advances in", + "type": "text", + "cross_page": true + } + ], + "index": 4 + }, + { + "bbox": [ + 115, + 144, + 379, + 159 + ], + "spans": [ + { + "bbox": [ + 115, + 144, + 379, + 159 + ], + "score": 1.0, + "content": "Neural Information Processing Systems, 35:24824–24837, 2022.", + "type": "text", + "cross_page": true + } + ], + "index": 5, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 163, + 505, + 178 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 505, + 178 + ], + "score": 1.0, + "content": "Weston, J. and Sukhbaatar, S. System 2 attention (is something you might need too). 
arXiv", + "type": "text", + "cross_page": true + } + ], + "index": 6, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 175, + 255, + 188 + ], + "spans": [ + { + "bbox": [ + 114, + 175, + 255, + 188 + ], + "score": 1.0, + "content": "preprint arXiv:2311.11829, 2023.", + "type": "text", + "cross_page": true + } + ], + "index": 7, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 193, + 506, + 208 + ], + "spans": [ + { + "bbox": [ + 105, + 193, + 506, + 208 + ], + "score": 1.0, + "content": "Xiong, W., Liu, J., Molybog, I., Zhang, H., Bhargava, P., Hou, R., Martin, L., Rungta, R.,", + "type": "text", + "cross_page": true + } + ], + "index": 8, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 204, + 506, + 219 + ], + "spans": [ + { + "bbox": [ + 115, + 204, + 506, + 219 + ], + "score": 1.0, + "content": "Sankararaman, K. A., Oguz, B., et al. Effective long-context scaling of foundation models.", + "type": "text", + "cross_page": true + } + ], + "index": 9 + }, + { + "bbox": [ + 115, + 215, + 281, + 230 + ], + "spans": [ + { + "bbox": [ + 115, + 215, + 281, + 230 + ], + "score": 1.0, + "content": "arXiv preprint arXiv:2309.16039, 2023.", + "type": "text", + "cross_page": true + } + ], + "index": 10, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 234, + 506, + 248 + ], + "spans": [ + { + "bbox": [ + 105, + 234, + 506, + 248 + ], + "score": 1.0, + "content": "Xu, P., Ping, W., Wu, X., McAfee, L., Zhu, C., Liu, Z., Subramanian, S., Bakhturina, E.,", + "type": "text", + "cross_page": true + } + ], + "index": 11, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 245, + 506, + 259 + ], + "spans": [ + { + "bbox": [ + 115, + 245, + 506, + 259 + ], + "score": 1.0, + "content": "Shoeybi, M., and Catanzaro, B. Retrieval meets long context large language models. 
arXiv", + "type": "text", + "cross_page": true + } + ], + "index": 12 + }, + { + "bbox": [ + 115, + 257, + 255, + 269 + ], + "spans": [ + { + "bbox": [ + 115, + 257, + 255, + 269 + ], + "score": 1.0, + "content": "preprint arXiv:2310.03025, 2023.", + "type": "text", + "cross_page": true + } + ], + "index": 13, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 274, + 506, + 290 + ], + "spans": [ + { + "bbox": [ + 105, + 274, + 506, + 290 + ], + "score": 1.0, + "content": "Yang, Z., Qi, P., Zhang, S., Bengio, Y., Cohen, W. W., Salakhutdinov, R., and Manning, C. D.", + "type": "text", + "cross_page": true + } + ], + "index": 14, + "is_list_start_line": true + }, + { + "bbox": [ + 114, + 285, + 505, + 300 + ], + "spans": [ + { + "bbox": [ + 114, + 285, + 505, + 300 + ], + "score": 1.0, + "content": "Hotpotqa: A dataset for diverse, explainable multi-hop question answering. arXiv preprint", + "type": "text", + "cross_page": true + } + ], + "index": 15 + }, + { + "bbox": [ + 116, + 298, + 219, + 310 + ], + "spans": [ + { + "bbox": [ + 116, + 298, + 219, + 310 + ], + "score": 1.0, + "content": "arXiv:1809.09600, 2018.", + "type": "text", + "cross_page": true + } + ], + "index": 16, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 316, + 506, + 329 + ], + "spans": [ + { + "bbox": [ + 106, + 316, + 506, + 329 + ], + "score": 1.0, + "content": "Zhou, C., Liu, P., Xu, P., Iyer, S., Sun, J., Mao, Y., Ma, X., Efrat, A., Yu, P., Yu, L., et al. Lima:", + "type": "text", + "cross_page": true + } + ], + "index": 17, + "is_list_start_line": true + }, + { + "bbox": [ + 115, + 327, + 404, + 341 + ], + "spans": [ + { + "bbox": [ + 115, + 327, + 404, + 341 + ], + "score": 1.0, + "content": "Less is more for alignment. 
arXiv preprint arXiv:2305.11206, 2023.", + "type": "text", + "cross_page": true + } + ], + "index": 18, + "is_list_end_line": true + } + ], + "index": 23.5, + "bbox_fs": [ + 102, + 113, + 509, + 735 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 104, + 45, + 507, + 732 + ], + "lines": [ + { + "bbox": [ + 104, + 80, + 507, + 97 + ], + "spans": [ + { + "bbox": [ + 104, + 80, + 507, + 97 + ], + "score": 1.0, + "content": "Lewis, P., Perez, E., Piktus, A., Petroni, F., Karpukhin, V., Goyal, N., KΓΌttler, H., Lewis, M.,", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 114, + 91, + 506, + 108 + ], + "spans": [ + { + "bbox": [ + 114, + 91, + 506, + 108 + ], + "score": 1.0, + "content": "Yih, W.-t., RocktΓ€schel, T., et al. Retrieval-augmented generation for knowledge-intensive", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 114, + 103, + 468, + 119 + ], + "spans": [ + { + "bbox": [ + 114, + 103, + 468, + 119 + ], + "score": 1.0, + "content": "nlp tasks. Advances in Neural Information Processing Systems, 33:9459–9474, 2020.", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 121, + 506, + 138 + ], + "spans": [ + { + "bbox": [ + 105, + 121, + 506, + 138 + ], + "score": 1.0, + "content": "Lin, X. V., Chen, X., Chen, M., Shi, W., Lomeli, M., James, R., Rodriguez, P., Kahn, J., Szilvasy,", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 114, + 133, + 505, + 150 + ], + "spans": [ + { + "bbox": [ + 114, + 133, + 505, + 150 + ], + "score": 1.0, + "content": "G., Lewis, M., et al. Ra-dit: Retrieval-augmented dual instruction tuning. 
arXiv preprint", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 115, + 146, + 224, + 158 + ], + "spans": [ + { + "bbox": [ + 115, + 146, + 224, + 158 + ], + "score": 1.0, + "content": "arXiv:2310.01352, 2023a.", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 163, + 507, + 180 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 507, + 180 + ], + "score": 1.0, + "content": "Lin, X. V., Chen, X., Chen, M., Shi, W., Lomeli, M., James, R., Rodriguez, P., Kahn, J., Szilvasy,", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 113, + 174, + 506, + 191 + ], + "spans": [ + { + "bbox": [ + 113, + 174, + 506, + 191 + ], + "score": 1.0, + "content": "G., Lewis, M., et al. Ra-dit: Retrieval-augmented dual instruction tuning. arXiv preprint", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 115, + 187, + 225, + 200 + ], + "spans": [ + { + "bbox": [ + 115, + 187, + 225, + 200 + ], + "score": 1.0, + "content": "arXiv:2310.01352, 2023b.", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 205, + 506, + 220 + ], + "spans": [ + { + "bbox": [ + 105, + 205, + 506, + 220 + ], + "score": 1.0, + "content": "Liu, N. F., Lin, K., Hewitt, J., Paranjape, A., Bevilacqua, M., Petroni, F., and Liang, P. Lost", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 114, + 217, + 506, + 231 + ], + "spans": [ + { + "bbox": [ + 114, + 217, + 506, + 231 + ], + "score": 1.0, + "content": "in the middle: How language models use long contexts. arXiv preprint arXiv:2307.03172,", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 114, + 227, + 143, + 243 + ], + "spans": [ + { + "bbox": [ + 114, + 227, + 143, + 243 + ], + "score": 1.0, + "content": "2023.", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 246, + 505, + 261 + ], + "spans": [ + { + "bbox": [ + 105, + 246, + 505, + 261 + ], + "score": 1.0, + "content": "Liu, Z., Kitouni, O., Nolte, N. 
S., Michaud, E., Tegmark, M., and Williams, M. Towards", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 114, + 258, + 507, + 274 + ], + "spans": [ + { + "bbox": [ + 114, + 258, + 507, + 274 + ], + "score": 1.0, + "content": "understanding grokking: An effective theory of representation learning. Advances in", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 115, + 269, + 378, + 283 + ], + "spans": [ + { + "bbox": [ + 115, + 269, + 378, + 283 + ], + "score": 1.0, + "content": "Neural Information Processing Systems, 35:34651–34663, 2022.", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 287, + 506, + 303 + ], + "spans": [ + { + "bbox": [ + 105, + 287, + 506, + 303 + ], + "score": 1.0, + "content": "Liu, Z., Ping, W., Roy, R., Xu, P., Shoeybi, M., and Catanzaro, B. Chatqa: Building gpt-4 level", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 114, + 300, + 399, + 312 + ], + "spans": [ + { + "bbox": [ + 114, + 300, + 399, + 312 + ], + "score": 1.0, + "content": "conversational qa models. arXiv preprint arXiv:2401.10225, 2024.", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 318, + 507, + 334 + ], + "spans": [ + { + "bbox": [ + 105, + 318, + 507, + 334 + ], + "score": 1.0, + "content": "Pan, X., Zhang, M., Ji, S., and Yang, M. Privacy risks of general-purpose language models.", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 113, + 329, + 468, + 345 + ], + "spans": [ + { + "bbox": [ + 113, + 329, + 468, + 345 + ], + "score": 1.0, + "content": "In 2020 IEEE Symposium on Security and Privacy (SP), pp. 1314–1331. IEEE, 2020.", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 346, + 506, + 365 + ], + "spans": [ + { + "bbox": [ + 104, + 346, + 506, + 365 + ], + "score": 1.0, + "content": "Patil, S. G., Zhang, T., Wang, X., and Gonzalez, J. E. 
Gorilla: Large language model connected", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 114, + 360, + 366, + 374 + ], + "spans": [ + { + "bbox": [ + 114, + 360, + 366, + 374 + ], + "score": 1.0, + "content": "with massive apis. arXiv preprint arXiv:2305.15334, 2023.", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 379, + 505, + 393 + ], + "spans": [ + { + "bbox": [ + 105, + 379, + 505, + 393 + ], + "score": 1.0, + "content": "Power, A., Burda, Y., Edwards, H., Babuschkin, I., and Misra, V. Grokking: Generalization", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 114, + 391, + 501, + 405 + ], + "spans": [ + { + "bbox": [ + 114, + 391, + 501, + 405 + ], + "score": 1.0, + "content": "beyond overfitting on small algorithmic datasets. arXiv preprint arXiv:2201.02177, 2022.", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 106, + 410, + 505, + 423 + ], + "spans": [ + { + "bbox": [ + 106, + 410, + 505, + 423 + ], + "score": 1.0, + "content": "Ram, O., Levine, Y., Dalmedigos, I., Muhlgay, D., Shashua, A., Leyton-Brown, K.,", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 114, + 419, + 506, + 436 + ], + "spans": [ + { + "bbox": [ + 114, + 419, + 506, + 436 + ], + "score": 1.0, + "content": "and Shoham, Y. In-context retrieval-augmented language models. arXiv preprint", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 115, + 432, + 219, + 444 + ], + "spans": [ + { + "bbox": [ + 115, + 432, + 219, + 444 + ], + "score": 1.0, + "content": "arXiv:2302.00083, 2023.", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 104, + 449, + 506, + 466 + ], + "spans": [ + { + "bbox": [ + 104, + 449, + 506, + 466 + ], + "score": 1.0, + "content": "Shi, F., Chen, X., Misra, K., Scales, N., Dohan, D., Chi, E. H., SchΓ€rli, N., and Zhou, D. 
Large", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 114, + 462, + 506, + 477 + ], + "spans": [ + { + "bbox": [ + 114, + 462, + 506, + 477 + ], + "score": 1.0, + "content": "language models can be easily distracted by irrelevant context. In International Conference", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 114, + 473, + 348, + 487 + ], + "spans": [ + { + "bbox": [ + 114, + 473, + 348, + 487 + ], + "score": 1.0, + "content": "on Machine Learning, pp. 31210–31227. PMLR, 2023a.", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 491, + 507, + 507 + ], + "spans": [ + { + "bbox": [ + 105, + 491, + 507, + 507 + ], + "score": 1.0, + "content": "Shi, W., Ajith, A., Xia, M., Huang, Y., Liu, D., Blevins, T., Chen, D., and Zettlemoyer, L.", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 114, + 502, + 507, + 519 + ], + "spans": [ + { + "bbox": [ + 114, + 502, + 507, + 519 + ], + "score": 1.0, + "content": "Detecting pretraining data from large language models. arXiv preprint arXiv:2310.16789,", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 114, + 514, + 149, + 528 + ], + "spans": [ + { + "bbox": [ + 114, + 514, + 149, + 528 + ], + "score": 1.0, + "content": "2023b.", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 533, + 507, + 547 + ], + "spans": [ + { + "bbox": [ + 105, + 533, + 507, + 547 + ], + "score": 1.0, + "content": "Shi, W., Min, S., Lomeli, M., Zhou, C., Li, M., Lin, V., Smith, N. A., Zettlemoyer, L., Yih, S.,", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 114, + 543, + 507, + 560 + ], + "spans": [ + { + "bbox": [ + 114, + 543, + 507, + 560 + ], + "score": 1.0, + "content": "and Lewis, M. 
In-context pretraining: Language modeling beyond document boundaries.", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 113, + 554, + 286, + 570 + ], + "spans": [ + { + "bbox": [ + 113, + 554, + 286, + 570 + ], + "score": 1.0, + "content": "arXiv preprint arXiv:2310.10638, 2023c.", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 574, + 507, + 590 + ], + "spans": [ + { + "bbox": [ + 105, + 574, + 507, + 590 + ], + "score": 1.0, + "content": "Shi, W., Min, S., Yasunaga, M., Seo, M., James, R., Lewis, M., Zettlemoyer, L., and Yih, W.-t.", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 114, + 585, + 506, + 599 + ], + "spans": [ + { + "bbox": [ + 114, + 585, + 506, + 599 + ], + "score": 1.0, + "content": "Replug: Retrieval-augmented black-box language models. arXiv preprint arXiv:2301.12652,", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 114, + 595, + 150, + 610 + ], + "spans": [ + { + "bbox": [ + 114, + 595, + 150, + 610 + ], + "score": 1.0, + "content": "2023d.", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 614, + 507, + 631 + ], + "spans": [ + { + "bbox": [ + 105, + 614, + 507, + 631 + ], + "score": 1.0, + "content": "TΓ€nzer, M., Ruder, S., and Rei, M. Memorisation versus generalisation in pre-trained lan-", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 113, + 625, + 507, + 642 + ], + "spans": [ + { + "bbox": [ + 113, + 625, + 507, + 642 + ], + "score": 1.0, + "content": "guage models. In Proceedings of the 60th Annual Meeting of the Association for Computational", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 114, + 637, + 361, + 653 + ], + "spans": [ + { + "bbox": [ + 114, + 637, + 361, + 653 + ], + "score": 1.0, + "content": "Linguistics (Volume 1: Long Papers), pp. 
7564–7578, 2022.", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 106, + 657, + 506, + 672 + ], + "spans": [ + { + "bbox": [ + 106, + 657, + 506, + 672 + ], + "score": 1.0, + "content": "Vu, T., Iyyer, M., Wang, X., Constant, N., Wei, J., Wei, J., Tar, C., Sung, Y.-H., Zhou, D., Le,", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 113, + 666, + 508, + 685 + ], + "spans": [ + { + "bbox": [ + 113, + 666, + 508, + 685 + ], + "score": 1.0, + "content": "Q., et al. Freshllms: Refreshing large language models with search engine augmentation.", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 114, + 679, + 282, + 693 + ], + "spans": [ + { + "bbox": [ + 114, + 679, + 282, + 693 + ], + "score": 1.0, + "content": "arXiv preprint arXiv:2310.03214, 2023.", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 697, + 507, + 714 + ], + "spans": [ + { + "bbox": [ + 105, + 697, + 507, + 714 + ], + "score": 1.0, + "content": "Wang, B., Ping, W., McAfee, L., Xu, P., Li, B., Shoeybi, M., and Catanzaro, B. Instructretro:", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 114, + 708, + 507, + 725 + ], + "spans": [ + { + "bbox": [ + 114, + 708, + 507, + 725 + ], + "score": 1.0, + "content": "Instruction tuning post retrieval-augmented pretraining. 
arXiv preprint arXiv:2310.07713,", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 115, + 720, + 142, + 733 + ], + "spans": [ + { + "bbox": [ + 115, + 720, + 142, + 733 + ], + "score": 1.0, + "content": "2023.", + "type": "text" + } + ], + "index": 46 + } + ], + "index": 23 + } + ], + "page_idx": 10, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 26, + 316, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "score": 1.0, + "content": "Published as a conference paper at COLM 2024", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 310, + 760 + ], + "lines": [ + { + "bbox": [ + 298, + 750, + 312, + 764 + ], + "spans": [ + { + "bbox": [ + 298, + 750, + 312, + 764 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 14, + "width": 14 + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "list", + "bbox": [ + 104, + 45, + 507, + 732 + ], + "lines": [], + "index": 23, + "bbox_fs": [ + 104, + 80, + 508, + 733 + ], + "lines_deleted": true + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 105, + 81, + 507, + 340 + ], + "lines": [ + { + "bbox": [ + 106, + 82, + 506, + 96 + ], + "spans": [ + { + "bbox": [ + 106, + 82, + 506, + 96 + ], + "score": 1.0, + "content": "Wang, Y., Kordi, Y., Mishra, S., Liu, A., Smith, N. A., Khashabi, D., and Hajishirzi, H.", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 114, + 92, + 506, + 108 + ], + "spans": [ + { + "bbox": [ + 114, + 92, + 506, + 108 + ], + "score": 1.0, + "content": "Self-instruct: Aligning language models with self-generated instructions. 
arXiv preprint", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 116, + 104, + 219, + 117 + ], + "spans": [ + { + "bbox": [ + 116, + 104, + 219, + 117 + ], + "score": 1.0, + "content": "arXiv:2212.10560, 2022.", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 122, + 506, + 137 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 506, + 137 + ], + "score": 1.0, + "content": "Wei, J., Wang, X., Schuurmans, D., Bosma, M., Xia, F., Chi, E., Le, Q. V., Zhou, D., et al.", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 115, + 132, + 506, + 149 + ], + "spans": [ + { + "bbox": [ + 115, + 132, + 506, + 149 + ], + "score": 1.0, + "content": "Chain-of-thought prompting elicits reasoning in large language models. Advances in", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 115, + 144, + 379, + 159 + ], + "spans": [ + { + "bbox": [ + 115, + 144, + 379, + 159 + ], + "score": 1.0, + "content": "Neural Information Processing Systems, 35:24824–24837, 2022.", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 163, + 505, + 178 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 505, + 178 + ], + "score": 1.0, + "content": "Weston, J. and Sukhbaatar, S. System 2 attention (is something you might need too). arXiv", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 114, + 175, + 255, + 188 + ], + "spans": [ + { + "bbox": [ + 114, + 175, + 255, + 188 + ], + "score": 1.0, + "content": "preprint arXiv:2311.11829, 2023.", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 193, + 506, + 208 + ], + "spans": [ + { + "bbox": [ + 105, + 193, + 506, + 208 + ], + "score": 1.0, + "content": "Xiong, W., Liu, J., Molybog, I., Zhang, H., Bhargava, P., Hou, R., Martin, L., Rungta, R.,", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 115, + 204, + 506, + 219 + ], + "spans": [ + { + "bbox": [ + 115, + 204, + 506, + 219 + ], + "score": 1.0, + "content": "Sankararaman, K. 
A., Oguz, B., et al. Effective long-context scaling of foundation models.", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 115, + 215, + 281, + 230 + ], + "spans": [ + { + "bbox": [ + 115, + 215, + 281, + 230 + ], + "score": 1.0, + "content": "arXiv preprint arXiv:2309.16039, 2023.", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 234, + 506, + 248 + ], + "spans": [ + { + "bbox": [ + 105, + 234, + 506, + 248 + ], + "score": 1.0, + "content": "Xu, P., Ping, W., Wu, X., McAfee, L., Zhu, C., Liu, Z., Subramanian, S., Bakhturina, E.,", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 115, + 245, + 506, + 259 + ], + "spans": [ + { + "bbox": [ + 115, + 245, + 506, + 259 + ], + "score": 1.0, + "content": "Shoeybi, M., and Catanzaro, B. Retrieval meets long context large language models. arXiv", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 115, + 257, + 255, + 269 + ], + "spans": [ + { + "bbox": [ + 115, + 257, + 255, + 269 + ], + "score": 1.0, + "content": "preprint arXiv:2310.03025, 2023.", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 274, + 506, + 290 + ], + "spans": [ + { + "bbox": [ + 105, + 274, + 506, + 290 + ], + "score": 1.0, + "content": "Yang, Z., Qi, P., Zhang, S., Bengio, Y., Cohen, W. W., Salakhutdinov, R., and Manning, C. D.", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 114, + 285, + 505, + 300 + ], + "spans": [ + { + "bbox": [ + 114, + 285, + 505, + 300 + ], + "score": 1.0, + "content": "Hotpotqa: A dataset for diverse, explainable multi-hop question answering. 
arXiv preprint", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 116, + 298, + 219, + 310 + ], + "spans": [ + { + "bbox": [ + 116, + 298, + 219, + 310 + ], + "score": 1.0, + "content": "arXiv:1809.09600, 2018.", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 316, + 506, + 329 + ], + "spans": [ + { + "bbox": [ + 106, + 316, + 506, + 329 + ], + "score": 1.0, + "content": "Zhou, C., Liu, P., Xu, P., Iyer, S., Sun, J., Mao, Y., Ma, X., Efrat, A., Yu, P., Yu, L., et al. Lima:", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 115, + 327, + 404, + 341 + ], + "spans": [ + { + "bbox": [ + 115, + 327, + 404, + 341 + ], + "score": 1.0, + "content": "Less is more for alignment. arXiv preprint arXiv:2305.11206, 2023.", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 9 + } + ], + "page_idx": 11, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 26, + 316, + 38 + ], + "lines": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "spans": [ + { + "bbox": [ + 106, + 25, + 316, + 39 + ], + "score": 1.0, + "content": "Published as a conference paper at COLM 2024", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 300, + 751, + 311, + 760 + ], + "lines": [ + { + "bbox": [ + 298, + 750, + 312, + 764 + ], + "spans": [ + { + "bbox": [ + 298, + 750, + 312, + 764 + ], + "score": 1.0, + "content": "12", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "list", + "bbox": [ + 105, + 81, + 507, + 340 + ], + "lines": [], + "index": 9, + "bbox_fs": [ + 105, + 82, + 506, + 341 + ], + "lines_deleted": true + } + ] + } + ], + "_backend": "pipeline", + "_version_name": "2.1.11" +} \ No newline at end of file diff --git a/parse/test/rzQGHXNReU/rzQGHXNReU_model.json b/parse/test/rzQGHXNReU/rzQGHXNReU_model.json new file mode 100644 index 0000000000000000000000000000000000000000..b17d59395a33f49005344db7826359922f15157b --- 
/dev/null +++ b/parse/test/rzQGHXNReU/rzQGHXNReU_model.json @@ -0,0 +1,13575 @@ +[ + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 298, + 1486, + 1405, + 1486, + 1405, + 1792, + 298, + 1792 + ], + "score": 0.983 + }, + { + "category_id": 1, + "poly": [ + 398, + 833, + 1304, + 833, + 1304, + 1350, + 398, + 1350 + ], + "score": 0.983 + }, + { + "category_id": 1, + "poly": [ + 300, + 1885, + 1404, + 1885, + 1404, + 1980, + 300, + 1980 + ], + "score": 0.964 + }, + { + "category_id": 1, + "poly": [ + 314, + 327, + 726, + 327, + 726, + 480, + 314, + 480 + ], + "score": 0.957 + }, + { + "category_id": 1, + "poly": [ + 301, + 1807, + 1398, + 1807, + 1398, + 1871, + 301, + 1871 + ], + "score": 0.939 + }, + { + "category_id": 0, + "poly": [ + 293, + 218, + 1372, + 218, + 1372, + 268, + 293, + 268 + ], + "score": 0.929 + }, + { + "category_id": 2, + "poly": [ + 328, + 2005, + 1038, + 2005, + 1038, + 2034, + 328, + 2034 + ], + "score": 0.911 + }, + { + "category_id": 0, + "poly": [ + 299, + 1413, + 541, + 1413, + 541, + 1452, + 299, + 1452 + ], + "score": 0.906 + }, + { + "category_id": 0, + "poly": [ + 787, + 759, + 914, + 759, + 914, + 796, + 787, + 796 + ], + "score": 0.884 + }, + { + "category_id": 2, + "poly": [ + 298, + 74, + 879, + 74, + 879, + 106, + 298, + 106 + ], + "score": 0.88 + }, + { + "category_id": 2, + "poly": [ + 840, + 2089, + 856, + 2089, + 856, + 2112, + 840, + 2112 + ], + "score": 0.768 + }, + { + "category_id": 1, + "poly": [ + 757, + 326, + 1428, + 326, + 1428, + 481, + 757, + 481 + ], + "score": 0.658 + }, + { + "category_id": 1, + "poly": [ + 314, + 525, + 894, + 525, + 894, + 680, + 314, + 680 + ], + "score": 0.137 + }, + { + "category_id": 15, + "poly": [ + 290.0, + 215.0, + 1379.0, + 215.0, + 1379.0, + 277.0, + 290.0, + 277.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 330.0, + 2001.0, + 1041.0, + 2001.0, + 1041.0, + 2040.0, + 330.0, + 2040.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 
15, + "poly": [ + 292.0, + 1412.0, + 546.0, + 1412.0, + 546.0, + 1457.0, + 292.0, + 1457.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 782.0, + 754.0, + 921.0, + 754.0, + 921.0, + 801.0, + 782.0, + 801.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 71.0, + 879.0, + 71.0, + 879.0, + 111.0, + 295.0, + 111.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 838.0, + 2086.0, + 861.0, + 2086.0, + 861.0, + 2122.0, + 838.0, + 2122.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1483.0, + 1407.0, + 1483.0, + 1407.0, + 1526.0, + 292.0, + 1526.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1517.0, + 1408.0, + 1517.0, + 1408.0, + 1557.0, + 293.0, + 1557.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1546.0, + 1405.0, + 1546.0, + 1405.0, + 1586.0, + 293.0, + 1586.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1574.0, + 1406.0, + 1574.0, + 1406.0, + 1619.0, + 292.0, + 1619.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1606.0, + 1410.0, + 1606.0, + 1410.0, + 1648.0, + 292.0, + 1648.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1639.0, + 1406.0, + 1639.0, + 1406.0, + 1678.0, + 293.0, + 1678.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1668.0, + 1406.0, + 1668.0, + 1406.0, + 1706.0, + 291.0, + 1706.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1697.0, + 1407.0, + 1697.0, + 1407.0, + 1740.0, + 292.0, + 1740.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1730.0, + 1410.0, + 1730.0, + 1410.0, + 1770.0, + 293.0, + 1770.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 
1762.0, + 1001.0, + 1762.0, + 1001.0, + 1794.0, + 296.0, + 1794.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 832.0, + 1305.0, + 832.0, + 1305.0, + 872.0, + 393.0, + 872.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 391.0, + 857.0, + 1308.0, + 857.0, + 1308.0, + 904.0, + 391.0, + 904.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 892.0, + 1308.0, + 892.0, + 1308.0, + 931.0, + 393.0, + 931.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 391.0, + 918.0, + 1310.0, + 918.0, + 1310.0, + 965.0, + 391.0, + 965.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 953.0, + 1307.0, + 953.0, + 1307.0, + 992.0, + 393.0, + 992.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 392.0, + 985.0, + 1306.0, + 985.0, + 1306.0, + 1021.0, + 392.0, + 1021.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 1015.0, + 1304.0, + 1015.0, + 1304.0, + 1052.0, + 393.0, + 1052.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 1047.0, + 1307.0, + 1047.0, + 1307.0, + 1083.0, + 393.0, + 1083.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 392.0, + 1075.0, + 1307.0, + 1075.0, + 1307.0, + 1113.0, + 392.0, + 1113.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 392.0, + 1107.0, + 1306.0, + 1107.0, + 1306.0, + 1145.0, + 392.0, + 1145.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 392.0, + 1135.0, + 1305.0, + 1135.0, + 1305.0, + 1175.0, + 392.0, + 1175.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 1166.0, + 1308.0, + 1166.0, + 1308.0, + 1205.0, + 393.0, + 1205.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 1195.0, + 1307.0, + 1195.0, + 
1307.0, + 1235.0, + 393.0, + 1235.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 1226.0, + 1305.0, + 1226.0, + 1305.0, + 1265.0, + 393.0, + 1265.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 1259.0, + 1306.0, + 1259.0, + 1306.0, + 1298.0, + 393.0, + 1298.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 392.0, + 1286.0, + 1306.0, + 1286.0, + 1306.0, + 1328.0, + 392.0, + 1328.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 395.0, + 1322.0, + 605.0, + 1322.0, + 605.0, + 1353.0, + 395.0, + 1353.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1885.0, + 1404.0, + 1885.0, + 1404.0, + 1923.0, + 294.0, + 1923.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1915.0, + 1408.0, + 1915.0, + 1408.0, + 1953.0, + 295.0, + 1953.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1946.0, + 1405.0, + 1946.0, + 1405.0, + 1982.0, + 294.0, + 1982.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 312.0, + 322.0, + 511.0, + 322.0, + 511.0, + 365.0, + 312.0, + 365.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 309.0, + 353.0, + 730.0, + 353.0, + 730.0, + 398.0, + 309.0, + 398.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 310.0, + 382.0, + 477.0, + 382.0, + 477.0, + 427.0, + 310.0, + 427.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 312.0, + 417.0, + 624.0, + 417.0, + 624.0, + 452.0, + 312.0, + 452.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 312.0, + 448.0, + 653.0, + 448.0, + 653.0, + 486.0, + 312.0, + 486.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 508.0, + 331.5, + 525.0, + 331.5, + 525.0, + 339.5, + 508.0, + 339.5 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1807.0, + 1404.0, + 1807.0, + 1404.0, + 1843.0, + 297.0, + 1843.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1838.0, + 918.0, + 1838.0, + 918.0, + 1874.0, + 297.0, + 1874.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 753.0, + 324.0, + 1284.0, + 324.0, + 1284.0, + 365.0, + 753.0, + 365.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 752.0, + 356.0, + 1171.0, + 356.0, + 1171.0, + 394.0, + 752.0, + 394.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 752.0, + 383.0, + 918.0, + 383.0, + 918.0, + 424.0, + 752.0, + 424.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 753.0, + 417.0, + 1067.0, + 417.0, + 1067.0, + 452.0, + 753.0, + 452.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 754.0, + 448.0, + 1429.0, + 448.0, + 1429.0, + 486.0, + 754.0, + 486.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 311.0, + 523.0, + 897.0, + 523.0, + 897.0, + 564.0, + 311.0, + 564.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 311.0, + 555.0, + 731.0, + 555.0, + 731.0, + 596.0, + 311.0, + 596.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 310.0, + 583.0, + 476.0, + 583.0, + 476.0, + 625.0, + 310.0, + 625.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 312.0, + 618.0, + 626.0, + 618.0, + 626.0, + 653.0, + 312.0, + 653.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 312.0, + 648.0, + 856.0, + 648.0, + 856.0, + 685.0, + 312.0, + 685.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 0, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 298, + 1155, + 1404, + 1155, + 1404, + 1462, 
+ 298, + 1462 + ], + "score": 0.983 + }, + { + "category_id": 1, + "poly": [ + 297, + 1476, + 1406, + 1476, + 1406, + 1754, + 297, + 1754 + ], + "score": 0.983 + }, + { + "category_id": 1, + "poly": [ + 298, + 957, + 1405, + 957, + 1405, + 1142, + 298, + 1142 + ], + "score": 0.98 + }, + { + "category_id": 1, + "poly": [ + 298, + 725, + 1404, + 725, + 1404, + 944, + 298, + 944 + ], + "score": 0.978 + }, + { + "category_id": 4, + "poly": [ + 296, + 448, + 1404, + 448, + 1404, + 667, + 296, + 667 + ], + "score": 0.97 + }, + { + "category_id": 3, + "poly": [ + 299, + 226, + 1395, + 226, + 1395, + 423, + 299, + 423 + ], + "score": 0.96 + }, + { + "category_id": 1, + "poly": [ + 299, + 1871, + 1402, + 1871, + 1402, + 1936, + 299, + 1936 + ], + "score": 0.952 + }, + { + "category_id": 1, + "poly": [ + 299, + 1971, + 1401, + 1971, + 1401, + 2034, + 299, + 2034 + ], + "score": 0.943 + }, + { + "category_id": 0, + "poly": [ + 300, + 1798, + 765, + 1798, + 765, + 1838, + 300, + 1838 + ], + "score": 0.921 + }, + { + "category_id": 2, + "poly": [ + 297, + 75, + 878, + 75, + 878, + 106, + 297, + 106 + ], + "score": 0.898 + }, + { + "category_id": 2, + "poly": [ + 841, + 2088, + 858, + 2088, + 858, + 2112, + 841, + 2112 + ], + "score": 0.755 + }, + { + "category_id": 13, + "poly": [ + 940, + 1539, + 993, + 1539, + 993, + 1571, + 940, + 1571 + ], + "score": 0.88, + "latex": "( D _ { k } )" + }, + { + "category_id": 13, + "poly": [ + 397, + 1508, + 452, + 1508, + 452, + 1539, + 397, + 1539 + ], + "score": 0.8, + "latex": "( \\mathrm { A } ^ { * } )" + }, + { + "category_id": 13, + "poly": [ + 549, + 1508, + 588, + 1508, + 588, + 1537, + 549, + 1537 + ], + "score": 0.8, + "latex": "\\mathsf { A } ^ { * }" + }, + { + "category_id": 13, + "poly": [ + 1207, + 1479, + 1259, + 1479, + 1259, + 1508, + 1207, + 1508 + ], + "score": 0.79, + "latex": "( \\mathrm { D ^ { * } } )" + }, + { + "category_id": 13, + "poly": [ + 1212, + 1569, + 1245, + 1569, + 1245, + 1600, + 1212, + 1600 + ], + 
"score": 0.64, + "latex": "( k )" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 447.0, + 1406.0, + 447.0, + 1406.0, + 489.0, + 293.0, + 489.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 478.0, + 1406.0, + 478.0, + 1406.0, + 519.0, + 295.0, + 519.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 509.0, + 1406.0, + 509.0, + 1406.0, + 546.0, + 294.0, + 546.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 539.0, + 1406.0, + 539.0, + 1406.0, + 581.0, + 293.0, + 581.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 571.0, + 1405.0, + 571.0, + 1405.0, + 610.0, + 294.0, + 610.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 601.0, + 1405.0, + 601.0, + 1405.0, + 640.0, + 294.0, + 640.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 630.0, + 1405.0, + 630.0, + 1405.0, + 675.0, + 293.0, + 675.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1063.0, + 241.0, + 1160.0, + 241.0, + 1160.0, + 262.0, + 1063.0, + 262.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 298.0, + 258.0, + 417.0, + 258.0, + 417.0, + 286.0, + 298.0, + 286.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 688.0, + 261.0, + 781.0, + 261.0, + 781.0, + 282.0, + 688.0, + 282.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1036.0, + 260.0, + 1187.0, + 260.0, + 1187.0, + 281.0, + 1036.0, + 281.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 315.0, + 279.0, + 402.0, + 279.0, + 402.0, + 301.0, + 315.0, + 301.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 671.0, + 278.0, + 798.0, + 278.0, + 798.0, + 299.0, + 671.0, + 299.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + 
"poly": [ + 861.0, + 278.0, + 912.0, + 278.0, + 912.0, + 350.0, + 861.0, + 350.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 929.0, + 279.0, + 939.0, + 279.0, + 939.0, + 289.0, + 929.0, + 289.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 319.0, + 361.0, + 319.0, + 361.0, + 342.0, + 318.0, + 342.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 456.0, + 293.0, + 508.0, + 293.0, + 508.0, + 363.0, + 456.0, + 363.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 542.0, + 283.0, + 569.0, + 283.0, + 569.0, + 310.0, + 542.0, + 310.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 592.0, + 315.0, + 649.0, + 315.0, + 649.0, + 336.0, + 592.0, + 336.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 676.0, + 318.0, + 727.0, + 318.0, + 727.0, + 340.0, + 676.0, + 340.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 866.0, + 315.0, + 903.0, + 315.0, + 903.0, + 360.0, + 866.0, + 360.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 957.0, + 313.0, + 1013.0, + 313.0, + 1013.0, + 352.0, + 957.0, + 352.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1050.0, + 316.0, + 1096.0, + 316.0, + 1096.0, + 336.0, + 1050.0, + 336.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1232.0, + 283.0, + 1278.0, + 283.0, + 1278.0, + 355.0, + 1232.0, + 355.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1330.0, + 314.0, + 1379.0, + 314.0, + 1379.0, + 334.0, + 1330.0, + 334.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 609.0, + 348.0, + 620.0, + 348.0, + 620.0, + 359.0, + 609.0, + 359.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 977.0, + 350.0, + 989.0, + 350.0, + 989.0, + 359.0, + 977.0, + 
359.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 401.0, + 391.0, + 556.0, + 391.0, + 556.0, + 422.0, + 401.0, + 422.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 783.0, + 388.0, + 930.0, + 388.0, + 930.0, + 425.0, + 783.0, + 425.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1139.0, + 388.0, + 1318.0, + 388.0, + 1318.0, + 425.0, + 1139.0, + 425.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 618.0, + 341.5, + 636.0, + 341.5, + 636.0, + 353.0, + 618.0, + 353.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 1794.0, + 769.0, + 1794.0, + 769.0, + 1845.0, + 290.0, + 1845.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 71.0, + 880.0, + 71.0, + 880.0, + 110.0, + 295.0, + 110.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 839.0, + 2085.0, + 862.0, + 2085.0, + 862.0, + 2121.0, + 839.0, + 2121.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1149.0, + 1409.0, + 1149.0, + 1409.0, + 1198.0, + 291.0, + 1198.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1184.0, + 1405.0, + 1184.0, + 1405.0, + 1223.0, + 292.0, + 1223.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1216.0, + 1406.0, + 1216.0, + 1406.0, + 1255.0, + 293.0, + 1255.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1244.0, + 1409.0, + 1244.0, + 1409.0, + 1287.0, + 291.0, + 1287.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1272.0, + 1409.0, + 1272.0, + 1409.0, + 1320.0, + 291.0, + 1320.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1306.0, + 1406.0, + 1306.0, + 1406.0, + 1345.0, + 292.0, + 1345.0 + ], + "score": 1.0, + "text": "" + 
}, + { + "category_id": 15, + "poly": [ + 293.0, + 1338.0, + 1408.0, + 1338.0, + 1408.0, + 1377.0, + 293.0, + 1377.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1367.0, + 1406.0, + 1367.0, + 1406.0, + 1408.0, + 292.0, + 1408.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1398.0, + 1405.0, + 1398.0, + 1405.0, + 1440.0, + 293.0, + 1440.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1432.0, + 1009.0, + 1432.0, + 1009.0, + 1464.0, + 293.0, + 1464.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1473.0, + 1206.0, + 1473.0, + 1206.0, + 1514.0, + 292.0, + 1514.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1260.0, + 1473.0, + 1406.0, + 1473.0, + 1406.0, + 1514.0, + 1260.0, + 1514.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1503.0, + 396.0, + 1503.0, + 396.0, + 1544.0, + 292.0, + 1544.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 453.0, + 1503.0, + 548.0, + 1503.0, + 548.0, + 1544.0, + 453.0, + 1544.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 589.0, + 1503.0, + 1404.0, + 1503.0, + 1404.0, + 1544.0, + 589.0, + 1544.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1534.0, + 939.0, + 1534.0, + 939.0, + 1577.0, + 292.0, + 1577.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 994.0, + 1534.0, + 1407.0, + 1534.0, + 1407.0, + 1577.0, + 994.0, + 1577.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1562.0, + 1211.0, + 1562.0, + 1211.0, + 1606.0, + 291.0, + 1606.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1246.0, + 1562.0, + 1407.0, + 1562.0, + 1407.0, + 1606.0, + 1246.0, + 1606.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 292.0, + 1597.0, + 1409.0, + 1597.0, + 1409.0, + 1637.0, + 292.0, + 1637.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1627.0, + 1409.0, + 1627.0, + 1409.0, + 1668.0, + 294.0, + 1668.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1657.0, + 1409.0, + 1657.0, + 1409.0, + 1698.0, + 292.0, + 1698.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1689.0, + 1411.0, + 1689.0, + 1411.0, + 1727.0, + 291.0, + 1727.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1719.0, + 1171.0, + 1719.0, + 1171.0, + 1757.0, + 294.0, + 1757.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 957.0, + 1405.0, + 957.0, + 1405.0, + 992.0, + 295.0, + 992.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 986.0, + 1408.0, + 986.0, + 1408.0, + 1025.0, + 295.0, + 1025.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 1010.0, + 1409.0, + 1010.0, + 1409.0, + 1062.0, + 290.0, + 1062.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1047.0, + 1405.0, + 1047.0, + 1405.0, + 1086.0, + 293.0, + 1086.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1079.0, + 1406.0, + 1079.0, + 1406.0, + 1118.0, + 293.0, + 1118.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1109.0, + 933.0, + 1109.0, + 933.0, + 1149.0, + 292.0, + 1149.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 724.0, + 1405.0, + 724.0, + 1405.0, + 767.0, + 291.0, + 767.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 755.0, + 1408.0, + 755.0, + 1408.0, + 797.0, + 292.0, + 797.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + 
"poly": [ + 292.0, + 785.0, + 1405.0, + 785.0, + 1405.0, + 827.0, + 292.0, + 827.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 816.0, + 1407.0, + 816.0, + 1407.0, + 857.0, + 291.0, + 857.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 846.0, + 1406.0, + 846.0, + 1406.0, + 887.0, + 291.0, + 887.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 877.0, + 1408.0, + 877.0, + 1408.0, + 919.0, + 292.0, + 919.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 905.0, + 707.0, + 905.0, + 707.0, + 951.0, + 292.0, + 951.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1869.0, + 1404.0, + 1869.0, + 1404.0, + 1909.0, + 294.0, + 1909.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1901.0, + 911.0, + 1901.0, + 911.0, + 1940.0, + 295.0, + 1940.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1971.0, + 1405.0, + 1971.0, + 1405.0, + 2006.0, + 297.0, + 2006.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1996.0, + 1407.0, + 1996.0, + 1407.0, + 2043.0, + 292.0, + 2043.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 1, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 298, + 1205, + 1405, + 1205, + 1405, + 1544, + 298, + 1544 + ], + "score": 0.982 + }, + { + "category_id": 1, + "poly": [ + 298, + 922, + 1405, + 922, + 1405, + 1169, + 298, + 1169 + ], + "score": 0.98 + }, + { + "category_id": 1, + "poly": [ + 298, + 1663, + 1404, + 1663, + 1404, + 1818, + 298, + 1818 + ], + "score": 0.977 + }, + { + "category_id": 1, + "poly": [ + 299, + 1880, + 1402, + 1880, + 1402, + 2036, + 299, + 2036 + ], + "score": 0.976 + }, + { + "category_id": 1, + "poly": [ + 300, + 791, + 1403, + 791, + 1403, + 
886, + 300, + 886 + ], + "score": 0.971 + }, + { + "category_id": 3, + "poly": [ + 408, + 226, + 1292, + 226, + 1292, + 550, + 408, + 550 + ], + "score": 0.966 + }, + { + "category_id": 4, + "poly": [ + 296, + 573, + 1406, + 573, + 1406, + 729, + 296, + 729 + ], + "score": 0.959 + }, + { + "category_id": 0, + "poly": [ + 299, + 1833, + 593, + 1833, + 593, + 1868, + 299, + 1868 + ], + "score": 0.928 + }, + { + "category_id": 0, + "poly": [ + 298, + 1591, + 440, + 1591, + 440, + 1627, + 298, + 1627 + ], + "score": 0.9 + }, + { + "category_id": 2, + "poly": [ + 297, + 75, + 878, + 75, + 878, + 106, + 297, + 106 + ], + "score": 0.897 + }, + { + "category_id": 2, + "poly": [ + 841, + 2088, + 859, + 2088, + 859, + 2112, + 841, + 2112 + ], + "score": 0.741 + }, + { + "category_id": 13, + "poly": [ + 394, + 1943, + 437, + 1943, + 437, + 1974, + 394, + 1974 + ], + "score": 0.49, + "latex": "( A )" + }, + { + "category_id": 13, + "poly": [ + 1226, + 984, + 1261, + 984, + 1261, + 1013, + 1226, + 1013 + ], + "score": 0.45, + "latex": "^ { \\prime } \\mathbf { k } ^ { \\prime }" + }, + { + "category_id": 13, + "poly": [ + 722, + 1913, + 765, + 1913, + 765, + 1943, + 722, + 1943 + ], + "score": 0.44, + "latex": "( \\bar { D } )" + }, + { + "category_id": 15, + "poly": [ + 433.0, + 235.0, + 602.0, + 235.0, + 602.0, + 254.0, + 433.0, + 254.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 682.0, + 233.0, + 803.0, + 233.0, + 803.0, + 263.0, + 682.0, + 263.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 915.0, + 229.0, + 1083.0, + 229.0, + 1083.0, + 269.0, + 915.0, + 269.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1177.0, + 241.0, + 1282.0, + 241.0, + 1282.0, + 264.0, + 1177.0, + 264.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1171.0, + 259.0, + 1288.0, + 259.0, + 1288.0, + 282.0, + 1171.0, + 282.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 424.0, + 287.0, + 473.0, + 287.0, + 473.0, + 313.0, + 424.0, + 313.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 487.0, + 286.0, + 536.0, + 286.0, + 536.0, + 312.0, + 487.0, + 312.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 550.0, + 287.0, + 604.0, + 287.0, + 604.0, + 311.0, + 550.0, + 311.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 611.0, + 280.0, + 698.0, + 280.0, + 698.0, + 321.0, + 611.0, + 321.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 728.0, + 278.0, + 827.0, + 278.0, + 827.0, + 320.0, + 728.0, + 320.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1203.0, + 291.0, + 1256.0, + 291.0, + 1256.0, + 319.0, + 1203.0, + 319.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 852.0, + 315.0, + 869.0, + 315.0, + 869.0, + 330.0, + 852.0, + 330.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 732.0, + 344.0, + 774.0, + 344.0, + 774.0, + 366.0, + 732.0, + 366.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1008.0, + 343.0, + 1067.0, + 343.0, + 1067.0, + 367.0, + 1008.0, + 367.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1100.0, + 347.0, + 1115.0, + 347.0, + 1115.0, + 372.0, + 1100.0, + 372.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 914.0, + 405.0, + 960.0, + 405.0, + 960.0, + 427.0, + 914.0, + 427.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1010.0, + 400.0, + 1066.0, + 400.0, + 1066.0, + 431.0, + 1010.0, + 431.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1202.0, + 385.0, + 1260.0, + 385.0, + 1260.0, + 462.0, + 1202.0, + 462.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 613.0, + 418.0, + 696.0, + 418.0, + 
696.0, + 438.0, + 613.0, + 438.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 730.0, + 418.0, + 826.0, + 418.0, + 826.0, + 440.0, + 730.0, + 440.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 908.0, + 421.0, + 968.0, + 421.0, + 968.0, + 444.0, + 908.0, + 444.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1006.0, + 422.0, + 1068.0, + 422.0, + 1068.0, + 446.0, + 1006.0, + 446.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 448.0, + 434.0, + 583.0, + 434.0, + 583.0, + 497.0, + 448.0, + 497.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 609.0, + 435.0, + 699.0, + 435.0, + 699.0, + 458.0, + 609.0, + 458.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 728.0, + 435.0, + 886.0, + 435.0, + 886.0, + 512.0, + 728.0, + 512.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1002.0, + 479.0, + 1072.0, + 479.0, + 1072.0, + 499.0, + 1002.0, + 499.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1093.0, + 471.0, + 1120.0, + 471.0, + 1120.0, + 508.0, + 1093.0, + 508.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 853.0, + 330.0, + 870.0, + 330.0, + 870.0, + 353.5, + 853.0, + 353.5 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 572.0, + 1406.0, + 572.0, + 1406.0, + 610.0, + 295.0, + 610.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 604.0, + 1406.0, + 604.0, + 1406.0, + 642.0, + 295.0, + 642.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 631.0, + 1408.0, + 631.0, + 1408.0, + 674.0, + 293.0, + 674.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 666.0, + 1404.0, + 666.0, + 1404.0, + 699.0, + 295.0, + 699.0 + ], + "score": 1.0, + "text": "" + 
}, + { + "category_id": 15, + "poly": [ + 293.0, + 694.0, + 1293.0, + 694.0, + 1293.0, + 735.0, + 293.0, + 735.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1824.0, + 598.0, + 1824.0, + 598.0, + 1879.0, + 294.0, + 1879.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 1586.0, + 446.0, + 1586.0, + 446.0, + 1638.0, + 290.0, + 1638.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 71.0, + 880.0, + 71.0, + 880.0, + 110.0, + 295.0, + 110.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 838.0, + 2084.0, + 862.0, + 2084.0, + 862.0, + 2117.0, + 838.0, + 2117.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1204.0, + 1408.0, + 1204.0, + 1408.0, + 1242.0, + 295.0, + 1242.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1236.0, + 1406.0, + 1236.0, + 1406.0, + 1273.0, + 292.0, + 1273.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1267.0, + 1406.0, + 1267.0, + 1406.0, + 1302.0, + 292.0, + 1302.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1295.0, + 1407.0, + 1295.0, + 1407.0, + 1337.0, + 293.0, + 1337.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1325.0, + 1405.0, + 1325.0, + 1405.0, + 1365.0, + 295.0, + 1365.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1358.0, + 1406.0, + 1358.0, + 1406.0, + 1397.0, + 293.0, + 1397.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1387.0, + 1403.0, + 1387.0, + 1403.0, + 1426.0, + 293.0, + 1426.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1419.0, + 1405.0, + 1419.0, + 1405.0, + 1454.0, + 295.0, + 1454.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, 
+ "poly": [ + 293.0, + 1448.0, + 1406.0, + 1448.0, + 1406.0, + 1487.0, + 293.0, + 1487.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1479.0, + 1406.0, + 1479.0, + 1406.0, + 1518.0, + 295.0, + 1518.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1509.0, + 1392.0, + 1509.0, + 1392.0, + 1548.0, + 293.0, + 1548.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 921.0, + 1405.0, + 921.0, + 1405.0, + 959.0, + 296.0, + 959.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 952.0, + 1407.0, + 952.0, + 1407.0, + 988.0, + 292.0, + 988.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 983.0, + 1225.0, + 983.0, + 1225.0, + 1020.0, + 293.0, + 1020.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1262.0, + 983.0, + 1406.0, + 983.0, + 1406.0, + 1020.0, + 1262.0, + 1020.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1014.0, + 1406.0, + 1014.0, + 1406.0, + 1050.0, + 293.0, + 1050.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1044.0, + 1406.0, + 1044.0, + 1406.0, + 1082.0, + 295.0, + 1082.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1072.0, + 1407.0, + 1072.0, + 1407.0, + 1112.0, + 292.0, + 1112.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1105.0, + 1406.0, + 1105.0, + 1406.0, + 1142.0, + 292.0, + 1142.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1137.0, + 1279.0, + 1137.0, + 1279.0, + 1171.0, + 293.0, + 1171.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1663.0, + 1409.0, + 1663.0, + 1409.0, + 1701.0, + 294.0, + 1701.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, 
+ 1694.0, + 1405.0, + 1694.0, + 1405.0, + 1731.0, + 294.0, + 1731.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1725.0, + 1405.0, + 1725.0, + 1405.0, + 1762.0, + 293.0, + 1762.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1754.0, + 1406.0, + 1754.0, + 1406.0, + 1795.0, + 293.0, + 1795.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1787.0, + 634.0, + 1787.0, + 634.0, + 1824.0, + 294.0, + 1824.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1878.0, + 1406.0, + 1878.0, + 1406.0, + 1915.0, + 295.0, + 1915.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1903.0, + 721.0, + 1903.0, + 721.0, + 1951.0, + 293.0, + 1951.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 766.0, + 1903.0, + 1407.0, + 1903.0, + 1407.0, + 1951.0, + 766.0, + 1951.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1939.0, + 393.0, + 1939.0, + 393.0, + 1977.0, + 294.0, + 1977.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 438.0, + 1939.0, + 1404.0, + 1939.0, + 1404.0, + 1977.0, + 438.0, + 1977.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1973.0, + 1403.0, + 1973.0, + 1403.0, + 2006.0, + 297.0, + 2006.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1999.0, + 1407.0, + 1999.0, + 1407.0, + 2039.0, + 294.0, + 2039.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 790.0, + 1407.0, + 790.0, + 1407.0, + 828.0, + 295.0, + 828.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 819.0, + 1404.0, + 819.0, + 1404.0, + 860.0, + 294.0, + 860.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 853.0, + 1181.0, + 
853.0, + 1181.0, + 891.0, + 294.0, + 891.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 2, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 298, + 774, + 1406, + 774, + 1406, + 1237, + 298, + 1237 + ], + "score": 0.984 + }, + { + "category_id": 1, + "poly": [ + 297, + 1589, + 1405, + 1589, + 1405, + 1806, + 297, + 1806 + ], + "score": 0.979 + }, + { + "category_id": 1, + "poly": [ + 298, + 1820, + 1404, + 1820, + 1404, + 2035, + 298, + 2035 + ], + "score": 0.978 + }, + { + "category_id": 1, + "poly": [ + 297, + 1249, + 1404, + 1249, + 1404, + 1406, + 297, + 1406 + ], + "score": 0.973 + }, + { + "category_id": 1, + "poly": [ + 299, + 619, + 1404, + 619, + 1404, + 714, + 299, + 714 + ], + "score": 0.969 + }, + { + "category_id": 1, + "poly": [ + 296, + 1512, + 1400, + 1512, + 1400, + 1576, + 296, + 1576 + ], + "score": 0.95 + }, + { + "category_id": 2, + "poly": [ + 298, + 74, + 878, + 74, + 878, + 106, + 298, + 106 + ], + "score": 0.909 + }, + { + "category_id": 1, + "poly": [ + 297, + 299, + 1405, + 299, + 1405, + 426, + 297, + 426 + ], + "score": 0.844 + }, + { + "category_id": 1, + "poly": [ + 296, + 726, + 1185, + 726, + 1185, + 762, + 296, + 762 + ], + "score": 0.842 + }, + { + "category_id": 2, + "poly": [ + 841, + 2089, + 858, + 2089, + 858, + 2112, + 841, + 2112 + ], + "score": 0.758 + }, + { + "category_id": 4, + "poly": [ + 297, + 299, + 1405, + 299, + 1405, + 426, + 297, + 426 + ], + "score": 0.244 + }, + { + "category_id": 1, + "poly": [ + 297, + 1416, + 890, + 1416, + 890, + 1501, + 297, + 1501 + ], + "score": 0.179 + }, + { + "category_id": 13, + "poly": [ + 1013, + 1081, + 1103, + 1081, + 1103, + 1114, + 1013, + 1114 + ], + "score": 0.92, + "latex": "( 1 - P )" + }, + { + "category_id": 13, + "poly": [ + 877, + 1083, + 950, + 1083, + 950, + 1115, + 877, + 1115 + ], + "score": 0.91, + "latex": "( d _ { k - 1 } )" + }, + { + "category_id": 13, + "poly": [ + 297, + 1466, + 
414, + 1466, + 414, + 1499, + 297, + 1499 + ], + "score": 0.89, + "latex": "( 1 - \\mathbf { P } ) \\%" + }, + { + "category_id": 13, + "poly": [ + 837, + 899, + 893, + 899, + 893, + 930, + 837, + 930 + ], + "score": 0.89, + "latex": "( D ^ { * } )" + }, + { + "category_id": 13, + "poly": [ + 713, + 930, + 773, + 930, + 773, + 961, + 713, + 961 + ], + "score": 0.88, + "latex": "( D * )" + }, + { + "category_id": 13, + "poly": [ + 426, + 1082, + 474, + 1082, + 474, + 1116, + 426, + 1116 + ], + "score": 0.88, + "latex": "( d _ { i } ^ { * } )" + }, + { + "category_id": 13, + "poly": [ + 703, + 869, + 757, + 869, + 757, + 901, + 703, + 901 + ], + "score": 0.87, + "latex": "( D _ { k } )" + }, + { + "category_id": 13, + "poly": [ + 516, + 1465, + 888, + 1465, + 888, + 1500, + 516, + 1500 + ], + "score": 0.87, + "latex": "\\mathbf { Q } + \\mathbf { D } _ { 1 } + \\mathbf { D } _ { 2 } + \\ldots + \\mathbf { D } _ { k } \\mathbf { A } *" + }, + { + "category_id": 13, + "poly": [ + 988, + 960, + 1039, + 960, + 1039, + 992, + 988, + 992 + ], + "score": 0.87, + "latex": "( D _ { i } )" + }, + { + "category_id": 13, + "poly": [ + 298, + 1144, + 343, + 1144, + 343, + 1175, + 298, + 1175 + ], + "score": 0.87, + "latex": "( d _ { k } )" + }, + { + "category_id": 13, + "poly": [ + 912, + 1051, + 955, + 1051, + 955, + 1083, + 912, + 1083 + ], + "score": 0.87, + "latex": "( q _ { i } )" + }, + { + "category_id": 13, + "poly": [ + 297, + 899, + 354, + 899, + 354, + 931, + 297, + 931 + ], + "score": 0.86, + "latex": "( \\hat { \\boldsymbol { A } } ^ { * } )" + }, + { + "category_id": 13, + "poly": [ + 711, + 729, + 811, + 729, + 811, + 762, + 711, + 762 + ], + "score": 0.86, + "latex": "\\mathbf Q \\to \\mathbf A \\}" + }, + { + "category_id": 13, + "poly": [ + 381, + 728, + 477, + 728, + 477, + 762, + 381, + 762 + ], + "score": 0.85, + "latex": "\\mathbf Q \\to \\mathbf A _ { \\mathrm { j } } ^ { \\prime }" + }, + { + "category_id": 13, + "poly": [ + 297, + 1112, + 339, + 1112, + 
339, + 1144, + 297, + 1144 + ], + "score": 0.85, + "latex": "( q _ { i } )" + }, + { + "category_id": 13, + "poly": [ + 1029, + 728, + 1185, + 728, + 1185, + 762, + 1029, + 762 + ], + "score": 0.85, + "latex": "\\mathbf { Q } + \\mathbf { D } \\mathbf { A } \\}" + }, + { + "category_id": 13, + "poly": [ + 451, + 1417, + 890, + 1417, + 890, + 1453, + 451, + 1453 + ], + "score": 0.84, + "latex": "\\mathbf { Q } + \\mathbf { D } ^ { * } + \\mathbf { D } _ { 1 } + \\mathbf { D } _ { 2 } + \\ldots + \\mathbf { D } _ { k } \\mathbf { A } *" + }, + { + "category_id": 13, + "poly": [ + 297, + 1418, + 349, + 1418, + 349, + 1448, + 297, + 1448 + ], + "score": 0.83, + "latex": "\\mathbf { P } \\%" + }, + { + "category_id": 13, + "poly": [ + 577, + 1052, + 599, + 1052, + 599, + 1078, + 577, + 1078 + ], + "score": 0.79, + "latex": "P" + }, + { + "category_id": 13, + "poly": [ + 410, + 869, + 454, + 869, + 454, + 901, + 410, + 901 + ], + "score": 0.48, + "latex": "( Q )" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 71.0, + 880.0, + 71.0, + 880.0, + 111.0, + 295.0, + 111.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 838.0, + 2086.0, + 862.0, + 2086.0, + 862.0, + 2119.0, + 838.0, + 2119.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 299.0, + 1410.0, + 299.0, + 1410.0, + 338.0, + 294.0, + 338.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 332.0, + 1406.0, + 332.0, + 1406.0, + 368.0, + 295.0, + 368.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 362.0, + 1403.0, + 362.0, + 1403.0, + 399.0, + 294.0, + 399.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 393.0, + 1238.0, + 393.0, + 1238.0, + 429.0, + 294.0, + 429.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 773.0, + 1409.0, + 773.0, + 1409.0, + 816.0, + 293.0, + 816.0 + ], + "score": 1.0, 
+ "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 804.0, + 1409.0, + 804.0, + 1409.0, + 846.0, + 293.0, + 846.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 833.0, + 1407.0, + 833.0, + 1407.0, + 875.0, + 292.0, + 875.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 867.0, + 409.0, + 867.0, + 409.0, + 905.0, + 290.0, + 905.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 455.0, + 867.0, + 702.0, + 867.0, + 702.0, + 905.0, + 455.0, + 905.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 758.0, + 867.0, + 1407.0, + 867.0, + 1407.0, + 905.0, + 758.0, + 905.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 894.0, + 296.0, + 894.0, + 296.0, + 936.0, + 292.0, + 936.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 355.0, + 894.0, + 836.0, + 894.0, + 836.0, + 936.0, + 355.0, + 936.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 894.0, + 894.0, + 1408.0, + 894.0, + 1408.0, + 936.0, + 894.0, + 936.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 928.0, + 712.0, + 928.0, + 712.0, + 963.0, + 295.0, + 963.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 774.0, + 928.0, + 1404.0, + 928.0, + 1404.0, + 963.0, + 774.0, + 963.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 957.0, + 987.0, + 957.0, + 987.0, + 998.0, + 291.0, + 998.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1040.0, + 957.0, + 1411.0, + 957.0, + 1411.0, + 998.0, + 1040.0, + 998.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 990.0, + 1406.0, + 990.0, + 1406.0, + 1026.0, + 293.0, + 1026.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 
1017.0, + 1406.0, + 1017.0, + 1406.0, + 1058.0, + 292.0, + 1058.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1047.0, + 576.0, + 1047.0, + 576.0, + 1088.0, + 292.0, + 1088.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 600.0, + 1047.0, + 911.0, + 1047.0, + 911.0, + 1088.0, + 600.0, + 1088.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 956.0, + 1047.0, + 1406.0, + 1047.0, + 1406.0, + 1088.0, + 956.0, + 1088.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1080.0, + 425.0, + 1080.0, + 425.0, + 1119.0, + 293.0, + 1119.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 475.0, + 1080.0, + 876.0, + 1080.0, + 876.0, + 1119.0, + 475.0, + 1119.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 951.0, + 1080.0, + 1012.0, + 1080.0, + 1012.0, + 1119.0, + 951.0, + 1119.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1104.0, + 1080.0, + 1406.0, + 1080.0, + 1406.0, + 1119.0, + 1104.0, + 1119.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1110.0, + 296.0, + 1110.0, + 296.0, + 1146.0, + 293.0, + 1146.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 340.0, + 1110.0, + 1406.0, + 1110.0, + 1406.0, + 1146.0, + 340.0, + 1146.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1139.0, + 297.0, + 1139.0, + 297.0, + 1181.0, + 292.0, + 1181.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 344.0, + 1139.0, + 1408.0, + 1139.0, + 1408.0, + 1181.0, + 344.0, + 1181.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1171.0, + 1406.0, + 1171.0, + 1406.0, + 1210.0, + 293.0, + 1210.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1202.0, + 940.0, + 
1202.0, + 940.0, + 1241.0, + 295.0, + 1241.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1587.0, + 1406.0, + 1587.0, + 1406.0, + 1629.0, + 291.0, + 1629.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1619.0, + 1406.0, + 1619.0, + 1406.0, + 1658.0, + 294.0, + 1658.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1647.0, + 1406.0, + 1647.0, + 1406.0, + 1689.0, + 292.0, + 1689.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1679.0, + 1410.0, + 1679.0, + 1410.0, + 1721.0, + 294.0, + 1721.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1711.0, + 1406.0, + 1711.0, + 1406.0, + 1750.0, + 295.0, + 1750.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1740.0, + 1406.0, + 1740.0, + 1406.0, + 1782.0, + 291.0, + 1782.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1773.0, + 908.0, + 1773.0, + 908.0, + 1811.0, + 295.0, + 1811.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1817.0, + 1405.0, + 1817.0, + 1405.0, + 1857.0, + 293.0, + 1857.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1843.0, + 1406.0, + 1843.0, + 1406.0, + 1892.0, + 292.0, + 1892.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1880.0, + 1404.0, + 1880.0, + 1404.0, + 1918.0, + 293.0, + 1918.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1910.0, + 1406.0, + 1910.0, + 1406.0, + 1947.0, + 292.0, + 1947.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1940.0, + 1405.0, + 1940.0, + 1405.0, + 1978.0, + 293.0, + 1978.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1969.0, + 1405.0, + 1969.0, + 
1405.0, + 2010.0, + 293.0, + 2010.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 2001.0, + 891.0, + 2001.0, + 891.0, + 2039.0, + 294.0, + 2039.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1249.0, + 1406.0, + 1249.0, + 1406.0, + 1286.0, + 295.0, + 1286.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1277.0, + 1406.0, + 1277.0, + 1406.0, + 1317.0, + 294.0, + 1317.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1310.0, + 1405.0, + 1310.0, + 1405.0, + 1348.0, + 294.0, + 1348.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1338.0, + 1404.0, + 1338.0, + 1404.0, + 1378.0, + 294.0, + 1378.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1370.0, + 472.0, + 1370.0, + 472.0, + 1408.0, + 292.0, + 1408.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 619.0, + 1405.0, + 619.0, + 1405.0, + 657.0, + 293.0, + 657.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 649.0, + 1405.0, + 649.0, + 1405.0, + 690.0, + 293.0, + 690.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 681.0, + 672.0, + 681.0, + 672.0, + 719.0, + 294.0, + 719.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1512.0, + 1404.0, + 1512.0, + 1404.0, + 1548.0, + 296.0, + 1548.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1541.0, + 1335.0, + 1541.0, + 1335.0, + 1579.0, + 292.0, + 1579.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 299.0, + 1410.0, + 299.0, + 1410.0, + 338.0, + 294.0, + 338.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 332.0, + 1406.0, + 332.0, + 1406.0, + 368.0, + 295.0, + 368.0 
+ ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 362.0, + 1403.0, + 362.0, + 1403.0, + 399.0, + 294.0, + 399.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 393.0, + 1238.0, + 393.0, + 1238.0, + 429.0, + 294.0, + 429.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 724.0, + 380.0, + 724.0, + 380.0, + 768.0, + 291.0, + 768.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 478.0, + 724.0, + 710.0, + 724.0, + 710.0, + 768.0, + 478.0, + 768.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 812.0, + 724.0, + 1028.0, + 724.0, + 1028.0, + 768.0, + 812.0, + 768.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1413.0, + 296.0, + 1413.0, + 296.0, + 1455.0, + 292.0, + 1455.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 350.0, + 1413.0, + 450.0, + 1413.0, + 450.0, + 1455.0, + 350.0, + 1455.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 415.0, + 1465.0, + 515.0, + 1465.0, + 515.0, + 1502.0, + 415.0, + 1502.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 3, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 5, + "poly": [ + 297, + 433, + 1404, + 433, + 1404, + 694, + 297, + 694 + ], + "score": 0.983, + "html": "
PubMedHotPotHuggingFaceTorch HubTensorFlow
GPT-3.5 + RAG71.6041.529.0860.2165.59
LLaMA2-7B56.50.540.2200
LLaMA2-7B + RAG58.80.0326.4308.6043.06
DSF59.76.3861.0684.9486.56
DSF + RAG71.64.4142.5982.8060.29
RAFT (LLaMA2-7B)73.3035.2874.0084.9586.86
" + }, + { + "category_id": 1, + "poly": [ + 298, + 1096, + 1406, + 1096, + 1406, + 1493, + 298, + 1493 + ], + "score": 0.981 + }, + { + "category_id": 1, + "poly": [ + 297, + 833, + 1405, + 833, + 1405, + 1051, + 297, + 1051 + ], + "score": 0.976 + }, + { + "category_id": 0, + "poly": [ + 298, + 756, + 514, + 756, + 514, + 795, + 298, + 795 + ], + "score": 0.912 + }, + { + "category_id": 1, + "poly": [ + 366, + 1603, + 1407, + 1603, + 1407, + 2034, + 366, + 2034 + ], + "score": 0.907 + }, + { + "category_id": 2, + "poly": [ + 298, + 75, + 878, + 75, + 878, + 106, + 298, + 106 + ], + "score": 0.901 + }, + { + "category_id": 6, + "poly": [ + 296, + 222, + 1406, + 222, + 1406, + 409, + 296, + 409 + ], + "score": 0.9 + }, + { + "category_id": 1, + "poly": [ + 298, + 1539, + 1142, + 1539, + 1142, + 1572, + 298, + 1572 + ], + "score": 0.891 + }, + { + "category_id": 2, + "poly": [ + 840, + 2088, + 858, + 2088, + 858, + 2112, + 840, + 2112 + ], + "score": 0.78 + }, + { + "category_id": 1, + "poly": [ + 296, + 222, + 1406, + 222, + 1406, + 409, + 296, + 409 + ], + "score": 0.126 + }, + { + "category_id": 13, + "poly": [ + 971, + 1946, + 995, + 1946, + 995, + 1970, + 971, + 1970 + ], + "score": 0.41, + "latex": "^ +" + }, + { + "category_id": 13, + "poly": [ + 906, + 1942, + 1069, + 1942, + 1069, + 1973, + 906, + 1973 + ], + "score": 0.31, + "latex": "( \\mathrm { D S F } + \\mathrm { R A G } )" + }, + { + "category_id": 13, + "poly": [ + 688, + 1432, + 742, + 1432, + 742, + 1463, + 688, + 1463 + ], + "score": 0.29, + "latex": "\\{ \\hat { \\mathrm { Q A } } ," + }, + { + "category_id": 13, + "poly": [ + 921, + 1719, + 947, + 1719, + 947, + 1745, + 921, + 1745 + ], + "score": 0.29, + "latex": "^ +" + }, + { + "category_id": 13, + "poly": [ + 553, + 1432, + 610, + 1432, + 610, + 1463, + 553, + 1463 + ], + "score": 0.25, + "latex": "( \\mathrm { N Q } ," + }, + { + "category_id": 15, + "poly": [ + 291.0, + 755.0, + 520.0, + 755.0, + 520.0, + 800.0, + 291.0, + 800.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 71.0, + 880.0, + 71.0, + 880.0, + 110.0, + 295.0, + 110.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 222.0, + 1409.0, + 222.0, + 1409.0, + 262.0, + 293.0, + 262.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 253.0, + 1411.0, + 253.0, + 1411.0, + 292.0, + 293.0, + 292.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 283.0, + 1407.0, + 283.0, + 1407.0, + 322.0, + 293.0, + 322.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 313.0, + 1407.0, + 313.0, + 1407.0, + 353.0, + 294.0, + 353.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 345.0, + 1407.0, + 345.0, + 1407.0, + 381.0, + 293.0, + 381.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 377.0, + 973.0, + 377.0, + 973.0, + 413.0, + 295.0, + 413.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 838.0, + 2085.0, + 862.0, + 2085.0, + 862.0, + 2118.0, + 838.0, + 2118.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1096.0, + 1406.0, + 1096.0, + 1406.0, + 1133.0, + 296.0, + 1133.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1126.0, + 1406.0, + 1126.0, + 1406.0, + 1162.0, + 296.0, + 1162.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1156.0, + 1409.0, + 1156.0, + 1409.0, + 1196.0, + 293.0, + 1196.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1187.0, + 1406.0, + 1187.0, + 1406.0, + 1223.0, + 295.0, + 1223.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1217.0, + 1407.0, + 1217.0, + 1407.0, + 1253.0, + 295.0, + 1253.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 292.0, + 1247.0, + 1409.0, + 1247.0, + 1409.0, + 1287.0, + 292.0, + 1287.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1274.0, + 1411.0, + 1274.0, + 1411.0, + 1318.0, + 291.0, + 1318.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1309.0, + 1407.0, + 1309.0, + 1407.0, + 1345.0, + 295.0, + 1345.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1335.0, + 1406.0, + 1335.0, + 1406.0, + 1380.0, + 292.0, + 1380.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1367.0, + 1406.0, + 1367.0, + 1406.0, + 1410.0, + 292.0, + 1410.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1399.0, + 1407.0, + 1399.0, + 1407.0, + 1439.0, + 293.0, + 1439.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1428.0, + 552.0, + 1428.0, + 552.0, + 1469.0, + 293.0, + 1469.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 611.0, + 1428.0, + 687.0, + 1428.0, + 687.0, + 1469.0, + 611.0, + 1469.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 743.0, + 1428.0, + 1404.0, + 1428.0, + 1404.0, + 1469.0, + 743.0, + 1469.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1459.0, + 1013.0, + 1459.0, + 1013.0, + 1500.0, + 293.0, + 1500.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 834.0, + 1408.0, + 834.0, + 1408.0, + 872.0, + 294.0, + 872.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 861.0, + 1406.0, + 861.0, + 1406.0, + 905.0, + 292.0, + 905.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 895.0, + 1405.0, + 895.0, + 1405.0, + 932.0, + 292.0, + 932.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + 
"poly": [ + 294.0, + 927.0, + 1405.0, + 927.0, + 1405.0, + 962.0, + 294.0, + 962.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 956.0, + 1408.0, + 956.0, + 1408.0, + 994.0, + 294.0, + 994.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 987.0, + 1406.0, + 987.0, + 1406.0, + 1025.0, + 294.0, + 1025.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1018.0, + 1021.0, + 1018.0, + 1021.0, + 1056.0, + 294.0, + 1056.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 368.0, + 1599.0, + 1405.0, + 1599.0, + 1405.0, + 1642.0, + 368.0, + 1642.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 392.0, + 1631.0, + 1408.0, + 1631.0, + 1408.0, + 1672.0, + 392.0, + 1672.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 396.0, + 1664.0, + 873.0, + 1664.0, + 873.0, + 1700.0, + 396.0, + 1700.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 377.0, + 1711.0, + 920.0, + 1711.0, + 920.0, + 1755.0, + 377.0, + 1755.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 948.0, + 1711.0, + 1406.0, + 1711.0, + 1406.0, + 1755.0, + 948.0, + 1755.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 396.0, + 1747.0, + 1404.0, + 1747.0, + 1404.0, + 1784.0, + 396.0, + 1784.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 397.0, + 1777.0, + 886.0, + 1777.0, + 886.0, + 1814.0, + 397.0, + 1814.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 389.0, + 1827.0, + 1409.0, + 1827.0, + 1409.0, + 1867.0, + 389.0, + 1867.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 394.0, + 1859.0, + 1405.0, + 1859.0, + 1405.0, + 1896.0, + 394.0, + 1896.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 394.0, + 
1888.0, + 1390.0, + 1888.0, + 1390.0, + 1928.0, + 394.0, + 1928.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 386.0, + 1938.0, + 905.0, + 1938.0, + 905.0, + 1982.0, + 386.0, + 1982.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1070.0, + 1938.0, + 1406.0, + 1938.0, + 1406.0, + 1982.0, + 1070.0, + 1982.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 1970.0, + 1405.0, + 1970.0, + 1405.0, + 2009.0, + 393.0, + 2009.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 396.0, + 2003.0, + 1042.0, + 2003.0, + 1042.0, + 2037.0, + 396.0, + 2037.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1533.0, + 1149.0, + 1533.0, + 1149.0, + 1582.0, + 293.0, + 1582.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 222.0, + 1409.0, + 222.0, + 1409.0, + 262.0, + 293.0, + 262.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 253.0, + 1411.0, + 253.0, + 1411.0, + 292.0, + 293.0, + 292.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 283.0, + 1407.0, + 283.0, + 1407.0, + 322.0, + 293.0, + 322.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 313.0, + 1407.0, + 313.0, + 1407.0, + 353.0, + 294.0, + 353.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 345.0, + 1407.0, + 345.0, + 1407.0, + 381.0, + 293.0, + 381.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 377.0, + 973.0, + 377.0, + 973.0, + 413.0, + 295.0, + 413.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 4, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 298, + 1727, + 1404, + 1727, + 1404, + 2035, + 298, + 2035 + ], + "score": 0.982 + }, + { + 
"category_id": 1, + "poly": [ + 298, + 1348, + 1405, + 1348, + 1405, + 1625, + 298, + 1625 + ], + "score": 0.981 + }, + { + "category_id": 1, + "poly": [ + 298, + 616, + 1405, + 616, + 1405, + 954, + 298, + 954 + ], + "score": 0.981 + }, + { + "category_id": 1, + "poly": [ + 298, + 967, + 1405, + 967, + 1405, + 1244, + 298, + 1244 + ], + "score": 0.981 + }, + { + "category_id": 5, + "poly": [ + 296, + 372, + 1407, + 372, + 1407, + 494, + 296, + 494 + ], + "score": 0.974, + "html": "
PubMedHotpotQAHuggingFaceTorch HubTensorFlow
RAFT w.0 CoT68.3025.6259.0786.5683.21
RAFT73.3035.2874.0084.9586.86
" + }, + { + "category_id": 6, + "poly": [ + 296, + 222, + 1405, + 222, + 1405, + 349, + 296, + 349 + ], + "score": 0.972 + }, + { + "category_id": 2, + "poly": [ + 298, + 74, + 878, + 74, + 878, + 106, + 298, + 106 + ], + "score": 0.906 + }, + { + "category_id": 0, + "poly": [ + 299, + 1668, + 614, + 1668, + 614, + 1701, + 299, + 1701 + ], + "score": 0.895 + }, + { + "category_id": 0, + "poly": [ + 298, + 556, + 453, + 556, + 453, + 588, + 298, + 588 + ], + "score": 0.893 + }, + { + "category_id": 0, + "poly": [ + 299, + 1287, + 521, + 1287, + 521, + 1320, + 299, + 1320 + ], + "score": 0.889 + }, + { + "category_id": 2, + "poly": [ + 840, + 2089, + 858, + 2089, + 858, + 2112, + 840, + 2112 + ], + "score": 0.778 + }, + { + "category_id": 13, + "poly": [ + 1087, + 286, + 1178, + 286, + 1178, + 315, + 1087, + 315 + ], + "score": 0.88, + "latex": "1 \\bar { 4 } . 9 3 \\%" + }, + { + "category_id": 13, + "poly": [ + 943, + 739, + 1034, + 739, + 1034, + 769, + 943, + 769 + ], + "score": 0.88, + "latex": "3 5 . 2 5 \\%" + }, + { + "category_id": 13, + "poly": [ + 1083, + 830, + 1175, + 830, + 1175, + 861, + 1083, + 861 + ], + "score": 0.87, + "latex": "3 1 . 4 1 \\%" + }, + { + "category_id": 13, + "poly": [ + 953, + 285, + 1030, + 285, + 1030, + 316, + 953, + 316 + ], + "score": 0.87, + "latex": "9 . 6 6 \\%" + }, + { + "category_id": 13, + "poly": [ + 1273, + 740, + 1363, + 740, + 1363, + 769, + 1273, + 769 + ], + "score": 0.87, + "latex": "7 6 . 3 5 \\%" + }, + { + "category_id": 13, + "poly": [ + 793, + 831, + 888, + 831, + 888, + 861, + 793, + 861 + ], + "score": 0.83, + "latex": "( 3 0 . 
{ \\bar { 8 } } 7 \\%" + }, + { + "category_id": 13, + "poly": [ + 861, + 893, + 886, + 893, + 886, + 918, + 861, + 918 + ], + "score": 0.51, + "latex": "^ +" + }, + { + "category_id": 13, + "poly": [ + 804, + 892, + 952, + 892, + 952, + 922, + 804, + 922 + ], + "score": 0.42, + "latex": "\\mathrm { D } \\mathbf { \\dot { S } } \\mathbf { \\dot { F } } + \\mathbf { R } \\mathbf { A } \\mathbf { G }" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 222.0, + 1406.0, + 222.0, + 1406.0, + 258.0, + 293.0, + 258.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 251.0, + 1405.0, + 251.0, + 1405.0, + 290.0, + 293.0, + 290.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 285.0, + 952.0, + 285.0, + 952.0, + 322.0, + 294.0, + 322.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1031.0, + 285.0, + 1086.0, + 285.0, + 1086.0, + 322.0, + 1031.0, + 322.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1179.0, + 285.0, + 1405.0, + 285.0, + 1405.0, + 322.0, + 1179.0, + 322.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 314.0, + 785.0, + 314.0, + 785.0, + 352.0, + 295.0, + 352.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 71.0, + 880.0, + 71.0, + 880.0, + 111.0, + 295.0, + 111.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1665.0, + 618.0, + 1665.0, + 618.0, + 1707.0, + 293.0, + 1707.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 554.0, + 455.0, + 554.0, + 455.0, + 592.0, + 294.0, + 592.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1285.0, + 527.0, + 1285.0, + 527.0, + 1324.0, + 293.0, + 1324.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 840.0, + 2087.0, + 861.0, + 2087.0, + 861.0, + 2118.0, + 840.0, 
+ 2118.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1725.0, + 1404.0, + 1725.0, + 1404.0, + 1764.0, + 293.0, + 1764.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1756.0, + 1405.0, + 1756.0, + 1405.0, + 1797.0, + 292.0, + 1797.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1785.0, + 1406.0, + 1785.0, + 1406.0, + 1827.0, + 292.0, + 1827.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1816.0, + 1405.0, + 1816.0, + 1405.0, + 1858.0, + 292.0, + 1858.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1847.0, + 1405.0, + 1847.0, + 1405.0, + 1887.0, + 292.0, + 1887.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1878.0, + 1406.0, + 1878.0, + 1406.0, + 1922.0, + 291.0, + 1922.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1912.0, + 1405.0, + 1912.0, + 1405.0, + 1947.0, + 294.0, + 1947.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1942.0, + 1405.0, + 1942.0, + 1405.0, + 1978.0, + 294.0, + 1978.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1970.0, + 1405.0, + 1970.0, + 1405.0, + 2010.0, + 292.0, + 2010.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 2001.0, + 583.0, + 2001.0, + 583.0, + 2039.0, + 293.0, + 2039.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1344.0, + 1406.0, + 1344.0, + 1406.0, + 1387.0, + 292.0, + 1387.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1374.0, + 1406.0, + 1374.0, + 1406.0, + 1418.0, + 292.0, + 1418.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1407.0, + 1405.0, + 1407.0, + 1405.0, + 1447.0, + 293.0, + 1447.0 + 
], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1438.0, + 1407.0, + 1438.0, + 1407.0, + 1479.0, + 293.0, + 1479.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1468.0, + 1407.0, + 1468.0, + 1407.0, + 1509.0, + 291.0, + 1509.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1501.0, + 1407.0, + 1501.0, + 1407.0, + 1538.0, + 295.0, + 1538.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1527.0, + 1406.0, + 1527.0, + 1406.0, + 1570.0, + 292.0, + 1570.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1556.0, + 1406.0, + 1556.0, + 1406.0, + 1602.0, + 292.0, + 1602.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1593.0, + 651.0, + 1593.0, + 651.0, + 1630.0, + 292.0, + 1630.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 616.0, + 1405.0, + 616.0, + 1405.0, + 651.0, + 296.0, + 651.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 646.0, + 1405.0, + 646.0, + 1405.0, + 683.0, + 295.0, + 683.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 678.0, + 1407.0, + 678.0, + 1407.0, + 710.0, + 294.0, + 710.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 703.0, + 1406.0, + 703.0, + 1406.0, + 748.0, + 292.0, + 748.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 736.0, + 942.0, + 736.0, + 942.0, + 775.0, + 293.0, + 775.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1035.0, + 736.0, + 1272.0, + 736.0, + 1272.0, + 775.0, + 1035.0, + 775.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1364.0, + 736.0, + 1406.0, + 736.0, + 1406.0, + 775.0, + 1364.0, + 775.0 + ], + "score": 1.0, + "text": "" + }, + 
{ + "category_id": 15, + "poly": [ + 293.0, + 768.0, + 1407.0, + 768.0, + 1407.0, + 806.0, + 293.0, + 806.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 799.0, + 1406.0, + 799.0, + 1406.0, + 837.0, + 293.0, + 837.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 829.0, + 792.0, + 829.0, + 792.0, + 867.0, + 293.0, + 867.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 889.0, + 829.0, + 1082.0, + 829.0, + 1082.0, + 867.0, + 889.0, + 867.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1176.0, + 829.0, + 1408.0, + 829.0, + 1408.0, + 867.0, + 1176.0, + 867.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 857.0, + 1406.0, + 857.0, + 1406.0, + 898.0, + 291.0, + 898.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 888.0, + 803.0, + 888.0, + 803.0, + 931.0, + 291.0, + 931.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 953.0, + 888.0, + 1406.0, + 888.0, + 1406.0, + 931.0, + 953.0, + 931.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 919.0, + 1170.0, + 919.0, + 1170.0, + 960.0, + 292.0, + 960.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 968.0, + 1406.0, + 968.0, + 1406.0, + 1005.0, + 296.0, + 1005.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 996.0, + 1411.0, + 996.0, + 1411.0, + 1041.0, + 291.0, + 1041.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1031.0, + 1407.0, + 1031.0, + 1407.0, + 1065.0, + 293.0, + 1065.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1058.0, + 1406.0, + 1058.0, + 1406.0, + 1099.0, + 293.0, + 1099.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 
1090.0, + 1407.0, + 1090.0, + 1407.0, + 1126.0, + 295.0, + 1126.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1117.0, + 1406.0, + 1117.0, + 1406.0, + 1159.0, + 293.0, + 1159.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1150.0, + 1406.0, + 1150.0, + 1406.0, + 1191.0, + 293.0, + 1191.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1179.0, + 1409.0, + 1179.0, + 1409.0, + 1222.0, + 292.0, + 1222.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1212.0, + 756.0, + 1212.0, + 756.0, + 1249.0, + 292.0, + 1249.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 5, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 297, + 1818, + 1403, + 1818, + 1403, + 2035, + 297, + 2035 + ], + "score": 0.98 + }, + { + "category_id": 1, + "poly": [ + 297, + 1408, + 1405, + 1408, + 1405, + 1686, + 297, + 1686 + ], + "score": 0.978 + }, + { + "category_id": 1, + "poly": [ + 298, + 1147, + 1405, + 1147, + 1405, + 1394, + 298, + 1394 + ], + "score": 0.978 + }, + { + "category_id": 0, + "poly": [ + 298, + 1740, + 857, + 1740, + 857, + 1780, + 298, + 1780 + ], + "score": 0.941 + }, + { + "category_id": 1, + "poly": [ + 300, + 1084, + 1181, + 1084, + 1181, + 1119, + 300, + 1119 + ], + "score": 0.907 + }, + { + "category_id": 2, + "poly": [ + 298, + 74, + 878, + 74, + 878, + 106, + 298, + 106 + ], + "score": 0.904 + }, + { + "category_id": 4, + "poly": [ + 297, + 922, + 1405, + 922, + 1405, + 1017, + 297, + 1017 + ], + "score": 0.888 + }, + { + "category_id": 0, + "poly": [ + 328, + 240, + 474, + 240, + 474, + 272, + 328, + 272 + ], + "score": 0.834 + }, + { + "category_id": 2, + "poly": [ + 841, + 2088, + 858, + 2088, + 858, + 2111, + 841, + 2111 + ], + "score": 0.77 + }, + { + "category_id": 3, + "poly": [ + 321, + 627, + 1381, + 627, + 1381, + 884, + 321, 
+ 884 + ], + "score": 0.395 + }, + { + "category_id": 1, + "poly": [ + 319, + 307, + 1373, + 307, + 1373, + 575, + 319, + 575 + ], + "score": 0.226 + }, + { + "category_id": 1, + "poly": [ + 297, + 922, + 1405, + 922, + 1405, + 1017, + 297, + 1017 + ], + "score": 0.136 + }, + { + "category_id": 1, + "poly": [ + 321, + 627, + 1381, + 627, + 1381, + 884, + 321, + 884 + ], + "score": 0.114 + }, + { + "category_id": 13, + "poly": [ + 301, + 1302, + 426, + 1302, + 426, + 1332, + 301, + 1332 + ], + "score": 0.88, + "latex": "\\mathrm { ( P = 1 0 0 \\% }" + }, + { + "category_id": 13, + "poly": [ + 1266, + 1470, + 1333, + 1470, + 1333, + 1500, + 1266, + 1500 + ], + "score": 0.88, + "latex": "1 0 0 \\%" + }, + { + "category_id": 13, + "poly": [ + 1049, + 1331, + 1153, + 1331, + 1153, + 1362, + 1049, + 1362 + ], + "score": 0.88, + "latex": "\\mathrm { ( P = 8 0 \\% }" + }, + { + "category_id": 13, + "poly": [ + 510, + 1211, + 571, + 1211, + 571, + 1244, + 510, + 1244 + ], + "score": 0.87, + "latex": "( \\mathrm { p \\% ) }" + }, + { + "category_id": 13, + "poly": [ + 1148, + 1470, + 1205, + 1470, + 1205, + 1501, + 1148, + 1501 + ], + "score": 0.86, + "latex": "6 0 \\% ," + }, + { + "category_id": 13, + "poly": [ + 1084, + 1470, + 1138, + 1470, + 1138, + 1501, + 1084, + 1501 + ], + "score": 0.85, + "latex": "4 0 \\%" + }, + { + "category_id": 13, + "poly": [ + 1073, + 1409, + 1120, + 1409, + 1120, + 1440, + 1073, + 1440 + ], + "score": 0.85, + "latex": "\\mathrm { P \\% }" + }, + { + "category_id": 13, + "poly": [ + 870, + 1471, + 915, + 1471, + 915, + 1500, + 870, + 1500 + ], + "score": 0.82, + "latex": "\\mathrm { P \\% }" + }, + { + "category_id": 13, + "poly": [ + 650, + 1945, + 675, + 1945, + 675, + 1970, + 650, + 1970 + ], + "score": 0.6, + "latex": "^ +" + }, + { + "category_id": 15, + "poly": [ + 289.0, + 1731.0, + 860.0, + 1731.0, + 860.0, + 1792.0, + 289.0, + 1792.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 71.0, + 
880.0, + 71.0, + 880.0, + 111.0, + 295.0, + 111.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 920.0, + 1406.0, + 920.0, + 1406.0, + 959.0, + 292.0, + 959.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 950.0, + 1406.0, + 950.0, + 1406.0, + 989.0, + 292.0, + 989.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 980.0, + 1390.0, + 980.0, + 1390.0, + 1021.0, + 294.0, + 1021.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 235.0, + 479.0, + 235.0, + 479.0, + 276.0, + 324.0, + 276.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 839.0, + 2085.0, + 862.0, + 2085.0, + 862.0, + 2122.0, + 839.0, + 2122.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 620.0, + 1375.0, + 620.0, + 1375.0, + 669.0, + 323.0, + 669.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 327.0, + 659.0, + 1373.0, + 659.0, + 1373.0, + 694.0, + 327.0, + 694.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 327.0, + 690.0, + 1373.0, + 690.0, + 1373.0, + 725.0, + 327.0, + 725.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 325.0, + 720.0, + 1377.0, + 720.0, + 1377.0, + 754.0, + 325.0, + 754.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 748.0, + 1379.0, + 748.0, + 1379.0, + 788.0, + 324.0, + 788.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 328.0, + 782.0, + 543.0, + 782.0, + 543.0, + 814.0, + 328.0, + 814.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 546.0, + 788.0, + 562.0, + 788.0, + 562.0, + 804.0, + 546.0, + 804.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 811.0, + 608.0, + 811.0, + 608.0, + 855.0, + 324.0, + 855.0 + ], + "score": 
1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 607.0, + 820.0, + 620.0, + 820.0, + 620.0, + 842.0, + 607.0, + 842.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1820.0, + 1403.0, + 1820.0, + 1403.0, + 1855.0, + 295.0, + 1855.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1847.0, + 1405.0, + 1847.0, + 1405.0, + 1888.0, + 291.0, + 1888.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1881.0, + 1405.0, + 1881.0, + 1405.0, + 1915.0, + 295.0, + 1915.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1910.0, + 1405.0, + 1910.0, + 1405.0, + 1949.0, + 293.0, + 1949.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 1939.0, + 649.0, + 1939.0, + 649.0, + 1980.0, + 290.0, + 1980.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 676.0, + 1939.0, + 1404.0, + 1939.0, + 1404.0, + 1980.0, + 676.0, + 1980.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1972.0, + 1404.0, + 1972.0, + 1404.0, + 2007.0, + 295.0, + 2007.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1998.0, + 1159.0, + 1998.0, + 1159.0, + 2043.0, + 294.0, + 2043.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1407.0, + 1072.0, + 1407.0, + 1072.0, + 1446.0, + 292.0, + 1446.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1121.0, + 1407.0, + 1407.0, + 1407.0, + 1407.0, + 1446.0, + 1121.0, + 1446.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1438.0, + 1405.0, + 1438.0, + 1405.0, + 1475.0, + 292.0, + 1475.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1468.0, + 869.0, + 1468.0, + 869.0, + 1506.0, + 292.0, + 1506.0 + ], + "score": 1.0, + "text": "" 
+ }, + { + "category_id": 15, + "poly": [ + 916.0, + 1468.0, + 1083.0, + 1468.0, + 1083.0, + 1506.0, + 916.0, + 1506.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1139.0, + 1468.0, + 1147.0, + 1468.0, + 1147.0, + 1506.0, + 1139.0, + 1506.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1206.0, + 1468.0, + 1265.0, + 1468.0, + 1265.0, + 1506.0, + 1206.0, + 1506.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1334.0, + 1468.0, + 1406.0, + 1468.0, + 1406.0, + 1506.0, + 1334.0, + 1506.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1496.0, + 1406.0, + 1496.0, + 1406.0, + 1538.0, + 291.0, + 1538.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1527.0, + 1408.0, + 1527.0, + 1408.0, + 1571.0, + 291.0, + 1571.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1559.0, + 1407.0, + 1559.0, + 1407.0, + 1597.0, + 292.0, + 1597.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1589.0, + 1410.0, + 1589.0, + 1410.0, + 1630.0, + 292.0, + 1630.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1618.0, + 1407.0, + 1618.0, + 1407.0, + 1660.0, + 292.0, + 1660.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1649.0, + 1361.0, + 1649.0, + 1361.0, + 1691.0, + 292.0, + 1691.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1146.0, + 1406.0, + 1146.0, + 1406.0, + 1187.0, + 292.0, + 1187.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1177.0, + 1410.0, + 1177.0, + 1410.0, + 1216.0, + 293.0, + 1216.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1208.0, + 509.0, + 1208.0, + 509.0, + 1248.0, + 293.0, + 1248.0 + ], + "score": 1.0, + "text": "" + }, + 
{ + "category_id": 15, + "poly": [ + 572.0, + 1208.0, + 1407.0, + 1208.0, + 1407.0, + 1248.0, + 572.0, + 1248.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1239.0, + 1405.0, + 1239.0, + 1405.0, + 1277.0, + 293.0, + 1277.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1266.0, + 1407.0, + 1266.0, + 1407.0, + 1312.0, + 291.0, + 1312.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1297.0, + 300.0, + 1297.0, + 300.0, + 1340.0, + 291.0, + 1340.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 427.0, + 1297.0, + 1407.0, + 1297.0, + 1407.0, + 1340.0, + 427.0, + 1340.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1328.0, + 1048.0, + 1328.0, + 1048.0, + 1368.0, + 292.0, + 1368.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1154.0, + 1328.0, + 1406.0, + 1328.0, + 1406.0, + 1368.0, + 1154.0, + 1368.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1361.0, + 791.0, + 1361.0, + 791.0, + 1398.0, + 295.0, + 1398.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1084.0, + 1186.0, + 1084.0, + 1186.0, + 1124.0, + 293.0, + 1124.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 300.0, + 1377.0, + 300.0, + 1377.0, + 352.0, + 322.0, + 352.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 325.0, + 339.0, + 693.0, + 339.0, + 693.0, + 375.0, + 325.0, + 375.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 325.0, + 371.0, + 1375.0, + 371.0, + 1375.0, + 410.0, + 325.0, + 410.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 403.0, + 1353.0, + 403.0, + 1353.0, + 447.0, + 323.0, + 447.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + 
"poly": [ + 326.0, + 466.0, + 1375.0, + 466.0, + 1375.0, + 504.0, + 326.0, + 504.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 497.0, + 1374.0, + 497.0, + 1374.0, + 544.0, + 324.0, + 544.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 325.0, + 536.0, + 514.0, + 536.0, + 514.0, + 578.0, + 325.0, + 578.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 920.0, + 1406.0, + 920.0, + 1406.0, + 959.0, + 292.0, + 959.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 950.0, + 1406.0, + 950.0, + 1406.0, + 989.0, + 292.0, + 989.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 980.0, + 1390.0, + 980.0, + 1390.0, + 1021.0, + 294.0, + 1021.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 620.0, + 1375.0, + 620.0, + 1375.0, + 669.0, + 323.0, + 669.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 327.0, + 659.0, + 1373.0, + 659.0, + 1373.0, + 694.0, + 327.0, + 694.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 327.0, + 690.0, + 1373.0, + 690.0, + 1373.0, + 725.0, + 327.0, + 725.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 325.0, + 720.0, + 1377.0, + 720.0, + 1377.0, + 754.0, + 325.0, + 754.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 748.0, + 1379.0, + 748.0, + 1379.0, + 788.0, + 324.0, + 788.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 328.0, + 782.0, + 543.0, + 782.0, + 543.0, + 814.0, + 328.0, + 814.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 546.0, + 788.0, + 562.0, + 788.0, + 562.0, + 804.0, + 546.0, + 804.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 811.0, + 608.0, + 811.0, + 608.0, + 855.0, + 
324.0, + 855.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 607.0, + 820.0, + 620.0, + 820.0, + 620.0, + 842.0, + 607.0, + 842.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 6, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 299, + 748, + 1405, + 748, + 1405, + 1117, + 299, + 1117 + ], + "score": 0.983 + }, + { + "category_id": 1, + "poly": [ + 298, + 1483, + 1405, + 1483, + 1405, + 1791, + 298, + 1791 + ], + "score": 0.983 + }, + { + "category_id": 1, + "poly": [ + 298, + 1131, + 1405, + 1131, + 1405, + 1469, + 298, + 1469 + ], + "score": 0.983 + }, + { + "category_id": 1, + "poly": [ + 300, + 1911, + 1405, + 1911, + 1405, + 2035, + 300, + 2035 + ], + "score": 0.973 + }, + { + "category_id": 3, + "poly": [ + 299, + 223, + 1401, + 223, + 1401, + 479, + 299, + 479 + ], + "score": 0.964 + }, + { + "category_id": 4, + "poly": [ + 297, + 505, + 1405, + 505, + 1405, + 628, + 297, + 628 + ], + "score": 0.956 + }, + { + "category_id": 0, + "poly": [ + 300, + 1836, + 572, + 1836, + 572, + 1874, + 300, + 1874 + ], + "score": 0.909 + }, + { + "category_id": 2, + "poly": [ + 297, + 75, + 878, + 75, + 878, + 106, + 297, + 106 + ], + "score": 0.89 + }, + { + "category_id": 0, + "poly": [ + 300, + 690, + 814, + 690, + 814, + 723, + 300, + 723 + ], + "score": 0.858 + }, + { + "category_id": 2, + "poly": [ + 840, + 2088, + 858, + 2088, + 858, + 2111, + 840, + 2111 + ], + "score": 0.79 + }, + { + "category_id": 13, + "poly": [ + 462, + 1376, + 572, + 1376, + 572, + 1406, + 462, + 1406 + ], + "score": 0.91, + "latex": "D ^ { * } + 3 D" + }, + { + "category_id": 13, + "poly": [ + 679, + 1376, + 788, + 1376, + 788, + 1406, + 679, + 1406 + ], + "score": 0.91, + "latex": "D ^ { * } + 1 D" + }, + { + "category_id": 13, + "poly": [ + 1358, + 505, + 1404, + 505, + 1404, + 534, + 1358, + 534 + ], + "score": 0.85, + "latex": "\\mathrm { P \\% }" + }, + { + "category_id": 13, + 
"poly": [ + 974, + 1055, + 1019, + 1055, + 1019, + 1085, + 974, + 1085 + ], + "score": 0.85, + "latex": "\\mathrm { P \\% }" + }, + { + "category_id": 13, + "poly": [ + 1205, + 1258, + 1224, + 1258, + 1224, + 1288, + 1205, + 1288 + ], + "score": 0.62, + "latex": "p" + }, + { + "category_id": 15, + "poly": [ + 446.0, + 222.0, + 563.0, + 222.0, + 563.0, + 250.0, + 446.0, + 250.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 812.0, + 225.0, + 935.0, + 225.0, + 935.0, + 248.0, + 812.0, + 248.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1178.0, + 225.0, + 1307.0, + 225.0, + 1307.0, + 248.0, + 1178.0, + 248.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 313.0, + 251.0, + 355.0, + 251.0, + 355.0, + 277.0, + 313.0, + 277.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 471.0, + 248.0, + 488.0, + 248.0, + 488.0, + 263.0, + 471.0, + 263.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 524.0, + 258.0, + 542.0, + 258.0, + 542.0, + 273.0, + 524.0, + 273.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 837.0, + 248.0, + 858.0, + 248.0, + 858.0, + 268.0, + 837.0, + 268.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 894.0, + 248.0, + 909.0, + 248.0, + 909.0, + 264.0, + 894.0, + 264.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1206.0, + 253.0, + 1226.0, + 253.0, + 1226.0, + 274.0, + 1206.0, + 274.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1259.0, + 247.0, + 1283.0, + 247.0, + 1283.0, + 271.0, + 1259.0, + 271.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1316.0, + 247.0, + 1337.0, + 247.0, + 1337.0, + 269.0, + 1316.0, + 269.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1370.0, + 246.0, + 1391.0, + 246.0, + 1391.0, + 266.0, 
+ 1370.0, + 266.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 284.0, + 267.0, + 374.0, + 267.0, + 374.0, + 416.0, + 284.0, + 416.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 416.0, + 273.0, + 432.0, + 273.0, + 432.0, + 288.0, + 416.0, + 288.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 581.0, + 267.0, + 596.0, + 267.0, + 596.0, + 282.0, + 581.0, + 282.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 648.0, + 273.0, + 742.0, + 273.0, + 742.0, + 420.0, + 648.0, + 420.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 785.0, + 268.0, + 800.0, + 268.0, + 800.0, + 285.0, + 785.0, + 285.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1008.0, + 279.0, + 1018.0, + 279.0, + 1018.0, + 293.0, + 1008.0, + 293.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1050.0, + 264.0, + 1094.0, + 264.0, + 1094.0, + 290.0, + 1050.0, + 290.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1154.0, + 269.0, + 1167.0, + 269.0, + 1167.0, + 281.0, + 1154.0, + 281.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 636.0, + 313.0, + 652.0, + 313.0, + 652.0, + 329.0, + 636.0, + 329.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1037.0, + 290.0, + 1093.0, + 290.0, + 1093.0, + 400.0, + 1037.0, + 400.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 314.0, + 324.0, + 354.0, + 324.0, + 354.0, + 351.0, + 314.0, + 351.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 682.0, + 328.0, + 725.0, + 328.0, + 725.0, + 353.0, + 682.0, + 353.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1052.0, + 344.0, + 1094.0, + 344.0, + 1094.0, + 371.0, + 1052.0, + 371.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 313.0, + 360.0, + 355.0, + 360.0, + 355.0, + 387.0, + 313.0, + 387.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1052.0, + 384.0, + 1094.0, + 384.0, + 1094.0, + 410.0, + 1052.0, + 410.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 313.0, + 398.0, + 355.0, + 398.0, + 355.0, + 424.0, + 313.0, + 424.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 364.0, + 433.0, + 370.0, + 433.0, + 370.0, + 439.0, + 364.0, + 439.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 682.0, + 424.0, + 734.0, + 424.0, + 734.0, + 450.0, + 682.0, + 450.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1050.0, + 424.0, + 1100.0, + 424.0, + 1100.0, + 450.0, + 1050.0, + 450.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 358.0, + 442.0, + 378.0, + 442.0, + 378.0, + 463.0, + 358.0, + 463.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 408.0, + 441.0, + 438.0, + 441.0, + 438.0, + 463.0, + 408.0, + 463.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 463.0, + 441.0, + 493.0, + 441.0, + 493.0, + 463.0, + 463.0, + 463.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 518.0, + 441.0, + 549.0, + 441.0, + 549.0, + 463.0, + 518.0, + 463.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 572.0, + 441.0, + 603.0, + 441.0, + 603.0, + 463.0, + 572.0, + 463.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 625.0, + 441.0, + 662.0, + 441.0, + 662.0, + 463.0, + 625.0, + 463.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 726.0, + 442.0, + 746.0, + 442.0, + 746.0, + 463.0, + 726.0, + 463.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 776.0, + 441.0, + 806.0, + 441.0, + 806.0, + 
465.0, + 776.0, + 465.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 832.0, + 441.0, + 861.0, + 441.0, + 861.0, + 465.0, + 832.0, + 465.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 886.0, + 441.0, + 916.0, + 441.0, + 916.0, + 465.0, + 886.0, + 465.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 941.0, + 441.0, + 971.0, + 441.0, + 971.0, + 465.0, + 941.0, + 465.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 994.0, + 441.0, + 1030.0, + 441.0, + 1030.0, + 465.0, + 994.0, + 465.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1095.0, + 442.0, + 1115.0, + 442.0, + 1115.0, + 463.0, + 1095.0, + 463.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1147.0, + 442.0, + 1173.0, + 442.0, + 1173.0, + 462.0, + 1147.0, + 462.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1201.0, + 441.0, + 1231.0, + 441.0, + 1231.0, + 463.0, + 1201.0, + 463.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1256.0, + 441.0, + 1286.0, + 441.0, + 1286.0, + 463.0, + 1256.0, + 463.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1311.0, + 441.0, + 1341.0, + 441.0, + 1341.0, + 463.0, + 1311.0, + 463.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1362.0, + 442.0, + 1398.0, + 442.0, + 1398.0, + 463.0, + 1362.0, + 463.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 367.0, + 454.0, + 643.0, + 454.0, + 643.0, + 482.0, + 367.0, + 482.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 742.0, + 455.0, + 1005.0, + 455.0, + 1005.0, + 481.0, + 742.0, + 481.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1104.0, + 455.0, + 1381.0, + 455.0, + 1381.0, + 481.0, + 1104.0, + 481.0 + ], + "score": 1.0, + "text": 
"" + }, + { + "category_id": 15, + "poly": [ + 1092.25, + 427.5, + 1114.25, + 427.5, + 1114.25, + 442.0, + 1092.25, + 442.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 504.0, + 1357.0, + 504.0, + 1357.0, + 540.0, + 294.0, + 540.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 536.0, + 1403.0, + 536.0, + 1403.0, + 569.0, + 295.0, + 569.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 564.0, + 1405.0, + 564.0, + 1405.0, + 602.0, + 294.0, + 602.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 594.0, + 1096.0, + 594.0, + 1096.0, + 630.0, + 295.0, + 630.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1835.0, + 576.0, + 1835.0, + 576.0, + 1879.0, + 294.0, + 1879.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 71.0, + 880.0, + 71.0, + 880.0, + 110.0, + 295.0, + 110.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 688.0, + 817.0, + 688.0, + 817.0, + 728.0, + 291.0, + 728.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 838.0, + 2085.0, + 862.0, + 2085.0, + 862.0, + 2117.0, + 838.0, + 2117.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 747.0, + 1406.0, + 747.0, + 1406.0, + 788.0, + 292.0, + 788.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 780.0, + 1405.0, + 780.0, + 1405.0, + 818.0, + 294.0, + 818.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 809.0, + 1407.0, + 809.0, + 1407.0, + 848.0, + 292.0, + 848.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 840.0, + 1407.0, + 840.0, + 1407.0, + 880.0, + 292.0, + 880.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 
873.0, + 1408.0, + 873.0, + 1408.0, + 907.0, + 292.0, + 907.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 901.0, + 1405.0, + 901.0, + 1405.0, + 939.0, + 293.0, + 939.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 932.0, + 1406.0, + 932.0, + 1406.0, + 970.0, + 293.0, + 970.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 961.0, + 1409.0, + 961.0, + 1409.0, + 1000.0, + 293.0, + 1000.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 995.0, + 1406.0, + 995.0, + 1406.0, + 1029.0, + 295.0, + 1029.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1018.0, + 1406.0, + 1018.0, + 1406.0, + 1064.0, + 292.0, + 1064.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1053.0, + 973.0, + 1053.0, + 973.0, + 1091.0, + 292.0, + 1091.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1020.0, + 1053.0, + 1406.0, + 1053.0, + 1406.0, + 1091.0, + 1020.0, + 1091.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1084.0, + 1058.0, + 1084.0, + 1058.0, + 1121.0, + 294.0, + 1121.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1484.0, + 1403.0, + 1484.0, + 1403.0, + 1516.0, + 297.0, + 1516.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1512.0, + 1406.0, + 1512.0, + 1406.0, + 1548.0, + 295.0, + 1548.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1544.0, + 1406.0, + 1544.0, + 1406.0, + 1580.0, + 295.0, + 1580.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1573.0, + 1407.0, + 1573.0, + 1407.0, + 1610.0, + 291.0, + 1610.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1605.0, + 1408.0, + 
1605.0, + 1408.0, + 1640.0, + 292.0, + 1640.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1634.0, + 1405.0, + 1634.0, + 1405.0, + 1670.0, + 295.0, + 1670.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1665.0, + 1408.0, + 1665.0, + 1408.0, + 1701.0, + 293.0, + 1701.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1697.0, + 1407.0, + 1697.0, + 1407.0, + 1733.0, + 295.0, + 1733.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1724.0, + 1407.0, + 1724.0, + 1407.0, + 1767.0, + 292.0, + 1767.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1758.0, + 1403.0, + 1758.0, + 1403.0, + 1793.0, + 293.0, + 1793.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1132.0, + 1405.0, + 1132.0, + 1405.0, + 1167.0, + 296.0, + 1167.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1161.0, + 1406.0, + 1161.0, + 1406.0, + 1202.0, + 292.0, + 1202.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1192.0, + 1406.0, + 1192.0, + 1406.0, + 1230.0, + 295.0, + 1230.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1218.0, + 1406.0, + 1218.0, + 1406.0, + 1264.0, + 293.0, + 1264.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1248.0, + 1204.0, + 1248.0, + 1204.0, + 1295.0, + 291.0, + 1295.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1225.0, + 1248.0, + 1410.0, + 1248.0, + 1410.0, + 1295.0, + 1225.0, + 1295.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1281.0, + 1407.0, + 1281.0, + 1407.0, + 1321.0, + 292.0, + 1321.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1315.0, + 1405.0, + 1315.0, 
+ 1405.0, + 1350.0, + 295.0, + 1350.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1345.0, + 1405.0, + 1345.0, + 1405.0, + 1380.0, + 293.0, + 1380.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1376.0, + 461.0, + 1376.0, + 461.0, + 1411.0, + 296.0, + 1411.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 573.0, + 1376.0, + 678.0, + 1376.0, + 678.0, + 1411.0, + 573.0, + 1411.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 789.0, + 1376.0, + 1405.0, + 1376.0, + 1405.0, + 1411.0, + 789.0, + 1411.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1405.0, + 1406.0, + 1405.0, + 1406.0, + 1446.0, + 291.0, + 1446.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1437.0, + 1395.0, + 1437.0, + 1395.0, + 1474.0, + 291.0, + 1474.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1909.0, + 1407.0, + 1909.0, + 1407.0, + 1949.0, + 294.0, + 1949.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1941.0, + 1406.0, + 1941.0, + 1406.0, + 1978.0, + 295.0, + 1978.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1972.0, + 1410.0, + 1972.0, + 1410.0, + 2008.0, + 294.0, + 2008.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 2002.0, + 1410.0, + 2002.0, + 1410.0, + 2038.0, + 294.0, + 2038.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 7, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 298, + 1337, + 1406, + 1337, + 1406, + 1552, + 298, + 1552 + ], + "score": 0.979 + }, + { + "category_id": 1, + "poly": [ + 297, + 1077, + 1406, + 1077, + 1406, + 1324, + 297, + 1324 + ], + "score": 0.979 + }, + { + "category_id": 1, + "poly": [ + 298, + 
1693, + 1404, + 1693, + 1404, + 1878, + 298, + 1878 + ], + "score": 0.978 + }, + { + "category_id": 1, + "poly": [ + 298, + 786, + 1406, + 786, + 1406, + 1063, + 298, + 1063 + ], + "score": 0.978 + }, + { + "category_id": 3, + "poly": [ + 402, + 225, + 1298, + 225, + 1298, + 533, + 402, + 533 + ], + "score": 0.972 + }, + { + "category_id": 4, + "poly": [ + 296, + 559, + 1406, + 559, + 1406, + 715, + 296, + 715 + ], + "score": 0.967 + }, + { + "category_id": 1, + "poly": [ + 299, + 2002, + 1180, + 2002, + 1180, + 2035, + 299, + 2035 + ], + "score": 0.91 + }, + { + "category_id": 0, + "poly": [ + 298, + 1612, + 524, + 1612, + 524, + 1651, + 298, + 1651 + ], + "score": 0.904 + }, + { + "category_id": 0, + "poly": [ + 299, + 1938, + 466, + 1938, + 466, + 1975, + 299, + 1975 + ], + "score": 0.902 + }, + { + "category_id": 2, + "poly": [ + 297, + 75, + 878, + 75, + 878, + 106, + 297, + 106 + ], + "score": 0.893 + }, + { + "category_id": 2, + "poly": [ + 841, + 2088, + 858, + 2088, + 858, + 2110, + 841, + 2110 + ], + "score": 0.784 + }, + { + "category_id": 13, + "poly": [ + 1348, + 591, + 1404, + 591, + 1404, + 623, + 1348, + 623 + ], + "score": 0.51, + "latex": "{ \\mathrm { N Q } } ," + }, + { + "category_id": 15, + "poly": [ + 576.0, + 226.0, + 725.0, + 226.0, + 725.0, + 253.0, + 576.0, + 253.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1063.0, + 227.0, + 1152.0, + 227.0, + 1152.0, + 251.0, + 1063.0, + 251.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 402.0, + 239.0, + 468.0, + 239.0, + 468.0, + 432.0, + 402.0, + 432.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 658.0, + 253.0, + 678.0, + 253.0, + 678.0, + 270.0, + 658.0, + 270.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 708.0, + 258.0, + 827.0, + 258.0, + 827.0, + 349.0, + 708.0, + 349.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 848.0, + 259.0, 
+ 924.0, + 259.0, + 924.0, + 432.0, + 848.0, + 432.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 930.0, + 342.0, + 948.0, + 342.0, + 948.0, + 360.0, + 930.0, + 360.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 968.0, + 297.0, + 985.0, + 297.0, + 985.0, + 314.0, + 968.0, + 314.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1003.0, + 274.0, + 1021.0, + 274.0, + 1021.0, + 292.0, + 1003.0, + 292.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1042.0, + 254.0, + 1060.0, + 254.0, + 1060.0, + 272.0, + 1042.0, + 272.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1166.0, + 256.0, + 1285.0, + 256.0, + 1285.0, + 349.0, + 1166.0, + 349.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 418.0, + 374.0, + 466.0, + 374.0, + 466.0, + 403.0, + 418.0, + 403.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 868.0, + 379.0, + 922.0, + 379.0, + 922.0, + 403.0, + 868.0, + 403.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 737.0, + 411.0, + 751.0, + 411.0, + 751.0, + 424.0, + 737.0, + 424.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 420.0, + 422.0, + 466.0, + 422.0, + 466.0, + 447.0, + 420.0, + 447.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 868.0, + 417.0, + 923.0, + 417.0, + 923.0, + 443.0, + 868.0, + 443.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 420.0, + 467.0, + 465.0, + 467.0, + 465.0, + 493.0, + 420.0, + 493.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 473.0, + 461.0, + 492.0, + 461.0, + 492.0, + 488.0, + 473.0, + 488.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 868.0, + 457.0, + 922.0, + 457.0, + 922.0, + 482.0, + 868.0, + 482.0 + ], + "score": 1.0, + 
"text": "" + }, + { + "category_id": 15, + "poly": [ + 931.0, + 473.0, + 948.0, + 473.0, + 948.0, + 489.0, + 931.0, + 489.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 510.0, + 491.0, + 529.0, + 491.0, + 529.0, + 515.0, + 510.0, + 515.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 585.0, + 493.0, + 603.0, + 493.0, + 603.0, + 512.0, + 585.0, + 512.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 659.0, + 489.0, + 679.0, + 489.0, + 679.0, + 514.0, + 659.0, + 514.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 734.0, + 491.0, + 754.0, + 491.0, + 754.0, + 514.0, + 734.0, + 514.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 803.0, + 491.0, + 834.0, + 491.0, + 834.0, + 516.0, + 803.0, + 516.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 968.0, + 492.0, + 985.0, + 492.0, + 985.0, + 513.0, + 968.0, + 513.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1043.0, + 492.0, + 1061.0, + 492.0, + 1061.0, + 512.0, + 1043.0, + 512.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1117.0, + 489.0, + 1136.0, + 489.0, + 1136.0, + 514.0, + 1117.0, + 514.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1191.0, + 489.0, + 1211.0, + 489.0, + 1211.0, + 514.0, + 1191.0, + 514.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1261.0, + 491.0, + 1292.0, + 491.0, + 1292.0, + 515.0, + 1261.0, + 515.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 547.0, + 510.0, + 754.0, + 510.0, + 754.0, + 533.0, + 547.0, + 533.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1005.0, + 510.0, + 1210.0, + 510.0, + 1210.0, + 533.0, + 1005.0, + 533.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 
558.0, + 1406.0, + 558.0, + 1406.0, + 596.0, + 294.0, + 596.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 592.0, + 1347.0, + 592.0, + 1347.0, + 625.0, + 296.0, + 625.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 618.0, + 1407.0, + 618.0, + 1407.0, + 660.0, + 291.0, + 660.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 648.0, + 1407.0, + 648.0, + 1407.0, + 693.0, + 294.0, + 693.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 678.0, + 917.0, + 678.0, + 917.0, + 724.0, + 295.0, + 724.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 289.0, + 1604.0, + 530.0, + 1604.0, + 530.0, + 1660.0, + 289.0, + 1660.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1935.0, + 470.0, + 1935.0, + 470.0, + 1981.0, + 294.0, + 1981.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 71.0, + 880.0, + 71.0, + 880.0, + 110.0, + 295.0, + 110.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 839.0, + 2087.0, + 860.0, + 2087.0, + 860.0, + 2116.0, + 839.0, + 2116.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1336.0, + 1409.0, + 1336.0, + 1409.0, + 1374.0, + 295.0, + 1374.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1368.0, + 1406.0, + 1368.0, + 1406.0, + 1405.0, + 292.0, + 1405.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1395.0, + 1406.0, + 1395.0, + 1406.0, + 1437.0, + 292.0, + 1437.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1428.0, + 1406.0, + 1428.0, + 1406.0, + 1464.0, + 293.0, + 1464.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1458.0, + 1409.0, + 1458.0, + 1409.0, + 
1497.0, + 292.0, + 1497.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1489.0, + 1406.0, + 1489.0, + 1406.0, + 1528.0, + 292.0, + 1528.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1518.0, + 740.0, + 1518.0, + 740.0, + 1555.0, + 293.0, + 1555.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1075.0, + 1406.0, + 1075.0, + 1406.0, + 1116.0, + 292.0, + 1116.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1105.0, + 1406.0, + 1105.0, + 1406.0, + 1146.0, + 291.0, + 1146.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1138.0, + 1408.0, + 1138.0, + 1408.0, + 1174.0, + 294.0, + 1174.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1169.0, + 1409.0, + 1169.0, + 1409.0, + 1207.0, + 294.0, + 1207.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1198.0, + 1406.0, + 1198.0, + 1406.0, + 1235.0, + 294.0, + 1235.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1229.0, + 1408.0, + 1229.0, + 1408.0, + 1267.0, + 294.0, + 1267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1260.0, + 1409.0, + 1260.0, + 1409.0, + 1299.0, + 294.0, + 1299.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1291.0, + 1203.0, + 1291.0, + 1203.0, + 1327.0, + 294.0, + 1327.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1687.0, + 1408.0, + 1687.0, + 1408.0, + 1735.0, + 291.0, + 1735.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1722.0, + 1405.0, + 1722.0, + 1405.0, + 1761.0, + 291.0, + 1761.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1751.0, + 1405.0, + 1751.0, + 1405.0, + 1794.0, + 
292.0, + 1794.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1783.0, + 1406.0, + 1783.0, + 1406.0, + 1821.0, + 293.0, + 1821.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1816.0, + 1406.0, + 1816.0, + 1406.0, + 1852.0, + 294.0, + 1852.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1844.0, + 1326.0, + 1844.0, + 1326.0, + 1882.0, + 293.0, + 1882.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 788.0, + 1408.0, + 788.0, + 1408.0, + 821.0, + 296.0, + 821.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 816.0, + 1408.0, + 816.0, + 1408.0, + 853.0, + 295.0, + 853.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 846.0, + 1408.0, + 846.0, + 1408.0, + 885.0, + 293.0, + 885.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 877.0, + 1406.0, + 877.0, + 1406.0, + 914.0, + 295.0, + 914.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 906.0, + 1408.0, + 906.0, + 1408.0, + 945.0, + 293.0, + 945.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 936.0, + 1406.0, + 936.0, + 1406.0, + 976.0, + 293.0, + 976.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 969.0, + 1404.0, + 969.0, + 1404.0, + 1006.0, + 295.0, + 1006.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1000.0, + 1404.0, + 1000.0, + 1404.0, + 1037.0, + 295.0, + 1037.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1028.0, + 546.0, + 1028.0, + 546.0, + 1065.0, + 295.0, + 1065.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1998.0, + 1187.0, + 1998.0, + 1187.0, + 2043.0, + 295.0, + 2043.0 + ], + "score": 1.0, + 
"text": "" + } + ], + "page_info": { + "page_no": 8, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 2, + "poly": [ + 298, + 74, + 878, + 74, + 878, + 107, + 298, + 107 + ], + "score": 0.894 + }, + { + "category_id": 2, + "poly": [ + 835, + 2088, + 865, + 2088, + 865, + 2113, + 835, + 2113 + ], + "score": 0.836 + }, + { + "category_id": 1, + "poly": [ + 292, + 227, + 1405, + 227, + 1405, + 295, + 292, + 295 + ], + "score": 0.647 + }, + { + "category_id": 1, + "poly": [ + 278, + 314, + 1407, + 314, + 1407, + 2055, + 278, + 2055 + ], + "score": 0.297 + }, + { + "category_id": 15, + "poly": [ + 295.0, + 71.0, + 880.0, + 71.0, + 880.0, + 110.0, + 295.0, + 110.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 829.0, + 2084.0, + 870.0, + 2084.0, + 870.0, + 2125.0, + 829.0, + 2125.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 224.0, + 1408.0, + 224.0, + 1408.0, + 269.0, + 294.0, + 269.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 260.0, + 1225.0, + 260.0, + 1225.0, + 296.0, + 323.0, + 296.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 316.0, + 1411.0, + 316.0, + 1411.0, + 358.0, + 295.0, + 358.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 343.0, + 1407.0, + 343.0, + 1407.0, + 393.0, + 318.0, + 393.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 317.0, + 369.0, + 1409.0, + 369.0, + 1409.0, + 425.0, + 317.0, + 425.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 408.0, + 626.0, + 408.0, + 626.0, + 450.0, + 320.0, + 450.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 461.0, + 1409.0, + 461.0, + 1409.0, + 508.0, + 291.0, + 508.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 496.0, + 
1409.0, + 496.0, + 1409.0, + 538.0, + 320.0, + 538.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 523.0, + 1042.0, + 523.0, + 1042.0, + 571.0, + 318.0, + 571.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 286.0, + 580.0, + 1411.0, + 580.0, + 1411.0, + 636.0, + 286.0, + 636.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 619.0, + 1407.0, + 619.0, + 1407.0, + 661.0, + 320.0, + 661.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 649.0, + 825.0, + 649.0, + 825.0, + 692.0, + 318.0, + 692.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 701.0, + 1411.0, + 701.0, + 1411.0, + 751.0, + 290.0, + 751.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 734.0, + 1405.0, + 734.0, + 1405.0, + 780.0, + 318.0, + 780.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 765.0, + 1403.0, + 765.0, + 1403.0, + 812.0, + 318.0, + 812.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 818.0, + 1407.0, + 818.0, + 1407.0, + 870.0, + 290.0, + 870.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 855.0, + 1405.0, + 855.0, + 1405.0, + 897.0, + 318.0, + 897.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 883.0, + 695.0, + 883.0, + 695.0, + 926.0, + 318.0, + 926.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 941.0, + 1405.0, + 941.0, + 1405.0, + 983.0, + 293.0, + 983.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 316.0, + 972.0, + 1044.0, + 972.0, + 1044.0, + 1012.0, + 316.0, + 1012.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1029.0, + 1405.0, + 1029.0, + 1405.0, + 1071.0, + 293.0, + 1071.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1060.0, + 1386.0, + 1060.0, + 1386.0, + 1102.0, + 320.0, + 1102.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1115.0, + 1407.0, + 1115.0, + 1407.0, + 1158.0, + 293.0, + 1158.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 314.0, + 1144.0, + 1392.0, + 1144.0, + 1392.0, + 1190.0, + 314.0, + 1190.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1202.0, + 1409.0, + 1202.0, + 1409.0, + 1244.0, + 293.0, + 1244.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1234.0, + 1407.0, + 1234.0, + 1407.0, + 1277.0, + 320.0, + 1277.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 1261.0, + 1411.0, + 1261.0, + 1411.0, + 1311.0, + 318.0, + 1311.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 1296.0, + 835.0, + 1296.0, + 835.0, + 1336.0, + 318.0, + 1336.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1351.0, + 1405.0, + 1351.0, + 1405.0, + 1394.0, + 291.0, + 1394.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 1382.0, + 1148.0, + 1382.0, + 1148.0, + 1424.0, + 318.0, + 1424.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1438.0, + 1405.0, + 1438.0, + 1405.0, + 1480.0, + 291.0, + 1480.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 318.0, + 1470.0, + 1409.0, + 1470.0, + 1409.0, + 1512.0, + 318.0, + 1512.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1499.0, + 399.0, + 1499.0, + 399.0, + 1539.0, + 320.0, + 1539.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1557.0, + 1407.0, + 1557.0, + 1407.0, + 1599.0, + 293.0, + 1599.0 + ], + "score": 1.0, 
+ "text": "" + }, + { + "category_id": 15, + "poly": [ + 316.0, + 1585.0, + 1411.0, + 1585.0, + 1411.0, + 1633.0, + 316.0, + 1633.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 319.0, + 1614.0, + 488.0, + 1614.0, + 488.0, + 1660.0, + 319.0, + 1660.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1674.0, + 1407.0, + 1674.0, + 1407.0, + 1716.0, + 291.0, + 1716.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1706.0, + 1407.0, + 1706.0, + 1407.0, + 1748.0, + 320.0, + 1748.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1735.0, + 614.0, + 1735.0, + 614.0, + 1777.0, + 320.0, + 1777.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 289.0, + 1789.0, + 1409.0, + 1789.0, + 1409.0, + 1833.0, + 289.0, + 1833.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1823.0, + 1409.0, + 1823.0, + 1409.0, + 1865.0, + 320.0, + 1865.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1854.0, + 1411.0, + 1854.0, + 1411.0, + 1896.0, + 320.0, + 1896.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1878.0, + 398.0, + 1878.0, + 398.0, + 1926.0, + 320.0, + 1926.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1938.0, + 1407.0, + 1938.0, + 1407.0, + 1984.0, + 291.0, + 1984.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 316.0, + 1965.0, + 1415.0, + 1965.0, + 1415.0, + 2023.0, + 316.0, + 2023.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 2001.0, + 781.0, + 2001.0, + 781.0, + 2044.0, + 320.0, + 2044.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 9, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 2, + "poly": [ + 298, + 74, + 
878, + 74, + 878, + 107, + 298, + 107 + ], + "score": 0.892 + }, + { + "category_id": 2, + "poly": [ + 834, + 2088, + 862, + 2088, + 862, + 2113, + 834, + 2113 + ], + "score": 0.83 + }, + { + "category_id": 1, + "poly": [ + 291, + 127, + 1410, + 127, + 1410, + 2034, + 291, + 2034 + ], + "score": 0.528 + }, + { + "category_id": 15, + "poly": [ + 295.0, + 71.0, + 880.0, + 71.0, + 880.0, + 110.0, + 295.0, + 110.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 830.0, + 2085.0, + 868.0, + 2085.0, + 868.0, + 2124.0, + 830.0, + 2124.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 223.0, + 1411.0, + 223.0, + 1411.0, + 271.0, + 290.0, + 271.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 319.0, + 255.0, + 1407.0, + 255.0, + 1407.0, + 301.0, + 319.0, + 301.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 317.0, + 288.0, + 1301.0, + 288.0, + 1301.0, + 332.0, + 317.0, + 332.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 338.0, + 1407.0, + 338.0, + 1407.0, + 386.0, + 292.0, + 386.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 317.0, + 370.0, + 1405.0, + 370.0, + 1405.0, + 418.0, + 317.0, + 418.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 407.0, + 624.0, + 407.0, + 624.0, + 441.0, + 321.0, + 441.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 453.0, + 1409.0, + 453.0, + 1409.0, + 501.0, + 292.0, + 501.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 315.0, + 485.0, + 1407.0, + 485.0, + 1407.0, + 533.0, + 315.0, + 533.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 522.0, + 626.0, + 522.0, + 626.0, + 556.0, + 321.0, + 556.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 572.0, + 
1407.0, + 572.0, + 1407.0, + 612.0, + 294.0, + 612.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 319.0, + 604.0, + 1407.0, + 604.0, + 1407.0, + 644.0, + 319.0, + 644.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 317.0, + 633.0, + 398.0, + 633.0, + 398.0, + 675.0, + 317.0, + 675.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 685.0, + 1405.0, + 685.0, + 1405.0, + 725.0, + 294.0, + 725.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 317.0, + 717.0, + 1409.0, + 717.0, + 1409.0, + 763.0, + 317.0, + 763.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 748.0, + 1052.0, + 748.0, + 1052.0, + 788.0, + 321.0, + 788.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 798.0, + 1407.0, + 798.0, + 1407.0, + 844.0, + 292.0, + 844.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 319.0, + 834.0, + 1111.0, + 834.0, + 1111.0, + 869.0, + 319.0, + 869.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 884.0, + 1409.0, + 884.0, + 1409.0, + 930.0, + 292.0, + 930.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 315.0, + 915.0, + 1301.0, + 915.0, + 1301.0, + 961.0, + 315.0, + 961.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 963.0, + 1407.0, + 963.0, + 1407.0, + 1014.0, + 290.0, + 1014.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 317.0, + 1001.0, + 1018.0, + 1001.0, + 1018.0, + 1039.0, + 317.0, + 1039.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1053.0, + 1405.0, + 1053.0, + 1405.0, + 1093.0, + 294.0, + 1093.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 319.0, + 1087.0, + 1392.0, + 1087.0, + 1392.0, + 1126.0, + 319.0, + 
1126.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1141.0, + 1405.0, + 1141.0, + 1405.0, + 1175.0, + 296.0, + 1175.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 317.0, + 1164.0, + 1407.0, + 1164.0, + 1407.0, + 1212.0, + 317.0, + 1212.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 1202.0, + 611.0, + 1202.0, + 611.0, + 1235.0, + 321.0, + 1235.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 1248.0, + 1407.0, + 1248.0, + 1407.0, + 1296.0, + 290.0, + 1296.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 319.0, + 1285.0, + 1407.0, + 1285.0, + 1407.0, + 1325.0, + 319.0, + 1325.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 319.0, + 1315.0, + 969.0, + 1315.0, + 969.0, + 1354.0, + 319.0, + 1354.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1365.0, + 1409.0, + 1365.0, + 1409.0, + 1411.0, + 292.0, + 1411.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 319.0, + 1396.0, + 1411.0, + 1396.0, + 1411.0, + 1442.0, + 319.0, + 1442.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 319.0, + 1428.0, + 415.0, + 1428.0, + 415.0, + 1469.0, + 319.0, + 1469.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1482.0, + 1409.0, + 1482.0, + 1409.0, + 1522.0, + 294.0, + 1522.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 317.0, + 1511.0, + 1411.0, + 1511.0, + 1411.0, + 1557.0, + 317.0, + 1557.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 315.0, + 1541.0, + 795.0, + 1541.0, + 795.0, + 1584.0, + 315.0, + 1584.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1595.0, + 1409.0, + 1595.0, + 1409.0, + 1641.0, + 292.0, + 1641.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 319.0, + 1626.0, + 1407.0, + 1626.0, + 1407.0, + 1666.0, + 319.0, + 1666.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 319.0, + 1655.0, + 417.0, + 1655.0, + 417.0, + 1697.0, + 319.0, + 1697.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1706.0, + 1411.0, + 1706.0, + 1411.0, + 1754.0, + 292.0, + 1754.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 315.0, + 1737.0, + 1411.0, + 1737.0, + 1411.0, + 1785.0, + 315.0, + 1785.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 319.0, + 1770.0, + 1005.0, + 1770.0, + 1005.0, + 1816.0, + 319.0, + 1816.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1827.0, + 1407.0, + 1827.0, + 1407.0, + 1867.0, + 296.0, + 1867.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 315.0, + 1852.0, + 1413.0, + 1852.0, + 1413.0, + 1904.0, + 315.0, + 1904.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 319.0, + 1887.0, + 785.0, + 1887.0, + 785.0, + 1927.0, + 319.0, + 1927.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1938.0, + 1411.0, + 1938.0, + 1411.0, + 1984.0, + 294.0, + 1984.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 317.0, + 1969.0, + 1411.0, + 1969.0, + 1411.0, + 2015.0, + 317.0, + 2015.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 2000.0, + 395.0, + 2000.0, + 395.0, + 2038.0, + 321.0, + 2038.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 10, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 2, + "poly": [ + 298, + 74, + 878, + 74, + 878, + 107, + 298, + 107 + ], + "score": 0.891 + }, + { + "category_id": 2, + "poly": [ + 835, + 2088, + 864, + 2088, + 
864, + 2113, + 835, + 2113 + ], + "score": 0.836 + }, + { + "category_id": 1, + "poly": [ + 292, + 225, + 1410, + 225, + 1410, + 947, + 292, + 947 + ], + "score": 0.771 + }, + { + "category_id": 15, + "poly": [ + 295.0, + 71.0, + 880.0, + 71.0, + 880.0, + 110.0, + 295.0, + 110.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 830.0, + 2085.0, + 869.0, + 2085.0, + 869.0, + 2124.0, + 830.0, + 2124.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 228.0, + 1408.0, + 228.0, + 1408.0, + 267.0, + 295.0, + 267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 319.0, + 257.0, + 1407.0, + 257.0, + 1407.0, + 301.0, + 319.0, + 301.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 291.0, + 610.0, + 291.0, + 610.0, + 327.0, + 323.0, + 327.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 341.0, + 1408.0, + 341.0, + 1408.0, + 382.0, + 293.0, + 382.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 369.0, + 1407.0, + 369.0, + 1407.0, + 415.0, + 321.0, + 415.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 401.0, + 1053.0, + 401.0, + 1053.0, + 443.0, + 321.0, + 443.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 454.0, + 1405.0, + 454.0, + 1405.0, + 495.0, + 293.0, + 495.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 319.0, + 487.0, + 709.0, + 487.0, + 709.0, + 523.0, + 319.0, + 523.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 538.0, + 1408.0, + 538.0, + 1408.0, + 578.0, + 293.0, + 578.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 569.0, + 1408.0, + 569.0, + 1408.0, + 609.0, + 321.0, + 609.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 
322.0, + 599.0, + 781.0, + 599.0, + 781.0, + 639.0, + 322.0, + 639.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 652.0, + 1408.0, + 652.0, + 1408.0, + 691.0, + 293.0, + 691.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 681.0, + 1407.0, + 681.0, + 1407.0, + 722.0, + 321.0, + 722.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 716.0, + 709.0, + 716.0, + 709.0, + 749.0, + 322.0, + 749.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 763.0, + 1408.0, + 763.0, + 1408.0, + 807.0, + 294.0, + 807.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 319.0, + 794.0, + 1405.0, + 794.0, + 1405.0, + 836.0, + 319.0, + 836.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 830.0, + 610.0, + 830.0, + 610.0, + 862.0, + 324.0, + 862.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 880.0, + 1408.0, + 880.0, + 1408.0, + 916.0, + 295.0, + 916.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 910.0, + 1124.0, + 910.0, + 1124.0, + 949.0, + 321.0, + 949.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 11, + "width": 1700, + "height": 2200 + } + } +] \ No newline at end of file diff --git a/parse/train/fy4ZBWxYbIo/fy4ZBWxYbIo.md b/parse/train/fy4ZBWxYbIo/fy4ZBWxYbIo.md new file mode 100644 index 0000000000000000000000000000000000000000..8ce8283677ad2d9e9a1dca0a1b9c12bbe840df5c --- /dev/null +++ b/parse/train/fy4ZBWxYbIo/fy4ZBWxYbIo.md @@ -0,0 +1,239 @@ +# A Workflow for Offline Model-Free Robotic Reinforcement Learning + +Aviral Kumar?,1, Anikait Singh?,1, Stephen $\mathbf { T i a n } ^ { 1 }$ , Chelsea $\mathbf { F i n n ^ { 2 } }$ , Sergey Levine1 1 UC Berkeley, 2 Stanford University (βˆ— Equal Contribution) aviralk@berkeley.edu, asap7772@berkeley.edu + 
+Abstract: Offline reinforcement learning (RL) enables learning control policies by utilizing only prior experience, without any online interaction. This can allow robots to acquire generalizable skills from large and diverse datasets, without any costly or unsafe online data collection. Despite recent algorithmic advances in offline RL, applying these methods to real-world problems has proven challenging. Although offline RL methods can learn from prior data, there is no clear and well-understood process for making various design choices, from model architecture to algorithm hyperparameters, without actually evaluating the learned policies online. In this paper, our aim is to develop a practical workflow for using offline RL analogous to the relatively well-understood workflows for supervised learning problems. To this end, we devise a set of metrics and conditions that can be tracked over the course of offline training, and can inform the practitioner about how the algorithm and model architecture should be adjusted to improve final performance. Our workflow is derived from a conceptual understanding of the behavior of conservative offline RL algorithms and cross-validation in supervised learning. We demonstrate the efficacy of this workflow in producing effective policies without any online tuning, both in several simulated robotic learning scenarios and for three tasks on two distinct real robots, focusing on learning manipulation skills with raw image observations with sparse binary rewards. Explanatory video and additional content can be found at sites.google.com/view/offline-rl-workflow. + +Keywords: workflow, offline RL, offline tuning + +# 1 Introduction + +Offline reinforcement learning (RL) can in principle make it possible to convert existing large datasets of robotic experience into effective policies, without the need for costly or dangerous online interaction for each training run. 
While offline RL algorithms have improved significantly [1, 2, 3, 4, 5], applying such methods to real-world robotic control problems presents a number of major challenges. In standard online RL, any intermediate policy found during training is executed in the environment to collect more experience, which naturally allows for an evaluation of the policy performance. This ability to evaluate intermediate policies lets practitioners use β€œbrute + +![](images/93b250387a2633b12ccbb56017457a77edb5b8bb68d22310145c16c8258e9664.jpg) +Figure 1: Our proposed workflow aims to detect overfitting and underfitting, and provides guidelines for addressing these issues via policy selection, regularization, and architecture design. We evaluate this workflow on two real-world robotic systems and simulation domains, and we find it to be effective. + +force” to evaluate the effects of various design factors, such as model capacity and expressivity, the number of training steps, and so forth, and facilitates comparatively straightforward tuning. In contrast, offline RL methods do not have access to real-world on-policy rollouts for evaluating the learned policy. Thus, in order for these methods to be truly practical for real-world applications, we not only require effective algorithms, but also an effective workflow: a set of protocols and metrics that can be used to reliably and consistently adjust model capacity, regularization, etc in offline RL to obtain policies with good performance, without requiring real-world rollouts for tuning. + +A number of prior works have studied model selection in offline RL by utilizing off-policy evaluation (OPE) methods [6] to estimate policy performance. These methods can be based either on model or value learning [7, 8, 9, 10] or importance sampling [6, 11, 12, 13]. However, developing reliable OPE methods is itself an open problem, and modern OPE methods themselves suffer from hyperparameter selection challenges (see Fu et al. 
[14] for an empirical study). Moreover, accurate off-policy evaluation is likely not necessary to simply tune algorithms for best performance – we do not need a precise estimate of how good our policy is, but rather a workflow that enables us to best improve it by adjusting various algorithm hyperparameters. + +In this paper, we devise a practical workflow for selecting regularizers, model architectures, and policy checkpoints for offline RL methods in robotic learning settings. We focus on a specific class of conservative offline RL algorithms [15, 2] that regularize the Q-function, but also show that our workflow can be effectively applied to policy constraint methods [16]. Our aim is not to focus on complete off-policy evaluation or to devise a new approach for off-policy evaluation, but rather to adopt a strategy similar to the one in supervised learning. Analogously to how supervised learning practitioners can detect overfitting and underfitting by tracking training and validation losses, and then adjust hyperparameters based on these metrics, our workflow (see Figure 1 for a schematic) first defines and characterizes overfitting and underfitting, proposes metrics and conditions that users can track to determine if an offline RL exhibits overfitting or underfitting, and then utilizes these metrics to inform design decisions pertaining to neural net architectures, regularization, and early stopping. This protocol is intended to act as a β€œuser’s manual” for a practitioner, with guidelines for how to modify algorithm parameters for best results without real-world evaluation rollouts. + +The primary contribution of this paper is a simple yet effective workflow for robotic offline RL. We propose metrics and protocols to assist practitioners in selecting policy checkpoints, regularization parameters, and model architectures for conservative offline RL algorithms such as CQL [2] and BRAC [16]. 
We empirically verify the efficacy of our proposed workflow on simulated robotic manipulation problems as well as three real-world robotic manipulation problems on two different robots, with diverse objects, pixel observations, and sparse binary reward supervision. Experimentally, we evaluate our method on two real-world robots (the Sawyer and WidowX robots), and one realistic simulated tasks. Our approach is effective in all of these cases, and on two tasks with the Sawyer robot that initially fail completely, our workflow improves the success rate to $70 \%$ . + +# 2 Preliminaries, Background, and Definitions + +The goal in RL is to optimize the infinite horizon discounted return $\begin{array} { r } { R = \sum _ { t = 0 } ^ { \infty } \gamma ^ { t } r ( \mathbf { s } _ { t } , \mathbf { a } _ { t } ) } \end{array}$ , where $r ( s , a )$ represents the reward function evaluated at a state-action pair $( \mathbf { s } , \mathbf { a } )$ . We operate in the offline RL setting and are provided with a fixed dataset $\mathcal { D } = \{ ( \mathbf { s } , \mathbf { a } , r ( \mathbf { s } , \mathbf { a } ) , \mathbf { s } ^ { \prime } ) \}$ , consisting of transition tuples obtained from rollouts under a behavior policy $\pi _ { \beta } ( \mathbf { a } | \mathbf { s } )$ . Our goal is to obtain the best possible policy by only training on this fixed offline dataset $\mathcal { D }$ , with no access to online rollouts. We focus on conservative offline RL algorithms that modify the $\mathrm { Q }$ -function to penalize distributional shift, with most experiments on CQL [2], though we also adapt our workflow to BRAC [16] in Appendix F.1. + +Conservative Q-learning (CQL). 
The actor-critic formulation of CQL trains a Q-function $Q _ { \boldsymbol { \theta } } ( \mathbf { s } , \mathbf { a } )$ with a separate policy $\pi _ { \phi } ( \mathbf { a } | \mathbf { s } )$ , which maximizes the expected $\mathrm { Q }$ -value $\begin{array} { r } { \mathbb { E } _ { \mathbf { s } \sim \mathcal { D } , \mathbf { a } \sim \pi _ { \phi } } \left[ Q _ { \theta } ( \mathbf { s } , \mathbf { a } ) \right] } \end{array}$ like other standard actor-critic deep RL methods [17, 18, 19]. However, in addition to the standard TD error ${ \mathcal { L } } _ { \mathrm { T D } } ( \theta )$ (in blue below), CQL applies a regularizer ${ \mathcal { R } } ( \theta )$ (in red below) to prevent overestimation of $\mathrm { Q }$ -values for out-of-distribution (OOD) actions. This term minimizes the $\mathrm { Q }$ -values under a distribution $\mu ( \mathbf { a } | \mathbf { s } )$ , which is automatically chosen to pick actions a with high Q-values $Q _ { \boldsymbol { \theta } } ( \mathbf { s } , \mathbf { a } )$ , and counterbalances this term by maximizing the values of the actions in the dataset: + +$$ +\begin{array} { r l } { \underset { \theta } { \mathrm { m i n } } \ : \ : \ : } & { { } \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : } \\ { \mathrm { m i n } \ : \ : \ : \ : \ : } & { { } \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : \ : } & { \mathrm { ~ \ : ~ \ : \ : \ : \ : } \ : \ : \ : } \end{array} +$$ + +where $B ^ { \pi } \bar { Q } ( \mathbf { s } , \mathbf { a } )$ is the Bellman backup operator with a delayed target Q-function, $\bar { Q }$ : $B ^ { \pi } \bar { Q } ( { \bf s } , { \bf a } ) : =$ $r ( \mathbf { s } , \mathbf { a } ) + \gamma \mathbb { E } _ { \mathbf { a ^ { \prime } } \sim \pi ( \mathbf { a ^ { \prime } } | \mathbf { s ^ { \prime } } ) } [ \bar { Q } ( \mathbf { s ^ { \prime } } , \mathbf { a ^ { \prime } } 
) ]$ . In practice, CQL computes $\mu ( \mathbf { a } | \mathbf { s } )$ using actions sampled from the policy $\pi _ { \phi } ( \mathbf { a } | \mathbf { s } )$ . More discussion of CQL is in Appendix B. In this paper, we will utilize CQL as a base algorithm that our workflow intends to tune, but we also extend it to BRAC. + +Overfitting and underfitting in CQL. Conservative offline RL algorithms [2, 20] like CQL can be sensitive to design choices, including number of gradient steps for training [21, 22] and network capacity. These challenges are also present in supervised learning, but supervised learning methods benefit from a simple and powerful workflow that involves using training error and validation error to characterize overfitting and underfitting. A practitioner can then make tuning choices based on these characterizations. To derive an analogous workflow for offline RL, we first ask: what do overfitting and underfitting actually mean for the case of conservative offline RL? + +To define overfitting and underfitting generically for any conservative offline RL method, we consider an abstract optimization formulation for such methods [2]: + +$$ +\pi ^ { * } : = \arg \operatorname* { m a x } _ { \pi } ~ J _ { \mathcal { D } } ( \pi ) - \alpha D ( \pi , \pi _ { \beta } ) +$$ + +(Conservative offline RL). + +$J _ { \mathcal { D } } ( \pi )$ denotes the average return of policy $\pi$ in the empirical MDP induced by the transitions in the offline dataset $\mathcal { D }$ , and $D ( \pi , \pi _ { \beta } )$ denotes a closeness constraint to the behavior policy, effectively applied by the offline RL method. Our definition of conservative offline RL requires that this + +Table 1: Summary of train error, test error and our definitions of overfitting and underfitting in supervised learning and conservative offline RL methods. We will propose metrics to measure these phenomena in a purely offline manner and recommend how to tune the underlying method accordingly. + +
QuantitySupervised LearningConservative Offline RL
Test errorLoss L evaluated on test data,DtestPerformance of policy,J(Ο€)
Train errorLoss L evaluated on train data,DtrainObjective in Equations 2,1
OverfittingL(Dtrain) low,L(Dval) high,Dval is a validation set drawn i.i.d.as DtrainTraining objective in Equation l is ex- tremely low,low value of J(Ο€)
Underfittinghigh value of train error L(Dtrain)Training objective in Equation 1 is ex- tremely high,low value of J(Ο€)
+ +divergence be computed in expectation over the state visitation distribution of the learned policy $\pi$ in the empirical MDP as discussed in Appendix F.1. For example, Equation 1 translates to utilizing $\begin{array} { r } { D _ { \mathrm { C Q L } } ( p , \bar { q } ) : = \sum _ { \mathbf { x } } p ( \mathbf { x } ) ( p ( \mathbf { x } ) / q ( \mathbf { x } ) - \bar { 1 } ) } \end{array}$ in Equation 2 (see Theorem 3.5 in Kumar et al. [2] for a proof). The training loss is discussed in Equations 1 and 2 and the test loss is equal to the negative of the actual return $J ( \pi )$ of the learned policy. Analogously to supervised learning, we can use the notion of train and test error to define overfitting and underfitting in offline RL, as discussed in Table 1. However, note that the conditions summarized in Table 1 are not measurable completely offline. Precisely estimating if a run of an offline RL method overfits or underfits requires evaluating the learned policy via interaction with the real-world environment. In Section 3, our goal will be to devise offline metrics for characterizing overfitting that do not have this requirement. We will tailor our study specifically towards CQL, though we extend it to BRAC in Appendix F.1. A similar procedure could be devised for other offline RL methods, but we leave this for future work. + +# 3 Detecting Overfitting and Underfitting in Conservative Offline RL + +In standard supervised learning, we can determine if a method overfits or underfits by comparing the training loss to the same loss function evaluated on a held-out validation dataset, which serves as a β€œproxy” test dataset. In contrast, the return of the learned policy $J ( \pi )$ in RL does not have a direct proxy that can be computed offline. Thus, our goal is to identify offline metrics and conditions that allow us to measure overfitting and underfitting in conservative offline RL, with a focus on CQL. 
We also adapt these conditions to BRAC [16], a policy-constraint method in Appendix F.2. + +Detecting overfitting in CQL. Our definition of overfitting (Table 1) corresponds to a low value for the training loss (Equation 1), but poor actual policy performance $J ( \pi )$ . To detect this, we analyze the time series of the estimated Q-values averaged over the dataset samples $( \mathbf { s } , \mathbf { a } , r , \mathbf { s } ^ { \prime } ) \in \mathcal { D }$ over the course of training with a large number of gradient steps. A run is labeled as overfitting if we see that the expected dataset Q-value exhibits a non-monotonic trend: if the average Q-values first increase and then decrease as shown in the figure on the right. Additionally, we would see that training loss in Equation 1 eventually becomes very low. Why do we see such a trend in the average dataset $\mathbf { Q }$ -value? Since CQL selectively penalizes the average Qvalue under the distribution $\mu ( \mathbf { a } | \mathbf { s } )$ supported on actions with large Q-values, we would expect the Q-values on states from the dataset s $\sim \mathcal { D }$ and the learned $\mathbf { a } \sim \pi ( \cdot | \mathbf { s } )$ to be small since the policy is trained to maximize the Q-function as well. This in turn would lead to an eventual reduction in the average Q-value on dataset actions, $\mathbb { E } _ { \mathbf { s } , \mathbf { a } \sim \mathcal { D } } [ Q _ { \theta } ( \mathbf { s } , \mathbf { a } ) ]$ . This would be visible after sufficiently many steps of training, when values have propagated via Bellman backups in Equation 1 giving rise to the non-monotonic trend. If such a trend is observed, this raises two questions, as we discuss next. 
+
+![](images/a834916e9152742e6ff0bc3376e3cee94d030ab397761d3b7ccebf41e7204020.jpg)
+
+What does a low average $Q$ -value $\mathbb { E } _ { \mathbf { s } , \mathbf { a } \sim \mathcal { D } } [ Q _ { \theta } ( \mathbf { s } , \mathbf { a } ) ]$ imply about $J ( \pi )$ ? We show in Appendix A that, in principle, CQL training (Equation 1) should never learn Q-values smaller than the dataset Monte-Carlo return, and the $\mathrm { Q }$ -values should increase unless the learned policy $\pi$ is better than $\pi _ { \beta }$ . Intuitively, this is because the objective in Equation 1 aims to also maximize the average dataset
+
+Q-value and thus the Q-values for the behavior policy are not underestimated in expectation. Now, if the policy optimizer finds a policy that attains a smaller learned Q-value than the dataset return, the policy can always be updated further towards the behavior policy so as to raise the Q-value. Therefore, Q-values can only decrease when the policy found by CQL is better than the behavior policy. We formalize this intuition in Appendix A in Theorem A.1. Thus, a low $\mathrm { Q }$ -value on $( \mathbf { s } , \mathbf { a } ) \in$ $\mathcal { D }$ indicates that the Q-function predicts extremely small Q-values on actions sampled from $\mu ( \mathbf { a } | \mathbf { s } )$ . Typically, this would mean the highest Q-value actions $\mathbf { a }$ at a state $\mathbf { s } \in \mathcal { D }$ are those sampled from the offline dataset, drawn from the behavior policy. Thus, policy optimization, which aims to maximize the Q-value, would make $\pi ( \mathbf { a } | \mathbf { s } )$ closer to the behavior policy $\pi _ { \beta } ( \mathbf { a } | \mathbf { s } )$ on $\mathbf { s } \in \mathcal { D }$ .
+
+Which training checkpoint is likely to attain the best policy performance? Tracking overfitting in supervised learning is important for selecting the best-performing checkpoint, before overfitting becomes severe. 
Analogously, we can compare the average dataset Q-value across different checkpoints within the same run to pick the best policy. Since CQL aims to increase the average dataset Q-value (Equation 1), we would expect Q-values to initially increase, until learning starts to overfit and the average dataset $\mathrm { Q }$ -value starts decreasing. We should therefore select the latest checkpoint that corresponds to a peak in the estimated dataset Q-value. A visual illustration of this idea is shown in the figure on the previous page, where the checkpoint marked by the green line is recommended to be chosen. In summary, (a) to detect overfitting we can track: + +Metric 3.1 (Overfitting). A low average data $Q$ -value $\mathbb { E } _ { \mathbf { s } , \mathbf { a } \sim \mathcal { D } } [ Q _ { \theta } ( \mathbf { s } , \mathbf { a } ) ]$ that decreases with more gradient steps on Equation 1 indicates that the offline RL algorithm is overfitting. + +and (b) further, given a run that exhibits overfitting, our principle for policy selection is given by: + +Guideline 3.1 (Policy selection). If a run overfits (per Metric 3.1), select the checkpoint that attains the highest average dataset $Q$ -value before overfitting for deployment. + +Finally, for actor-critic algorithms [18] that update the actor slower than the critic, the next policy checkpoint after the peak in the average dataset Q-value appears must be selected. In most of our experiments, we find that simply utilizing the policy checkpoint at the point of the peak in the Qvalue also leads to good results making this a rare concern, but in some cases, utilizing the next checkpoint after the Q-value peak performs better empirically. + +Detecting underfitting in CQL. Next, we turn to devising a procedure to detect underfitting. As summarized in Table 1, underfitting occurs when the RL algorithm is unable to minimize the training objective in Equation 1 effectively. 
Therefore, large values for the TD error, the CQL regularizer, or both imply underfitting. A large value for the CQL regularizer, ${ \mathcal { R } } ( \theta )$ , indicates an overestimation of Q-values relative to their true value [2] and thus, unlike the overfitting regime, we would not expect the average learned $\mathrm { Q }$ -value to decrease with more training. Thus, one approach to predict underfitting is to track both the TD error, ${ \mathcal { L } } _ { \mathrm { T D } } ( \theta )$ , and the CQL regularizer, ${ \mathcal { R } } ( \theta )$ , and check if the value of even one of these quantities is large. More discussion is provided in Appendix A.
+
+![](images/4b1e144e763ab56eff446401eff0f116a6cc810b779d08af3c7a767883b102d8.jpg)
+
+How do we determine if the TD error and the CQL regularizer are "large"? In order to determine if the error of a particular run is large, we can rerun the base CQL algorithm but with models of higher capacity, which does not necessarily correspond to the function approximator size, as we will discuss in Section 4. For each model, we record the corresponding training errors and check if the training TD error and CQL regularizer value are reduced with capacity increase. If increasing capacity leads to a reduction in the loss without exhibiting the overfitting signs described previously, then we are in an underfitting regime. Another approach to answer the question is to utilize the value of the TD error $\left( \mathcal { L } _ { \mathrm { T D } } ( \theta ) \right)$ and the task horizon $( 1 / ( 1 - \gamma ) )$ to estimate the overall error in the learned Q-values against the actual Q-value, which is equal to $\mathcal { L } _ { \mathrm { T D } } ( \theta ) / ( 1 - \gamma )$ [23] (see Appendix A). 
If this overall error spans the range of allowed Q-values on the task – which could be inferred based on the structure of the reward function in the task – then we can say that the algorithm is underfitting. + +Metric 3.2 (Underfitting). Compute the values of the training $T D$ error, ${ \mathcal { L } } _ { \mathrm { T D } } ( \theta )$ and CQL regularizer, ${ \mathcal { R } } ( \theta )$ for the current run and another identical run with increased model capacity. If the training errors reduce with increasing model capacity, the original run was underfitting. + +# 4 Addressing Overfitting and Underfitting in Conservative Offline RL + +The typical workflow for supervised learning not only identifies overfitting and underfitting, but also guides the practitioner how to adjust their method so as to alleviate it (e.g., by modifying regularization or model capacity), thus improving performance. Can we devise similar guidelines to address overfitting and underfitting with conservative offline RL? Here, we discuss some ways to adjust regularization and model capacity to alleviate these phenomena. + +Capacity-decreasing regularization for overfitting. As we observed in Section 3, the mechanism behind extremely low $\mathbf { Q }$ -values on the dataset is that CQL training minimizes $\mathrm { Q }$ -values on actions sampled from $\mu ( \mathbf { a } | \mathbf { s } )$ . Two possible approaches to preventing over-minimization of these values are (1) applying regularization such as dropout [24] on Q-function layers, similar to supervised learning, and (2) enforcing that representations of the learned Q-function match a pre-specified target for all state-action tuples. For (2), we can apply techniques such as a variational information bottleneck (VIB) [25, 26] regularizer on the learned representations, $\phi ( \mathbf { s } )$ . Formally, let $( \mathbf { s } , \mathbf { a } )$ denote a stateaction pair. 
Instead of predicting a deterministic $\phi ( \mathbf { s } ) \in \mathbb { R } ^ { d }$ (Figure 10), we modify the Q-network to predict two distinct vectors, $\phi _ { m } ( \mathbf { s } ) \in \mathbb { R } ^ { d }$ and $\phi _ { \Sigma } ( \mathbf { s } ) \in \mathbb { R } ^ { d }$ , and sample $\phi ( \mathbf { s } )$ randomly from a Gaussian centered at $\phi _ { m }$ with covariance $\phi _ { \Sigma }$ , i.e., $\phi ( \mathbf { s } ) \sim \mathcal { N } ( \phi _ { m } ( \mathbf { s } ) , \mathrm { d i a g } ( \phi _ { \Sigma } ( \mathbf { s } ) ) )$ . VIB then regularizes $\mathcal { N } ( \phi _ { m } ( \mathbf { s } ) , \mathrm { d i a g } ( \phi _ { \Sigma } ( \mathbf { s } ) ) )$ to be close to a prior distribution, $\mathcal { N } ( 0 , \mathbb { I } )$ :
+
+$$
+\operatorname* { m i n } _ { \theta } \ \mathcal { L } _ { \mathrm { C Q L } } ( \theta ) + \beta \mathbb { E } _ { \mathrm { s } \sim \mathcal { D } } \left[ \mathrm { D } _ { \mathrm { K L } } \left( \mathcal { N } ( \phi _ { m } ( \mathbf { s } ) , \mathrm { d i a g } ( \phi _ { \Sigma } ( \mathbf { s } ) ) ) \ | | \mathcal { N } ( 0 , \mathbb { I } ) \right) \right] \quad ( \mathrm { V I B ~ r e g u l a r i z e r } ) ,
+$$
+
+Guideline 4.1. To address overfitting, we recommend using some form of capacity-decreasing regularization on the $Q$ -function, such as dropout or the VIB regularizer shown in Equation 3.
+
+Capacity-increasing techniques for underfitting. To address underfitting, we need to increase model capacity to improve optimization of the training objective. Analogous to supervised learning, model capacity can be increased by using more expressive neural nets (e.g., ResNets [27], transformers [28]) for representing the learned policy. We use ResNets in our experiments (Figure 10). 
However, the RL setting presents an additional challenge with capacity: while larger models in principle have more capacity, recent work [29, 21, 22] has shown that utilizing larger networks to represent Q-functions does not always improve capacity in practice, because TD-based RL methods introduce an "implicit under-parameterization" effect that can result in aliased (i.e., similar) internal representations for different state-action inputs, even for very large neural networks that can express the true Q-function effectively. To address this issue, these works apply a "capacity-increasing" regularizer to Q-function training. For instance, we can use the DR3 regularizer [22], which penalizes the dot product of $\phi ( \mathbf { s } )$ and $\phi ( \mathbf { s } ^ { \prime } )$ for a transition $( \mathbf { s } , \mathbf { a } , \mathbf { s } ^ { \prime } ) \in \mathcal { D }$ , and hence reduces aliasing. This objective is given by:
+
+$$
+\operatorname* { m i n } _ { \theta } \ \mathcal { L } _ { \mathrm { C Q L } } ( \theta ) + \beta \mathbb { E } _ { { \mathbf s } , { \mathbf a } , { \mathbf s } ^ { \prime } \sim \mathcal { D } } \left[ \left| \phi ( { \mathbf s } ) ^ { \top } \phi ( { \mathbf s } ^ { \prime } ) \right| \right] \qquad ( { \mathrm { D R 3 ~ r e g u l a r i z e r ~ } } [ 2 2 ] ) ,
+$$
+
+Guideline 4.2. To address underfitting, we recommend using some capacity-increasing regularization on the Q-function and the policy either in conjunction or separately. Examples: (1) bigger policy networks (e.g., ResNets), (2) DR3 regularizer on the Q-network.
+
+# 5 Evaluation of Our Workflow Metrics and Protocols in Simulation
+
+Next, we empirically validate the workflow proposed in Sections 3 and 4 on a suite of simulated robotic manipulation domains that mimic real-robot scenarios, from image observations with sparse binary rewards. 
We will examine how applying the workflow in Section 3 to detect overfitting or underfitting and then utilizing the strategies in Section 4 affects the performance of offline RL methods. An improved performance would indicate the efficacy of our workflow in making successful design decisions without any online tuning. + +![](images/feadf98282550c14dd6939d9b1aaebdb526316856a1034070ef48af1c82a1480.jpg) + +Experimental setup. We use the environments from Singh et al. [3] to design offline RL tasks and datasets that we use for our empirical analysis. We consider two tasks: (1) a pick and place task and (2) a grasping object from a drawer task. Examples of trajectories in both of these simulated domains are shown in Figure 2 and are detailed in Appendix D. Briefly, the pick and place task consists of a 6-DoF WidowX robot in front of a tray with an object. The goal is to put the object inside the tray. A non-zero reward of $+ 1$ is provided only when the object has been placed in the box. The offline dataset for this task consists of trajectories that grasp an object with a $3 5 \%$ success and other trajectories that place an object with a $40 \%$ success. Our second task is a grasping from drawer task where the WidowX robot is placed in front of a drawer and multiple objects. The robot can open or close the drawer, grasp objects from inside the drawer or on the table, and place them anywhere in the scene. The goal is to close the top drawer, then open the bottom drawer and take the object out. Only if the object has been taken out, a reward of $+ 1$ is obtained. The offline dataset consists of trajectories with a $3 0 { - } 4 0 \%$ success rate for opening and closing a drawer and other trajectories with only $40 \%$ placing success. We use $\alpha = 1 . 0$ for CQL training in all experiments, which is directly taken from prior work [3], without any tuning. 
However, too low or too high $\alpha$ values will inhibit the effectiveness of regular CQL and we first need to tune $\alpha$ as discussed in Appendix G. More details are provided in Appendix D. + +![](images/7149b62299170ecd7a26a8a110d939daa58337b186bfd1671932f83c2c649963.jpg) +Figure 3: Policy performance (Top) and average dataset Q-values of CQL (bottom) with varying number of trajectories. Vertical bands indicate regions around the peak in average $\mathrm { Q }$ -value and observe that these regions correspond to policies with good actual performance. + +Scenario #1: Variable amount of training data. Our first scenario consists of the simulated tasks discussed above with a variable number of trajectories in the training data (50, 100, 500, 10000). We run CQL and track metrics 3.1 and 3.2 in each case. Observe in Figure 3 (bottom) that with fewer trajectories, the average dataset Q-value $\mathbb { E } _ { \mathbf { s } , \mathbf { a } \sim \mathcal { D } } [ Q _ { \theta } ( \mathbf { s } , \mathbf { a } ) ]$ first rises, and then drops. This matches the description of overfitting in Section 3. Observe in Figure 4 (left) that, at the same time, the value of the CQL regularizer is very low, which is not consistent with what we expect of underfitting. Thus, we can conclude that these conditions exhibit overfitting, especially with 50 and 100 trajectories. The vertical dashed lines indicate the checkpoints that would be selected for evaluation per Guideline 3.1. We further visualize the performance of the chosen checkpoints against the actual return of each intermediate policy in Figure 3 (top). Note that this value is obtained by rolling out the learned policy, and would not be available in a realistic offline RL setting, but is provided only for analysis. Selecting the checkpoint based on Guideline 3.1 leads us to select a model with close to the peak performance over the training process, validating the efficacy of Guideline 3.1. 
+
+Since we detected overfitting by following our workflow, we now aim to address it by using the VIB regularizer in the setting with 100 trajectories. As shown in Figure 4 (right), applying this regularizer not only alleviates the drop in Q-values after many training steps, but allows us to pick later checkpoints in training which perform better than base CQL on both the tasks. This validates that overfitting, as detected via our workflow, can be effectively mitigated by decreasing capacity, in this case by using VIB. We evaluate dropout, $\ell _ { 1 }$ and $\ell _ { 2 }$ regularization schemes in Appendix J.
+
+![](images/c8b831ad68078d018a8733761f0da0593a9224d909869ecb852f0ffd8ee06f24.jpg)
+Figure 4: Left: CQL regularizer attains low values, especially with 50 and 100 trajectories in the pick and place task, Right: Using VIB mitigates overfitting, giving rise to a stable trend in $\mathrm { Q }$ -values and better performance which does not degrade with more training steps.
+
+Scenario #2: Multiple training objects. Our second test scenario consists of the pick and place task, modified to include a variable number of object types (1, 5, 10, 20, 35). Handling more objects requires higher capacity, since each object has a different shape and appearance. In each case, CQL is provided with 5000 trajectories. Following our workflow from Section 3, we first compute the average dataset Q-value and the training TD error. We observe in Figure 5 that, unlike in Scenario #1, Q-values do not generally decrease when trained for many steps, suggesting that the Q-function is likely not overfitting. To check for underfitting, we visualize the training TD error and find that, with 10, 20 and 35 objects, TD error magnitudes are in the range of [1.0, 2.0], which suggests an overall Q-value error of [30.0, 60.0] since the task horizon is 30. 
On an absolute scale, this error magnitude is large: since the rewards are 0/1, the range of difference between actual Q-values for any two policies is at most 30, which suggests that the error magnitude in the runs in Figure 5 is high. Hence, we conclude that this scenario generally exhibits underfitting with more objects. Indeed this trend is reflected in the policy performance that we plot for analysis in Figure 5: note that the policy return decreases with an increased number of objects, and the policy performance initially increases and saturates at a suboptimal value.
+
+![](images/abb86c89607619cd53397449a92fc47f01701017a73f7df2599b8c654f14d0ca.jpg)
+Figure 5: Performance (left), TD error (middle) and average dataset Q-values (right) for the pick and place task with a variable number of objects. Note that while the learned Q-values increase and stabilize, the TD error values in scenarios with more than 10 objects are large (1.0-2.0). Correspondingly, the performance generally decreases as the number of objects increases.
+
+![](images/b4ce278afd05bdaaa57ddb722d6767ecad2d352a23720d7d7f144183cc608616.jpg)
+Figure 6: Correcting underfitting by applying our workflow for 35 objects.
+
+To address underfitting in the multi-object case, we apply the proposed capacity-increasing measures to the 35-object task (results for 10 and 20 object settings are in Appendix I). We use a more expressive ResNet architecture for the policy and the DR3 regularizer for the Q-function together. Observe in the figure on the right that this combination (shown in red) improves policy performance in this setting (compared to green), which validates our workflow protocol for addressing underfitting.
+
+# 6 Tuning CQL for Real-World Robotic Manipulation
+
+Having evaluated the efficacy of our proposed workflow in simulation, we now utilize our workflow to tune CQL for real-world robotic manipulation. 
We test in two setups that require the robot to learn from sparse binary rewards and image observations. The settings differ in robot platform, task specification, and dataset size. Additional results and robot videos are at the following website: https://sites.google.com/view/offline-rl-workflow
+
+Sawyer manipulation tasks [30]. First, we train a
+
+![](images/566e1138148ac542d1676690513ab2bab4af3cbfec1257dc70a864019d00681c.jpg)
+Figure 7: Real-world tasks. Successful rollouts of CQL tuned with our workflow from Sections 3 & 4. Top to bottom: Sawyer lid on pot, Sawyer drawer opening, WidowX pick-place task.
+
+Sawyer robot in a tabletop setting to perform two tasks: (1) placing the lid onto a pot and (2) opening a drawer. The robot must perform these tasks in the presence of visual distractor objects, as shown in Figure 7. We directly use the dataset of 100 trajectories for each task collected by Khazatsky et al. [30] for our experiments so as to mimic the real-world use case of leveraging existing data with offline RL. We use four-dimensional actions with 3D end-effector velocity control in xyz-space and 1D gripper open/close action. More details regarding the setup are provided in Appendix D.
+
+We run default CQL on these tasks and track the average Q-value, TD error, and CQL regularizer value. As shown in Figure 8, the average Q-value does not decrease over training, and the TD error (and CQL regularizer shown in Appendix E.2) is large. Per our discussion in Section 3, this indicates underfitting. Following our guidelines from Section 4, we utilize a more expressive ResNet policy (Figure 10), which increases the number of total convolutional layers from 3 to 9. We observe that this reduces the values of both the TD error (Figure 8) and CQL regularizer (Appendix E.2) on both tasks. We
+
+![](images/d5317090e1ad32bc153c1daf1e4d3802dfab85d171f13b7f15890473be1358cc.jpg)
+Figure 8: Average Q-value and TD error on Sawyer tasks as model capacity increases. 
Q-values increase over training with lower capacity ruling out overfitting and increasing model capacity leads to a reduction in TD error indicating the presence of underfitting. + +then evaluate the learned policy over 12 trials conducted with different sets of distractor objects, including ones that are unseen during training. While the policy trained using base CQL is unable to successfully complete either task even once attaining a score of 0/12 on both tasks, the run that uses ResNet attains a significantly better success rate of 9/12 on the put lid on pot task and 8/12 on the drawer opening task, equal to $7 0 . 8 \%$ success rate on average. + +![](images/b9dc053e0a74e8660fec33a5ef35e25d9ae5b1e03b4824f6ff4428e483bb79a7.jpg) +Figure 9: Q-values (left) and performance of CQL with (middle) and without (right) the variational information bottleneck correction for overfitting on the real-world widowX pick and place task. Since the Q-values start to decrease with more training, our workflow detects that CQL is overfitting. Using our policy selection guideline (Guideline 3.1) enables us to choose checkpoint 50 marked with the green vertical dashed line (right) which performs well. Further, addressing overfitting by applying the VIB regularizer stabilizes the Q-values (brown) which do not decrease unlike base CQL (blue) (left). Finally, applying the VIB regularizer improves performance and reduces sensitivity to policy selection (middle). + +WidowX pick and place task. In our second setting, we tune CQL on a pick and place task with a WidowX 250 robotic arm, shown in Figure 7. The dataset consists of 200 trajectories collected by running a noisy scripted policy (Appendix D) with $3 5 \%$ success. We run CQL on this task and track the average Q-values, which we find initially increase and then decrease (Figure 9 (left; labeled as β€œQ-values”)), indicating overfitting. 
We then evaluate our policy selection scheme, which in this case suggests deploying checkpoint 50, the immediate checkpoint after the peak in Q-values. To see if this checkpoint is effective, we evaluate the performance of a few other policy checkpoints (for analysis only) and plot this performance trend in Figure 9 (right) as a dashed line. Observe that indeed the checkpoint found by our workflow attains the highest success rate (7/9) compared to other checkpoints, which only succeed $\leq 4 / 9$ times. + +Since overfitting is detected, we now turn to addressing overfitting by adding the VIB regularizer (Equation 3) during training. As shown in Figure 9 (left), the Q-values obtained after the addition of this regularizer (shown in brown; labeled β€œQ-values (VIB)”) are now stable and do not decrease over the course of training and so we can choose any policy for evaluation. We evaluate multiple policies, for visualization pur + +
Real-world WidowX pick and place
| Method | Epoch 50 | Epoch 75 | Epoch 100 | Epoch 200 |
| --- | --- | --- | --- | --- |
| CQL | 7/9 | 4/9 | 4/9 | 2/9 |
| CQL + VIB | 3/9 | 8/9 | 7/9 | 7/9 |
+ +Table 2: Performance of various policy checkpoints of CQL and $\mathrm { C Q L + V I B }$ on the real WidowX pick and place task (bold entry denotes the checkpoint selected by our workflow). Note that when overfitting is corrected via VIB, multiple checkpoints perform well. + +poses only, in Figure 9 (middle), we find that all of them attain a $\geq 7 / 9$ success, comparable or better than the base CQL algorithm (Figure 9 (right)). This indicates that addressing overfitting not only leads to some gains in performance but also greatly simplifies policy selection as all checkpoints perform similarly and well. Table 2 summarizes these results below, where the bold entries denote the checkpoints found by our policy selection rule. These results indicate the effectiveness of our workflow in tuning CQL by addressing overfitting and underfitting on multiple real robot platforms. + +# 7 Discussion + +While offline RL algorithms have improved significantly, applying these methods to real-world robotic domains is still challenging due to little guidance on tuning them. In this paper, we devise a workflow for algorithms such as CQL and BRAC, which consists of a set of metrics and conditions that can be tracked by a practitioner over the course of offline training to detect overfitting and underfitting, and recommendations to addresses the observed challenges. Applying our workflow both in simulation and the real world shows strong performance benefits. While our proposed workflow is an initial step towards practical robotic offline RL and is based on our best conceptual understanding of certain offline RL algorithms, these guidelines are heuristic. To some extent this is unavoidable, since a workflow is a set of guidelines and recommendations, rather than a rigid algorithm. Regardless of how theoretically justified it is, in the end, its value is determined by its ability to produce good results. 
We believe the breadth of tasks considered, which consist of two different real robots and multiple simulated tasks, indicates its broad applicability. However, deriving theoretical guarantees regarding workflows of this type is an important direction for future research.
+
+# Acknowledgements
+
+We thank Ilya Kostrikov, Avi Singh, Ashvin Nair, Alexander Khazatsky, Albert Yu, Jedrzej Orbik, and Jonathan Yang for their help with setting up and debugging various aspects of the experimental setup as well as for providing us with offline datasets we could test our workflow on. We thank Dibya Ghosh, anonymous reviewers, and the area chair from CoRL for constructive feedback on an earlier version of this paper. AK thanks George Tucker and Rishabh Agarwal for valuable discussions. This research was funded by the DARPA Assured Autonomy Program and compute support from Google and Microsoft Azure.
+
+# References
+
+[1] D. Kalashnikov, J. Varley, Y. Chebotar, B. Swanson, R. Jonschkowski, C. Finn, S. Levine, and K. Hausman. Mt-opt: Continuous multi-task robotic reinforcement learning at scale. arXiv preprint arXiv:2104.08212, 2021.
+[2] A. Kumar, A. Zhou, G. Tucker, and S. Levine. Conservative q-learning for offline reinforcement learning. arXiv preprint arXiv:2006.04779, 2020.
+[3] A. Singh, A. Yu, J. Yang, J. Zhang, A. Kumar, and S. Levine. Cog: Connecting new skills to past experience with offline reinforcement learning. arXiv preprint arXiv:2010.14500, 2020.
+[4] Y. Chebotar, K. Hausman, Y. Lu, T. Xiao, D. Kalashnikov, J. Varley, A. Irpan, B. Eysenbach, R. Julian, C. Finn, and S. Levine. Actionable models: Unsupervised offline reinforcement learning of robotic skills. arXiv preprint arXiv:2104.07749, 2021.
+[5] D. Kalashnikov, A. Irpan, P. Pastor, J. Ibarz, A. Herzog, E. Jang, D. Quillen, E. Holly, M. Kalakrishnan, V. Vanhoucke, et al. Scalable deep reinforcement learning for vision-based robotic manipulation. In Conference on Robot Learning, pages 651–673. PMLR, 2018. 
+[6] D. Precup. Eligibility traces for off-policy policy evaluation. Computer Science Department Faculty Publication Series, page 80, 2000. +[7] I. Kostrikov and O. Nachum. Statistical bootstrapping for uncertainty estimation in off-policy evaluation. arXiv preprint arXiv:2007.13609, 2020. +[8] C. Paduraru. Off-policy evaluation in Markov decision processes. PhD thesis, Ph. D. Dissertation. McGill University, 2012. +[9] T. L. Paine, C. Paduraru, A. Michi, C. Gulcehre, K. Zolna, A. Novikov, Z. Wang, and N. de Freitas. Hyperparameter selection for offline reinforcement learning. arXiv preprint arXiv:2007.09055, 2020. +[10] O. Nachum and B. Dai. Reinforcement learning via fenchel-rockafellar duality. arXiv preprint arXiv:2001.01866, 2020. +[11] P. Thomas, G. Theocharous, and M. Ghavamzadeh. High confidence policy improvement. In International Conference on Machine Learning, pages 2380–2388, 2015. +[12] P. S. Thomas, G. Theocharous, and M. Ghavamzadeh. High-confidence off-policy evaluation. In Twenty-Ninth AAAI Conference on Artificial Intelligence, 2015. +[13] N. Jiang and L. Li. Doubly robust off-policy value evaluation for reinforcement learning. arXiv preprint arXiv:1511.03722, 2015. +[14] J. Fu, M. Norouzi, O. Nachum, G. Tucker, ziyu wang, A. Novikov, M. Yang, M. R. Zhang, Y. Chen, A. Kumar, C. Paduraru, S. Levine, and T. Paine. Benchmarks for deep off-policy evaluation. In International Conference on Learning Representations, 2021. URL https: //openreview.net/forum?id=kWSeGEeHvF8. +[15] S. Levine, A. Kumar, G. Tucker, and J. Fu. Offline reinforcement learning: Tutorial, review, and perspectives on open problems. arXiv preprint arXiv:2005.01643, 2020. +[16] Y. Wu, G. Tucker, and O. Nachum. Behavior regularized offline reinforcement learning. arXiv preprint arXiv:1911.11361, 2019. +[17] T. P. Lillicrap, J. J. Hunt, A. Pritzel, N. Heess, T. Erez, Y. Tassa, D. Silver, and D. Wierstra. Continuous control with deep reinforcement learning. 
arXiv preprint arXiv:1509.02971, 2015. +[18] S. Fujimoto, H. Van Hoof, and D. Meger. Addressing function approximation error in actorcritic methods. arXiv preprint arXiv:1802.09477, 2018. +[19] T. Haarnoja, A. Zhou, P. Abbeel, and S. Levine. Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor. arXiv preprint arXiv:1801.01290, 2018. +[20] I. Kostrikov, J. Tompson, R. Fergus, and O. Nachum. Offline reinforcement learning with fisher divergence critic regularization. arXiv preprint arXiv:2103.08050, 2021. +[21] A. Kumar, R. Agarwal, D. Ghosh, and S. Levine. Implicit under-parameterization inhibits data-efficient deep reinforcement learning. In International Conference on Learning Representations, 2021. URL https://openreview.net/forum?id=O9bnihsFfXU. +[22] A. Kumar, R. Agarwal, A. Courville, T. Ma, G. Tucker, and S. Levine. Value-based deep reinforcement learning requires explicit regularization. In RL for Real Life Workshop & Overparameterization: Pitfalls and Opportunities Workshop, ICML, 2021. URL https: //drive.google.com/file/d/1Fg43H5oagQp-ksjpWBf_aDYEzAFMVJm6/view. +[23] R. Munos. Error bounds for approximate policy iteration. In Proceedings of the Twentieth International Conference on International Conference on Machine Learning, ICML’03, page 560–567. AAAI Press, 2003. ISBN 1577351894. +[24] N. Srivastava, G. Hinton, A. Krizhevsky, I. Sutskever, and R. Salakhutdinov. Dropout: a simple way to prevent neural networks from overfitting. The journal of machine learning research, 15 (1):1929–1958, 2014. +[25] A. A. Alemi, I. Fischer, J. V. Dillon, and K. Murphy. Deep variational information bottleneck. arXiv preprint arXiv:1612.00410, 2016. +[26] A. Achille and S. Soatto. Emergence of invariance and disentanglement in deep representations. The Journal of Machine Learning Research, 19(1):1947–1980, 2018. +[27] K. He, X. Zhang, S. Ren, and J. Sun. Deep residual learning for image recognition. 
In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770–778, 2016. +[28] A. Vaswani, N. Shazeer, N. Parmar, J. Uszkoreit, L. Jones, A. N. Gomez, L. Kaiser, and I. Polosukhin. Attention is all you need. arXiv preprint arXiv:1706.03762, 2017. +[29] D. Ghosh and M. G. Bellemare. Representations for stable off-policy reinforcement learning. arXiv preprint arXiv:2007.05520, 2020. +[30] A. Khazatsky, A. Nair, D. Jing, and S. Levine. What can i do here? learning new skills by imagining visual affordances. arXiv preprint arXiv:2106.00671, 2021. +[31] D. Kalashnikov, A. Irpan, P. Pastor, J. Ibarz, A. Herzog, E. Jang, D. Quillen, E. Holly, M. Kalakrishnan, V. Vanhoucke, et al. Scalable deep reinforcement learning for vision-based robotic manipulation. In Conference on Robot Learning, pages 651–673, 2018. +[32] A. Zeng, S. Song, S. Welker, J. Lee, A. Rodriguez, and T. Funkhouser. Learning synergies between pushing and grasping with self-supervised deep reinforcement learning. 2018. +[33] OpenAI. Learning dexterous in-hand manipulation. In arXiv preprint arXiv:1808.00177, 2018. +[34] H. van Hoof, T. Hermans, G. Neumann, and J. Peters. Learning robot in-hand manipulation with tactile features. 2015. +[35] A. Rajeswaran, V. Kumar, A. Gupta, G. Vezzani, J. Schulman, E. Todorov, and S. Levine. Learning complex dexterous manipulation with deep reinforcement learning and demonstrations. In RSS, 2018. +[36] V. Kumar, A. Gupta, E. Todorov, and S. Levine. Learning dexterous manipulation policies from experience and imitation. CoRR, abs/1611.05095, 2016. +[37] C. Schenck and D. Fox. Visual closed-loop control for pouring liquids. In International Conference on Robotics and Automation (ICRA), 2017. +[38] A. Yahya, A. Li, M. Kalakrishnan, Y. Chebotar, and S. Levine. Collective robot reinforcement learning with distributed asynchronous guided policy search. In IROS, 2017. +[39] J. Matas, S. James, and A. J. Davison. 
Sim-to-real reinforcement learning for deformable object manipulation. In Conference on Robot Learning (CoRL), 2018. +[40] R. Julian, B. Swanson, G. S. Sukhatme, S. Levine, C. Finn, and K. Hausman. Efficient adaptation for end-to-end vision-based robotic manipulation. arXiv arXiv:2004.10190, 2020. +[41] S. Cabi, S. G. Colmenarejo, A. Novikov, K. Konyushkova, S. Reed, R. Jeong, K. Żołna, Y. Aytar, D. Budden, M. Vecerik, et al. A framework for data-driven robotics. arXiv preprint arXiv:1909.12200, 2019. +[42] C. Finn and S. Levine. Deep visual foresight for planning robot motion. In 2017 IEEE International Conference on Robotics and Automation (ICRA), pages 2786–2793. IEEE, 2017. +[43] F. Ebert, C. Finn, S. Dasari, A. Xie, A. Lee, and S. Levine. Visual foresight: Model-based deep reinforcement learning for vision-based robotic control. arXiv preprint arXiv:1812.00568, 2018. +[44] A. Xie, F. Ebert, S. Levine, and C. Finn. Improvisation through physical understanding: Using novel objects as tools with visual foresight. Robotics: Science and Systems (RSS), 2019. +[45] Y. Hristov, A. Lascarides, and S. Ramamoorthy. Interpretable latent spaces for learning from demonstration. arXiv preprint arXiv:1807.06583, 2018. +[46] S. Tian, S. Nair, F. Ebert, S. Dasari, B. Eysenbach, C. Finn, and S. Levine. Model-based visual planning with self-supervised functional distances. arXiv preprint arXiv:2012.15373, 2020. +[47] S. Young, D. Gandhi, S. Tulsiani, A. Gupta, P. Abbeel, and L. Pinto. Visual imitation made easy. arXiv e-prints, pages arXiv–2008, 2020. +[48] E. Johns. Coarse-to-fine imitation learning: Robot manipulation from a single demonstration. arXiv preprint arXiv:2105.06411, 2021. +[49] A. Mandlekar, F. Ramos, B. Boots, S. Savarese, L. Fei-Fei, A. Garg, and D. Fox. Iris: Implicit reinforcement without interaction at scale for learning control from offline robot manipulation data. In 2020 IEEE International Conference on Robotics and Automation (ICRA), pages 4414–4420. 
IEEE, 2020. +[50] A. Mandlekar, D. Xu, R. Martín-Martín, S. Savarese, and L. Fei-Fei. Learning to generalize across long-horizon tasks from human demonstrations, 2020. +[51] S. Lange, T. Gabel, and M. Riedmiller. Batch reinforcement learning. In Reinforcement learning, pages 45–73. Springer, 2012. +[52] S. Fujimoto, D. Meger, and D. Precup. Off-policy deep reinforcement learning without exploration. arXiv preprint arXiv:1812.02900, 2018. +[53] A. Kumar, J. Fu, M. Soh, G. Tucker, and S. Levine. Stabilizing off-policy q-learning via bootstrapping error reduction. In Advances in Neural Information Processing Systems, pages 11761–11771, 2019. +[54] X. B. Peng, A. Kumar, G. Zhang, and S. Levine. Advantage-weighted regression: Simple and scalable off-policy reinforcement learning. arXiv preprint arXiv:1910.00177, 2019. +[55] N. Jaques, A. Ghandeharioun, J. H. Shen, C. Ferguson, A. Lapedriza, N. Jones, S. Gu, and R. Picard. Way off-policy batch deep reinforcement learning of implicit human preferences in dialog. arXiv preprint arXiv:1907.00456, 2019. +[56] A. Nair, M. Dalal, A. Gupta, and S. Levine. Accelerating online reinforcement learning with offline datasets. arXiv preprint arXiv:2006.09359, 2020. +[57] R. Fakoor, J. Mueller, P. Chaudhari, and A. J. Smola. Continuous doubly constrained batch reinforcement learning. arXiv preprint arXiv:2102.09225, 2021. +[58] T. Yu, G. Thomas, L. Yu, S. Ermon, J. Zou, S. Levine, C. Finn, and T. Ma. Mopo: Model-based offline policy optimization. arXiv preprint arXiv:2005.13239, 2020. +[59] R. Kidambi, A. Rajeswaran, P. Netrapalli, and T. Joachims. Morel: Model-based offline reinforcement learning. arXiv preprint arXiv:2005.05951, 2020. +[60] R. Rafailov, T. Yu, A. Rajeswaran, and C. Finn. Offline reinforcement learning from images with latent space models. Learning for Decision Making and Control (L4DC), 2021. +[61] D. Precup, R. S. Sutton, and S. Dasgupta. Off-policy temporal-difference learning with function approximation. 
In ICML, pages 417–424, 2001. +[62] C. Voloshin, H. M. Le, N. Jiang, and Y. Yue. Empirical study of off-policy policy evaluation for reinforcement learning. arXiv preprint arXiv:1911.06854, 2019. +[63] O. Nachum, Y. Chow, B. Dai, and L. Li. Dualdice: Behavior-agnostic estimation of discounted stationary distribution corrections. In Advances in Neural Information Processing Systems, pages 2315–2325, 2019. +[64] R. Qin, S. Gao, X. Zhang, Z. Xu, S. Huang, Z. Li, W. Zhang, and Y. Yu. Neorl: A near real-world benchmark for offline reinforcement learning. arXiv preprint arXiv:2102.00714, 2021. +[65] T. Haarnoja, H. Tang, P. Abbeel, and S. Levine. Reinforcement learning with deep energy-based policies. In International Conference on Machine Learning (ICML), 2017. +[66] X. B. Peng, A. Kumar, G. Zhang, and S. Levine. Advantage-weighted regression: Simple and scalable off-policy reinforcement learning. arXiv preprint arXiv:1910.00177, 2019. +[67] S. Fujimoto and S. S. Gu. A minimalist approach to offline reinforcement learning. arXiv preprint arXiv:2106.06860, 2021. 
\ No newline at end of file diff --git a/parse/train/fy4ZBWxYbIo/fy4ZBWxYbIo_content_list.json b/parse/train/fy4ZBWxYbIo/fy4ZBWxYbIo_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..7f0ddff53501f93458797a02e46e5c453ffcaa63 --- /dev/null +++ b/parse/train/fy4ZBWxYbIo/fy4ZBWxYbIo_content_list.json @@ -0,0 +1,1056 @@ +[ + { + "type": "text", + "text": "A Workflow for Offline Model-Free Robotic Reinforcement Learning ", + "text_level": 1, + "bbox": [ + 235, + 102, + 763, + 151 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Aviral Kumar?,1, Anikait Singh?,1, Stephen $\\mathbf { T i a n } ^ { 1 }$ , Chelsea $\\mathbf { F i n n ^ { 2 } }$ , Sergey Levine1 1 UC Berkeley, 2 Stanford University (βˆ— Equal Contribution) aviralk@berkeley.edu, asap7772@berkeley.edu ", + "bbox": [ + 218, + 176, + 781, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract: Offline reinforcement learning (RL) enables learning control policies by utilizing only prior experience, without any online interaction. This can allow robots to acquire generalizable skills from large and diverse datasets, without any costly or unsafe online data collection. Despite recent algorithmic advances in offline RL, applying these methods to real-world problems has proven challenging. Although offline RL methods can learn from prior data, there is no clear and well-understood process for making various design choices, from model architecture to algorithm hyperparameters, without actually evaluating the learned policies online. In this paper, our aim is to develop a practical workflow for using offline RL analogous to the relatively well-understood workflows for supervised learning problems. To this end, we devise a set of metrics and conditions that can be tracked over the course of offline training, and can inform the practitioner about how the algorithm and model architecture should be adjusted to improve final performance. 
Our workflow is derived from a conceptual understanding of the behavior of conservative offline RL algorithms and cross-validation in supervised learning. We demonstrate the efficacy of this workflow in producing effective policies without any online tuning, both in several simulated robotic learning scenarios and for three tasks on two distinct real robots, focusing on learning manipulation skills with raw image observations with sparse binary rewards. Explanatory video and additional content can be found at sites.google.com/view/offline-rl-workflow. ", + "bbox": [ + 232, + 261, + 764, + 537 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords: workflow, offline RL, offline tuning ", + "bbox": [ + 235, + 549, + 547, + 563 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction ", + "text_level": 1, + "bbox": [ + 176, + 580, + 312, + 595 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Offline reinforcement learning (RL) can in principle make it possible to convert existing large datasets of robotic experience into effective policies, without the need for costly or dangerous online interaction for each training run. While offline RL algorithms have improved significantly [1, 2, 3, 4, 5], applying such methods to real-world robotic control problems presents a number of major challenges. In standard online RL, any intermediate policy found during training is executed in the environment to collect more experience, which naturally allows for an evaluation of the policy performance. 
This ability to evaluate intermediate policies lets practitioners use β€œbrute", + "bbox": [ + 174, + 602, + 482, + 808 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/93b250387a2633b12ccbb56017457a77edb5b8bb68d22310145c16c8258e9664.jpg", + "image_caption": [ + "Figure 1: Our proposed workflow aims to detect overfitting and underfitting, and provides guidelines for addressing these issues via policy selection, regularization, and architecture design. We evaluate this workflow on two real-world robotic systems and simulation domains, and we find it to be effective. " + ], + "image_footnote": [], + "bbox": [ + 501, + 582, + 808, + 720 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "force” to evaluate the effects of various design factors, such as model capacity and expressivity, the number of training steps, and so forth, and facilitates comparatively straightforward tuning. In contrast, offline RL methods do not have access to real-world on-policy rollouts for evaluating the learned policy. Thus, in order for these methods to be truly practical for real-world applications, we not only require effective algorithms, but also an effective workflow: a set of protocols and metrics that can be used to reliably and consistently adjust model capacity, regularization, etc in offline RL to obtain policies with good performance, without requiring real-world rollouts for tuning. ", + "bbox": [ + 174, + 808, + 825, + 904 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "A number of prior works have studied model selection in offline RL by utilizing off-policy evaluation (OPE) methods [6] to estimate policy performance. These methods can be based either on model or value learning [7, 8, 9, 10] or importance sampling [6, 11, 12, 13]. However, developing reliable OPE methods is itself an open problem, and modern OPE methods themselves suffer from hyperparameter selection challenges (see Fu et al. [14] for an empirical study). 
Moreover, accurate off-policy evaluation is likely not necessary to simply tune algorithms for best performance – we do not need a precise estimate of how good our policy is, but rather a workflow that enables us to best improve it by adjusting various algorithm hyperparameters. ", + "bbox": [ + 173, + 90, + 825, + 203 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper, we devise a practical workflow for selecting regularizers, model architectures, and policy checkpoints for offline RL methods in robotic learning settings. We focus on a specific class of conservative offline RL algorithms [15, 2] that regularize the Q-function, but also show that our workflow can be effectively applied to policy constraint methods [16]. Our aim is not to focus on complete off-policy evaluation or to devise a new approach for off-policy evaluation, but rather to adopt a strategy similar to the one in supervised learning. Analogously to how supervised learning practitioners can detect overfitting and underfitting by tracking training and validation losses, and then adjust hyperparameters based on these metrics, our workflow (see Figure 1 for a schematic) first defines and characterizes overfitting and underfitting, proposes metrics and conditions that users can track to determine if an offline RL exhibits overfitting or underfitting, and then utilizes these metrics to inform design decisions pertaining to neural net architectures, regularization, and early stopping. This protocol is intended to act as a β€œuser’s manual” for a practitioner, with guidelines for how to modify algorithm parameters for best results without real-world evaluation rollouts. ", + "bbox": [ + 174, + 208, + 825, + 388 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The primary contribution of this paper is a simple yet effective workflow for robotic offline RL. 
We propose metrics and protocols to assist practitioners in selecting policy checkpoints, regularization parameters, and model architectures for conservative offline RL algorithms such as CQL [2] and BRAC [16]. We empirically verify the efficacy of our proposed workflow on simulated robotic manipulation problems as well as three real-world robotic manipulation problems on two different robots, with diverse objects, pixel observations, and sparse binary reward supervision. Experimentally, we evaluate our method on two real-world robots (the Sawyer and WidowX robots), and one realistic simulated tasks. Our approach is effective in all of these cases, and on two tasks with the Sawyer robot that initially fail completely, our workflow improves the success rate to $70 \\%$ . ", + "bbox": [ + 173, + 393, + 825, + 520 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Preliminaries, Background, and Definitions ", + "text_level": 1, + "bbox": [ + 173, + 525, + 568, + 541 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The goal in RL is to optimize the infinite horizon discounted return $\\begin{array} { r } { R = \\sum _ { t = 0 } ^ { \\infty } \\gamma ^ { t } r ( \\mathbf { s } _ { t } , \\mathbf { a } _ { t } ) } \\end{array}$ , where $r ( s , a )$ represents the reward function evaluated at a state-action pair $( \\mathbf { s } , \\mathbf { a } )$ . We operate in the offline RL setting and are provided with a fixed dataset $\\mathcal { D } = \\{ ( \\mathbf { s } , \\mathbf { a } , r ( \\mathbf { s } , \\mathbf { a } ) , \\mathbf { s } ^ { \\prime } ) \\}$ , consisting of transition tuples obtained from rollouts under a behavior policy $\\pi _ { \\beta } ( \\mathbf { a } | \\mathbf { s } )$ . Our goal is to obtain the best possible policy by only training on this fixed offline dataset $\\mathcal { D }$ , with no access to online rollouts. 
We focus on conservative offline RL algorithms that modify the $\\mathrm { Q }$ -function to penalize distributional shift, with most experiments on CQL [2], though we also adapt our workflow to BRAC [16] in Appendix F.1. ", + "bbox": [ + 173, + 542, + 825, + 640 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Conservative Q-learning (CQL). The actor-critic formulation of CQL trains a Q-function $Q _ { \\boldsymbol { \\theta } } ( \\mathbf { s } , \\mathbf { a } )$ with a separate policy $\\pi _ { \\phi } ( \\mathbf { a } | \\mathbf { s } )$ , which maximizes the expected $\\mathrm { Q }$ -value $\\begin{array} { r } { \\mathbb { E } _ { \\mathbf { s } \\sim \\mathcal { D } , \\mathbf { a } \\sim \\pi _ { \\phi } } \\left[ Q _ { \\theta } ( \\mathbf { s } , \\mathbf { a } ) \\right] } \\end{array}$ like other standard actor-critic deep RL methods [17, 18, 19]. However, in addition to the standard TD error ${ \\mathcal { L } } _ { \\mathrm { T D } } ( \\theta )$ (in blue below), CQL applies a regularizer ${ \\mathcal { R } } ( \\theta )$ (in red below) to prevent overestimation of $\\mathrm { Q }$ -values for out-of-distribution (OOD) actions. 
This term minimizes the $\\mathrm { Q }$ -values under a distribution $\\mu ( \\mathbf { a } | \\mathbf { s } )$ , which is automatically chosen to pick actions a with high Q-values $Q _ { \\boldsymbol { \\theta } } ( \\mathbf { s } , \\mathbf { a } )$ , and counterbalances this term by maximizing the values of the actions in the dataset: ", + "bbox": [ + 173, + 645, + 825, + 742 + ], + "page_idx": 1 + }, + { + "type": "equation", + "img_path": "images/14481712837aa8b4041c0afbae4d0a62161a885580f62a118ffd01fc68c921ec.jpg", + "text": "$$\n\\begin{array} { r l } { \\underset { \\theta } { \\mathrm { m i n } } \\ : \\ : \\ : } & { { } \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : } \\\\ { \\mathrm { m i n } \\ : \\ : \\ : \\ : \\ : } & { { } \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : } & { \\mathrm { ~ \\ : ~ \\ : \\ : \\ : \\ : } \\ : \\ : \\ : } \\end{array}\n$$", + "text_format": "latex", + "bbox": [ + 163, + 741, + 810, + 770 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where $B ^ { \\pi } \\bar { Q } ( \\mathbf { s } , \\mathbf { a } )$ is the Bellman backup operator with a delayed target Q-function, $\\bar { Q }$ : $B ^ { \\pi } \\bar { Q } ( { \\bf s } , { \\bf a } ) : =$ $r ( \\mathbf { s } , \\mathbf { a } ) + \\gamma \\mathbb { E } _ { \\mathbf { a ^ { \\prime } } \\sim \\pi ( \\mathbf { a ^ { \\prime } } | \\mathbf { s ^ { \\prime } } ) } [ \\bar { Q } ( \\mathbf { s ^ { \\prime } } , \\mathbf { a ^ { \\prime } } ) ]$ . In practice, CQL computes $\\mu ( \\mathbf { a } | \\mathbf { s } )$ using actions sampled from the policy $\\pi _ { \\phi } ( \\mathbf { a } | \\mathbf { s } )$ . More discussion of CQL is in Appendix B. In this paper, we will utilize CQL as a base algorithm that our workflow intends to tune, but we also extend it to BRAC. 
", + "bbox": [ + 173, + 777, + 825, + 835 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Overfitting and underfitting in CQL. Conservative offline RL algorithms [2, 20] like CQL can be sensitive to design choices, including number of gradient steps for training [21, 22] and network capacity. These challenges are also present in supervised learning, but supervised learning methods benefit from a simple and powerful workflow that involves using training error and validation error to characterize overfitting and underfitting. A practitioner can then make tuning choices based on these characterizations. To derive an analogous workflow for offline RL, we first ask: what do overfitting and underfitting actually mean for the case of conservative offline RL? ", + "bbox": [ + 174, + 842, + 825, + 911 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "", + "bbox": [ + 171, + 90, + 823, + 119 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To define overfitting and underfitting generically for any conservative offline RL method, we consider an abstract optimization formulation for such methods [2]: ", + "bbox": [ + 174, + 126, + 823, + 154 + ], + "page_idx": 2 + }, + { + "type": "equation", + "img_path": "images/5f87e2d9edb1ebc744585eaf9bbbefae1189640155a200a885d288ec1f092126.jpg", + "text": "$$\n\\pi ^ { * } : = \\arg \\operatorname* { m a x } _ { \\pi } ~ J _ { \\mathcal { D } } ( \\pi ) - \\alpha D ( \\pi , \\pi _ { \\beta } )\n$$", + "text_format": "latex", + "bbox": [ + 212, + 155, + 464, + 176 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "(Conservative offline RL). 
", + "bbox": [ + 504, + 156, + 676, + 171 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "$J _ { \\mathcal { D } } ( \\pi )$ denotes the average return of policy $\\pi$ in the empirical MDP induced by the transitions in the offline dataset $\\mathcal { D }$ , and $D ( \\pi , \\pi _ { \\beta } )$ denotes a closeness constraint to the behavior policy, effectively applied by the offline RL method. Our definition of conservative offline RL requires that this ", + "bbox": [ + 173, + 189, + 354, + 340 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/1e432827fa691c00d934499d561870f9ab21e128eee2a69b3018b096d8e40450.jpg", + "table_caption": [ + "Table 1: Summary of train error, test error and our definitions of overfitting and underfitting in supervised learning and conservative offline RL methods. We will propose metrics to measure these phenomena in a purely offline manner and recommend how to tune the underlying method accordingly. " + ], + "table_footnote": [], + "table_body": "
QuantitySupervised LearningConservative Offline RL
Test errorLoss L evaluated on test data,DtestPerformance of policy,J(Ο€)
Train errorLoss L evaluated on train data,DtrainObjective in Equations 2,1
OverfittingL(Dtrain) low,L(Dval) high,Dval is a validation set drawn i.i.d.as DtrainTraining objective in Equation l is ex- tremely low,low value of J(Ο€)
Underfittinghigh value of train error L(Dtrain)Training objective in Equation 1 is ex- tremely high,low value of J(Ο€)
", + "bbox": [ + 377, + 193, + 812, + 281 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "divergence be computed in expectation over the state visitation distribution of the learned policy $\\pi$ in the empirical MDP as discussed in Appendix F.1. For example, Equation 1 translates to utilizing $\\begin{array} { r } { D _ { \\mathrm { C Q L } } ( p , \\bar { q } ) : = \\sum _ { \\mathbf { x } } p ( \\mathbf { x } ) ( p ( \\mathbf { x } ) / q ( \\mathbf { x } ) - \\bar { 1 } ) } \\end{array}$ in Equation 2 (see Theorem 3.5 in Kumar et al. [2] for a proof). The training loss is discussed in Equations 1 and 2 and the test loss is equal to the negative of the actual return $J ( \\pi )$ of the learned policy. Analogously to supervised learning, we can use the notion of train and test error to define overfitting and underfitting in offline RL, as discussed in Table 1. However, note that the conditions summarized in Table 1 are not measurable completely offline. Precisely estimating if a run of an offline RL method overfits or underfits requires evaluating the learned policy via interaction with the real-world environment. In Section 3, our goal will be to devise offline metrics for characterizing overfitting that do not have this requirement. We will tailor our study specifically towards CQL, though we extend it to BRAC in Appendix F.1. A similar procedure could be devised for other offline RL methods, but we leave this for future work. ", + "bbox": [ + 173, + 340, + 825, + 507 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 Detecting Overfitting and Underfitting in Conservative Offline RL ", + "text_level": 1, + "bbox": [ + 173, + 517, + 754, + 534 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In standard supervised learning, we can determine if a method overfits or underfits by comparing the training loss to the same loss function evaluated on a held-out validation dataset, which serves as a β€œproxy” test dataset. 
In contrast, the return of the learned policy $J ( \\pi )$ in RL does not have a direct proxy that can be computed offline. Thus, our goal is to identify offline metrics and conditions that allow us to measure overfitting and underfitting in conservative offline RL, with a focus on CQL. We also adapt these conditions to BRAC [16], a policy-constraint method in Appendix F.2. ", + "bbox": [ + 173, + 539, + 825, + 622 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Detecting overfitting in CQL. Our definition of overfitting (Table 1) corresponds to a low value for the training loss (Equation 1), but poor actual policy performance $J ( \\pi )$ . To detect this, we analyze the time series of the estimated Q-values averaged over the dataset samples $( \\mathbf { s } , \\mathbf { a } , r , \\mathbf { s } ^ { \\prime } ) \\in \\mathcal { D }$ over the course of training with a large number of gradient steps. A run is labeled as overfitting if we see that the expected dataset Q-value exhibits a non-monotonic trend: if the average Q-values first increase and then decrease as shown in the figure on the right. Additionally, we would see that training loss in Equation 1 eventually becomes very low. Why do we see such a trend in the average dataset $\\mathbf { Q }$ -value? Since CQL selectively penalizes the average Qvalue under the distribution $\\mu ( \\mathbf { a } | \\mathbf { s } )$ supported on actions with large Q-values, we would expect the Q-values on states from the dataset s $\\sim \\mathcal { D }$ and the learned $\\mathbf { a } \\sim \\pi ( \\cdot | \\mathbf { s } )$ to be small since the policy is trained to maximize the Q-function as well. This in turn would lead to an eventual reduction in the average Q-value on dataset actions, $\\mathbb { E } _ { \\mathbf { s } , \\mathbf { a } \\sim \\mathcal { D } } [ Q _ { \\theta } ( \\mathbf { s } , \\mathbf { a } ) ]$ . 
This would be visible after sufficiently many steps of training, when values have propagated via Bellman backups in Equation 1 giving rise to the non-monotonic trend. If such a trend is observed, this raises two questions, as we discuss next. ", + "bbox": [ + 174, + 628, + 647, + 738 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/a834916e9152742e6ff0bc3376e3cee94d030ab397761d3b7ccebf41e7204020.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 658, + 636, + 823, + 736 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "", + "bbox": [ + 173, + 739, + 825, + 849 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "What does a low average $\\varrho$ -value $\\mathbb { E } _ { \\mathbf { s } , \\mathbf { a } \\sim \\mathcal { D } } [ Q _ { \\theta } ( \\mathbf { s } , \\mathbf { a } ) ]$ imply about $J ( \\pi )$ ? We show in Appendix A that, in principle, CQL training (Equation 1) should never learn Q-values smaller than the dataset Monte-Carlo return, and the $\\mathrm { Q }$ -values should increase unless the learned policy $\\pi$ is better than $\\pi _ { \\beta }$ . Intuitively, this is because the objective in Equation 1 aims to also maximize the average dataset ", + "bbox": [ + 174, + 854, + 825, + 911 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Q-value and thus the Q-values for the behavior policy are not underestimated in expectation. Now, if the policy optimizer finds a policy that attains a smaller learned Q-value than the dataset return, the policy can always be updated further towards the behavior policy so as to raise the Q-value. Therefore, Q-values can only decrease when the policy found by CQL is better than the behavior policy. We formalize this intuition in Appendix A in Theorem A.1. 
Thus, a low $\\mathrm { Q }$ -value on $( \\mathbf { s } , \\mathbf { a } ) \\in$ $\\mathcal { D }$ indicates that the Q-function predicts extremely small Q-values on actions sampled from $\\mu ( \\mathbf { a } | \\mathbf { s } )$ . Typically, this would mean the highest Q-value actions a at a state $\\mathbf { s } \\in \\mathcal { D }$ are those sampled from the offline dataset, drawn from the behavior policy. Thus, policy optimization, which aims to maximize the Q-value, would make $\\pi ( \\mathbf { a } | \\mathbf { s } )$ closer to the behavior policy $\\pi _ { \\beta } ( \\mathbf { a } | \\mathbf { s } )$ on $\\mathbf { s } \\in \\mathcal { D }$ . ", + "bbox": [ + 173, + 90, + 825, + 217 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Which training checkpoint is likely to attain the best policy performance? Tracking overfitting in supervised learning is important for selecting the best-performing checkpoint, before overfitting becomes severe. Analogously, we can compare the average dataset Q-value across different checkpoints within the same run to pick the best policy. Since CQL aims to increase the average dataset Q-value (Equation 1), we would expect Q-values to initially increase, until learning starts to overfit and the average dataset $\\mathrm { Q }$ -value starts decreasing. We should therefore select the latest checkpoint that corresponds to a peak in the estimated dataset Q-value. A visual illustration of this idea is shown in the figure on the previous page, where the checkpoint marked by the green line is recommended to be chosen. In summary, (a) to detect overfitting we can track: ", + "bbox": [ + 173, + 222, + 825, + 347 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Metric 3.1 (Overfitting). 
A low average data $Q$ -value $\\mathbb { E } _ { \\mathbf { s } , \\mathbf { a } \\sim \\mathcal { D } } [ Q _ { \\theta } ( \\mathbf { s } , \\mathbf { a } ) ]$ that decreases with more gradient steps on Equation 1 indicates that the offline RL algorithm is overfitting. ", + "bbox": [ + 189, + 358, + 807, + 387 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "and (b) further, given a run that exhibits overfitting, our principle for policy selection is given by: ", + "bbox": [ + 173, + 402, + 805, + 417 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Guideline 3.1 (Policy selection). If a run overfits (per Metric 3.1), select the checkpoint that attains the highest average dataset $Q$ -value before overfitting for deployment. ", + "bbox": [ + 183, + 428, + 805, + 458 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Finally, for actor-critic algorithms [18] that update the actor slower than the critic, the next policy checkpoint after the peak in the average dataset Q-value appears must be selected. In most of our experiments, we find that simply utilizing the policy checkpoint at the point of the peak in the Qvalue also leads to good results making this a rare concern, but in some cases, utilizing the next checkpoint after the Q-value peak performs better empirically. ", + "bbox": [ + 173, + 473, + 823, + 542 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Detecting underfitting in CQL. Next, we turn to devising a procedure to detect underfitting. As summarized in Table 1, underfitting occurs when the RL algorithm is unable to minimize the training objective in Equation 1 effectively. Therefore, large values for the TD error, the CQL regularizer, or both imply underfitting. 
A large value for the CQL regularizer, ${ \\mathcal { R } } ( \\theta )$ , indicates an overestimation of Q-values relative to their true value [2] and thus, unlike the overfitting regime, we would not expect the average learned $\\mathrm { Q }$ -value to decrease with more training. Thus, one approach to predict underfitting is to track both the TD error, ${ \\mathcal { L } } _ { \\mathrm { T D } } ( \\theta )$ , and the CQL regularizer, ${ \\mathcal { R } } ( \\theta )$ , and check if the value of even one of these quantities is large. More discussion is provided in Appendix A. ", + "bbox": [ + 173, + 549, + 648, + 646 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/4b1e144e763ab56eff446401eff0f116a6cc810b779d08af3c7a767883b102d8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 658, + 547, + 818, + 645 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "", + "bbox": [ + 173, + 647, + 818, + 688 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "How do we determine if the $\\mathbf { \\nabla } ^ { T D }$ error and the CQL regularizer are β€œlarge”? In order to determine if the error of a particular run is large, we can rerun the base CQL algorithm but with models of higher capacity, which does not necessarily correspond to the function approximator size, as we will discuss in Section 4. For each model, we record the corresponding training errors and check if the training TD error and CQL regularizer value are reduced with capacity increase. If increasing capacity leads to a reduction in the loss without exhibiting the overfitting signs described previously, then we are in an underfitting regime. 
Another approach to answer the question is to utilize the value of the TD error $\\left( \\mathcal { L } _ { \\mathrm { T D } } ( \\theta ) \\right)$ and the task horizon $( 1 / ( 1 - \\gamma ) )$ to estimate the overall error in the learned Q-values against the actual Q-value, which is equal to $\\dot { \\mathcal { L } } _ { \\mathrm { T D } } ( \\theta ) / ( 1 - \\gamma )$ [23] (see Appendix A). If this overall error spans the range of allowed Q-values on the task – which could be inferred based on the structure of the reward function in the task – then we can say that the algorithm is underfitting. ", + "bbox": [ + 173, + 694, + 825, + 847 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Metric 3.2 (Underfitting). Compute the values of the training $T D$ error, ${ \\mathcal { L } } _ { \\mathrm { T D } } ( \\theta )$ and CQL regularizer, ${ \\mathcal { R } } ( \\theta )$ for the current run and another identical run with increased model capacity. If the training errors reduce with increasing model capacity, the original run was underfitting. ", + "bbox": [ + 192, + 857, + 805, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4 Addressing Overfitting and Underfitting in Conservative Offline RL ", + "text_level": 1, + "bbox": [ + 173, + 89, + 767, + 107 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The typical workflow for supervised learning not only identifies overfitting and underfitting, but also guides the practitioner how to adjust their method so as to alleviate it (e.g., by modifying regularization or model capacity), thus improving performance. Can we devise similar guidelines to address overfitting and underfitting with conservative offline RL? Here, we discuss some ways to adjust regularization and model capacity to alleviate these phenomena. ", + "bbox": [ + 174, + 108, + 825, + 178 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Capacity-decreasing regularization for overfitting. 
As we observed in Section 3, the mechanism behind extremely low $\\mathbf { Q }$ -values on the dataset is that CQL training minimizes $\\mathrm { Q }$ -values on actions sampled from $\\mu ( \\mathbf { a } | \\mathbf { s } )$ . Two possible approaches to preventing over-minimization of these values are (1) applying regularization such as dropout [24] on Q-function layers, similar to supervised learning, and (2) enforcing that representations of the learned Q-function match a pre-specified target for all state-action tuples. For (2), we can apply techniques such as a variational information bottleneck (VIB) [25, 26] regularizer on the learned representations, $\\phi ( \\mathbf { s } )$ . Formally, let $( \\mathbf { s } , \\mathbf { a } )$ denote a stateaction pair. Instead of predicting a deterministic $\\phi ( \\mathbf { s } ) \\in \\mathbb { R } ^ { d }$ (Figure 10), we modify the Q-network to predict two distinct vectors, $\\phi _ { m } ( \\mathbf { s } ) \\in \\mathbb { R } ^ { d }$ and $\\phi _ { \\Sigma } ( \\mathbf { s } ) \\in \\mathbb { R } ^ { d }$ , and sample $\\phi ( \\mathbf { s } )$ randomly from a Gaussian centered at $\\phi _ { m }$ with covariance $\\phi _ { \\Sigma }$ , i.e., $\\phi ( \\mathbf { s } ) \\sim \\mathcal { N } ( \\phi _ { m } ( \\mathbf { s } ) , \\mathrm { d i a g } ( \\phi _ { \\Sigma } ( \\mathbf { s } ) )$ . 
VIB then regularizes $\\mathcal { N } ( \\phi _ { m } ( \\mathbf { s } ) , \\mathrm { d i a g } ( \\phi _ { \\Sigma } ( \\mathbf { s } ) )$ to be close to a prior distribution, $\\mathcal { N } ( 0 , \\mathbb { I } )$ : ", + "bbox": [ + 173, + 183, + 825, + 338 + ], + "page_idx": 4 + }, + { + "type": "equation", + "img_path": "images/9db0d18bcf179c6a8325f93c9a8d0886149e29c38e5c7dbf3d677141313eeab5.jpg", + "text": "$$\n\\operatorname* { m i n } _ { \\theta } \\ \\mathcal { L } _ { \\mathrm { C Q L } } ( \\theta ) + \\beta \\mathbb { E } _ { \\mathrm { s } \\sim \\mathcal { D } } \\left[ \\mathrm { D } _ { \\mathrm { K L } } \\left( \\mathcal { N } ( \\phi _ { m } ( \\mathbf { s } ) , \\mathrm { d i a g } ( \\phi _ { \\Sigma } ( \\mathbf { s } ) ) ) \\ | | \\mathcal { N } ( 0 , \\mathbb { I } ) \\right) \\right] \\quad ( \\mathrm { V I B ~ r e g u l a r i z e r } ) ,\n$$", + "text_format": "latex", + "bbox": [ + 209, + 340, + 803, + 362 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Guideline 4.1. To address overfitting, we recommend using some form of capacity-decreasing regularization on the $Q$ -function, such as dropout or the VIB regularizer shown in Equation 3. ", + "bbox": [ + 187, + 373, + 805, + 402 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Capacity-increasing techniques for underfitting. To address underfitting, we need to increase model capacity to improve optimization of the training objective. Analogous to supervised learning, model capacity can be increased by using more expressive neural nets (e.g., ResNets [27], transformers [28]) for representing the learned policy. We use ResNets in our experiments (Figure 10). 
However, the RL setting presents an additional challenge with capacity: while larger models in principle have more capacity, recent work [29, 21, 22] has shown that utilizing larger networks to represent Q-functions does not always improve its capacity in practice, because TD-based RL methods introduce an β€œimplicit under-parameterization” effect that can result in aliased (i.e., similar) internal representations for different state-action inputs, even for very large neural networks that can express the true Q-function effectively. To address this issue, these works apply a β€œcapacityincreasing” regularizer to Q-function training. For instance, we can use the DR3 regularizer [22], which penalizes the dot product of $\\phi ( \\mathbf { s } )$ and $\\phi ( \\mathbf { s } ^ { \\prime } )$ for a transition $( \\mathbf { s } , \\mathbf { a } , \\mathbf { s } ^ { \\prime } ) \\in \\mathcal { D }$ , and hence reduces aliasing. This objective is given by: ", + "bbox": [ + 173, + 419, + 825, + 599 + ], + "page_idx": 4 + }, + { + "type": "equation", + "img_path": "images/55e6e675c67ec1e3e4745a24fa9f244367408982eb82461b79c4a5d6cace2f4e.jpg", + "text": "$$\n\\operatorname* { m i n } _ { \\theta } \\ \\mathcal { L } _ { \\mathrm { C Q L } } ( \\theta ) + \\beta \\mathbb { E } _ { { \\mathbf s } , { \\mathbf a } , { \\mathbf s } ^ { \\prime } \\sim \\mathcal { D } } \\left[ \\left| \\phi ( { \\mathbf s } ) ^ { \\top } \\phi ( { \\mathbf s } ^ { \\prime } ) \\right| \\right] \\qquad ( { \\mathrm { D R 3 ~ r e g u l a r i z e r ~ } } [ 2 2 ] ) ,\n$$", + "text_format": "latex", + "bbox": [ + 210, + 606, + 699, + 631 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Guideline 4.2. To address underfitting, we recommend using some capacity-increasing regularization on the Q-function and the policy either in conjunction or separately. Examples: (1) bigger policy networks (e.g., ResNets), (2) DR3 regularizer on the Q-network. 
", + "bbox": [ + 191, + 640, + 807, + 683 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5 Evaluation of Our Workflow Metrics and Protocols in Simulation ", + "text_level": 1, + "bbox": [ + 173, + 695, + 750, + 713 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Next, we empirically validate the workflow proposed in Sections 3 and 4 on a suite of simulated robotic manipulation domains that mimic real-robot scenarios, from image observations with sparse binary rewards. We will examine how applying the workflow in Section 3 to detect overfitting or underfitting and then utilizing the strategies in Section 4 affects the performance of offline RL methods. An improved performance would indicate the efficacy of our workflow in making successful design decisions without any online tuning. ", + "bbox": [ + 174, + 715, + 549, + 833 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/feadf98282550c14dd6939d9b1aaebdb526316856a1034070ef48af1c82a1480.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 562, + 717, + 823, + 809 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "", + "bbox": [ + 178, + 827, + 821, + 839 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Experimental setup. We use the environments from Singh et al. [3] to design offline RL tasks and datasets that we use for our empirical analysis. We consider two tasks: (1) a pick and place task and (2) a grasping object from a drawer task. Examples of trajectories in both of these simulated domains are shown in Figure 2 and are detailed in Appendix D. Briefly, the pick and place task consists of a 6-DoF WidowX robot in front of a tray with an object. The goal is to put the object inside the tray. A non-zero reward of $+ 1$ is provided only when the object has been placed in the box. 
The offline dataset for this task consists of trajectories that grasp an object with a $3 5 \\%$ success and other trajectories that place an object with a $40 \\%$ success. Our second task is a grasping from drawer task where the WidowX robot is placed in front of a drawer and multiple objects. The robot can open or close the drawer, grasp objects from inside the drawer or on the table, and place them anywhere in the scene. The goal is to close the top drawer, then open the bottom drawer and take the object out. Only if the object has been taken out, a reward of $+ 1$ is obtained. The offline dataset consists of trajectories with a $3 0 { - } 4 0 \\%$ success rate for opening and closing a drawer and other trajectories with only $40 \\%$ placing success. We use $\\alpha = 1 . 0$ for CQL training in all experiments, which is directly taken from prior work [3], without any tuning. However, too low or too high $\\alpha$ values will inhibit the effectiveness of regular CQL and we first need to tune $\\alpha$ as discussed in Appendix G. More details are provided in Appendix D. ", + "bbox": [ + 174, + 845, + 825, + 915 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "", + "bbox": [ + 174, + 90, + 549, + 367 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/7149b62299170ecd7a26a8a110d939daa58337b186bfd1671932f83c2c649963.jpg", + "image_caption": [ + "Figure 3: Policy performance (Top) and average dataset Q-values of CQL (bottom) with varying number of trajectories. Vertical bands indicate regions around the peak in average $\\mathrm { Q }$ -value and observe that these regions correspond to policies with good actual performance. " + ], + "image_footnote": [], + "bbox": [ + 575, + 97, + 812, + 268 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Scenario #1: Variable amount of training data. 
Our first scenario consists of the simulated tasks discussed above with a variable number of trajectories in the training data (50, 100, 500, 10000). We run CQL and track metrics 3.1 and 3.2 in each case. Observe in Figure 3 (bottom) that with fewer trajectories, the average dataset Q-value $\\mathbb { E } _ { \\mathbf { s } , \\mathbf { a } \\sim \\mathcal { D } } [ Q _ { \\theta } ( \\mathbf { s } , \\mathbf { a } ) ]$ first rises, and then drops. This matches the description of overfitting in Section 3. Observe in Figure 4 (left) that, at the same time, the value of the CQL regularizer is very low, which is not consistent with what we expect of underfitting. Thus, we can conclude that these conditions exhibit overfitting, especially with 50 and 100 trajectories. The vertical dashed lines indicate the checkpoints that would be selected for evaluation per Guideline 3.1. We further visualize the performance of the chosen checkpoints against the actual return of each intermediate policy in Figure 3 (top). Note that this value is obtained by rolling out the learned policy, and would not be available in a realistic offline RL setting, but is provided only for analysis. Selecting the checkpoint based on Guideline 3.1 leads us to select a model with close to the peak performance over the training process, validating the efficacy of Guideline 3.1. ", + "bbox": [ + 173, + 373, + 825, + 429 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "", + "bbox": [ + 174, + 430, + 516, + 662 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Since we detected overfitting by following our workflow, we now aim to address it by using the VIB regularizer in the setting with 100 trajectories. As shown in Figure 4 (right), applying this regularizer not only alleviates the drop in Q-values after many training steps, but allows us to pick later checkpoints in training which perform better than base CQL on both the tasks. 
This validates that overfitting, as detected via our workflow, can be effectively mitigated by decreasing capacity, in this case by using VIB. We evaluate dropout, $\\ell _ { 1 }$ and $\\ell _ { 2 }$ regularization schemes in Appendix J. ", + "bbox": [ + 173, + 670, + 516, + 710 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/c8b831ad68078d018a8733761f0da0593a9224d909869ecb852f0ffd8ee06f24.jpg", + "image_caption": [ + "Figure 4: Left: CQL regularizer attains low values, especially with 50 and 100 trajectories in the pick and place task, Right: Using VIB mitigates overfitting, giving rise to a stable trend in $\\mathrm { Q }$ -values and better performance which does not degrade with more training steps. " + ], + "image_footnote": [], + "bbox": [ + 535, + 434, + 815, + 627 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "", + "bbox": [ + 173, + 710, + 825, + 781 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Scenario #2: Multiple training objects. Our second test scenario consists of the pick and place task, modified to include a variable number of object types (1, 5, 10, 20, 35). Handling more objects requires higher capacity, since each object has a different shape and appearance. In each case, CQL is provided with 5000 trajectories. Following our workflow from Section 3, we first compute the average dataset Q-value and the training TD error. We observe in Figure 5 that, unlike in Scenario #1, Q-values do not generally decrease when trained for many steps, suggesting that the Q-function is likely not overfitting. To check for underfitting, we visualize the training TD error and find that, with 10, 20 and 35 objects, TD error magnitudes are in the range of [1.0, 2.0], which suggests a overall Q-value error of [30.0, 60.0] since the task horizon is 30. 
On an absolute scale, this error magnitude is large: since the rewards are $_ { 0 / 1 }$ , the range of difference between actual Q-values for any two policies is at most 30, which suggests that the error magnitude in the runs in Figure 5 are high. Hence, we conclude that this scenario generally exhibits underfitting with more objects. Indeed this trend is reflected in the policy performance that we plot for analysis in Figure 5: note that the policy return decreases with an increased number of objects, and the policy performance initially increases and saturates at a suboptimal value. ", + "bbox": [ + 173, + 786, + 825, + 898 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "", + "bbox": [ + 173, + 90, + 825, + 188 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/abb86c89607619cd53397449a92fc47f01701017a73f7df2599b8c654f14d0ca.jpg", + "image_caption": [ + "Figure 5: Performance (left), TD error (middle) and average dataset Qvalues (right) for the pick and place task with a variable number of objects. Note that while the learned Q-values increase and stabilize, the TD error values in scenarios with more than 10 objects are large (1.0-2.0). Correspondingly, the performance generally decreases as the number of objects increases. " + ], + "image_footnote": [], + "bbox": [ + 187, + 196, + 632, + 303 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b4ce278afd05bdaaa57ddb722d6767ecad2d352a23720d7d7f144183cc608616.jpg", + "image_caption": [ + "Figure 6: Correcting underfitting by applying our workflow for 35 objects. " + ], + "image_footnote": [], + "bbox": [ + 673, + 200, + 818, + 315 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To address underfitting in the multi-object case, we apply the proposed capacity-increasing measures to the 35-object task (results for 10 and 20 object settings are in Appendix I). 
We use a more expressive ResNet architecture for the policy and the DR3 regularizer for the Q-function together. Observe in the figure on the right that this combination (shown in red) improves policy performance in this setting (compared to green), which validates our workflow protocol for addressing underfitting. ", + "bbox": [ + 174, + 372, + 826, + 443 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "6 Tuning CQL for Real-World Robotic Manipulation ", + "text_level": 1, + "bbox": [ + 173, + 450, + 633, + 467 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Having evaluated the efficacy of our proposed workflow in simulation, we now utilize our workflow to tune CQL for real-world robotic manipulation. We test in two setups that require the robot to learn from sparse binary rewards and image observations. The settings differ in robot platform, task specification, and dataset size. Additional results and robot videos are at the following website: https://sites. google.com/view/offline-rl-workflow ", + "bbox": [ + 174, + 469, + 516, + 594 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Sawyer manipulation tasks [30]. First, we train a ", + "bbox": [ + 173, + 601, + 516, + 614 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/566e1138148ac542d1676690513ab2bab4af3cbfec1257dc70a864019d00681c.jpg", + "image_caption": [ + "Figure 7: Real-world tasks. Successful rollouts of CQL tuned with our workflow from Sections 3 & 4. Top to bottom: Sawyer lid on pot, Sawyer drawer opening, WidowX pick-place task. " + ], + "image_footnote": [], + "bbox": [ + 531, + 476, + 821, + 549 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Sawyer robot in a tabletop setting to perform two tasks: (1) placing the lid onto a pot and (2) opening a drawer. The robot must perform these tasks in the presence of visual distractor objects, as shown in Figure 7. 
We directly use the dataset of 100 trajectories for each task collected by Khazatsky et al. [30] for our experiments so as to mimic the real-world use case of leveraging existing data with offline RL. We use four-dimensional actions with 3D end-effector velocity control in xyz-space and 1D gripper open/close action. More details regarding the setup are provided in Appendix D. ", + "bbox": [ + 173, + 616, + 825, + 698 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We run default CQL on these tasks and track the average Q-value, TD error, and CQL regularizer value. As shown in Figure 8, the average Q-value does not decrease over training, and the TD error (and CQL regularizer shown in Appendix E.2) is large. Per our discussion in Section 3, this indicates underfitting. Following our guidelines from Section 4, we utilize a more expressive ResNet policy (Figure 10), which increases the number of total convolutional layers from 3 to 9. We observe that this reduces the values of both the TD error Figure 8 and CQL regularizer (Appendix E.2) on both tasks. We ", + "bbox": [ + 174, + 705, + 483, + 907 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d5317090e1ad32bc153c1daf1e4d3802dfab85d171f13b7f15890473be1358cc.jpg", + "image_caption": [ + "Figure 8: Average Q-value and TD error on Sawyer tasks as model capacity increases. Q-values increase over training with lower capacity ruling out overfitting and increasing model capacity leads to a reduction in TD error indicating the presence of underfitting. " + ], + "image_footnote": [], + "bbox": [ + 504, + 699, + 816, + 815 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "then evaluate the learned policy over 12 trials conducted with different sets of distractor objects, including ones that are unseen during training. 
While the policy trained using base CQL is unable to successfully complete either task even once attaining a score of 0/12 on both tasks, the run that uses ResNet attains a significantly better success rate of 9/12 on the put lid on pot task and 8/12 on the drawer opening task, equal to $7 0 . 8 \\%$ success rate on average. ", + "bbox": [ + 173, + 883, + 825, + 911 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b9dc053e0a74e8660fec33a5ef35e25d9ae5b1e03b4824f6ff4428e483bb79a7.jpg", + "image_caption": [ + "Figure 9: Q-values (left) and performance of CQL with (middle) and without (right) the variational information bottleneck correction for overfitting on the real-world widowX pick and place task. Since the Q-values start to decrease with more training, our workflow detects that CQL is overfitting. Using our policy selection guideline (Guideline 3.1) enables us to choose checkpoint 50 marked with the green vertical dashed line (right) which performs well. Further, addressing overfitting by applying the VIB regularizer stabilizes the Q-values (brown) which do not decrease unlike base CQL (blue) (left). Finally, applying the VIB regularizer improves performance and reduces sensitivity to policy selection (middle). " + ], + "image_footnote": [], + "bbox": [ + 205, + 93, + 785, + 196 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "", + "bbox": [ + 176, + 303, + 821, + 344 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "WidowX pick and place task. In our second setting, we tune CQL on a pick and place task with a WidowX 250 robotic arm, shown in Figure 7. The dataset consists of 200 trajectories collected by running a noisy scripted policy (Appendix D) with $3 5 \\%$ success. We run CQL on this task and track the average Q-values, which we find initially increase and then decrease (Figure 9 (left; labeled as β€œQ-values”)), indicating overfitting. 
We then evaluate our policy selection scheme, which in this case suggests deploying checkpoint 50, the immediate checkpoint after the peak in Q-values. To see if this checkpoint is effective, we evaluate the performance of a few other policy checkpoints (for analysis only) and plot this performance trend in Figure 9 (right) as a dashed line. Observe that indeed the checkpoint found by our workflow attains the highest success rate (7/9) compared to other checkpoints, which only succeed $\\leq 4 / 9$ times. ", + "bbox": [ + 173, + 351, + 825, + 489 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Since overfitting is detected, we now turn to addressing overfitting by adding the VIB regularizer (Equation 3) during training. As shown in Figure 9 (left), the Q-values obtained after the addition of this regularizer (shown in brown; labeled β€œQ-values (VIB)”) are now stable and do not decrease over the course of training and so we can choose any policy for evaluation. We evaluate multiple policies, for visualization pur", + "bbox": [ + 174, + 496, + 483, + 619 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/b3b18140952eb1d991a800b9e93dd4974b62b6845fbf24ef1ed2536ac7a948e7.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Real-world WidowX pick and place
MethodEpoch5075100200
CQL7/94/94/92/9
CQL + VIB3/98/97/97/9
", + "bbox": [ + 501, + 494, + 815, + 549 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 2: Performance of various policy checkpoints of CQL and $\\mathrm { C Q L + V I B }$ on the real WidowX pick and place task (bold entry denotes the checkpoint selected by our workflow). Note that when overfitting is corrected via VIB, multiple checkpoints perform well. ", + "bbox": [ + 496, + 553, + 823, + 617 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "poses only, in Figure 9 (middle), we find that all of them attain a $\\geq 7 / 9$ success, comparable or better than the base CQL algorithm (Figure 9 (right)). This indicates that addressing overfitting not only leads to some gains in performance but also greatly simplifies policy selection as all checkpoints perform similarly and well. Table 2 summarizes these results below, where the bold entries denote the checkpoints found by our policy selection rule. These results indicate the effectiveness of our workflow in tuning CQL by addressing overfitting and underfitting on multiple real robot platforms. ", + "bbox": [ + 174, + 621, + 825, + 703 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "7 Discussion ", + "text_level": 1, + "bbox": [ + 174, + 713, + 294, + 729 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "While offline RL algorithms have improved significantly, applying these methods to real-world robotic domains is still challenging due to little guidance on tuning them. In this paper, we devise a workflow for algorithms such as CQL and BRAC, which consists of a set of metrics and conditions that can be tracked by a practitioner over the course of offline training to detect overfitting and underfitting, and recommendations to addresses the observed challenges. Applying our workflow both in simulation and the real world shows strong performance benefits. 
While our proposed workflow is an initial step towards practical robotic offline RL and is based on our best conceptual understanding of certain offline RL algorithms, these guidelines are heuristic. To some extent this is unavoidable, since a workflow is a set of guidelines and recommendations, rather than a rigid algorithm. Regardless of how theoretically justified it is, in the end, its value is determined by its ability to produce good results. We believe the breadth of tasks considered, which consist of two different real robots and multiple simulated tasks, indicates its broad applicability. However, deriving theoretical guarantees regarding workflows of this type is an important direction for future research. ", + "bbox": [ + 173, + 736, + 825, + 915 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements ", + "text_level": 1, + "bbox": [ + 176, + 89, + 338, + 106 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We thank Ilya Kostrikov, Avi Singh, Ashvin Nair, Alexander Khazatsky, Albert Yu, Jedrzej Orbik, and Jonathan Yang for their help with setting up and debugging various aspects of the experimental setup as well as for providing us with offline datasets we could test our workflow on. We thank Dibya Ghosh, anonymous reviewers, and the area chair from CoRL for constructive feedback on an earlier version of this paper. AK thanks George Tucker and Rishabh Agarwal for valuable discussions. This research was funded by the DARPA Assued Autonomy Program and compute support from Google and Microsoft Azure. ", + "bbox": [ + 174, + 114, + 825, + 212 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References ", + "text_level": 1, + "bbox": [ + 174, + 232, + 266, + 248 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "[1] D. Kalashnikov, J. Varley, Y. Chebotar, B. Swanson, R. Jonschkowski, C. Finn, S. Levine, and K. Hausman. Mt-opt: Continuous multi-task robotic reinforcement learning at scale. 
arXiv preprint arXiv:2104.08212, 2021. \n[2] A. Kumar, A. Zhou, G. Tucker, and S. Levine. Conservative q-learning for offline reinforcement learning. arXiv preprint arXiv:2006.04779, 2020. \n[3] A. Singh, A. Yu, J. Yang, J. Zhang, A. Kumar, and S. Levine. Cog: Connecting new skills to past experience with offline reinforcement learning. arXiv preprint arXiv:2010.14500, 2020. \n[4] Y. Chebotar, K. Hausman, Y. Lu, T. Xiao, D. Kalashnikov, J. Varley, A. Irpan, B. Eysenbach, R. Julian, C. Finn, and S. Levine. Actionable models: Unsupervised offline reinforcement learning of robotic skills. arXiv preprint arXiv:2104.07749, 2021. \n[5] D. Kalashnikov, A. Irpan, P. Pastor, J. Ibarz, A. Herzog, E. Jang, D. Quillen, E. Holly, M. Kalakrishnan, V. Vanhoucke, et al. Scalable deep reinforcement learning for vision-based robotic manipulation. In Conference on Robot Learning, pages 651–673. PMLR, 2018. \n[6] D. Precup. Eligibility traces for off-policy policy evaluation. Computer Science Department Faculty Publication Series, page 80, 2000. \n[7] I. Kostrikov and O. Nachum. Statistical bootstrapping for uncertainty estimation in off-policy evaluation. arXiv preprint arXiv:2007.13609, 2020. \n[8] C. Paduraru. Off-policy evaluation in Markov decision processes. PhD thesis, Ph. D. Dissertation. McGill University, 2012. \n[9] T. L. Paine, C. Paduraru, A. Michi, C. Gulcehre, K. Zolna, A. Novikov, Z. Wang, and N. de Freitas. Hyperparameter selection for offline reinforcement learning. arXiv preprint arXiv:2007.09055, 2020. \n[10] O. Nachum and B. Dai. Reinforcement learning via fenchel-rockafellar duality. arXiv preprint arXiv:2001.01866, 2020. \n[11] P. Thomas, G. Theocharous, and M. Ghavamzadeh. High confidence policy improvement. In International Conference on Machine Learning, pages 2380–2388, 2015. \n[12] P. S. Thomas, G. Theocharous, and M. Ghavamzadeh. High-confidence off-policy evaluation. In Twenty-Ninth AAAI Conference on Artificial Intelligence, 2015. \n[13] N. Jiang and L. 
Li. Doubly robust off-policy value evaluation for reinforcement learning. arXiv preprint arXiv:1511.03722, 2015. \n[14] J. Fu, M. Norouzi, O. Nachum, G. Tucker, ziyu wang, A. Novikov, M. Yang, M. R. Zhang, Y. Chen, A. Kumar, C. Paduraru, S. Levine, and T. Paine. Benchmarks for deep off-policy evaluation. In International Conference on Learning Representations, 2021. URL https: //openreview.net/forum?id=kWSeGEeHvF8. \n[15] S. Levine, A. Kumar, G. Tucker, and J. Fu. Offline reinforcement learning: Tutorial, review, and perspectives on open problems. arXiv preprint arXiv:2005.01643, 2020. \n[16] Y. Wu, G. Tucker, and O. Nachum. Behavior regularized offline reinforcement learning. arXiv preprint arXiv:1911.11361, 2019. \n[17] T. P. Lillicrap, J. J. Hunt, A. Pritzel, N. Heess, T. Erez, Y. Tassa, D. Silver, and D. Wierstra. Continuous control with deep reinforcement learning. arXiv preprint arXiv:1509.02971, 2015. \n[18] S. Fujimoto, H. Van Hoof, and D. Meger. Addressing function approximation error in actorcritic methods. arXiv preprint arXiv:1802.09477, 2018. \n[19] T. Haarnoja, A. Zhou, P. Abbeel, and S. Levine. Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor. arXiv preprint arXiv:1801.01290, 2018. \n[20] I. Kostrikov, J. Tompson, R. Fergus, and O. Nachum. Offline reinforcement learning with fisher divergence critic regularization. arXiv preprint arXiv:2103.08050, 2021. \n[21] A. Kumar, R. Agarwal, D. Ghosh, and S. Levine. Implicit under-parameterization inhibits data-efficient deep reinforcement learning. In International Conference on Learning Representations, 2021. URL https://openreview.net/forum?id=O9bnihsFfXU. \n[22] A. Kumar, R. Agarwal, A. Courville, T. Ma, G. Tucker, and S. Levine. Value-based deep reinforcement learning requires explicit regularization. In RL for Real Life Workshop & Overparameterization: Pitfalls and Opportunities Workshop, ICML, 2021. 
URL https: //drive.google.com/file/d/1Fg43H5oagQp-ksjpWBf_aDYEzAFMVJm6/view. \n[23] R. Munos. Error bounds for approximate policy iteration. In Proceedings of the Twentieth International Conference on International Conference on Machine Learning, ICML’03, page 560–567. AAAI Press, 2003. ISBN 1577351894. \n[24] N. Srivastava, G. Hinton, A. Krizhevsky, I. Sutskever, and R. Salakhutdinov. Dropout: a simple way to prevent neural networks from overfitting. The journal of machine learning research, 15 (1):1929–1958, 2014. \n[25] A. A. Alemi, I. Fischer, J. V. Dillon, and K. Murphy. Deep variational information bottleneck. arXiv preprint arXiv:1612.00410, 2016. \n[26] A. Achille and S. Soatto. Emergence of invariance and disentanglement in deep representations. The Journal of Machine Learning Research, 19(1):1947–1980, 2018. \n[27] K. He, X. Zhang, S. Ren, and J. Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770–778, 2016. \n[28] A. Vaswani, N. Shazeer, N. Parmar, J. Uszkoreit, L. Jones, A. N. Gomez, L. Kaiser, and I. Polosukhin. Attention is all you need. arXiv preprint arXiv:1706.03762, 2017. \n[29] D. Ghosh and M. G. Bellemare. Representations for stable off-policy reinforcement learning. arXiv preprint arXiv:2007.05520, 2020. \n[30] A. Khazatsky, A. Nair, D. Jing, and S. Levine. What can i do here? learning new skills by imagining visual affordances. arXiv preprint arXiv:2106.00671, 2021. \n[31] D. Kalashnikov, A. Irpan, P. Pastor, J. Ibarz, A. Herzog, E. Jang, D. Quillen, E. Holly, M. Kalakrishnan, V. Vanhoucke, et al. Scalable deep reinforcement learning for vision-based robotic manipulation. In Conference on Robot Learning, pages 651–673, 2018. \n[32] A. Zeng, S. Song, S. Welker, J. Lee, A. Rodriguez, and T. Funkhouser. Learning synergies between pushing and grasping with self-supervised deep reinforcement learning. 2018. \n[33] OpenAI. 
Learning dexterous in-hand manipulation. In arXiv preprint arXiv:1808.00177, 2018. \n[34] H. van Hoof, T. Hermans, G. Neumann, and J. Peters. Learning robot in-hand manipulation with tactile features. 2015. \n[35] A. Rajeswaran, V. Kumar, A. Gupta, G. Vezzani, J. Schulman, E. Todorov, and S. Levine. Learning complex dexterous manipulation with deep reinforcement learning and demonstrations. In RSS, 2018. \n[36] V. Kumar, A. Gupta, E. Todorov, and S. Levine. Learning dexterous manipulation policies from experience and imitation. CoRR, abs/1611.05095, 2016. \n[37] C. Schenck and D. Fox. Visual closed-loop control for pouring liquids. In International Conference on Robotics and Automation (ICRA), 2017. \n[38] A. Yahya, A. Li, M. Kalakrishnan, Y. Chebotar, and S. Levine. Collective robot reinforcement learning with distributed asynchronous guided policy search. In IROS, 2017. \n[39] J. Matas, S. James, and A. J. Davison. Sim-to-real reinforcement learning for deformable object manipulation. In Conference on Robot Learning (CoRL), 2018. \n[40] R. Julian, B. Swanson, G. S. Sukhatme, S. Levine, C. Finn, and K. Hausman. Efficient adaptation for end-to-end vision-based robotic manipulation. arXiv arXiv:2004.10190, 2020. \n[41] S. Cabi, S. G. Colmenarejo, A. Novikov, K. Konyushkova, S. Reed, R. Jeong, K. ZoΕ‚na, Y. Ay- Λ™ tar, D. Budden, M. Vecerik, et al. A framework for data-driven robotics. arXiv preprint arXiv:1909.12200, 2019. \n[42] C. Finn and S. Levine. Deep visual foresight for planning robot motion. In 2017 IEEE International Conference on Robotics and Automation (ICRA), pages 2786–2793. IEEE, 2017. \n[43] F. Ebert, C. Finn, S. Dasari, A. Xie, A. Lee, and S. Levine. Visual foresight: Model-based deep reinforcement learning for vision-based robotic control. arXiv preprint arXiv:1812.00568, 2018. \n[44] A. Xie, F. Ebert, S. Levine, and C. Finn. Improvisation through physical understanding: Using novel objects as tools with visual foresight. 
Robotics: Science and Systems (RSS), 2019. \n[45] Y. Hristov, A. Lascarides, and S. Ramamoorthy. Interpretable latent spaces for learning from demonstration. arXiv preprint arXiv:1807.06583, 2018. \n[46] S. Tian, S. Nair, F. Ebert, S. Dasari, B. Eysenbach, C. Finn, and S. Levine. Model-based visual planning with self-supervised functional distances. arXiv preprint arXiv:2012.15373, 2020. \n[47] S. Young, D. Gandhi, S. Tulsiani, A. Gupta, P. Abbeel, and L. Pinto. Visual imitation made easy. arXiv e-prints, pages arXiv–2008, 2020. \n[48] E. Johns. Coarse-to-fine imitation learning: Robot manipulation from a single demonstration. arXiv preprint arXiv:2105.06411, 2021. \n[49] A. Mandlekar, F. Ramos, B. Boots, S. Savarese, L. Fei-Fei, A. Garg, and D. Fox. Iris: Implicit reinforcement without interaction at scale for learning control from offline robot manipulation data. In 2020 IEEE International Conference on Robotics and Automation (ICRA), pages 4414–4420. IEEE, 2020. \n[50] A. Mandlekar, D. Xu, R. MartΒ΄Δ±n-MartΒ΄Δ±n, S. Savarese, and L. Fei-Fei. Learning to generalize across long-horizon tasks from human demonstrations, 2020. \n[51] S. Lange, T. Gabel, and M. Riedmiller. Batch reinforcement learning. In Reinforcement learning, pages 45–73. Springer, 2012. \n[52] S. Fujimoto, D. Meger, and D. Precup. Off-policy deep reinforcement learning without exploration. arXiv preprint arXiv:1812.02900, 2018. \n[53] A. Kumar, J. Fu, M. Soh, G. Tucker, and S. Levine. Stabilizing off-policy q-learning via bootstrapping error reduction. In Advances in Neural Information Processing Systems, pages 11761–11771, 2019. \n[54] X. B. Peng, A. Kumar, G. Zhang, and S. Levine. Advantage-weighted regression: Simple and scalable off-policy reinforcement learning. arXiv preprint arXiv:1910.00177, 2019. \n[55] N. Jaques, A. Ghandeharioun, J. H. Shen, C. Ferguson, A. Lapedriza, N. Jones, S. Gu, and R. Picard. 
Way off-policy batch deep reinforcement learning of implicit human preferences in dialog. arXiv preprint arXiv:1907.00456, 2019. \n[56] A. Nair, M. Dalal, A. Gupta, and S. Levine. Accelerating online reinforcement learning with offline datasets. arXiv preprint arXiv:2006.09359, 2020. \n[57] R. Fakoor, J. Mueller, P. Chaudhari, and A. J. Smola. Continuous doubly constrained batch reinforcement learning. arXiv preprint arXiv:2102.09225, 2021. \n[58] T. Yu, G. Thomas, L. Yu, S. Ermon, J. Zou, S. Levine, C. Finn, and T. Ma. Mopo: Model-based offline policy optimization. arXiv preprint arXiv:2005.13239, 2020. \n[59] R. Kidambi, A. Rajeswaran, P. Netrapalli, and T. Joachims. Morel: Model-based offline reinforcement learning. arXiv preprint arXiv:2005.05951, 2020. \n[60] R. Rafailov, T. Yu, A. Rajeswaran, and C. Finn. Offline reinforcement learning from images with latent space models. Learning for Decision Making and Control (L4DC), 2021. \n[61] D. Precup, R. S. Sutton, and S. Dasgupta. Off-policy temporal-difference learning with function approximation. In ICML, pages 417–424, 2001. \n[62] C. Voloshin, H. M. Le, N. Jiang, and Y. Yue. Empirical study of off-policy policy evaluation for reinforcement learning. arXiv preprint arXiv:1911.06854, 2019. \n[63] O. Nachum, Y. Chow, B. Dai, and L. Li. Dualdice: Behavior-agnostic estimation of discounted stationary distribution corrections. In Advances in Neural Information Processing Systems, pages 2315–2325, 2019. \n[64] R. Qin, S. Gao, X. Zhang, Z. Xu, S. Huang, Z. Li, W. Zhang, and Y. Yu. Neorl: A near real-world benchmark for offline reinforcement learning. arXiv preprint arXiv:2102.00714, 2021. \n[65] T. Haarnoja, H. Tang, P. Abbeel, and S. Levine. Reinforcement learning with deep energybased policies. In International Conference on Machine Learning (ICML), 2017. \n[66] X. B. Peng, A. Kumar, G. Zhang, and S. Levine. Advantage-weighted regression: Simple and scalable off-policy reinforcement learning. 
arXiv preprint arXiv:1910.00177, 2019. \n[67] S. Fujimoto and S. S. Gu. A minimalist approach to offline reinforcement learning. arXiv preprint arXiv:2106.06860, 2021. ", + "bbox": [ + 179, + 253, + 826, + 911 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "", + "bbox": [ + 171, + 50, + 828, + 912 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "", + "bbox": [ + 171, + 70, + 828, + 919 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "", + "bbox": [ + 171, + 89, + 828, + 619 + ], + "page_idx": 11 + } +] \ No newline at end of file diff --git a/parse/train/fy4ZBWxYbIo/fy4ZBWxYbIo_middle.json b/parse/train/fy4ZBWxYbIo/fy4ZBWxYbIo_middle.json new file mode 100644 index 0000000000000000000000000000000000000000..b3c5b2176e238bb3711ee3d6d7654f4c207bc821 --- /dev/null +++ b/parse/train/fy4ZBWxYbIo/fy4ZBWxYbIo_middle.json @@ -0,0 +1,37722 @@ +{ + "pdf_info": [ + { + "preproc_blocks": [ + { + "type": "title", + "bbox": [ + 144, + 81, + 467, + 120 + ], + "lines": [ + { + "bbox": [ + 143, + 79, + 468, + 101 + ], + "spans": [ + { + "bbox": [ + 143, + 79, + 468, + 101 + ], + "score": 1.0, + "content": "A Workflow for Offline Model-Free Robotic", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 213, + 100, + 398, + 122 + ], + "spans": [ + { + "bbox": [ + 213, + 100, + 398, + 122 + ], + "score": 1.0, + "content": "Reinforcement Learning", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 0.5 + }, + { + "type": "text", + "bbox": [ + 134, + 140, + 478, + 175 + ], + "lines": [ + { + "bbox": [ + 132, + 139, + 480, + 154 + ], + "spans": [ + { + "bbox": [ + 132, + 139, + 320, + 154 + ], + "score": 1.0, + "content": "Aviral Kumar?,1, Anikait Singh?,1, Stephen", + "type": "text" + }, + { + "bbox": [ + 320, + 140, + 345, + 152 + ], + "score": 0.32, + "content": "\\mathbf { T i a n } ^ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 345, + 139, + 384, + 154 + ], + "score": 1.0, + "content": ", Chelsea", + "type": "text" + }, + 
{ + "bbox": [ + 384, + 140, + 410, + 152 + ], + "score": 0.56, + "content": "\\mathbf { F i n n ^ { 2 } }", + "type": "inline_equation" + }, + { + "bbox": [ + 411, + 139, + 480, + 154 + ], + "score": 1.0, + "content": ", Sergey Levine1", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 173, + 151, + 438, + 165 + ], + "spans": [ + { + "bbox": [ + 173, + 151, + 326, + 165 + ], + "score": 1.0, + "content": "1 UC Berkeley, 2 Stanford University", + "type": "text" + }, + { + "bbox": [ + 342, + 152, + 438, + 165 + ], + "score": 1.0, + "content": "(βˆ— Equal Contribution)", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 191, + 164, + 420, + 176 + ], + "spans": [ + { + "bbox": [ + 191, + 164, + 420, + 176 + ], + "score": 1.0, + "content": "aviralk@berkeley.edu, asap7772@berkeley.edu", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 3 + }, + { + "type": "text", + "bbox": [ + 142, + 207, + 468, + 426 + ], + "lines": [ + { + "bbox": [ + 141, + 207, + 469, + 221 + ], + "spans": [ + { + "bbox": [ + 141, + 207, + 469, + 221 + ], + "score": 1.0, + "content": "Abstract: Offline reinforcement learning (RL) enables learning control policies", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 141, + 219, + 469, + 232 + ], + "spans": [ + { + "bbox": [ + 141, + 219, + 469, + 232 + ], + "score": 1.0, + "content": "by utilizing only prior experience, without any online interaction. This can allow", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 141, + 230, + 469, + 243 + ], + "spans": [ + { + "bbox": [ + 141, + 230, + 469, + 243 + ], + "score": 1.0, + "content": "robots to acquire generalizable skills from large and diverse datasets, without any", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 141, + 240, + 469, + 254 + ], + "spans": [ + { + "bbox": [ + 141, + 240, + 469, + 254 + ], + "score": 1.0, + "content": "costly or unsafe online data collection. 
Despite recent algorithmic advances in", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 141, + 251, + 469, + 265 + ], + "spans": [ + { + "bbox": [ + 141, + 251, + 469, + 265 + ], + "score": 1.0, + "content": "offline RL, applying these methods to real-world problems has proven challeng-", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 141, + 263, + 469, + 275 + ], + "spans": [ + { + "bbox": [ + 141, + 263, + 469, + 275 + ], + "score": 1.0, + "content": "ing. Although offline RL methods can learn from prior data, there is no clear", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 142, + 274, + 470, + 285 + ], + "spans": [ + { + "bbox": [ + 142, + 274, + 470, + 285 + ], + "score": 1.0, + "content": "and well-understood process for making various design choices, from model ar-", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 142, + 284, + 469, + 297 + ], + "spans": [ + { + "bbox": [ + 142, + 284, + 469, + 297 + ], + "score": 1.0, + "content": "chitecture to algorithm hyperparameters, without actually evaluating the learned", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 141, + 295, + 470, + 308 + ], + "spans": [ + { + "bbox": [ + 141, + 295, + 470, + 308 + ], + "score": 1.0, + "content": "policies online. In this paper, our aim is to develop a practical workflow for using", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 142, + 306, + 469, + 318 + ], + "spans": [ + { + "bbox": [ + 142, + 306, + 469, + 318 + ], + "score": 1.0, + "content": "offline RL analogous to the relatively well-understood workflows for supervised", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 141, + 317, + 470, + 329 + ], + "spans": [ + { + "bbox": [ + 141, + 317, + 470, + 329 + ], + "score": 1.0, + "content": "learning problems. 
To this end, we devise a set of metrics and conditions that", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 141, + 328, + 469, + 340 + ], + "spans": [ + { + "bbox": [ + 141, + 328, + 469, + 340 + ], + "score": 1.0, + "content": "can be tracked over the course of offline training, and can inform the practitioner", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 142, + 339, + 469, + 351 + ], + "spans": [ + { + "bbox": [ + 142, + 339, + 469, + 351 + ], + "score": 1.0, + "content": "about how the algorithm and model architecture should be adjusted to improve fi-", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 141, + 350, + 470, + 363 + ], + "spans": [ + { + "bbox": [ + 141, + 350, + 470, + 363 + ], + "score": 1.0, + "content": "nal performance. Our workflow is derived from a conceptual understanding of the", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 141, + 361, + 470, + 373 + ], + "spans": [ + { + "bbox": [ + 141, + 361, + 470, + 373 + ], + "score": 1.0, + "content": "behavior of conservative offline RL algorithms and cross-validation in supervised", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 141, + 372, + 469, + 384 + ], + "spans": [ + { + "bbox": [ + 141, + 372, + 469, + 384 + ], + "score": 1.0, + "content": "learning. 
We demonstrate the efficacy of this workflow in producing effective poli-", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 141, + 383, + 469, + 395 + ], + "spans": [ + { + "bbox": [ + 141, + 383, + 469, + 395 + ], + "score": 1.0, + "content": "cies without any online tuning, both in several simulated robotic learning scenarios", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 141, + 393, + 469, + 406 + ], + "spans": [ + { + "bbox": [ + 141, + 393, + 469, + 406 + ], + "score": 1.0, + "content": "and for three tasks on two distinct real robots, focusing on learning manipulation", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 142, + 405, + 469, + 416 + ], + "spans": [ + { + "bbox": [ + 142, + 405, + 469, + 416 + ], + "score": 1.0, + "content": "skills with raw image observations with sparse binary rewards. Explanatory video", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 142, + 415, + 467, + 427 + ], + "spans": [ + { + "bbox": [ + 142, + 415, + 467, + 427 + ], + "score": 1.0, + "content": "and additional content can be found at sites.google.com/view/offline-rl-workflow.", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 14.5 + }, + { + "type": "text", + "bbox": [ + 144, + 435, + 335, + 446 + ], + "lines": [ + { + "bbox": [ + 141, + 432, + 336, + 450 + ], + "spans": [ + { + "bbox": [ + 141, + 432, + 336, + 450 + ], + "score": 1.0, + "content": "Keywords: workflow, offline RL, offline tuning", + "type": "text" + } + ], + "index": 25 + } + ], + "index": 25 + }, + { + "type": "title", + "bbox": [ + 108, + 460, + 191, + 472 + ], + "lines": [ + { + "bbox": [ + 105, + 458, + 192, + 475 + ], + "spans": [ + { + "bbox": [ + 105, + 458, + 192, + 475 + ], + "score": 1.0, + "content": "1 Introduction", + "type": "text" + } + ], + "index": 26 + } + ], + "index": 26 + }, + { + "type": "text", + "bbox": [ + 107, + 477, + 295, + 640 + ], + "lines": [ + { + "bbox": [ + 106, + 475, + 297, + 488 + ], + "spans": [ 
+ { + "bbox": [ + 106, + 475, + 297, + 488 + ], + "score": 1.0, + "content": "Offline reinforcement learning (RL) can in", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 106, + 487, + 297, + 500 + ], + "spans": [ + { + "bbox": [ + 106, + 487, + 297, + 500 + ], + "score": 1.0, + "content": "principle make it possible to convert existing", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 497, + 298, + 509 + ], + "spans": [ + { + "bbox": [ + 105, + 497, + 298, + 509 + ], + "score": 1.0, + "content": "large datasets of robotic experience into ef-", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 508, + 297, + 520 + ], + "spans": [ + { + "bbox": [ + 105, + 508, + 297, + 520 + ], + "score": 1.0, + "content": "fective policies, without the need for costly", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 519, + 298, + 532 + ], + "spans": [ + { + "bbox": [ + 105, + 519, + 298, + 532 + ], + "score": 1.0, + "content": "or dangerous online interaction for each train-", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 530, + 298, + 542 + ], + "spans": [ + { + "bbox": [ + 105, + 530, + 298, + 542 + ], + "score": 1.0, + "content": "ing run. 
While offline RL algorithms have", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 542, + 297, + 553 + ], + "spans": [ + { + "bbox": [ + 105, + 542, + 297, + 553 + ], + "score": 1.0, + "content": "improved significantly [1, 2, 3, 4, 5], apply-", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 552, + 298, + 565 + ], + "spans": [ + { + "bbox": [ + 105, + 552, + 298, + 565 + ], + "score": 1.0, + "content": "ing such methods to real-world robotic con-", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 563, + 298, + 575 + ], + "spans": [ + { + "bbox": [ + 105, + 563, + 298, + 575 + ], + "score": 1.0, + "content": "trol problems presents a number of major chal-", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 574, + 298, + 586 + ], + "spans": [ + { + "bbox": [ + 105, + 574, + 298, + 586 + ], + "score": 1.0, + "content": "lenges. In standard online RL, any interme-", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 585, + 298, + 597 + ], + "spans": [ + { + "bbox": [ + 105, + 585, + 298, + 597 + ], + "score": 1.0, + "content": "diate policy found during training is executed", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 595, + 297, + 608 + ], + "spans": [ + { + "bbox": [ + 105, + 595, + 297, + 608 + ], + "score": 1.0, + "content": "in the environment to collect more experience,", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 106, + 607, + 297, + 618 + ], + "spans": [ + { + "bbox": [ + 106, + 607, + 297, + 618 + ], + "score": 1.0, + "content": "which naturally allows for an evaluation of the", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 618, + 297, + 630 + ], + "spans": [ + { + "bbox": [ + 105, + 618, + 297, + 630 + ], + "score": 1.0, + "content": "policy performance. 
This ability to evaluate in-", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 106, + 629, + 297, + 640 + ], + "spans": [ + { + "bbox": [ + 106, + 629, + 297, + 640 + ], + "score": 1.0, + "content": "termediate policies lets practitioners use β€œbrute-", + "type": "text" + } + ], + "index": 41 + } + ], + "index": 34 + }, + { + "type": "image", + "bbox": [ + 307, + 461, + 495, + 571 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 307, + 461, + 495, + 571 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 307, + 461, + 495, + 571 + ], + "spans": [ + { + "bbox": [ + 307, + 461, + 495, + 571 + ], + "score": 0.972, + "type": "image", + "image_path": "93b250387a2633b12ccbb56017457a77edb5b8bb68d22310145c16c8258e9664.jpg" + } + ] + } + ], + "index": 46, + "virtual_lines": [ + { + "bbox": [ + 307, + 461, + 495, + 473.22222222222223 + ], + "spans": [], + "index": 42 + }, + { + "bbox": [ + 307, + 473.22222222222223, + 495, + 485.44444444444446 + ], + "spans": [], + "index": 43 + }, + { + "bbox": [ + 307, + 485.44444444444446, + 495, + 497.6666666666667 + ], + "spans": [], + "index": 44 + }, + { + "bbox": [ + 307, + 497.6666666666667, + 495, + 509.8888888888889 + ], + "spans": [], + "index": 45 + }, + { + "bbox": [ + 307, + 509.8888888888889, + 495, + 522.1111111111111 + ], + "spans": [], + "index": 46 + }, + { + "bbox": [ + 307, + 522.1111111111111, + 495, + 534.3333333333333 + ], + "spans": [], + "index": 47 + }, + { + "bbox": [ + 307, + 534.3333333333333, + 495, + 546.5555555555554 + ], + "spans": [], + "index": 48 + }, + { + "bbox": [ + 307, + 546.5555555555554, + 495, + 558.7777777777776 + ], + "spans": [], + "index": 49 + }, + { + "bbox": [ + 307, + 558.7777777777776, + 495, + 570.9999999999998 + ], + "spans": [], + "index": 50 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 304, + 579, + 504, + 639 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 304, + 579, + 506, + 590 + ], + "spans": [ + { + "bbox": [ + 304, + 579, + 506, + 
590 + ], + "score": 1.0, + "content": "Figure 1: Our proposed workflow aims to detect", + "type": "text" + } + ], + "index": 51 + }, + { + "bbox": [ + 304, + 589, + 505, + 601 + ], + "spans": [ + { + "bbox": [ + 304, + 589, + 505, + 601 + ], + "score": 1.0, + "content": "overfitting and underfitting, and provides guidelines for", + "type": "text" + } + ], + "index": 52 + }, + { + "bbox": [ + 304, + 599, + 505, + 610 + ], + "spans": [ + { + "bbox": [ + 304, + 599, + 505, + 610 + ], + "score": 1.0, + "content": "addressing these issues via policy selection, regulariza-", + "type": "text" + } + ], + "index": 53 + }, + { + "bbox": [ + 304, + 609, + 505, + 620 + ], + "spans": [ + { + "bbox": [ + 304, + 609, + 505, + 620 + ], + "score": 1.0, + "content": "tion, and architecture design. We evaluate this work-", + "type": "text" + } + ], + "index": 54 + }, + { + "bbox": [ + 304, + 619, + 505, + 630 + ], + "spans": [ + { + "bbox": [ + 304, + 619, + 505, + 630 + ], + "score": 1.0, + "content": "flow on two real-world robotic systems and simulation", + "type": "text" + } + ], + "index": 55 + }, + { + "bbox": [ + 304, + 629, + 446, + 640 + ], + "spans": [ + { + "bbox": [ + 304, + 629, + 446, + 640 + ], + "score": 1.0, + "content": "domains, and we find it to be effective.", + "type": "text" + } + ], + "index": 56 + } + ], + "index": 53.5 + } + ], + "index": 49.75 + }, + { + "type": "text", + "bbox": [ + 107, + 640, + 505, + 716 + ], + "lines": [ + { + "bbox": [ + 105, + 639, + 505, + 652 + ], + "spans": [ + { + "bbox": [ + 105, + 639, + 505, + 652 + ], + "score": 1.0, + "content": "force” to evaluate the effects of various design factors, such as model capacity and expressivity, the", + "type": "text" + } + ], + "index": 57 + }, + { + "bbox": [ + 106, + 650, + 505, + 663 + ], + "spans": [ + { + "bbox": [ + 106, + 650, + 505, + 663 + ], + "score": 1.0, + "content": "number of training steps, and so forth, and facilitates comparatively straightforward tuning. 
In", + "type": "text" + } + ], + "index": 58 + }, + { + "bbox": [ + 105, + 660, + 505, + 674 + ], + "spans": [ + { + "bbox": [ + 105, + 660, + 505, + 674 + ], + "score": 1.0, + "content": "contrast, offline RL methods do not have access to real-world on-policy rollouts for evaluating the", + "type": "text" + } + ], + "index": 59 + }, + { + "bbox": [ + 105, + 672, + 505, + 685 + ], + "spans": [ + { + "bbox": [ + 105, + 672, + 505, + 685 + ], + "score": 1.0, + "content": "learned policy. Thus, in order for these methods to be truly practical for real-world applications, we", + "type": "text" + } + ], + "index": 60 + }, + { + "bbox": [ + 105, + 683, + 505, + 695 + ], + "spans": [ + { + "bbox": [ + 105, + 683, + 505, + 695 + ], + "score": 1.0, + "content": "not only require effective algorithms, but also an effective workflow: a set of protocols and metrics", + "type": "text" + } + ], + "index": 61 + }, + { + "bbox": [ + 106, + 694, + 506, + 707 + ], + "spans": [ + { + "bbox": [ + 106, + 694, + 506, + 707 + ], + "score": 1.0, + "content": "that can be used to reliably and consistently adjust model capacity, regularization, etc in offline RL", + "type": "text" + } + ], + "index": 62 + }, + { + "bbox": [ + 105, + 705, + 468, + 718 + ], + "spans": [ + { + "bbox": [ + 105, + 705, + 468, + 718 + ], + "score": 1.0, + "content": "to obtain policies with good performance, without requiring real-world rollouts for tuning.", + "type": "text" + } + ], + "index": 63 + } + ], + "index": 60 + } + ], + "page_idx": 0, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 731, + 335, + 741 + ], + "lines": [ + { + "bbox": [ + 106, + 731, + 335, + 743 + ], + "spans": [ + { + "bbox": [ + 106, + 731, + 335, + 743 + ], + "score": 1.0, + "content": "5th Conference on Robot Learning (CoRL 2021), London, UK.", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "title", + "bbox": [ + 144, + 81, + 467, + 120 + ], + 
"lines": [ + { + "bbox": [ + 143, + 79, + 468, + 101 + ], + "spans": [ + { + "bbox": [ + 143, + 79, + 468, + 101 + ], + "score": 1.0, + "content": "A Workflow for Offline Model-Free Robotic", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 213, + 100, + 398, + 122 + ], + "spans": [ + { + "bbox": [ + 213, + 100, + 398, + 122 + ], + "score": 1.0, + "content": "Reinforcement Learning", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 0.5 + }, + { + "type": "text", + "bbox": [ + 134, + 140, + 478, + 175 + ], + "lines": [ + { + "bbox": [ + 132, + 139, + 480, + 154 + ], + "spans": [ + { + "bbox": [ + 132, + 139, + 320, + 154 + ], + "score": 1.0, + "content": "Aviral Kumar?,1, Anikait Singh?,1, Stephen", + "type": "text" + }, + { + "bbox": [ + 320, + 140, + 345, + 152 + ], + "score": 0.32, + "content": "\\mathbf { T i a n } ^ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 345, + 139, + 384, + 154 + ], + "score": 1.0, + "content": ", Chelsea", + "type": "text" + }, + { + "bbox": [ + 384, + 140, + 410, + 152 + ], + "score": 0.56, + "content": "\\mathbf { F i n n ^ { 2 } }", + "type": "inline_equation" + }, + { + "bbox": [ + 411, + 139, + 480, + 154 + ], + "score": 1.0, + "content": ", Sergey Levine1", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 173, + 151, + 438, + 165 + ], + "spans": [ + { + "bbox": [ + 173, + 151, + 326, + 165 + ], + "score": 1.0, + "content": "1 UC Berkeley, 2 Stanford University", + "type": "text" + }, + { + "bbox": [ + 342, + 152, + 438, + 165 + ], + "score": 1.0, + "content": "(βˆ— Equal Contribution)", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 191, + 164, + 420, + 176 + ], + "spans": [ + { + "bbox": [ + 191, + 164, + 420, + 176 + ], + "score": 1.0, + "content": "aviralk@berkeley.edu, asap7772@berkeley.edu", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 3, + "bbox_fs": [ + 132, + 139, + 480, + 176 + ] + }, + { + "type": "text", + "bbox": [ + 142, + 207, + 468, + 426 
+ ], + "lines": [ + { + "bbox": [ + 141, + 207, + 469, + 221 + ], + "spans": [ + { + "bbox": [ + 141, + 207, + 469, + 221 + ], + "score": 1.0, + "content": "Abstract: Offline reinforcement learning (RL) enables learning control policies", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 141, + 219, + 469, + 232 + ], + "spans": [ + { + "bbox": [ + 141, + 219, + 469, + 232 + ], + "score": 1.0, + "content": "by utilizing only prior experience, without any online interaction. This can allow", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 141, + 230, + 469, + 243 + ], + "spans": [ + { + "bbox": [ + 141, + 230, + 469, + 243 + ], + "score": 1.0, + "content": "robots to acquire generalizable skills from large and diverse datasets, without any", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 141, + 240, + 469, + 254 + ], + "spans": [ + { + "bbox": [ + 141, + 240, + 469, + 254 + ], + "score": 1.0, + "content": "costly or unsafe online data collection. Despite recent algorithmic advances in", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 141, + 251, + 469, + 265 + ], + "spans": [ + { + "bbox": [ + 141, + 251, + 469, + 265 + ], + "score": 1.0, + "content": "offline RL, applying these methods to real-world problems has proven challeng-", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 141, + 263, + 469, + 275 + ], + "spans": [ + { + "bbox": [ + 141, + 263, + 469, + 275 + ], + "score": 1.0, + "content": "ing. 
Although offline RL methods can learn from prior data, there is no clear", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 142, + 274, + 470, + 285 + ], + "spans": [ + { + "bbox": [ + 142, + 274, + 470, + 285 + ], + "score": 1.0, + "content": "and well-understood process for making various design choices, from model ar-", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 142, + 284, + 469, + 297 + ], + "spans": [ + { + "bbox": [ + 142, + 284, + 469, + 297 + ], + "score": 1.0, + "content": "chitecture to algorithm hyperparameters, without actually evaluating the learned", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 141, + 295, + 470, + 308 + ], + "spans": [ + { + "bbox": [ + 141, + 295, + 470, + 308 + ], + "score": 1.0, + "content": "policies online. In this paper, our aim is to develop a practical workflow for using", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 142, + 306, + 469, + 318 + ], + "spans": [ + { + "bbox": [ + 142, + 306, + 469, + 318 + ], + "score": 1.0, + "content": "offline RL analogous to the relatively well-understood workflows for supervised", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 141, + 317, + 470, + 329 + ], + "spans": [ + { + "bbox": [ + 141, + 317, + 470, + 329 + ], + "score": 1.0, + "content": "learning problems. 
To this end, we devise a set of metrics and conditions that", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 141, + 328, + 469, + 340 + ], + "spans": [ + { + "bbox": [ + 141, + 328, + 469, + 340 + ], + "score": 1.0, + "content": "can be tracked over the course of offline training, and can inform the practitioner", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 142, + 339, + 469, + 351 + ], + "spans": [ + { + "bbox": [ + 142, + 339, + 469, + 351 + ], + "score": 1.0, + "content": "about how the algorithm and model architecture should be adjusted to improve fi-", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 141, + 350, + 470, + 363 + ], + "spans": [ + { + "bbox": [ + 141, + 350, + 470, + 363 + ], + "score": 1.0, + "content": "nal performance. Our workflow is derived from a conceptual understanding of the", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 141, + 361, + 470, + 373 + ], + "spans": [ + { + "bbox": [ + 141, + 361, + 470, + 373 + ], + "score": 1.0, + "content": "behavior of conservative offline RL algorithms and cross-validation in supervised", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 141, + 372, + 469, + 384 + ], + "spans": [ + { + "bbox": [ + 141, + 372, + 469, + 384 + ], + "score": 1.0, + "content": "learning. 
We demonstrate the efficacy of this workflow in producing effective poli-", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 141, + 383, + 469, + 395 + ], + "spans": [ + { + "bbox": [ + 141, + 383, + 469, + 395 + ], + "score": 1.0, + "content": "cies without any online tuning, both in several simulated robotic learning scenarios", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 141, + 393, + 469, + 406 + ], + "spans": [ + { + "bbox": [ + 141, + 393, + 469, + 406 + ], + "score": 1.0, + "content": "and for three tasks on two distinct real robots, focusing on learning manipulation", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 142, + 405, + 469, + 416 + ], + "spans": [ + { + "bbox": [ + 142, + 405, + 469, + 416 + ], + "score": 1.0, + "content": "skills with raw image observations with sparse binary rewards. Explanatory video", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 142, + 415, + 467, + 427 + ], + "spans": [ + { + "bbox": [ + 142, + 415, + 467, + 427 + ], + "score": 1.0, + "content": "and additional content can be found at sites.google.com/view/offline-rl-workflow.", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 14.5, + "bbox_fs": [ + 141, + 207, + 470, + 427 + ] + }, + { + "type": "text", + "bbox": [ + 144, + 435, + 335, + 446 + ], + "lines": [ + { + "bbox": [ + 141, + 432, + 336, + 450 + ], + "spans": [ + { + "bbox": [ + 141, + 432, + 336, + 450 + ], + "score": 1.0, + "content": "Keywords: workflow, offline RL, offline tuning", + "type": "text" + } + ], + "index": 25 + } + ], + "index": 25, + "bbox_fs": [ + 141, + 432, + 336, + 450 + ] + }, + { + "type": "title", + "bbox": [ + 108, + 460, + 191, + 472 + ], + "lines": [ + { + "bbox": [ + 105, + 458, + 192, + 475 + ], + "spans": [ + { + "bbox": [ + 105, + 458, + 192, + 475 + ], + "score": 1.0, + "content": "1 Introduction", + "type": "text" + } + ], + "index": 26 + } + ], + "index": 26 + }, + { + "type": "text", + "bbox": [ + 107, + 
477, + 295, + 640 + ], + "lines": [ + { + "bbox": [ + 106, + 475, + 297, + 488 + ], + "spans": [ + { + "bbox": [ + 106, + 475, + 297, + 488 + ], + "score": 1.0, + "content": "Offline reinforcement learning (RL) can in", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 106, + 487, + 297, + 500 + ], + "spans": [ + { + "bbox": [ + 106, + 487, + 297, + 500 + ], + "score": 1.0, + "content": "principle make it possible to convert existing", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 497, + 298, + 509 + ], + "spans": [ + { + "bbox": [ + 105, + 497, + 298, + 509 + ], + "score": 1.0, + "content": "large datasets of robotic experience into ef-", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 508, + 297, + 520 + ], + "spans": [ + { + "bbox": [ + 105, + 508, + 297, + 520 + ], + "score": 1.0, + "content": "fective policies, without the need for costly", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 519, + 298, + 532 + ], + "spans": [ + { + "bbox": [ + 105, + 519, + 298, + 532 + ], + "score": 1.0, + "content": "or dangerous online interaction for each train-", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 530, + 298, + 542 + ], + "spans": [ + { + "bbox": [ + 105, + 530, + 298, + 542 + ], + "score": 1.0, + "content": "ing run. 
While offline RL algorithms have", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 542, + 297, + 553 + ], + "spans": [ + { + "bbox": [ + 105, + 542, + 297, + 553 + ], + "score": 1.0, + "content": "improved significantly [1, 2, 3, 4, 5], apply-", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 552, + 298, + 565 + ], + "spans": [ + { + "bbox": [ + 105, + 552, + 298, + 565 + ], + "score": 1.0, + "content": "ing such methods to real-world robotic con-", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 563, + 298, + 575 + ], + "spans": [ + { + "bbox": [ + 105, + 563, + 298, + 575 + ], + "score": 1.0, + "content": "trol problems presents a number of major chal-", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 574, + 298, + 586 + ], + "spans": [ + { + "bbox": [ + 105, + 574, + 298, + 586 + ], + "score": 1.0, + "content": "lenges. In standard online RL, any interme-", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 585, + 298, + 597 + ], + "spans": [ + { + "bbox": [ + 105, + 585, + 298, + 597 + ], + "score": 1.0, + "content": "diate policy found during training is executed", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 595, + 297, + 608 + ], + "spans": [ + { + "bbox": [ + 105, + 595, + 297, + 608 + ], + "score": 1.0, + "content": "in the environment to collect more experience,", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 106, + 607, + 297, + 618 + ], + "spans": [ + { + "bbox": [ + 106, + 607, + 297, + 618 + ], + "score": 1.0, + "content": "which naturally allows for an evaluation of the", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 618, + 297, + 630 + ], + "spans": [ + { + "bbox": [ + 105, + 618, + 297, + 630 + ], + "score": 1.0, + "content": "policy performance. 
This ability to evaluate in-", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 106, + 629, + 297, + 640 + ], + "spans": [ + { + "bbox": [ + 106, + 629, + 297, + 640 + ], + "score": 1.0, + "content": "termediate policies lets practitioners use β€œbrute-", + "type": "text" + } + ], + "index": 41 + } + ], + "index": 34, + "bbox_fs": [ + 105, + 475, + 298, + 640 + ] + }, + { + "type": "image", + "bbox": [ + 307, + 461, + 495, + 571 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 307, + 461, + 495, + 571 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 307, + 461, + 495, + 571 + ], + "spans": [ + { + "bbox": [ + 307, + 461, + 495, + 571 + ], + "score": 0.972, + "type": "image", + "image_path": "93b250387a2633b12ccbb56017457a77edb5b8bb68d22310145c16c8258e9664.jpg" + } + ] + } + ], + "index": 46, + "virtual_lines": [ + { + "bbox": [ + 307, + 461, + 495, + 473.22222222222223 + ], + "spans": [], + "index": 42 + }, + { + "bbox": [ + 307, + 473.22222222222223, + 495, + 485.44444444444446 + ], + "spans": [], + "index": 43 + }, + { + "bbox": [ + 307, + 485.44444444444446, + 495, + 497.6666666666667 + ], + "spans": [], + "index": 44 + }, + { + "bbox": [ + 307, + 497.6666666666667, + 495, + 509.8888888888889 + ], + "spans": [], + "index": 45 + }, + { + "bbox": [ + 307, + 509.8888888888889, + 495, + 522.1111111111111 + ], + "spans": [], + "index": 46 + }, + { + "bbox": [ + 307, + 522.1111111111111, + 495, + 534.3333333333333 + ], + "spans": [], + "index": 47 + }, + { + "bbox": [ + 307, + 534.3333333333333, + 495, + 546.5555555555554 + ], + "spans": [], + "index": 48 + }, + { + "bbox": [ + 307, + 546.5555555555554, + 495, + 558.7777777777776 + ], + "spans": [], + "index": 49 + }, + { + "bbox": [ + 307, + 558.7777777777776, + 495, + 570.9999999999998 + ], + "spans": [], + "index": 50 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 304, + 579, + 504, + 639 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 304, + 579, + 506, + 590 + ], + 
"spans": [ + { + "bbox": [ + 304, + 579, + 506, + 590 + ], + "score": 1.0, + "content": "Figure 1: Our proposed workflow aims to detect", + "type": "text" + } + ], + "index": 51 + }, + { + "bbox": [ + 304, + 589, + 505, + 601 + ], + "spans": [ + { + "bbox": [ + 304, + 589, + 505, + 601 + ], + "score": 1.0, + "content": "overfitting and underfitting, and provides guidelines for", + "type": "text" + } + ], + "index": 52 + }, + { + "bbox": [ + 304, + 599, + 505, + 610 + ], + "spans": [ + { + "bbox": [ + 304, + 599, + 505, + 610 + ], + "score": 1.0, + "content": "addressing these issues via policy selection, regulariza-", + "type": "text" + } + ], + "index": 53 + }, + { + "bbox": [ + 304, + 609, + 505, + 620 + ], + "spans": [ + { + "bbox": [ + 304, + 609, + 505, + 620 + ], + "score": 1.0, + "content": "tion, and architecture design. We evaluate this work-", + "type": "text" + } + ], + "index": 54 + }, + { + "bbox": [ + 304, + 619, + 505, + 630 + ], + "spans": [ + { + "bbox": [ + 304, + 619, + 505, + 630 + ], + "score": 1.0, + "content": "flow on two real-world robotic systems and simulation", + "type": "text" + } + ], + "index": 55 + }, + { + "bbox": [ + 304, + 629, + 446, + 640 + ], + "spans": [ + { + "bbox": [ + 304, + 629, + 446, + 640 + ], + "score": 1.0, + "content": "domains, and we find it to be effective.", + "type": "text" + } + ], + "index": 56 + } + ], + "index": 53.5 + } + ], + "index": 49.75 + }, + { + "type": "text", + "bbox": [ + 107, + 640, + 505, + 716 + ], + "lines": [ + { + "bbox": [ + 105, + 639, + 505, + 652 + ], + "spans": [ + { + "bbox": [ + 105, + 639, + 505, + 652 + ], + "score": 1.0, + "content": "force” to evaluate the effects of various design factors, such as model capacity and expressivity, the", + "type": "text" + } + ], + "index": 57 + }, + { + "bbox": [ + 106, + 650, + 505, + 663 + ], + "spans": [ + { + "bbox": [ + 106, + 650, + 505, + 663 + ], + "score": 1.0, + "content": "number of training steps, and so forth, and facilitates 
comparatively straightforward tuning. In", + "type": "text" + } + ], + "index": 58 + }, + { + "bbox": [ + 105, + 660, + 505, + 674 + ], + "spans": [ + { + "bbox": [ + 105, + 660, + 505, + 674 + ], + "score": 1.0, + "content": "contrast, offline RL methods do not have access to real-world on-policy rollouts for evaluating the", + "type": "text" + } + ], + "index": 59 + }, + { + "bbox": [ + 105, + 672, + 505, + 685 + ], + "spans": [ + { + "bbox": [ + 105, + 672, + 505, + 685 + ], + "score": 1.0, + "content": "learned policy. Thus, in order for these methods to be truly practical for real-world applications, we", + "type": "text" + } + ], + "index": 60 + }, + { + "bbox": [ + 105, + 683, + 505, + 695 + ], + "spans": [ + { + "bbox": [ + 105, + 683, + 505, + 695 + ], + "score": 1.0, + "content": "not only require effective algorithms, but also an effective workflow: a set of protocols and metrics", + "type": "text" + } + ], + "index": 61 + }, + { + "bbox": [ + 106, + 694, + 506, + 707 + ], + "spans": [ + { + "bbox": [ + 106, + 694, + 506, + 707 + ], + "score": 1.0, + "content": "that can be used to reliably and consistently adjust model capacity, regularization, etc in offline RL", + "type": "text" + } + ], + "index": 62 + }, + { + "bbox": [ + 105, + 705, + 468, + 718 + ], + "spans": [ + { + "bbox": [ + 105, + 705, + 468, + 718 + ], + "score": 1.0, + "content": "to obtain policies with good performance, without requiring real-world rollouts for tuning.", + "type": "text" + } + ], + "index": 63 + } + ], + "index": 60, + "bbox_fs": [ + 105, + 639, + 506, + 718 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 72, + 505, + 161 + ], + "lines": [ + { + "bbox": [ + 106, + 73, + 505, + 85 + ], + "spans": [ + { + "bbox": [ + 106, + 73, + 505, + 85 + ], + "score": 1.0, + "content": "A number of prior works have studied model selection in offline RL by utilizing off-policy eval-", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 
106, + 84, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 84, + 504, + 95 + ], + "score": 1.0, + "content": "uation (OPE) methods [6] to estimate policy performance. These methods can be based either on", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 93, + 505, + 108 + ], + "spans": [ + { + "bbox": [ + 104, + 93, + 505, + 108 + ], + "score": 1.0, + "content": "model or value learning [7, 8, 9, 10] or importance sampling [6, 11, 12, 13]. However, developing", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 105, + 505, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 105, + 505, + 118 + ], + "score": 1.0, + "content": "reliable OPE methods is itself an open problem, and modern OPE methods themselves suffer from", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 116, + 506, + 129 + ], + "spans": [ + { + "bbox": [ + 105, + 116, + 506, + 129 + ], + "score": 1.0, + "content": "hyperparameter selection challenges (see Fu et al. [14] for an empirical study). 
Moreover, accurate", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 127, + 505, + 139 + ], + "spans": [ + { + "bbox": [ + 106, + 127, + 505, + 139 + ], + "score": 1.0, + "content": "off-policy evaluation is likely not necessary to simply tune algorithms for best performance – we do", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 138, + 506, + 150 + ], + "spans": [ + { + "bbox": [ + 105, + 138, + 506, + 150 + ], + "score": 1.0, + "content": "not need a precise estimate of how good our policy is, but rather a workflow that enables us to best", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 150, + 345, + 161 + ], + "spans": [ + { + "bbox": [ + 106, + 150, + 345, + 161 + ], + "score": 1.0, + "content": "improve it by adjusting various algorithm hyperparameters.", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 3.5 + }, + { + "type": "text", + "bbox": [ + 107, + 165, + 505, + 308 + ], + "lines": [ + { + "bbox": [ + 105, + 164, + 506, + 178 + ], + "spans": [ + { + "bbox": [ + 105, + 164, + 506, + 178 + ], + "score": 1.0, + "content": "In this paper, we devise a practical workflow for selecting regularizers, model architectures, and", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 177, + 506, + 189 + ], + "spans": [ + { + "bbox": [ + 105, + 177, + 506, + 189 + ], + "score": 1.0, + "content": "policy checkpoints for offline RL methods in robotic learning settings. 
We focus on a specific class", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 187, + 506, + 200 + ], + "spans": [ + { + "bbox": [ + 105, + 187, + 506, + 200 + ], + "score": 1.0, + "content": "of conservative offline RL algorithms [15, 2] that regularize the Q-function, but also show that our", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 198, + 506, + 212 + ], + "spans": [ + { + "bbox": [ + 105, + 198, + 506, + 212 + ], + "score": 1.0, + "content": "workflow can be effectively applied to policy constraint methods [16]. Our aim is not to focus on", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 209, + 506, + 222 + ], + "spans": [ + { + "bbox": [ + 105, + 209, + 506, + 222 + ], + "score": 1.0, + "content": "complete off-policy evaluation or to devise a new approach for off-policy evaluation, but rather to", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 218, + 505, + 234 + ], + "spans": [ + { + "bbox": [ + 105, + 218, + 505, + 234 + ], + "score": 1.0, + "content": "adopt a strategy similar to the one in supervised learning. 
Analogously to how supervised learning", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 231, + 505, + 244 + ], + "spans": [ + { + "bbox": [ + 105, + 231, + 505, + 244 + ], + "score": 1.0, + "content": "practitioners can detect overfitting and underfitting by tracking training and validation losses, and", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 241, + 505, + 254 + ], + "spans": [ + { + "bbox": [ + 105, + 241, + 505, + 254 + ], + "score": 1.0, + "content": "then adjust hyperparameters based on these metrics, our workflow (see Figure 1 for a schematic) first", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 252, + 506, + 266 + ], + "spans": [ + { + "bbox": [ + 105, + 252, + 506, + 266 + ], + "score": 1.0, + "content": "defines and characterizes overfitting and underfitting, proposes metrics and conditions that users can", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 263, + 505, + 276 + ], + "spans": [ + { + "bbox": [ + 105, + 263, + 505, + 276 + ], + "score": 1.0, + "content": "track to determine if an offline RL exhibits overfitting or underfitting, and then utilizes these metrics", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 272, + 505, + 289 + ], + "spans": [ + { + "bbox": [ + 104, + 272, + 505, + 289 + ], + "score": 1.0, + "content": "to inform design decisions pertaining to neural net architectures, regularization, and early stopping.", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 285, + 506, + 298 + ], + "spans": [ + { + "bbox": [ + 105, + 285, + 506, + 298 + ], + "score": 1.0, + "content": "This protocol is intended to act as a β€œuser’s manual” for a practitioner, with guidelines for how to", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 106, + 297, + 442, + 308 + ], + "spans": [ + { + "bbox": [ + 106, + 297, + 442, + 308 + ], + "score": 1.0, + "content": "modify algorithm parameters for best 
results without real-world evaluation rollouts.", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 14 + }, + { + "type": "text", + "bbox": [ + 106, + 312, + 505, + 412 + ], + "lines": [ + { + "bbox": [ + 106, + 312, + 505, + 325 + ], + "spans": [ + { + "bbox": [ + 106, + 312, + 505, + 325 + ], + "score": 1.0, + "content": "The primary contribution of this paper is a simple yet effective workflow for robotic offline RL.", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 323, + 505, + 336 + ], + "spans": [ + { + "bbox": [ + 105, + 323, + 505, + 336 + ], + "score": 1.0, + "content": "We propose metrics and protocols to assist practitioners in selecting policy checkpoints, regulariza-", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 334, + 505, + 348 + ], + "spans": [ + { + "bbox": [ + 105, + 334, + 505, + 348 + ], + "score": 1.0, + "content": "tion parameters, and model architectures for conservative offline RL algorithms such as CQL [2]", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 345, + 506, + 358 + ], + "spans": [ + { + "bbox": [ + 106, + 345, + 506, + 358 + ], + "score": 1.0, + "content": "and BRAC [16]. We empirically verify the efficacy of our proposed workflow on simulated robotic", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 356, + 506, + 369 + ], + "spans": [ + { + "bbox": [ + 105, + 356, + 506, + 369 + ], + "score": 1.0, + "content": "manipulation problems as well as three real-world robotic manipulation problems on two different", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 367, + 505, + 380 + ], + "spans": [ + { + "bbox": [ + 105, + 367, + 505, + 380 + ], + "score": 1.0, + "content": "robots, with diverse objects, pixel observations, and sparse binary reward supervision. 
Experimen-", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 106, + 378, + 504, + 390 + ], + "spans": [ + { + "bbox": [ + 106, + 378, + 504, + 390 + ], + "score": 1.0, + "content": "tally, we evaluate our method on two real-world robots (the Sawyer and WidowX robots), and one", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 388, + 505, + 402 + ], + "spans": [ + { + "bbox": [ + 105, + 388, + 505, + 402 + ], + "score": 1.0, + "content": "realistic simulated tasks. Our approach is effective in all of these cases, and on two tasks with the", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 399, + 473, + 412 + ], + "spans": [ + { + "bbox": [ + 105, + 399, + 447, + 412 + ], + "score": 1.0, + "content": "Sawyer robot that initially fail completely, our workflow improves the success rate to", + "type": "text" + }, + { + "bbox": [ + 447, + 400, + 468, + 411 + ], + "score": 0.87, + "content": "70 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 468, + 399, + 473, + 412 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 25 + }, + { + "type": "title", + "bbox": [ + 106, + 416, + 348, + 429 + ], + "lines": [ + { + "bbox": [ + 104, + 415, + 348, + 431 + ], + "spans": [ + { + "bbox": [ + 104, + 415, + 348, + 431 + ], + "score": 1.0, + "content": "2 Preliminaries, Background, and Definitions", + "type": "text" + } + ], + "index": 30 + } + ], + "index": 30 + }, + { + "type": "text", + "bbox": [ + 106, + 430, + 505, + 507 + ], + "lines": [ + { + "bbox": [ + 102, + 425, + 509, + 447 + ], + "spans": [ + { + "bbox": [ + 102, + 425, + 381, + 447 + ], + "score": 1.0, + "content": "The goal in RL is to optimize the infinite horizon discounted return", + "type": "text" + }, + { + "bbox": [ + 381, + 429, + 474, + 443 + ], + "score": 0.91, + "content": "\\begin{array} { r } { R = \\sum _ { t = 0 } ^ { \\infty } \\gamma ^ { t } r ( \\mathbf { s } _ { t } , \\mathbf { a } _ { t } 
) } \\end{array}", + "type": "inline_equation" + }, + { + "bbox": [ + 474, + 425, + 509, + 447 + ], + "score": 1.0, + "content": ", where", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 106, + 440, + 505, + 454 + ], + "spans": [ + { + "bbox": [ + 106, + 441, + 134, + 453 + ], + "score": 0.92, + "content": "r ( s , a )", + "type": "inline_equation" + }, + { + "bbox": [ + 135, + 440, + 379, + 454 + ], + "score": 1.0, + "content": "represents the reward function evaluated at a state-action pair", + "type": "text" + }, + { + "bbox": [ + 380, + 442, + 402, + 452 + ], + "score": 0.7, + "content": "( \\mathbf { s } , \\mathbf { a } )", + "type": "inline_equation" + }, + { + "bbox": [ + 403, + 440, + 505, + 454 + ], + "score": 1.0, + "content": ". We operate in the offline", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 104, + 450, + 505, + 465 + ], + "spans": [ + { + "bbox": [ + 104, + 450, + 305, + 465 + ], + "score": 1.0, + "content": "RL setting and are provided with a fixed dataset", + "type": "text" + }, + { + "bbox": [ + 306, + 452, + 405, + 464 + ], + "score": 0.9, + "content": "\\mathcal { D } = \\{ ( \\mathbf { s } , \\mathbf { a } , r ( \\mathbf { s } , \\mathbf { a } ) , \\mathbf { s } ^ { \\prime } ) \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 406, + 450, + 505, + 465 + ], + "score": 1.0, + "content": ", consisting of transition", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 462, + 505, + 476 + ], + "spans": [ + { + "bbox": [ + 105, + 462, + 318, + 476 + ], + "score": 1.0, + "content": "tuples obtained from rollouts under a behavior policy", + "type": "text" + }, + { + "bbox": [ + 318, + 464, + 350, + 475 + ], + "score": 0.88, + "content": "\\pi _ { \\beta } ( \\mathbf { a } | \\mathbf { s } )", + "type": "inline_equation" + }, + { + "bbox": [ + 351, + 462, + 505, + 476 + ], + "score": 1.0, + "content": ". 
Our goal is to obtain the best possible", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 473, + 506, + 486 + ], + "spans": [ + { + "bbox": [ + 105, + 473, + 308, + 486 + ], + "score": 1.0, + "content": "policy by only training on this fixed offline dataset", + "type": "text" + }, + { + "bbox": [ + 308, + 474, + 317, + 483 + ], + "score": 0.82, + "content": "\\mathcal { D }", + "type": "inline_equation" + }, + { + "bbox": [ + 317, + 473, + 506, + 486 + ], + "score": 1.0, + "content": ", with no access to online rollouts. We focus on", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 484, + 505, + 496 + ], + "spans": [ + { + "bbox": [ + 105, + 484, + 313, + 496 + ], + "score": 1.0, + "content": "conservative offline RL algorithms that modify the", + "type": "text" + }, + { + "bbox": [ + 313, + 485, + 322, + 496 + ], + "score": 0.26, + "content": "\\mathrm { Q }", + "type": "inline_equation" + }, + { + "bbox": [ + 322, + 484, + 505, + 496 + ], + "score": 1.0, + "content": "-function to penalize distributional shift, with", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 106, + 496, + 502, + 508 + ], + "spans": [ + { + "bbox": [ + 106, + 496, + 502, + 508 + ], + "score": 1.0, + "content": "most experiments on CQL [2], though we also adapt our workflow to BRAC [16] in Appendix F.1.", + "type": "text" + } + ], + "index": 37 + } + ], + "index": 34 + }, + { + "type": "text", + "bbox": [ + 106, + 511, + 505, + 588 + ], + "lines": [ + { + "bbox": [ + 106, + 511, + 504, + 524 + ], + "spans": [ + { + "bbox": [ + 106, + 511, + 468, + 524 + ], + "score": 1.0, + "content": "Conservative Q-learning (CQL). 
The actor-critic formulation of CQL trains a Q-function", + "type": "text" + }, + { + "bbox": [ + 468, + 511, + 504, + 523 + ], + "score": 0.9, + "content": "Q _ { \\boldsymbol { \\theta } } ( \\mathbf { s } , \\mathbf { a } )", + "type": "inline_equation" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 522, + 506, + 537 + ], + "spans": [ + { + "bbox": [ + 105, + 522, + 198, + 537 + ], + "score": 1.0, + "content": "with a separate policy", + "type": "text" + }, + { + "bbox": [ + 199, + 523, + 231, + 535 + ], + "score": 0.93, + "content": "\\pi _ { \\phi } ( \\mathbf { a } | \\mathbf { s } )", + "type": "inline_equation" + }, + { + "bbox": [ + 231, + 522, + 363, + 537 + ], + "score": 1.0, + "content": ", which maximizes the expected", + "type": "text" + }, + { + "bbox": [ + 363, + 523, + 372, + 534 + ], + "score": 0.33, + "content": "\\mathrm { Q }", + "type": "inline_equation" + }, + { + "bbox": [ + 373, + 522, + 399, + 537 + ], + "score": 1.0, + "content": "-value", + "type": "text" + }, + { + "bbox": [ + 399, + 523, + 485, + 536 + ], + "score": 0.92, + "content": "\\begin{array} { r } { \\mathbb { E } _ { \\mathbf { s } \\sim \\mathcal { D } , \\mathbf { a } \\sim \\pi _ { \\phi } } \\left[ Q _ { \\theta } ( \\mathbf { s } , \\mathbf { a } ) \\right] } \\end{array}", + "type": "inline_equation" + }, + { + "bbox": [ + 486, + 522, + 506, + 537 + ], + "score": 1.0, + "content": "like", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 106, + 533, + 505, + 546 + ], + "spans": [ + { + "bbox": [ + 106, + 533, + 505, + 546 + ], + "score": 1.0, + "content": "other standard actor-critic deep RL methods [17, 18, 19]. 
However, in addition to the standard TD", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 544, + 505, + 557 + ], + "spans": [ + { + "bbox": [ + 105, + 545, + 128, + 557 + ], + "score": 1.0, + "content": "error", + "type": "text" + }, + { + "bbox": [ + 129, + 544, + 159, + 556 + ], + "score": 0.92, + "content": "{ \\mathcal { L } } _ { \\mathrm { T D } } ( \\theta )", + "type": "inline_equation" + }, + { + "bbox": [ + 159, + 545, + 331, + 557 + ], + "score": 1.0, + "content": "(in blue below), CQL applies a regularizer", + "type": "text" + }, + { + "bbox": [ + 332, + 545, + 353, + 556 + ], + "score": 0.91, + "content": "{ \\mathcal { R } } ( \\theta )", + "type": "inline_equation" + }, + { + "bbox": [ + 354, + 545, + 505, + 557 + ], + "score": 1.0, + "content": "(in red below) to prevent overestima-", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 555, + 506, + 568 + ], + "spans": [ + { + "bbox": [ + 105, + 555, + 136, + 568 + ], + "score": 1.0, + "content": "tion of", + "type": "text" + }, + { + "bbox": [ + 136, + 556, + 145, + 567 + ], + "score": 0.29, + "content": "\\mathrm { Q }", + "type": "inline_equation" + }, + { + "bbox": [ + 145, + 555, + 433, + 568 + ], + "score": 1.0, + "content": "-values for out-of-distribution (OOD) actions. 
This term minimizes the", + "type": "text" + }, + { + "bbox": [ + 434, + 556, + 443, + 567 + ], + "score": 0.39, + "content": "\\mathrm { Q }", + "type": "inline_equation" + }, + { + "bbox": [ + 443, + 555, + 506, + 568 + ], + "score": 1.0, + "content": "-values under a", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 565, + 506, + 579 + ], + "spans": [ + { + "bbox": [ + 105, + 565, + 155, + 579 + ], + "score": 1.0, + "content": "distribution", + "type": "text" + }, + { + "bbox": [ + 155, + 566, + 183, + 578 + ], + "score": 0.92, + "content": "\\mu ( \\mathbf { a } | \\mathbf { s } )", + "type": "inline_equation" + }, + { + "bbox": [ + 183, + 565, + 465, + 579 + ], + "score": 1.0, + "content": ", which is automatically chosen to pick actions a with high Q-values", + "type": "text" + }, + { + "bbox": [ + 465, + 566, + 501, + 578 + ], + "score": 0.92, + "content": "Q _ { \\boldsymbol { \\theta } } ( \\mathbf { s } , \\mathbf { a } )", + "type": "inline_equation" + }, + { + "bbox": [ + 501, + 565, + 506, + 579 + ], + "score": 1.0, + "content": ",", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 106, + 577, + 447, + 590 + ], + "spans": [ + { + "bbox": [ + 106, + 577, + 447, + 590 + ], + "score": 1.0, + "content": "and counterbalances this term by maximizing the values of the actions in the dataset:", + "type": "text" + } + ], + "index": 44 + } + ], + "index": 41 + }, + { + "type": "interline_equation", + "bbox": [ + 100, + 587, + 496, + 610 + ], + "lines": [ + { + "bbox": [ + 100, + 587, + 496, + 610 + ], + "spans": [ + { + "bbox": [ + 100, + 587, + 496, + 610 + ], + "score": 0.93, + "content": "\\begin{array} { r l } { \\underset { \\theta } { \\mathrm { m i n } } \\ : \\ : \\ : } & { { } \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : } \\\\ { \\mathrm { m i n } \\ : \\ : \\ : \\ : \\ : } 
& { { } \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : } & { \\mathrm { ~ \\ : ~ \\ : \\ : \\ : \\ : } \\ : \\ : \\ : } \\end{array}", + "type": "interline_equation", + "image_path": "14481712837aa8b4041c0afbae4d0a62161a885580f62a118ffd01fc68c921ec.jpg" + } + ] + } + ], + "index": 45, + "virtual_lines": [ + { + "bbox": [ + 100, + 587, + 496, + 610 + ], + "spans": [], + "index": 45 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 616, + 505, + 662 + ], + "lines": [ + { + "bbox": [ + 105, + 615, + 505, + 629 + ], + "spans": [ + { + "bbox": [ + 105, + 615, + 133, + 629 + ], + "score": 1.0, + "content": "where", + "type": "text" + }, + { + "bbox": [ + 133, + 616, + 176, + 628 + ], + "score": 0.92, + "content": "B ^ { \\pi } \\bar { Q } ( \\mathbf { s } , \\mathbf { a } )", + "type": "inline_equation" + }, + { + "bbox": [ + 176, + 615, + 433, + 629 + ], + "score": 1.0, + "content": "is the Bellman backup operator with a delayed target Q-function,", + "type": "text" + }, + { + "bbox": [ + 433, + 615, + 442, + 628 + ], + "score": 0.54, + "content": "\\bar { Q }", + "type": "inline_equation" + }, + { + "bbox": [ + 443, + 615, + 445, + 629 + ], + "score": 1.0, + "content": ":", + "type": "text" + }, + { + "bbox": [ + 446, + 615, + 505, + 629 + ], + "score": 0.69, + "content": "B ^ { \\pi } \\bar { Q } ( { \\bf s } , { \\bf a } ) : =", + "type": "inline_equation" + } + ], + "index": 46 + }, + { + "bbox": [ + 107, + 627, + 506, + 641 + ], + "spans": [ + { + "bbox": [ + 107, + 627, + 241, + 641 + ], + "score": 0.91, + "content": "r ( \\mathbf { s } , \\mathbf { a } ) + \\gamma \\mathbb { E } _ { \\mathbf { a ^ { \\prime } } \\sim \\pi ( \\mathbf { a ^ { \\prime } } | \\mathbf { s ^ { \\prime } } ) } [ \\bar { Q } ( \\mathbf { s ^ { \\prime } } , \\mathbf { a ^ { \\prime } } ) ]", + "type": "inline_equation" + }, + { + "bbox": [ + 242, + 627, + 361, + 641 + ], + "score": 1.0, + "content": ". 
In practice, CQL computes", + "type": "text" + }, + { + "bbox": [ + 361, + 628, + 389, + 640 + ], + "score": 0.92, + "content": "\\mu ( \\mathbf { a } | \\mathbf { s } )", + "type": "inline_equation" + }, + { + "bbox": [ + 389, + 627, + 506, + 641 + ], + "score": 1.0, + "content": "using actions sampled from", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 106, + 640, + 505, + 652 + ], + "spans": [ + { + "bbox": [ + 106, + 640, + 148, + 652 + ], + "score": 1.0, + "content": "the policy", + "type": "text" + }, + { + "bbox": [ + 149, + 641, + 181, + 652 + ], + "score": 0.88, + "content": "\\pi _ { \\phi } ( \\mathbf { a } | \\mathbf { s } )", + "type": "inline_equation" + }, + { + "bbox": [ + 181, + 640, + 505, + 652 + ], + "score": 1.0, + "content": ". More discussion of CQL is in Appendix B. In this paper, we will utilize CQL as", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 105, + 650, + 438, + 662 + ], + "spans": [ + { + "bbox": [ + 105, + 650, + 438, + 662 + ], + "score": 1.0, + "content": "a base algorithm that our workflow intends to tune, but we also extend it to BRAC.", + "type": "text" + } + ], + "index": 49 + } + ], + "index": 47.5 + }, + { + "type": "text", + "bbox": [ + 107, + 667, + 505, + 722 + ], + "lines": [ + { + "bbox": [ + 106, + 667, + 505, + 679 + ], + "spans": [ + { + "bbox": [ + 106, + 667, + 505, + 679 + ], + "score": 1.0, + "content": "Overfitting and underfitting in CQL. Conservative offline RL algorithms [2, 20] like CQL can", + "type": "text" + } + ], + "index": 50 + }, + { + "bbox": [ + 106, + 678, + 505, + 690 + ], + "spans": [ + { + "bbox": [ + 106, + 678, + 505, + 690 + ], + "score": 1.0, + "content": "be sensitive to design choices, including number of gradient steps for training [21, 22] and network", + "type": "text" + } + ], + "index": 51 + }, + { + "bbox": [ + 105, + 689, + 505, + 702 + ], + "spans": [ + { + "bbox": [ + 105, + 689, + 505, + 702 + ], + "score": 1.0, + "content": "capacity. 
These challenges are also present in supervised learning, but supervised learning methods", + "type": "text" + } + ], + "index": 52 + }, + { + "bbox": [ + 105, + 699, + 505, + 713 + ], + "spans": [ + { + "bbox": [ + 105, + 699, + 505, + 713 + ], + "score": 1.0, + "content": "benefit from a simple and powerful workflow that involves using training error and validation error", + "type": "text" + } + ], + "index": 53 + }, + { + "bbox": [ + 106, + 711, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 106, + 711, + 505, + 723 + ], + "score": 1.0, + "content": "to characterize overfitting and underfitting. A practitioner can then make tuning choices based on", + "type": "text" + } + ], + "index": 54 + } + ], + "index": 52 + } + ], + "page_idx": 1, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 302, + 741, + 309, + 750 + ], + "lines": [ + { + "bbox": [ + 301, + 740, + 310, + 753 + ], + "spans": [ + { + "bbox": [ + 301, + 740, + 310, + 753 + ], + "score": 1.0, + "content": "2", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 72, + 505, + 161 + ], + "lines": [ + { + "bbox": [ + 106, + 73, + 505, + 85 + ], + "spans": [ + { + "bbox": [ + 106, + 73, + 505, + 85 + ], + "score": 1.0, + "content": "A number of prior works have studied model selection in offline RL by utilizing off-policy eval-", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 84, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 84, + 504, + 95 + ], + "score": 1.0, + "content": "uation (OPE) methods [6] to estimate policy performance. These methods can be based either on", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 93, + 505, + 108 + ], + "spans": [ + { + "bbox": [ + 104, + 93, + 505, + 108 + ], + "score": 1.0, + "content": "model or value learning [7, 8, 9, 10] or importance sampling [6, 11, 12, 13]. 
However, developing", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 105, + 505, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 105, + 505, + 118 + ], + "score": 1.0, + "content": "reliable OPE methods is itself an open problem, and modern OPE methods themselves suffer from", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 116, + 506, + 129 + ], + "spans": [ + { + "bbox": [ + 105, + 116, + 506, + 129 + ], + "score": 1.0, + "content": "hyperparameter selection challenges (see Fu et al. [14] for an empirical study). Moreover, accurate", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 127, + 505, + 139 + ], + "spans": [ + { + "bbox": [ + 106, + 127, + 505, + 139 + ], + "score": 1.0, + "content": "off-policy evaluation is likely not necessary to simply tune algorithms for best performance – we do", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 138, + 506, + 150 + ], + "spans": [ + { + "bbox": [ + 105, + 138, + 506, + 150 + ], + "score": 1.0, + "content": "not need a precise estimate of how good our policy is, but rather a workflow that enables us to best", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 150, + 345, + 161 + ], + "spans": [ + { + "bbox": [ + 106, + 150, + 345, + 161 + ], + "score": 1.0, + "content": "improve it by adjusting various algorithm hyperparameters.", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 3.5, + "bbox_fs": [ + 104, + 73, + 506, + 161 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 165, + 505, + 308 + ], + "lines": [ + { + "bbox": [ + 105, + 164, + 506, + 178 + ], + "spans": [ + { + "bbox": [ + 105, + 164, + 506, + 178 + ], + "score": 1.0, + "content": "In this paper, we devise a practical workflow for selecting regularizers, model architectures, and", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 177, + 506, + 189 + ], + "spans": [ + { + "bbox": [ + 105, + 177, + 506, + 189 + ], + "score": 
1.0, + "content": "policy checkpoints for offline RL methods in robotic learning settings. We focus on a specific class", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 187, + 506, + 200 + ], + "spans": [ + { + "bbox": [ + 105, + 187, + 506, + 200 + ], + "score": 1.0, + "content": "of conservative offline RL algorithms [15, 2] that regularize the Q-function, but also show that our", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 198, + 506, + 212 + ], + "spans": [ + { + "bbox": [ + 105, + 198, + 506, + 212 + ], + "score": 1.0, + "content": "workflow can be effectively applied to policy constraint methods [16]. Our aim is not to focus on", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 209, + 506, + 222 + ], + "spans": [ + { + "bbox": [ + 105, + 209, + 506, + 222 + ], + "score": 1.0, + "content": "complete off-policy evaluation or to devise a new approach for off-policy evaluation, but rather to", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 218, + 505, + 234 + ], + "spans": [ + { + "bbox": [ + 105, + 218, + 505, + 234 + ], + "score": 1.0, + "content": "adopt a strategy similar to the one in supervised learning. 
Analogously to how supervised learning", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 231, + 505, + 244 + ], + "spans": [ + { + "bbox": [ + 105, + 231, + 505, + 244 + ], + "score": 1.0, + "content": "practitioners can detect overfitting and underfitting by tracking training and validation losses, and", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 241, + 505, + 254 + ], + "spans": [ + { + "bbox": [ + 105, + 241, + 505, + 254 + ], + "score": 1.0, + "content": "then adjust hyperparameters based on these metrics, our workflow (see Figure 1 for a schematic) first", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 252, + 506, + 266 + ], + "spans": [ + { + "bbox": [ + 105, + 252, + 506, + 266 + ], + "score": 1.0, + "content": "defines and characterizes overfitting and underfitting, proposes metrics and conditions that users can", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 263, + 505, + 276 + ], + "spans": [ + { + "bbox": [ + 105, + 263, + 505, + 276 + ], + "score": 1.0, + "content": "track to determine if an offline RL exhibits overfitting or underfitting, and then utilizes these metrics", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 272, + 505, + 289 + ], + "spans": [ + { + "bbox": [ + 104, + 272, + 505, + 289 + ], + "score": 1.0, + "content": "to inform design decisions pertaining to neural net architectures, regularization, and early stopping.", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 285, + 506, + 298 + ], + "spans": [ + { + "bbox": [ + 105, + 285, + 506, + 298 + ], + "score": 1.0, + "content": "This protocol is intended to act as a β€œuser’s manual” for a practitioner, with guidelines for how to", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 106, + 297, + 442, + 308 + ], + "spans": [ + { + "bbox": [ + 106, + 297, + 442, + 308 + ], + "score": 1.0, + "content": "modify algorithm parameters for best 
results without real-world evaluation rollouts.", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 14, + "bbox_fs": [ + 104, + 164, + 506, + 308 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 312, + 505, + 412 + ], + "lines": [ + { + "bbox": [ + 106, + 312, + 505, + 325 + ], + "spans": [ + { + "bbox": [ + 106, + 312, + 505, + 325 + ], + "score": 1.0, + "content": "The primary contribution of this paper is a simple yet effective workflow for robotic offline RL.", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 323, + 505, + 336 + ], + "spans": [ + { + "bbox": [ + 105, + 323, + 505, + 336 + ], + "score": 1.0, + "content": "We propose metrics and protocols to assist practitioners in selecting policy checkpoints, regulariza-", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 334, + 505, + 348 + ], + "spans": [ + { + "bbox": [ + 105, + 334, + 505, + 348 + ], + "score": 1.0, + "content": "tion parameters, and model architectures for conservative offline RL algorithms such as CQL [2]", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 345, + 506, + 358 + ], + "spans": [ + { + "bbox": [ + 106, + 345, + 506, + 358 + ], + "score": 1.0, + "content": "and BRAC [16]. We empirically verify the efficacy of our proposed workflow on simulated robotic", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 356, + 506, + 369 + ], + "spans": [ + { + "bbox": [ + 105, + 356, + 506, + 369 + ], + "score": 1.0, + "content": "manipulation problems as well as three real-world robotic manipulation problems on two different", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 367, + 505, + 380 + ], + "spans": [ + { + "bbox": [ + 105, + 367, + 505, + 380 + ], + "score": 1.0, + "content": "robots, with diverse objects, pixel observations, and sparse binary reward supervision. 
Experimen-", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 106, + 378, + 504, + 390 + ], + "spans": [ + { + "bbox": [ + 106, + 378, + 504, + 390 + ], + "score": 1.0, + "content": "tally, we evaluate our method on two real-world robots (the Sawyer and WidowX robots), and one", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 388, + 505, + 402 + ], + "spans": [ + { + "bbox": [ + 105, + 388, + 505, + 402 + ], + "score": 1.0, + "content": "realistic simulated tasks. Our approach is effective in all of these cases, and on two tasks with the", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 399, + 473, + 412 + ], + "spans": [ + { + "bbox": [ + 105, + 399, + 447, + 412 + ], + "score": 1.0, + "content": "Sawyer robot that initially fail completely, our workflow improves the success rate to", + "type": "text" + }, + { + "bbox": [ + 447, + 400, + 468, + 411 + ], + "score": 0.87, + "content": "70 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 468, + 399, + 473, + 412 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 25, + "bbox_fs": [ + 105, + 312, + 506, + 412 + ] + }, + { + "type": "title", + "bbox": [ + 106, + 416, + 348, + 429 + ], + "lines": [ + { + "bbox": [ + 104, + 415, + 348, + 431 + ], + "spans": [ + { + "bbox": [ + 104, + 415, + 348, + 431 + ], + "score": 1.0, + "content": "2 Preliminaries, Background, and Definitions", + "type": "text" + } + ], + "index": 30 + } + ], + "index": 30 + }, + { + "type": "text", + "bbox": [ + 106, + 430, + 505, + 507 + ], + "lines": [ + { + "bbox": [ + 102, + 425, + 509, + 447 + ], + "spans": [ + { + "bbox": [ + 102, + 425, + 381, + 447 + ], + "score": 1.0, + "content": "The goal in RL is to optimize the infinite horizon discounted return", + "type": "text" + }, + { + "bbox": [ + 381, + 429, + 474, + 443 + ], + "score": 0.91, + "content": "\\begin{array} { r } { R = \\sum _ { t = 0 } ^ { \\infty } \\gamma ^ { t } r ( 
\\mathbf { s } _ { t } , \\mathbf { a } _ { t } ) } \\end{array}", + "type": "inline_equation" + }, + { + "bbox": [ + 474, + 425, + 509, + 447 + ], + "score": 1.0, + "content": ", where", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 106, + 440, + 505, + 454 + ], + "spans": [ + { + "bbox": [ + 106, + 441, + 134, + 453 + ], + "score": 0.92, + "content": "r ( s , a )", + "type": "inline_equation" + }, + { + "bbox": [ + 135, + 440, + 379, + 454 + ], + "score": 1.0, + "content": "represents the reward function evaluated at a state-action pair", + "type": "text" + }, + { + "bbox": [ + 380, + 442, + 402, + 452 + ], + "score": 0.7, + "content": "( \\mathbf { s } , \\mathbf { a } )", + "type": "inline_equation" + }, + { + "bbox": [ + 403, + 440, + 505, + 454 + ], + "score": 1.0, + "content": ". We operate in the offline", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 104, + 450, + 505, + 465 + ], + "spans": [ + { + "bbox": [ + 104, + 450, + 305, + 465 + ], + "score": 1.0, + "content": "RL setting and are provided with a fixed dataset", + "type": "text" + }, + { + "bbox": [ + 306, + 452, + 405, + 464 + ], + "score": 0.9, + "content": "\\mathcal { D } = \\{ ( \\mathbf { s } , \\mathbf { a } , r ( \\mathbf { s } , \\mathbf { a } ) , \\mathbf { s } ^ { \\prime } ) \\}", + "type": "inline_equation" + }, + { + "bbox": [ + 406, + 450, + 505, + 465 + ], + "score": 1.0, + "content": ", consisting of transition", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 462, + 505, + 476 + ], + "spans": [ + { + "bbox": [ + 105, + 462, + 318, + 476 + ], + "score": 1.0, + "content": "tuples obtained from rollouts under a behavior policy", + "type": "text" + }, + { + "bbox": [ + 318, + 464, + 350, + 475 + ], + "score": 0.88, + "content": "\\pi _ { \\beta } ( \\mathbf { a } | \\mathbf { s } )", + "type": "inline_equation" + }, + { + "bbox": [ + 351, + 462, + 505, + 476 + ], + "score": 1.0, + "content": ". 
Our goal is to obtain the best possible", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 473, + 506, + 486 + ], + "spans": [ + { + "bbox": [ + 105, + 473, + 308, + 486 + ], + "score": 1.0, + "content": "policy by only training on this fixed offline dataset", + "type": "text" + }, + { + "bbox": [ + 308, + 474, + 317, + 483 + ], + "score": 0.82, + "content": "\\mathcal { D }", + "type": "inline_equation" + }, + { + "bbox": [ + 317, + 473, + 506, + 486 + ], + "score": 1.0, + "content": ", with no access to online rollouts. We focus on", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 484, + 505, + 496 + ], + "spans": [ + { + "bbox": [ + 105, + 484, + 313, + 496 + ], + "score": 1.0, + "content": "conservative offline RL algorithms that modify the", + "type": "text" + }, + { + "bbox": [ + 313, + 485, + 322, + 496 + ], + "score": 0.26, + "content": "\\mathrm { Q }", + "type": "inline_equation" + }, + { + "bbox": [ + 322, + 484, + 505, + 496 + ], + "score": 1.0, + "content": "-function to penalize distributional shift, with", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 106, + 496, + 502, + 508 + ], + "spans": [ + { + "bbox": [ + 106, + 496, + 502, + 508 + ], + "score": 1.0, + "content": "most experiments on CQL [2], though we also adapt our workflow to BRAC [16] in Appendix F.1.", + "type": "text" + } + ], + "index": 37 + } + ], + "index": 34, + "bbox_fs": [ + 102, + 425, + 509, + 508 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 511, + 505, + 588 + ], + "lines": [ + { + "bbox": [ + 106, + 511, + 504, + 524 + ], + "spans": [ + { + "bbox": [ + 106, + 511, + 468, + 524 + ], + "score": 1.0, + "content": "Conservative Q-learning (CQL). 
The actor-critic formulation of CQL trains a Q-function", + "type": "text" + }, + { + "bbox": [ + 468, + 511, + 504, + 523 + ], + "score": 0.9, + "content": "Q _ { \\boldsymbol { \\theta } } ( \\mathbf { s } , \\mathbf { a } )", + "type": "inline_equation" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 522, + 506, + 537 + ], + "spans": [ + { + "bbox": [ + 105, + 522, + 198, + 537 + ], + "score": 1.0, + "content": "with a separate policy", + "type": "text" + }, + { + "bbox": [ + 199, + 523, + 231, + 535 + ], + "score": 0.93, + "content": "\\pi _ { \\phi } ( \\mathbf { a } | \\mathbf { s } )", + "type": "inline_equation" + }, + { + "bbox": [ + 231, + 522, + 363, + 537 + ], + "score": 1.0, + "content": ", which maximizes the expected", + "type": "text" + }, + { + "bbox": [ + 363, + 523, + 372, + 534 + ], + "score": 0.33, + "content": "\\mathrm { Q }", + "type": "inline_equation" + }, + { + "bbox": [ + 373, + 522, + 399, + 537 + ], + "score": 1.0, + "content": "-value", + "type": "text" + }, + { + "bbox": [ + 399, + 523, + 485, + 536 + ], + "score": 0.92, + "content": "\\begin{array} { r } { \\mathbb { E } _ { \\mathbf { s } \\sim \\mathcal { D } , \\mathbf { a } \\sim \\pi _ { \\phi } } \\left[ Q _ { \\theta } ( \\mathbf { s } , \\mathbf { a } ) \\right] } \\end{array}", + "type": "inline_equation" + }, + { + "bbox": [ + 486, + 522, + 506, + 537 + ], + "score": 1.0, + "content": "like", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 106, + 533, + 505, + 546 + ], + "spans": [ + { + "bbox": [ + 106, + 533, + 505, + 546 + ], + "score": 1.0, + "content": "other standard actor-critic deep RL methods [17, 18, 19]. 
However, in addition to the standard TD", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 544, + 505, + 557 + ], + "spans": [ + { + "bbox": [ + 105, + 545, + 128, + 557 + ], + "score": 1.0, + "content": "error", + "type": "text" + }, + { + "bbox": [ + 129, + 544, + 159, + 556 + ], + "score": 0.92, + "content": "{ \\mathcal { L } } _ { \\mathrm { T D } } ( \\theta )", + "type": "inline_equation" + }, + { + "bbox": [ + 159, + 545, + 331, + 557 + ], + "score": 1.0, + "content": "(in blue below), CQL applies a regularizer", + "type": "text" + }, + { + "bbox": [ + 332, + 545, + 353, + 556 + ], + "score": 0.91, + "content": "{ \\mathcal { R } } ( \\theta )", + "type": "inline_equation" + }, + { + "bbox": [ + 354, + 545, + 505, + 557 + ], + "score": 1.0, + "content": "(in red below) to prevent overestima-", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 555, + 506, + 568 + ], + "spans": [ + { + "bbox": [ + 105, + 555, + 136, + 568 + ], + "score": 1.0, + "content": "tion of", + "type": "text" + }, + { + "bbox": [ + 136, + 556, + 145, + 567 + ], + "score": 0.29, + "content": "\\mathrm { Q }", + "type": "inline_equation" + }, + { + "bbox": [ + 145, + 555, + 433, + 568 + ], + "score": 1.0, + "content": "-values for out-of-distribution (OOD) actions. 
This term minimizes the", + "type": "text" + }, + { + "bbox": [ + 434, + 556, + 443, + 567 + ], + "score": 0.39, + "content": "\\mathrm { Q }", + "type": "inline_equation" + }, + { + "bbox": [ + 443, + 555, + 506, + 568 + ], + "score": 1.0, + "content": "-values under a", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 565, + 506, + 579 + ], + "spans": [ + { + "bbox": [ + 105, + 565, + 155, + 579 + ], + "score": 1.0, + "content": "distribution", + "type": "text" + }, + { + "bbox": [ + 155, + 566, + 183, + 578 + ], + "score": 0.92, + "content": "\\mu ( \\mathbf { a } | \\mathbf { s } )", + "type": "inline_equation" + }, + { + "bbox": [ + 183, + 565, + 465, + 579 + ], + "score": 1.0, + "content": ", which is automatically chosen to pick actions a with high Q-values", + "type": "text" + }, + { + "bbox": [ + 465, + 566, + 501, + 578 + ], + "score": 0.92, + "content": "Q _ { \\boldsymbol { \\theta } } ( \\mathbf { s } , \\mathbf { a } )", + "type": "inline_equation" + }, + { + "bbox": [ + 501, + 565, + 506, + 579 + ], + "score": 1.0, + "content": ",", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 106, + 577, + 447, + 590 + ], + "spans": [ + { + "bbox": [ + 106, + 577, + 447, + 590 + ], + "score": 1.0, + "content": "and counterbalances this term by maximizing the values of the actions in the dataset:", + "type": "text" + } + ], + "index": 44 + } + ], + "index": 41, + "bbox_fs": [ + 105, + 511, + 506, + 590 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 100, + 587, + 496, + 610 + ], + "lines": [ + { + "bbox": [ + 100, + 587, + 496, + 610 + ], + "spans": [ + { + "bbox": [ + 100, + 587, + 496, + 610 + ], + "score": 0.93, + "content": "\\begin{array} { r l } { \\underset { \\theta } { \\mathrm { m i n } } \\ : \\ : \\ : } & { { } \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : } \\\\ { 
\\mathrm { m i n } \\ : \\ : \\ : \\ : \\ : } & { { } \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : } & { \\mathrm { ~ \\ : ~ \\ : \\ : \\ : \\ : } \\ : \\ : \\ : } \\end{array}", + "type": "interline_equation", + "image_path": "14481712837aa8b4041c0afbae4d0a62161a885580f62a118ffd01fc68c921ec.jpg" + } + ] + } + ], + "index": 45, + "virtual_lines": [ + { + "bbox": [ + 100, + 587, + 496, + 610 + ], + "spans": [], + "index": 45 + } + ] + }, + { + "type": "text", + "bbox": [ + 106, + 616, + 505, + 662 + ], + "lines": [ + { + "bbox": [ + 105, + 615, + 505, + 629 + ], + "spans": [ + { + "bbox": [ + 105, + 615, + 133, + 629 + ], + "score": 1.0, + "content": "where", + "type": "text" + }, + { + "bbox": [ + 133, + 616, + 176, + 628 + ], + "score": 0.92, + "content": "B ^ { \\pi } \\bar { Q } ( \\mathbf { s } , \\mathbf { a } )", + "type": "inline_equation" + }, + { + "bbox": [ + 176, + 615, + 433, + 629 + ], + "score": 1.0, + "content": "is the Bellman backup operator with a delayed target Q-function,", + "type": "text" + }, + { + "bbox": [ + 433, + 615, + 442, + 628 + ], + "score": 0.54, + "content": "\\bar { Q }", + "type": "inline_equation" + }, + { + "bbox": [ + 443, + 615, + 445, + 629 + ], + "score": 1.0, + "content": ":", + "type": "text" + }, + { + "bbox": [ + 446, + 615, + 505, + 629 + ], + "score": 0.69, + "content": "B ^ { \\pi } \\bar { Q } ( { \\bf s } , { \\bf a } ) : =", + "type": "inline_equation" + } + ], + "index": 46 + }, + { + "bbox": [ + 107, + 627, + 506, + 641 + ], + "spans": [ + { + "bbox": [ + 107, + 627, + 241, + 641 + ], + "score": 0.91, + "content": "r ( \\mathbf { s } , \\mathbf { a } ) + \\gamma \\mathbb { E } _ { \\mathbf { a ^ { \\prime } } \\sim \\pi ( \\mathbf { a ^ { \\prime } } | \\mathbf { s ^ { \\prime } } ) } [ \\bar { Q } ( \\mathbf { s ^ { \\prime } } , \\mathbf { a ^ { \\prime } } ) ]", + "type": "inline_equation" + }, + { + "bbox": [ + 242, + 627, + 361, + 641 + ], + "score": 1.0, + "content": 
". In practice, CQL computes", + "type": "text" + }, + { + "bbox": [ + 361, + 628, + 389, + 640 + ], + "score": 0.92, + "content": "\\mu ( \\mathbf { a } | \\mathbf { s } )", + "type": "inline_equation" + }, + { + "bbox": [ + 389, + 627, + 506, + 641 + ], + "score": 1.0, + "content": "using actions sampled from", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 106, + 640, + 505, + 652 + ], + "spans": [ + { + "bbox": [ + 106, + 640, + 148, + 652 + ], + "score": 1.0, + "content": "the policy", + "type": "text" + }, + { + "bbox": [ + 149, + 641, + 181, + 652 + ], + "score": 0.88, + "content": "\\pi _ { \\phi } ( \\mathbf { a } | \\mathbf { s } )", + "type": "inline_equation" + }, + { + "bbox": [ + 181, + 640, + 505, + 652 + ], + "score": 1.0, + "content": ". More discussion of CQL is in Appendix B. In this paper, we will utilize CQL as", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 105, + 650, + 438, + 662 + ], + "spans": [ + { + "bbox": [ + 105, + 650, + 438, + 662 + ], + "score": 1.0, + "content": "a base algorithm that our workflow intends to tune, but we also extend it to BRAC.", + "type": "text" + } + ], + "index": 49 + } + ], + "index": 47.5, + "bbox_fs": [ + 105, + 615, + 506, + 662 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 667, + 505, + 722 + ], + "lines": [ + { + "bbox": [ + 106, + 667, + 505, + 679 + ], + "spans": [ + { + "bbox": [ + 106, + 667, + 505, + 679 + ], + "score": 1.0, + "content": "Overfitting and underfitting in CQL. 
Conservative offline RL algorithms [2, 20] like CQL can", + "type": "text" + } + ], + "index": 50 + }, + { + "bbox": [ + 106, + 678, + 505, + 690 + ], + "spans": [ + { + "bbox": [ + 106, + 678, + 505, + 690 + ], + "score": 1.0, + "content": "be sensitive to design choices, including number of gradient steps for training [21, 22] and network", + "type": "text" + } + ], + "index": 51 + }, + { + "bbox": [ + 105, + 689, + 505, + 702 + ], + "spans": [ + { + "bbox": [ + 105, + 689, + 505, + 702 + ], + "score": 1.0, + "content": "capacity. These challenges are also present in supervised learning, but supervised learning methods", + "type": "text" + } + ], + "index": 52 + }, + { + "bbox": [ + 105, + 699, + 505, + 713 + ], + "spans": [ + { + "bbox": [ + 105, + 699, + 505, + 713 + ], + "score": 1.0, + "content": "benefit from a simple and powerful workflow that involves using training error and validation error", + "type": "text" + } + ], + "index": 53 + }, + { + "bbox": [ + 106, + 711, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 106, + 711, + 505, + 723 + ], + "score": 1.0, + "content": "to characterize overfitting and underfitting. A practitioner can then make tuning choices based on", + "type": "text" + } + ], + "index": 54 + }, + { + "bbox": [ + 106, + 73, + 505, + 84 + ], + "spans": [ + { + "bbox": [ + 106, + 73, + 505, + 84 + ], + "score": 1.0, + "content": "these characterizations. 
To derive an analogous workflow for offline RL, we first ask: what do", + "type": "text", + "cross_page": true + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 83, + 455, + 96 + ], + "spans": [ + { + "bbox": [ + 106, + 83, + 455, + 96 + ], + "score": 1.0, + "content": "overfitting and underfitting actually mean for the case of conservative offline RL?", + "type": "text", + "cross_page": true + } + ], + "index": 1 + } + ], + "index": 52, + "bbox_fs": [ + 105, + 667, + 505, + 723 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 105, + 72, + 504, + 95 + ], + "lines": [ + { + "bbox": [ + 106, + 73, + 505, + 84 + ], + "spans": [ + { + "bbox": [ + 106, + 73, + 505, + 84 + ], + "score": 1.0, + "content": "these characterizations. To derive an analogous workflow for offline RL, we first ask: what do", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 83, + 455, + 96 + ], + "spans": [ + { + "bbox": [ + 106, + 83, + 455, + 96 + ], + "score": 1.0, + "content": "overfitting and underfitting actually mean for the case of conservative offline RL?", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 0.5 + }, + { + "type": "text", + "bbox": [ + 107, + 100, + 504, + 122 + ], + "lines": [ + { + "bbox": [ + 106, + 100, + 504, + 112 + ], + "spans": [ + { + "bbox": [ + 106, + 100, + 504, + 112 + ], + "score": 1.0, + "content": "To define overfitting and underfitting generically for any conservative offline RL method, we con-", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 110, + 364, + 123 + ], + "spans": [ + { + "bbox": [ + 105, + 110, + 364, + 123 + ], + "score": 1.0, + "content": "sider an abstract optimization formulation for such methods [2]:", + "type": "text" + } + ], + "index": 3 + } + ], + "index": 2.5 + }, + { + "type": "interline_equation", + "bbox": [ + 130, + 123, + 284, + 140 + ], + "lines": [ + { + "bbox": [ + 130, + 123, + 284, + 140 + ], + "spans": [ + { + "bbox": [ + 130, + 123, + 284, 
+ 140 + ], + "score": 0.85, + "content": "\\pi ^ { * } : = \\arg \\operatorname* { m a x } _ { \\pi } ~ J _ { \\mathcal { D } } ( \\pi ) - \\alpha D ( \\pi , \\pi _ { \\beta } )", + "type": "interline_equation", + "image_path": "5f87e2d9edb1ebc744585eaf9bbbefae1189640155a200a885d288ec1f092126.jpg" + } + ] + } + ], + "index": 4, + "virtual_lines": [ + { + "bbox": [ + 130, + 123, + 284, + 140 + ], + "spans": [], + "index": 4 + } + ] + }, + { + "type": "text", + "bbox": [ + 309, + 124, + 414, + 136 + ], + "lines": [ + { + "bbox": [ + 309, + 123, + 416, + 137 + ], + "spans": [ + { + "bbox": [ + 309, + 123, + 416, + 137 + ], + "score": 1.0, + "content": "(Conservative offline RL).", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 5 + }, + { + "type": "text", + "bbox": [ + 106, + 150, + 217, + 270 + ], + "lines": [ + { + "bbox": [ + 106, + 149, + 218, + 163 + ], + "spans": [ + { + "bbox": [ + 106, + 150, + 134, + 163 + ], + "score": 0.95, + "content": "J _ { \\mathcal { D } } ( \\pi )", + "type": "inline_equation" + }, + { + "bbox": [ + 134, + 149, + 218, + 163 + ], + "score": 1.0, + "content": "denotes the average", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 161, + 218, + 173 + ], + "spans": [ + { + "bbox": [ + 106, + 161, + 178, + 173 + ], + "score": 1.0, + "content": "return of policy", + "type": "text" + }, + { + "bbox": [ + 178, + 163, + 187, + 172 + ], + "score": 0.73, + "content": "\\pi", + "type": "inline_equation" + }, + { + "bbox": [ + 187, + 161, + 218, + 173 + ], + "score": 1.0, + "content": "in the", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 172, + 218, + 184 + ], + "spans": [ + { + "bbox": [ + 106, + 172, + 218, + 184 + ], + "score": 1.0, + "content": "empirical MDP induced by", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 183, + 218, + 194 + ], + "spans": [ + { + "bbox": [ + 106, + 183, + 218, + 194 + ], + "score": 1.0, + "content": "the transitions in the offline", + 
"type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 192, + 216, + 207 + ], + "spans": [ + { + "bbox": [ + 105, + 192, + 140, + 207 + ], + "score": 1.0, + "content": "dataset", + "type": "text" + }, + { + "bbox": [ + 140, + 194, + 149, + 204 + ], + "score": 0.74, + "content": "\\mathcal { D }", + "type": "inline_equation" + }, + { + "bbox": [ + 150, + 192, + 177, + 207 + ], + "score": 1.0, + "content": ", and", + "type": "text" + }, + { + "bbox": [ + 178, + 194, + 216, + 207 + ], + "score": 0.92, + "content": "D ( \\pi , \\pi _ { \\beta } )", + "type": "inline_equation" + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 205, + 218, + 216 + ], + "spans": [ + { + "bbox": [ + 106, + 205, + 218, + 216 + ], + "score": 1.0, + "content": "denotes a closeness con-", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 216, + 218, + 227 + ], + "spans": [ + { + "bbox": [ + 106, + 216, + 218, + 227 + ], + "score": 1.0, + "content": "straint to the behavior pol-", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 226, + 218, + 240 + ], + "spans": [ + { + "bbox": [ + 105, + 226, + 218, + 240 + ], + "score": 1.0, + "content": "icy, effectively applied by", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 237, + 218, + 248 + ], + "spans": [ + { + "bbox": [ + 106, + 237, + 218, + 248 + ], + "score": 1.0, + "content": "the offline RL method. 
Our", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 248, + 218, + 261 + ], + "spans": [ + { + "bbox": [ + 105, + 248, + 218, + 261 + ], + "score": 1.0, + "content": "definition of conservative", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 260, + 217, + 271 + ], + "spans": [ + { + "bbox": [ + 106, + 260, + 217, + 271 + ], + "score": 1.0, + "content": "offline RL requires that this", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 11 + }, + { + "type": "table", + "bbox": [ + 231, + 153, + 497, + 223 + ], + "blocks": [ + { + "type": "table_body", + "bbox": [ + 231, + 153, + 497, + 223 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 231, + 153, + 497, + 223 + ], + "spans": [ + { + "bbox": [ + 231, + 153, + 497, + 223 + ], + "score": 0.979, + "html": "
QuantitySupervised LearningConservative Offline RL
Test errorLoss L evaluated on test data,DtestPerformance of policy,J(Ο€)
Train errorLoss L evaluated on train data,DtrainObjective in Equations 2,1
OverfittingL(Dtrain) low,L(Dval) high,Dval is a validation set drawn i.i.d.as DtrainTraining objective in Equation l is ex- tremely low,low value of J(Ο€)
Underfittinghigh value of train error L(Dtrain)Training objective in Equation 1 is ex- tremely high,low value of J(Ο€)
", + "type": "table", + "image_path": "1e432827fa691c00d934499d561870f9ab21e128eee2a69b3018b096d8e40450.jpg" + } + ] + } + ], + "index": 18, + "virtual_lines": [ + { + "bbox": [ + 231, + 153, + 497, + 176.33333333333334 + ], + "spans": [], + "index": 17 + }, + { + "bbox": [ + 231, + 176.33333333333334, + 497, + 199.66666666666669 + ], + "spans": [], + "index": 18 + }, + { + "bbox": [ + 231, + 199.66666666666669, + 497, + 223.00000000000003 + ], + "spans": [], + "index": 19 + } + ] + }, + { + "type": "table_caption", + "bbox": [ + 225, + 226, + 504, + 266 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 225, + 225, + 505, + 238 + ], + "spans": [ + { + "bbox": [ + 225, + 225, + 505, + 238 + ], + "score": 1.0, + "content": "Table 1: Summary of train error, test error and our definitions of overfitting", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 225, + 236, + 505, + 246 + ], + "spans": [ + { + "bbox": [ + 225, + 236, + 505, + 246 + ], + "score": 1.0, + "content": "and underfitting in supervised learning and conservative offline RL methods.", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 225, + 246, + 505, + 257 + ], + "spans": [ + { + "bbox": [ + 225, + 246, + 505, + 257 + ], + "score": 1.0, + "content": "We will propose metrics to measure these phenomena in a purely offline man-", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 224, + 255, + 473, + 267 + ], + "spans": [ + { + "bbox": [ + 224, + 255, + 473, + 267 + ], + "score": 1.0, + "content": "ner and recommend how to tune the underlying method accordingly.", + "type": "text" + } + ], + "index": 23 + } + ], + "index": 21.5 + } + ], + "index": 19.75 + }, + { + "type": "text", + "bbox": [ + 106, + 270, + 505, + 402 + ], + "lines": [ + { + "bbox": [ + 105, + 269, + 504, + 284 + ], + "spans": [ + { + "bbox": [ + 105, + 269, + 496, + 284 + ], + "score": 1.0, + "content": "divergence be computed in expectation over the state visitation distribution of the learned 
policy", + "type": "text" + }, + { + "bbox": [ + 497, + 274, + 504, + 280 + ], + "score": 0.67, + "content": "\\pi", + "type": "inline_equation" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 280, + 505, + 294 + ], + "spans": [ + { + "bbox": [ + 105, + 280, + 505, + 294 + ], + "score": 1.0, + "content": "in the empirical MDP as discussed in Appendix F.1. For example, Equation 1 translates to utilizing", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 106, + 291, + 507, + 306 + ], + "spans": [ + { + "bbox": [ + 106, + 292, + 273, + 305 + ], + "score": 0.92, + "content": "\\begin{array} { r } { D _ { \\mathrm { C Q L } } ( p , \\bar { q } ) : = \\sum _ { \\mathbf { x } } p ( \\mathbf { x } ) ( p ( \\mathbf { x } ) / q ( \\mathbf { x } ) - \\bar { 1 } ) } \\end{array}", + "type": "inline_equation" + }, + { + "bbox": [ + 274, + 291, + 507, + 306 + ], + "score": 1.0, + "content": "in Equation 2 (see Theorem 3.5 in Kumar et al. [2] for a", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 104, + 303, + 506, + 316 + ], + "spans": [ + { + "bbox": [ + 104, + 303, + 506, + 316 + ], + "score": 1.0, + "content": "proof). The training loss is discussed in Equations 1 and 2 and the test loss is equal to the negative", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 313, + 505, + 327 + ], + "spans": [ + { + "bbox": [ + 105, + 313, + 189, + 327 + ], + "score": 1.0, + "content": "of the actual return", + "type": "text" + }, + { + "bbox": [ + 189, + 314, + 210, + 326 + ], + "score": 0.92, + "content": "J ( \\pi )", + "type": "inline_equation" + }, + { + "bbox": [ + 210, + 313, + 505, + 327 + ], + "score": 1.0, + "content": "of the learned policy. 
Analogously to supervised learning, we can use", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 324, + 505, + 338 + ], + "spans": [ + { + "bbox": [ + 105, + 324, + 505, + 338 + ], + "score": 1.0, + "content": "the notion of train and test error to define overfitting and underfitting in offline RL, as discussed in", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 335, + 505, + 349 + ], + "spans": [ + { + "bbox": [ + 105, + 335, + 505, + 349 + ], + "score": 1.0, + "content": "Table 1. However, note that the conditions summarized in Table 1 are not measurable completely", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 345, + 506, + 360 + ], + "spans": [ + { + "bbox": [ + 105, + 345, + 506, + 360 + ], + "score": 1.0, + "content": "offline. Precisely estimating if a run of an offline RL method overfits or underfits requires evaluating", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 358, + 505, + 370 + ], + "spans": [ + { + "bbox": [ + 105, + 358, + 505, + 370 + ], + "score": 1.0, + "content": "the learned policy via interaction with the real-world environment. In Section 3, our goal will be", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 369, + 505, + 381 + ], + "spans": [ + { + "bbox": [ + 105, + 369, + 505, + 381 + ], + "score": 1.0, + "content": "to devise offline metrics for characterizing overfitting that do not have this requirement. We will", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 380, + 506, + 391 + ], + "spans": [ + { + "bbox": [ + 105, + 380, + 506, + 391 + ], + "score": 1.0, + "content": "tailor our study specifically towards CQL, though we extend it to BRAC in Appendix F.1. 
A similar", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 390, + 473, + 402 + ], + "spans": [ + { + "bbox": [ + 105, + 390, + 473, + 402 + ], + "score": 1.0, + "content": "procedure could be devised for other offline RL methods, but we leave this for future work.", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 29.5 + }, + { + "type": "title", + "bbox": [ + 106, + 410, + 462, + 423 + ], + "lines": [ + { + "bbox": [ + 104, + 409, + 465, + 425 + ], + "spans": [ + { + "bbox": [ + 104, + 409, + 465, + 425 + ], + "score": 1.0, + "content": "3 Detecting Overfitting and Underfitting in Conservative Offline RL", + "type": "text" + } + ], + "index": 36 + } + ], + "index": 36 + }, + { + "type": "text", + "bbox": [ + 106, + 427, + 505, + 493 + ], + "lines": [ + { + "bbox": [ + 105, + 426, + 505, + 439 + ], + "spans": [ + { + "bbox": [ + 105, + 426, + 505, + 439 + ], + "score": 1.0, + "content": "In standard supervised learning, we can determine if a method overfits or underfits by comparing the", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 438, + 506, + 450 + ], + "spans": [ + { + "bbox": [ + 105, + 438, + 506, + 450 + ], + "score": 1.0, + "content": "training loss to the same loss function evaluated on a held-out validation dataset, which serves as a", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 106, + 449, + 506, + 462 + ], + "spans": [ + { + "bbox": [ + 106, + 449, + 366, + 462 + ], + "score": 1.0, + "content": "β€œproxy” test dataset. 
In contrast, the return of the learned policy", + "type": "text" + }, + { + "bbox": [ + 366, + 449, + 388, + 461 + ], + "score": 0.91, + "content": "J ( \\pi )", + "type": "inline_equation" + }, + { + "bbox": [ + 388, + 449, + 506, + 462 + ], + "score": 1.0, + "content": "in RL does not have a direct", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 460, + 506, + 471 + ], + "spans": [ + { + "bbox": [ + 105, + 460, + 506, + 471 + ], + "score": 1.0, + "content": "proxy that can be computed offline. Thus, our goal is to identify offline metrics and conditions that", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 106, + 471, + 504, + 482 + ], + "spans": [ + { + "bbox": [ + 106, + 471, + 504, + 482 + ], + "score": 1.0, + "content": "allow us to measure overfitting and underfitting in conservative offline RL, with a focus on CQL.", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 106, + 481, + 471, + 494 + ], + "spans": [ + { + "bbox": [ + 106, + 481, + 471, + 494 + ], + "score": 1.0, + "content": "We also adapt these conditions to BRAC [16], a policy-constraint method in Appendix F.2.", + "type": "text" + } + ], + "index": 42 + } + ], + "index": 39.5 + }, + { + "type": "text", + "bbox": [ + 107, + 498, + 396, + 585 + ], + "lines": [ + { + "bbox": [ + 105, + 497, + 397, + 511 + ], + "spans": [ + { + "bbox": [ + 105, + 497, + 397, + 511 + ], + "score": 1.0, + "content": "Detecting overfitting in CQL. 
Our definition of overfitting (Table 1)", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 509, + 397, + 521 + ], + "spans": [ + { + "bbox": [ + 105, + 509, + 397, + 521 + ], + "score": 1.0, + "content": "corresponds to a low value for the training loss (Equation 1), but poor", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 519, + 397, + 532 + ], + "spans": [ + { + "bbox": [ + 105, + 519, + 217, + 532 + ], + "score": 1.0, + "content": "actual policy performance", + "type": "text" + }, + { + "bbox": [ + 217, + 520, + 238, + 532 + ], + "score": 0.91, + "content": "J ( \\pi )", + "type": "inline_equation" + }, + { + "bbox": [ + 239, + 519, + 397, + 532 + ], + "score": 1.0, + "content": ". To detect this, we analyze the time", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 530, + 396, + 543 + ], + "spans": [ + { + "bbox": [ + 105, + 530, + 396, + 543 + ], + "score": 1.0, + "content": "series of the estimated Q-values averaged over the dataset samples", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 107, + 541, + 397, + 554 + ], + "spans": [ + { + "bbox": [ + 107, + 541, + 172, + 554 + ], + "score": 0.92, + "content": "( \\mathbf { s } , \\mathbf { a } , r , \\mathbf { s } ^ { \\prime } ) \\in \\mathcal { D }", + "type": "inline_equation" + }, + { + "bbox": [ + 173, + 541, + 397, + 554 + ], + "score": 1.0, + "content": "over the course of training with a large number of gra-", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 106, + 552, + 397, + 564 + ], + "spans": [ + { + "bbox": [ + 106, + 552, + 397, + 564 + ], + "score": 1.0, + "content": "dient steps. 
A run is labeled as overfitting if we see that the expected", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 105, + 563, + 397, + 576 + ], + "spans": [ + { + "bbox": [ + 105, + 563, + 397, + 576 + ], + "score": 1.0, + "content": "dataset Q-value exhibits a non-monotonic trend: if the average Q-values", + "type": "text" + } + ], + "index": 50 + }, + { + "bbox": [ + 105, + 573, + 397, + 586 + ], + "spans": [ + { + "bbox": [ + 105, + 573, + 397, + 586 + ], + "score": 1.0, + "content": "first increase and then decrease as shown in the figure on the right. Ad-", + "type": "text" + } + ], + "index": 52 + } + ], + "index": 46.5 + }, + { + "type": "image", + "bbox": [ + 403, + 504, + 504, + 583 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 403, + 504, + 504, + 583 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 403, + 504, + 504, + 583 + ], + "spans": [ + { + "bbox": [ + 403, + 504, + 504, + 583 + ], + "score": 0.941, + "type": "image", + "image_path": "a834916e9152742e6ff0bc3376e3cee94d030ab397761d3b7ccebf41e7204020.jpg" + } + ] + } + ], + "index": 50.0, + "virtual_lines": [ + { + "bbox": [ + 403, + 504, + 504, + 543.5 + ], + "spans": [], + "index": 49 + }, + { + "bbox": [ + 403, + 543.5, + 504, + 583.0 + ], + "spans": [], + "index": 51 + } + ] + } + ], + "index": 50.0 + }, + { + "type": "text", + "bbox": [ + 106, + 586, + 505, + 673 + ], + "lines": [ + { + "bbox": [ + 105, + 584, + 505, + 597 + ], + "spans": [ + { + "bbox": [ + 105, + 584, + 505, + 597 + ], + "score": 1.0, + "content": "ditionally, we would see that training loss in Equation 1 eventually becomes very low. 
Why do we", + "type": "text" + } + ], + "index": 53 + }, + { + "bbox": [ + 104, + 595, + 505, + 609 + ], + "spans": [ + { + "bbox": [ + 104, + 595, + 266, + 609 + ], + "score": 1.0, + "content": "see such a trend in the average dataset", + "type": "text" + }, + { + "bbox": [ + 266, + 597, + 275, + 608 + ], + "score": 0.29, + "content": "\\mathbf { Q }", + "type": "inline_equation" + }, + { + "bbox": [ + 276, + 595, + 505, + 609 + ], + "score": 1.0, + "content": "-value? Since CQL selectively penalizes the average Q-", + "type": "text" + } + ], + "index": 54 + }, + { + "bbox": [ + 105, + 606, + 505, + 620 + ], + "spans": [ + { + "bbox": [ + 105, + 606, + 219, + 620 + ], + "score": 1.0, + "content": "value under the distribution", + "type": "text" + }, + { + "bbox": [ + 220, + 607, + 248, + 619 + ], + "score": 0.92, + "content": "\\mu ( \\mathbf { a } | \\mathbf { s } )", + "type": "inline_equation" + }, + { + "bbox": [ + 248, + 606, + 505, + 620 + ], + "score": 1.0, + "content": "supported on actions with large Q-values, we would expect the", + "type": "text" + } + ], + "index": 55 + }, + { + "bbox": [ + 106, + 617, + 505, + 631 + ], + "spans": [ + { + "bbox": [ + 106, + 617, + 255, + 631 + ], + "score": 1.0, + "content": "Q-values on states from the dataset s", + "type": "text" + }, + { + "bbox": [ + 255, + 618, + 275, + 628 + ], + "score": 0.85, + "content": "\\sim \\mathcal { D }", + "type": "inline_equation" + }, + { + "bbox": [ + 276, + 617, + 339, + 631 + ], + "score": 1.0, + "content": "and the learned", + "type": "text" + }, + { + "bbox": [ + 339, + 618, + 383, + 630 + ], + "score": 0.92, + "content": "\\mathbf { a } \\sim \\pi ( \\cdot | \\mathbf { s } )", + "type": "inline_equation" + }, + { + "bbox": [ + 384, + 617, + 505, + 631 + ], + "score": 1.0, + "content": "to be small since the policy is", + "type": "text" + } + ], + "index": 56 + }, + { + "bbox": [ + 106, + 629, + 505, + 641 + ], + "spans": [ + { + "bbox": [ + 106, + 629, + 505, + 641 + ], + 
"score": 1.0, + "content": "trained to maximize the Q-function as well. This in turn would lead to an eventual reduction in the", + "type": "text" + } + ], + "index": 57 + }, + { + "bbox": [ + 105, + 639, + 505, + 653 + ], + "spans": [ + { + "bbox": [ + 105, + 639, + 249, + 653 + ], + "score": 1.0, + "content": "average Q-value on dataset actions,", + "type": "text" + }, + { + "bbox": [ + 249, + 640, + 320, + 652 + ], + "score": 0.93, + "content": "\\mathbb { E } _ { \\mathbf { s } , \\mathbf { a } \\sim \\mathcal { D } } [ Q _ { \\theta } ( \\mathbf { s } , \\mathbf { a } ) ]", + "type": "inline_equation" + }, + { + "bbox": [ + 321, + 639, + 505, + 653 + ], + "score": 1.0, + "content": ". This would be visible after sufficiently many", + "type": "text" + } + ], + "index": 58 + }, + { + "bbox": [ + 105, + 650, + 506, + 664 + ], + "spans": [ + { + "bbox": [ + 105, + 650, + 506, + 664 + ], + "score": 1.0, + "content": "steps of training, when values have propagated via Bellman backups in Equation 1 giving rise to the", + "type": "text" + } + ], + "index": 59 + }, + { + "bbox": [ + 105, + 662, + 486, + 674 + ], + "spans": [ + { + "bbox": [ + 105, + 662, + 486, + 674 + ], + "score": 1.0, + "content": "non-monotonic trend. 
If such a trend is observed, this raises two questions, as we discuss next.", + "type": "text" + } + ], + "index": 60 + } + ], + "index": 56.5 + }, + { + "type": "text", + "bbox": [ + 107, + 677, + 505, + 722 + ], + "lines": [ + { + "bbox": [ + 105, + 676, + 506, + 691 + ], + "spans": [ + { + "bbox": [ + 105, + 676, + 212, + 691 + ], + "score": 1.0, + "content": "What does a low average", + "type": "text" + }, + { + "bbox": [ + 212, + 678, + 221, + 690 + ], + "score": 0.74, + "content": "\\varrho", + "type": "inline_equation" + }, + { + "bbox": [ + 221, + 676, + 248, + 691 + ], + "score": 1.0, + "content": "-value", + "type": "text" + }, + { + "bbox": [ + 249, + 678, + 319, + 690 + ], + "score": 0.94, + "content": "\\mathbb { E } _ { \\mathbf { s } , \\mathbf { a } \\sim \\mathcal { D } } [ Q _ { \\theta } ( \\mathbf { s } , \\mathbf { a } ) ]", + "type": "inline_equation" + }, + { + "bbox": [ + 320, + 676, + 373, + 691 + ], + "score": 1.0, + "content": "imply about", + "type": "text" + }, + { + "bbox": [ + 374, + 678, + 394, + 690 + ], + "score": 0.86, + "content": "J ( \\pi )", + "type": "inline_equation" + }, + { + "bbox": [ + 395, + 676, + 506, + 691 + ], + "score": 1.0, + "content": "? 
We show in Appendix A", + "type": "text" + } + ], + "index": 61 + }, + { + "bbox": [ + 106, + 689, + 505, + 700 + ], + "spans": [ + { + "bbox": [ + 106, + 689, + 505, + 700 + ], + "score": 1.0, + "content": "that, in principle, CQL training (Equation 1) should never learn Q-values smaller than the dataset", + "type": "text" + } + ], + "index": 62 + }, + { + "bbox": [ + 105, + 698, + 505, + 714 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 221, + 714 + ], + "score": 1.0, + "content": "Monte-Carlo return, and the", + "type": "text" + }, + { + "bbox": [ + 221, + 700, + 230, + 711 + ], + "score": 0.3, + "content": "\\mathrm { Q }", + "type": "inline_equation" + }, + { + "bbox": [ + 231, + 698, + 425, + 714 + ], + "score": 1.0, + "content": "-values should increase unless the learned policy", + "type": "text" + }, + { + "bbox": [ + 426, + 702, + 433, + 710 + ], + "score": 0.74, + "content": "\\pi", + "type": "inline_equation" + }, + { + "bbox": [ + 434, + 698, + 489, + 714 + ], + "score": 1.0, + "content": "is better than", + "type": "text" + }, + { + "bbox": [ + 489, + 702, + 501, + 712 + ], + "score": 0.85, + "content": "\\pi _ { \\beta }", + "type": "inline_equation" + }, + { + "bbox": [ + 501, + 698, + 505, + 714 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 63 + }, + { + "bbox": [ + 106, + 711, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 106, + 711, + 505, + 723 + ], + "score": 1.0, + "content": "Intuitively, this is because the objective in Equation 1 aims to also maximize the average dataset", + "type": "text" + } + ], + "index": 64 + } + ], + "index": 62.5 + } + ], + "page_idx": 2, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 302, + 741, + 309, + 750 + ], + "lines": [ + { + "bbox": [ + 301, + 740, + 310, + 752 + ], + "spans": [ + { + "bbox": [ + 301, + 740, + 310, + 752 + ], + "score": 1.0, + "content": "3", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + 
{ + "type": "text", + "bbox": [ + 105, + 72, + 504, + 95 + ], + "lines": [], + "index": 0.5, + "bbox_fs": [ + 106, + 73, + 505, + 96 + ], + "lines_deleted": true + }, + { + "type": "text", + "bbox": [ + 107, + 100, + 504, + 122 + ], + "lines": [ + { + "bbox": [ + 106, + 100, + 504, + 112 + ], + "spans": [ + { + "bbox": [ + 106, + 100, + 504, + 112 + ], + "score": 1.0, + "content": "To define overfitting and underfitting generically for any conservative offline RL method, we con-", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 110, + 364, + 123 + ], + "spans": [ + { + "bbox": [ + 105, + 110, + 364, + 123 + ], + "score": 1.0, + "content": "sider an abstract optimization formulation for such methods [2]:", + "type": "text" + } + ], + "index": 3 + } + ], + "index": 2.5, + "bbox_fs": [ + 105, + 100, + 504, + 123 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 130, + 123, + 284, + 140 + ], + "lines": [ + { + "bbox": [ + 130, + 123, + 284, + 140 + ], + "spans": [ + { + "bbox": [ + 130, + 123, + 284, + 140 + ], + "score": 0.85, + "content": "\\pi ^ { * } : = \\arg \\operatorname* { m a x } _ { \\pi } ~ J _ { \\mathcal { D } } ( \\pi ) - \\alpha D ( \\pi , \\pi _ { \\beta } )", + "type": "interline_equation", + "image_path": "5f87e2d9edb1ebc744585eaf9bbbefae1189640155a200a885d288ec1f092126.jpg" + } + ] + } + ], + "index": 4, + "virtual_lines": [ + { + "bbox": [ + 130, + 123, + 284, + 140 + ], + "spans": [], + "index": 4 + } + ] + }, + { + "type": "text", + "bbox": [ + 309, + 124, + 414, + 136 + ], + "lines": [ + { + "bbox": [ + 309, + 123, + 416, + 137 + ], + "spans": [ + { + "bbox": [ + 309, + 123, + 416, + 137 + ], + "score": 1.0, + "content": "(Conservative offline RL).", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 5, + "bbox_fs": [ + 309, + 123, + 416, + 137 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 150, + 217, + 270 + ], + "lines": [ + { + "bbox": [ + 106, + 149, + 218, + 163 + ], + "spans": [ + { + "bbox": [ + 
106, + 150, + 134, + 163 + ], + "score": 0.95, + "content": "J _ { \\mathcal { D } } ( \\pi )", + "type": "inline_equation" + }, + { + "bbox": [ + 134, + 149, + 218, + 163 + ], + "score": 1.0, + "content": "denotes the average", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 161, + 218, + 173 + ], + "spans": [ + { + "bbox": [ + 106, + 161, + 178, + 173 + ], + "score": 1.0, + "content": "return of policy", + "type": "text" + }, + { + "bbox": [ + 178, + 163, + 187, + 172 + ], + "score": 0.73, + "content": "\\pi", + "type": "inline_equation" + }, + { + "bbox": [ + 187, + 161, + 218, + 173 + ], + "score": 1.0, + "content": "in the", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 172, + 218, + 184 + ], + "spans": [ + { + "bbox": [ + 106, + 172, + 218, + 184 + ], + "score": 1.0, + "content": "empirical MDP induced by", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 183, + 218, + 194 + ], + "spans": [ + { + "bbox": [ + 106, + 183, + 218, + 194 + ], + "score": 1.0, + "content": "the transitions in the offline", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 192, + 216, + 207 + ], + "spans": [ + { + "bbox": [ + 105, + 192, + 140, + 207 + ], + "score": 1.0, + "content": "dataset", + "type": "text" + }, + { + "bbox": [ + 140, + 194, + 149, + 204 + ], + "score": 0.74, + "content": "\\mathcal { D }", + "type": "inline_equation" + }, + { + "bbox": [ + 150, + 192, + 177, + 207 + ], + "score": 1.0, + "content": ", and", + "type": "text" + }, + { + "bbox": [ + 178, + 194, + 216, + 207 + ], + "score": 0.92, + "content": "D ( \\pi , \\pi _ { \\beta } )", + "type": "inline_equation" + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 205, + 218, + 216 + ], + "spans": [ + { + "bbox": [ + 106, + 205, + 218, + 216 + ], + "score": 1.0, + "content": "denotes a closeness con-", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 216, + 218, + 227 + ], + "spans": [ + { + "bbox": [ + 
106, + 216, + 218, + 227 + ], + "score": 1.0, + "content": "straint to the behavior pol-", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 226, + 218, + 240 + ], + "spans": [ + { + "bbox": [ + 105, + 226, + 218, + 240 + ], + "score": 1.0, + "content": "icy, effectively applied by", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 237, + 218, + 248 + ], + "spans": [ + { + "bbox": [ + 106, + 237, + 218, + 248 + ], + "score": 1.0, + "content": "the offline RL method. Our", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 248, + 218, + 261 + ], + "spans": [ + { + "bbox": [ + 105, + 248, + 218, + 261 + ], + "score": 1.0, + "content": "definition of conservative", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 260, + 217, + 271 + ], + "spans": [ + { + "bbox": [ + 106, + 260, + 217, + 271 + ], + "score": 1.0, + "content": "offline RL requires that this", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 11, + "bbox_fs": [ + 105, + 149, + 218, + 271 + ] + }, + { + "type": "table", + "bbox": [ + 231, + 153, + 497, + 223 + ], + "blocks": [ + { + "type": "table_body", + "bbox": [ + 231, + 153, + 497, + 223 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 231, + 153, + 497, + 223 + ], + "spans": [ + { + "bbox": [ + 231, + 153, + 497, + 223 + ], + "score": 0.979, + "html": "
QuantitySupervised LearningConservative Offline RL
Test errorLoss L evaluated on test data,DtestPerformance of policy,J(Ο€)
Train errorLoss L evaluated on train data,DtrainObjective in Equations 2,1
OverfittingL(Dtrain) low,L(Dval) high,Dval is a validation set drawn i.i.d.as DtrainTraining objective in Equation l is ex- tremely low,low value of J(Ο€)
Underfittinghigh value of train error L(Dtrain)Training objective in Equation 1 is ex- tremely high,low value of J(Ο€)
", + "type": "table", + "image_path": "1e432827fa691c00d934499d561870f9ab21e128eee2a69b3018b096d8e40450.jpg" + } + ] + } + ], + "index": 18, + "virtual_lines": [ + { + "bbox": [ + 231, + 153, + 497, + 176.33333333333334 + ], + "spans": [], + "index": 17 + }, + { + "bbox": [ + 231, + 176.33333333333334, + 497, + 199.66666666666669 + ], + "spans": [], + "index": 18 + }, + { + "bbox": [ + 231, + 199.66666666666669, + 497, + 223.00000000000003 + ], + "spans": [], + "index": 19 + } + ] + }, + { + "type": "table_caption", + "bbox": [ + 225, + 226, + 504, + 266 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 225, + 225, + 505, + 238 + ], + "spans": [ + { + "bbox": [ + 225, + 225, + 505, + 238 + ], + "score": 1.0, + "content": "Table 1: Summary of train error, test error and our definitions of overfitting", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 225, + 236, + 505, + 246 + ], + "spans": [ + { + "bbox": [ + 225, + 236, + 505, + 246 + ], + "score": 1.0, + "content": "and underfitting in supervised learning and conservative offline RL methods.", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 225, + 246, + 505, + 257 + ], + "spans": [ + { + "bbox": [ + 225, + 246, + 505, + 257 + ], + "score": 1.0, + "content": "We will propose metrics to measure these phenomena in a purely offline man-", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 224, + 255, + 473, + 267 + ], + "spans": [ + { + "bbox": [ + 224, + 255, + 473, + 267 + ], + "score": 1.0, + "content": "ner and recommend how to tune the underlying method accordingly.", + "type": "text" + } + ], + "index": 23 + } + ], + "index": 21.5 + } + ], + "index": 19.75 + }, + { + "type": "text", + "bbox": [ + 106, + 270, + 505, + 402 + ], + "lines": [ + { + "bbox": [ + 105, + 269, + 504, + 284 + ], + "spans": [ + { + "bbox": [ + 105, + 269, + 496, + 284 + ], + "score": 1.0, + "content": "divergence be computed in expectation over the state visitation distribution of the learned 
policy", + "type": "text" + }, + { + "bbox": [ + 497, + 274, + 504, + 280 + ], + "score": 0.67, + "content": "\\pi", + "type": "inline_equation" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 280, + 505, + 294 + ], + "spans": [ + { + "bbox": [ + 105, + 280, + 505, + 294 + ], + "score": 1.0, + "content": "in the empirical MDP as discussed in Appendix F.1. For example, Equation 1 translates to utilizing", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 106, + 291, + 507, + 306 + ], + "spans": [ + { + "bbox": [ + 106, + 292, + 273, + 305 + ], + "score": 0.92, + "content": "\\begin{array} { r } { D _ { \\mathrm { C Q L } } ( p , \\bar { q } ) : = \\sum _ { \\mathbf { x } } p ( \\mathbf { x } ) ( p ( \\mathbf { x } ) / q ( \\mathbf { x } ) - \\bar { 1 } ) } \\end{array}", + "type": "inline_equation" + }, + { + "bbox": [ + 274, + 291, + 507, + 306 + ], + "score": 1.0, + "content": "in Equation 2 (see Theorem 3.5 in Kumar et al. [2] for a", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 104, + 303, + 506, + 316 + ], + "spans": [ + { + "bbox": [ + 104, + 303, + 506, + 316 + ], + "score": 1.0, + "content": "proof). The training loss is discussed in Equations 1 and 2 and the test loss is equal to the negative", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 313, + 505, + 327 + ], + "spans": [ + { + "bbox": [ + 105, + 313, + 189, + 327 + ], + "score": 1.0, + "content": "of the actual return", + "type": "text" + }, + { + "bbox": [ + 189, + 314, + 210, + 326 + ], + "score": 0.92, + "content": "J ( \\pi )", + "type": "inline_equation" + }, + { + "bbox": [ + 210, + 313, + 505, + 327 + ], + "score": 1.0, + "content": "of the learned policy. 
Analogously to supervised learning, we can use", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 324, + 505, + 338 + ], + "spans": [ + { + "bbox": [ + 105, + 324, + 505, + 338 + ], + "score": 1.0, + "content": "the notion of train and test error to define overfitting and underfitting in offline RL, as discussed in", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 335, + 505, + 349 + ], + "spans": [ + { + "bbox": [ + 105, + 335, + 505, + 349 + ], + "score": 1.0, + "content": "Table 1. However, note that the conditions summarized in Table 1 are not measurable completely", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 345, + 506, + 360 + ], + "spans": [ + { + "bbox": [ + 105, + 345, + 506, + 360 + ], + "score": 1.0, + "content": "offline. Precisely estimating if a run of an offline RL method overfits or underfits requires evaluating", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 358, + 505, + 370 + ], + "spans": [ + { + "bbox": [ + 105, + 358, + 505, + 370 + ], + "score": 1.0, + "content": "the learned policy via interaction with the real-world environment. In Section 3, our goal will be", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 369, + 505, + 381 + ], + "spans": [ + { + "bbox": [ + 105, + 369, + 505, + 381 + ], + "score": 1.0, + "content": "to devise offline metrics for characterizing overfitting that do not have this requirement. We will", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 380, + 506, + 391 + ], + "spans": [ + { + "bbox": [ + 105, + 380, + 506, + 391 + ], + "score": 1.0, + "content": "tailor our study specifically towards CQL, though we extend it to BRAC in Appendix F.1. 
A similar", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 390, + 473, + 402 + ], + "spans": [ + { + "bbox": [ + 105, + 390, + 473, + 402 + ], + "score": 1.0, + "content": "procedure could be devised for other offline RL methods, but we leave this for future work.", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 29.5, + "bbox_fs": [ + 104, + 269, + 507, + 402 + ] + }, + { + "type": "title", + "bbox": [ + 106, + 410, + 462, + 423 + ], + "lines": [ + { + "bbox": [ + 104, + 409, + 465, + 425 + ], + "spans": [ + { + "bbox": [ + 104, + 409, + 465, + 425 + ], + "score": 1.0, + "content": "3 Detecting Overfitting and Underfitting in Conservative Offline RL", + "type": "text" + } + ], + "index": 36 + } + ], + "index": 36 + }, + { + "type": "text", + "bbox": [ + 106, + 427, + 505, + 493 + ], + "lines": [ + { + "bbox": [ + 105, + 426, + 505, + 439 + ], + "spans": [ + { + "bbox": [ + 105, + 426, + 505, + 439 + ], + "score": 1.0, + "content": "In standard supervised learning, we can determine if a method overfits or underfits by comparing the", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 438, + 506, + 450 + ], + "spans": [ + { + "bbox": [ + 105, + 438, + 506, + 450 + ], + "score": 1.0, + "content": "training loss to the same loss function evaluated on a held-out validation dataset, which serves as a", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 106, + 449, + 506, + 462 + ], + "spans": [ + { + "bbox": [ + 106, + 449, + 366, + 462 + ], + "score": 1.0, + "content": "β€œproxy” test dataset. 
In contrast, the return of the learned policy", + "type": "text" + }, + { + "bbox": [ + 366, + 449, + 388, + 461 + ], + "score": 0.91, + "content": "J ( \\pi )", + "type": "inline_equation" + }, + { + "bbox": [ + 388, + 449, + 506, + 462 + ], + "score": 1.0, + "content": "in RL does not have a direct", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 460, + 506, + 471 + ], + "spans": [ + { + "bbox": [ + 105, + 460, + 506, + 471 + ], + "score": 1.0, + "content": "proxy that can be computed offline. Thus, our goal is to identify offline metrics and conditions that", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 106, + 471, + 504, + 482 + ], + "spans": [ + { + "bbox": [ + 106, + 471, + 504, + 482 + ], + "score": 1.0, + "content": "allow us to measure overfitting and underfitting in conservative offline RL, with a focus on CQL.", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 106, + 481, + 471, + 494 + ], + "spans": [ + { + "bbox": [ + 106, + 481, + 471, + 494 + ], + "score": 1.0, + "content": "We also adapt these conditions to BRAC [16], a policy-constraint method in Appendix F.2.", + "type": "text" + } + ], + "index": 42 + } + ], + "index": 39.5, + "bbox_fs": [ + 105, + 426, + 506, + 494 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 498, + 396, + 585 + ], + "lines": [ + { + "bbox": [ + 105, + 497, + 397, + 511 + ], + "spans": [ + { + "bbox": [ + 105, + 497, + 397, + 511 + ], + "score": 1.0, + "content": "Detecting overfitting in CQL. 
Our definition of overfitting (Table 1)", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 509, + 397, + 521 + ], + "spans": [ + { + "bbox": [ + 105, + 509, + 397, + 521 + ], + "score": 1.0, + "content": "corresponds to a low value for the training loss (Equation 1), but poor", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 519, + 397, + 532 + ], + "spans": [ + { + "bbox": [ + 105, + 519, + 217, + 532 + ], + "score": 1.0, + "content": "actual policy performance", + "type": "text" + }, + { + "bbox": [ + 217, + 520, + 238, + 532 + ], + "score": 0.91, + "content": "J ( \\pi )", + "type": "inline_equation" + }, + { + "bbox": [ + 239, + 519, + 397, + 532 + ], + "score": 1.0, + "content": ". To detect this, we analyze the time", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 530, + 396, + 543 + ], + "spans": [ + { + "bbox": [ + 105, + 530, + 396, + 543 + ], + "score": 1.0, + "content": "series of the estimated Q-values averaged over the dataset samples", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 107, + 541, + 397, + 554 + ], + "spans": [ + { + "bbox": [ + 107, + 541, + 172, + 554 + ], + "score": 0.92, + "content": "( \\mathbf { s } , \\mathbf { a } , r , \\mathbf { s } ^ { \\prime } ) \\in \\mathcal { D }", + "type": "inline_equation" + }, + { + "bbox": [ + 173, + 541, + 397, + 554 + ], + "score": 1.0, + "content": "over the course of training with a large number of gra-", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 106, + 552, + 397, + 564 + ], + "spans": [ + { + "bbox": [ + 106, + 552, + 397, + 564 + ], + "score": 1.0, + "content": "dient steps. 
A run is labeled as overfitting if we see that the expected", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 105, + 563, + 397, + 576 + ], + "spans": [ + { + "bbox": [ + 105, + 563, + 397, + 576 + ], + "score": 1.0, + "content": "dataset Q-value exhibits a non-monotonic trend: if the average Q-values", + "type": "text" + } + ], + "index": 50 + }, + { + "bbox": [ + 105, + 573, + 397, + 586 + ], + "spans": [ + { + "bbox": [ + 105, + 573, + 397, + 586 + ], + "score": 1.0, + "content": "first increase and then decrease as shown in the figure on the right. Ad-", + "type": "text" + } + ], + "index": 52 + }, + { + "bbox": [ + 105, + 584, + 505, + 597 + ], + "spans": [ + { + "bbox": [ + 105, + 584, + 505, + 597 + ], + "score": 1.0, + "content": "ditionally, we would see that training loss in Equation 1 eventually becomes very low. Why do we", + "type": "text" + } + ], + "index": 53 + }, + { + "bbox": [ + 104, + 595, + 505, + 609 + ], + "spans": [ + { + "bbox": [ + 104, + 595, + 266, + 609 + ], + "score": 1.0, + "content": "see such a trend in the average dataset", + "type": "text" + }, + { + "bbox": [ + 266, + 597, + 275, + 608 + ], + "score": 0.29, + "content": "\\mathbf { Q }", + "type": "inline_equation" + }, + { + "bbox": [ + 276, + 595, + 505, + 609 + ], + "score": 1.0, + "content": "-value? 
Since CQL selectively penalizes the average Q-", + "type": "text" + } + ], + "index": 54 + }, + { + "bbox": [ + 105, + 606, + 505, + 620 + ], + "spans": [ + { + "bbox": [ + 105, + 606, + 219, + 620 + ], + "score": 1.0, + "content": "value under the distribution", + "type": "text" + }, + { + "bbox": [ + 220, + 607, + 248, + 619 + ], + "score": 0.92, + "content": "\\mu ( \\mathbf { a } | \\mathbf { s } )", + "type": "inline_equation" + }, + { + "bbox": [ + 248, + 606, + 505, + 620 + ], + "score": 1.0, + "content": "supported on actions with large Q-values, we would expect the", + "type": "text" + } + ], + "index": 55 + }, + { + "bbox": [ + 106, + 617, + 505, + 631 + ], + "spans": [ + { + "bbox": [ + 106, + 617, + 255, + 631 + ], + "score": 1.0, + "content": "Q-values on states from the dataset s", + "type": "text" + }, + { + "bbox": [ + 255, + 618, + 275, + 628 + ], + "score": 0.85, + "content": "\\sim \\mathcal { D }", + "type": "inline_equation" + }, + { + "bbox": [ + 276, + 617, + 339, + 631 + ], + "score": 1.0, + "content": "and the learned", + "type": "text" + }, + { + "bbox": [ + 339, + 618, + 383, + 630 + ], + "score": 0.92, + "content": "\\mathbf { a } \\sim \\pi ( \\cdot | \\mathbf { s } )", + "type": "inline_equation" + }, + { + "bbox": [ + 384, + 617, + 505, + 631 + ], + "score": 1.0, + "content": "to be small since the policy is", + "type": "text" + } + ], + "index": 56 + }, + { + "bbox": [ + 106, + 629, + 505, + 641 + ], + "spans": [ + { + "bbox": [ + 106, + 629, + 505, + 641 + ], + "score": 1.0, + "content": "trained to maximize the Q-function as well. 
This in turn would lead to an eventual reduction in the", + "type": "text" + } + ], + "index": 57 + }, + { + "bbox": [ + 105, + 639, + 505, + 653 + ], + "spans": [ + { + "bbox": [ + 105, + 639, + 249, + 653 + ], + "score": 1.0, + "content": "average Q-value on dataset actions,", + "type": "text" + }, + { + "bbox": [ + 249, + 640, + 320, + 652 + ], + "score": 0.93, + "content": "\\mathbb { E } _ { \\mathbf { s } , \\mathbf { a } \\sim \\mathcal { D } } [ Q _ { \\theta } ( \\mathbf { s } , \\mathbf { a } ) ]", + "type": "inline_equation" + }, + { + "bbox": [ + 321, + 639, + 505, + 653 + ], + "score": 1.0, + "content": ". This would be visible after sufficiently many", + "type": "text" + } + ], + "index": 58 + }, + { + "bbox": [ + 105, + 650, + 506, + 664 + ], + "spans": [ + { + "bbox": [ + 105, + 650, + 506, + 664 + ], + "score": 1.0, + "content": "steps of training, when values have propagated via Bellman backups in Equation 1 giving rise to the", + "type": "text" + } + ], + "index": 59 + }, + { + "bbox": [ + 105, + 662, + 486, + 674 + ], + "spans": [ + { + "bbox": [ + 105, + 662, + 486, + 674 + ], + "score": 1.0, + "content": "non-monotonic trend. 
If such a trend is observed, this raises two questions, as we discuss next.", + "type": "text" + } + ], + "index": 60 + } + ], + "index": 46.5, + "bbox_fs": [ + 105, + 497, + 397, + 586 + ] + }, + { + "type": "image", + "bbox": [ + 403, + 504, + 504, + 583 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 403, + 504, + 504, + 583 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 403, + 504, + 504, + 583 + ], + "spans": [ + { + "bbox": [ + 403, + 504, + 504, + 583 + ], + "score": 0.941, + "type": "image", + "image_path": "a834916e9152742e6ff0bc3376e3cee94d030ab397761d3b7ccebf41e7204020.jpg" + } + ] + } + ], + "index": 50.0, + "virtual_lines": [ + { + "bbox": [ + 403, + 504, + 504, + 543.5 + ], + "spans": [], + "index": 49 + }, + { + "bbox": [ + 403, + 543.5, + 504, + 583.0 + ], + "spans": [], + "index": 51 + } + ] + } + ], + "index": 50.0 + }, + { + "type": "text", + "bbox": [ + 106, + 586, + 505, + 673 + ], + "lines": [], + "index": 56.5, + "bbox_fs": [ + 104, + 584, + 506, + 674 + ], + "lines_deleted": true + }, + { + "type": "text", + "bbox": [ + 107, + 677, + 505, + 722 + ], + "lines": [ + { + "bbox": [ + 105, + 676, + 506, + 691 + ], + "spans": [ + { + "bbox": [ + 105, + 676, + 212, + 691 + ], + "score": 1.0, + "content": "What does a low average", + "type": "text" + }, + { + "bbox": [ + 212, + 678, + 221, + 690 + ], + "score": 0.74, + "content": "\\varrho", + "type": "inline_equation" + }, + { + "bbox": [ + 221, + 676, + 248, + 691 + ], + "score": 1.0, + "content": "-value", + "type": "text" + }, + { + "bbox": [ + 249, + 678, + 319, + 690 + ], + "score": 0.94, + "content": "\\mathbb { E } _ { \\mathbf { s } , \\mathbf { a } \\sim \\mathcal { D } } [ Q _ { \\theta } ( \\mathbf { s } , \\mathbf { a } ) ]", + "type": "inline_equation" + }, + { + "bbox": [ + 320, + 676, + 373, + 691 + ], + "score": 1.0, + "content": "imply about", + "type": "text" + }, + { + "bbox": [ + 374, + 678, + 394, + 690 + ], + "score": 0.86, + "content": "J ( \\pi )", + 
"type": "inline_equation" + }, + { + "bbox": [ + 395, + 676, + 506, + 691 + ], + "score": 1.0, + "content": "? We show in Appendix A", + "type": "text" + } + ], + "index": 61 + }, + { + "bbox": [ + 106, + 689, + 505, + 700 + ], + "spans": [ + { + "bbox": [ + 106, + 689, + 505, + 700 + ], + "score": 1.0, + "content": "that, in principle, CQL training (Equation 1) should never learn Q-values smaller than the dataset", + "type": "text" + } + ], + "index": 62 + }, + { + "bbox": [ + 105, + 698, + 505, + 714 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 221, + 714 + ], + "score": 1.0, + "content": "Monte-Carlo return, and the", + "type": "text" + }, + { + "bbox": [ + 221, + 700, + 230, + 711 + ], + "score": 0.3, + "content": "\\mathrm { Q }", + "type": "inline_equation" + }, + { + "bbox": [ + 231, + 698, + 425, + 714 + ], + "score": 1.0, + "content": "-values should increase unless the learned policy", + "type": "text" + }, + { + "bbox": [ + 426, + 702, + 433, + 710 + ], + "score": 0.74, + "content": "\\pi", + "type": "inline_equation" + }, + { + "bbox": [ + 434, + 698, + 489, + 714 + ], + "score": 1.0, + "content": "is better than", + "type": "text" + }, + { + "bbox": [ + 489, + 702, + 501, + 712 + ], + "score": 0.85, + "content": "\\pi _ { \\beta }", + "type": "inline_equation" + }, + { + "bbox": [ + 501, + 698, + 505, + 714 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 63 + }, + { + "bbox": [ + 106, + 711, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 106, + 711, + 505, + 723 + ], + "score": 1.0, + "content": "Intuitively, this is because the objective in Equation 1 aims to also maximize the average dataset", + "type": "text" + } + ], + "index": 64 + } + ], + "index": 62.5, + "bbox_fs": [ + 105, + 676, + 506, + 723 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 72, + 505, + 172 + ], + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 85 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 85 
+ ], + "score": 1.0, + "content": "Q-value and thus the Q-values for the behavior policy are not underestimated in expectation. Now,", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 83, + 505, + 97 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 505, + 97 + ], + "score": 1.0, + "content": "if the policy optimizer finds a policy that attains a smaller learned Q-value than the dataset return,", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 94, + 505, + 107 + ], + "spans": [ + { + "bbox": [ + 106, + 94, + 505, + 107 + ], + "score": 1.0, + "content": "the policy can always be updated further towards the behavior policy so as to raise the Q-value.", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 105, + 505, + 118 + ], + "spans": [ + { + "bbox": [ + 106, + 105, + 505, + 118 + ], + "score": 1.0, + "content": "Therefore, Q-values can only decrease when the policy found by CQL is better than the behavior", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 116, + 505, + 129 + ], + "spans": [ + { + "bbox": [ + 105, + 116, + 424, + 129 + ], + "score": 1.0, + "content": "policy. We formalize this intuition in Appendix A in Theorem A.1. 
Thus, a low", + "type": "text" + }, + { + "bbox": [ + 424, + 117, + 433, + 128 + ], + "score": 0.34, + "content": "\\mathrm { Q }", + "type": "inline_equation" + }, + { + "bbox": [ + 433, + 116, + 471, + 129 + ], + "score": 1.0, + "content": "-value on", + "type": "text" + }, + { + "bbox": [ + 471, + 116, + 505, + 128 + ], + "score": 0.87, + "content": "( \\mathbf { s } , \\mathbf { a } ) \\in", + "type": "inline_equation" + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 126, + 505, + 140 + ], + "spans": [ + { + "bbox": [ + 107, + 128, + 116, + 137 + ], + "score": 0.79, + "content": "\\mathcal { D }", + "type": "inline_equation" + }, + { + "bbox": [ + 116, + 126, + 473, + 140 + ], + "score": 1.0, + "content": "indicates that the Q-function predicts extremely small Q-values on actions sampled from", + "type": "text" + }, + { + "bbox": [ + 474, + 128, + 501, + 139 + ], + "score": 0.9, + "content": "\\mu ( \\mathbf { a } | \\mathbf { s } )", + "type": "inline_equation" + }, + { + "bbox": [ + 501, + 126, + 505, + 140 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 138, + 505, + 151 + ], + "spans": [ + { + "bbox": [ + 106, + 138, + 369, + 151 + ], + "score": 1.0, + "content": "Typically, this would mean the highest Q-value actions a at a state", + "type": "text" + }, + { + "bbox": [ + 369, + 138, + 395, + 148 + ], + "score": 0.88, + "content": "\\mathbf { s } \\in \\mathcal { D }", + "type": "inline_equation" + }, + { + "bbox": [ + 396, + 138, + 505, + 151 + ], + "score": 1.0, + "content": "are those sampled from the", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 148, + 506, + 162 + ], + "spans": [ + { + "bbox": [ + 106, + 148, + 506, + 162 + ], + "score": 1.0, + "content": "offline dataset, drawn from the behavior policy. 
Thus, policy optimization, which aims to maximize", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 159, + 430, + 173 + ], + "spans": [ + { + "bbox": [ + 105, + 159, + 209, + 173 + ], + "score": 1.0, + "content": "the Q-value, would make", + "type": "text" + }, + { + "bbox": [ + 209, + 160, + 237, + 172 + ], + "score": 0.94, + "content": "\\pi ( \\mathbf { a } | \\mathbf { s } )", + "type": "inline_equation" + }, + { + "bbox": [ + 237, + 159, + 354, + 173 + ], + "score": 1.0, + "content": "closer to the behavior policy", + "type": "text" + }, + { + "bbox": [ + 354, + 160, + 387, + 172 + ], + "score": 0.92, + "content": "\\pi _ { \\beta } ( \\mathbf { a } | \\mathbf { s } )", + "type": "inline_equation" + }, + { + "bbox": [ + 387, + 159, + 400, + 173 + ], + "score": 1.0, + "content": "on", + "type": "text" + }, + { + "bbox": [ + 400, + 160, + 426, + 170 + ], + "score": 0.89, + "content": "\\mathbf { s } \\in \\mathcal { D }", + "type": "inline_equation" + }, + { + "bbox": [ + 427, + 159, + 430, + 173 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 8 + } + ], + "index": 4 + }, + { + "type": "text", + "bbox": [ + 106, + 176, + 505, + 275 + ], + "lines": [ + { + "bbox": [ + 105, + 175, + 506, + 190 + ], + "spans": [ + { + "bbox": [ + 105, + 175, + 506, + 190 + ], + "score": 1.0, + "content": "Which training checkpoint is likely to attain the best policy performance? Tracking overfitting", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 187, + 506, + 201 + ], + "spans": [ + { + "bbox": [ + 105, + 187, + 506, + 201 + ], + "score": 1.0, + "content": "in supervised learning is important for selecting the best-performing checkpoint, before overfitting", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 199, + 504, + 210 + ], + "spans": [ + { + "bbox": [ + 106, + 199, + 504, + 210 + ], + "score": 1.0, + "content": "becomes severe. 
Analogously, we can compare the average dataset Q-value across different check-", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 209, + 506, + 222 + ], + "spans": [ + { + "bbox": [ + 106, + 209, + 506, + 222 + ], + "score": 1.0, + "content": "points within the same run to pick the best policy. Since CQL aims to increase the average dataset", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 220, + 506, + 234 + ], + "spans": [ + { + "bbox": [ + 106, + 220, + 506, + 234 + ], + "score": 1.0, + "content": "Q-value (Equation 1), we would expect Q-values to initially increase, until learning starts to overfit", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 231, + 505, + 244 + ], + "spans": [ + { + "bbox": [ + 106, + 231, + 202, + 244 + ], + "score": 1.0, + "content": "and the average dataset", + "type": "text" + }, + { + "bbox": [ + 203, + 231, + 212, + 242 + ], + "score": 0.29, + "content": "\\mathrm { Q }", + "type": "inline_equation" + }, + { + "bbox": [ + 212, + 231, + 505, + 244 + ], + "score": 1.0, + "content": "-value starts decreasing. We should therefore select the latest checkpoint", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 242, + 505, + 254 + ], + "spans": [ + { + "bbox": [ + 106, + 242, + 505, + 254 + ], + "score": 1.0, + "content": "that corresponds to a peak in the estimated dataset Q-value. A visual illustration of this idea is shown", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 252, + 506, + 267 + ], + "spans": [ + { + "bbox": [ + 105, + 252, + 506, + 267 + ], + "score": 1.0, + "content": "in the figure on the previous page, where the checkpoint marked by the green line is recommended", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 264, + 368, + 276 + ], + "spans": [ + { + "bbox": [ + 106, + 264, + 368, + 276 + ], + "score": 1.0, + "content": "to be chosen. 
In summary, (a) to detect overfitting we can track:", + "type": "text" + } + ], + "index": 17 + } + ], + "index": 13 + }, + { + "type": "text", + "bbox": [ + 116, + 284, + 494, + 307 + ], + "lines": [ + { + "bbox": [ + 115, + 281, + 495, + 298 + ], + "spans": [ + { + "bbox": [ + 115, + 281, + 305, + 298 + ], + "score": 1.0, + "content": "Metric 3.1 (Overfitting). A low average data", + "type": "text" + }, + { + "bbox": [ + 305, + 285, + 313, + 295 + ], + "score": 0.52, + "content": "Q", + "type": "inline_equation" + }, + { + "bbox": [ + 314, + 281, + 340, + 298 + ], + "score": 1.0, + "content": "-value", + "type": "text" + }, + { + "bbox": [ + 340, + 284, + 411, + 296 + ], + "score": 0.9, + "content": "\\mathbb { E } _ { \\mathbf { s } , \\mathbf { a } \\sim \\mathcal { D } } [ Q _ { \\theta } ( \\mathbf { s } , \\mathbf { a } ) ]", + "type": "inline_equation" + }, + { + "bbox": [ + 411, + 281, + 495, + 298 + ], + "score": 1.0, + "content": "that decreases with", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 116, + 293, + 469, + 309 + ], + "spans": [ + { + "bbox": [ + 116, + 293, + 469, + 309 + ], + "score": 1.0, + "content": "more gradient steps on Equation 1 indicates that the offline RL algorithm is overfitting.", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 18.5 + }, + { + "type": "text", + "bbox": [ + 106, + 319, + 493, + 331 + ], + "lines": [ + { + "bbox": [ + 105, + 318, + 496, + 334 + ], + "spans": [ + { + "bbox": [ + 105, + 318, + 496, + 334 + ], + "score": 1.0, + "content": "and (b) further, given a run that exhibits overfitting, our principle for policy selection is given by:", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 20 + }, + { + "type": "text", + "bbox": [ + 112, + 339, + 493, + 363 + ], + "lines": [ + { + "bbox": [ + 117, + 338, + 495, + 353 + ], + "spans": [ + { + "bbox": [ + 117, + 338, + 495, + 353 + ], + "score": 1.0, + "content": "Guideline 3.1 (Policy selection). 
If a run overfits (per Metric 3.1), select the checkpoint that", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 117, + 349, + 431, + 363 + ], + "spans": [ + { + "bbox": [ + 117, + 349, + 260, + 363 + ], + "score": 1.0, + "content": "attains the highest average dataset", + "type": "text" + }, + { + "bbox": [ + 261, + 352, + 269, + 362 + ], + "score": 0.37, + "content": "Q", + "type": "inline_equation" + }, + { + "bbox": [ + 269, + 349, + 431, + 363 + ], + "score": 1.0, + "content": "-value before overfitting for deployment.", + "type": "text" + } + ], + "index": 22 + } + ], + "index": 21.5 + }, + { + "type": "text", + "bbox": [ + 106, + 375, + 504, + 430 + ], + "lines": [ + { + "bbox": [ + 106, + 375, + 505, + 388 + ], + "spans": [ + { + "bbox": [ + 106, + 375, + 505, + 388 + ], + "score": 1.0, + "content": "Finally, for actor-critic algorithms [18] that update the actor slower than the critic, the next policy", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 386, + 505, + 399 + ], + "spans": [ + { + "bbox": [ + 106, + 386, + 505, + 399 + ], + "score": 1.0, + "content": "checkpoint after the peak in the average dataset Q-value appears must be selected. 
In most of our", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 106, + 397, + 505, + 411 + ], + "spans": [ + { + "bbox": [ + 106, + 397, + 505, + 411 + ], + "score": 1.0, + "content": "experiments, we find that simply utilizing the policy checkpoint at the point of the peak in the Q-", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 106, + 407, + 506, + 421 + ], + "spans": [ + { + "bbox": [ + 106, + 407, + 506, + 421 + ], + "score": 1.0, + "content": "value also leads to good results making this a rare concern, but in some cases, utilizing the next", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 419, + 356, + 432 + ], + "spans": [ + { + "bbox": [ + 105, + 419, + 356, + 432 + ], + "score": 1.0, + "content": "checkpoint after the Q-value peak performs better empirically.", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 25 + }, + { + "type": "text", + "bbox": [ + 106, + 435, + 397, + 512 + ], + "lines": [ + { + "bbox": [ + 105, + 435, + 396, + 448 + ], + "spans": [ + { + "bbox": [ + 105, + 435, + 396, + 448 + ], + "score": 1.0, + "content": "Detecting underfitting in CQL. Next, we turn to devising a procedure", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 446, + 396, + 459 + ], + "spans": [ + { + "bbox": [ + 106, + 446, + 396, + 459 + ], + "score": 1.0, + "content": "to detect underfitting. As summarized in Table 1, underfitting occurs", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 106, + 457, + 396, + 469 + ], + "spans": [ + { + "bbox": [ + 106, + 457, + 396, + 469 + ], + "score": 1.0, + "content": "when the RL algorithm is unable to minimize the training objective in", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 106, + 468, + 397, + 481 + ], + "spans": [ + { + "bbox": [ + 106, + 468, + 397, + 481 + ], + "score": 1.0, + "content": "Equation 1 effectively. 
Therefore, large values for the TD error, the CQL", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 478, + 396, + 492 + ], + "spans": [ + { + "bbox": [ + 105, + 478, + 396, + 492 + ], + "score": 1.0, + "content": "regularizer, or both imply underfitting. A large value for the CQL reg-", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 106, + 490, + 396, + 502 + ], + "spans": [ + { + "bbox": [ + 106, + 490, + 142, + 502 + ], + "score": 1.0, + "content": "ularizer,", + "type": "text" + }, + { + "bbox": [ + 142, + 490, + 164, + 502 + ], + "score": 0.9, + "content": "{ \\mathcal { R } } ( \\theta )", + "type": "inline_equation" + }, + { + "bbox": [ + 165, + 490, + 396, + 502 + ], + "score": 1.0, + "content": ", indicates an overestimation of Q-values relative to their", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 106, + 501, + 396, + 513 + ], + "spans": [ + { + "bbox": [ + 106, + 501, + 396, + 513 + ], + "score": 1.0, + "content": "true value [2] and thus, unlike the overfitting regime, we would not ex-", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 32 + }, + { + "type": "image", + "bbox": [ + 403, + 434, + 501, + 511 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 403, + 434, + 501, + 511 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 403, + 434, + 501, + 511 + ], + "spans": [ + { + "bbox": [ + 403, + 434, + 501, + 511 + ], + "score": 0.958, + "type": "image", + "image_path": "4b1e144e763ab56eff446401eff0f116a6cc810b779d08af3c7a767883b102d8.jpg" + } + ] + } + ], + "index": 33.5, + "virtual_lines": [ + { + "bbox": [ + 403, + 434, + 501, + 472.5 + ], + "spans": [], + "index": 31 + }, + { + "bbox": [ + 403, + 472.5, + 501, + 511.0 + ], + "spans": [], + "index": 36 + } + ] + } + ], + "index": 33.5 + }, + { + "type": "text", + "bbox": [ + 106, + 513, + 501, + 545 + ], + "lines": [ + { + "bbox": [ + 104, + 511, + 504, + 525 + ], + "spans": [ + { + "bbox": [ + 104, + 511, + 209, + 525 + 
], + "score": 1.0, + "content": "pect the average learned", + "type": "text" + }, + { + "bbox": [ + 209, + 512, + 218, + 523 + ], + "score": 0.28, + "content": "\\mathrm { Q }", + "type": "inline_equation" + }, + { + "bbox": [ + 218, + 511, + 504, + 525 + ], + "score": 1.0, + "content": "-value to decrease with more training. Thus, one approach to predict", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 106, + 523, + 504, + 536 + ], + "spans": [ + { + "bbox": [ + 106, + 523, + 273, + 536 + ], + "score": 1.0, + "content": "underfitting is to track both the TD error,", + "type": "text" + }, + { + "bbox": [ + 273, + 523, + 306, + 534 + ], + "score": 0.92, + "content": "{ \\mathcal { L } } _ { \\mathrm { T D } } ( \\theta )", + "type": "inline_equation" + }, + { + "bbox": [ + 306, + 523, + 412, + 536 + ], + "score": 1.0, + "content": ", and the CQL regularizer,", + "type": "text" + }, + { + "bbox": [ + 412, + 523, + 434, + 534 + ], + "score": 0.9, + "content": "{ \\mathcal { R } } ( \\theta )", + "type": "inline_equation" + }, + { + "bbox": [ + 435, + 523, + 504, + 536 + ], + "score": 1.0, + "content": ", and check if the", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 106, + 534, + 467, + 546 + ], + "spans": [ + { + "bbox": [ + 106, + 534, + 467, + 546 + ], + "score": 1.0, + "content": "value of even one of these quantities is large. 
More discussion is provided in Appendix A.", + "type": "text" + } + ], + "index": 39 + } + ], + "index": 38 + }, + { + "type": "text", + "bbox": [ + 106, + 550, + 505, + 671 + ], + "lines": [ + { + "bbox": [ + 105, + 550, + 506, + 563 + ], + "spans": [ + { + "bbox": [ + 105, + 550, + 221, + 563 + ], + "score": 1.0, + "content": "How do we determine if the", + "type": "text" + }, + { + "bbox": [ + 221, + 550, + 236, + 560 + ], + "score": 0.36, + "content": "\\mathbf { \\nabla } ^ { T D }", + "type": "inline_equation" + }, + { + "bbox": [ + 236, + 550, + 506, + 563 + ], + "score": 1.0, + "content": "error and the CQL regularizer are β€œlarge”? In order to determine", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 561, + 506, + 573 + ], + "spans": [ + { + "bbox": [ + 105, + 561, + 506, + 573 + ], + "score": 1.0, + "content": "if the error of a particular run is large, we can rerun the base CQL algorithm but with models of", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 571, + 506, + 586 + ], + "spans": [ + { + "bbox": [ + 105, + 571, + 506, + 586 + ], + "score": 1.0, + "content": "higher capacity, which does not necessarily correspond to the function approximator size, as we", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 582, + 506, + 596 + ], + "spans": [ + { + "bbox": [ + 105, + 582, + 506, + 596 + ], + "score": 1.0, + "content": "will discuss in Section 4. For each model, we record the corresponding training errors and check if", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 592, + 506, + 608 + ], + "spans": [ + { + "bbox": [ + 105, + 592, + 506, + 608 + ], + "score": 1.0, + "content": "the training TD error and CQL regularizer value are reduced with capacity increase. 
If increasing", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 604, + 505, + 618 + ], + "spans": [ + { + "bbox": [ + 105, + 604, + 505, + 618 + ], + "score": 1.0, + "content": "capacity leads to a reduction in the loss without exhibiting the overfitting signs described previously,", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 106, + 616, + 505, + 628 + ], + "spans": [ + { + "bbox": [ + 106, + 616, + 505, + 628 + ], + "score": 1.0, + "content": "then we are in an underfitting regime. Another approach to answer the question is to utilize the value", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 105, + 626, + 506, + 639 + ], + "spans": [ + { + "bbox": [ + 105, + 626, + 169, + 639 + ], + "score": 1.0, + "content": "of the TD error", + "type": "text" + }, + { + "bbox": [ + 170, + 626, + 205, + 639 + ], + "score": 0.9, + "content": "\\left( \\mathcal { L } _ { \\mathrm { T D } } ( \\theta ) \\right)", + "type": "inline_equation" + }, + { + "bbox": [ + 206, + 626, + 290, + 639 + ], + "score": 1.0, + "content": "and the task horizon", + "type": "text" + }, + { + "bbox": [ + 290, + 627, + 335, + 638 + ], + "score": 0.89, + "content": "( 1 / ( 1 - \\gamma ) )", + "type": "inline_equation" + }, + { + "bbox": [ + 335, + 626, + 506, + 639 + ], + "score": 1.0, + "content": "to estimate the overall error in the learned", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 105, + 637, + 506, + 651 + ], + "spans": [ + { + "bbox": [ + 105, + 637, + 321, + 651 + ], + "score": 1.0, + "content": "Q-values against the actual Q-value, which is equal to", + "type": "text" + }, + { + "bbox": [ + 321, + 637, + 384, + 649 + ], + "score": 0.92, + "content": "\\dot { \\mathcal { L } } _ { \\mathrm { T D } } ( \\theta ) / ( 1 - \\gamma )", + "type": "inline_equation" + }, + { + "bbox": [ + 385, + 637, + 506, + 651 + ], + "score": 1.0, + "content": "[23] (see Appendix A). 
If this", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 105, + 649, + 506, + 660 + ], + "spans": [ + { + "bbox": [ + 105, + 649, + 506, + 660 + ], + "score": 1.0, + "content": "overall error spans the range of allowed Q-values on the task – which could be inferred based on the", + "type": "text" + } + ], + "index": 49 + }, + { + "bbox": [ + 105, + 658, + 485, + 672 + ], + "spans": [ + { + "bbox": [ + 105, + 658, + 485, + 672 + ], + "score": 1.0, + "content": "structure of the reward function in the task – then we can say that the algorithm is underfitting.", + "type": "text" + } + ], + "index": 50 + } + ], + "index": 45 + }, + { + "type": "text", + "bbox": [ + 118, + 679, + 493, + 714 + ], + "lines": [ + { + "bbox": [ + 117, + 678, + 494, + 693 + ], + "spans": [ + { + "bbox": [ + 117, + 678, + 375, + 693 + ], + "score": 1.0, + "content": "Metric 3.2 (Underfitting). Compute the values of the training", + "type": "text" + }, + { + "bbox": [ + 376, + 680, + 390, + 690 + ], + "score": 0.54, + "content": "T D", + "type": "inline_equation" + }, + { + "bbox": [ + 390, + 678, + 417, + 693 + ], + "score": 1.0, + "content": "error,", + "type": "text" + }, + { + "bbox": [ + 417, + 680, + 450, + 692 + ], + "score": 0.88, + "content": "{ \\mathcal { L } } _ { \\mathrm { T D } } ( \\theta )", + "type": "inline_equation" + }, + { + "bbox": [ + 450, + 678, + 494, + 693 + ], + "score": 1.0, + "content": "and CQL", + "type": "text" + } + ], + "index": 51 + }, + { + "bbox": [ + 117, + 689, + 492, + 704 + ], + "spans": [ + { + "bbox": [ + 117, + 689, + 167, + 704 + ], + "score": 1.0, + "content": "regularizer,", + "type": "text" + }, + { + "bbox": [ + 167, + 690, + 189, + 703 + ], + "score": 0.75, + "content": "{ \\mathcal { R } } ( \\theta )", + "type": "inline_equation" + }, + { + "bbox": [ + 189, + 689, + 492, + 704 + ], + "score": 1.0, + "content": "for the current run and another identical run with increased model capacity.", + "type": "text" + } + ], + "index": 
52 + }, + { + "bbox": [ + 117, + 700, + 494, + 716 + ], + "spans": [ + { + "bbox": [ + 117, + 700, + 494, + 716 + ], + "score": 1.0, + "content": "If the training errors reduce with increasing model capacity, the original run was underfitting.", + "type": "text" + } + ], + "index": 53 + } + ], + "index": 52 + } + ], + "page_idx": 3, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 302, + 742, + 308, + 750 + ], + "lines": [ + { + "bbox": [ + 301, + 741, + 310, + 752 + ], + "spans": [ + { + "bbox": [ + 301, + 741, + 310, + 752 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 11, + "width": 9 + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 72, + 505, + 172 + ], + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 85 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 85 + ], + "score": 1.0, + "content": "Q-value and thus the Q-values for the behavior policy are not underestimated in expectation. 
Now,", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 83, + 505, + 97 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 505, + 97 + ], + "score": 1.0, + "content": "if the policy optimizer finds a policy that attains a smaller learned Q-value than the dataset return,", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 94, + 505, + 107 + ], + "spans": [ + { + "bbox": [ + 106, + 94, + 505, + 107 + ], + "score": 1.0, + "content": "the policy can always be updated further towards the behavior policy so as to raise the Q-value.", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 105, + 505, + 118 + ], + "spans": [ + { + "bbox": [ + 106, + 105, + 505, + 118 + ], + "score": 1.0, + "content": "Therefore, Q-values can only decrease when the policy found by CQL is better than the behavior", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 116, + 505, + 129 + ], + "spans": [ + { + "bbox": [ + 105, + 116, + 424, + 129 + ], + "score": 1.0, + "content": "policy. We formalize this intuition in Appendix A in Theorem A.1. 
Thus, a low", + "type": "text" + }, + { + "bbox": [ + 424, + 117, + 433, + 128 + ], + "score": 0.34, + "content": "\\mathrm { Q }", + "type": "inline_equation" + }, + { + "bbox": [ + 433, + 116, + 471, + 129 + ], + "score": 1.0, + "content": "-value on", + "type": "text" + }, + { + "bbox": [ + 471, + 116, + 505, + 128 + ], + "score": 0.87, + "content": "( \\mathbf { s } , \\mathbf { a } ) \\in", + "type": "inline_equation" + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 126, + 505, + 140 + ], + "spans": [ + { + "bbox": [ + 107, + 128, + 116, + 137 + ], + "score": 0.79, + "content": "\\mathcal { D }", + "type": "inline_equation" + }, + { + "bbox": [ + 116, + 126, + 473, + 140 + ], + "score": 1.0, + "content": "indicates that the Q-function predicts extremely small Q-values on actions sampled from", + "type": "text" + }, + { + "bbox": [ + 474, + 128, + 501, + 139 + ], + "score": 0.9, + "content": "\\mu ( \\mathbf { a } | \\mathbf { s } )", + "type": "inline_equation" + }, + { + "bbox": [ + 501, + 126, + 505, + 140 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 138, + 505, + 151 + ], + "spans": [ + { + "bbox": [ + 106, + 138, + 369, + 151 + ], + "score": 1.0, + "content": "Typically, this would mean the highest Q-value actions a at a state", + "type": "text" + }, + { + "bbox": [ + 369, + 138, + 395, + 148 + ], + "score": 0.88, + "content": "\\mathbf { s } \\in \\mathcal { D }", + "type": "inline_equation" + }, + { + "bbox": [ + 396, + 138, + 505, + 151 + ], + "score": 1.0, + "content": "are those sampled from the", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 148, + 506, + 162 + ], + "spans": [ + { + "bbox": [ + 106, + 148, + 506, + 162 + ], + "score": 1.0, + "content": "offline dataset, drawn from the behavior policy. 
Thus, policy optimization, which aims to maximize", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 159, + 430, + 173 + ], + "spans": [ + { + "bbox": [ + 105, + 159, + 209, + 173 + ], + "score": 1.0, + "content": "the Q-value, would make", + "type": "text" + }, + { + "bbox": [ + 209, + 160, + 237, + 172 + ], + "score": 0.94, + "content": "\\pi ( \\mathbf { a } | \\mathbf { s } )", + "type": "inline_equation" + }, + { + "bbox": [ + 237, + 159, + 354, + 173 + ], + "score": 1.0, + "content": "closer to the behavior policy", + "type": "text" + }, + { + "bbox": [ + 354, + 160, + 387, + 172 + ], + "score": 0.92, + "content": "\\pi _ { \\beta } ( \\mathbf { a } | \\mathbf { s } )", + "type": "inline_equation" + }, + { + "bbox": [ + 387, + 159, + 400, + 173 + ], + "score": 1.0, + "content": "on", + "type": "text" + }, + { + "bbox": [ + 400, + 160, + 426, + 170 + ], + "score": 0.89, + "content": "\\mathbf { s } \\in \\mathcal { D }", + "type": "inline_equation" + }, + { + "bbox": [ + 427, + 159, + 430, + 173 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 8 + } + ], + "index": 4, + "bbox_fs": [ + 105, + 72, + 506, + 173 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 176, + 505, + 275 + ], + "lines": [ + { + "bbox": [ + 105, + 175, + 506, + 190 + ], + "spans": [ + { + "bbox": [ + 105, + 175, + 506, + 190 + ], + "score": 1.0, + "content": "Which training checkpoint is likely to attain the best policy performance? Tracking overfitting", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 187, + 506, + 201 + ], + "spans": [ + { + "bbox": [ + 105, + 187, + 506, + 201 + ], + "score": 1.0, + "content": "in supervised learning is important for selecting the best-performing checkpoint, before overfitting", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 199, + 504, + 210 + ], + "spans": [ + { + "bbox": [ + 106, + 199, + 504, + 210 + ], + "score": 1.0, + "content": "becomes severe. 
Analogously, we can compare the average dataset Q-value across different check-", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 209, + 506, + 222 + ], + "spans": [ + { + "bbox": [ + 106, + 209, + 506, + 222 + ], + "score": 1.0, + "content": "points within the same run to pick the best policy. Since CQL aims to increase the average dataset", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 220, + 506, + 234 + ], + "spans": [ + { + "bbox": [ + 106, + 220, + 506, + 234 + ], + "score": 1.0, + "content": "Q-value (Equation 1), we would expect Q-values to initially increase, until learning starts to overfit", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 231, + 505, + 244 + ], + "spans": [ + { + "bbox": [ + 106, + 231, + 202, + 244 + ], + "score": 1.0, + "content": "and the average dataset", + "type": "text" + }, + { + "bbox": [ + 203, + 231, + 212, + 242 + ], + "score": 0.29, + "content": "\\mathrm { Q }", + "type": "inline_equation" + }, + { + "bbox": [ + 212, + 231, + 505, + 244 + ], + "score": 1.0, + "content": "-value starts decreasing. We should therefore select the latest checkpoint", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 242, + 505, + 254 + ], + "spans": [ + { + "bbox": [ + 106, + 242, + 505, + 254 + ], + "score": 1.0, + "content": "that corresponds to a peak in the estimated dataset Q-value. A visual illustration of this idea is shown", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 252, + 506, + 267 + ], + "spans": [ + { + "bbox": [ + 105, + 252, + 506, + 267 + ], + "score": 1.0, + "content": "in the figure on the previous page, where the checkpoint marked by the green line is recommended", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 264, + 368, + 276 + ], + "spans": [ + { + "bbox": [ + 106, + 264, + 368, + 276 + ], + "score": 1.0, + "content": "to be chosen. 
In summary, (a) to detect overfitting we can track:", + "type": "text" + } + ], + "index": 17 + } + ], + "index": 13, + "bbox_fs": [ + 105, + 175, + 506, + 276 + ] + }, + { + "type": "text", + "bbox": [ + 116, + 284, + 494, + 307 + ], + "lines": [ + { + "bbox": [ + 115, + 281, + 495, + 298 + ], + "spans": [ + { + "bbox": [ + 115, + 281, + 305, + 298 + ], + "score": 1.0, + "content": "Metric 3.1 (Overfitting). A low average data", + "type": "text" + }, + { + "bbox": [ + 305, + 285, + 313, + 295 + ], + "score": 0.52, + "content": "Q", + "type": "inline_equation" + }, + { + "bbox": [ + 314, + 281, + 340, + 298 + ], + "score": 1.0, + "content": "-value", + "type": "text" + }, + { + "bbox": [ + 340, + 284, + 411, + 296 + ], + "score": 0.9, + "content": "\\mathbb { E } _ { \\mathbf { s } , \\mathbf { a } \\sim \\mathcal { D } } [ Q _ { \\theta } ( \\mathbf { s } , \\mathbf { a } ) ]", + "type": "inline_equation" + }, + { + "bbox": [ + 411, + 281, + 495, + 298 + ], + "score": 1.0, + "content": "that decreases with", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 116, + 293, + 469, + 309 + ], + "spans": [ + { + "bbox": [ + 116, + 293, + 469, + 309 + ], + "score": 1.0, + "content": "more gradient steps on Equation 1 indicates that the offline RL algorithm is overfitting.", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 18.5, + "bbox_fs": [ + 115, + 281, + 495, + 309 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 319, + 493, + 331 + ], + "lines": [ + { + "bbox": [ + 105, + 318, + 496, + 334 + ], + "spans": [ + { + "bbox": [ + 105, + 318, + 496, + 334 + ], + "score": 1.0, + "content": "and (b) further, given a run that exhibits overfitting, our principle for policy selection is given by:", + "type": "text" + } + ], + "index": 20 + } + ], + "index": 20, + "bbox_fs": [ + 105, + 318, + 496, + 334 + ] + }, + { + "type": "text", + "bbox": [ + 112, + 339, + 493, + 363 + ], + "lines": [ + { + "bbox": [ + 117, + 338, + 495, + 353 + ], + "spans": [ + 
{ + "bbox": [ + 117, + 338, + 495, + 353 + ], + "score": 1.0, + "content": "Guideline 3.1 (Policy selection). If a run overfits (per Metric 3.1), select the checkpoint that", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 117, + 349, + 431, + 363 + ], + "spans": [ + { + "bbox": [ + 117, + 349, + 260, + 363 + ], + "score": 1.0, + "content": "attains the highest average dataset", + "type": "text" + }, + { + "bbox": [ + 261, + 352, + 269, + 362 + ], + "score": 0.37, + "content": "Q", + "type": "inline_equation" + }, + { + "bbox": [ + 269, + 349, + 431, + 363 + ], + "score": 1.0, + "content": "-value before overfitting for deployment.", + "type": "text" + } + ], + "index": 22 + } + ], + "index": 21.5, + "bbox_fs": [ + 117, + 338, + 495, + 363 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 375, + 504, + 430 + ], + "lines": [ + { + "bbox": [ + 106, + 375, + 505, + 388 + ], + "spans": [ + { + "bbox": [ + 106, + 375, + 505, + 388 + ], + "score": 1.0, + "content": "Finally, for actor-critic algorithms [18] that update the actor slower than the critic, the next policy", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 386, + 505, + 399 + ], + "spans": [ + { + "bbox": [ + 106, + 386, + 505, + 399 + ], + "score": 1.0, + "content": "checkpoint after the peak in the average dataset Q-value appears must be selected. 
In most of our", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 106, + 397, + 505, + 411 + ], + "spans": [ + { + "bbox": [ + 106, + 397, + 505, + 411 + ], + "score": 1.0, + "content": "experiments, we find that simply utilizing the policy checkpoint at the point of the peak in the Q-", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 106, + 407, + 506, + 421 + ], + "spans": [ + { + "bbox": [ + 106, + 407, + 506, + 421 + ], + "score": 1.0, + "content": "value also leads to good results making this a rare concern, but in some cases, utilizing the next", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 419, + 356, + 432 + ], + "spans": [ + { + "bbox": [ + 105, + 419, + 356, + 432 + ], + "score": 1.0, + "content": "checkpoint after the Q-value peak performs better empirically.", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 25, + "bbox_fs": [ + 105, + 375, + 506, + 432 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 435, + 397, + 512 + ], + "lines": [ + { + "bbox": [ + 105, + 435, + 396, + 448 + ], + "spans": [ + { + "bbox": [ + 105, + 435, + 396, + 448 + ], + "score": 1.0, + "content": "Detecting underfitting in CQL. Next, we turn to devising a procedure", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 446, + 396, + 459 + ], + "spans": [ + { + "bbox": [ + 106, + 446, + 396, + 459 + ], + "score": 1.0, + "content": "to detect underfitting. As summarized in Table 1, underfitting occurs", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 106, + 457, + 396, + 469 + ], + "spans": [ + { + "bbox": [ + 106, + 457, + 396, + 469 + ], + "score": 1.0, + "content": "when the RL algorithm is unable to minimize the training objective in", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 106, + 468, + 397, + 481 + ], + "spans": [ + { + "bbox": [ + 106, + 468, + 397, + 481 + ], + "score": 1.0, + "content": "Equation 1 effectively. 
Therefore, large values for the TD error, the CQL", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 478, + 396, + 492 + ], + "spans": [ + { + "bbox": [ + 105, + 478, + 396, + 492 + ], + "score": 1.0, + "content": "regularizer, or both imply underfitting. A large value for the CQL reg-", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 106, + 490, + 396, + 502 + ], + "spans": [ + { + "bbox": [ + 106, + 490, + 142, + 502 + ], + "score": 1.0, + "content": "ularizer,", + "type": "text" + }, + { + "bbox": [ + 142, + 490, + 164, + 502 + ], + "score": 0.9, + "content": "{ \\mathcal { R } } ( \\theta )", + "type": "inline_equation" + }, + { + "bbox": [ + 165, + 490, + 396, + 502 + ], + "score": 1.0, + "content": ", indicates an overestimation of Q-values relative to their", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 106, + 501, + 396, + 513 + ], + "spans": [ + { + "bbox": [ + 106, + 501, + 396, + 513 + ], + "score": 1.0, + "content": "true value [2] and thus, unlike the overfitting regime, we would not ex-", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 104, + 511, + 504, + 525 + ], + "spans": [ + { + "bbox": [ + 104, + 511, + 209, + 525 + ], + "score": 1.0, + "content": "pect the average learned", + "type": "text" + }, + { + "bbox": [ + 209, + 512, + 218, + 523 + ], + "score": 0.28, + "content": "\\mathrm { Q }", + "type": "inline_equation" + }, + { + "bbox": [ + 218, + 511, + 504, + 525 + ], + "score": 1.0, + "content": "-value to decrease with more training. 
Thus, one approach to predict", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 106, + 523, + 504, + 536 + ], + "spans": [ + { + "bbox": [ + 106, + 523, + 273, + 536 + ], + "score": 1.0, + "content": "underfitting is to track both the TD error,", + "type": "text" + }, + { + "bbox": [ + 273, + 523, + 306, + 534 + ], + "score": 0.92, + "content": "{ \\mathcal { L } } _ { \\mathrm { T D } } ( \\theta )", + "type": "inline_equation" + }, + { + "bbox": [ + 306, + 523, + 412, + 536 + ], + "score": 1.0, + "content": ", and the CQL regularizer,", + "type": "text" + }, + { + "bbox": [ + 412, + 523, + 434, + 534 + ], + "score": 0.9, + "content": "{ \\mathcal { R } } ( \\theta )", + "type": "inline_equation" + }, + { + "bbox": [ + 435, + 523, + 504, + 536 + ], + "score": 1.0, + "content": ", and check if the", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 106, + 534, + 467, + 546 + ], + "spans": [ + { + "bbox": [ + 106, + 534, + 467, + 546 + ], + "score": 1.0, + "content": "value of even one of these quantities is large. 
More discussion is provided in Appendix A.", + "type": "text" + } + ], + "index": 39 + } + ], + "index": 32, + "bbox_fs": [ + 105, + 435, + 397, + 513 + ] + }, + { + "type": "image", + "bbox": [ + 403, + 434, + 501, + 511 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 403, + 434, + 501, + 511 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 403, + 434, + 501, + 511 + ], + "spans": [ + { + "bbox": [ + 403, + 434, + 501, + 511 + ], + "score": 0.958, + "type": "image", + "image_path": "4b1e144e763ab56eff446401eff0f116a6cc810b779d08af3c7a767883b102d8.jpg" + } + ] + } + ], + "index": 33.5, + "virtual_lines": [ + { + "bbox": [ + 403, + 434, + 501, + 472.5 + ], + "spans": [], + "index": 31 + }, + { + "bbox": [ + 403, + 472.5, + 501, + 511.0 + ], + "spans": [], + "index": 36 + } + ] + } + ], + "index": 33.5 + }, + { + "type": "text", + "bbox": [ + 106, + 513, + 501, + 545 + ], + "lines": [], + "index": 38, + "bbox_fs": [ + 104, + 511, + 504, + 546 + ], + "lines_deleted": true + }, + { + "type": "text", + "bbox": [ + 106, + 550, + 505, + 671 + ], + "lines": [ + { + "bbox": [ + 105, + 550, + 506, + 563 + ], + "spans": [ + { + "bbox": [ + 105, + 550, + 221, + 563 + ], + "score": 1.0, + "content": "How do we determine if the", + "type": "text" + }, + { + "bbox": [ + 221, + 550, + 236, + 560 + ], + "score": 0.36, + "content": "\\mathbf { \\nabla } ^ { T D }", + "type": "inline_equation" + }, + { + "bbox": [ + 236, + 550, + 506, + 563 + ], + "score": 1.0, + "content": "error and the CQL regularizer are β€œlarge”? 
In order to determine", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 561, + 506, + 573 + ], + "spans": [ + { + "bbox": [ + 105, + 561, + 506, + 573 + ], + "score": 1.0, + "content": "if the error of a particular run is large, we can rerun the base CQL algorithm but with models of", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 571, + 506, + 586 + ], + "spans": [ + { + "bbox": [ + 105, + 571, + 506, + 586 + ], + "score": 1.0, + "content": "higher capacity, which does not necessarily correspond to the function approximator size, as we", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 582, + 506, + 596 + ], + "spans": [ + { + "bbox": [ + 105, + 582, + 506, + 596 + ], + "score": 1.0, + "content": "will discuss in Section 4. For each model, we record the corresponding training errors and check if", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 592, + 506, + 608 + ], + "spans": [ + { + "bbox": [ + 105, + 592, + 506, + 608 + ], + "score": 1.0, + "content": "the training TD error and CQL regularizer value are reduced with capacity increase. If increasing", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 604, + 505, + 618 + ], + "spans": [ + { + "bbox": [ + 105, + 604, + 505, + 618 + ], + "score": 1.0, + "content": "capacity leads to a reduction in the loss without exhibiting the overfitting signs described previously,", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 106, + 616, + 505, + 628 + ], + "spans": [ + { + "bbox": [ + 106, + 616, + 505, + 628 + ], + "score": 1.0, + "content": "then we are in an underfitting regime. 
Another approach to answer the question is to utilize the value", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 105, + 626, + 506, + 639 + ], + "spans": [ + { + "bbox": [ + 105, + 626, + 169, + 639 + ], + "score": 1.0, + "content": "of the TD error", + "type": "text" + }, + { + "bbox": [ + 170, + 626, + 205, + 639 + ], + "score": 0.9, + "content": "\\left( \\mathcal { L } _ { \\mathrm { T D } } ( \\theta ) \\right)", + "type": "inline_equation" + }, + { + "bbox": [ + 206, + 626, + 290, + 639 + ], + "score": 1.0, + "content": "and the task horizon", + "type": "text" + }, + { + "bbox": [ + 290, + 627, + 335, + 638 + ], + "score": 0.89, + "content": "( 1 / ( 1 - \\gamma ) )", + "type": "inline_equation" + }, + { + "bbox": [ + 335, + 626, + 506, + 639 + ], + "score": 1.0, + "content": "to estimate the overall error in the learned", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 105, + 637, + 506, + 651 + ], + "spans": [ + { + "bbox": [ + 105, + 637, + 321, + 651 + ], + "score": 1.0, + "content": "Q-values against the actual Q-value, which is equal to", + "type": "text" + }, + { + "bbox": [ + 321, + 637, + 384, + 649 + ], + "score": 0.92, + "content": "\\dot { \\mathcal { L } } _ { \\mathrm { T D } } ( \\theta ) / ( 1 - \\gamma )", + "type": "inline_equation" + }, + { + "bbox": [ + 385, + 637, + 506, + 651 + ], + "score": 1.0, + "content": "[23] (see Appendix A). 
If this", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 105, + 649, + 506, + 660 + ], + "spans": [ + { + "bbox": [ + 105, + 649, + 506, + 660 + ], + "score": 1.0, + "content": "overall error spans the range of allowed Q-values on the task – which could be inferred based on the", + "type": "text" + } + ], + "index": 49 + }, + { + "bbox": [ + 105, + 658, + 485, + 672 + ], + "spans": [ + { + "bbox": [ + 105, + 658, + 485, + 672 + ], + "score": 1.0, + "content": "structure of the reward function in the task – then we can say that the algorithm is underfitting.", + "type": "text" + } + ], + "index": 50 + } + ], + "index": 45, + "bbox_fs": [ + 105, + 550, + 506, + 672 + ] + }, + { + "type": "text", + "bbox": [ + 118, + 679, + 493, + 714 + ], + "lines": [ + { + "bbox": [ + 117, + 678, + 494, + 693 + ], + "spans": [ + { + "bbox": [ + 117, + 678, + 375, + 693 + ], + "score": 1.0, + "content": "Metric 3.2 (Underfitting). Compute the values of the training", + "type": "text" + }, + { + "bbox": [ + 376, + 680, + 390, + 690 + ], + "score": 0.54, + "content": "T D", + "type": "inline_equation" + }, + { + "bbox": [ + 390, + 678, + 417, + 693 + ], + "score": 1.0, + "content": "error,", + "type": "text" + }, + { + "bbox": [ + 417, + 680, + 450, + 692 + ], + "score": 0.88, + "content": "{ \\mathcal { L } } _ { \\mathrm { T D } } ( \\theta )", + "type": "inline_equation" + }, + { + "bbox": [ + 450, + 678, + 494, + 693 + ], + "score": 1.0, + "content": "and CQL", + "type": "text" + } + ], + "index": 51 + }, + { + "bbox": [ + 117, + 689, + 492, + 704 + ], + "spans": [ + { + "bbox": [ + 117, + 689, + 167, + 704 + ], + "score": 1.0, + "content": "regularizer,", + "type": "text" + }, + { + "bbox": [ + 167, + 690, + 189, + 703 + ], + "score": 0.75, + "content": "{ \\mathcal { R } } ( \\theta )", + "type": "inline_equation" + }, + { + "bbox": [ + 189, + 689, + 492, + 704 + ], + "score": 1.0, + "content": "for the current run and another identical run with increased model 
capacity.", + "type": "text" + } + ], + "index": 52 + }, + { + "bbox": [ + 117, + 700, + 494, + 716 + ], + "spans": [ + { + "bbox": [ + 117, + 700, + 494, + 716 + ], + "score": 1.0, + "content": "If the training errors reduce with increasing model capacity, the original run was underfitting.", + "type": "text" + } + ], + "index": 53 + } + ], + "index": 52, + "bbox_fs": [ + 117, + 678, + 494, + 716 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "title", + "bbox": [ + 106, + 71, + 470, + 85 + ], + "lines": [ + { + "bbox": [ + 104, + 70, + 473, + 86 + ], + "spans": [ + { + "bbox": [ + 104, + 70, + 473, + 86 + ], + "score": 1.0, + "content": "4 Addressing Overfitting and Underfitting in Conservative Offline RL", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 107, + 86, + 505, + 141 + ], + "lines": [ + { + "bbox": [ + 106, + 85, + 505, + 98 + ], + "spans": [ + { + "bbox": [ + 106, + 85, + 505, + 98 + ], + "score": 1.0, + "content": "The typical workflow for supervised learning not only identifies overfitting and underfitting, but", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 95, + 505, + 110 + ], + "spans": [ + { + "bbox": [ + 105, + 95, + 505, + 110 + ], + "score": 1.0, + "content": "also guides the practitioner how to adjust their method so as to alleviate it (e.g., by modifying", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 107, + 506, + 120 + ], + "spans": [ + { + "bbox": [ + 105, + 107, + 506, + 120 + ], + "score": 1.0, + "content": "regularization or model capacity), thus improving performance. Can we devise similar guidelines", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 118, + 506, + 131 + ], + "spans": [ + { + "bbox": [ + 105, + 118, + 506, + 131 + ], + "score": 1.0, + "content": "to address overfitting and underfitting with conservative offline RL? 
Here, we discuss some ways to", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 129, + 391, + 142 + ], + "spans": [ + { + "bbox": [ + 106, + 129, + 391, + 142 + ], + "score": 1.0, + "content": "adjust regularization and model capacity to alleviate these phenomena.", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 3 + }, + { + "type": "text", + "bbox": [ + 106, + 145, + 505, + 268 + ], + "lines": [ + { + "bbox": [ + 106, + 146, + 505, + 157 + ], + "spans": [ + { + "bbox": [ + 106, + 146, + 505, + 157 + ], + "score": 1.0, + "content": "Capacity-decreasing regularization for overfitting. As we observed in Section 3, the mechanism", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 156, + 505, + 169 + ], + "spans": [ + { + "bbox": [ + 106, + 156, + 196, + 169 + ], + "score": 1.0, + "content": "behind extremely low", + "type": "text" + }, + { + "bbox": [ + 197, + 157, + 205, + 167 + ], + "score": 0.3, + "content": "\\mathbf { Q }", + "type": "inline_equation" + }, + { + "bbox": [ + 206, + 156, + 422, + 169 + ], + "score": 1.0, + "content": "-values on the dataset is that CQL training minimizes", + "type": "text" + }, + { + "bbox": [ + 423, + 157, + 432, + 168 + ], + "score": 0.28, + "content": "\\mathrm { Q }", + "type": "inline_equation" + }, + { + "bbox": [ + 432, + 156, + 505, + 169 + ], + "score": 1.0, + "content": "-values on actions", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 167, + 506, + 181 + ], + "spans": [ + { + "bbox": [ + 105, + 167, + 164, + 181 + ], + "score": 1.0, + "content": "sampled from", + "type": "text" + }, + { + "bbox": [ + 164, + 167, + 191, + 179 + ], + "score": 0.93, + "content": "\\mu ( \\mathbf { a } | \\mathbf { s } )", + "type": "inline_equation" + }, + { + "bbox": [ + 192, + 167, + 506, + 181 + ], + "score": 1.0, + "content": ". 
Two possible approaches to preventing over-minimization of these values are", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 177, + 505, + 192 + ], + "spans": [ + { + "bbox": [ + 105, + 177, + 505, + 192 + ], + "score": 1.0, + "content": "(1) applying regularization such as dropout [24] on Q-function layers, similar to supervised learning,", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 189, + 505, + 201 + ], + "spans": [ + { + "bbox": [ + 105, + 189, + 505, + 201 + ], + "score": 1.0, + "content": "and (2) enforcing that representations of the learned Q-function match a pre-specified target for all", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 200, + 506, + 214 + ], + "spans": [ + { + "bbox": [ + 105, + 200, + 506, + 214 + ], + "score": 1.0, + "content": "state-action tuples. For (2), we can apply techniques such as a variational information bottleneck", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 211, + 505, + 223 + ], + "spans": [ + { + "bbox": [ + 106, + 211, + 339, + 223 + ], + "score": 1.0, + "content": "(VIB) [25, 26] regularizer on the learned representations,", + "type": "text" + }, + { + "bbox": [ + 340, + 211, + 359, + 223 + ], + "score": 0.9, + "content": "\\phi ( \\mathbf { s } )", + "type": "inline_equation" + }, + { + "bbox": [ + 359, + 211, + 419, + 223 + ], + "score": 1.0, + "content": ". Formally, let", + "type": "text" + }, + { + "bbox": [ + 420, + 211, + 442, + 223 + ], + "score": 0.89, + "content": "( \\mathbf { s } , \\mathbf { a } )", + "type": "inline_equation" + }, + { + "bbox": [ + 443, + 211, + 505, + 223 + ], + "score": 1.0, + "content": "denote a state-", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 222, + 505, + 235 + ], + "spans": [ + { + "bbox": [ + 106, + 223, + 302, + 235 + ], + "score": 1.0, + "content": "action pair. 
Instead of predicting a deterministic", + "type": "text" + }, + { + "bbox": [ + 302, + 222, + 347, + 235 + ], + "score": 0.92, + "content": "\\phi ( \\mathbf { s } ) \\in \\mathbb { R } ^ { d }", + "type": "inline_equation" + }, + { + "bbox": [ + 347, + 223, + 505, + 235 + ], + "score": 1.0, + "content": "(Figure 10), we modify the Q-network", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 233, + 506, + 248 + ], + "spans": [ + { + "bbox": [ + 104, + 233, + 235, + 248 + ], + "score": 1.0, + "content": "to predict two distinct vectors,", + "type": "text" + }, + { + "bbox": [ + 236, + 234, + 291, + 247 + ], + "score": 0.92, + "content": "\\phi _ { m } ( \\mathbf { s } ) \\in \\mathbb { R } ^ { d }", + "type": "inline_equation" + }, + { + "bbox": [ + 291, + 233, + 311, + 248 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 311, + 234, + 365, + 246 + ], + "score": 0.93, + "content": "\\phi _ { \\Sigma } ( \\mathbf { s } ) \\in \\mathbb { R } ^ { d }", + "type": "inline_equation" + }, + { + "bbox": [ + 365, + 233, + 419, + 248 + ], + "score": 1.0, + "content": ", and sample", + "type": "text" + }, + { + "bbox": [ + 420, + 235, + 439, + 246 + ], + "score": 0.88, + "content": "\\phi ( \\mathbf { s } )", + "type": "inline_equation" + }, + { + "bbox": [ + 439, + 233, + 506, + 248 + ], + "score": 1.0, + "content": "randomly from", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 245, + 506, + 259 + ], + "spans": [ + { + "bbox": [ + 105, + 245, + 203, + 259 + ], + "score": 1.0, + "content": "a Gaussian centered at", + "type": "text" + }, + { + "bbox": [ + 203, + 246, + 218, + 257 + ], + "score": 0.88, + "content": "\\phi _ { m }", + "type": "inline_equation" + }, + { + "bbox": [ + 218, + 245, + 288, + 259 + ], + "score": 1.0, + "content": "with covariance", + "type": "text" + }, + { + "bbox": [ + 289, + 246, + 302, + 258 + ], + "score": 0.89, + "content": "\\phi _ { \\Sigma }", + "type": 
"inline_equation" + }, + { + "bbox": [ + 302, + 245, + 326, + 259 + ], + "score": 1.0, + "content": ", i.e.,", + "type": "text" + }, + { + "bbox": [ + 326, + 246, + 456, + 258 + ], + "score": 0.87, + "content": "\\phi ( \\mathbf { s } ) \\sim \\mathcal { N } ( \\phi _ { m } ( \\mathbf { s } ) , \\mathrm { d i a g } ( \\phi _ { \\Sigma } ( \\mathbf { s } ) )", + "type": "inline_equation" + }, + { + "bbox": [ + 456, + 245, + 506, + 259 + ], + "score": 1.0, + "content": ". VIB then", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 256, + 420, + 270 + ], + "spans": [ + { + "bbox": [ + 105, + 256, + 152, + 270 + ], + "score": 1.0, + "content": "regularizes", + "type": "text" + }, + { + "bbox": [ + 153, + 257, + 246, + 269 + ], + "score": 0.62, + "content": "\\mathcal { N } ( \\phi _ { m } ( \\mathbf { s } ) , \\mathrm { d i a g } ( \\phi _ { \\Sigma } ( \\mathbf { s } ) )", + "type": "inline_equation" + }, + { + "bbox": [ + 247, + 256, + 383, + 270 + ], + "score": 1.0, + "content": "to be close to a prior distribution,", + "type": "text" + }, + { + "bbox": [ + 383, + 257, + 415, + 269 + ], + "score": 0.91, + "content": "\\mathcal { N } ( 0 , \\mathbb { I } )", + "type": "inline_equation" + }, + { + "bbox": [ + 415, + 256, + 420, + 270 + ], + "score": 1.0, + "content": ":", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 11 + }, + { + "type": "interline_equation", + "bbox": [ + 128, + 270, + 492, + 287 + ], + "lines": [ + { + "bbox": [ + 128, + 270, + 492, + 287 + ], + "spans": [ + { + "bbox": [ + 128, + 270, + 492, + 287 + ], + "score": 0.77, + "content": "\\operatorname* { m i n } _ { \\theta } \\ \\mathcal { L } _ { \\mathrm { C Q L } } ( \\theta ) + \\beta \\mathbb { E } _ { \\mathrm { s } \\sim \\mathcal { D } } \\left[ \\mathrm { D } _ { \\mathrm { K L } } \\left( \\mathcal { N } ( \\phi _ { m } ( \\mathbf { s } ) , \\mathrm { d i a g } ( \\phi _ { \\Sigma } ( \\mathbf { s } ) ) ) \\ | | \\mathcal { N } ( 0 , \\mathbb { I } ) 
\\right) \\right] \\quad ( \\mathrm { V I B ~ r e g u l a r i z e r } ) ,", + "type": "interline_equation", + "image_path": "9db0d18bcf179c6a8325f93c9a8d0886149e29c38e5c7dbf3d677141313eeab5.jpg" + } + ] + } + ], + "index": 17, + "virtual_lines": [ + { + "bbox": [ + 128, + 270, + 492, + 287 + ], + "spans": [], + "index": 17 + } + ] + }, + { + "type": "text", + "bbox": [ + 115, + 296, + 493, + 319 + ], + "lines": [ + { + "bbox": [ + 116, + 294, + 494, + 311 + ], + "spans": [ + { + "bbox": [ + 116, + 294, + 494, + 311 + ], + "score": 1.0, + "content": "Guideline 4.1. To address overfitting, we recommend using some form of capacity-decreasing", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 116, + 305, + 494, + 320 + ], + "spans": [ + { + "bbox": [ + 116, + 305, + 204, + 320 + ], + "score": 1.0, + "content": "regularization on the", + "type": "text" + }, + { + "bbox": [ + 204, + 308, + 213, + 319 + ], + "score": 0.26, + "content": "Q", + "type": "inline_equation" + }, + { + "bbox": [ + 213, + 305, + 494, + 320 + ], + "score": 1.0, + "content": "-function, such as dropout or the VIB regularizer shown in Equation 3.", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 18.5 + }, + { + "type": "text", + "bbox": [ + 106, + 332, + 505, + 475 + ], + "lines": [ + { + "bbox": [ + 106, + 331, + 506, + 345 + ], + "spans": [ + { + "bbox": [ + 106, + 331, + 506, + 345 + ], + "score": 1.0, + "content": "Capacity-increasing techniques for underfitting. To address underfitting, we need to increase", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 342, + 505, + 356 + ], + "spans": [ + { + "bbox": [ + 105, + 342, + 505, + 356 + ], + "score": 1.0, + "content": "model capacity to improve optimization of the training objective. 
Analogous to supervised learning,", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 106, + 354, + 505, + 367 + ], + "spans": [ + { + "bbox": [ + 106, + 354, + 505, + 367 + ], + "score": 1.0, + "content": "model capacity can be increased by using more expressive neural nets (e.g., ResNets [27], trans-", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 106, + 365, + 505, + 378 + ], + "spans": [ + { + "bbox": [ + 106, + 365, + 505, + 378 + ], + "score": 1.0, + "content": "formers [28]) for representing the learned policy. We use ResNets in our experiments (Figure 10).", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 374, + 506, + 389 + ], + "spans": [ + { + "bbox": [ + 105, + 374, + 506, + 389 + ], + "score": 1.0, + "content": "However, the RL setting presents an additional challenge with capacity: while larger models in", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 387, + 506, + 399 + ], + "spans": [ + { + "bbox": [ + 105, + 387, + 506, + 399 + ], + "score": 1.0, + "content": "principle have more capacity, recent work [29, 21, 22] has shown that utilizing larger networks to", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 104, + 398, + 505, + 410 + ], + "spans": [ + { + "bbox": [ + 104, + 398, + 505, + 410 + ], + "score": 1.0, + "content": "represent Q-functions does not always improve its capacity in practice, because TD-based RL meth-", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 408, + 505, + 420 + ], + "spans": [ + { + "bbox": [ + 105, + 408, + 505, + 420 + ], + "score": 1.0, + "content": "ods introduce an β€œimplicit under-parameterization” effect that can result in aliased (i.e., similar)", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 106, + 420, + 505, + 432 + ], + "spans": [ + { + "bbox": [ + 106, + 420, + 505, + 432 + ], + "score": 1.0, + "content": "internal representations for different state-action inputs, even for very 
large neural networks that", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 430, + 505, + 443 + ], + "spans": [ + { + "bbox": [ + 106, + 430, + 505, + 443 + ], + "score": 1.0, + "content": "can express the true Q-function effectively. To address this issue, these works apply a β€œcapacity-", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 106, + 441, + 505, + 454 + ], + "spans": [ + { + "bbox": [ + 106, + 441, + 505, + 454 + ], + "score": 1.0, + "content": "increasing” regularizer to Q-function training. For instance, we can use the DR3 regularizer [22],", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 106, + 452, + 505, + 465 + ], + "spans": [ + { + "bbox": [ + 106, + 452, + 247, + 465 + ], + "score": 1.0, + "content": "which penalizes the dot product of", + "type": "text" + }, + { + "bbox": [ + 247, + 452, + 266, + 464 + ], + "score": 0.91, + "content": "\\phi ( \\mathbf { s } )", + "type": "inline_equation" + }, + { + "bbox": [ + 267, + 452, + 285, + 465 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 285, + 452, + 307, + 464 + ], + "score": 0.92, + "content": "\\phi ( \\mathbf { s } ^ { \\prime } )", + "type": "inline_equation" + }, + { + "bbox": [ + 307, + 452, + 370, + 465 + ], + "score": 1.0, + "content": "for a transition", + "type": "text" + }, + { + "bbox": [ + 370, + 452, + 425, + 464 + ], + "score": 0.93, + "content": "( \\mathbf { s } , \\mathbf { a } , \\mathbf { s } ^ { \\prime } ) \\in \\mathcal { D }", + "type": "inline_equation" + }, + { + "bbox": [ + 426, + 452, + 505, + 465 + ], + "score": 1.0, + "content": ", and hence reduces", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 462, + 250, + 477 + ], + "spans": [ + { + "bbox": [ + 105, + 462, + 250, + 477 + ], + "score": 1.0, + "content": "aliasing. 
This objective is given by:", + "type": "text" + } + ], + "index": 32 + } + ], + "index": 26 + }, + { + "type": "interline_equation", + "bbox": [ + 129, + 480, + 428, + 500 + ], + "lines": [ + { + "bbox": [ + 129, + 480, + 428, + 500 + ], + "spans": [ + { + "bbox": [ + 129, + 480, + 428, + 500 + ], + "score": 0.8, + "content": "\\operatorname* { m i n } _ { \\theta } \\ \\mathcal { L } _ { \\mathrm { C Q L } } ( \\theta ) + \\beta \\mathbb { E } _ { { \\mathbf s } , { \\mathbf a } , { \\mathbf s } ^ { \\prime } \\sim \\mathcal { D } } \\left[ \\left| \\phi ( { \\mathbf s } ) ^ { \\top } \\phi ( { \\mathbf s } ^ { \\prime } ) \\right| \\right] \\qquad ( { \\mathrm { D R 3 ~ r e g u l a r i z e r ~ } } [ 2 2 ] ) ,", + "type": "interline_equation", + "image_path": "55e6e675c67ec1e3e4745a24fa9f244367408982eb82461b79c4a5d6cace2f4e.jpg" + } + ] + } + ], + "index": 33, + "virtual_lines": [ + { + "bbox": [ + 129, + 480, + 428, + 500 + ], + "spans": [], + "index": 33 + } + ] + }, + { + "type": "text", + "bbox": [ + 117, + 507, + 494, + 541 + ], + "lines": [ + { + "bbox": [ + 116, + 505, + 494, + 521 + ], + "spans": [ + { + "bbox": [ + 116, + 505, + 494, + 521 + ], + "score": 1.0, + "content": "Guideline 4.2. To address underfitting, we recommend using some capacity-increasing regu-", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 117, + 517, + 493, + 532 + ], + "spans": [ + { + "bbox": [ + 117, + 517, + 493, + 532 + ], + "score": 1.0, + "content": "larization on the Q-function and the policy either in conjunction or separately. 
Examples: (1)", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 116, + 528, + 433, + 543 + ], + "spans": [ + { + "bbox": [ + 116, + 528, + 433, + 543 + ], + "score": 1.0, + "content": "bigger policy networks (e.g., ResNets), (2) DR3 regularizer on the Q-network.", + "type": "text" + } + ], + "index": 36 + } + ], + "index": 35 + }, + { + "type": "title", + "bbox": [ + 106, + 551, + 459, + 565 + ], + "lines": [ + { + "bbox": [ + 104, + 551, + 460, + 567 + ], + "spans": [ + { + "bbox": [ + 104, + 551, + 460, + 567 + ], + "score": 1.0, + "content": "5 Evaluation of Our Workflow Metrics and Protocols in Simulation", + "type": "text" + } + ], + "index": 37 + } + ], + "index": 37 + }, + { + "type": "text", + "bbox": [ + 107, + 567, + 336, + 660 + ], + "lines": [ + { + "bbox": [ + 106, + 567, + 336, + 579 + ], + "spans": [ + { + "bbox": [ + 106, + 567, + 336, + 579 + ], + "score": 1.0, + "content": "Next, we empirically validate the workflow proposed in", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 106, + 577, + 335, + 590 + ], + "spans": [ + { + "bbox": [ + 106, + 577, + 335, + 590 + ], + "score": 1.0, + "content": "Sections 3 and 4 on a suite of simulated robotic manipu-", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 106, + 588, + 336, + 600 + ], + "spans": [ + { + "bbox": [ + 106, + 588, + 336, + 600 + ], + "score": 1.0, + "content": "lation domains that mimic real-robot scenarios, from im-", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 106, + 600, + 336, + 610 + ], + "spans": [ + { + "bbox": [ + 106, + 600, + 336, + 610 + ], + "score": 1.0, + "content": "age observations with sparse binary rewards. 
We will ex-", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 106, + 610, + 337, + 622 + ], + "spans": [ + { + "bbox": [ + 106, + 610, + 337, + 622 + ], + "score": 1.0, + "content": "amine how applying the workflow in Section 3 to detect", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 106, + 622, + 336, + 633 + ], + "spans": [ + { + "bbox": [ + 106, + 622, + 336, + 633 + ], + "score": 1.0, + "content": "overfitting or underfitting and then utilizing the strategies", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 632, + 336, + 644 + ], + "spans": [ + { + "bbox": [ + 105, + 632, + 336, + 644 + ], + "score": 1.0, + "content": "in Section 4 affects the performance of offline RL meth-", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 106, + 643, + 336, + 654 + ], + "spans": [ + { + "bbox": [ + 106, + 643, + 336, + 654 + ], + "score": 1.0, + "content": "ods. An improved performance would indicate the effi-", + "type": "text" + } + ], + "index": 45 + } + ], + "index": 41.5 + }, + { + "type": "image", + "bbox": [ + 344, + 568, + 504, + 641 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 344, + 568, + 504, + 641 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 344, + 568, + 504, + 641 + ], + "spans": [ + { + "bbox": [ + 344, + 568, + 504, + 641 + ], + "score": 0.972, + "type": "image", + "image_path": "feadf98282550c14dd6939d9b1aaebdb526316856a1034070ef48af1c82a1480.jpg" + } + ] + } + ], + "index": 48, + "virtual_lines": [ + { + "bbox": [ + 344, + 568, + 504, + 582.6 + ], + "spans": [], + "index": 46 + }, + { + "bbox": [ + 344, + 582.6, + 504, + 597.2 + ], + "spans": [], + "index": 47 + }, + { + "bbox": [ + 344, + 597.2, + 504, + 611.8000000000001 + ], + "spans": [], + "index": 48 + }, + { + "bbox": [ + 344, + 611.8000000000001, + 504, + 626.4000000000001 + ], + "spans": [], + "index": 49 + }, + { + "bbox": [ + 344, + 626.4000000000001, + 504, + 641.0000000000001 + ], + "spans": [], + 
"index": 50 + } + ] + } + ], + "index": 48 + }, + { + "type": "text", + "bbox": [ + 109, + 655, + 503, + 665 + ], + "lines": [ + { + "bbox": [ + 106, + 652, + 456, + 668 + ], + "spans": [ + { + "bbox": [ + 106, + 652, + 456, + 668 + ], + "score": 1.0, + "content": "cacy of our workflow in making successful design decisions without any online tuning.", + "type": "text" + } + ], + "index": 51 + } + ], + "index": 51 + }, + { + "type": "text", + "bbox": [ + 107, + 670, + 505, + 725 + ], + "lines": [ + { + "bbox": [ + 105, + 670, + 505, + 682 + ], + "spans": [ + { + "bbox": [ + 105, + 670, + 505, + 682 + ], + "score": 1.0, + "content": "Experimental setup. We use the environments from Singh et al. [3] to design offline RL tasks and", + "type": "text" + } + ], + "index": 52 + }, + { + "bbox": [ + 105, + 681, + 505, + 694 + ], + "spans": [ + { + "bbox": [ + 105, + 681, + 505, + 694 + ], + "score": 1.0, + "content": "datasets that we use for our empirical analysis. We consider two tasks: (1) a pick and place task and", + "type": "text" + } + ], + "index": 53 + }, + { + "bbox": [ + 105, + 692, + 506, + 704 + ], + "spans": [ + { + "bbox": [ + 105, + 692, + 506, + 704 + ], + "score": 1.0, + "content": "(2) a grasping object from a drawer task. Examples of trajectories in both of these simulated domains", + "type": "text" + } + ], + "index": 54 + }, + { + "bbox": [ + 105, + 703, + 506, + 716 + ], + "spans": [ + { + "bbox": [ + 105, + 703, + 506, + 716 + ], + "score": 1.0, + "content": "are shown in Figure 2 and are detailed in Appendix D. Briefly, the pick and place task consists of", + "type": "text" + } + ], + "index": 55 + }, + { + "bbox": [ + 104, + 713, + 505, + 727 + ], + "spans": [ + { + "bbox": [ + 104, + 713, + 505, + 727 + ], + "score": 1.0, + "content": "a 6-DoF WidowX robot in front of a tray with an object. 
The goal is to put the object inside the", + "type": "text" + } + ], + "index": 56 + } + ], + "index": 54 + } + ], + "page_idx": 4, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 302, + 741, + 309, + 750 + ], + "lines": [ + { + "bbox": [ + 301, + 740, + 310, + 753 + ], + "spans": [ + { + "bbox": [ + 301, + 740, + 310, + 753 + ], + "score": 1.0, + "content": "5", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "title", + "bbox": [ + 106, + 71, + 470, + 85 + ], + "lines": [ + { + "bbox": [ + 104, + 70, + 473, + 86 + ], + "spans": [ + { + "bbox": [ + 104, + 70, + 473, + 86 + ], + "score": 1.0, + "content": "4 Addressing Overfitting and Underfitting in Conservative Offline RL", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 107, + 86, + 505, + 141 + ], + "lines": [ + { + "bbox": [ + 106, + 85, + 505, + 98 + ], + "spans": [ + { + "bbox": [ + 106, + 85, + 505, + 98 + ], + "score": 1.0, + "content": "The typical workflow for supervised learning not only identifies overfitting and underfitting, but", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 95, + 505, + 110 + ], + "spans": [ + { + "bbox": [ + 105, + 95, + 505, + 110 + ], + "score": 1.0, + "content": "also guides the practitioner how to adjust their method so as to alleviate it (e.g., by modifying", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 107, + 506, + 120 + ], + "spans": [ + { + "bbox": [ + 105, + 107, + 506, + 120 + ], + "score": 1.0, + "content": "regularization or model capacity), thus improving performance. Can we devise similar guidelines", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 118, + 506, + 131 + ], + "spans": [ + { + "bbox": [ + 105, + 118, + 506, + 131 + ], + "score": 1.0, + "content": "to address overfitting and underfitting with conservative offline RL? 
Here, we discuss some ways to", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 129, + 391, + 142 + ], + "spans": [ + { + "bbox": [ + 106, + 129, + 391, + 142 + ], + "score": 1.0, + "content": "adjust regularization and model capacity to alleviate these phenomena.", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 3, + "bbox_fs": [ + 105, + 85, + 506, + 142 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 145, + 505, + 268 + ], + "lines": [ + { + "bbox": [ + 106, + 146, + 505, + 157 + ], + "spans": [ + { + "bbox": [ + 106, + 146, + 505, + 157 + ], + "score": 1.0, + "content": "Capacity-decreasing regularization for overfitting. As we observed in Section 3, the mechanism", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 156, + 505, + 169 + ], + "spans": [ + { + "bbox": [ + 106, + 156, + 196, + 169 + ], + "score": 1.0, + "content": "behind extremely low", + "type": "text" + }, + { + "bbox": [ + 197, + 157, + 205, + 167 + ], + "score": 0.3, + "content": "\\mathbf { Q }", + "type": "inline_equation" + }, + { + "bbox": [ + 206, + 156, + 422, + 169 + ], + "score": 1.0, + "content": "-values on the dataset is that CQL training minimizes", + "type": "text" + }, + { + "bbox": [ + 423, + 157, + 432, + 168 + ], + "score": 0.28, + "content": "\\mathrm { Q }", + "type": "inline_equation" + }, + { + "bbox": [ + 432, + 156, + 505, + 169 + ], + "score": 1.0, + "content": "-values on actions", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 167, + 506, + 181 + ], + "spans": [ + { + "bbox": [ + 105, + 167, + 164, + 181 + ], + "score": 1.0, + "content": "sampled from", + "type": "text" + }, + { + "bbox": [ + 164, + 167, + 191, + 179 + ], + "score": 0.93, + "content": "\\mu ( \\mathbf { a } | \\mathbf { s } )", + "type": "inline_equation" + }, + { + "bbox": [ + 192, + 167, + 506, + 181 + ], + "score": 1.0, + "content": ". 
Two possible approaches to preventing over-minimization of these values are", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 177, + 505, + 192 + ], + "spans": [ + { + "bbox": [ + 105, + 177, + 505, + 192 + ], + "score": 1.0, + "content": "(1) applying regularization such as dropout [24] on Q-function layers, similar to supervised learning,", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 189, + 505, + 201 + ], + "spans": [ + { + "bbox": [ + 105, + 189, + 505, + 201 + ], + "score": 1.0, + "content": "and (2) enforcing that representations of the learned Q-function match a pre-specified target for all", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 200, + 506, + 214 + ], + "spans": [ + { + "bbox": [ + 105, + 200, + 506, + 214 + ], + "score": 1.0, + "content": "state-action tuples. For (2), we can apply techniques such as a variational information bottleneck", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 211, + 505, + 223 + ], + "spans": [ + { + "bbox": [ + 106, + 211, + 339, + 223 + ], + "score": 1.0, + "content": "(VIB) [25, 26] regularizer on the learned representations,", + "type": "text" + }, + { + "bbox": [ + 340, + 211, + 359, + 223 + ], + "score": 0.9, + "content": "\\phi ( \\mathbf { s } )", + "type": "inline_equation" + }, + { + "bbox": [ + 359, + 211, + 419, + 223 + ], + "score": 1.0, + "content": ". Formally, let", + "type": "text" + }, + { + "bbox": [ + 420, + 211, + 442, + 223 + ], + "score": 0.89, + "content": "( \\mathbf { s } , \\mathbf { a } )", + "type": "inline_equation" + }, + { + "bbox": [ + 443, + 211, + 505, + 223 + ], + "score": 1.0, + "content": "denote a state-", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 222, + 505, + 235 + ], + "spans": [ + { + "bbox": [ + 106, + 223, + 302, + 235 + ], + "score": 1.0, + "content": "action pair. 
Instead of predicting a deterministic", + "type": "text" + }, + { + "bbox": [ + 302, + 222, + 347, + 235 + ], + "score": 0.92, + "content": "\\phi ( \\mathbf { s } ) \\in \\mathbb { R } ^ { d }", + "type": "inline_equation" + }, + { + "bbox": [ + 347, + 223, + 505, + 235 + ], + "score": 1.0, + "content": "(Figure 10), we modify the Q-network", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 233, + 506, + 248 + ], + "spans": [ + { + "bbox": [ + 104, + 233, + 235, + 248 + ], + "score": 1.0, + "content": "to predict two distinct vectors,", + "type": "text" + }, + { + "bbox": [ + 236, + 234, + 291, + 247 + ], + "score": 0.92, + "content": "\\phi _ { m } ( \\mathbf { s } ) \\in \\mathbb { R } ^ { d }", + "type": "inline_equation" + }, + { + "bbox": [ + 291, + 233, + 311, + 248 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 311, + 234, + 365, + 246 + ], + "score": 0.93, + "content": "\\phi _ { \\Sigma } ( \\mathbf { s } ) \\in \\mathbb { R } ^ { d }", + "type": "inline_equation" + }, + { + "bbox": [ + 365, + 233, + 419, + 248 + ], + "score": 1.0, + "content": ", and sample", + "type": "text" + }, + { + "bbox": [ + 420, + 235, + 439, + 246 + ], + "score": 0.88, + "content": "\\phi ( \\mathbf { s } )", + "type": "inline_equation" + }, + { + "bbox": [ + 439, + 233, + 506, + 248 + ], + "score": 1.0, + "content": "randomly from", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 245, + 506, + 259 + ], + "spans": [ + { + "bbox": [ + 105, + 245, + 203, + 259 + ], + "score": 1.0, + "content": "a Gaussian centered at", + "type": "text" + }, + { + "bbox": [ + 203, + 246, + 218, + 257 + ], + "score": 0.88, + "content": "\\phi _ { m }", + "type": "inline_equation" + }, + { + "bbox": [ + 218, + 245, + 288, + 259 + ], + "score": 1.0, + "content": "with covariance", + "type": "text" + }, + { + "bbox": [ + 289, + 246, + 302, + 258 + ], + "score": 0.89, + "content": "\\phi _ { \\Sigma }", + "type": 
"inline_equation" + }, + { + "bbox": [ + 302, + 245, + 326, + 259 + ], + "score": 1.0, + "content": ", i.e.,", + "type": "text" + }, + { + "bbox": [ + 326, + 246, + 456, + 258 + ], + "score": 0.87, + "content": "\\phi ( \\mathbf { s } ) \\sim \\mathcal { N } ( \\phi _ { m } ( \\mathbf { s } ) , \\mathrm { d i a g } ( \\phi _ { \\Sigma } ( \\mathbf { s } ) )", + "type": "inline_equation" + }, + { + "bbox": [ + 456, + 245, + 506, + 259 + ], + "score": 1.0, + "content": ". VIB then", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 256, + 420, + 270 + ], + "spans": [ + { + "bbox": [ + 105, + 256, + 152, + 270 + ], + "score": 1.0, + "content": "regularizes", + "type": "text" + }, + { + "bbox": [ + 153, + 257, + 246, + 269 + ], + "score": 0.62, + "content": "\\mathcal { N } ( \\phi _ { m } ( \\mathbf { s } ) , \\mathrm { d i a g } ( \\phi _ { \\Sigma } ( \\mathbf { s } ) )", + "type": "inline_equation" + }, + { + "bbox": [ + 247, + 256, + 383, + 270 + ], + "score": 1.0, + "content": "to be close to a prior distribution,", + "type": "text" + }, + { + "bbox": [ + 383, + 257, + 415, + 269 + ], + "score": 0.91, + "content": "\\mathcal { N } ( 0 , \\mathbb { I } )", + "type": "inline_equation" + }, + { + "bbox": [ + 415, + 256, + 420, + 270 + ], + "score": 1.0, + "content": ":", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 11, + "bbox_fs": [ + 104, + 146, + 506, + 270 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 128, + 270, + 492, + 287 + ], + "lines": [ + { + "bbox": [ + 128, + 270, + 492, + 287 + ], + "spans": [ + { + "bbox": [ + 128, + 270, + 492, + 287 + ], + "score": 0.77, + "content": "\\operatorname* { m i n } _ { \\theta } \\ \\mathcal { L } _ { \\mathrm { C Q L } } ( \\theta ) + \\beta \\mathbb { E } _ { \\mathrm { s } \\sim \\mathcal { D } } \\left[ \\mathrm { D } _ { \\mathrm { K L } } \\left( \\mathcal { N } ( \\phi _ { m } ( \\mathbf { s } ) , \\mathrm { d i a g } ( \\phi _ { \\Sigma } ( \\mathbf { s } ) ) ) \\ | 
| \\mathcal { N } ( 0 , \\mathbb { I } ) \\right) \\right] \\quad ( \\mathrm { V I B ~ r e g u l a r i z e r } ) ,", + "type": "interline_equation", + "image_path": "9db0d18bcf179c6a8325f93c9a8d0886149e29c38e5c7dbf3d677141313eeab5.jpg" + } + ] + } + ], + "index": 17, + "virtual_lines": [ + { + "bbox": [ + 128, + 270, + 492, + 287 + ], + "spans": [], + "index": 17 + } + ] + }, + { + "type": "text", + "bbox": [ + 115, + 296, + 493, + 319 + ], + "lines": [ + { + "bbox": [ + 116, + 294, + 494, + 311 + ], + "spans": [ + { + "bbox": [ + 116, + 294, + 494, + 311 + ], + "score": 1.0, + "content": "Guideline 4.1. To address overfitting, we recommend using some form of capacity-decreasing", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 116, + 305, + 494, + 320 + ], + "spans": [ + { + "bbox": [ + 116, + 305, + 204, + 320 + ], + "score": 1.0, + "content": "regularization on the", + "type": "text" + }, + { + "bbox": [ + 204, + 308, + 213, + 319 + ], + "score": 0.26, + "content": "Q", + "type": "inline_equation" + }, + { + "bbox": [ + 213, + 305, + 494, + 320 + ], + "score": 1.0, + "content": "-function, such as dropout or the VIB regularizer shown in Equation 3.", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 18.5, + "bbox_fs": [ + 116, + 294, + 494, + 320 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 332, + 505, + 475 + ], + "lines": [ + { + "bbox": [ + 106, + 331, + 506, + 345 + ], + "spans": [ + { + "bbox": [ + 106, + 331, + 506, + 345 + ], + "score": 1.0, + "content": "Capacity-increasing techniques for underfitting. To address underfitting, we need to increase", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 342, + 505, + 356 + ], + "spans": [ + { + "bbox": [ + 105, + 342, + 505, + 356 + ], + "score": 1.0, + "content": "model capacity to improve optimization of the training objective. 
Analogous to supervised learning,", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 106, + 354, + 505, + 367 + ], + "spans": [ + { + "bbox": [ + 106, + 354, + 505, + 367 + ], + "score": 1.0, + "content": "model capacity can be increased by using more expressive neural nets (e.g., ResNets [27], trans-", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 106, + 365, + 505, + 378 + ], + "spans": [ + { + "bbox": [ + 106, + 365, + 505, + 378 + ], + "score": 1.0, + "content": "formers [28]) for representing the learned policy. We use ResNets in our experiments (Figure 10).", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 374, + 506, + 389 + ], + "spans": [ + { + "bbox": [ + 105, + 374, + 506, + 389 + ], + "score": 1.0, + "content": "However, the RL setting presents an additional challenge with capacity: while larger models in", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 387, + 506, + 399 + ], + "spans": [ + { + "bbox": [ + 105, + 387, + 506, + 399 + ], + "score": 1.0, + "content": "principle have more capacity, recent work [29, 21, 22] has shown that utilizing larger networks to", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 104, + 398, + 505, + 410 + ], + "spans": [ + { + "bbox": [ + 104, + 398, + 505, + 410 + ], + "score": 1.0, + "content": "represent Q-functions does not always improve its capacity in practice, because TD-based RL meth-", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 408, + 505, + 420 + ], + "spans": [ + { + "bbox": [ + 105, + 408, + 505, + 420 + ], + "score": 1.0, + "content": "ods introduce an β€œimplicit under-parameterization” effect that can result in aliased (i.e., similar)", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 106, + 420, + 505, + 432 + ], + "spans": [ + { + "bbox": [ + 106, + 420, + 505, + 432 + ], + "score": 1.0, + "content": "internal representations for different state-action inputs, even for very 
large neural networks that", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 430, + 505, + 443 + ], + "spans": [ + { + "bbox": [ + 106, + 430, + 505, + 443 + ], + "score": 1.0, + "content": "can express the true Q-function effectively. To address this issue, these works apply a β€œcapacity-", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 106, + 441, + 505, + 454 + ], + "spans": [ + { + "bbox": [ + 106, + 441, + 505, + 454 + ], + "score": 1.0, + "content": "increasing” regularizer to Q-function training. For instance, we can use the DR3 regularizer [22],", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 106, + 452, + 505, + 465 + ], + "spans": [ + { + "bbox": [ + 106, + 452, + 247, + 465 + ], + "score": 1.0, + "content": "which penalizes the dot product of", + "type": "text" + }, + { + "bbox": [ + 247, + 452, + 266, + 464 + ], + "score": 0.91, + "content": "\\phi ( \\mathbf { s } )", + "type": "inline_equation" + }, + { + "bbox": [ + 267, + 452, + 285, + 465 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 285, + 452, + 307, + 464 + ], + "score": 0.92, + "content": "\\phi ( \\mathbf { s } ^ { \\prime } )", + "type": "inline_equation" + }, + { + "bbox": [ + 307, + 452, + 370, + 465 + ], + "score": 1.0, + "content": "for a transition", + "type": "text" + }, + { + "bbox": [ + 370, + 452, + 425, + 464 + ], + "score": 0.93, + "content": "( \\mathbf { s } , \\mathbf { a } , \\mathbf { s } ^ { \\prime } ) \\in \\mathcal { D }", + "type": "inline_equation" + }, + { + "bbox": [ + 426, + 452, + 505, + 465 + ], + "score": 1.0, + "content": ", and hence reduces", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 462, + 250, + 477 + ], + "spans": [ + { + "bbox": [ + 105, + 462, + 250, + 477 + ], + "score": 1.0, + "content": "aliasing. 
This objective is given by:", + "type": "text" + } + ], + "index": 32 + } + ], + "index": 26, + "bbox_fs": [ + 104, + 331, + 506, + 477 + ] + }, + { + "type": "interline_equation", + "bbox": [ + 129, + 480, + 428, + 500 + ], + "lines": [ + { + "bbox": [ + 129, + 480, + 428, + 500 + ], + "spans": [ + { + "bbox": [ + 129, + 480, + 428, + 500 + ], + "score": 0.8, + "content": "\\operatorname* { m i n } _ { \\theta } \\ \\mathcal { L } _ { \\mathrm { C Q L } } ( \\theta ) + \\beta \\mathbb { E } _ { { \\mathbf s } , { \\mathbf a } , { \\mathbf s } ^ { \\prime } \\sim \\mathcal { D } } \\left[ \\left| \\phi ( { \\mathbf s } ) ^ { \\top } \\phi ( { \\mathbf s } ^ { \\prime } ) \\right| \\right] \\qquad ( { \\mathrm { D R 3 ~ r e g u l a r i z e r ~ } } [ 2 2 ] ) ,", + "type": "interline_equation", + "image_path": "55e6e675c67ec1e3e4745a24fa9f244367408982eb82461b79c4a5d6cace2f4e.jpg" + } + ] + } + ], + "index": 33, + "virtual_lines": [ + { + "bbox": [ + 129, + 480, + 428, + 500 + ], + "spans": [], + "index": 33 + } + ] + }, + { + "type": "text", + "bbox": [ + 117, + 507, + 494, + 541 + ], + "lines": [ + { + "bbox": [ + 116, + 505, + 494, + 521 + ], + "spans": [ + { + "bbox": [ + 116, + 505, + 494, + 521 + ], + "score": 1.0, + "content": "Guideline 4.2. To address underfitting, we recommend using some capacity-increasing regu-", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 117, + 517, + 493, + 532 + ], + "spans": [ + { + "bbox": [ + 117, + 517, + 493, + 532 + ], + "score": 1.0, + "content": "larization on the Q-function and the policy either in conjunction or separately. 
Examples: (1)", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 116, + 528, + 433, + 543 + ], + "spans": [ + { + "bbox": [ + 116, + 528, + 433, + 543 + ], + "score": 1.0, + "content": "bigger policy networks (e.g., ResNets), (2) DR3 regularizer on the Q-network.", + "type": "text" + } + ], + "index": 36 + } + ], + "index": 35, + "bbox_fs": [ + 116, + 505, + 494, + 543 + ] + }, + { + "type": "title", + "bbox": [ + 106, + 551, + 459, + 565 + ], + "lines": [ + { + "bbox": [ + 104, + 551, + 460, + 567 + ], + "spans": [ + { + "bbox": [ + 104, + 551, + 460, + 567 + ], + "score": 1.0, + "content": "5 Evaluation of Our Workflow Metrics and Protocols in Simulation", + "type": "text" + } + ], + "index": 37 + } + ], + "index": 37 + }, + { + "type": "text", + "bbox": [ + 107, + 567, + 336, + 660 + ], + "lines": [ + { + "bbox": [ + 106, + 567, + 336, + 579 + ], + "spans": [ + { + "bbox": [ + 106, + 567, + 336, + 579 + ], + "score": 1.0, + "content": "Next, we empirically validate the workflow proposed in", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 106, + 577, + 335, + 590 + ], + "spans": [ + { + "bbox": [ + 106, + 577, + 335, + 590 + ], + "score": 1.0, + "content": "Sections 3 and 4 on a suite of simulated robotic manipu-", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 106, + 588, + 336, + 600 + ], + "spans": [ + { + "bbox": [ + 106, + 588, + 336, + 600 + ], + "score": 1.0, + "content": "lation domains that mimic real-robot scenarios, from im-", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 106, + 600, + 336, + 610 + ], + "spans": [ + { + "bbox": [ + 106, + 600, + 336, + 610 + ], + "score": 1.0, + "content": "age observations with sparse binary rewards. 
We will ex-", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 106, + 610, + 337, + 622 + ], + "spans": [ + { + "bbox": [ + 106, + 610, + 337, + 622 + ], + "score": 1.0, + "content": "amine how applying the workflow in Section 3 to detect", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 106, + 622, + 336, + 633 + ], + "spans": [ + { + "bbox": [ + 106, + 622, + 336, + 633 + ], + "score": 1.0, + "content": "overfitting or underfitting and then utilizing the strategies", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 632, + 336, + 644 + ], + "spans": [ + { + "bbox": [ + 105, + 632, + 336, + 644 + ], + "score": 1.0, + "content": "in Section 4 affects the performance of offline RL meth-", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 106, + 643, + 336, + 654 + ], + "spans": [ + { + "bbox": [ + 106, + 643, + 336, + 654 + ], + "score": 1.0, + "content": "ods. An improved performance would indicate the effi-", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 106, + 652, + 456, + 668 + ], + "spans": [ + { + "bbox": [ + 106, + 652, + 456, + 668 + ], + "score": 1.0, + "content": "cacy of our workflow in making successful design decisions without any online tuning.", + "type": "text" + } + ], + "index": 51 + } + ], + "index": 41.5, + "bbox_fs": [ + 105, + 567, + 337, + 654 + ] + }, + { + "type": "image", + "bbox": [ + 344, + 568, + 504, + 641 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 344, + 568, + 504, + 641 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 344, + 568, + 504, + 641 + ], + "spans": [ + { + "bbox": [ + 344, + 568, + 504, + 641 + ], + "score": 0.972, + "type": "image", + "image_path": "feadf98282550c14dd6939d9b1aaebdb526316856a1034070ef48af1c82a1480.jpg" + } + ] + } + ], + "index": 48, + "virtual_lines": [ + { + "bbox": [ + 344, + 568, + 504, + 582.6 + ], + "spans": [], + "index": 46 + }, + { + "bbox": [ + 344, + 582.6, + 504, + 597.2 + ], + "spans": [], + 
"index": 47 + }, + { + "bbox": [ + 344, + 597.2, + 504, + 611.8000000000001 + ], + "spans": [], + "index": 48 + }, + { + "bbox": [ + 344, + 611.8000000000001, + 504, + 626.4000000000001 + ], + "spans": [], + "index": 49 + }, + { + "bbox": [ + 344, + 626.4000000000001, + 504, + 641.0000000000001 + ], + "spans": [], + "index": 50 + } + ] + } + ], + "index": 48 + }, + { + "type": "text", + "bbox": [ + 109, + 655, + 503, + 665 + ], + "lines": [], + "index": 51, + "bbox_fs": [ + 106, + 652, + 456, + 668 + ], + "lines_deleted": true + }, + { + "type": "text", + "bbox": [ + 107, + 670, + 505, + 725 + ], + "lines": [ + { + "bbox": [ + 105, + 670, + 505, + 682 + ], + "spans": [ + { + "bbox": [ + 105, + 670, + 505, + 682 + ], + "score": 1.0, + "content": "Experimental setup. We use the environments from Singh et al. [3] to design offline RL tasks and", + "type": "text" + } + ], + "index": 52 + }, + { + "bbox": [ + 105, + 681, + 505, + 694 + ], + "spans": [ + { + "bbox": [ + 105, + 681, + 505, + 694 + ], + "score": 1.0, + "content": "datasets that we use for our empirical analysis. We consider two tasks: (1) a pick and place task and", + "type": "text" + } + ], + "index": 53 + }, + { + "bbox": [ + 105, + 692, + 506, + 704 + ], + "spans": [ + { + "bbox": [ + 105, + 692, + 506, + 704 + ], + "score": 1.0, + "content": "(2) a grasping object from a drawer task. Examples of trajectories in both of these simulated domains", + "type": "text" + } + ], + "index": 54 + }, + { + "bbox": [ + 105, + 703, + 506, + 716 + ], + "spans": [ + { + "bbox": [ + 105, + 703, + 506, + 716 + ], + "score": 1.0, + "content": "are shown in Figure 2 and are detailed in Appendix D. Briefly, the pick and place task consists of", + "type": "text" + } + ], + "index": 55 + }, + { + "bbox": [ + 104, + 713, + 505, + 727 + ], + "spans": [ + { + "bbox": [ + 104, + 713, + 505, + 727 + ], + "score": 1.0, + "content": "a 6-DoF WidowX robot in front of a tray with an object. 
The goal is to put the object inside the", + "type": "text" + } + ], + "index": 56 + }, + { + "bbox": [ + 106, + 72, + 336, + 85 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 216, + 85 + ], + "score": 1.0, + "content": "tray. A non-zero reward of", + "type": "text", + "cross_page": true + }, + { + "bbox": [ + 216, + 73, + 228, + 83 + ], + "score": 0.83, + "content": "+ 1", + "type": "inline_equation", + "cross_page": true + }, + { + "bbox": [ + 229, + 72, + 336, + 85 + ], + "score": 1.0, + "content": "is provided only when the", + "type": "text", + "cross_page": true + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 83, + 336, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 83, + 336, + 95 + ], + "score": 1.0, + "content": "object has been placed in the box. The offline dataset", + "type": "text", + "cross_page": true + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 93, + 337, + 107 + ], + "spans": [ + { + "bbox": [ + 105, + 93, + 337, + 107 + ], + "score": 1.0, + "content": "for this task consists of trajectories that grasp an object", + "type": "text", + "cross_page": true + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 105, + 337, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 105, + 135, + 118 + ], + "score": 1.0, + "content": "with a", + "type": "text", + "cross_page": true + }, + { + "bbox": [ + 136, + 105, + 156, + 116 + ], + "score": 0.87, + "content": "3 5 \\%", + "type": "inline_equation", + "cross_page": true + }, + { + "bbox": [ + 156, + 105, + 337, + 118 + ], + "score": 1.0, + "content": "success and other trajectories that place an", + "type": "text", + "cross_page": true + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 115, + 337, + 130 + ], + "spans": [ + { + "bbox": [ + 105, + 115, + 162, + 130 + ], + "score": 1.0, + "content": "object with a", + "type": "text", + "cross_page": true + }, + { + "bbox": [ + 162, + 117, + 182, + 127 + ], + "score": 0.87, + "content": "40 \\%", + "type": "inline_equation", + "cross_page": true + }, + { 
+ "bbox": [ + 183, + 115, + 337, + 130 + ], + "score": 1.0, + "content": "success. Our second task is a grasp-", + "type": "text", + "cross_page": true + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 127, + 337, + 139 + ], + "spans": [ + { + "bbox": [ + 105, + 127, + 337, + 139 + ], + "score": 1.0, + "content": "ing from drawer task where the WidowX robot is placed", + "type": "text", + "cross_page": true + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 138, + 336, + 150 + ], + "spans": [ + { + "bbox": [ + 106, + 138, + 336, + 150 + ], + "score": 1.0, + "content": "in front of a drawer and multiple objects. The robot can", + "type": "text", + "cross_page": true + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 149, + 337, + 161 + ], + "spans": [ + { + "bbox": [ + 105, + 149, + 337, + 161 + ], + "score": 1.0, + "content": "open or close the drawer, grasp objects from inside the", + "type": "text", + "cross_page": true + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 160, + 337, + 172 + ], + "spans": [ + { + "bbox": [ + 106, + 160, + 337, + 172 + ], + "score": 1.0, + "content": "drawer or on the table, and place them anywhere in the", + "type": "text", + "cross_page": true + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 171, + 336, + 182 + ], + "spans": [ + { + "bbox": [ + 105, + 171, + 336, + 182 + ], + "score": 1.0, + "content": "scene. The goal is to close the top drawer, then open the", + "type": "text", + "cross_page": true + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 181, + 337, + 194 + ], + "spans": [ + { + "bbox": [ + 105, + 181, + 337, + 194 + ], + "score": 1.0, + "content": "bottom drawer and take the object out. 
Only if the object", + "type": "text", + "cross_page": true + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 193, + 336, + 204 + ], + "spans": [ + { + "bbox": [ + 106, + 193, + 229, + 204 + ], + "score": 1.0, + "content": "has been taken out, a reward of", + "type": "text", + "cross_page": true + }, + { + "bbox": [ + 230, + 193, + 241, + 203 + ], + "score": 0.81, + "content": "+ 1", + "type": "inline_equation", + "cross_page": true + }, + { + "bbox": [ + 242, + 193, + 336, + 204 + ], + "score": 1.0, + "content": "is obtained. The offline", + "type": "text", + "cross_page": true + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 203, + 336, + 215 + ], + "spans": [ + { + "bbox": [ + 106, + 203, + 253, + 215 + ], + "score": 1.0, + "content": "dataset consists of trajectories with a", + "type": "text", + "cross_page": true + }, + { + "bbox": [ + 253, + 204, + 286, + 214 + ], + "score": 0.88, + "content": "3 0 { - } 4 0 \\%", + "type": "inline_equation", + "cross_page": true + }, + { + "bbox": [ + 287, + 203, + 336, + 215 + ], + "score": 1.0, + "content": "success rate", + "type": "text", + "cross_page": true + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 215, + 336, + 226 + ], + "spans": [ + { + "bbox": [ + 106, + 215, + 336, + 226 + ], + "score": 1.0, + "content": "for opening and closing a drawer and other trajectories", + "type": "text", + "cross_page": true + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 225, + 336, + 238 + ], + "spans": [ + { + "bbox": [ + 106, + 225, + 146, + 238 + ], + "score": 1.0, + "content": "with only", + "type": "text", + "cross_page": true + }, + { + "bbox": [ + 146, + 226, + 166, + 236 + ], + "score": 0.86, + "content": "40 \\%", + "type": "inline_equation", + "cross_page": true + }, + { + "bbox": [ + 167, + 225, + 265, + 238 + ], + "score": 1.0, + "content": "placing success. We use", + "type": "text", + "cross_page": true + }, + { + "bbox": [ + 265, + 226, + 299, + 236 + ], + "score": 0.89, + "content": "\\alpha = 1 . 
0", + "type": "inline_equation", + "cross_page": true + }, + { + "bbox": [ + 300, + 225, + 336, + 238 + ], + "score": 1.0, + "content": "for CQL", + "type": "text", + "cross_page": true + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 237, + 336, + 248 + ], + "spans": [ + { + "bbox": [ + 106, + 237, + 336, + 248 + ], + "score": 1.0, + "content": "training in all experiments, which is directly taken from", + "type": "text", + "cross_page": true + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 248, + 337, + 260 + ], + "spans": [ + { + "bbox": [ + 106, + 248, + 337, + 260 + ], + "score": 1.0, + "content": "prior work [3], without any tuning. However, too low or", + "type": "text", + "cross_page": true + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 257, + 336, + 271 + ], + "spans": [ + { + "bbox": [ + 105, + 257, + 143, + 271 + ], + "score": 1.0, + "content": "too high", + "type": "text", + "cross_page": true + }, + { + "bbox": [ + 144, + 260, + 152, + 268 + ], + "score": 0.78, + "content": "\\alpha", + "type": "inline_equation", + "cross_page": true + }, + { + "bbox": [ + 153, + 257, + 336, + 271 + ], + "score": 1.0, + "content": "values will inhibit the effectiveness of regu-", + "type": "text", + "cross_page": true + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 268, + 336, + 281 + ], + "spans": [ + { + "bbox": [ + 105, + 268, + 246, + 281 + ], + "score": 1.0, + "content": "lar CQL and we first need to tune", + "type": "text", + "cross_page": true + }, + { + "bbox": [ + 246, + 271, + 254, + 279 + ], + "score": 0.75, + "content": "\\alpha", + "type": "inline_equation", + "cross_page": true + }, + { + "bbox": [ + 254, + 268, + 336, + 281 + ], + "score": 1.0, + "content": "as discussed in Ap-", + "type": "text", + "cross_page": true + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 280, + 317, + 292 + ], + "spans": [ + { + "bbox": [ + 105, + 280, + 317, + 292 + ], + "score": 1.0, + "content": "pendix G. 
More details are provided in Appendix D.", + "type": "text", + "cross_page": true + } + ], + "index": 19 + } + ], + "index": 54, + "bbox_fs": [ + 104, + 670, + 506, + 727 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 107, + 72, + 336, + 291 + ], + "lines": [ + { + "bbox": [ + 106, + 72, + 336, + 85 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 216, + 85 + ], + "score": 1.0, + "content": "tray. A non-zero reward of", + "type": "text" + }, + { + "bbox": [ + 216, + 73, + 228, + 83 + ], + "score": 0.83, + "content": "+ 1", + "type": "inline_equation" + }, + { + "bbox": [ + 229, + 72, + 336, + 85 + ], + "score": 1.0, + "content": "is provided only when the", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 83, + 336, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 83, + 336, + 95 + ], + "score": 1.0, + "content": "object has been placed in the box. The offline dataset", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 93, + 337, + 107 + ], + "spans": [ + { + "bbox": [ + 105, + 93, + 337, + 107 + ], + "score": 1.0, + "content": "for this task consists of trajectories that grasp an object", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 105, + 337, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 105, + 135, + 118 + ], + "score": 1.0, + "content": "with a", + "type": "text" + }, + { + "bbox": [ + 136, + 105, + 156, + 116 + ], + "score": 0.87, + "content": "3 5 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 156, + 105, + 337, + 118 + ], + "score": 1.0, + "content": "success and other trajectories that place an", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 115, + 337, + 130 + ], + "spans": [ + { + "bbox": [ + 105, + 115, + 162, + 130 + ], + "score": 1.0, + "content": "object with a", + "type": "text" + }, + { + "bbox": [ + 162, + 117, + 182, + 127 + ], + "score": 0.87, + "content": "40 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 
183, + 115, + 337, + 130 + ], + "score": 1.0, + "content": "success. Our second task is a grasp-", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 127, + 337, + 139 + ], + "spans": [ + { + "bbox": [ + 105, + 127, + 337, + 139 + ], + "score": 1.0, + "content": "ing from drawer task where the WidowX robot is placed", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 138, + 336, + 150 + ], + "spans": [ + { + "bbox": [ + 106, + 138, + 336, + 150 + ], + "score": 1.0, + "content": "in front of a drawer and multiple objects. The robot can", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 149, + 337, + 161 + ], + "spans": [ + { + "bbox": [ + 105, + 149, + 337, + 161 + ], + "score": 1.0, + "content": "open or close the drawer, grasp objects from inside the", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 160, + 337, + 172 + ], + "spans": [ + { + "bbox": [ + 106, + 160, + 337, + 172 + ], + "score": 1.0, + "content": "drawer or on the table, and place them anywhere in the", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 171, + 336, + 182 + ], + "spans": [ + { + "bbox": [ + 105, + 171, + 336, + 182 + ], + "score": 1.0, + "content": "scene. The goal is to close the top drawer, then open the", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 181, + 337, + 194 + ], + "spans": [ + { + "bbox": [ + 105, + 181, + 337, + 194 + ], + "score": 1.0, + "content": "bottom drawer and take the object out. Only if the object", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 193, + 336, + 204 + ], + "spans": [ + { + "bbox": [ + 106, + 193, + 229, + 204 + ], + "score": 1.0, + "content": "has been taken out, a reward of", + "type": "text" + }, + { + "bbox": [ + 230, + 193, + 241, + 203 + ], + "score": 0.81, + "content": "+ 1", + "type": "inline_equation" + }, + { + "bbox": [ + 242, + 193, + 336, + 204 + ], + "score": 1.0, + "content": "is obtained. 
The offline", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 203, + 336, + 215 + ], + "spans": [ + { + "bbox": [ + 106, + 203, + 253, + 215 + ], + "score": 1.0, + "content": "dataset consists of trajectories with a", + "type": "text" + }, + { + "bbox": [ + 253, + 204, + 286, + 214 + ], + "score": 0.88, + "content": "3 0 { - } 4 0 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 287, + 203, + 336, + 215 + ], + "score": 1.0, + "content": "success rate", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 215, + 336, + 226 + ], + "spans": [ + { + "bbox": [ + 106, + 215, + 336, + 226 + ], + "score": 1.0, + "content": "for opening and closing a drawer and other trajectories", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 225, + 336, + 238 + ], + "spans": [ + { + "bbox": [ + 106, + 225, + 146, + 238 + ], + "score": 1.0, + "content": "with only", + "type": "text" + }, + { + "bbox": [ + 146, + 226, + 166, + 236 + ], + "score": 0.86, + "content": "40 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 167, + 225, + 265, + 238 + ], + "score": 1.0, + "content": "placing success. We use", + "type": "text" + }, + { + "bbox": [ + 265, + 226, + 299, + 236 + ], + "score": 0.89, + "content": "\\alpha = 1 . 0", + "type": "inline_equation" + }, + { + "bbox": [ + 300, + 225, + 336, + 238 + ], + "score": 1.0, + "content": "for CQL", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 237, + 336, + 248 + ], + "spans": [ + { + "bbox": [ + 106, + 237, + 336, + 248 + ], + "score": 1.0, + "content": "training in all experiments, which is directly taken from", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 248, + 337, + 260 + ], + "spans": [ + { + "bbox": [ + 106, + 248, + 337, + 260 + ], + "score": 1.0, + "content": "prior work [3], without any tuning. 
However, too low or", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 257, + 336, + 271 + ], + "spans": [ + { + "bbox": [ + 105, + 257, + 143, + 271 + ], + "score": 1.0, + "content": "too high", + "type": "text" + }, + { + "bbox": [ + 144, + 260, + 152, + 268 + ], + "score": 0.78, + "content": "\\alpha", + "type": "inline_equation" + }, + { + "bbox": [ + 153, + 257, + 336, + 271 + ], + "score": 1.0, + "content": "values will inhibit the effectiveness of regu-", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 268, + 336, + 281 + ], + "spans": [ + { + "bbox": [ + 105, + 268, + 246, + 281 + ], + "score": 1.0, + "content": "lar CQL and we first need to tune", + "type": "text" + }, + { + "bbox": [ + 246, + 271, + 254, + 279 + ], + "score": 0.75, + "content": "\\alpha", + "type": "inline_equation" + }, + { + "bbox": [ + 254, + 268, + 336, + 281 + ], + "score": 1.0, + "content": "as discussed in Ap-", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 280, + 317, + 292 + ], + "spans": [ + { + "bbox": [ + 105, + 280, + 317, + 292 + ], + "score": 1.0, + "content": "pendix G. 
More details are provided in Appendix D.", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 9.5 + }, + { + "type": "image", + "bbox": [ + 352, + 77, + 497, + 213 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 352, + 77, + 497, + 213 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 352, + 77, + 497, + 213 + ], + "spans": [ + { + "bbox": [ + 352, + 77, + 497, + 213 + ], + "score": 0.966, + "type": "image", + "image_path": "7149b62299170ecd7a26a8a110d939daa58337b186bfd1671932f83c2c649963.jpg" + } + ] + } + ], + "index": 20.5, + "virtual_lines": [ + { + "bbox": [ + 352, + 77, + 497, + 145.0 + ], + "spans": [], + "index": 20 + }, + { + "bbox": [ + 352, + 145.0, + 497, + 213.0 + ], + "spans": [], + "index": 21 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 344, + 217, + 504, + 287 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 343, + 216, + 505, + 228 + ], + "spans": [ + { + "bbox": [ + 343, + 216, + 505, + 228 + ], + "score": 1.0, + "content": "Figure 3: Policy performance (Top) and", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 343, + 227, + 505, + 238 + ], + "spans": [ + { + "bbox": [ + 343, + 227, + 505, + 238 + ], + "score": 1.0, + "content": "average dataset Q-values of CQL (bot-", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 343, + 236, + 505, + 248 + ], + "spans": [ + { + "bbox": [ + 343, + 236, + 505, + 248 + ], + "score": 1.0, + "content": "tom) with varying number of trajectories.", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 344, + 247, + 505, + 257 + ], + "spans": [ + { + "bbox": [ + 344, + 247, + 505, + 257 + ], + "score": 1.0, + "content": "Vertical bands indicate regions around the", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 343, + 257, + 505, + 267 + ], + "spans": [ + { + "bbox": [ + 343, + 257, + 407, + 267 + ], + "score": 1.0, + "content": "peak in average", + "type": "text" + }, + { + "bbox": [ + 407, + 257, + 416, + 267 + ], + 
"score": 0.31, + "content": "\\mathrm { Q }", + "type": "inline_equation" + }, + { + "bbox": [ + 416, + 257, + 505, + 267 + ], + "score": 1.0, + "content": "-value and observe that", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 343, + 267, + 505, + 278 + ], + "spans": [ + { + "bbox": [ + 343, + 267, + 505, + 278 + ], + "score": 1.0, + "content": "these regions correspond to policies with", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 343, + 277, + 438, + 288 + ], + "spans": [ + { + "bbox": [ + 343, + 277, + 438, + 288 + ], + "score": 1.0, + "content": "good actual performance.", + "type": "text" + } + ], + "index": 28 + } + ], + "index": 25 + } + ], + "index": 22.75 + }, + { + "type": "text", + "bbox": [ + 106, + 296, + 505, + 340 + ], + "lines": [ + { + "bbox": [ + 106, + 296, + 505, + 308 + ], + "spans": [ + { + "bbox": [ + 106, + 296, + 505, + 308 + ], + "score": 1.0, + "content": "Scenario #1: Variable amount of training data. Our first scenario consists of the simulated tasks", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 106, + 307, + 505, + 319 + ], + "spans": [ + { + "bbox": [ + 106, + 307, + 505, + 319 + ], + "score": 1.0, + "content": "discussed above with a variable number of trajectories in the training data (50, 100, 500, 10000). We", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 106, + 319, + 505, + 330 + ], + "spans": [ + { + "bbox": [ + 106, + 319, + 505, + 330 + ], + "score": 1.0, + "content": "run CQL and track metrics 3.1 and 3.2 in each case. 
Observe in Figure 3 (bottom) that with fewer tra-", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 104, + 329, + 505, + 342 + ], + "spans": [ + { + "bbox": [ + 104, + 329, + 257, + 342 + ], + "score": 1.0, + "content": "jectories, the average dataset Q-value", + "type": "text" + }, + { + "bbox": [ + 258, + 329, + 329, + 341 + ], + "score": 0.93, + "content": "\\mathbb { E } _ { \\mathbf { s } , \\mathbf { a } \\sim \\mathcal { D } } [ Q _ { \\theta } ( \\mathbf { s } , \\mathbf { a } ) ]", + "type": "inline_equation" + }, + { + "bbox": [ + 329, + 329, + 505, + 342 + ], + "score": 1.0, + "content": "first rises, and then drops. This matches the", + "type": "text" + } + ], + "index": 32 + } + ], + "index": 30.5 + }, + { + "type": "text", + "bbox": [ + 107, + 341, + 316, + 525 + ], + "lines": [ + { + "bbox": [ + 106, + 339, + 317, + 352 + ], + "spans": [ + { + "bbox": [ + 106, + 339, + 317, + 352 + ], + "score": 1.0, + "content": "description of overfitting in Section 3. Observe in", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 351, + 317, + 363 + ], + "spans": [ + { + "bbox": [ + 105, + 351, + 317, + 363 + ], + "score": 1.0, + "content": "Figure 4 (left) that, at the same time, the value of the", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 106, + 362, + 317, + 374 + ], + "spans": [ + { + "bbox": [ + 106, + 362, + 317, + 374 + ], + "score": 1.0, + "content": "CQL regularizer is very low, which is not consistent", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 106, + 372, + 316, + 384 + ], + "spans": [ + { + "bbox": [ + 106, + 372, + 316, + 384 + ], + "score": 1.0, + "content": "with what we expect of underfitting. 
Thus, we can", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 106, + 383, + 317, + 395 + ], + "spans": [ + { + "bbox": [ + 106, + 383, + 317, + 395 + ], + "score": 1.0, + "content": "conclude that these conditions exhibit overfitting, es-", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 106, + 395, + 317, + 406 + ], + "spans": [ + { + "bbox": [ + 106, + 395, + 317, + 406 + ], + "score": 1.0, + "content": "pecially with 50 and 100 trajectories. The vertical", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 106, + 405, + 317, + 417 + ], + "spans": [ + { + "bbox": [ + 106, + 405, + 317, + 417 + ], + "score": 1.0, + "content": "dashed lines indicate the checkpoints that would be", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 106, + 416, + 317, + 428 + ], + "spans": [ + { + "bbox": [ + 106, + 416, + 317, + 428 + ], + "score": 1.0, + "content": "selected for evaluation per Guideline 3.1. We fur-", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 106, + 428, + 317, + 439 + ], + "spans": [ + { + "bbox": [ + 106, + 428, + 317, + 439 + ], + "score": 1.0, + "content": "ther visualize the performance of the chosen check-", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 439, + 317, + 450 + ], + "spans": [ + { + "bbox": [ + 105, + 439, + 317, + 450 + ], + "score": 1.0, + "content": "points against the actual return of each intermediate", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 449, + 317, + 460 + ], + "spans": [ + { + "bbox": [ + 105, + 449, + 317, + 460 + ], + "score": 1.0, + "content": "policy in Figure 3 (top). 
Note that this value is ob-", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 460, + 317, + 472 + ], + "spans": [ + { + "bbox": [ + 105, + 460, + 317, + 472 + ], + "score": 1.0, + "content": "tained by rolling out the learned policy, and would", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 106, + 471, + 317, + 483 + ], + "spans": [ + { + "bbox": [ + 106, + 471, + 317, + 483 + ], + "score": 1.0, + "content": "not be available in a realistic offline RL setting, but is", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 106, + 482, + 317, + 493 + ], + "spans": [ + { + "bbox": [ + 106, + 482, + 317, + 493 + ], + "score": 1.0, + "content": "provided only for analysis. Selecting the checkpoint", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 106, + 493, + 317, + 504 + ], + "spans": [ + { + "bbox": [ + 106, + 493, + 317, + 504 + ], + "score": 1.0, + "content": "based on Guideline 3.1 leads us to select a model", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 106, + 503, + 317, + 516 + ], + "spans": [ + { + "bbox": [ + 106, + 503, + 317, + 516 + ], + "score": 1.0, + "content": "with close to the peak performance over the training", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 105, + 515, + 302, + 527 + ], + "spans": [ + { + "bbox": [ + 105, + 515, + 302, + 527 + ], + "score": 1.0, + "content": "process, validating the efficacy of Guideline 3.1.", + "type": "text" + } + ], + "index": 49 + } + ], + "index": 41 + }, + { + "type": "text", + "bbox": [ + 106, + 531, + 316, + 563 + ], + "lines": [ + { + "bbox": [ + 106, + 530, + 316, + 543 + ], + "spans": [ + { + "bbox": [ + 106, + 530, + 316, + 543 + ], + "score": 1.0, + "content": "Since we detected overfitting by following our work-", + "type": "text" + } + ], + "index": 50 + }, + { + "bbox": [ + 106, + 542, + 317, + 554 + ], + "spans": [ + { + "bbox": [ + 106, + 542, + 317, + 554 + ], + "score": 1.0, + "content": "flow, we 
now aim to address it by using the VIB", + "type": "text" + } + ], + "index": 51 + }, + { + "bbox": [ + 105, + 552, + 317, + 566 + ], + "spans": [ + { + "bbox": [ + 105, + 552, + 317, + 566 + ], + "score": 1.0, + "content": "regularizer in the setting with 100 trajectories. As", + "type": "text" + } + ], + "index": 52 + } + ], + "index": 51 + }, + { + "type": "image", + "bbox": [ + 328, + 344, + 499, + 497 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 328, + 344, + 499, + 497 + ], + "group_id": 1, + "lines": [ + { + "bbox": [ + 328, + 344, + 499, + 497 + ], + "spans": [ + { + "bbox": [ + 328, + 344, + 499, + 497 + ], + "score": 0.964, + "type": "image", + "image_path": "c8b831ad68078d018a8733761f0da0593a9224d909869ecb852f0ffd8ee06f24.jpg" + } + ] + } + ], + "index": 58.5, + "virtual_lines": [ + { + "bbox": [ + 328, + 344, + 499, + 356.75 + ], + "spans": [], + "index": 53 + }, + { + "bbox": [ + 328, + 356.75, + 499, + 369.5 + ], + "spans": [], + "index": 54 + }, + { + "bbox": [ + 328, + 369.5, + 499, + 382.25 + ], + "spans": [], + "index": 55 + }, + { + "bbox": [ + 328, + 382.25, + 499, + 395.0 + ], + "spans": [], + "index": 56 + }, + { + "bbox": [ + 328, + 395.0, + 499, + 407.75 + ], + "spans": [], + "index": 57 + }, + { + "bbox": [ + 328, + 407.75, + 499, + 420.5 + ], + "spans": [], + "index": 58 + }, + { + "bbox": [ + 328, + 420.5, + 499, + 433.25 + ], + "spans": [], + "index": 59 + }, + { + "bbox": [ + 328, + 433.25, + 499, + 446.0 + ], + "spans": [], + "index": 60 + }, + { + "bbox": [ + 328, + 446.0, + 499, + 458.75 + ], + "spans": [], + "index": 61 + }, + { + "bbox": [ + 328, + 458.75, + 499, + 471.5 + ], + "spans": [], + "index": 62 + }, + { + "bbox": [ + 328, + 471.5, + 499, + 484.25 + ], + "spans": [], + "index": 63 + }, + { + "bbox": [ + 328, + 484.25, + 499, + 497.0 + ], + "spans": [], + "index": 64 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 324, + 500, + 504, + 560 + ], + "group_id": 1, + "lines": [ + { + "bbox": [ + 324, + 
498, + 505, + 512 + ], + "spans": [ + { + "bbox": [ + 324, + 498, + 505, + 512 + ], + "score": 1.0, + "content": "Figure 4: Left: CQL regularizer attains low val-", + "type": "text" + } + ], + "index": 65 + }, + { + "bbox": [ + 323, + 509, + 505, + 521 + ], + "spans": [ + { + "bbox": [ + 323, + 509, + 505, + 521 + ], + "score": 1.0, + "content": "ues, especially with 50 and 100 trajectories in the", + "type": "text" + } + ], + "index": 66 + }, + { + "bbox": [ + 323, + 520, + 505, + 531 + ], + "spans": [ + { + "bbox": [ + 323, + 520, + 505, + 531 + ], + "score": 1.0, + "content": "pick and place task, Right: Using VIB mitigates", + "type": "text" + } + ], + "index": 67 + }, + { + "bbox": [ + 323, + 530, + 505, + 540 + ], + "spans": [ + { + "bbox": [ + 323, + 531, + 470, + 540 + ], + "score": 1.0, + "content": "overfitting, giving rise to a stable trend in", + "type": "text" + }, + { + "bbox": [ + 470, + 530, + 478, + 540 + ], + "score": 0.25, + "content": "\\mathrm { Q }", + "type": "inline_equation" + }, + { + "bbox": [ + 479, + 531, + 505, + 540 + ], + "score": 1.0, + "content": "-values", + "type": "text" + } + ], + "index": 68 + }, + { + "bbox": [ + 323, + 539, + 505, + 551 + ], + "spans": [ + { + "bbox": [ + 323, + 539, + 505, + 551 + ], + "score": 1.0, + "content": "and better performance which does not degrade", + "type": "text" + } + ], + "index": 69 + }, + { + "bbox": [ + 324, + 549, + 416, + 562 + ], + "spans": [ + { + "bbox": [ + 324, + 549, + 416, + 562 + ], + "score": 1.0, + "content": "with more training steps.", + "type": "text" + } + ], + "index": 70 + } + ], + "index": 67.5 + } + ], + "index": 63.0 + }, + { + "type": "text", + "bbox": [ + 106, + 563, + 505, + 619 + ], + "lines": [ + { + "bbox": [ + 106, + 563, + 505, + 576 + ], + "spans": [ + { + "bbox": [ + 106, + 563, + 505, + 576 + ], + "score": 1.0, + "content": "shown in Figure 4 (right), applying this regularizer not only alleviates the drop in Q-values after", + "type": "text" + } + ], + 
"index": 71 + }, + { + "bbox": [ + 106, + 575, + 505, + 587 + ], + "spans": [ + { + "bbox": [ + 106, + 575, + 505, + 587 + ], + "score": 1.0, + "content": "many training steps, but allows us to pick later checkpoints in training which perform better than", + "type": "text" + } + ], + "index": 72 + }, + { + "bbox": [ + 105, + 584, + 505, + 598 + ], + "spans": [ + { + "bbox": [ + 105, + 584, + 505, + 598 + ], + "score": 1.0, + "content": "base CQL on both the tasks. This validates that overfitting, as detected via our workflow, can be", + "type": "text" + } + ], + "index": 73 + }, + { + "bbox": [ + 106, + 596, + 505, + 609 + ], + "spans": [ + { + "bbox": [ + 106, + 596, + 477, + 609 + ], + "score": 1.0, + "content": "effectively mitigated by decreasing capacity, in this case by using VIB. We evaluate dropout,", + "type": "text" + }, + { + "bbox": [ + 477, + 596, + 487, + 608 + ], + "score": 0.87, + "content": "\\ell _ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 487, + 596, + 505, + 609 + ], + "score": 1.0, + "content": "and", + "type": "text" + } + ], + "index": 74 + }, + { + "bbox": [ + 106, + 607, + 272, + 620 + ], + "spans": [ + { + "bbox": [ + 106, + 608, + 117, + 619 + ], + "score": 0.85, + "content": "\\ell _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 117, + 607, + 272, + 620 + ], + "score": 1.0, + "content": "regularization schemes in Appendix J.", + "type": "text" + } + ], + "index": 75 + } + ], + "index": 73 + }, + { + "type": "text", + "bbox": [ + 106, + 623, + 505, + 712 + ], + "lines": [ + { + "bbox": [ + 105, + 623, + 505, + 636 + ], + "spans": [ + { + "bbox": [ + 105, + 623, + 505, + 636 + ], + "score": 1.0, + "content": "Scenario #2: Multiple training objects. 
Our second test scenario consists of the pick and place", + "type": "text" + } + ], + "index": 76 + }, + { + "bbox": [ + 106, + 634, + 505, + 646 + ], + "spans": [ + { + "bbox": [ + 106, + 634, + 505, + 646 + ], + "score": 1.0, + "content": "task, modified to include a variable number of object types (1, 5, 10, 20, 35). Handling more objects", + "type": "text" + } + ], + "index": 77 + }, + { + "bbox": [ + 105, + 645, + 505, + 658 + ], + "spans": [ + { + "bbox": [ + 105, + 645, + 505, + 658 + ], + "score": 1.0, + "content": "requires higher capacity, since each object has a different shape and appearance. In each case, CQL", + "type": "text" + } + ], + "index": 78 + }, + { + "bbox": [ + 105, + 657, + 505, + 669 + ], + "spans": [ + { + "bbox": [ + 105, + 657, + 505, + 669 + ], + "score": 1.0, + "content": "is provided with 5000 trajectories. Following our workflow from Section 3, we first compute the", + "type": "text" + } + ], + "index": 79 + }, + { + "bbox": [ + 105, + 667, + 505, + 680 + ], + "spans": [ + { + "bbox": [ + 105, + 667, + 505, + 680 + ], + "score": 1.0, + "content": "average dataset Q-value and the training TD error. We observe in Figure 5 that, unlike in Scenario", + "type": "text" + } + ], + "index": 80 + }, + { + "bbox": [ + 104, + 677, + 506, + 691 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 506, + 691 + ], + "score": 1.0, + "content": "#1, Q-values do not generally decrease when trained for many steps, suggesting that the Q-function", + "type": "text" + } + ], + "index": 81 + }, + { + "bbox": [ + 105, + 689, + 505, + 702 + ], + "spans": [ + { + "bbox": [ + 105, + 689, + 505, + 702 + ], + "score": 1.0, + "content": "is likely not overfitting. 
To check for underfitting, we visualize the training TD error and find that,", + "type": "text" + } + ], + "index": 82 + }, + { + "bbox": [ + 106, + 699, + 506, + 713 + ], + "spans": [ + { + "bbox": [ + 106, + 699, + 506, + 713 + ], + "score": 1.0, + "content": "with 10, 20 and 35 objects, TD error magnitudes are in the range of [1.0, 2.0], which suggests a", + "type": "text" + } + ], + "index": 83 + } + ], + "index": 79.5 + } + ], + "page_idx": 5, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 302, + 742, + 308, + 750 + ], + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 752 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 752 + ], + "score": 1.0, + "content": "6", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 107, + 72, + 336, + 291 + ], + "lines": [], + "index": 9.5, + "bbox_fs": [ + 105, + 72, + 337, + 292 + ], + "lines_deleted": true + }, + { + "type": "image", + "bbox": [ + 352, + 77, + 497, + 213 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 352, + 77, + 497, + 213 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 352, + 77, + 497, + 213 + ], + "spans": [ + { + "bbox": [ + 352, + 77, + 497, + 213 + ], + "score": 0.966, + "type": "image", + "image_path": "7149b62299170ecd7a26a8a110d939daa58337b186bfd1671932f83c2c649963.jpg" + } + ] + } + ], + "index": 20.5, + "virtual_lines": [ + { + "bbox": [ + 352, + 77, + 497, + 145.0 + ], + "spans": [], + "index": 20 + }, + { + "bbox": [ + 352, + 145.0, + 497, + 213.0 + ], + "spans": [], + "index": 21 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 344, + 217, + 504, + 287 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 343, + 216, + 505, + 228 + ], + "spans": [ + { + "bbox": [ + 343, + 216, + 505, + 228 + ], + "score": 1.0, + "content": "Figure 3: Policy performance (Top) and", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 343, + 227, + 505, + 238 + ], + 
"spans": [ + { + "bbox": [ + 343, + 227, + 505, + 238 + ], + "score": 1.0, + "content": "average dataset Q-values of CQL (bot-", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 343, + 236, + 505, + 248 + ], + "spans": [ + { + "bbox": [ + 343, + 236, + 505, + 248 + ], + "score": 1.0, + "content": "tom) with varying number of trajectories.", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 344, + 247, + 505, + 257 + ], + "spans": [ + { + "bbox": [ + 344, + 247, + 505, + 257 + ], + "score": 1.0, + "content": "Vertical bands indicate regions around the", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 343, + 257, + 505, + 267 + ], + "spans": [ + { + "bbox": [ + 343, + 257, + 407, + 267 + ], + "score": 1.0, + "content": "peak in average", + "type": "text" + }, + { + "bbox": [ + 407, + 257, + 416, + 267 + ], + "score": 0.31, + "content": "\\mathrm { Q }", + "type": "inline_equation" + }, + { + "bbox": [ + 416, + 257, + 505, + 267 + ], + "score": 1.0, + "content": "-value and observe that", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 343, + 267, + 505, + 278 + ], + "spans": [ + { + "bbox": [ + 343, + 267, + 505, + 278 + ], + "score": 1.0, + "content": "these regions correspond to policies with", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 343, + 277, + 438, + 288 + ], + "spans": [ + { + "bbox": [ + 343, + 277, + 438, + 288 + ], + "score": 1.0, + "content": "good actual performance.", + "type": "text" + } + ], + "index": 28 + } + ], + "index": 25 + } + ], + "index": 22.75 + }, + { + "type": "text", + "bbox": [ + 106, + 296, + 505, + 340 + ], + "lines": [ + { + "bbox": [ + 106, + 296, + 505, + 308 + ], + "spans": [ + { + "bbox": [ + 106, + 296, + 505, + 308 + ], + "score": 1.0, + "content": "Scenario #1: Variable amount of training data. 
Our first scenario consists of the simulated tasks", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 106, + 307, + 505, + 319 + ], + "spans": [ + { + "bbox": [ + 106, + 307, + 505, + 319 + ], + "score": 1.0, + "content": "discussed above with a variable number of trajectories in the training data (50, 100, 500, 10000). We", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 106, + 319, + 505, + 330 + ], + "spans": [ + { + "bbox": [ + 106, + 319, + 505, + 330 + ], + "score": 1.0, + "content": "run CQL and track metrics 3.1 and 3.2 in each case. Observe in Figure 3 (bottom) that with fewer tra-", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 104, + 329, + 505, + 342 + ], + "spans": [ + { + "bbox": [ + 104, + 329, + 257, + 342 + ], + "score": 1.0, + "content": "jectories, the average dataset Q-value", + "type": "text" + }, + { + "bbox": [ + 258, + 329, + 329, + 341 + ], + "score": 0.93, + "content": "\\mathbb { E } _ { \\mathbf { s } , \\mathbf { a } \\sim \\mathcal { D } } [ Q _ { \\theta } ( \\mathbf { s } , \\mathbf { a } ) ]", + "type": "inline_equation" + }, + { + "bbox": [ + 329, + 329, + 505, + 342 + ], + "score": 1.0, + "content": "first rises, and then drops. This matches the", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 106, + 339, + 317, + 352 + ], + "spans": [ + { + "bbox": [ + 106, + 339, + 317, + 352 + ], + "score": 1.0, + "content": "description of overfitting in Section 3. 
Observe in", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 351, + 317, + 363 + ], + "spans": [ + { + "bbox": [ + 105, + 351, + 317, + 363 + ], + "score": 1.0, + "content": "Figure 4 (left) that, at the same time, the value of the", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 106, + 362, + 317, + 374 + ], + "spans": [ + { + "bbox": [ + 106, + 362, + 317, + 374 + ], + "score": 1.0, + "content": "CQL regularizer is very low, which is not consistent", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 106, + 372, + 316, + 384 + ], + "spans": [ + { + "bbox": [ + 106, + 372, + 316, + 384 + ], + "score": 1.0, + "content": "with what we expect of underfitting. Thus, we can", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 106, + 383, + 317, + 395 + ], + "spans": [ + { + "bbox": [ + 106, + 383, + 317, + 395 + ], + "score": 1.0, + "content": "conclude that these conditions exhibit overfitting, es-", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 106, + 395, + 317, + 406 + ], + "spans": [ + { + "bbox": [ + 106, + 395, + 317, + 406 + ], + "score": 1.0, + "content": "pecially with 50 and 100 trajectories. The vertical", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 106, + 405, + 317, + 417 + ], + "spans": [ + { + "bbox": [ + 106, + 405, + 317, + 417 + ], + "score": 1.0, + "content": "dashed lines indicate the checkpoints that would be", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 106, + 416, + 317, + 428 + ], + "spans": [ + { + "bbox": [ + 106, + 416, + 317, + 428 + ], + "score": 1.0, + "content": "selected for evaluation per Guideline 3.1. 
We fur-", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 106, + 428, + 317, + 439 + ], + "spans": [ + { + "bbox": [ + 106, + 428, + 317, + 439 + ], + "score": 1.0, + "content": "ther visualize the performance of the chosen check-", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 439, + 317, + 450 + ], + "spans": [ + { + "bbox": [ + 105, + 439, + 317, + 450 + ], + "score": 1.0, + "content": "points against the actual return of each intermediate", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 449, + 317, + 460 + ], + "spans": [ + { + "bbox": [ + 105, + 449, + 317, + 460 + ], + "score": 1.0, + "content": "policy in Figure 3 (top). Note that this value is ob-", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 460, + 317, + 472 + ], + "spans": [ + { + "bbox": [ + 105, + 460, + 317, + 472 + ], + "score": 1.0, + "content": "tained by rolling out the learned policy, and would", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 106, + 471, + 317, + 483 + ], + "spans": [ + { + "bbox": [ + 106, + 471, + 317, + 483 + ], + "score": 1.0, + "content": "not be available in a realistic offline RL setting, but is", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 106, + 482, + 317, + 493 + ], + "spans": [ + { + "bbox": [ + 106, + 482, + 317, + 493 + ], + "score": 1.0, + "content": "provided only for analysis. 
Selecting the checkpoint", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 106, + 493, + 317, + 504 + ], + "spans": [ + { + "bbox": [ + 106, + 493, + 317, + 504 + ], + "score": 1.0, + "content": "based on Guideline 3.1 leads us to select a model", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 106, + 503, + 317, + 516 + ], + "spans": [ + { + "bbox": [ + 106, + 503, + 317, + 516 + ], + "score": 1.0, + "content": "with close to the peak performance over the training", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 105, + 515, + 302, + 527 + ], + "spans": [ + { + "bbox": [ + 105, + 515, + 302, + 527 + ], + "score": 1.0, + "content": "process, validating the efficacy of Guideline 3.1.", + "type": "text" + } + ], + "index": 49 + } + ], + "index": 30.5, + "bbox_fs": [ + 104, + 296, + 505, + 342 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 341, + 316, + 525 + ], + "lines": [], + "index": 41, + "bbox_fs": [ + 105, + 339, + 317, + 527 + ], + "lines_deleted": true + }, + { + "type": "text", + "bbox": [ + 106, + 531, + 316, + 563 + ], + "lines": [ + { + "bbox": [ + 106, + 530, + 316, + 543 + ], + "spans": [ + { + "bbox": [ + 106, + 530, + 316, + 543 + ], + "score": 1.0, + "content": "Since we detected overfitting by following our work-", + "type": "text" + } + ], + "index": 50 + }, + { + "bbox": [ + 106, + 542, + 317, + 554 + ], + "spans": [ + { + "bbox": [ + 106, + 542, + 317, + 554 + ], + "score": 1.0, + "content": "flow, we now aim to address it by using the VIB", + "type": "text" + } + ], + "index": 51 + }, + { + "bbox": [ + 105, + 552, + 317, + 566 + ], + "spans": [ + { + "bbox": [ + 105, + 552, + 317, + 566 + ], + "score": 1.0, + "content": "regularizer in the setting with 100 trajectories. 
As", + "type": "text" + } + ], + "index": 52 + }, + { + "bbox": [ + 106, + 563, + 505, + 576 + ], + "spans": [ + { + "bbox": [ + 106, + 563, + 505, + 576 + ], + "score": 1.0, + "content": "shown in Figure 4 (right), applying this regularizer not only alleviates the drop in Q-values after", + "type": "text" + } + ], + "index": 71 + }, + { + "bbox": [ + 106, + 575, + 505, + 587 + ], + "spans": [ + { + "bbox": [ + 106, + 575, + 505, + 587 + ], + "score": 1.0, + "content": "many training steps, but allows us to pick later checkpoints in training which perform better than", + "type": "text" + } + ], + "index": 72 + }, + { + "bbox": [ + 105, + 584, + 505, + 598 + ], + "spans": [ + { + "bbox": [ + 105, + 584, + 505, + 598 + ], + "score": 1.0, + "content": "base CQL on both the tasks. This validates that overfitting, as detected via our workflow, can be", + "type": "text" + } + ], + "index": 73 + }, + { + "bbox": [ + 106, + 596, + 505, + 609 + ], + "spans": [ + { + "bbox": [ + 106, + 596, + 477, + 609 + ], + "score": 1.0, + "content": "effectively mitigated by decreasing capacity, in this case by using VIB. 
We evaluate dropout,", + "type": "text" + }, + { + "bbox": [ + 477, + 596, + 487, + 608 + ], + "score": 0.87, + "content": "\\ell _ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 487, + 596, + 505, + 609 + ], + "score": 1.0, + "content": "and", + "type": "text" + } + ], + "index": 74 + }, + { + "bbox": [ + 106, + 607, + 272, + 620 + ], + "spans": [ + { + "bbox": [ + 106, + 608, + 117, + 619 + ], + "score": 0.85, + "content": "\\ell _ { 2 }", + "type": "inline_equation" + }, + { + "bbox": [ + 117, + 607, + 272, + 620 + ], + "score": 1.0, + "content": "regularization schemes in Appendix J.", + "type": "text" + } + ], + "index": 75 + } + ], + "index": 51, + "bbox_fs": [ + 105, + 530, + 317, + 566 + ] + }, + { + "type": "image", + "bbox": [ + 328, + 344, + 499, + 497 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 328, + 344, + 499, + 497 + ], + "group_id": 1, + "lines": [ + { + "bbox": [ + 328, + 344, + 499, + 497 + ], + "spans": [ + { + "bbox": [ + 328, + 344, + 499, + 497 + ], + "score": 0.964, + "type": "image", + "image_path": "c8b831ad68078d018a8733761f0da0593a9224d909869ecb852f0ffd8ee06f24.jpg" + } + ] + } + ], + "index": 58.5, + "virtual_lines": [ + { + "bbox": [ + 328, + 344, + 499, + 356.75 + ], + "spans": [], + "index": 53 + }, + { + "bbox": [ + 328, + 356.75, + 499, + 369.5 + ], + "spans": [], + "index": 54 + }, + { + "bbox": [ + 328, + 369.5, + 499, + 382.25 + ], + "spans": [], + "index": 55 + }, + { + "bbox": [ + 328, + 382.25, + 499, + 395.0 + ], + "spans": [], + "index": 56 + }, + { + "bbox": [ + 328, + 395.0, + 499, + 407.75 + ], + "spans": [], + "index": 57 + }, + { + "bbox": [ + 328, + 407.75, + 499, + 420.5 + ], + "spans": [], + "index": 58 + }, + { + "bbox": [ + 328, + 420.5, + 499, + 433.25 + ], + "spans": [], + "index": 59 + }, + { + "bbox": [ + 328, + 433.25, + 499, + 446.0 + ], + "spans": [], + "index": 60 + }, + { + "bbox": [ + 328, + 446.0, + 499, + 458.75 + ], + "spans": [], + "index": 61 + }, + { + "bbox": [ + 328, 
+ 458.75, + 499, + 471.5 + ], + "spans": [], + "index": 62 + }, + { + "bbox": [ + 328, + 471.5, + 499, + 484.25 + ], + "spans": [], + "index": 63 + }, + { + "bbox": [ + 328, + 484.25, + 499, + 497.0 + ], + "spans": [], + "index": 64 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 324, + 500, + 504, + 560 + ], + "group_id": 1, + "lines": [ + { + "bbox": [ + 324, + 498, + 505, + 512 + ], + "spans": [ + { + "bbox": [ + 324, + 498, + 505, + 512 + ], + "score": 1.0, + "content": "Figure 4: Left: CQL regularizer attains low val-", + "type": "text" + } + ], + "index": 65 + }, + { + "bbox": [ + 323, + 509, + 505, + 521 + ], + "spans": [ + { + "bbox": [ + 323, + 509, + 505, + 521 + ], + "score": 1.0, + "content": "ues, especially with 50 and 100 trajectories in the", + "type": "text" + } + ], + "index": 66 + }, + { + "bbox": [ + 323, + 520, + 505, + 531 + ], + "spans": [ + { + "bbox": [ + 323, + 520, + 505, + 531 + ], + "score": 1.0, + "content": "pick and place task, Right: Using VIB mitigates", + "type": "text" + } + ], + "index": 67 + }, + { + "bbox": [ + 323, + 530, + 505, + 540 + ], + "spans": [ + { + "bbox": [ + 323, + 531, + 470, + 540 + ], + "score": 1.0, + "content": "overfitting, giving rise to a stable trend in", + "type": "text" + }, + { + "bbox": [ + 470, + 530, + 478, + 540 + ], + "score": 0.25, + "content": "\\mathrm { Q }", + "type": "inline_equation" + }, + { + "bbox": [ + 479, + 531, + 505, + 540 + ], + "score": 1.0, + "content": "-values", + "type": "text" + } + ], + "index": 68 + }, + { + "bbox": [ + 323, + 539, + 505, + 551 + ], + "spans": [ + { + "bbox": [ + 323, + 539, + 505, + 551 + ], + "score": 1.0, + "content": "and better performance which does not degrade", + "type": "text" + } + ], + "index": 69 + }, + { + "bbox": [ + 324, + 549, + 416, + 562 + ], + "spans": [ + { + "bbox": [ + 324, + 549, + 416, + 562 + ], + "score": 1.0, + "content": "with more training steps.", + "type": "text" + } + ], + "index": 70 + } + ], + "index": 67.5 + } + 
], + "index": 63.0 + }, + { + "type": "text", + "bbox": [ + 106, + 563, + 505, + 619 + ], + "lines": [], + "index": 73, + "bbox_fs": [ + 105, + 563, + 505, + 620 + ], + "lines_deleted": true + }, + { + "type": "text", + "bbox": [ + 106, + 623, + 505, + 712 + ], + "lines": [ + { + "bbox": [ + 105, + 623, + 505, + 636 + ], + "spans": [ + { + "bbox": [ + 105, + 623, + 505, + 636 + ], + "score": 1.0, + "content": "Scenario #2: Multiple training objects. Our second test scenario consists of the pick and place", + "type": "text" + } + ], + "index": 76 + }, + { + "bbox": [ + 106, + 634, + 505, + 646 + ], + "spans": [ + { + "bbox": [ + 106, + 634, + 505, + 646 + ], + "score": 1.0, + "content": "task, modified to include a variable number of object types (1, 5, 10, 20, 35). Handling more objects", + "type": "text" + } + ], + "index": 77 + }, + { + "bbox": [ + 105, + 645, + 505, + 658 + ], + "spans": [ + { + "bbox": [ + 105, + 645, + 505, + 658 + ], + "score": 1.0, + "content": "requires higher capacity, since each object has a different shape and appearance. In each case, CQL", + "type": "text" + } + ], + "index": 78 + }, + { + "bbox": [ + 105, + 657, + 505, + 669 + ], + "spans": [ + { + "bbox": [ + 105, + 657, + 505, + 669 + ], + "score": 1.0, + "content": "is provided with 5000 trajectories. Following our workflow from Section 3, we first compute the", + "type": "text" + } + ], + "index": 79 + }, + { + "bbox": [ + 105, + 667, + 505, + 680 + ], + "spans": [ + { + "bbox": [ + 105, + 667, + 505, + 680 + ], + "score": 1.0, + "content": "average dataset Q-value and the training TD error. 
We observe in Figure 5 that, unlike in Scenario", + "type": "text" + } + ], + "index": 80 + }, + { + "bbox": [ + 104, + 677, + 506, + 691 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 506, + 691 + ], + "score": 1.0, + "content": "#1, Q-values do not generally decrease when trained for many steps, suggesting that the Q-function", + "type": "text" + } + ], + "index": 81 + }, + { + "bbox": [ + 105, + 689, + 505, + 702 + ], + "spans": [ + { + "bbox": [ + 105, + 689, + 505, + 702 + ], + "score": 1.0, + "content": "is likely not overfitting. To check for underfitting, we visualize the training TD error and find that,", + "type": "text" + } + ], + "index": 82 + }, + { + "bbox": [ + 106, + 699, + 506, + 713 + ], + "spans": [ + { + "bbox": [ + 106, + 699, + 506, + 713 + ], + "score": 1.0, + "content": "with 10, 20 and 35 objects, TD error magnitudes are in the range of [1.0, 2.0], which suggests a", + "type": "text" + } + ], + "index": 83 + }, + { + "bbox": [ + 106, + 73, + 505, + 85 + ], + "spans": [ + { + "bbox": [ + 106, + 73, + 505, + 85 + ], + "score": 1.0, + "content": "overall Q-value error of [30.0, 60.0] since the task horizon is 30. 
On an absolute scale, this error", + "type": "text", + "cross_page": true + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 84, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 84, + 275, + 95 + ], + "score": 1.0, + "content": "magnitude is large: since the rewards are", + "type": "text", + "cross_page": true + }, + { + "bbox": [ + 276, + 84, + 290, + 94 + ], + "score": 0.33, + "content": "_ { 0 / 1 }", + "type": "inline_equation", + "cross_page": true + }, + { + "bbox": [ + 290, + 84, + 505, + 95 + ], + "score": 1.0, + "content": ", the range of difference between actual Q-values for", + "type": "text", + "cross_page": true + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 94, + 506, + 107 + ], + "spans": [ + { + "bbox": [ + 105, + 94, + 506, + 107 + ], + "score": 1.0, + "content": "any two policies is at most 30, which suggests that the error magnitude in the runs in Figure 5", + "type": "text", + "cross_page": true + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 106, + 505, + 118 + ], + "spans": [ + { + "bbox": [ + 106, + 106, + 505, + 118 + ], + "score": 1.0, + "content": "are high. 
Hence, we conclude that this scenario generally exhibits underfitting with more objects.", + "type": "text", + "cross_page": true + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 115, + 505, + 129 + ], + "spans": [ + { + "bbox": [ + 105, + 115, + 505, + 129 + ], + "score": 1.0, + "content": "Indeed this trend is reflected in the policy performance that we plot for analysis in Figure 5: note", + "type": "text", + "cross_page": true + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 127, + 505, + 140 + ], + "spans": [ + { + "bbox": [ + 105, + 127, + 505, + 140 + ], + "score": 1.0, + "content": "that the policy return decreases with an increased number of objects, and the policy performance", + "type": "text", + "cross_page": true + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 138, + 323, + 150 + ], + "spans": [ + { + "bbox": [ + 106, + 138, + 323, + 150 + ], + "score": 1.0, + "content": "initially increases and saturates at a suboptimal value.", + "type": "text", + "cross_page": true + } + ], + "index": 6 + } + ], + "index": 79.5, + "bbox_fs": [ + 104, + 623, + 506, + 713 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 72, + 505, + 149 + ], + "lines": [ + { + "bbox": [ + 106, + 73, + 505, + 85 + ], + "spans": [ + { + "bbox": [ + 106, + 73, + 505, + 85 + ], + "score": 1.0, + "content": "overall Q-value error of [30.0, 60.0] since the task horizon is 30. 
On an absolute scale, this error", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 84, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 84, + 275, + 95 + ], + "score": 1.0, + "content": "magnitude is large: since the rewards are", + "type": "text" + }, + { + "bbox": [ + 276, + 84, + 290, + 94 + ], + "score": 0.33, + "content": "_ { 0 / 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 290, + 84, + 505, + 95 + ], + "score": 1.0, + "content": ", the range of difference between actual Q-values for", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 94, + 506, + 107 + ], + "spans": [ + { + "bbox": [ + 105, + 94, + 506, + 107 + ], + "score": 1.0, + "content": "any two policies is at most 30, which suggests that the error magnitude in the runs in Figure 5", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 106, + 505, + 118 + ], + "spans": [ + { + "bbox": [ + 106, + 106, + 505, + 118 + ], + "score": 1.0, + "content": "are high. 
Hence, we conclude that this scenario generally exhibits underfitting with more objects.", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 115, + 505, + 129 + ], + "spans": [ + { + "bbox": [ + 105, + 115, + 505, + 129 + ], + "score": 1.0, + "content": "Indeed this trend is reflected in the policy performance that we plot for analysis in Figure 5: note", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 127, + 505, + 140 + ], + "spans": [ + { + "bbox": [ + 105, + 127, + 505, + 140 + ], + "score": 1.0, + "content": "that the policy return decreases with an increased number of objects, and the policy performance", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 138, + 323, + 150 + ], + "spans": [ + { + "bbox": [ + 106, + 138, + 323, + 150 + ], + "score": 1.0, + "content": "initially increases and saturates at a suboptimal value.", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 115, + 156, + 387, + 240 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 115, + 156, + 387, + 240 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 115, + 156, + 387, + 240 + ], + "spans": [ + { + "bbox": [ + 115, + 156, + 387, + 240 + ], + "score": 0.963, + "type": "image", + "image_path": "abb86c89607619cd53397449a92fc47f01701017a73f7df2599b8c654f14d0ca.jpg" + } + ] + } + ], + "index": 8, + "virtual_lines": [ + { + "bbox": [ + 115, + 156, + 387, + 184.0 + ], + "spans": [], + "index": 7 + }, + { + "bbox": [ + 115, + 184.0, + 387, + 212.0 + ], + "spans": [], + "index": 8 + }, + { + "bbox": [ + 115, + 212.0, + 387, + 240.0 + ], + "spans": [], + "index": 10 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 109, + 243, + 395, + 294 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 108, + 242, + 397, + 255 + ], + "spans": [ + { + "bbox": [ + 108, + 242, + 397, + 255 + ], + "score": 1.0, + "content": "Figure 5: Performance (left), TD error (middle) and 
average dataset Q-", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 108, + 253, + 397, + 265 + ], + "spans": [ + { + "bbox": [ + 108, + 253, + 397, + 265 + ], + "score": 1.0, + "content": "values (right) for the pick and place task with a variable number of objects.", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 108, + 263, + 396, + 275 + ], + "spans": [ + { + "bbox": [ + 108, + 263, + 396, + 275 + ], + "score": 1.0, + "content": "Note that while the learned Q-values increase and stabilize, the TD error values", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 273, + 397, + 285 + ], + "spans": [ + { + "bbox": [ + 107, + 273, + 397, + 285 + ], + "score": 1.0, + "content": "in scenarios with more than 10 objects are large (1.0-2.0). Correspondingly, the", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 108, + 284, + 356, + 295 + ], + "spans": [ + { + "bbox": [ + 108, + 284, + 356, + 295 + ], + "score": 1.0, + "content": "performance generally decreases as the number of objects increases.", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 15 + } + ], + "index": 11.5 + }, + { + "type": "image", + "bbox": [ + 412, + 159, + 501, + 250 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 412, + 159, + 501, + 250 + ], + "group_id": 1, + "lines": [ + { + "bbox": [ + 412, + 159, + 501, + 250 + ], + "spans": [ + { + "bbox": [ + 412, + 159, + 501, + 250 + ], + "score": 0.925, + "type": "image", + "image_path": "b4ce278afd05bdaaa57ddb722d6767ecad2d352a23720d7d7f144183cc608616.jpg" + } + ] + } + ], + "index": 10.0, + "virtual_lines": [ + { + "bbox": [ + 412, + 159, + 501, + 204.5 + ], + "spans": [], + "index": 9 + }, + { + "bbox": [ + 412, + 204.5, + 501, + 250.0 + ], + "spans": [], + "index": 11 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 406, + 253, + 500, + 284 + ], + "group_id": 1, + "lines": [ + { + "bbox": [ + 406, + 251, + 501, + 264 + ], + "spans": [ + { + "bbox": [ + 
406, + 251, + 501, + 264 + ], + "score": 1.0, + "content": "Figure 6: Correcting un-", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 405, + 262, + 501, + 275 + ], + "spans": [ + { + "bbox": [ + 405, + 262, + 501, + 275 + ], + "score": 1.0, + "content": "derfitting by applying our", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 406, + 273, + 497, + 284 + ], + "spans": [ + { + "bbox": [ + 406, + 273, + 497, + 284 + ], + "score": 1.0, + "content": "workflow for 35 objects.", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 16 + } + ], + "index": 13.0 + }, + { + "type": "text", + "bbox": [ + 107, + 295, + 506, + 351 + ], + "lines": [ + { + "bbox": [ + 106, + 294, + 505, + 309 + ], + "spans": [ + { + "bbox": [ + 106, + 294, + 505, + 309 + ], + "score": 1.0, + "content": "To address underfitting in the multi-object case, we apply the proposed capacity-increasing measures", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 305, + 505, + 319 + ], + "spans": [ + { + "bbox": [ + 105, + 305, + 505, + 319 + ], + "score": 1.0, + "content": "to the 35-object task (results for 10 and 20 object settings are in Appendix I). We use a more expres-", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 317, + 506, + 330 + ], + "spans": [ + { + "bbox": [ + 105, + 317, + 506, + 330 + ], + "score": 1.0, + "content": "sive ResNet architecture for the policy and the DR3 regularizer for the Q-function together. 
Observe", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 106, + 328, + 506, + 341 + ], + "spans": [ + { + "bbox": [ + 106, + 328, + 506, + 341 + ], + "score": 1.0, + "content": "in the figure on the right that this combination (shown in red) improves policy performance in this", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 339, + 488, + 352 + ], + "spans": [ + { + "bbox": [ + 106, + 339, + 488, + 352 + ], + "score": 1.0, + "content": "setting (compared to green), which validates our workflow protocol for addressing underfitting.", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 22 + }, + { + "type": "title", + "bbox": [ + 106, + 357, + 388, + 370 + ], + "lines": [ + { + "bbox": [ + 105, + 356, + 389, + 374 + ], + "spans": [ + { + "bbox": [ + 105, + 356, + 389, + 374 + ], + "score": 1.0, + "content": "6 Tuning CQL for Real-World Robotic Manipulation", + "type": "text" + } + ], + "index": 25 + } + ], + "index": 25 + }, + { + "type": "text", + "bbox": [ + 107, + 372, + 316, + 471 + ], + "lines": [ + { + "bbox": [ + 106, + 372, + 316, + 384 + ], + "spans": [ + { + "bbox": [ + 106, + 372, + 316, + 384 + ], + "score": 1.0, + "content": "Having evaluated the efficacy of our proposed work-", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 106, + 384, + 317, + 394 + ], + "spans": [ + { + "bbox": [ + 106, + 384, + 317, + 394 + ], + "score": 1.0, + "content": "flow in simulation, we now utilize our workflow to", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 106, + 394, + 317, + 406 + ], + "spans": [ + { + "bbox": [ + 106, + 394, + 317, + 406 + ], + "score": 1.0, + "content": "tune CQL for real-world robotic manipulation. 
We", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 405, + 317, + 416 + ], + "spans": [ + { + "bbox": [ + 105, + 405, + 317, + 416 + ], + "score": 1.0, + "content": "test in two setups that require the robot to learn from", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 416, + 317, + 428 + ], + "spans": [ + { + "bbox": [ + 105, + 416, + 317, + 428 + ], + "score": 1.0, + "content": "sparse binary rewards and image observations. The", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 106, + 427, + 316, + 439 + ], + "spans": [ + { + "bbox": [ + 106, + 427, + 316, + 439 + ], + "score": 1.0, + "content": "settings differ in robot platform, task specification,", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 437, + 317, + 450 + ], + "spans": [ + { + "bbox": [ + 105, + 437, + 317, + 450 + ], + "score": 1.0, + "content": "and dataset size. Additional results and robot videos", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 106, + 449, + 316, + 461 + ], + "spans": [ + { + "bbox": [ + 106, + 449, + 316, + 461 + ], + "score": 1.0, + "content": "are at the following website: https://sites.", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 460, + 294, + 471 + ], + "spans": [ + { + "bbox": [ + 105, + 460, + 294, + 471 + ], + "score": 1.0, + "content": "google.com/view/offline-rl-workflow", + "type": "text" + } + ], + "index": 34 + } + ], + "index": 30 + }, + { + "type": "text", + "bbox": [ + 106, + 476, + 316, + 487 + ], + "lines": [ + { + "bbox": [ + 105, + 475, + 318, + 489 + ], + "spans": [ + { + "bbox": [ + 105, + 475, + 318, + 489 + ], + "score": 1.0, + "content": "Sawyer manipulation tasks [30]. 
First, we train a", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 35 + }, + { + "type": "image", + "bbox": [ + 325, + 377, + 503, + 435 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 325, + 377, + 503, + 435 + ], + "group_id": 2, + "lines": [ + { + "bbox": [ + 325, + 377, + 503, + 435 + ], + "spans": [ + { + "bbox": [ + 325, + 377, + 503, + 435 + ], + "score": 0.958, + "type": "image", + "image_path": "566e1138148ac542d1676690513ab2bab4af3cbfec1257dc70a864019d00681c.jpg" + } + ] + } + ], + "index": 37.5, + "virtual_lines": [ + { + "bbox": [ + 325, + 377, + 503, + 391.5 + ], + "spans": [], + "index": 36 + }, + { + "bbox": [ + 325, + 391.5, + 503, + 406.0 + ], + "spans": [], + "index": 37 + }, + { + "bbox": [ + 325, + 406.0, + 503, + 420.5 + ], + "spans": [], + "index": 38 + }, + { + "bbox": [ + 325, + 420.5, + 503, + 435.0 + ], + "spans": [], + "index": 39 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 324, + 441, + 505, + 482 + ], + "group_id": 2, + "lines": [ + { + "bbox": [ + 324, + 440, + 505, + 452 + ], + "spans": [ + { + "bbox": [ + 324, + 440, + 505, + 452 + ], + "score": 1.0, + "content": "Figure 7: Real-world tasks. Successful rollouts", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 324, + 451, + 505, + 461 + ], + "spans": [ + { + "bbox": [ + 324, + 451, + 505, + 461 + ], + "score": 1.0, + "content": "of CQL tuned with our workflow from Sections 3", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 324, + 461, + 505, + 472 + ], + "spans": [ + { + "bbox": [ + 324, + 461, + 505, + 472 + ], + "score": 1.0, + "content": "& 4. 
Top to bottom: Sawyer lid on pot, Sawyer", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 324, + 471, + 478, + 482 + ], + "spans": [ + { + "bbox": [ + 324, + 471, + 478, + 482 + ], + "score": 1.0, + "content": "drawer opening, WidowX pick-place task.", + "type": "text" + } + ], + "index": 43 + } + ], + "index": 41.5 + } + ], + "index": 39.5 + }, + { + "type": "text", + "bbox": [ + 106, + 488, + 505, + 553 + ], + "lines": [ + { + "bbox": [ + 105, + 486, + 505, + 500 + ], + "spans": [ + { + "bbox": [ + 105, + 486, + 505, + 500 + ], + "score": 1.0, + "content": "Sawyer robot in a tabletop setting to perform two tasks: (1) placing the lid onto a pot and (2) opening", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 498, + 505, + 510 + ], + "spans": [ + { + "bbox": [ + 105, + 498, + 505, + 510 + ], + "score": 1.0, + "content": "a drawer. The robot must perform these tasks in the presence of visual distractor objects, as shown in", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 508, + 505, + 521 + ], + "spans": [ + { + "bbox": [ + 105, + 508, + 505, + 521 + ], + "score": 1.0, + "content": "Figure 7. We directly use the dataset of 100 trajectories for each task collected by Khazatsky et al.", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 105, + 520, + 505, + 532 + ], + "spans": [ + { + "bbox": [ + 105, + 520, + 505, + 532 + ], + "score": 1.0, + "content": "[30] for our experiments so as to mimic the real-world use case of leveraging existing data with", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 105, + 531, + 506, + 543 + ], + "spans": [ + { + "bbox": [ + 105, + 531, + 506, + 543 + ], + "score": 1.0, + "content": "offline RL. 
We use four-dimensional actions with 3D end-effector velocity control in xyz-space and", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 106, + 541, + 475, + 554 + ], + "spans": [ + { + "bbox": [ + 106, + 541, + 475, + 554 + ], + "score": 1.0, + "content": "1D gripper open/close action. More details regarding the setup are provided in Appendix D.", + "type": "text" + } + ], + "index": 49 + } + ], + "index": 46.5 + }, + { + "type": "text", + "bbox": [ + 107, + 559, + 296, + 719 + ], + "lines": [ + { + "bbox": [ + 107, + 558, + 297, + 569 + ], + "spans": [ + { + "bbox": [ + 107, + 558, + 297, + 569 + ], + "score": 1.0, + "content": "We run default CQL on these tasks and track", + "type": "text" + } + ], + "index": 50 + }, + { + "bbox": [ + 105, + 567, + 297, + 582 + ], + "spans": [ + { + "bbox": [ + 105, + 567, + 297, + 582 + ], + "score": 1.0, + "content": "the average Q-value, TD error, and CQL reg-", + "type": "text" + } + ], + "index": 51 + }, + { + "bbox": [ + 106, + 579, + 297, + 592 + ], + "spans": [ + { + "bbox": [ + 106, + 579, + 297, + 592 + ], + "score": 1.0, + "content": "ularizer value. As shown in Figure 8, the av-", + "type": "text" + } + ], + "index": 52 + }, + { + "bbox": [ + 105, + 591, + 297, + 604 + ], + "spans": [ + { + "bbox": [ + 105, + 591, + 297, + 604 + ], + "score": 1.0, + "content": "erage Q-value does not decrease over training,", + "type": "text" + } + ], + "index": 53 + }, + { + "bbox": [ + 106, + 601, + 297, + 613 + ], + "spans": [ + { + "bbox": [ + 106, + 601, + 297, + 613 + ], + "score": 1.0, + "content": "and the TD error (and CQL regularizer shown", + "type": "text" + } + ], + "index": 54 + }, + { + "bbox": [ + 106, + 612, + 297, + 624 + ], + "spans": [ + { + "bbox": [ + 106, + 612, + 297, + 624 + ], + "score": 1.0, + "content": "in Appendix E.2) is large. 
Per our discussion in", + "type": "text" + } + ], + "index": 55 + }, + { + "bbox": [ + 106, + 623, + 297, + 636 + ], + "spans": [ + { + "bbox": [ + 106, + 623, + 297, + 636 + ], + "score": 1.0, + "content": "Section 3, this indicates underfitting. Following", + "type": "text" + } + ], + "index": 56 + }, + { + "bbox": [ + 106, + 635, + 297, + 646 + ], + "spans": [ + { + "bbox": [ + 106, + 635, + 297, + 646 + ], + "score": 1.0, + "content": "our guidelines from Section 4, we utilize a more", + "type": "text" + } + ], + "index": 57 + }, + { + "bbox": [ + 105, + 645, + 297, + 657 + ], + "spans": [ + { + "bbox": [ + 105, + 645, + 297, + 657 + ], + "score": 1.0, + "content": "expressive ResNet policy (Figure 10), which in-", + "type": "text" + } + ], + "index": 58 + }, + { + "bbox": [ + 106, + 657, + 297, + 668 + ], + "spans": [ + { + "bbox": [ + 106, + 657, + 297, + 668 + ], + "score": 1.0, + "content": "creases the number of total convolutional layers", + "type": "text" + } + ], + "index": 59 + }, + { + "bbox": [ + 106, + 667, + 297, + 678 + ], + "spans": [ + { + "bbox": [ + 106, + 667, + 297, + 678 + ], + "score": 1.0, + "content": "from 3 to 9. We observe that this reduces the", + "type": "text" + } + ], + "index": 60 + }, + { + "bbox": [ + 106, + 678, + 297, + 689 + ], + "spans": [ + { + "bbox": [ + 106, + 678, + 297, + 689 + ], + "score": 1.0, + "content": "values of both the TD error Figure 8 and CQL", + "type": "text" + } + ], + "index": 61 + }, + { + "bbox": [ + 106, + 689, + 297, + 700 + ], + "spans": [ + { + "bbox": [ + 106, + 689, + 297, + 700 + ], + "score": 1.0, + "content": "regularizer (Appendix E.2) on both tasks. 
We", + "type": "text" + } + ], + "index": 62 + } + ], + "index": 56 + }, + { + "type": "image", + "bbox": [ + 309, + 554, + 500, + 646 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 309, + 554, + 500, + 646 + ], + "group_id": 3, + "lines": [ + { + "bbox": [ + 309, + 554, + 500, + 646 + ], + "spans": [ + { + "bbox": [ + 309, + 554, + 500, + 646 + ], + "score": 0.966, + "type": "image", + "image_path": "d5317090e1ad32bc153c1daf1e4d3802dfab85d171f13b7f15890473be1358cc.jpg" + } + ] + } + ], + "index": 66, + "virtual_lines": [ + { + "bbox": [ + 309, + 554, + 500, + 567.1428571428571 + ], + "spans": [], + "index": 63 + }, + { + "bbox": [ + 309, + 567.1428571428571, + 500, + 580.2857142857142 + ], + "spans": [], + "index": 64 + }, + { + "bbox": [ + 309, + 580.2857142857142, + 500, + 593.4285714285713 + ], + "spans": [], + "index": 65 + }, + { + "bbox": [ + 309, + 593.4285714285713, + 500, + 606.5714285714284 + ], + "spans": [], + "index": 66 + }, + { + "bbox": [ + 309, + 606.5714285714284, + 500, + 619.7142857142856 + ], + "spans": [], + "index": 67 + }, + { + "bbox": [ + 309, + 619.7142857142856, + 500, + 632.8571428571427 + ], + "spans": [], + "index": 68 + }, + { + "bbox": [ + 309, + 632.8571428571427, + 500, + 645.9999999999998 + ], + "spans": [], + "index": 69 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 304, + 650, + 504, + 700 + ], + "group_id": 3, + "lines": [ + { + "bbox": [ + 303, + 649, + 506, + 662 + ], + "spans": [ + { + "bbox": [ + 303, + 649, + 506, + 662 + ], + "score": 1.0, + "content": "Figure 8: Average Q-value and TD error on Sawyer", + "type": "text" + } + ], + "index": 70 + }, + { + "bbox": [ + 304, + 660, + 505, + 671 + ], + "spans": [ + { + "bbox": [ + 304, + 660, + 505, + 671 + ], + "score": 1.0, + "content": "tasks as model capacity increases. 
Q-values increase", + "type": "text" + } + ], + "index": 71 + }, + { + "bbox": [ + 303, + 670, + 505, + 682 + ], + "spans": [ + { + "bbox": [ + 303, + 670, + 505, + 682 + ], + "score": 1.0, + "content": "over training with lower capacity ruling out overfitting", + "type": "text" + } + ], + "index": 72 + }, + { + "bbox": [ + 304, + 680, + 505, + 691 + ], + "spans": [ + { + "bbox": [ + 304, + 680, + 505, + 691 + ], + "score": 1.0, + "content": "and increasing model capacity leads to a reduction in", + "type": "text" + } + ], + "index": 73 + }, + { + "bbox": [ + 303, + 689, + 479, + 702 + ], + "spans": [ + { + "bbox": [ + 303, + 689, + 479, + 702 + ], + "score": 1.0, + "content": "TD error indicating the presence of underfitting.", + "type": "text" + } + ], + "index": 74 + } + ], + "index": 72 + } + ], + "index": 69.0 + }, + { + "type": "text", + "bbox": [ + 106, + 700, + 505, + 722 + ], + "lines": [ + { + "bbox": [ + 106, + 700, + 505, + 712 + ], + "spans": [ + { + "bbox": [ + 106, + 700, + 505, + 712 + ], + "score": 1.0, + "content": "then evaluate the learned policy over 12 trials conducted with different sets of distractor objects,", + "type": "text" + } + ], + "index": 75 + }, + { + "bbox": [ + 105, + 711, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 711, + 505, + 723 + ], + "score": 1.0, + "content": "including ones that are unseen during training. 
While the policy trained using base CQL is unable", + "type": "text" + } + ], + "index": 76 + } + ], + "index": 75.5 + } + ], + "page_idx": 6, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 302, + 741, + 309, + 750 + ], + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 752 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 752 + ], + "score": 1.0, + "content": "7", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 72, + 505, + 149 + ], + "lines": [], + "index": 3, + "bbox_fs": [ + 105, + 73, + 506, + 150 + ], + "lines_deleted": true + }, + { + "type": "image", + "bbox": [ + 115, + 156, + 387, + 240 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 115, + 156, + 387, + 240 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 115, + 156, + 387, + 240 + ], + "spans": [ + { + "bbox": [ + 115, + 156, + 387, + 240 + ], + "score": 0.963, + "type": "image", + "image_path": "abb86c89607619cd53397449a92fc47f01701017a73f7df2599b8c654f14d0ca.jpg" + } + ] + } + ], + "index": 8, + "virtual_lines": [ + { + "bbox": [ + 115, + 156, + 387, + 184.0 + ], + "spans": [], + "index": 7 + }, + { + "bbox": [ + 115, + 184.0, + 387, + 212.0 + ], + "spans": [], + "index": 8 + }, + { + "bbox": [ + 115, + 212.0, + 387, + 240.0 + ], + "spans": [], + "index": 10 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 109, + 243, + 395, + 294 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 108, + 242, + 397, + 255 + ], + "spans": [ + { + "bbox": [ + 108, + 242, + 397, + 255 + ], + "score": 1.0, + "content": "Figure 5: Performance (left), TD error (middle) and average dataset Q-", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 108, + 253, + 397, + 265 + ], + "spans": [ + { + "bbox": [ + 108, + 253, + 397, + 265 + ], + "score": 1.0, + "content": "values (right) for the pick and place task with a variable number of objects.", + "type": "text" + } + ], + 
"index": 13 + }, + { + "bbox": [ + 108, + 263, + 396, + 275 + ], + "spans": [ + { + "bbox": [ + 108, + 263, + 396, + 275 + ], + "score": 1.0, + "content": "Note that while the learned Q-values increase and stabilize, the TD error values", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 273, + 397, + 285 + ], + "spans": [ + { + "bbox": [ + 107, + 273, + 397, + 285 + ], + "score": 1.0, + "content": "in scenarios with more than 10 objects are large (1.0-2.0). Correspondingly, the", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 108, + 284, + 356, + 295 + ], + "spans": [ + { + "bbox": [ + 108, + 284, + 356, + 295 + ], + "score": 1.0, + "content": "performance generally decreases as the number of objects increases.", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 15 + } + ], + "index": 11.5 + }, + { + "type": "image", + "bbox": [ + 412, + 159, + 501, + 250 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 412, + 159, + 501, + 250 + ], + "group_id": 1, + "lines": [ + { + "bbox": [ + 412, + 159, + 501, + 250 + ], + "spans": [ + { + "bbox": [ + 412, + 159, + 501, + 250 + ], + "score": 0.925, + "type": "image", + "image_path": "b4ce278afd05bdaaa57ddb722d6767ecad2d352a23720d7d7f144183cc608616.jpg" + } + ] + } + ], + "index": 10.0, + "virtual_lines": [ + { + "bbox": [ + 412, + 159, + 501, + 204.5 + ], + "spans": [], + "index": 9 + }, + { + "bbox": [ + 412, + 204.5, + 501, + 250.0 + ], + "spans": [], + "index": 11 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 406, + 253, + 500, + 284 + ], + "group_id": 1, + "lines": [ + { + "bbox": [ + 406, + 251, + 501, + 264 + ], + "spans": [ + { + "bbox": [ + 406, + 251, + 501, + 264 + ], + "score": 1.0, + "content": "Figure 6: Correcting un-", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 405, + 262, + 501, + 275 + ], + "spans": [ + { + "bbox": [ + 405, + 262, + 501, + 275 + ], + "score": 1.0, + "content": "derfitting by applying our", + "type": 
"text" + } + ], + "index": 16 + }, + { + "bbox": [ + 406, + 273, + 497, + 284 + ], + "spans": [ + { + "bbox": [ + 406, + 273, + 497, + 284 + ], + "score": 1.0, + "content": "workflow for 35 objects.", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 16 + } + ], + "index": 13.0 + }, + { + "type": "text", + "bbox": [ + 107, + 295, + 506, + 351 + ], + "lines": [ + { + "bbox": [ + 106, + 294, + 505, + 309 + ], + "spans": [ + { + "bbox": [ + 106, + 294, + 505, + 309 + ], + "score": 1.0, + "content": "To address underfitting in the multi-object case, we apply the proposed capacity-increasing measures", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 305, + 505, + 319 + ], + "spans": [ + { + "bbox": [ + 105, + 305, + 505, + 319 + ], + "score": 1.0, + "content": "to the 35-object task (results for 10 and 20 object settings are in Appendix I). We use a more expres-", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 317, + 506, + 330 + ], + "spans": [ + { + "bbox": [ + 105, + 317, + 506, + 330 + ], + "score": 1.0, + "content": "sive ResNet architecture for the policy and the DR3 regularizer for the Q-function together. 
Observe", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 106, + 328, + 506, + 341 + ], + "spans": [ + { + "bbox": [ + 106, + 328, + 506, + 341 + ], + "score": 1.0, + "content": "in the figure on the right that this combination (shown in red) improves policy performance in this", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 339, + 488, + 352 + ], + "spans": [ + { + "bbox": [ + 106, + 339, + 488, + 352 + ], + "score": 1.0, + "content": "setting (compared to green), which validates our workflow protocol for addressing underfitting.", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 22, + "bbox_fs": [ + 105, + 294, + 506, + 352 + ] + }, + { + "type": "title", + "bbox": [ + 106, + 357, + 388, + 370 + ], + "lines": [ + { + "bbox": [ + 105, + 356, + 389, + 374 + ], + "spans": [ + { + "bbox": [ + 105, + 356, + 389, + 374 + ], + "score": 1.0, + "content": "6 Tuning CQL for Real-World Robotic Manipulation", + "type": "text" + } + ], + "index": 25 + } + ], + "index": 25 + }, + { + "type": "text", + "bbox": [ + 107, + 372, + 316, + 471 + ], + "lines": [ + { + "bbox": [ + 106, + 372, + 316, + 384 + ], + "spans": [ + { + "bbox": [ + 106, + 372, + 316, + 384 + ], + "score": 1.0, + "content": "Having evaluated the efficacy of our proposed work-", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 106, + 384, + 317, + 394 + ], + "spans": [ + { + "bbox": [ + 106, + 384, + 317, + 394 + ], + "score": 1.0, + "content": "flow in simulation, we now utilize our workflow to", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 106, + 394, + 317, + 406 + ], + "spans": [ + { + "bbox": [ + 106, + 394, + 317, + 406 + ], + "score": 1.0, + "content": "tune CQL for real-world robotic manipulation. 
We", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 405, + 317, + 416 + ], + "spans": [ + { + "bbox": [ + 105, + 405, + 317, + 416 + ], + "score": 1.0, + "content": "test in two setups that require the robot to learn from", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 416, + 317, + 428 + ], + "spans": [ + { + "bbox": [ + 105, + 416, + 317, + 428 + ], + "score": 1.0, + "content": "sparse binary rewards and image observations. The", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 106, + 427, + 316, + 439 + ], + "spans": [ + { + "bbox": [ + 106, + 427, + 316, + 439 + ], + "score": 1.0, + "content": "settings differ in robot platform, task specification,", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 437, + 317, + 450 + ], + "spans": [ + { + "bbox": [ + 105, + 437, + 317, + 450 + ], + "score": 1.0, + "content": "and dataset size. Additional results and robot videos", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 106, + 449, + 316, + 461 + ], + "spans": [ + { + "bbox": [ + 106, + 449, + 316, + 461 + ], + "score": 1.0, + "content": "are at the following website: https://sites.", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 460, + 294, + 471 + ], + "spans": [ + { + "bbox": [ + 105, + 460, + 294, + 471 + ], + "score": 1.0, + "content": "google.com/view/offline-rl-workflow", + "type": "text" + } + ], + "index": 34 + } + ], + "index": 30, + "bbox_fs": [ + 105, + 372, + 317, + 471 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 476, + 316, + 487 + ], + "lines": [ + { + "bbox": [ + 105, + 475, + 318, + 489 + ], + "spans": [ + { + "bbox": [ + 105, + 475, + 318, + 489 + ], + "score": 1.0, + "content": "Sawyer manipulation tasks [30]. 
First, we train a", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 35, + "bbox_fs": [ + 105, + 475, + 318, + 489 + ] + }, + { + "type": "image", + "bbox": [ + 325, + 377, + 503, + 435 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 325, + 377, + 503, + 435 + ], + "group_id": 2, + "lines": [ + { + "bbox": [ + 325, + 377, + 503, + 435 + ], + "spans": [ + { + "bbox": [ + 325, + 377, + 503, + 435 + ], + "score": 0.958, + "type": "image", + "image_path": "566e1138148ac542d1676690513ab2bab4af3cbfec1257dc70a864019d00681c.jpg" + } + ] + } + ], + "index": 37.5, + "virtual_lines": [ + { + "bbox": [ + 325, + 377, + 503, + 391.5 + ], + "spans": [], + "index": 36 + }, + { + "bbox": [ + 325, + 391.5, + 503, + 406.0 + ], + "spans": [], + "index": 37 + }, + { + "bbox": [ + 325, + 406.0, + 503, + 420.5 + ], + "spans": [], + "index": 38 + }, + { + "bbox": [ + 325, + 420.5, + 503, + 435.0 + ], + "spans": [], + "index": 39 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 324, + 441, + 505, + 482 + ], + "group_id": 2, + "lines": [ + { + "bbox": [ + 324, + 440, + 505, + 452 + ], + "spans": [ + { + "bbox": [ + 324, + 440, + 505, + 452 + ], + "score": 1.0, + "content": "Figure 7: Real-world tasks. Successful rollouts", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 324, + 451, + 505, + 461 + ], + "spans": [ + { + "bbox": [ + 324, + 451, + 505, + 461 + ], + "score": 1.0, + "content": "of CQL tuned with our workflow from Sections 3", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 324, + 461, + 505, + 472 + ], + "spans": [ + { + "bbox": [ + 324, + 461, + 505, + 472 + ], + "score": 1.0, + "content": "& 4. 
Top to bottom: Sawyer lid on pot, Sawyer", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 324, + 471, + 478, + 482 + ], + "spans": [ + { + "bbox": [ + 324, + 471, + 478, + 482 + ], + "score": 1.0, + "content": "drawer opening, WidowX pick-place task.", + "type": "text" + } + ], + "index": 43 + } + ], + "index": 41.5 + } + ], + "index": 39.5 + }, + { + "type": "text", + "bbox": [ + 106, + 488, + 505, + 553 + ], + "lines": [ + { + "bbox": [ + 105, + 486, + 505, + 500 + ], + "spans": [ + { + "bbox": [ + 105, + 486, + 505, + 500 + ], + "score": 1.0, + "content": "Sawyer robot in a tabletop setting to perform two tasks: (1) placing the lid onto a pot and (2) opening", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 498, + 505, + 510 + ], + "spans": [ + { + "bbox": [ + 105, + 498, + 505, + 510 + ], + "score": 1.0, + "content": "a drawer. The robot must perform these tasks in the presence of visual distractor objects, as shown in", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 508, + 505, + 521 + ], + "spans": [ + { + "bbox": [ + 105, + 508, + 505, + 521 + ], + "score": 1.0, + "content": "Figure 7. We directly use the dataset of 100 trajectories for each task collected by Khazatsky et al.", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 105, + 520, + 505, + 532 + ], + "spans": [ + { + "bbox": [ + 105, + 520, + 505, + 532 + ], + "score": 1.0, + "content": "[30] for our experiments so as to mimic the real-world use case of leveraging existing data with", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 105, + 531, + 506, + 543 + ], + "spans": [ + { + "bbox": [ + 105, + 531, + 506, + 543 + ], + "score": 1.0, + "content": "offline RL. 
We use four-dimensional actions with 3D end-effector velocity control in xyz-space and", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 106, + 541, + 475, + 554 + ], + "spans": [ + { + "bbox": [ + 106, + 541, + 475, + 554 + ], + "score": 1.0, + "content": "1D gripper open/close action. More details regarding the setup are provided in Appendix D.", + "type": "text" + } + ], + "index": 49 + } + ], + "index": 46.5, + "bbox_fs": [ + 105, + 486, + 506, + 554 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 559, + 296, + 719 + ], + "lines": [ + { + "bbox": [ + 107, + 558, + 297, + 569 + ], + "spans": [ + { + "bbox": [ + 107, + 558, + 297, + 569 + ], + "score": 1.0, + "content": "We run default CQL on these tasks and track", + "type": "text" + } + ], + "index": 50 + }, + { + "bbox": [ + 105, + 567, + 297, + 582 + ], + "spans": [ + { + "bbox": [ + 105, + 567, + 297, + 582 + ], + "score": 1.0, + "content": "the average Q-value, TD error, and CQL reg-", + "type": "text" + } + ], + "index": 51 + }, + { + "bbox": [ + 106, + 579, + 297, + 592 + ], + "spans": [ + { + "bbox": [ + 106, + 579, + 297, + 592 + ], + "score": 1.0, + "content": "ularizer value. As shown in Figure 8, the av-", + "type": "text" + } + ], + "index": 52 + }, + { + "bbox": [ + 105, + 591, + 297, + 604 + ], + "spans": [ + { + "bbox": [ + 105, + 591, + 297, + 604 + ], + "score": 1.0, + "content": "erage Q-value does not decrease over training,", + "type": "text" + } + ], + "index": 53 + }, + { + "bbox": [ + 106, + 601, + 297, + 613 + ], + "spans": [ + { + "bbox": [ + 106, + 601, + 297, + 613 + ], + "score": 1.0, + "content": "and the TD error (and CQL regularizer shown", + "type": "text" + } + ], + "index": 54 + }, + { + "bbox": [ + 106, + 612, + 297, + 624 + ], + "spans": [ + { + "bbox": [ + 106, + 612, + 297, + 624 + ], + "score": 1.0, + "content": "in Appendix E.2) is large. 
Per our discussion in", + "type": "text" + } + ], + "index": 55 + }, + { + "bbox": [ + 106, + 623, + 297, + 636 + ], + "spans": [ + { + "bbox": [ + 106, + 623, + 297, + 636 + ], + "score": 1.0, + "content": "Section 3, this indicates underfitting. Following", + "type": "text" + } + ], + "index": 56 + }, + { + "bbox": [ + 106, + 635, + 297, + 646 + ], + "spans": [ + { + "bbox": [ + 106, + 635, + 297, + 646 + ], + "score": 1.0, + "content": "our guidelines from Section 4, we utilize a more", + "type": "text" + } + ], + "index": 57 + }, + { + "bbox": [ + 105, + 645, + 297, + 657 + ], + "spans": [ + { + "bbox": [ + 105, + 645, + 297, + 657 + ], + "score": 1.0, + "content": "expressive ResNet policy (Figure 10), which in-", + "type": "text" + } + ], + "index": 58 + }, + { + "bbox": [ + 106, + 657, + 297, + 668 + ], + "spans": [ + { + "bbox": [ + 106, + 657, + 297, + 668 + ], + "score": 1.0, + "content": "creases the number of total convolutional layers", + "type": "text" + } + ], + "index": 59 + }, + { + "bbox": [ + 106, + 667, + 297, + 678 + ], + "spans": [ + { + "bbox": [ + 106, + 667, + 297, + 678 + ], + "score": 1.0, + "content": "from 3 to 9. We observe that this reduces the", + "type": "text" + } + ], + "index": 60 + }, + { + "bbox": [ + 106, + 678, + 297, + 689 + ], + "spans": [ + { + "bbox": [ + 106, + 678, + 297, + 689 + ], + "score": 1.0, + "content": "values of both the TD error Figure 8 and CQL", + "type": "text" + } + ], + "index": 61 + }, + { + "bbox": [ + 106, + 689, + 297, + 700 + ], + "spans": [ + { + "bbox": [ + 106, + 689, + 297, + 700 + ], + "score": 1.0, + "content": "regularizer (Appendix E.2) on both tasks. 
We", + "type": "text" + } + ], + "index": 62 + } + ], + "index": 56, + "bbox_fs": [ + 105, + 558, + 297, + 700 + ] + }, + { + "type": "image", + "bbox": [ + 309, + 554, + 500, + 646 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 309, + 554, + 500, + 646 + ], + "group_id": 3, + "lines": [ + { + "bbox": [ + 309, + 554, + 500, + 646 + ], + "spans": [ + { + "bbox": [ + 309, + 554, + 500, + 646 + ], + "score": 0.966, + "type": "image", + "image_path": "d5317090e1ad32bc153c1daf1e4d3802dfab85d171f13b7f15890473be1358cc.jpg" + } + ] + } + ], + "index": 66, + "virtual_lines": [ + { + "bbox": [ + 309, + 554, + 500, + 567.1428571428571 + ], + "spans": [], + "index": 63 + }, + { + "bbox": [ + 309, + 567.1428571428571, + 500, + 580.2857142857142 + ], + "spans": [], + "index": 64 + }, + { + "bbox": [ + 309, + 580.2857142857142, + 500, + 593.4285714285713 + ], + "spans": [], + "index": 65 + }, + { + "bbox": [ + 309, + 593.4285714285713, + 500, + 606.5714285714284 + ], + "spans": [], + "index": 66 + }, + { + "bbox": [ + 309, + 606.5714285714284, + 500, + 619.7142857142856 + ], + "spans": [], + "index": 67 + }, + { + "bbox": [ + 309, + 619.7142857142856, + 500, + 632.8571428571427 + ], + "spans": [], + "index": 68 + }, + { + "bbox": [ + 309, + 632.8571428571427, + 500, + 645.9999999999998 + ], + "spans": [], + "index": 69 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 304, + 650, + 504, + 700 + ], + "group_id": 3, + "lines": [ + { + "bbox": [ + 303, + 649, + 506, + 662 + ], + "spans": [ + { + "bbox": [ + 303, + 649, + 506, + 662 + ], + "score": 1.0, + "content": "Figure 8: Average Q-value and TD error on Sawyer", + "type": "text" + } + ], + "index": 70 + }, + { + "bbox": [ + 304, + 660, + 505, + 671 + ], + "spans": [ + { + "bbox": [ + 304, + 660, + 505, + 671 + ], + "score": 1.0, + "content": "tasks as model capacity increases. 
Q-values increase", + "type": "text" + } + ], + "index": 71 + }, + { + "bbox": [ + 303, + 670, + 505, + 682 + ], + "spans": [ + { + "bbox": [ + 303, + 670, + 505, + 682 + ], + "score": 1.0, + "content": "over training with lower capacity ruling out overfitting", + "type": "text" + } + ], + "index": 72 + }, + { + "bbox": [ + 304, + 680, + 505, + 691 + ], + "spans": [ + { + "bbox": [ + 304, + 680, + 505, + 691 + ], + "score": 1.0, + "content": "and increasing model capacity leads to a reduction in", + "type": "text" + } + ], + "index": 73 + }, + { + "bbox": [ + 303, + 689, + 479, + 702 + ], + "spans": [ + { + "bbox": [ + 303, + 689, + 479, + 702 + ], + "score": 1.0, + "content": "TD error indicating the presence of underfitting.", + "type": "text" + } + ], + "index": 74 + } + ], + "index": 72 + } + ], + "index": 69.0 + }, + { + "type": "text", + "bbox": [ + 106, + 700, + 505, + 722 + ], + "lines": [ + { + "bbox": [ + 106, + 700, + 505, + 712 + ], + "spans": [ + { + "bbox": [ + 106, + 700, + 505, + 712 + ], + "score": 1.0, + "content": "then evaluate the learned policy over 12 trials conducted with different sets of distractor objects,", + "type": "text" + } + ], + "index": 75 + }, + { + "bbox": [ + 105, + 711, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 711, + 505, + 723 + ], + "score": 1.0, + "content": "including ones that are unseen during training. 
While the policy trained using base CQL is unable", + "type": "text" + } + ], + "index": 76 + }, + { + "bbox": [ + 105, + 240, + 506, + 253 + ], + "spans": [ + { + "bbox": [ + 105, + 240, + 506, + 253 + ], + "score": 1.0, + "content": "to successfully complete either task even once attaining a score of 0/12 on both tasks, the run that", + "type": "text", + "cross_page": true + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 252, + 505, + 264 + ], + "spans": [ + { + "bbox": [ + 106, + 252, + 505, + 264 + ], + "score": 1.0, + "content": "uses ResNet attains a significantly better success rate of 9/12 on the put lid on pot task and 8/12 on", + "type": "text", + "cross_page": true + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 261, + 369, + 276 + ], + "spans": [ + { + "bbox": [ + 105, + 261, + 241, + 276 + ], + "score": 1.0, + "content": "the drawer opening task, equal to", + "type": "text", + "cross_page": true + }, + { + "bbox": [ + 241, + 262, + 269, + 273 + ], + "score": 0.85, + "content": "7 0 . 
8 \\%", + "type": "inline_equation", + "cross_page": true + }, + { + "bbox": [ + 270, + 261, + 369, + 276 + ], + "score": 1.0, + "content": "success rate on average.", + "type": "text", + "cross_page": true + } + ], + "index": 12 + } + ], + "index": 75.5, + "bbox_fs": [ + 105, + 700, + 505, + 723 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "image", + "bbox": [ + 126, + 74, + 481, + 156 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 126, + 74, + 481, + 156 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 126, + 74, + 481, + 156 + ], + "spans": [ + { + "bbox": [ + 126, + 74, + 481, + 156 + ], + "score": 0.965, + "type": "image", + "image_path": "b9dc053e0a74e8660fec33a5ef35e25d9ae5b1e03b4824f6ff4428e483bb79a7.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 126, + 74, + 481, + 101.33333333333333 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 126, + 101.33333333333333, + 481, + 128.66666666666666 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 126, + 128.66666666666666, + 481, + 156.0 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 162, + 505, + 232 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 160, + 505, + 174 + ], + "spans": [ + { + "bbox": [ + 105, + 160, + 505, + 174 + ], + "score": 1.0, + "content": "Figure 9: Q-values (left) and performance of CQL with (middle) and without (right) the variational infor-", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 172, + 505, + 184 + ], + "spans": [ + { + "bbox": [ + 105, + 172, + 505, + 184 + ], + "score": 1.0, + "content": "mation bottleneck correction for overfitting on the real-world widowX pick and place task. 
Since the Q-values", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 182, + 505, + 194 + ], + "spans": [ + { + "bbox": [ + 105, + 182, + 505, + 194 + ], + "score": 1.0, + "content": "start to decrease with more training, our workflow detects that CQL is overfitting. Using our policy selection", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 192, + 505, + 204 + ], + "spans": [ + { + "bbox": [ + 106, + 192, + 505, + 204 + ], + "score": 1.0, + "content": "guideline (Guideline 3.1) enables us to choose checkpoint 50 marked with the green vertical dashed line (right)", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 202, + 505, + 214 + ], + "spans": [ + { + "bbox": [ + 106, + 202, + 505, + 214 + ], + "score": 1.0, + "content": "which performs well. Further, addressing overfitting by applying the VIB regularizer stabilizes the Q-values", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 210, + 506, + 225 + ], + "spans": [ + { + "bbox": [ + 105, + 210, + 506, + 225 + ], + "score": 1.0, + "content": "(brown) which do not decrease unlike base CQL (blue) (left). 
Finally, applying the VIB regularizer improves", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 222, + 341, + 233 + ], + "spans": [ + { + "bbox": [ + 105, + 222, + 341, + 233 + ], + "score": 1.0, + "content": "performance and reduces sensitivity to policy selection (middle).", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 6 + } + ], + "index": 3.5 + }, + { + "type": "text", + "bbox": [ + 108, + 240, + 503, + 273 + ], + "lines": [ + { + "bbox": [ + 105, + 240, + 506, + 253 + ], + "spans": [ + { + "bbox": [ + 105, + 240, + 506, + 253 + ], + "score": 1.0, + "content": "to successfully complete either task even once attaining a score of 0/12 on both tasks, the run that", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 252, + 505, + 264 + ], + "spans": [ + { + "bbox": [ + 106, + 252, + 505, + 264 + ], + "score": 1.0, + "content": "uses ResNet attains a significantly better success rate of 9/12 on the put lid on pot task and 8/12 on", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 261, + 369, + 276 + ], + "spans": [ + { + "bbox": [ + 105, + 261, + 241, + 276 + ], + "score": 1.0, + "content": "the drawer opening task, equal to", + "type": "text" + }, + { + "bbox": [ + 241, + 262, + 269, + 273 + ], + "score": 0.85, + "content": "7 0 . 8 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 270, + 261, + 369, + 276 + ], + "score": 1.0, + "content": "success rate on average.", + "type": "text" + } + ], + "index": 12 + } + ], + "index": 11 + }, + { + "type": "text", + "bbox": [ + 106, + 278, + 505, + 388 + ], + "lines": [ + { + "bbox": [ + 105, + 277, + 506, + 291 + ], + "spans": [ + { + "bbox": [ + 105, + 277, + 506, + 291 + ], + "score": 1.0, + "content": "WidowX pick and place task. 
In our second setting, we tune CQL on a pick and place task with a", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 288, + 505, + 303 + ], + "spans": [ + { + "bbox": [ + 105, + 288, + 505, + 303 + ], + "score": 1.0, + "content": "WidowX 250 robotic arm, shown in Figure 7. The dataset consists of 200 trajectories collected by", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 300, + 505, + 312 + ], + "spans": [ + { + "bbox": [ + 105, + 300, + 309, + 312 + ], + "score": 1.0, + "content": "running a noisy scripted policy (Appendix D) with", + "type": "text" + }, + { + "bbox": [ + 309, + 300, + 329, + 311 + ], + "score": 0.87, + "content": "3 5 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 329, + 300, + 505, + 312 + ], + "score": 1.0, + "content": "success. We run CQL on this task and track", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 311, + 506, + 324 + ], + "spans": [ + { + "bbox": [ + 105, + 311, + 506, + 324 + ], + "score": 1.0, + "content": "the average Q-values, which we find initially increase and then decrease (Figure 9 (left; labeled as", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 322, + 506, + 335 + ], + "spans": [ + { + "bbox": [ + 106, + 322, + 506, + 335 + ], + "score": 1.0, + "content": "β€œQ-values”)), indicating overfitting. We then evaluate our policy selection scheme, which in this", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 333, + 506, + 346 + ], + "spans": [ + { + "bbox": [ + 105, + 333, + 506, + 346 + ], + "score": 1.0, + "content": "case suggests deploying checkpoint 50, the immediate checkpoint after the peak in Q-values. 
To", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 344, + 506, + 357 + ], + "spans": [ + { + "bbox": [ + 105, + 344, + 506, + 357 + ], + "score": 1.0, + "content": "see if this checkpoint is effective, we evaluate the performance of a few other policy checkpoints", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 106, + 355, + 505, + 368 + ], + "spans": [ + { + "bbox": [ + 106, + 355, + 505, + 368 + ], + "score": 1.0, + "content": "(for analysis only) and plot this performance trend in Figure 9 (right) as a dashed line. Observe", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 365, + 506, + 379 + ], + "spans": [ + { + "bbox": [ + 105, + 365, + 506, + 379 + ], + "score": 1.0, + "content": "that indeed the checkpoint found by our workflow attains the highest success rate (7/9) compared to", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 106, + 377, + 315, + 389 + ], + "spans": [ + { + "bbox": [ + 106, + 377, + 263, + 389 + ], + "score": 1.0, + "content": "other checkpoints, which only succeed", + "type": "text" + }, + { + "bbox": [ + 263, + 377, + 288, + 388 + ], + "score": 0.86, + "content": "\\leq 4 / 9", + "type": "inline_equation" + }, + { + "bbox": [ + 288, + 377, + 315, + 389 + ], + "score": 1.0, + "content": "times.", + "type": "text" + } + ], + "index": 22 + } + ], + "index": 17.5 + }, + { + "type": "text", + "bbox": [ + 107, + 393, + 296, + 491 + ], + "lines": [ + { + "bbox": [ + 106, + 393, + 297, + 405 + ], + "spans": [ + { + "bbox": [ + 106, + 393, + 297, + 405 + ], + "score": 1.0, + "content": "Since overfitting is detected, we now turn to ad-", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 404, + 297, + 416 + ], + "spans": [ + { + "bbox": [ + 106, + 404, + 297, + 416 + ], + "score": 1.0, + "content": "dressing overfitting by adding the VIB regular-", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 106, + 415, + 297, + 427 + ], + "spans": [ 
+ { + "bbox": [ + 106, + 415, + 297, + 427 + ], + "score": 1.0, + "content": "izer (Equation 3) during training. As shown in", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 426, + 297, + 438 + ], + "spans": [ + { + "bbox": [ + 105, + 426, + 297, + 438 + ], + "score": 1.0, + "content": "Figure 9 (left), the Q-values obtained after the", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 106, + 437, + 297, + 448 + ], + "spans": [ + { + "bbox": [ + 106, + 437, + 297, + 448 + ], + "score": 1.0, + "content": "addition of this regularizer (shown in brown; la-", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 106, + 447, + 297, + 459 + ], + "spans": [ + { + "bbox": [ + 106, + 447, + 297, + 459 + ], + "score": 1.0, + "content": "beled β€œQ-values (VIB)”) are now stable and do", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 459, + 297, + 470 + ], + "spans": [ + { + "bbox": [ + 105, + 459, + 297, + 470 + ], + "score": 1.0, + "content": "not decrease over the course of training and so", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 470, + 297, + 481 + ], + "spans": [ + { + "bbox": [ + 105, + 470, + 297, + 481 + ], + "score": 1.0, + "content": "we can choose any policy for evaluation. We", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 106, + 480, + 297, + 493 + ], + "spans": [ + { + "bbox": [ + 106, + 480, + 297, + 493 + ], + "score": 1.0, + "content": "evaluate multiple policies, for visualization pur-", + "type": "text" + } + ], + "index": 31 + } + ], + "index": 27 + }, + { + "type": "table", + "bbox": [ + 307, + 392, + 499, + 435 + ], + "blocks": [ + { + "type": "table_body", + "bbox": [ + 307, + 392, + 499, + 435 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 307, + 392, + 499, + 435 + ], + "spans": [ + { + "bbox": [ + 307, + 392, + 499, + 435 + ], + "score": 0.971, + "html": "
Real-world WidowX pick and place
MethodEpoch5075100200
CQL7/94/94/92/9
CQL + VIB3/98/97/97/9
", + "type": "table", + "image_path": "b3b18140952eb1d991a800b9e93dd4974b62b6845fbf24ef1ed2536ac7a948e7.jpg" + } + ] + } + ], + "index": 33, + "virtual_lines": [ + { + "bbox": [ + 307, + 392, + 499, + 406.3333333333333 + ], + "spans": [], + "index": 32 + }, + { + "bbox": [ + 307, + 406.3333333333333, + 499, + 420.66666666666663 + ], + "spans": [], + "index": 33 + }, + { + "bbox": [ + 307, + 420.66666666666663, + 499, + 434.99999999999994 + ], + "spans": [], + "index": 34 + } + ] + } + ], + "index": 33 + }, + { + "type": "text", + "bbox": [ + 304, + 438, + 504, + 489 + ], + "lines": [ + { + "bbox": [ + 303, + 437, + 505, + 450 + ], + "spans": [ + { + "bbox": [ + 303, + 437, + 505, + 450 + ], + "score": 1.0, + "content": "Table 2: Performance of various policy checkpoints", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 304, + 448, + 505, + 459 + ], + "spans": [ + { + "bbox": [ + 304, + 448, + 350, + 459 + ], + "score": 1.0, + "content": "of CQL and", + "type": "text" + }, + { + "bbox": [ + 350, + 448, + 395, + 458 + ], + "score": 0.7, + "content": "\\mathrm { C Q L + V I B }", + "type": "inline_equation" + }, + { + "bbox": [ + 395, + 448, + 505, + 459 + ], + "score": 1.0, + "content": "on the real WidowX pick and", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 304, + 459, + 505, + 469 + ], + "spans": [ + { + "bbox": [ + 304, + 459, + 505, + 469 + ], + "score": 1.0, + "content": "place task (bold entry denotes the checkpoint selected", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 303, + 468, + 506, + 480 + ], + "spans": [ + { + "bbox": [ + 303, + 468, + 506, + 480 + ], + "score": 1.0, + "content": "by our workflow). 
Note that when overfitting is cor-", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 304, + 478, + 489, + 489 + ], + "spans": [ + { + "bbox": [ + 304, + 478, + 489, + 489 + ], + "score": 1.0, + "content": "rected via VIB, multiple checkpoints perform well.", + "type": "text" + } + ], + "index": 39 + } + ], + "index": 37 + }, + { + "type": "text", + "bbox": [ + 107, + 492, + 505, + 557 + ], + "lines": [ + { + "bbox": [ + 104, + 491, + 506, + 504 + ], + "spans": [ + { + "bbox": [ + 104, + 491, + 362, + 504 + ], + "score": 1.0, + "content": "poses only, in Figure 9 (middle), we find that all of them attain a", + "type": "text" + }, + { + "bbox": [ + 362, + 491, + 385, + 502 + ], + "score": 0.82, + "content": "\\geq 7 / 9", + "type": "inline_equation" + }, + { + "bbox": [ + 385, + 491, + 506, + 504 + ], + "score": 1.0, + "content": "success, comparable or better", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 502, + 505, + 515 + ], + "spans": [ + { + "bbox": [ + 105, + 502, + 505, + 515 + ], + "score": 1.0, + "content": "than the base CQL algorithm (Figure 9 (right)). This indicates that addressing overfitting not only", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 513, + 506, + 526 + ], + "spans": [ + { + "bbox": [ + 105, + 513, + 506, + 526 + ], + "score": 1.0, + "content": "leads to some gains in performance but also greatly simplifies policy selection as all checkpoints", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 104, + 524, + 506, + 537 + ], + "spans": [ + { + "bbox": [ + 104, + 524, + 506, + 537 + ], + "score": 1.0, + "content": "perform similarly and well. Table 2 summarizes these results below, where the bold entries denote", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 534, + 504, + 547 + ], + "spans": [ + { + "bbox": [ + 105, + 534, + 504, + 547 + ], + "score": 1.0, + "content": "the checkpoints found by our policy selection rule. 
These results indicate the effectiveness of our", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 546, + 505, + 558 + ], + "spans": [ + { + "bbox": [ + 105, + 546, + 505, + 558 + ], + "score": 1.0, + "content": "workflow in tuning CQL by addressing overfitting and underfitting on multiple real robot platforms.", + "type": "text" + } + ], + "index": 45 + } + ], + "index": 42.5 + }, + { + "type": "title", + "bbox": [ + 107, + 565, + 180, + 578 + ], + "lines": [ + { + "bbox": [ + 105, + 563, + 181, + 581 + ], + "spans": [ + { + "bbox": [ + 105, + 563, + 181, + 581 + ], + "score": 1.0, + "content": "7 Discussion", + "type": "text" + } + ], + "index": 46 + } + ], + "index": 46 + }, + { + "type": "text", + "bbox": [ + 106, + 583, + 505, + 725 + ], + "lines": [ + { + "bbox": [ + 106, + 582, + 505, + 595 + ], + "spans": [ + { + "bbox": [ + 106, + 582, + 505, + 595 + ], + "score": 1.0, + "content": "While offline RL algorithms have improved significantly, applying these methods to real-world", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 105, + 593, + 506, + 606 + ], + "spans": [ + { + "bbox": [ + 105, + 593, + 506, + 606 + ], + "score": 1.0, + "content": "robotic domains is still challenging due to little guidance on tuning them. 
In this paper, we devise", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 104, + 604, + 505, + 617 + ], + "spans": [ + { + "bbox": [ + 104, + 604, + 505, + 617 + ], + "score": 1.0, + "content": "a workflow for algorithms such as CQL and BRAC, which consists of a set of metrics and condi-", + "type": "text" + } + ], + "index": 49 + }, + { + "bbox": [ + 104, + 614, + 505, + 629 + ], + "spans": [ + { + "bbox": [ + 104, + 614, + 505, + 629 + ], + "score": 1.0, + "content": "tions that can be tracked by a practitioner over the course of offline training to detect overfitting", + "type": "text" + } + ], + "index": 50 + }, + { + "bbox": [ + 105, + 626, + 505, + 639 + ], + "spans": [ + { + "bbox": [ + 105, + 626, + 505, + 639 + ], + "score": 1.0, + "content": "and underfitting, and recommendations to addresses the observed challenges. Applying our work-", + "type": "text" + } + ], + "index": 51 + }, + { + "bbox": [ + 106, + 637, + 505, + 649 + ], + "spans": [ + { + "bbox": [ + 106, + 637, + 505, + 649 + ], + "score": 1.0, + "content": "flow both in simulation and the real world shows strong performance benefits. While our proposed", + "type": "text" + } + ], + "index": 52 + }, + { + "bbox": [ + 106, + 648, + 506, + 661 + ], + "spans": [ + { + "bbox": [ + 106, + 648, + 506, + 661 + ], + "score": 1.0, + "content": "workflow is an initial step towards practical robotic offline RL and is based on our best conceptual", + "type": "text" + } + ], + "index": 53 + }, + { + "bbox": [ + 106, + 658, + 506, + 671 + ], + "spans": [ + { + "bbox": [ + 106, + 658, + 506, + 671 + ], + "score": 1.0, + "content": "understanding of certain offline RL algorithms, these guidelines are heuristic. 
To some extent this", + "type": "text" + } + ], + "index": 54 + }, + { + "bbox": [ + 106, + 671, + 505, + 682 + ], + "spans": [ + { + "bbox": [ + 106, + 671, + 505, + 682 + ], + "score": 1.0, + "content": "is unavoidable, since a workflow is a set of guidelines and recommendations, rather than a rigid", + "type": "text" + } + ], + "index": 55 + }, + { + "bbox": [ + 105, + 680, + 505, + 693 + ], + "spans": [ + { + "bbox": [ + 105, + 680, + 505, + 693 + ], + "score": 1.0, + "content": "algorithm. Regardless of how theoretically justified it is, in the end, its value is determined by its", + "type": "text" + } + ], + "index": 56 + }, + { + "bbox": [ + 106, + 691, + 505, + 704 + ], + "spans": [ + { + "bbox": [ + 106, + 691, + 505, + 704 + ], + "score": 1.0, + "content": "ability to produce good results. We believe the breadth of tasks considered, which consist of two dif-", + "type": "text" + } + ], + "index": 57 + }, + { + "bbox": [ + 105, + 701, + 505, + 717 + ], + "spans": [ + { + "bbox": [ + 105, + 701, + 505, + 717 + ], + "score": 1.0, + "content": "ferent real robots and multiple simulated tasks, indicates its broad applicability. 
However, deriving", + "type": "text" + } + ], + "index": 58 + }, + { + "bbox": [ + 106, + 714, + 505, + 727 + ], + "spans": [ + { + "bbox": [ + 106, + 714, + 505, + 727 + ], + "score": 1.0, + "content": "theoretical guarantees regarding workflows of this type is an important direction for future research.", + "type": "text" + } + ], + "index": 59 + } + ], + "index": 53 + } + ], + "page_idx": 7, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 302, + 742, + 308, + 750 + ], + "lines": [ + { + "bbox": [ + 301, + 740, + 310, + 752 + ], + "spans": [ + { + "bbox": [ + 301, + 740, + 310, + 752 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 12, + "width": 9 + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "image", + "bbox": [ + 126, + 74, + 481, + 156 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 126, + 74, + 481, + 156 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 126, + 74, + 481, + 156 + ], + "spans": [ + { + "bbox": [ + 126, + 74, + 481, + 156 + ], + "score": 0.965, + "type": "image", + "image_path": "b9dc053e0a74e8660fec33a5ef35e25d9ae5b1e03b4824f6ff4428e483bb79a7.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 126, + 74, + 481, + 101.33333333333333 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 126, + 101.33333333333333, + 481, + 128.66666666666666 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 126, + 128.66666666666666, + 481, + 156.0 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 162, + 505, + 232 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 160, + 505, + 174 + ], + "spans": [ + { + "bbox": [ + 105, + 160, + 505, + 174 + ], + "score": 1.0, + "content": "Figure 9: Q-values (left) and performance of CQL with (middle) and without (right) the variational infor-", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 172, + 505, + 184 + ], + 
"spans": [ + { + "bbox": [ + 105, + 172, + 505, + 184 + ], + "score": 1.0, + "content": "mation bottleneck correction for overfitting on the real-world widowX pick and place task. Since the Q-values", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 182, + 505, + 194 + ], + "spans": [ + { + "bbox": [ + 105, + 182, + 505, + 194 + ], + "score": 1.0, + "content": "start to decrease with more training, our workflow detects that CQL is overfitting. Using our policy selection", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 192, + 505, + 204 + ], + "spans": [ + { + "bbox": [ + 106, + 192, + 505, + 204 + ], + "score": 1.0, + "content": "guideline (Guideline 3.1) enables us to choose checkpoint 50 marked with the green vertical dashed line (right)", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 202, + 505, + 214 + ], + "spans": [ + { + "bbox": [ + 106, + 202, + 505, + 214 + ], + "score": 1.0, + "content": "which performs well. Further, addressing overfitting by applying the VIB regularizer stabilizes the Q-values", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 210, + 506, + 225 + ], + "spans": [ + { + "bbox": [ + 105, + 210, + 506, + 225 + ], + "score": 1.0, + "content": "(brown) which do not decrease unlike base CQL (blue) (left). 
Finally, applying the VIB regularizer improves", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 222, + 341, + 233 + ], + "spans": [ + { + "bbox": [ + 105, + 222, + 341, + 233 + ], + "score": 1.0, + "content": "performance and reduces sensitivity to policy selection (middle).", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 6 + } + ], + "index": 3.5 + }, + { + "type": "text", + "bbox": [ + 108, + 240, + 503, + 273 + ], + "lines": [], + "index": 11, + "bbox_fs": [ + 105, + 240, + 506, + 276 + ], + "lines_deleted": true + }, + { + "type": "text", + "bbox": [ + 106, + 278, + 505, + 388 + ], + "lines": [ + { + "bbox": [ + 105, + 277, + 506, + 291 + ], + "spans": [ + { + "bbox": [ + 105, + 277, + 506, + 291 + ], + "score": 1.0, + "content": "WidowX pick and place task. In our second setting, we tune CQL on a pick and place task with a", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 288, + 505, + 303 + ], + "spans": [ + { + "bbox": [ + 105, + 288, + 505, + 303 + ], + "score": 1.0, + "content": "WidowX 250 robotic arm, shown in Figure 7. The dataset consists of 200 trajectories collected by", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 300, + 505, + 312 + ], + "spans": [ + { + "bbox": [ + 105, + 300, + 309, + 312 + ], + "score": 1.0, + "content": "running a noisy scripted policy (Appendix D) with", + "type": "text" + }, + { + "bbox": [ + 309, + 300, + 329, + 311 + ], + "score": 0.87, + "content": "3 5 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 329, + 300, + 505, + 312 + ], + "score": 1.0, + "content": "success. 
We run CQL on this task and track", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 311, + 506, + 324 + ], + "spans": [ + { + "bbox": [ + 105, + 311, + 506, + 324 + ], + "score": 1.0, + "content": "the average Q-values, which we find initially increase and then decrease (Figure 9 (left; labeled as", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 322, + 506, + 335 + ], + "spans": [ + { + "bbox": [ + 106, + 322, + 506, + 335 + ], + "score": 1.0, + "content": "β€œQ-values”)), indicating overfitting. We then evaluate our policy selection scheme, which in this", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 333, + 506, + 346 + ], + "spans": [ + { + "bbox": [ + 105, + 333, + 506, + 346 + ], + "score": 1.0, + "content": "case suggests deploying checkpoint 50, the immediate checkpoint after the peak in Q-values. To", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 344, + 506, + 357 + ], + "spans": [ + { + "bbox": [ + 105, + 344, + 506, + 357 + ], + "score": 1.0, + "content": "see if this checkpoint is effective, we evaluate the performance of a few other policy checkpoints", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 106, + 355, + 505, + 368 + ], + "spans": [ + { + "bbox": [ + 106, + 355, + 505, + 368 + ], + "score": 1.0, + "content": "(for analysis only) and plot this performance trend in Figure 9 (right) as a dashed line. 
Observe", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 365, + 506, + 379 + ], + "spans": [ + { + "bbox": [ + 105, + 365, + 506, + 379 + ], + "score": 1.0, + "content": "that indeed the checkpoint found by our workflow attains the highest success rate (7/9) compared to", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 106, + 377, + 315, + 389 + ], + "spans": [ + { + "bbox": [ + 106, + 377, + 263, + 389 + ], + "score": 1.0, + "content": "other checkpoints, which only succeed", + "type": "text" + }, + { + "bbox": [ + 263, + 377, + 288, + 388 + ], + "score": 0.86, + "content": "\\leq 4 / 9", + "type": "inline_equation" + }, + { + "bbox": [ + 288, + 377, + 315, + 389 + ], + "score": 1.0, + "content": "times.", + "type": "text" + } + ], + "index": 22 + } + ], + "index": 17.5, + "bbox_fs": [ + 105, + 277, + 506, + 389 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 393, + 296, + 491 + ], + "lines": [ + { + "bbox": [ + 106, + 393, + 297, + 405 + ], + "spans": [ + { + "bbox": [ + 106, + 393, + 297, + 405 + ], + "score": 1.0, + "content": "Since overfitting is detected, we now turn to ad-", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 404, + 297, + 416 + ], + "spans": [ + { + "bbox": [ + 106, + 404, + 297, + 416 + ], + "score": 1.0, + "content": "dressing overfitting by adding the VIB regular-", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 106, + 415, + 297, + 427 + ], + "spans": [ + { + "bbox": [ + 106, + 415, + 297, + 427 + ], + "score": 1.0, + "content": "izer (Equation 3) during training. 
As shown in", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 426, + 297, + 438 + ], + "spans": [ + { + "bbox": [ + 105, + 426, + 297, + 438 + ], + "score": 1.0, + "content": "Figure 9 (left), the Q-values obtained after the", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 106, + 437, + 297, + 448 + ], + "spans": [ + { + "bbox": [ + 106, + 437, + 297, + 448 + ], + "score": 1.0, + "content": "addition of this regularizer (shown in brown; la-", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 106, + 447, + 297, + 459 + ], + "spans": [ + { + "bbox": [ + 106, + 447, + 297, + 459 + ], + "score": 1.0, + "content": "beled β€œQ-values (VIB)”) are now stable and do", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 459, + 297, + 470 + ], + "spans": [ + { + "bbox": [ + 105, + 459, + 297, + 470 + ], + "score": 1.0, + "content": "not decrease over the course of training and so", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 470, + 297, + 481 + ], + "spans": [ + { + "bbox": [ + 105, + 470, + 297, + 481 + ], + "score": 1.0, + "content": "we can choose any policy for evaluation. We", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 106, + 480, + 297, + 493 + ], + "spans": [ + { + "bbox": [ + 106, + 480, + 297, + 493 + ], + "score": 1.0, + "content": "evaluate multiple policies, for visualization pur-", + "type": "text" + } + ], + "index": 31 + } + ], + "index": 27, + "bbox_fs": [ + 105, + 393, + 297, + 493 + ] + }, + { + "type": "table", + "bbox": [ + 307, + 392, + 499, + 435 + ], + "blocks": [ + { + "type": "table_body", + "bbox": [ + 307, + 392, + 499, + 435 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 307, + 392, + 499, + 435 + ], + "spans": [ + { + "bbox": [ + 307, + 392, + 499, + 435 + ], + "score": 0.971, + "html": "
Real-world WidowX pick and place
MethodEpoch5075100200
CQL7/94/94/92/9
CQL + VIB3/98/97/97/9
", + "type": "table", + "image_path": "b3b18140952eb1d991a800b9e93dd4974b62b6845fbf24ef1ed2536ac7a948e7.jpg" + } + ] + } + ], + "index": 33, + "virtual_lines": [ + { + "bbox": [ + 307, + 392, + 499, + 406.3333333333333 + ], + "spans": [], + "index": 32 + }, + { + "bbox": [ + 307, + 406.3333333333333, + 499, + 420.66666666666663 + ], + "spans": [], + "index": 33 + }, + { + "bbox": [ + 307, + 420.66666666666663, + 499, + 434.99999999999994 + ], + "spans": [], + "index": 34 + } + ] + } + ], + "index": 33 + }, + { + "type": "text", + "bbox": [ + 304, + 438, + 504, + 489 + ], + "lines": [ + { + "bbox": [ + 303, + 437, + 505, + 450 + ], + "spans": [ + { + "bbox": [ + 303, + 437, + 505, + 450 + ], + "score": 1.0, + "content": "Table 2: Performance of various policy checkpoints", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 304, + 448, + 505, + 459 + ], + "spans": [ + { + "bbox": [ + 304, + 448, + 350, + 459 + ], + "score": 1.0, + "content": "of CQL and", + "type": "text" + }, + { + "bbox": [ + 350, + 448, + 395, + 458 + ], + "score": 0.7, + "content": "\\mathrm { C Q L + V I B }", + "type": "inline_equation" + }, + { + "bbox": [ + 395, + 448, + 505, + 459 + ], + "score": 1.0, + "content": "on the real WidowX pick and", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 304, + 459, + 505, + 469 + ], + "spans": [ + { + "bbox": [ + 304, + 459, + 505, + 469 + ], + "score": 1.0, + "content": "place task (bold entry denotes the checkpoint selected", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 303, + 468, + 506, + 480 + ], + "spans": [ + { + "bbox": [ + 303, + 468, + 506, + 480 + ], + "score": 1.0, + "content": "by our workflow). 
Note that when overfitting is cor-", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 304, + 478, + 489, + 489 + ], + "spans": [ + { + "bbox": [ + 304, + 478, + 489, + 489 + ], + "score": 1.0, + "content": "rected via VIB, multiple checkpoints perform well.", + "type": "text" + } + ], + "index": 39 + } + ], + "index": 37, + "bbox_fs": [ + 303, + 437, + 506, + 489 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 492, + 505, + 557 + ], + "lines": [ + { + "bbox": [ + 104, + 491, + 506, + 504 + ], + "spans": [ + { + "bbox": [ + 104, + 491, + 362, + 504 + ], + "score": 1.0, + "content": "poses only, in Figure 9 (middle), we find that all of them attain a", + "type": "text" + }, + { + "bbox": [ + 362, + 491, + 385, + 502 + ], + "score": 0.82, + "content": "\\geq 7 / 9", + "type": "inline_equation" + }, + { + "bbox": [ + 385, + 491, + 506, + 504 + ], + "score": 1.0, + "content": "success, comparable or better", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 502, + 505, + 515 + ], + "spans": [ + { + "bbox": [ + 105, + 502, + 505, + 515 + ], + "score": 1.0, + "content": "than the base CQL algorithm (Figure 9 (right)). This indicates that addressing overfitting not only", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 513, + 506, + 526 + ], + "spans": [ + { + "bbox": [ + 105, + 513, + 506, + 526 + ], + "score": 1.0, + "content": "leads to some gains in performance but also greatly simplifies policy selection as all checkpoints", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 104, + 524, + 506, + 537 + ], + "spans": [ + { + "bbox": [ + 104, + 524, + 506, + 537 + ], + "score": 1.0, + "content": "perform similarly and well. 
Table 2 summarizes these results below, where the bold entries denote", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 534, + 504, + 547 + ], + "spans": [ + { + "bbox": [ + 105, + 534, + 504, + 547 + ], + "score": 1.0, + "content": "the checkpoints found by our policy selection rule. These results indicate the effectiveness of our", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 546, + 505, + 558 + ], + "spans": [ + { + "bbox": [ + 105, + 546, + 505, + 558 + ], + "score": 1.0, + "content": "workflow in tuning CQL by addressing overfitting and underfitting on multiple real robot platforms.", + "type": "text" + } + ], + "index": 45 + } + ], + "index": 42.5, + "bbox_fs": [ + 104, + 491, + 506, + 558 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 565, + 180, + 578 + ], + "lines": [ + { + "bbox": [ + 105, + 563, + 181, + 581 + ], + "spans": [ + { + "bbox": [ + 105, + 563, + 181, + 581 + ], + "score": 1.0, + "content": "7 Discussion", + "type": "text" + } + ], + "index": 46 + } + ], + "index": 46 + }, + { + "type": "text", + "bbox": [ + 106, + 583, + 505, + 725 + ], + "lines": [ + { + "bbox": [ + 106, + 582, + 505, + 595 + ], + "spans": [ + { + "bbox": [ + 106, + 582, + 505, + 595 + ], + "score": 1.0, + "content": "While offline RL algorithms have improved significantly, applying these methods to real-world", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 105, + 593, + 506, + 606 + ], + "spans": [ + { + "bbox": [ + 105, + 593, + 506, + 606 + ], + "score": 1.0, + "content": "robotic domains is still challenging due to little guidance on tuning them. 
In this paper, we devise", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 104, + 604, + 505, + 617 + ], + "spans": [ + { + "bbox": [ + 104, + 604, + 505, + 617 + ], + "score": 1.0, + "content": "a workflow for algorithms such as CQL and BRAC, which consists of a set of metrics and condi-", + "type": "text" + } + ], + "index": 49 + }, + { + "bbox": [ + 104, + 614, + 505, + 629 + ], + "spans": [ + { + "bbox": [ + 104, + 614, + 505, + 629 + ], + "score": 1.0, + "content": "tions that can be tracked by a practitioner over the course of offline training to detect overfitting", + "type": "text" + } + ], + "index": 50 + }, + { + "bbox": [ + 105, + 626, + 505, + 639 + ], + "spans": [ + { + "bbox": [ + 105, + 626, + 505, + 639 + ], + "score": 1.0, + "content": "and underfitting, and recommendations to addresses the observed challenges. Applying our work-", + "type": "text" + } + ], + "index": 51 + }, + { + "bbox": [ + 106, + 637, + 505, + 649 + ], + "spans": [ + { + "bbox": [ + 106, + 637, + 505, + 649 + ], + "score": 1.0, + "content": "flow both in simulation and the real world shows strong performance benefits. While our proposed", + "type": "text" + } + ], + "index": 52 + }, + { + "bbox": [ + 106, + 648, + 506, + 661 + ], + "spans": [ + { + "bbox": [ + 106, + 648, + 506, + 661 + ], + "score": 1.0, + "content": "workflow is an initial step towards practical robotic offline RL and is based on our best conceptual", + "type": "text" + } + ], + "index": 53 + }, + { + "bbox": [ + 106, + 658, + 506, + 671 + ], + "spans": [ + { + "bbox": [ + 106, + 658, + 506, + 671 + ], + "score": 1.0, + "content": "understanding of certain offline RL algorithms, these guidelines are heuristic. 
To some extent this", + "type": "text" + } + ], + "index": 54 + }, + { + "bbox": [ + 106, + 671, + 505, + 682 + ], + "spans": [ + { + "bbox": [ + 106, + 671, + 505, + 682 + ], + "score": 1.0, + "content": "is unavoidable, since a workflow is a set of guidelines and recommendations, rather than a rigid", + "type": "text" + } + ], + "index": 55 + }, + { + "bbox": [ + 105, + 680, + 505, + 693 + ], + "spans": [ + { + "bbox": [ + 105, + 680, + 505, + 693 + ], + "score": 1.0, + "content": "algorithm. Regardless of how theoretically justified it is, in the end, its value is determined by its", + "type": "text" + } + ], + "index": 56 + }, + { + "bbox": [ + 106, + 691, + 505, + 704 + ], + "spans": [ + { + "bbox": [ + 106, + 691, + 505, + 704 + ], + "score": 1.0, + "content": "ability to produce good results. We believe the breadth of tasks considered, which consist of two dif-", + "type": "text" + } + ], + "index": 57 + }, + { + "bbox": [ + 105, + 701, + 505, + 717 + ], + "spans": [ + { + "bbox": [ + 105, + 701, + 505, + 717 + ], + "score": 1.0, + "content": "ferent real robots and multiple simulated tasks, indicates its broad applicability. 
However, deriving", + "type": "text" + } + ], + "index": 58 + }, + { + "bbox": [ + 106, + 714, + 505, + 727 + ], + "spans": [ + { + "bbox": [ + 106, + 714, + 505, + 727 + ], + "score": 1.0, + "content": "theoretical guarantees regarding workflows of this type is an important direction for future research.", + "type": "text" + } + ], + "index": 59 + } + ], + "index": 53, + "bbox_fs": [ + 104, + 582, + 506, + 727 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "title", + "bbox": [ + 108, + 71, + 207, + 84 + ], + "lines": [ + { + "bbox": [ + 105, + 69, + 208, + 87 + ], + "spans": [ + { + "bbox": [ + 105, + 69, + 208, + 87 + ], + "score": 1.0, + "content": "Acknowledgements", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 107, + 91, + 505, + 168 + ], + "lines": [ + { + "bbox": [ + 106, + 91, + 504, + 103 + ], + "spans": [ + { + "bbox": [ + 106, + 91, + 504, + 103 + ], + "score": 1.0, + "content": "We thank Ilya Kostrikov, Avi Singh, Ashvin Nair, Alexander Khazatsky, Albert Yu, Jedrzej Orbik,", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 101, + 506, + 115 + ], + "spans": [ + { + "bbox": [ + 105, + 101, + 506, + 115 + ], + "score": 1.0, + "content": "and Jonathan Yang for their help with setting up and debugging various aspects of the experimental", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 113, + 504, + 125 + ], + "spans": [ + { + "bbox": [ + 105, + 113, + 504, + 125 + ], + "score": 1.0, + "content": "setup as well as for providing us with offline datasets we could test our workflow on. 
We thank Dibya", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 124, + 506, + 136 + ], + "spans": [ + { + "bbox": [ + 106, + 124, + 506, + 136 + ], + "score": 1.0, + "content": "Ghosh, anonymous reviewers, and the area chair from CoRL for constructive feedback on an earlier", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 135, + 505, + 147 + ], + "spans": [ + { + "bbox": [ + 106, + 135, + 505, + 147 + ], + "score": 1.0, + "content": "version of this paper. AK thanks George Tucker and Rishabh Agarwal for valuable discussions. This", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 145, + 505, + 158 + ], + "spans": [ + { + "bbox": [ + 105, + 145, + 505, + 158 + ], + "score": 1.0, + "content": "research was funded by the DARPA Assued Autonomy Program and compute support from Google", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 156, + 195, + 168 + ], + "spans": [ + { + "bbox": [ + 106, + 156, + 195, + 168 + ], + "score": 1.0, + "content": "and Microsoft Azure.", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 4 + }, + { + "type": "title", + "bbox": [ + 107, + 184, + 163, + 197 + ], + "lines": [ + { + "bbox": [ + 106, + 182, + 165, + 199 + ], + "spans": [ + { + "bbox": [ + 106, + 182, + 165, + 199 + ], + "score": 1.0, + "content": "References", + "type": "text" + } + ], + "index": 8 + } + ], + "index": 8 + }, + { + "type": "text", + "bbox": [ + 110, + 201, + 506, + 722 + ], + "lines": [ + { + "bbox": [ + 109, + 203, + 506, + 219 + ], + "spans": [ + { + "bbox": [ + 109, + 203, + 506, + 219 + ], + "score": 1.0, + "content": "[1] D. Kalashnikov, J. Varley, Y. Chebotar, B. Swanson, R. Jonschkowski, C. Finn, S. Levine, and", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 126, + 214, + 506, + 229 + ], + "spans": [ + { + "bbox": [ + 126, + 214, + 506, + 229 + ], + "score": 1.0, + "content": "K. Hausman. 
Mt-opt: Continuous multi-task robotic reinforcement learning at scale. arXiv", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 126, + 226, + 267, + 239 + ], + "spans": [ + { + "bbox": [ + 126, + 226, + 267, + 239 + ], + "score": 1.0, + "content": "preprint arXiv:2104.08212, 2021.", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 110, + 245, + 505, + 260 + ], + "spans": [ + { + "bbox": [ + 110, + 245, + 505, + 260 + ], + "score": 1.0, + "content": "[2] A. Kumar, A. Zhou, G. Tucker, and S. Levine. Conservative q-learning for offline reinforce-", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 126, + 257, + 351, + 270 + ], + "spans": [ + { + "bbox": [ + 126, + 257, + 351, + 270 + ], + "score": 1.0, + "content": "ment learning. arXiv preprint arXiv:2006.04779, 2020.", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 110, + 276, + 506, + 290 + ], + "spans": [ + { + "bbox": [ + 110, + 276, + 506, + 290 + ], + "score": 1.0, + "content": "[3] A. Singh, A. Yu, J. Yang, J. Zhang, A. Kumar, and S. Levine. Cog: Connecting new skills to", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 125, + 287, + 501, + 302 + ], + "spans": [ + { + "bbox": [ + 125, + 287, + 501, + 302 + ], + "score": 1.0, + "content": "past experience with offline reinforcement learning. arXiv preprint arXiv:2010.14500, 2020.", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 109, + 306, + 506, + 322 + ], + "spans": [ + { + "bbox": [ + 109, + 306, + 506, + 322 + ], + "score": 1.0, + "content": "[4] Y. Chebotar, K. Hausman, Y. Lu, T. Xiao, D. Kalashnikov, J. Varley, A. Irpan, B. Eysenbach,", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 126, + 317, + 506, + 332 + ], + "spans": [ + { + "bbox": [ + 126, + 317, + 506, + 332 + ], + "score": 1.0, + "content": "R. Julian, C. Finn, and S. Levine. 
Actionable models: Unsupervised offline reinforcement", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 126, + 329, + 394, + 343 + ], + "spans": [ + { + "bbox": [ + 126, + 329, + 394, + 343 + ], + "score": 1.0, + "content": "learning of robotic skills. arXiv preprint arXiv:2104.07749, 2021.", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 109, + 348, + 506, + 363 + ], + "spans": [ + { + "bbox": [ + 109, + 348, + 506, + 363 + ], + "score": 1.0, + "content": "[5] D. Kalashnikov, A. Irpan, P. Pastor, J. Ibarz, A. Herzog, E. Jang, D. Quillen, E. Holly,", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 126, + 360, + 506, + 374 + ], + "spans": [ + { + "bbox": [ + 126, + 360, + 506, + 374 + ], + "score": 1.0, + "content": "M. Kalakrishnan, V. Vanhoucke, et al. Scalable deep reinforcement learning for vision-based", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 126, + 370, + 479, + 386 + ], + "spans": [ + { + "bbox": [ + 126, + 370, + 479, + 386 + ], + "score": 1.0, + "content": "robotic manipulation. In Conference on Robot Learning, pages 651–673. PMLR, 2018.", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 109, + 389, + 506, + 406 + ], + "spans": [ + { + "bbox": [ + 109, + 389, + 506, + 406 + ], + "score": 1.0, + "content": "[6] D. Precup. Eligibility traces for off-policy policy evaluation. Computer Science Department", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 126, + 401, + 300, + 415 + ], + "spans": [ + { + "bbox": [ + 126, + 401, + 300, + 415 + ], + "score": 1.0, + "content": "Faculty Publication Series, page 80, 2000.", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 109, + 420, + 505, + 437 + ], + "spans": [ + { + "bbox": [ + 109, + 420, + 505, + 437 + ], + "score": 1.0, + "content": "[7] I. Kostrikov and O. Nachum. 
Statistical bootstrapping for uncertainty estimation in off-policy", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 126, + 432, + 338, + 446 + ], + "spans": [ + { + "bbox": [ + 126, + 432, + 338, + 446 + ], + "score": 1.0, + "content": "evaluation. arXiv preprint arXiv:2007.13609, 2020.", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 110, + 452, + 505, + 466 + ], + "spans": [ + { + "bbox": [ + 110, + 452, + 505, + 466 + ], + "score": 1.0, + "content": "[8] C. Paduraru. Off-policy evaluation in Markov decision processes. PhD thesis, Ph. D. Disserta-", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 126, + 463, + 252, + 477 + ], + "spans": [ + { + "bbox": [ + 126, + 463, + 252, + 477 + ], + "score": 1.0, + "content": "tion. McGill University, 2012.", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 110, + 482, + 506, + 497 + ], + "spans": [ + { + "bbox": [ + 110, + 482, + 506, + 497 + ], + "score": 1.0, + "content": "[9] T. L. Paine, C. Paduraru, A. Michi, C. Gulcehre, K. Zolna, A. Novikov, Z. Wang, and", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 126, + 493, + 506, + 508 + ], + "spans": [ + { + "bbox": [ + 126, + 493, + 506, + 508 + ], + "score": 1.0, + "content": "N. de Freitas. Hyperparameter selection for offline reinforcement learning. arXiv preprint", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 126, + 504, + 231, + 518 + ], + "spans": [ + { + "bbox": [ + 126, + 504, + 231, + 518 + ], + "score": 1.0, + "content": "arXiv:2007.09055, 2020.", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 106, + 523, + 506, + 540 + ], + "spans": [ + { + "bbox": [ + 106, + 523, + 506, + 540 + ], + "score": 1.0, + "content": "[10] O. Nachum and B. Dai. Reinforcement learning via fenchel-rockafellar duality. 
arXiv preprint", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 126, + 535, + 231, + 548 + ], + "spans": [ + { + "bbox": [ + 126, + 535, + 231, + 548 + ], + "score": 1.0, + "content": "arXiv:2001.01866, 2020.", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 108, + 555, + 506, + 569 + ], + "spans": [ + { + "bbox": [ + 108, + 555, + 506, + 569 + ], + "score": 1.0, + "content": "[11] P. Thomas, G. Theocharous, and M. Ghavamzadeh. High confidence policy improvement. In", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 126, + 565, + 423, + 581 + ], + "spans": [ + { + "bbox": [ + 126, + 565, + 423, + 581 + ], + "score": 1.0, + "content": "International Conference on Machine Learning, pages 2380–2388, 2015.", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 108, + 586, + 505, + 600 + ], + "spans": [ + { + "bbox": [ + 108, + 586, + 505, + 600 + ], + "score": 1.0, + "content": "[12] P. S. Thomas, G. Theocharous, and M. Ghavamzadeh. High-confidence off-policy evaluation.", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 126, + 595, + 395, + 612 + ], + "spans": [ + { + "bbox": [ + 126, + 595, + 395, + 612 + ], + "score": 1.0, + "content": "In Twenty-Ninth AAAI Conference on Artificial Intelligence, 2015.", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 108, + 615, + 505, + 630 + ], + "spans": [ + { + "bbox": [ + 108, + 615, + 505, + 630 + ], + "score": 1.0, + "content": "[13] N. Jiang and L. Li. Doubly robust off-policy value evaluation for reinforcement learning. arXiv", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 125, + 628, + 266, + 640 + ], + "spans": [ + { + "bbox": [ + 125, + 628, + 266, + 640 + ], + "score": 1.0, + "content": "preprint arXiv:1511.03722, 2015.", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 106, + 645, + 507, + 664 + ], + "spans": [ + { + "bbox": [ + 106, + 645, + 507, + 664 + ], + "score": 1.0, + "content": "[14] J. Fu, M. 
Norouzi, O. Nachum, G. Tucker, ziyu wang, A. Novikov, M. Yang, M. R. Zhang,", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 126, + 657, + 505, + 673 + ], + "spans": [ + { + "bbox": [ + 126, + 657, + 505, + 673 + ], + "score": 1.0, + "content": "Y. Chen, A. Kumar, C. Paduraru, S. Levine, and T. Paine. Benchmarks for deep off-policy", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 126, + 667, + 505, + 685 + ], + "spans": [ + { + "bbox": [ + 126, + 667, + 505, + 685 + ], + "score": 1.0, + "content": "evaluation. In International Conference on Learning Representations, 2021. URL https:", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 128, + 680, + 327, + 694 + ], + "spans": [ + { + "bbox": [ + 128, + 680, + 327, + 694 + ], + "score": 1.0, + "content": "//openreview.net/forum?id=kWSeGEeHvF8.", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 107, + 699, + 506, + 715 + ], + "spans": [ + { + "bbox": [ + 107, + 699, + 506, + 715 + ], + "score": 1.0, + "content": "[15] S. Levine, A. Kumar, G. Tucker, and J. Fu. Offline reinforcement learning: Tutorial, review,", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 126, + 711, + 437, + 725 + ], + "spans": [ + { + "bbox": [ + 126, + 711, + 437, + 725 + ], + "score": 1.0, + "content": "and perspectives on open problems. 
arXiv preprint arXiv:2005.01643, 2020.", + "type": "text" + } + ], + "index": 44 + } + ], + "index": 26.5 + } + ], + "page_idx": 8, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 302, + 741, + 309, + 750 + ], + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 752 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 752 + ], + "score": 1.0, + "content": "9", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "title", + "bbox": [ + 108, + 71, + 207, + 84 + ], + "lines": [ + { + "bbox": [ + 105, + 69, + 208, + 87 + ], + "spans": [ + { + "bbox": [ + 105, + 69, + 208, + 87 + ], + "score": 1.0, + "content": "Acknowledgements", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 107, + 91, + 505, + 168 + ], + "lines": [ + { + "bbox": [ + 106, + 91, + 504, + 103 + ], + "spans": [ + { + "bbox": [ + 106, + 91, + 504, + 103 + ], + "score": 1.0, + "content": "We thank Ilya Kostrikov, Avi Singh, Ashvin Nair, Alexander Khazatsky, Albert Yu, Jedrzej Orbik,", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 101, + 506, + 115 + ], + "spans": [ + { + "bbox": [ + 105, + 101, + 506, + 115 + ], + "score": 1.0, + "content": "and Jonathan Yang for their help with setting up and debugging various aspects of the experimental", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 113, + 504, + 125 + ], + "spans": [ + { + "bbox": [ + 105, + 113, + 504, + 125 + ], + "score": 1.0, + "content": "setup as well as for providing us with offline datasets we could test our workflow on. 
We thank Dibya", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 124, + 506, + 136 + ], + "spans": [ + { + "bbox": [ + 106, + 124, + 506, + 136 + ], + "score": 1.0, + "content": "Ghosh, anonymous reviewers, and the area chair from CoRL for constructive feedback on an earlier", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 135, + 505, + 147 + ], + "spans": [ + { + "bbox": [ + 106, + 135, + 505, + 147 + ], + "score": 1.0, + "content": "version of this paper. AK thanks George Tucker and Rishabh Agarwal for valuable discussions. This", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 145, + 505, + 158 + ], + "spans": [ + { + "bbox": [ + 105, + 145, + 505, + 158 + ], + "score": 1.0, + "content": "research was funded by the DARPA Assued Autonomy Program and compute support from Google", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 156, + 195, + 168 + ], + "spans": [ + { + "bbox": [ + 106, + 156, + 195, + 168 + ], + "score": 1.0, + "content": "and Microsoft Azure.", + "type": "text" + } + ], + "index": 7 + } + ], + "index": 4, + "bbox_fs": [ + 105, + 91, + 506, + 168 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 184, + 163, + 197 + ], + "lines": [ + { + "bbox": [ + 106, + 182, + 165, + 199 + ], + "spans": [ + { + "bbox": [ + 106, + 182, + 165, + 199 + ], + "score": 1.0, + "content": "References", + "type": "text" + } + ], + "index": 8 + } + ], + "index": 8 + }, + { + "type": "list", + "bbox": [ + 110, + 201, + 506, + 722 + ], + "lines": [ + { + "bbox": [ + 109, + 203, + 506, + 219 + ], + "spans": [ + { + "bbox": [ + 109, + 203, + 506, + 219 + ], + "score": 1.0, + "content": "[1] D. Kalashnikov, J. Varley, Y. Chebotar, B. Swanson, R. Jonschkowski, C. Finn, S. 
Levine, and", + "type": "text" + } + ], + "index": 9, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 214, + 506, + 229 + ], + "spans": [ + { + "bbox": [ + 126, + 214, + 506, + 229 + ], + "score": 1.0, + "content": "K. Hausman. Mt-opt: Continuous multi-task robotic reinforcement learning at scale. arXiv", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 126, + 226, + 267, + 239 + ], + "spans": [ + { + "bbox": [ + 126, + 226, + 267, + 239 + ], + "score": 1.0, + "content": "preprint arXiv:2104.08212, 2021.", + "type": "text" + } + ], + "index": 11, + "is_list_end_line": true + }, + { + "bbox": [ + 110, + 245, + 505, + 260 + ], + "spans": [ + { + "bbox": [ + 110, + 245, + 505, + 260 + ], + "score": 1.0, + "content": "[2] A. Kumar, A. Zhou, G. Tucker, and S. Levine. Conservative q-learning for offline reinforce-", + "type": "text" + } + ], + "index": 12, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 257, + 351, + 270 + ], + "spans": [ + { + "bbox": [ + 126, + 257, + 351, + 270 + ], + "score": 1.0, + "content": "ment learning. arXiv preprint arXiv:2006.04779, 2020.", + "type": "text" + } + ], + "index": 13, + "is_list_end_line": true + }, + { + "bbox": [ + 110, + 276, + 506, + 290 + ], + "spans": [ + { + "bbox": [ + 110, + 276, + 506, + 290 + ], + "score": 1.0, + "content": "[3] A. Singh, A. Yu, J. Yang, J. Zhang, A. Kumar, and S. Levine. Cog: Connecting new skills to", + "type": "text" + } + ], + "index": 14, + "is_list_start_line": true + }, + { + "bbox": [ + 125, + 287, + 501, + 302 + ], + "spans": [ + { + "bbox": [ + 125, + 287, + 501, + 302 + ], + "score": 1.0, + "content": "past experience with offline reinforcement learning. arXiv preprint arXiv:2010.14500, 2020.", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 109, + 306, + 506, + 322 + ], + "spans": [ + { + "bbox": [ + 109, + 306, + 506, + 322 + ], + "score": 1.0, + "content": "[4] Y. Chebotar, K. Hausman, Y. Lu, T. Xiao, D. Kalashnikov, J. Varley, A. 
Irpan, B. Eysenbach,", + "type": "text" + } + ], + "index": 16, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 317, + 506, + 332 + ], + "spans": [ + { + "bbox": [ + 126, + 317, + 506, + 332 + ], + "score": 1.0, + "content": "R. Julian, C. Finn, and S. Levine. Actionable models: Unsupervised offline reinforcement", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 126, + 329, + 394, + 343 + ], + "spans": [ + { + "bbox": [ + 126, + 329, + 394, + 343 + ], + "score": 1.0, + "content": "learning of robotic skills. arXiv preprint arXiv:2104.07749, 2021.", + "type": "text" + } + ], + "index": 18, + "is_list_end_line": true + }, + { + "bbox": [ + 109, + 348, + 506, + 363 + ], + "spans": [ + { + "bbox": [ + 109, + 348, + 506, + 363 + ], + "score": 1.0, + "content": "[5] D. Kalashnikov, A. Irpan, P. Pastor, J. Ibarz, A. Herzog, E. Jang, D. Quillen, E. Holly,", + "type": "text" + } + ], + "index": 19, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 360, + 506, + 374 + ], + "spans": [ + { + "bbox": [ + 126, + 360, + 506, + 374 + ], + "score": 1.0, + "content": "M. Kalakrishnan, V. Vanhoucke, et al. Scalable deep reinforcement learning for vision-based", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 126, + 370, + 479, + 386 + ], + "spans": [ + { + "bbox": [ + 126, + 370, + 479, + 386 + ], + "score": 1.0, + "content": "robotic manipulation. In Conference on Robot Learning, pages 651–673. PMLR, 2018.", + "type": "text" + } + ], + "index": 21, + "is_list_end_line": true + }, + { + "bbox": [ + 109, + 389, + 506, + 406 + ], + "spans": [ + { + "bbox": [ + 109, + 389, + 506, + 406 + ], + "score": 1.0, + "content": "[6] D. Precup. Eligibility traces for off-policy policy evaluation. 
Computer Science Department", + "type": "text" + } + ], + "index": 22, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 401, + 300, + 415 + ], + "spans": [ + { + "bbox": [ + 126, + 401, + 300, + 415 + ], + "score": 1.0, + "content": "Faculty Publication Series, page 80, 2000.", + "type": "text" + } + ], + "index": 23, + "is_list_end_line": true + }, + { + "bbox": [ + 109, + 420, + 505, + 437 + ], + "spans": [ + { + "bbox": [ + 109, + 420, + 505, + 437 + ], + "score": 1.0, + "content": "[7] I. Kostrikov and O. Nachum. Statistical bootstrapping for uncertainty estimation in off-policy", + "type": "text" + } + ], + "index": 24, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 432, + 338, + 446 + ], + "spans": [ + { + "bbox": [ + 126, + 432, + 338, + 446 + ], + "score": 1.0, + "content": "evaluation. arXiv preprint arXiv:2007.13609, 2020.", + "type": "text" + } + ], + "index": 25, + "is_list_end_line": true + }, + { + "bbox": [ + 110, + 452, + 505, + 466 + ], + "spans": [ + { + "bbox": [ + 110, + 452, + 505, + 466 + ], + "score": 1.0, + "content": "[8] C. Paduraru. Off-policy evaluation in Markov decision processes. PhD thesis, Ph. D. Disserta-", + "type": "text" + } + ], + "index": 26, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 463, + 252, + 477 + ], + "spans": [ + { + "bbox": [ + 126, + 463, + 252, + 477 + ], + "score": 1.0, + "content": "tion. McGill University, 2012.", + "type": "text" + } + ], + "index": 27, + "is_list_end_line": true + }, + { + "bbox": [ + 110, + 482, + 506, + 497 + ], + "spans": [ + { + "bbox": [ + 110, + 482, + 506, + 497 + ], + "score": 1.0, + "content": "[9] T. L. Paine, C. Paduraru, A. Michi, C. Gulcehre, K. Zolna, A. Novikov, Z. Wang, and", + "type": "text" + } + ], + "index": 28, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 493, + 506, + 508 + ], + "spans": [ + { + "bbox": [ + 126, + 493, + 506, + 508 + ], + "score": 1.0, + "content": "N. de Freitas. 
Hyperparameter selection for offline reinforcement learning. arXiv preprint", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 126, + 504, + 231, + 518 + ], + "spans": [ + { + "bbox": [ + 126, + 504, + 231, + 518 + ], + "score": 1.0, + "content": "arXiv:2007.09055, 2020.", + "type": "text" + } + ], + "index": 30, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 523, + 506, + 540 + ], + "spans": [ + { + "bbox": [ + 106, + 523, + 506, + 540 + ], + "score": 1.0, + "content": "[10] O. Nachum and B. Dai. Reinforcement learning via fenchel-rockafellar duality. arXiv preprint", + "type": "text" + } + ], + "index": 31, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 535, + 231, + 548 + ], + "spans": [ + { + "bbox": [ + 126, + 535, + 231, + 548 + ], + "score": 1.0, + "content": "arXiv:2001.01866, 2020.", + "type": "text" + } + ], + "index": 32, + "is_list_end_line": true + }, + { + "bbox": [ + 108, + 555, + 506, + 569 + ], + "spans": [ + { + "bbox": [ + 108, + 555, + 506, + 569 + ], + "score": 1.0, + "content": "[11] P. Thomas, G. Theocharous, and M. Ghavamzadeh. High confidence policy improvement. In", + "type": "text" + } + ], + "index": 33, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 565, + 423, + 581 + ], + "spans": [ + { + "bbox": [ + 126, + 565, + 423, + 581 + ], + "score": 1.0, + "content": "International Conference on Machine Learning, pages 2380–2388, 2015.", + "type": "text" + } + ], + "index": 34, + "is_list_end_line": true + }, + { + "bbox": [ + 108, + 586, + 505, + 600 + ], + "spans": [ + { + "bbox": [ + 108, + 586, + 505, + 600 + ], + "score": 1.0, + "content": "[12] P. S. Thomas, G. Theocharous, and M. Ghavamzadeh. 
High-confidence off-policy evaluation.", + "type": "text" + } + ], + "index": 35, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 595, + 395, + 612 + ], + "spans": [ + { + "bbox": [ + 126, + 595, + 395, + 612 + ], + "score": 1.0, + "content": "In Twenty-Ninth AAAI Conference on Artificial Intelligence, 2015.", + "type": "text" + } + ], + "index": 36, + "is_list_end_line": true + }, + { + "bbox": [ + 108, + 615, + 505, + 630 + ], + "spans": [ + { + "bbox": [ + 108, + 615, + 505, + 630 + ], + "score": 1.0, + "content": "[13] N. Jiang and L. Li. Doubly robust off-policy value evaluation for reinforcement learning. arXiv", + "type": "text" + } + ], + "index": 37, + "is_list_start_line": true + }, + { + "bbox": [ + 125, + 628, + 266, + 640 + ], + "spans": [ + { + "bbox": [ + 125, + 628, + 266, + 640 + ], + "score": 1.0, + "content": "preprint arXiv:1511.03722, 2015.", + "type": "text" + } + ], + "index": 38, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 645, + 507, + 664 + ], + "spans": [ + { + "bbox": [ + 106, + 645, + 507, + 664 + ], + "score": 1.0, + "content": "[14] J. Fu, M. Norouzi, O. Nachum, G. Tucker, ziyu wang, A. Novikov, M. Yang, M. R. Zhang,", + "type": "text" + } + ], + "index": 39, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 657, + 505, + 673 + ], + "spans": [ + { + "bbox": [ + 126, + 657, + 505, + 673 + ], + "score": 1.0, + "content": "Y. Chen, A. Kumar, C. Paduraru, S. Levine, and T. Paine. Benchmarks for deep off-policy", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 126, + 667, + 505, + 685 + ], + "spans": [ + { + "bbox": [ + 126, + 667, + 505, + 685 + ], + "score": 1.0, + "content": "evaluation. In International Conference on Learning Representations, 2021. 
URL https:", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 128, + 680, + 327, + 694 + ], + "spans": [ + { + "bbox": [ + 128, + 680, + 327, + 694 + ], + "score": 1.0, + "content": "//openreview.net/forum?id=kWSeGEeHvF8.", + "type": "text" + } + ], + "index": 42, + "is_list_end_line": true + }, + { + "bbox": [ + 107, + 699, + 506, + 715 + ], + "spans": [ + { + "bbox": [ + 107, + 699, + 506, + 715 + ], + "score": 1.0, + "content": "[15] S. Levine, A. Kumar, G. Tucker, and J. Fu. Offline reinforcement learning: Tutorial, review,", + "type": "text" + } + ], + "index": 43, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 711, + 437, + 725 + ], + "spans": [ + { + "bbox": [ + 126, + 711, + 437, + 725 + ], + "score": 1.0, + "content": "and perspectives on open problems. arXiv preprint arXiv:2005.01643, 2020.", + "type": "text" + } + ], + "index": 44, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 73, + 506, + 87 + ], + "spans": [ + { + "bbox": [ + 105, + 73, + 506, + 87 + ], + "score": 1.0, + "content": "[16] Y. Wu, G. Tucker, and O. Nachum. Behavior regularized offline reinforcement learning. arXiv", + "type": "text", + "cross_page": true + } + ], + "index": 0, + "is_list_start_line": true + }, + { + "bbox": [ + 124, + 83, + 268, + 98 + ], + "spans": [ + { + "bbox": [ + 124, + 83, + 268, + 98 + ], + "score": 1.0, + "content": "preprint arXiv:1911.11361, 2019.", + "type": "text", + "cross_page": true + } + ], + "index": 1, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 103, + 505, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 103, + 505, + 118 + ], + "score": 1.0, + "content": "[17] T. P. Lillicrap, J. J. Hunt, A. Pritzel, N. Heess, T. Erez, Y. Tassa, D. Silver, and D. 
Wierstra.", + "type": "text", + "cross_page": true + } + ], + "index": 2, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 115, + 505, + 129 + ], + "spans": [ + { + "bbox": [ + 127, + 115, + 505, + 129 + ], + "score": 1.0, + "content": "Continuous control with deep reinforcement learning. arXiv preprint arXiv:1509.02971, 2015.", + "type": "text", + "cross_page": true + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 133, + 506, + 150 + ], + "spans": [ + { + "bbox": [ + 105, + 133, + 506, + 150 + ], + "score": 1.0, + "content": "[18] S. Fujimoto, H. Van Hoof, and D. Meger. Addressing function approximation error in actor-", + "type": "text", + "cross_page": true + } + ], + "index": 4, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 146, + 354, + 160 + ], + "spans": [ + { + "bbox": [ + 127, + 146, + 354, + 160 + ], + "score": 1.0, + "content": "critic methods. arXiv preprint arXiv:1802.09477, 2018.", + "type": "text", + "cross_page": true + } + ], + "index": 5, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 166, + 505, + 181 + ], + "spans": [ + { + "bbox": [ + 105, + 166, + 505, + 181 + ], + "score": 1.0, + "content": "[19] T. Haarnoja, A. Zhou, P. Abbeel, and S. Levine. Soft actor-critic: Off-policy maximum entropy", + "type": "text", + "cross_page": true + } + ], + "index": 6, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 177, + 501, + 191 + ], + "spans": [ + { + "bbox": [ + 127, + 177, + 501, + 191 + ], + "score": 1.0, + "content": "deep reinforcement learning with a stochastic actor. arXiv preprint arXiv:1801.01290, 2018.", + "type": "text", + "cross_page": true + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 197, + 506, + 212 + ], + "spans": [ + { + "bbox": [ + 106, + 197, + 506, + 212 + ], + "score": 1.0, + "content": "[20] I. Kostrikov, J. Tompson, R. Fergus, and O. Nachum. 
Offline reinforcement learning with", + "type": "text", + "cross_page": true + } + ], + "index": 8, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 209, + 444, + 221 + ], + "spans": [ + { + "bbox": [ + 127, + 209, + 444, + 221 + ], + "score": 1.0, + "content": "fisher divergence critic regularization. arXiv preprint arXiv:2103.08050, 2021.", + "type": "text", + "cross_page": true + } + ], + "index": 9, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 228, + 506, + 242 + ], + "spans": [ + { + "bbox": [ + 105, + 228, + 506, + 242 + ], + "score": 1.0, + "content": "[21] A. Kumar, R. Agarwal, D. Ghosh, and S. Levine. Implicit under-parameterization inhibits", + "type": "text", + "cross_page": true + } + ], + "index": 10, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 239, + 505, + 253 + ], + "spans": [ + { + "bbox": [ + 127, + 239, + 505, + 253 + ], + "score": 1.0, + "content": "data-efficient deep reinforcement learning. In International Conference on Learning Repre-", + "type": "text", + "cross_page": true + } + ], + "index": 11 + }, + { + "bbox": [ + 126, + 250, + 451, + 264 + ], + "spans": [ + { + "bbox": [ + 126, + 250, + 451, + 264 + ], + "score": 1.0, + "content": "sentations, 2021. URL https://openreview.net/forum?id=O9bnihsFfXU.", + "type": "text", + "cross_page": true + } + ], + "index": 12, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 270, + 506, + 285 + ], + "spans": [ + { + "bbox": [ + 105, + 270, + 506, + 285 + ], + "score": 1.0, + "content": "[22] A. Kumar, R. Agarwal, A. Courville, T. Ma, G. Tucker, and S. Levine. Value-based deep", + "type": "text", + "cross_page": true + } + ], + "index": 13, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 281, + 506, + 295 + ], + "spans": [ + { + "bbox": [ + 127, + 281, + 506, + 295 + ], + "score": 1.0, + "content": "reinforcement learning requires explicit regularization. 
In RL for Real Life Workshop &", + "type": "text", + "cross_page": true + } + ], + "index": 14 + }, + { + "bbox": [ + 127, + 293, + 505, + 307 + ], + "spans": [ + { + "bbox": [ + 127, + 293, + 505, + 307 + ], + "score": 1.0, + "content": "Overparameterization: Pitfalls and Opportunities Workshop, ICML, 2021. URL https:", + "type": "text", + "cross_page": true + } + ], + "index": 15 + }, + { + "bbox": [ + 127, + 304, + 469, + 318 + ], + "spans": [ + { + "bbox": [ + 127, + 304, + 469, + 318 + ], + "score": 1.0, + "content": "//drive.google.com/file/d/1Fg43H5oagQp-ksjpWBf_aDYEzAFMVJm6/view.", + "type": "text", + "cross_page": true + } + ], + "index": 16, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 323, + 506, + 338 + ], + "spans": [ + { + "bbox": [ + 106, + 323, + 506, + 338 + ], + "score": 1.0, + "content": "[23] R. Munos. Error bounds for approximate policy iteration. In Proceedings of the Twentieth", + "type": "text", + "cross_page": true + } + ], + "index": 17, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 334, + 506, + 349 + ], + "spans": [ + { + "bbox": [ + 126, + 334, + 506, + 349 + ], + "score": 1.0, + "content": "International Conference on International Conference on Machine Learning, ICML’03, page", + "type": "text", + "cross_page": true + } + ], + "index": 18 + }, + { + "bbox": [ + 128, + 347, + 327, + 358 + ], + "spans": [ + { + "bbox": [ + 128, + 347, + 327, + 358 + ], + "score": 1.0, + "content": "560–567. AAAI Press, 2003. ISBN 1577351894.", + "type": "text", + "cross_page": true + } + ], + "index": 19, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 365, + 506, + 379 + ], + "spans": [ + { + "bbox": [ + 106, + 365, + 506, + 379 + ], + "score": 1.0, + "content": "[24] N. Srivastava, G. Hinton, A. Krizhevsky, I. Sutskever, and R. Salakhutdinov. 
Dropout: a simple", + "type": "text", + "cross_page": true + } + ], + "index": 20, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 377, + 506, + 390 + ], + "spans": [ + { + "bbox": [ + 127, + 377, + 506, + 390 + ], + "score": 1.0, + "content": "way to prevent neural networks from overfitting. The journal of machine learning research, 15", + "type": "text", + "cross_page": true + } + ], + "index": 21 + }, + { + "bbox": [ + 127, + 388, + 218, + 400 + ], + "spans": [ + { + "bbox": [ + 127, + 388, + 218, + 400 + ], + "score": 1.0, + "content": "(1):1929–1958, 2014.", + "type": "text", + "cross_page": true + } + ], + "index": 22, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 408, + 505, + 422 + ], + "spans": [ + { + "bbox": [ + 106, + 408, + 505, + 422 + ], + "score": 1.0, + "content": "[25] A. A. Alemi, I. Fischer, J. V. Dillon, and K. Murphy. Deep variational information bottleneck.", + "type": "text", + "cross_page": true + } + ], + "index": 23, + "is_list_start_line": true + }, + { + "bbox": [ + 125, + 419, + 292, + 433 + ], + "spans": [ + { + "bbox": [ + 125, + 419, + 292, + 433 + ], + "score": 1.0, + "content": "arXiv preprint arXiv:1612.00410, 2016.", + "type": "text", + "cross_page": true + } + ], + "index": 24, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 437, + 505, + 455 + ], + "spans": [ + { + "bbox": [ + 105, + 437, + 505, + 455 + ], + "score": 1.0, + "content": "[26] A. Achille and S. Soatto. Emergence of invariance and disentanglement in deep representa-", + "type": "text", + "cross_page": true + } + ], + "index": 25, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 450, + 431, + 463 + ], + "spans": [ + { + "bbox": [ + 126, + 450, + 431, + 463 + ], + "score": 1.0, + "content": "tions. 
The Journal of Machine Learning Research, 19(1):1947–1980, 2018.", + "type": "text", + "cross_page": true + } + ], + "index": 26, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 469, + 506, + 484 + ], + "spans": [ + { + "bbox": [ + 104, + 469, + 506, + 484 + ], + "score": 1.0, + "content": "[27] K. He, X. Zhang, S. Ren, and J. Sun. Deep residual learning for image recognition. In Pro-", + "type": "text", + "cross_page": true + } + ], + "index": 27, + "is_list_start_line": true + }, + { + "bbox": [ + 125, + 480, + 506, + 497 + ], + "spans": [ + { + "bbox": [ + 125, + 480, + 506, + 497 + ], + "score": 1.0, + "content": "ceedings of the IEEE conference on computer vision and pattern recognition, pages 770–778,", + "type": "text", + "cross_page": true + } + ], + "index": 28 + }, + { + "bbox": [ + 127, + 492, + 154, + 505 + ], + "spans": [ + { + "bbox": [ + 127, + 492, + 154, + 505 + ], + "score": 1.0, + "content": "2016.", + "type": "text", + "cross_page": true + } + ], + "index": 29, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 512, + 506, + 526 + ], + "spans": [ + { + "bbox": [ + 106, + 512, + 506, + 526 + ], + "score": 1.0, + "content": "[28] A. Vaswani, N. Shazeer, N. Parmar, J. Uszkoreit, L. Jones, A. N. Gomez, L. Kaiser, and I. Polo-", + "type": "text", + "cross_page": true + } + ], + "index": 30, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 523, + 428, + 538 + ], + "spans": [ + { + "bbox": [ + 127, + 523, + 428, + 538 + ], + "score": 1.0, + "content": "sukhin. Attention is all you need. arXiv preprint arXiv:1706.03762, 2017.", + "type": "text", + "cross_page": true + } + ], + "index": 31, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 542, + 505, + 559 + ], + "spans": [ + { + "bbox": [ + 104, + 542, + 505, + 559 + ], + "score": 1.0, + "content": "[29] D. Ghosh and M. G. Bellemare. 
Representations for stable off-policy reinforcement learning.", + "type": "text", + "cross_page": true + } + ], + "index": 32, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 555, + 291, + 569 + ], + "spans": [ + { + "bbox": [ + 127, + 555, + 291, + 569 + ], + "score": 1.0, + "content": "arXiv preprint arXiv:2007.05520, 2020.", + "type": "text", + "cross_page": true + } + ], + "index": 33, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 575, + 506, + 589 + ], + "spans": [ + { + "bbox": [ + 105, + 575, + 506, + 589 + ], + "score": 1.0, + "content": "[30] A. Khazatsky, A. Nair, D. Jing, and S. Levine. What can i do here? learning new skills by", + "type": "text", + "cross_page": true + } + ], + "index": 34, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 586, + 412, + 600 + ], + "spans": [ + { + "bbox": [ + 127, + 586, + 412, + 600 + ], + "score": 1.0, + "content": "imagining visual affordances. arXiv preprint arXiv:2106.00671, 2021.", + "type": "text", + "cross_page": true + } + ], + "index": 35, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 606, + 506, + 620 + ], + "spans": [ + { + "bbox": [ + 105, + 606, + 506, + 620 + ], + "score": 1.0, + "content": "[31] D. Kalashnikov, A. Irpan, P. Pastor, J. Ibarz, A. Herzog, E. Jang, D. Quillen, E. Holly,", + "type": "text", + "cross_page": true + } + ], + "index": 36, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 617, + 506, + 631 + ], + "spans": [ + { + "bbox": [ + 126, + 617, + 506, + 631 + ], + "score": 1.0, + "content": "M. Kalakrishnan, V. Vanhoucke, et al. Scalable deep reinforcement learning for vision-based", + "type": "text", + "cross_page": true + } + ], + "index": 37 + }, + { + "bbox": [ + 127, + 627, + 447, + 642 + ], + "spans": [ + { + "bbox": [ + 127, + 627, + 447, + 642 + ], + "score": 1.0, + "content": "robotic manipulation. 
In Conference on Robot Learning, pages 651–673, 2018.", + "type": "text", + "cross_page": true + } + ], + "index": 38, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 646, + 506, + 663 + ], + "spans": [ + { + "bbox": [ + 104, + 646, + 506, + 663 + ], + "score": 1.0, + "content": "[32] A. Zeng, S. Song, S. Welker, J. Lee, A. Rodriguez, and T. Funkhouser. Learning synergies", + "type": "text", + "cross_page": true + } + ], + "index": 39, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 660, + 478, + 672 + ], + "spans": [ + { + "bbox": [ + 127, + 660, + 478, + 672 + ], + "score": 1.0, + "content": "between pushing and grasping with self-supervised deep reinforcement learning. 2018.", + "type": "text", + "cross_page": true + } + ], + "index": 40, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 679, + 505, + 694 + ], + "spans": [ + { + "bbox": [ + 105, + 679, + 505, + 694 + ], + "score": 1.0, + "content": "[33] OpenAI. Learning dexterous in-hand manipulation. In arXiv preprint arXiv:1808.00177, 2018.", + "type": "text", + "cross_page": true + } + ], + "index": 41, + "is_list_start_line": true + }, + { + "bbox": [ + 105, + 699, + 506, + 714 + ], + "spans": [ + { + "bbox": [ + 105, + 699, + 506, + 714 + ], + "score": 1.0, + "content": "[34] H. van Hoof, T. Hermans, G. Neumann, and J. Peters. Learning robot in-hand manipulation", + "type": "text", + "cross_page": true + } + ], + "index": 42, + "is_list_start_line": true + }, + { + "bbox": [ + 128, + 712, + 238, + 723 + ], + "spans": [ + { + "bbox": [ + 128, + 712, + 238, + 723 + ], + "score": 1.0, + "content": "with tactile features. 2015.", + "type": "text", + "cross_page": true + } + ], + "index": 43, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 72, + 505, + 86 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 86 + ], + "score": 1.0, + "content": "[35] A. Rajeswaran, V. Kumar, A. Gupta, G. Vezzani, J. Schulman, E. Todorov, and S. 
Levine.", + "type": "text", + "cross_page": true + } + ], + "index": 0, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 83, + 506, + 97 + ], + "spans": [ + { + "bbox": [ + 126, + 83, + 506, + 97 + ], + "score": 1.0, + "content": "Learning complex dexterous manipulation with deep reinforcement learning and demonstra-", + "type": "text", + "cross_page": true + } + ], + "index": 1 + }, + { + "bbox": [ + 127, + 93, + 210, + 107 + ], + "spans": [ + { + "bbox": [ + 127, + 93, + 210, + 107 + ], + "score": 1.0, + "content": "tions. In RSS, 2018.", + "type": "text", + "cross_page": true + } + ], + "index": 2, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 113, + 506, + 127 + ], + "spans": [ + { + "bbox": [ + 106, + 113, + 506, + 127 + ], + "score": 1.0, + "content": "[36] V. Kumar, A. Gupta, E. Todorov, and S. Levine. Learning dexterous manipulation policies", + "type": "text", + "cross_page": true + } + ], + "index": 3, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 124, + 376, + 138 + ], + "spans": [ + { + "bbox": [ + 127, + 124, + 376, + 138 + ], + "score": 1.0, + "content": "from experience and imitation. CoRR, abs/1611.05095, 2016.", + "type": "text", + "cross_page": true + } + ], + "index": 4, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 142, + 506, + 156 + ], + "spans": [ + { + "bbox": [ + 106, + 142, + 506, + 156 + ], + "score": 1.0, + "content": "[37] C. Schenck and D. Fox. Visual closed-loop control for pouring liquids. 
In International", + "type": "text", + "cross_page": true + } + ], + "index": 5, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 154, + 350, + 166 + ], + "spans": [ + { + "bbox": [ + 127, + 154, + 350, + 166 + ], + "score": 1.0, + "content": "Conference on Robotics and Automation (ICRA), 2017.", + "type": "text", + "cross_page": true + } + ], + "index": 6, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 172, + 506, + 186 + ], + "spans": [ + { + "bbox": [ + 106, + 172, + 506, + 186 + ], + "score": 1.0, + "content": "[38] A. Yahya, A. Li, M. Kalakrishnan, Y. Chebotar, and S. Levine. Collective robot reinforcement", + "type": "text", + "cross_page": true + } + ], + "index": 7, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 183, + 437, + 197 + ], + "spans": [ + { + "bbox": [ + 127, + 183, + 437, + 197 + ], + "score": 1.0, + "content": "learning with distributed asynchronous guided policy search. In IROS, 2017.", + "type": "text", + "cross_page": true + } + ], + "index": 8, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 202, + 505, + 214 + ], + "spans": [ + { + "bbox": [ + 106, + 202, + 505, + 214 + ], + "score": 1.0, + "content": "[39] J. Matas, S. James, and A. J. Davison. Sim-to-real reinforcement learning for deformable", + "type": "text", + "cross_page": true + } + ], + "index": 9, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 214, + 409, + 226 + ], + "spans": [ + { + "bbox": [ + 127, + 214, + 409, + 226 + ], + "score": 1.0, + "content": "object manipulation. In Conference on Robot Learning (CoRL), 2018.", + "type": "text", + "cross_page": true + } + ], + "index": 10, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 230, + 506, + 246 + ], + "spans": [ + { + "bbox": [ + 104, + 230, + 506, + 246 + ], + "score": 1.0, + "content": "[40] R. Julian, B. Swanson, G. S. Sukhatme, S. Levine, C. Finn, and K. Hausman. 
Efficient adap-", + "type": "text", + "cross_page": true + } + ], + "index": 11, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 243, + 481, + 254 + ], + "spans": [ + { + "bbox": [ + 127, + 243, + 481, + 254 + ], + "score": 1.0, + "content": "tation for end-to-end vision-based robotic manipulation. arXiv arXiv:2004.10190, 2020.", + "type": "text", + "cross_page": true + } + ], + "index": 12, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 261, + 506, + 275 + ], + "spans": [ + { + "bbox": [ + 105, + 261, + 506, + 275 + ], + "score": 1.0, + "content": "[41] S. Cabi, S. G. Colmenarejo, A. Novikov, K. Konyushkova, S. Reed, R. Jeong, K. ZoΕ‚na, Y. Ay- Λ™", + "type": "text", + "cross_page": true + } + ], + "index": 13, + "is_list_start_line": true + }, + { + "bbox": [ + 124, + 270, + 507, + 287 + ], + "spans": [ + { + "bbox": [ + 124, + 270, + 507, + 287 + ], + "score": 1.0, + "content": "tar, D. Budden, M. Vecerik, et al. A framework for data-driven robotics. arXiv preprint", + "type": "text", + "cross_page": true + } + ], + "index": 14 + }, + { + "bbox": [ + 127, + 283, + 230, + 295 + ], + "spans": [ + { + "bbox": [ + 127, + 283, + 230, + 295 + ], + "score": 1.0, + "content": "arXiv:1909.12200, 2019.", + "type": "text", + "cross_page": true + } + ], + "index": 15, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 301, + 505, + 315 + ], + "spans": [ + { + "bbox": [ + 105, + 301, + 505, + 315 + ], + "score": 1.0, + "content": "[42] C. Finn and S. Levine. Deep visual foresight for planning robot motion. In 2017 IEEE Inter-", + "type": "text", + "cross_page": true + } + ], + "index": 16, + "is_list_start_line": true + }, + { + "bbox": [ + 124, + 311, + 489, + 327 + ], + "spans": [ + { + "bbox": [ + 124, + 311, + 489, + 327 + ], + "score": 1.0, + "content": "national Conference on Robotics and Automation (ICRA), pages 2786–2793. 
IEEE, 2017.", + "type": "text", + "cross_page": true + } + ], + "index": 17, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 329, + 506, + 346 + ], + "spans": [ + { + "bbox": [ + 104, + 329, + 506, + 346 + ], + "score": 1.0, + "content": "[43] F. Ebert, C. Finn, S. Dasari, A. Xie, A. Lee, and S. Levine. Visual foresight: Model-based deep", + "type": "text", + "cross_page": true + } + ], + "index": 18, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 343, + 504, + 355 + ], + "spans": [ + { + "bbox": [ + 127, + 343, + 504, + 355 + ], + "score": 1.0, + "content": "reinforcement learning for vision-based robotic control. arXiv preprint arXiv:1812.00568,", + "type": "text", + "cross_page": true + } + ], + "index": 19 + }, + { + "bbox": [ + 126, + 352, + 155, + 367 + ], + "spans": [ + { + "bbox": [ + 126, + 352, + 155, + 367 + ], + "score": 1.0, + "content": "2018.", + "type": "text", + "cross_page": true + } + ], + "index": 20, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 368, + 507, + 388 + ], + "spans": [ + { + "bbox": [ + 104, + 368, + 507, + 388 + ], + "score": 1.0, + "content": "[44] A. Xie, F. Ebert, S. Levine, and C. Finn. Improvisation through physical understanding: Using", + "type": "text", + "cross_page": true + } + ], + "index": 21, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 382, + 482, + 396 + ], + "spans": [ + { + "bbox": [ + 127, + 382, + 482, + 396 + ], + "score": 1.0, + "content": "novel objects as tools with visual foresight. Robotics: Science and Systems (RSS), 2019.", + "type": "text", + "cross_page": true + } + ], + "index": 22, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 401, + 506, + 415 + ], + "spans": [ + { + "bbox": [ + 106, + 401, + 506, + 415 + ], + "score": 1.0, + "content": "[45] Y. Hristov, A. Lascarides, and S. Ramamoorthy. 
Interpretable latent spaces for learning from", + "type": "text", + "cross_page": true + } + ], + "index": 23, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 412, + 352, + 426 + ], + "spans": [ + { + "bbox": [ + 127, + 412, + 352, + 426 + ], + "score": 1.0, + "content": "demonstration. arXiv preprint arXiv:1807.06583, 2018.", + "type": "text", + "cross_page": true + } + ], + "index": 24, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 429, + 506, + 445 + ], + "spans": [ + { + "bbox": [ + 104, + 429, + 506, + 445 + ], + "score": 1.0, + "content": "[46] S. Tian, S. Nair, F. Ebert, S. Dasari, B. Eysenbach, C. Finn, and S. Levine. Model-based visual", + "type": "text", + "cross_page": true + } + ], + "index": 25, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 441, + 497, + 455 + ], + "spans": [ + { + "bbox": [ + 126, + 441, + 497, + 455 + ], + "score": 1.0, + "content": "planning with self-supervised functional distances. arXiv preprint arXiv:2012.15373, 2020.", + "type": "text", + "cross_page": true + } + ], + "index": 26 + }, + { + "bbox": [ + 106, + 460, + 506, + 474 + ], + "spans": [ + { + "bbox": [ + 106, + 460, + 506, + 474 + ], + "score": 1.0, + "content": "[47] S. Young, D. Gandhi, S. Tulsiani, A. Gupta, P. Abbeel, and L. Pinto. Visual imitation made", + "type": "text", + "cross_page": true + } + ], + "index": 27, + "is_list_start_line": true + }, + { + "bbox": [ + 124, + 470, + 315, + 486 + ], + "spans": [ + { + "bbox": [ + 124, + 470, + 315, + 486 + ], + "score": 1.0, + "content": "easy. arXiv e-prints, pages arXiv–2008, 2020.", + "type": "text", + "cross_page": true + } + ], + "index": 28, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 490, + 505, + 504 + ], + "spans": [ + { + "bbox": [ + 105, + 490, + 505, + 504 + ], + "score": 1.0, + "content": "[48] E. Johns. 
Coarse-to-fine imitation learning: Robot manipulation from a single demonstration.", + "type": "text", + "cross_page": true + } + ], + "index": 29, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 500, + 291, + 514 + ], + "spans": [ + { + "bbox": [ + 127, + 500, + 291, + 514 + ], + "score": 1.0, + "content": "arXiv preprint arXiv:2105.06411, 2021.", + "type": "text", + "cross_page": true + } + ], + "index": 30, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 520, + 505, + 532 + ], + "spans": [ + { + "bbox": [ + 106, + 520, + 505, + 532 + ], + "score": 1.0, + "content": "[49] A. Mandlekar, F. Ramos, B. Boots, S. Savarese, L. Fei-Fei, A. Garg, and D. Fox. Iris: Implicit", + "type": "text", + "cross_page": true + } + ], + "index": 31, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 531, + 505, + 543 + ], + "spans": [ + { + "bbox": [ + 127, + 531, + 505, + 543 + ], + "score": 1.0, + "content": "reinforcement without interaction at scale for learning control from offline robot manipulation", + "type": "text", + "cross_page": true + } + ], + "index": 32 + }, + { + "bbox": [ + 126, + 540, + 506, + 555 + ], + "spans": [ + { + "bbox": [ + 126, + 540, + 506, + 555 + ], + "score": 1.0, + "content": "data. In 2020 IEEE International Conference on Robotics and Automation (ICRA), pages", + "type": "text", + "cross_page": true + } + ], + "index": 33 + }, + { + "bbox": [ + 127, + 552, + 230, + 564 + ], + "spans": [ + { + "bbox": [ + 127, + 552, + 230, + 564 + ], + "score": 1.0, + "content": "4414–4420. IEEE, 2020.", + "type": "text", + "cross_page": true + } + ], + "index": 34, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 570, + 506, + 584 + ], + "spans": [ + { + "bbox": [ + 106, + 570, + 506, + 584 + ], + "score": 1.0, + "content": "[50] A. Mandlekar, D. Xu, R. MartΒ΄Δ±n-MartΒ΄Δ±n, S. Savarese, and L. Fei-Fei. 
Learning to generalize", + "type": "text", + "cross_page": true + } + ], + "index": 35, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 581, + 375, + 595 + ], + "spans": [ + { + "bbox": [ + 127, + 581, + 375, + 595 + ], + "score": 1.0, + "content": "across long-horizon tasks from human demonstrations, 2020.", + "type": "text", + "cross_page": true + } + ], + "index": 36, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 600, + 505, + 614 + ], + "spans": [ + { + "bbox": [ + 106, + 600, + 505, + 614 + ], + "score": 1.0, + "content": "[51] S. Lange, T. Gabel, and M. Riedmiller. Batch reinforcement learning. In Reinforcement learn-", + "type": "text", + "cross_page": true + } + ], + "index": 37, + "is_list_start_line": true + }, + { + "bbox": [ + 125, + 610, + 266, + 626 + ], + "spans": [ + { + "bbox": [ + 125, + 610, + 266, + 626 + ], + "score": 1.0, + "content": "ing, pages 45–73. Springer, 2012.", + "type": "text", + "cross_page": true + } + ], + "index": 38, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 629, + 505, + 643 + ], + "spans": [ + { + "bbox": [ + 106, + 629, + 505, + 643 + ], + "score": 1.0, + "content": "[52] S. Fujimoto, D. Meger, and D. Precup. Off-policy deep reinforcement learning without explo-", + "type": "text", + "cross_page": true + } + ], + "index": 39, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 640, + 321, + 654 + ], + "spans": [ + { + "bbox": [ + 126, + 640, + 321, + 654 + ], + "score": 1.0, + "content": "ration. arXiv preprint arXiv:1812.02900, 2018.", + "type": "text", + "cross_page": true + } + ], + "index": 40, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 659, + 506, + 673 + ], + "spans": [ + { + "bbox": [ + 106, + 659, + 506, + 673 + ], + "score": 1.0, + "content": "[53] A. Kumar, J. Fu, M. Soh, G. Tucker, and S. Levine. 
Stabilizing off-policy q-learning via", + "type": "text", + "cross_page": true + } + ], + "index": 41, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 671, + 506, + 685 + ], + "spans": [ + { + "bbox": [ + 127, + 671, + 506, + 685 + ], + "score": 1.0, + "content": "bootstrapping error reduction. In Advances in Neural Information Processing Systems, pages", + "type": "text", + "cross_page": true + } + ], + "index": 42 + }, + { + "bbox": [ + 127, + 681, + 214, + 695 + ], + "spans": [ + { + "bbox": [ + 127, + 681, + 214, + 695 + ], + "score": 1.0, + "content": "11761–11771, 2019.", + "type": "text", + "cross_page": true + } + ], + "index": 43, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 699, + 506, + 713 + ], + "spans": [ + { + "bbox": [ + 106, + 699, + 506, + 713 + ], + "score": 1.0, + "content": "[54] X. B. Peng, A. Kumar, G. Zhang, and S. Levine. Advantage-weighted regression: Simple and", + "type": "text", + "cross_page": true + } + ], + "index": 44, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 711, + 464, + 725 + ], + "spans": [ + { + "bbox": [ + 127, + 711, + 464, + 725 + ], + "score": 1.0, + "content": "scalable off-policy reinforcement learning. arXiv preprint arXiv:1910.00177, 2019.", + "type": "text", + "cross_page": true + } + ], + "index": 45, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 72, + 505, + 87 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 505, + 87 + ], + "score": 1.0, + "content": "[55] N. Jaques, A. Ghandeharioun, J. H. Shen, C. Ferguson, A. Lapedriza, N. Jones, S. Gu, and", + "type": "text", + "cross_page": true + } + ], + "index": 0, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 83, + 506, + 99 + ], + "spans": [ + { + "bbox": [ + 126, + 83, + 506, + 99 + ], + "score": 1.0, + "content": "R. Picard. 
Way off-policy batch deep reinforcement learning of implicit human preferences in", + "type": "text", + "cross_page": true + } + ], + "index": 1 + }, + { + "bbox": [ + 127, + 94, + 322, + 109 + ], + "spans": [ + { + "bbox": [ + 127, + 94, + 322, + 109 + ], + "score": 1.0, + "content": "dialog. arXiv preprint arXiv:1907.00456, 2019.", + "type": "text", + "cross_page": true + } + ], + "index": 2, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 113, + 505, + 128 + ], + "spans": [ + { + "bbox": [ + 105, + 113, + 505, + 128 + ], + "score": 1.0, + "content": "[56] A. Nair, M. Dalal, A. Gupta, and S. Levine. Accelerating online reinforcement learning with", + "type": "text", + "cross_page": true + } + ], + "index": 3, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 124, + 355, + 138 + ], + "spans": [ + { + "bbox": [ + 127, + 124, + 355, + 138 + ], + "score": 1.0, + "content": "offline datasets. arXiv preprint arXiv:2006.09359, 2020.", + "type": "text", + "cross_page": true + } + ], + "index": 4, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 142, + 505, + 157 + ], + "spans": [ + { + "bbox": [ + 105, + 142, + 505, + 157 + ], + "score": 1.0, + "content": "[57] R. Fakoor, J. Mueller, P. Chaudhari, and A. J. Smola. Continuous doubly constrained batch", + "type": "text", + "cross_page": true + } + ], + "index": 5, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 155, + 387, + 168 + ], + "spans": [ + { + "bbox": [ + 126, + 155, + 387, + 168 + ], + "score": 1.0, + "content": "reinforcement learning. arXiv preprint arXiv:2102.09225, 2021.", + "type": "text", + "cross_page": true + } + ], + "index": 6, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 172, + 506, + 187 + ], + "spans": [ + { + "bbox": [ + 105, + 172, + 506, + 187 + ], + "score": 1.0, + "content": "[58] T. Yu, G. Thomas, L. Yu, S. Ermon, J. Zou, S. Levine, C. Finn, and T. Ma. 
Mopo: Model-based", + "type": "text", + "cross_page": true + } + ], + "index": 7, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 184, + 402, + 198 + ], + "spans": [ + { + "bbox": [ + 127, + 184, + 402, + 198 + ], + "score": 1.0, + "content": "offline policy optimization. arXiv preprint arXiv:2005.13239, 2020.", + "type": "text", + "cross_page": true + } + ], + "index": 8, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 203, + 505, + 216 + ], + "spans": [ + { + "bbox": [ + 105, + 203, + 505, + 216 + ], + "score": 1.0, + "content": "[59] R. Kidambi, A. Rajeswaran, P. Netrapalli, and T. Joachims. Morel: Model-based offline rein-", + "type": "text", + "cross_page": true + } + ], + "index": 9, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 213, + 371, + 227 + ], + "spans": [ + { + "bbox": [ + 126, + 213, + 371, + 227 + ], + "score": 1.0, + "content": "forcement learning. arXiv preprint arXiv:2005.05951, 2020.", + "type": "text", + "cross_page": true + } + ], + "index": 10, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 232, + 506, + 248 + ], + "spans": [ + { + "bbox": [ + 105, + 232, + 506, + 248 + ], + "score": 1.0, + "content": "[60] R. Rafailov, T. Yu, A. Rajeswaran, and C. Finn. Offline reinforcement learning from images", + "type": "text", + "cross_page": true + } + ], + "index": 11, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 244, + 468, + 257 + ], + "spans": [ + { + "bbox": [ + 127, + 244, + 468, + 257 + ], + "score": 1.0, + "content": "with latent space models. Learning for Decision Making and Control (L4DC), 2021.", + "type": "text", + "cross_page": true + } + ], + "index": 12, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 262, + 505, + 276 + ], + "spans": [ + { + "bbox": [ + 106, + 262, + 505, + 276 + ], + "score": 1.0, + "content": "[61] D. Precup, R. S. Sutton, and S. Dasgupta. 
Off-policy temporal-difference learning with func-", + "type": "text", + "cross_page": true + } + ], + "index": 13, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 273, + 341, + 287 + ], + "spans": [ + { + "bbox": [ + 126, + 273, + 341, + 287 + ], + "score": 1.0, + "content": "tion approximation. In ICML, pages 417–424, 2001.", + "type": "text", + "cross_page": true + } + ], + "index": 14, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 291, + 506, + 307 + ], + "spans": [ + { + "bbox": [ + 105, + 291, + 506, + 307 + ], + "score": 1.0, + "content": "[62] C. Voloshin, H. M. Le, N. Jiang, and Y. Yue. Empirical study of off-policy policy evaluation", + "type": "text", + "cross_page": true + } + ], + "index": 15, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 303, + 402, + 317 + ], + "spans": [ + { + "bbox": [ + 127, + 303, + 402, + 317 + ], + "score": 1.0, + "content": "for reinforcement learning. arXiv preprint arXiv:1911.06854, 2019.", + "type": "text", + "cross_page": true + } + ], + "index": 16, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 322, + 505, + 335 + ], + "spans": [ + { + "bbox": [ + 106, + 322, + 505, + 335 + ], + "score": 1.0, + "content": "[63] O. Nachum, Y. Chow, B. Dai, and L. Li. Dualdice: Behavior-agnostic estimation of discounted", + "type": "text", + "cross_page": true + } + ], + "index": 17, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 331, + 506, + 348 + ], + "spans": [ + { + "bbox": [ + 126, + 331, + 506, + 348 + ], + "score": 1.0, + "content": "stationary distribution corrections. 
In Advances in Neural Information Processing Systems,", + "type": "text", + "cross_page": true + } + ], + "index": 18 + }, + { + "bbox": [ + 127, + 345, + 228, + 357 + ], + "spans": [ + { + "bbox": [ + 127, + 345, + 228, + 357 + ], + "score": 1.0, + "content": "pages 2315–2325, 2019.", + "type": "text", + "cross_page": true + } + ], + "index": 19, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 363, + 505, + 376 + ], + "spans": [ + { + "bbox": [ + 106, + 363, + 505, + 376 + ], + "score": 1.0, + "content": "[64] R. Qin, S. Gao, X. Zhang, Z. Xu, S. Huang, Z. Li, W. Zhang, and Y. Yu. Neorl: A near", + "type": "text", + "cross_page": true + } + ], + "index": 20, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 374, + 505, + 387 + ], + "spans": [ + { + "bbox": [ + 127, + 374, + 505, + 387 + ], + "score": 1.0, + "content": "real-world benchmark for offline reinforcement learning. arXiv preprint arXiv:2102.00714,", + "type": "text", + "cross_page": true + } + ], + "index": 21 + }, + { + "bbox": [ + 127, + 384, + 154, + 397 + ], + "spans": [ + { + "bbox": [ + 127, + 384, + 154, + 397 + ], + "score": 1.0, + "content": "2021.", + "type": "text", + "cross_page": true + } + ], + "index": 22, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 402, + 505, + 419 + ], + "spans": [ + { + "bbox": [ + 105, + 402, + 505, + 419 + ], + "score": 1.0, + "content": "[65] T. Haarnoja, H. Tang, P. Abbeel, and S. Levine. Reinforcement learning with deep energy-", + "type": "text", + "cross_page": true + } + ], + "index": 23, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 414, + 454, + 428 + ], + "spans": [ + { + "bbox": [ + 126, + 414, + 454, + 428 + ], + "score": 1.0, + "content": "based policies. 
In International Conference on Machine Learning (ICML), 2017.", + "type": "text", + "cross_page": true + } + ], + "index": 24, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 432, + 506, + 447 + ], + "spans": [ + { + "bbox": [ + 105, + 432, + 506, + 447 + ], + "score": 1.0, + "content": "[66] X. B. Peng, A. Kumar, G. Zhang, and S. Levine. Advantage-weighted regression: Simple and", + "type": "text", + "cross_page": true + } + ], + "index": 25, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 444, + 464, + 458 + ], + "spans": [ + { + "bbox": [ + 126, + 444, + 464, + 458 + ], + "score": 1.0, + "content": "scalable off-policy reinforcement learning. arXiv preprint arXiv:1910.00177, 2019.", + "type": "text", + "cross_page": true + } + ], + "index": 26, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 462, + 506, + 478 + ], + "spans": [ + { + "bbox": [ + 105, + 462, + 506, + 478 + ], + "score": 1.0, + "content": "[67] S. Fujimoto and S. S. Gu. A minimalist approach to offline reinforcement learning. arXiv", + "type": "text", + "cross_page": true + } + ], + "index": 27, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 475, + 266, + 486 + ], + "spans": [ + { + "bbox": [ + 126, + 475, + 266, + 486 + ], + "score": 1.0, + "content": "preprint arXiv:2106.06860, 2021.", + "type": "text", + "cross_page": true + } + ], + "index": 28, + "is_list_end_line": true + } + ], + "index": 26.5, + "bbox_fs": [ + 106, + 203, + 507, + 725 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 105, + 40, + 507, + 723 + ], + "lines": [ + { + "bbox": [ + 105, + 73, + 506, + 87 + ], + "spans": [ + { + "bbox": [ + 105, + 73, + 506, + 87 + ], + "score": 1.0, + "content": "[16] Y. Wu, G. Tucker, and O. Nachum. Behavior regularized offline reinforcement learning. 
arXiv", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 124, + 83, + 268, + 98 + ], + "spans": [ + { + "bbox": [ + 124, + 83, + 268, + 98 + ], + "score": 1.0, + "content": "preprint arXiv:1911.11361, 2019.", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 103, + 505, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 103, + 505, + 118 + ], + "score": 1.0, + "content": "[17] T. P. Lillicrap, J. J. Hunt, A. Pritzel, N. Heess, T. Erez, Y. Tassa, D. Silver, and D. Wierstra.", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 127, + 115, + 505, + 129 + ], + "spans": [ + { + "bbox": [ + 127, + 115, + 505, + 129 + ], + "score": 1.0, + "content": "Continuous control with deep reinforcement learning. arXiv preprint arXiv:1509.02971, 2015.", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 133, + 506, + 150 + ], + "spans": [ + { + "bbox": [ + 105, + 133, + 506, + 150 + ], + "score": 1.0, + "content": "[18] S. Fujimoto, H. Van Hoof, and D. Meger. Addressing function approximation error in actor-", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 127, + 146, + 354, + 160 + ], + "spans": [ + { + "bbox": [ + 127, + 146, + 354, + 160 + ], + "score": 1.0, + "content": "critic methods. arXiv preprint arXiv:1802.09477, 2018.", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 166, + 505, + 181 + ], + "spans": [ + { + "bbox": [ + 105, + 166, + 505, + 181 + ], + "score": 1.0, + "content": "[19] T. Haarnoja, A. Zhou, P. Abbeel, and S. Levine. Soft actor-critic: Off-policy maximum entropy", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 127, + 177, + 501, + 191 + ], + "spans": [ + { + "bbox": [ + 127, + 177, + 501, + 191 + ], + "score": 1.0, + "content": "deep reinforcement learning with a stochastic actor. 
arXiv preprint arXiv:1801.01290, 2018.", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 197, + 506, + 212 + ], + "spans": [ + { + "bbox": [ + 106, + 197, + 506, + 212 + ], + "score": 1.0, + "content": "[20] I. Kostrikov, J. Tompson, R. Fergus, and O. Nachum. Offline reinforcement learning with", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 127, + 209, + 444, + 221 + ], + "spans": [ + { + "bbox": [ + 127, + 209, + 444, + 221 + ], + "score": 1.0, + "content": "fisher divergence critic regularization. arXiv preprint arXiv:2103.08050, 2021.", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 228, + 506, + 242 + ], + "spans": [ + { + "bbox": [ + 105, + 228, + 506, + 242 + ], + "score": 1.0, + "content": "[21] A. Kumar, R. Agarwal, D. Ghosh, and S. Levine. Implicit under-parameterization inhibits", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 127, + 239, + 505, + 253 + ], + "spans": [ + { + "bbox": [ + 127, + 239, + 505, + 253 + ], + "score": 1.0, + "content": "data-efficient deep reinforcement learning. In International Conference on Learning Repre-", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 126, + 250, + 451, + 264 + ], + "spans": [ + { + "bbox": [ + 126, + 250, + 451, + 264 + ], + "score": 1.0, + "content": "sentations, 2021. URL https://openreview.net/forum?id=O9bnihsFfXU.", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 270, + 506, + 285 + ], + "spans": [ + { + "bbox": [ + 105, + 270, + 506, + 285 + ], + "score": 1.0, + "content": "[22] A. Kumar, R. Agarwal, A. Courville, T. Ma, G. Tucker, and S. Levine. Value-based deep", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 127, + 281, + 506, + 295 + ], + "spans": [ + { + "bbox": [ + 127, + 281, + 506, + 295 + ], + "score": 1.0, + "content": "reinforcement learning requires explicit regularization. 
In RL for Real Life Workshop &", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 127, + 293, + 505, + 307 + ], + "spans": [ + { + "bbox": [ + 127, + 293, + 505, + 307 + ], + "score": 1.0, + "content": "Overparameterization: Pitfalls and Opportunities Workshop, ICML, 2021. URL https:", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 127, + 304, + 469, + 318 + ], + "spans": [ + { + "bbox": [ + 127, + 304, + 469, + 318 + ], + "score": 1.0, + "content": "//drive.google.com/file/d/1Fg43H5oagQp-ksjpWBf_aDYEzAFMVJm6/view.", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 323, + 506, + 338 + ], + "spans": [ + { + "bbox": [ + 106, + 323, + 506, + 338 + ], + "score": 1.0, + "content": "[23] R. Munos. Error bounds for approximate policy iteration. In Proceedings of the Twentieth", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 126, + 334, + 506, + 349 + ], + "spans": [ + { + "bbox": [ + 126, + 334, + 506, + 349 + ], + "score": 1.0, + "content": "International Conference on International Conference on Machine Learning, ICML’03, page", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 128, + 347, + 327, + 358 + ], + "spans": [ + { + "bbox": [ + 128, + 347, + 327, + 358 + ], + "score": 1.0, + "content": "560–567. AAAI Press, 2003. ISBN 1577351894.", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 106, + 365, + 506, + 379 + ], + "spans": [ + { + "bbox": [ + 106, + 365, + 506, + 379 + ], + "score": 1.0, + "content": "[24] N. Srivastava, G. Hinton, A. Krizhevsky, I. Sutskever, and R. Salakhutdinov. Dropout: a simple", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 127, + 377, + 506, + 390 + ], + "spans": [ + { + "bbox": [ + 127, + 377, + 506, + 390 + ], + "score": 1.0, + "content": "way to prevent neural networks from overfitting. 
The journal of machine learning research, 15", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 127, + 388, + 218, + 400 + ], + "spans": [ + { + "bbox": [ + 127, + 388, + 218, + 400 + ], + "score": 1.0, + "content": "(1):1929–1958, 2014.", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 106, + 408, + 505, + 422 + ], + "spans": [ + { + "bbox": [ + 106, + 408, + 505, + 422 + ], + "score": 1.0, + "content": "[25] A. A. Alemi, I. Fischer, J. V. Dillon, and K. Murphy. Deep variational information bottleneck.", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 125, + 419, + 292, + 433 + ], + "spans": [ + { + "bbox": [ + 125, + 419, + 292, + 433 + ], + "score": 1.0, + "content": "arXiv preprint arXiv:1612.00410, 2016.", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 437, + 505, + 455 + ], + "spans": [ + { + "bbox": [ + 105, + 437, + 505, + 455 + ], + "score": 1.0, + "content": "[26] A. Achille and S. Soatto. Emergence of invariance and disentanglement in deep representa-", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 126, + 450, + 431, + 463 + ], + "spans": [ + { + "bbox": [ + 126, + 450, + 431, + 463 + ], + "score": 1.0, + "content": "tions. The Journal of Machine Learning Research, 19(1):1947–1980, 2018.", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 104, + 469, + 506, + 484 + ], + "spans": [ + { + "bbox": [ + 104, + 469, + 506, + 484 + ], + "score": 1.0, + "content": "[27] K. He, X. Zhang, S. Ren, and J. Sun. Deep residual learning for image recognition. 
In Pro-", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 125, + 480, + 506, + 497 + ], + "spans": [ + { + "bbox": [ + 125, + 480, + 506, + 497 + ], + "score": 1.0, + "content": "ceedings of the IEEE conference on computer vision and pattern recognition, pages 770–778,", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 127, + 492, + 154, + 505 + ], + "spans": [ + { + "bbox": [ + 127, + 492, + 154, + 505 + ], + "score": 1.0, + "content": "2016.", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 106, + 512, + 506, + 526 + ], + "spans": [ + { + "bbox": [ + 106, + 512, + 506, + 526 + ], + "score": 1.0, + "content": "[28] A. Vaswani, N. Shazeer, N. Parmar, J. Uszkoreit, L. Jones, A. N. Gomez, L. Kaiser, and I. Polo-", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 127, + 523, + 428, + 538 + ], + "spans": [ + { + "bbox": [ + 127, + 523, + 428, + 538 + ], + "score": 1.0, + "content": "sukhin. Attention is all you need. arXiv preprint arXiv:1706.03762, 2017.", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 104, + 542, + 505, + 559 + ], + "spans": [ + { + "bbox": [ + 104, + 542, + 505, + 559 + ], + "score": 1.0, + "content": "[29] D. Ghosh and M. G. Bellemare. Representations for stable off-policy reinforcement learning.", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 127, + 555, + 291, + 569 + ], + "spans": [ + { + "bbox": [ + 127, + 555, + 291, + 569 + ], + "score": 1.0, + "content": "arXiv preprint arXiv:2007.05520, 2020.", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 575, + 506, + 589 + ], + "spans": [ + { + "bbox": [ + 105, + 575, + 506, + 589 + ], + "score": 1.0, + "content": "[30] A. Khazatsky, A. Nair, D. Jing, and S. Levine. What can i do here? 
learning new skills by", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 127, + 586, + 412, + 600 + ], + "spans": [ + { + "bbox": [ + 127, + 586, + 412, + 600 + ], + "score": 1.0, + "content": "imagining visual affordances. arXiv preprint arXiv:2106.00671, 2021.", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 606, + 506, + 620 + ], + "spans": [ + { + "bbox": [ + 105, + 606, + 506, + 620 + ], + "score": 1.0, + "content": "[31] D. Kalashnikov, A. Irpan, P. Pastor, J. Ibarz, A. Herzog, E. Jang, D. Quillen, E. Holly,", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 126, + 617, + 506, + 631 + ], + "spans": [ + { + "bbox": [ + 126, + 617, + 506, + 631 + ], + "score": 1.0, + "content": "M. Kalakrishnan, V. Vanhoucke, et al. Scalable deep reinforcement learning for vision-based", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 127, + 627, + 447, + 642 + ], + "spans": [ + { + "bbox": [ + 127, + 627, + 447, + 642 + ], + "score": 1.0, + "content": "robotic manipulation. In Conference on Robot Learning, pages 651–673, 2018.", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 104, + 646, + 506, + 663 + ], + "spans": [ + { + "bbox": [ + 104, + 646, + 506, + 663 + ], + "score": 1.0, + "content": "[32] A. Zeng, S. Song, S. Welker, J. Lee, A. Rodriguez, and T. Funkhouser. Learning synergies", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 127, + 660, + 478, + 672 + ], + "spans": [ + { + "bbox": [ + 127, + 660, + 478, + 672 + ], + "score": 1.0, + "content": "between pushing and grasping with self-supervised deep reinforcement learning. 2018.", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 679, + 505, + 694 + ], + "spans": [ + { + "bbox": [ + 105, + 679, + 505, + 694 + ], + "score": 1.0, + "content": "[33] OpenAI. Learning dexterous in-hand manipulation. 
In arXiv preprint arXiv:1808.00177, 2018.", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 699, + 506, + 714 + ], + "spans": [ + { + "bbox": [ + 105, + 699, + 506, + 714 + ], + "score": 1.0, + "content": "[34] H. van Hoof, T. Hermans, G. Neumann, and J. Peters. Learning robot in-hand manipulation", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 128, + 712, + 238, + 723 + ], + "spans": [ + { + "bbox": [ + 128, + 712, + 238, + 723 + ], + "score": 1.0, + "content": "with tactile features. 2015.", + "type": "text" + } + ], + "index": 43 + } + ], + "index": 21.5 + } + ], + "page_idx": 9, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 300, + 741, + 311, + 751 + ], + "lines": [ + { + "bbox": [ + 299, + 740, + 313, + 754 + ], + "spans": [ + { + "bbox": [ + 299, + 740, + 313, + 754 + ], + "score": 1.0, + "content": "10", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "list", + "bbox": [ + 105, + 40, + 507, + 723 + ], + "lines": [], + "index": 21.5, + "bbox_fs": [ + 104, + 73, + 506, + 723 + ], + "lines_deleted": true + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 105, + 56, + 507, + 728 + ], + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 86 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 86 + ], + "score": 1.0, + "content": "[35] A. Rajeswaran, V. Kumar, A. Gupta, G. Vezzani, J. Schulman, E. Todorov, and S. Levine.", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 126, + 83, + 506, + 97 + ], + "spans": [ + { + "bbox": [ + 126, + 83, + 506, + 97 + ], + "score": 1.0, + "content": "Learning complex dexterous manipulation with deep reinforcement learning and demonstra-", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 127, + 93, + 210, + 107 + ], + "spans": [ + { + "bbox": [ + 127, + 93, + 210, + 107 + ], + "score": 1.0, + "content": "tions. 
In RSS, 2018.", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 113, + 506, + 127 + ], + "spans": [ + { + "bbox": [ + 106, + 113, + 506, + 127 + ], + "score": 1.0, + "content": "[36] V. Kumar, A. Gupta, E. Todorov, and S. Levine. Learning dexterous manipulation policies", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 127, + 124, + 376, + 138 + ], + "spans": [ + { + "bbox": [ + 127, + 124, + 376, + 138 + ], + "score": 1.0, + "content": "from experience and imitation. CoRR, abs/1611.05095, 2016.", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 142, + 506, + 156 + ], + "spans": [ + { + "bbox": [ + 106, + 142, + 506, + 156 + ], + "score": 1.0, + "content": "[37] C. Schenck and D. Fox. Visual closed-loop control for pouring liquids. In International", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 127, + 154, + 350, + 166 + ], + "spans": [ + { + "bbox": [ + 127, + 154, + 350, + 166 + ], + "score": 1.0, + "content": "Conference on Robotics and Automation (ICRA), 2017.", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 172, + 506, + 186 + ], + "spans": [ + { + "bbox": [ + 106, + 172, + 506, + 186 + ], + "score": 1.0, + "content": "[38] A. Yahya, A. Li, M. Kalakrishnan, Y. Chebotar, and S. Levine. Collective robot reinforcement", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 127, + 183, + 437, + 197 + ], + "spans": [ + { + "bbox": [ + 127, + 183, + 437, + 197 + ], + "score": 1.0, + "content": "learning with distributed asynchronous guided policy search. In IROS, 2017.", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 202, + 505, + 214 + ], + "spans": [ + { + "bbox": [ + 106, + 202, + 505, + 214 + ], + "score": 1.0, + "content": "[39] J. Matas, S. James, and A. J. Davison. 
Sim-to-real reinforcement learning for deformable", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 127, + 214, + 409, + 226 + ], + "spans": [ + { + "bbox": [ + 127, + 214, + 409, + 226 + ], + "score": 1.0, + "content": "object manipulation. In Conference on Robot Learning (CoRL), 2018.", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 230, + 506, + 246 + ], + "spans": [ + { + "bbox": [ + 104, + 230, + 506, + 246 + ], + "score": 1.0, + "content": "[40] R. Julian, B. Swanson, G. S. Sukhatme, S. Levine, C. Finn, and K. Hausman. Efficient adap-", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 127, + 243, + 481, + 254 + ], + "spans": [ + { + "bbox": [ + 127, + 243, + 481, + 254 + ], + "score": 1.0, + "content": "tation for end-to-end vision-based robotic manipulation. arXiv arXiv:2004.10190, 2020.", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 261, + 506, + 275 + ], + "spans": [ + { + "bbox": [ + 105, + 261, + 506, + 275 + ], + "score": 1.0, + "content": "[41] S. Cabi, S. G. Colmenarejo, A. Novikov, K. Konyushkova, S. Reed, R. Jeong, K. ZoΕ‚na, Y. Ay- Λ™", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 124, + 270, + 507, + 287 + ], + "spans": [ + { + "bbox": [ + 124, + 270, + 507, + 287 + ], + "score": 1.0, + "content": "tar, D. Budden, M. Vecerik, et al. A framework for data-driven robotics. arXiv preprint", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 127, + 283, + 230, + 295 + ], + "spans": [ + { + "bbox": [ + 127, + 283, + 230, + 295 + ], + "score": 1.0, + "content": "arXiv:1909.12200, 2019.", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 301, + 505, + 315 + ], + "spans": [ + { + "bbox": [ + 105, + 301, + 505, + 315 + ], + "score": 1.0, + "content": "[42] C. Finn and S. Levine. Deep visual foresight for planning robot motion. 
In 2017 IEEE Inter-", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 124, + 311, + 489, + 327 + ], + "spans": [ + { + "bbox": [ + 124, + 311, + 489, + 327 + ], + "score": 1.0, + "content": "national Conference on Robotics and Automation (ICRA), pages 2786–2793. IEEE, 2017.", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 329, + 506, + 346 + ], + "spans": [ + { + "bbox": [ + 104, + 329, + 506, + 346 + ], + "score": 1.0, + "content": "[43] F. Ebert, C. Finn, S. Dasari, A. Xie, A. Lee, and S. Levine. Visual foresight: Model-based deep", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 127, + 343, + 504, + 355 + ], + "spans": [ + { + "bbox": [ + 127, + 343, + 504, + 355 + ], + "score": 1.0, + "content": "reinforcement learning for vision-based robotic control. arXiv preprint arXiv:1812.00568,", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 126, + 352, + 155, + 367 + ], + "spans": [ + { + "bbox": [ + 126, + 352, + 155, + 367 + ], + "score": 1.0, + "content": "2018.", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 104, + 368, + 507, + 388 + ], + "spans": [ + { + "bbox": [ + 104, + 368, + 507, + 388 + ], + "score": 1.0, + "content": "[44] A. Xie, F. Ebert, S. Levine, and C. Finn. Improvisation through physical understanding: Using", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 127, + 382, + 482, + 396 + ], + "spans": [ + { + "bbox": [ + 127, + 382, + 482, + 396 + ], + "score": 1.0, + "content": "novel objects as tools with visual foresight. Robotics: Science and Systems (RSS), 2019.", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 106, + 401, + 506, + 415 + ], + "spans": [ + { + "bbox": [ + 106, + 401, + 506, + 415 + ], + "score": 1.0, + "content": "[45] Y. Hristov, A. Lascarides, and S. Ramamoorthy. 
Interpretable latent spaces for learning from", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 127, + 412, + 352, + 426 + ], + "spans": [ + { + "bbox": [ + 127, + 412, + 352, + 426 + ], + "score": 1.0, + "content": "demonstration. arXiv preprint arXiv:1807.06583, 2018.", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 104, + 429, + 506, + 445 + ], + "spans": [ + { + "bbox": [ + 104, + 429, + 506, + 445 + ], + "score": 1.0, + "content": "[46] S. Tian, S. Nair, F. Ebert, S. Dasari, B. Eysenbach, C. Finn, and S. Levine. Model-based visual", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 126, + 441, + 497, + 455 + ], + "spans": [ + { + "bbox": [ + 126, + 441, + 497, + 455 + ], + "score": 1.0, + "content": "planning with self-supervised functional distances. arXiv preprint arXiv:2012.15373, 2020.", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 106, + 460, + 506, + 474 + ], + "spans": [ + { + "bbox": [ + 106, + 460, + 506, + 474 + ], + "score": 1.0, + "content": "[47] S. Young, D. Gandhi, S. Tulsiani, A. Gupta, P. Abbeel, and L. Pinto. Visual imitation made", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 124, + 470, + 315, + 486 + ], + "spans": [ + { + "bbox": [ + 124, + 470, + 315, + 486 + ], + "score": 1.0, + "content": "easy. arXiv e-prints, pages arXiv–2008, 2020.", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 490, + 505, + 504 + ], + "spans": [ + { + "bbox": [ + 105, + 490, + 505, + 504 + ], + "score": 1.0, + "content": "[48] E. Johns. 
Coarse-to-fine imitation learning: Robot manipulation from a single demonstration.", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 127, + 500, + 291, + 514 + ], + "spans": [ + { + "bbox": [ + 127, + 500, + 291, + 514 + ], + "score": 1.0, + "content": "arXiv preprint arXiv:2105.06411, 2021.", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 106, + 520, + 505, + 532 + ], + "spans": [ + { + "bbox": [ + 106, + 520, + 505, + 532 + ], + "score": 1.0, + "content": "[49] A. Mandlekar, F. Ramos, B. Boots, S. Savarese, L. Fei-Fei, A. Garg, and D. Fox. Iris: Implicit", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 127, + 531, + 505, + 543 + ], + "spans": [ + { + "bbox": [ + 127, + 531, + 505, + 543 + ], + "score": 1.0, + "content": "reinforcement without interaction at scale for learning control from offline robot manipulation", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 126, + 540, + 506, + 555 + ], + "spans": [ + { + "bbox": [ + 126, + 540, + 506, + 555 + ], + "score": 1.0, + "content": "data. In 2020 IEEE International Conference on Robotics and Automation (ICRA), pages", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 127, + 552, + 230, + 564 + ], + "spans": [ + { + "bbox": [ + 127, + 552, + 230, + 564 + ], + "score": 1.0, + "content": "4414–4420. IEEE, 2020.", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 106, + 570, + 506, + 584 + ], + "spans": [ + { + "bbox": [ + 106, + 570, + 506, + 584 + ], + "score": 1.0, + "content": "[50] A. Mandlekar, D. Xu, R. MartΒ΄Δ±n-MartΒ΄Δ±n, S. Savarese, and L. Fei-Fei. 
Learning to generalize", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 127, + 581, + 375, + 595 + ], + "spans": [ + { + "bbox": [ + 127, + 581, + 375, + 595 + ], + "score": 1.0, + "content": "across long-horizon tasks from human demonstrations, 2020.", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 106, + 600, + 505, + 614 + ], + "spans": [ + { + "bbox": [ + 106, + 600, + 505, + 614 + ], + "score": 1.0, + "content": "[51] S. Lange, T. Gabel, and M. Riedmiller. Batch reinforcement learning. In Reinforcement learn-", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 125, + 610, + 266, + 626 + ], + "spans": [ + { + "bbox": [ + 125, + 610, + 266, + 626 + ], + "score": 1.0, + "content": "ing, pages 45–73. Springer, 2012.", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 106, + 629, + 505, + 643 + ], + "spans": [ + { + "bbox": [ + 106, + 629, + 505, + 643 + ], + "score": 1.0, + "content": "[52] S. Fujimoto, D. Meger, and D. Precup. Off-policy deep reinforcement learning without explo-", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 126, + 640, + 321, + 654 + ], + "spans": [ + { + "bbox": [ + 126, + 640, + 321, + 654 + ], + "score": 1.0, + "content": "ration. arXiv preprint arXiv:1812.02900, 2018.", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 106, + 659, + 506, + 673 + ], + "spans": [ + { + "bbox": [ + 106, + 659, + 506, + 673 + ], + "score": 1.0, + "content": "[53] A. Kumar, J. Fu, M. Soh, G. Tucker, and S. Levine. Stabilizing off-policy q-learning via", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 127, + 671, + 506, + 685 + ], + "spans": [ + { + "bbox": [ + 127, + 671, + 506, + 685 + ], + "score": 1.0, + "content": "bootstrapping error reduction. 
In Advances in Neural Information Processing Systems, pages", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 127, + 681, + 214, + 695 + ], + "spans": [ + { + "bbox": [ + 127, + 681, + 214, + 695 + ], + "score": 1.0, + "content": "11761–11771, 2019.", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 106, + 699, + 506, + 713 + ], + "spans": [ + { + "bbox": [ + 106, + 699, + 506, + 713 + ], + "score": 1.0, + "content": "[54] X. B. Peng, A. Kumar, G. Zhang, and S. Levine. Advantage-weighted regression: Simple and", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 127, + 711, + 464, + 725 + ], + "spans": [ + { + "bbox": [ + 127, + 711, + 464, + 725 + ], + "score": 1.0, + "content": "scalable off-policy reinforcement learning. arXiv preprint arXiv:1910.00177, 2019.", + "type": "text" + } + ], + "index": 45 + } + ], + "index": 22.5 + } + ], + "page_idx": 10, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 300, + 741, + 311, + 751 + ], + "lines": [ + { + "bbox": [ + 299, + 740, + 312, + 754 + ], + "spans": [ + { + "bbox": [ + 299, + 740, + 312, + 754 + ], + "score": 1.0, + "content": "11", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "list", + "bbox": [ + 105, + 56, + 507, + 728 + ], + "lines": [], + "index": 22.5, + "bbox_fs": [ + 104, + 72, + 507, + 725 + ], + "lines_deleted": true + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 105, + 71, + 507, + 491 + ], + "lines": [ + { + "bbox": [ + 105, + 72, + 505, + 87 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 505, + 87 + ], + "score": 1.0, + "content": "[55] N. Jaques, A. Ghandeharioun, J. H. Shen, C. Ferguson, A. Lapedriza, N. Jones, S. Gu, and", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 126, + 83, + 506, + 99 + ], + "spans": [ + { + "bbox": [ + 126, + 83, + 506, + 99 + ], + "score": 1.0, + "content": "R. Picard. 
Way off-policy batch deep reinforcement learning of implicit human preferences in", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 127, + 94, + 322, + 109 + ], + "spans": [ + { + "bbox": [ + 127, + 94, + 322, + 109 + ], + "score": 1.0, + "content": "dialog. arXiv preprint arXiv:1907.00456, 2019.", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 113, + 505, + 128 + ], + "spans": [ + { + "bbox": [ + 105, + 113, + 505, + 128 + ], + "score": 1.0, + "content": "[56] A. Nair, M. Dalal, A. Gupta, and S. Levine. Accelerating online reinforcement learning with", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 127, + 124, + 355, + 138 + ], + "spans": [ + { + "bbox": [ + 127, + 124, + 355, + 138 + ], + "score": 1.0, + "content": "offline datasets. arXiv preprint arXiv:2006.09359, 2020.", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 142, + 505, + 157 + ], + "spans": [ + { + "bbox": [ + 105, + 142, + 505, + 157 + ], + "score": 1.0, + "content": "[57] R. Fakoor, J. Mueller, P. Chaudhari, and A. J. Smola. Continuous doubly constrained batch", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 126, + 155, + 387, + 168 + ], + "spans": [ + { + "bbox": [ + 126, + 155, + 387, + 168 + ], + "score": 1.0, + "content": "reinforcement learning. arXiv preprint arXiv:2102.09225, 2021.", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 172, + 506, + 187 + ], + "spans": [ + { + "bbox": [ + 105, + 172, + 506, + 187 + ], + "score": 1.0, + "content": "[58] T. Yu, G. Thomas, L. Yu, S. Ermon, J. Zou, S. Levine, C. Finn, and T. Ma. Mopo: Model-based", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 127, + 184, + 402, + 198 + ], + "spans": [ + { + "bbox": [ + 127, + 184, + 402, + 198 + ], + "score": 1.0, + "content": "offline policy optimization. 
arXiv preprint arXiv:2005.13239, 2020.", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 203, + 505, + 216 + ], + "spans": [ + { + "bbox": [ + 105, + 203, + 505, + 216 + ], + "score": 1.0, + "content": "[59] R. Kidambi, A. Rajeswaran, P. Netrapalli, and T. Joachims. Morel: Model-based offline rein-", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 126, + 213, + 371, + 227 + ], + "spans": [ + { + "bbox": [ + 126, + 213, + 371, + 227 + ], + "score": 1.0, + "content": "forcement learning. arXiv preprint arXiv:2005.05951, 2020.", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 232, + 506, + 248 + ], + "spans": [ + { + "bbox": [ + 105, + 232, + 506, + 248 + ], + "score": 1.0, + "content": "[60] R. Rafailov, T. Yu, A. Rajeswaran, and C. Finn. Offline reinforcement learning from images", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 127, + 244, + 468, + 257 + ], + "spans": [ + { + "bbox": [ + 127, + 244, + 468, + 257 + ], + "score": 1.0, + "content": "with latent space models. Learning for Decision Making and Control (L4DC), 2021.", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 262, + 505, + 276 + ], + "spans": [ + { + "bbox": [ + 106, + 262, + 505, + 276 + ], + "score": 1.0, + "content": "[61] D. Precup, R. S. Sutton, and S. Dasgupta. Off-policy temporal-difference learning with func-", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 126, + 273, + 341, + 287 + ], + "spans": [ + { + "bbox": [ + 126, + 273, + 341, + 287 + ], + "score": 1.0, + "content": "tion approximation. In ICML, pages 417–424, 2001.", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 291, + 506, + 307 + ], + "spans": [ + { + "bbox": [ + 105, + 291, + 506, + 307 + ], + "score": 1.0, + "content": "[62] C. Voloshin, H. M. Le, N. Jiang, and Y. Yue. 
Empirical study of off-policy policy evaluation", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 127, + 303, + 402, + 317 + ], + "spans": [ + { + "bbox": [ + 127, + 303, + 402, + 317 + ], + "score": 1.0, + "content": "for reinforcement learning. arXiv preprint arXiv:1911.06854, 2019.", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 322, + 505, + 335 + ], + "spans": [ + { + "bbox": [ + 106, + 322, + 505, + 335 + ], + "score": 1.0, + "content": "[63] O. Nachum, Y. Chow, B. Dai, and L. Li. Dualdice: Behavior-agnostic estimation of discounted", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 126, + 331, + 506, + 348 + ], + "spans": [ + { + "bbox": [ + 126, + 331, + 506, + 348 + ], + "score": 1.0, + "content": "stationary distribution corrections. In Advances in Neural Information Processing Systems,", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 127, + 345, + 228, + 357 + ], + "spans": [ + { + "bbox": [ + 127, + 345, + 228, + 357 + ], + "score": 1.0, + "content": "pages 2315–2325, 2019.", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 106, + 363, + 505, + 376 + ], + "spans": [ + { + "bbox": [ + 106, + 363, + 505, + 376 + ], + "score": 1.0, + "content": "[64] R. Qin, S. Gao, X. Zhang, Z. Xu, S. Huang, Z. Li, W. Zhang, and Y. Yu. Neorl: A near", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 127, + 374, + 505, + 387 + ], + "spans": [ + { + "bbox": [ + 127, + 374, + 505, + 387 + ], + "score": 1.0, + "content": "real-world benchmark for offline reinforcement learning. 
arXiv preprint arXiv:2102.00714,", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 127, + 384, + 154, + 397 + ], + "spans": [ + { + "bbox": [ + 127, + 384, + 154, + 397 + ], + "score": 1.0, + "content": "2021.", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 402, + 505, + 419 + ], + "spans": [ + { + "bbox": [ + 105, + 402, + 505, + 419 + ], + "score": 1.0, + "content": "[65] T. Haarnoja, H. Tang, P. Abbeel, and S. Levine. Reinforcement learning with deep energy-", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 126, + 414, + 454, + 428 + ], + "spans": [ + { + "bbox": [ + 126, + 414, + 454, + 428 + ], + "score": 1.0, + "content": "based policies. In International Conference on Machine Learning (ICML), 2017.", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 432, + 506, + 447 + ], + "spans": [ + { + "bbox": [ + 105, + 432, + 506, + 447 + ], + "score": 1.0, + "content": "[66] X. B. Peng, A. Kumar, G. Zhang, and S. Levine. Advantage-weighted regression: Simple and", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 126, + 444, + 464, + 458 + ], + "spans": [ + { + "bbox": [ + 126, + 444, + 464, + 458 + ], + "score": 1.0, + "content": "scalable off-policy reinforcement learning. arXiv preprint arXiv:1910.00177, 2019.", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 462, + 506, + 478 + ], + "spans": [ + { + "bbox": [ + 105, + 462, + 506, + 478 + ], + "score": 1.0, + "content": "[67] S. Fujimoto and S. S. Gu. A minimalist approach to offline reinforcement learning. 
arXiv", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 126, + 475, + 266, + 486 + ], + "spans": [ + { + "bbox": [ + 126, + 475, + 266, + 486 + ], + "score": 1.0, + "content": "preprint arXiv:2106.06860, 2021.", + "type": "text" + } + ], + "index": 28 + } + ], + "index": 14 + } + ], + "page_idx": 11, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 300, + 741, + 311, + 750 + ], + "lines": [ + { + "bbox": [ + 299, + 740, + 312, + 754 + ], + "spans": [ + { + "bbox": [ + 299, + 740, + 312, + 754 + ], + "score": 1.0, + "content": "12", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "list", + "bbox": [ + 105, + 71, + 507, + 491 + ], + "lines": [], + "index": 14, + "bbox_fs": [ + 105, + 72, + 506, + 486 + ], + "lines_deleted": true + } + ] + } + ], + "_backend": "pipeline", + "_version_name": "2.2.2" +} \ No newline at end of file diff --git a/parse/train/fy4ZBWxYbIo/fy4ZBWxYbIo_model.json b/parse/train/fy4ZBWxYbIo/fy4ZBWxYbIo_model.json new file mode 100644 index 0000000000000000000000000000000000000000..2920d1a890bdedecf242f729934dcf3f5a252fac --- /dev/null +++ b/parse/train/fy4ZBWxYbIo/fy4ZBWxYbIo_model.json @@ -0,0 +1,19561 @@ +[ + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 397, + 577, + 1302, + 577, + 1302, + 1184, + 397, + 1184 + ], + "score": 0.982 + }, + { + "category_id": 1, + "poly": [ + 299, + 1325, + 822, + 1325, + 822, + 1778, + 299, + 1778 + ], + "score": 0.975 + }, + { + "category_id": 3, + "poly": [ + 854, + 1282, + 1377, + 1282, + 1377, + 1587, + 854, + 1587 + ], + "score": 0.972 + }, + { + "category_id": 1, + "poly": [ + 298, + 1778, + 1405, + 1778, + 1405, + 1990, + 298, + 1990 + ], + "score": 0.969 + }, + { + "category_id": 0, + "poly": [ + 402, + 225, + 1299, + 225, + 1299, + 335, + 402, + 335 + ], + "score": 0.962 + }, + { + "category_id": 4, + "poly": [ + 846, + 1609, + 1401, + 1609, + 1401, + 1776, + 846, + 1776 + ], + "score": 0.961 + }, 
+ { + "category_id": 1, + "poly": [ + 373, + 389, + 1330, + 389, + 1330, + 487, + 373, + 487 + ], + "score": 0.936 + }, + { + "category_id": 1, + "poly": [ + 401, + 1209, + 931, + 1209, + 931, + 1240, + 401, + 1240 + ], + "score": 0.909 + }, + { + "category_id": 0, + "poly": [ + 300, + 1278, + 531, + 1278, + 531, + 1313, + 300, + 1313 + ], + "score": 0.901 + }, + { + "category_id": 2, + "poly": [ + 299, + 2033, + 931, + 2033, + 931, + 2061, + 299, + 2061 + ], + "score": 0.683 + }, + { + "category_id": 1, + "poly": [ + 299, + 2033, + 931, + 2033, + 931, + 2061, + 299, + 2061 + ], + "score": 0.172 + }, + { + "category_id": 13, + "poly": [ + 1069, + 391, + 1141, + 391, + 1141, + 424, + 1069, + 424 + ], + "score": 0.56, + "latex": "\\mathbf { F i n n ^ { 2 } }" + }, + { + "category_id": 13, + "poly": [ + 890, + 391, + 959, + 391, + 959, + 423, + 890, + 423 + ], + "score": 0.32, + "latex": "\\mathbf { T i a n } ^ { 1 }" + }, + { + "category_id": 15, + "poly": [ + 1039.0, + 1284.0, + 1116.0, + 1284.0, + 1116.0, + 1310.0, + 1039.0, + 1310.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1093.0, + 1309.0, + 1153.0, + 1309.0, + 1153.0, + 1324.0, + 1093.0, + 1324.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1018.0, + 1310.0, + 1025.0, + 1310.0, + 1025.0, + 1320.0, + 1018.0, + 1320.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1002.0, + 1313.0, + 1022.0, + 1313.0, + 1022.0, + 1397.0, + 1002.0, + 1397.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1093.0, + 1321.0, + 1168.0, + 1321.0, + 1168.0, + 1336.0, + 1093.0, + 1336.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1154.0, + 1351.0, + 1210.0, + 1351.0, + 1210.0, + 1370.0, + 1154.0, + 1370.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1153.0, + 1362.0, + 1211.0, + 1362.0, + 1211.0, + 1381.0, + 1153.0, + 1381.0 + ], + "score": 
1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1145.0, + 1375.0, + 1218.0, + 1375.0, + 1218.0, + 1391.0, + 1145.0, + 1391.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1149.0, + 1396.0, + 1158.0, + 1396.0, + 1158.0, + 1404.0, + 1149.0, + 1404.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1100.0, + 1399.0, + 1183.0, + 1399.0, + 1183.0, + 1418.0, + 1100.0, + 1418.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1020.0, + 1422.0, + 1132.0, + 1422.0, + 1132.0, + 1440.0, + 1020.0, + 1440.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 876.0, + 1442.0, + 968.0, + 1442.0, + 968.0, + 1464.0, + 876.0, + 1464.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 868.0, + 1456.0, + 973.0, + 1456.0, + 973.0, + 1483.0, + 868.0, + 1483.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1027.0, + 1454.0, + 1112.0, + 1454.0, + 1112.0, + 1481.0, + 1027.0, + 1481.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 878.0, + 1475.0, + 964.0, + 1475.0, + 964.0, + 1498.0, + 878.0, + 1498.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 995.0, + 1474.0, + 1023.0, + 1474.0, + 1023.0, + 1569.0, + 995.0, + 1569.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1043.0, + 1474.0, + 1130.0, + 1474.0, + 1130.0, + 1504.0, + 1043.0, + 1504.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1307.0, + 1481.0, + 1375.0, + 1481.0, + 1375.0, + 1497.0, + 1307.0, + 1497.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1045.0, + 1499.0, + 1111.0, + 1499.0, + 1111.0, + 1514.0, + 1045.0, + 1514.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1142.0, + 1495.0, + 1235.0, + 1495.0, + 1235.0, + 1513.0, + 1142.0, + 1513.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1160.0, + 1522.0, + 1214.0, + 1522.0, + 1214.0, + 1539.0, + 1160.0, + 1539.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1153.0, + 1546.0, + 1221.0, + 1546.0, + 1221.0, + 1562.0, + 1153.0, + 1562.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1077.0, + 1572.0, + 1149.0, + 1572.0, + 1149.0, + 1587.0, + 1077.0, + 1587.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1307.0, + 1570.0, + 1381.0, + 1570.0, + 1381.0, + 1589.0, + 1307.0, + 1589.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1303.0, + 1303.5, + 1371.0, + 1303.5, + 1371.0, + 1320.0, + 1303.0, + 1320.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1159.0, + 1533.5, + 1213.0, + 1533.5, + 1213.0, + 1550.0, + 1159.0, + 1550.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 398.0, + 222.0, + 1300.0, + 222.0, + 1300.0, + 281.0, + 398.0, + 281.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 594.0, + 280.0, + 1107.0, + 280.0, + 1107.0, + 341.0, + 594.0, + 341.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 846.0, + 1609.0, + 1406.0, + 1609.0, + 1406.0, + 1641.0, + 846.0, + 1641.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 845.0, + 1637.0, + 1405.0, + 1637.0, + 1405.0, + 1670.0, + 845.0, + 1670.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 846.0, + 1666.0, + 1404.0, + 1666.0, + 1404.0, + 1696.0, + 846.0, + 1696.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 845.0, + 1694.0, + 1404.0, + 1694.0, + 1404.0, + 1723.0, + 845.0, + 1723.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 845.0, + 1720.0, + 1404.0, + 1720.0, + 1404.0, + 1751.0, + 845.0, + 1751.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 845.0, + 1749.0, + 1240.0, + 1749.0, + 1240.0, + 1779.0, + 845.0, + 1779.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1273.0, + 536.0, + 1273.0, + 536.0, + 1320.0, + 292.0, + 1320.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 2031.0, + 933.0, + 2031.0, + 933.0, + 2066.0, + 296.0, + 2066.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 394.0, + 577.0, + 1305.0, + 577.0, + 1305.0, + 615.0, + 394.0, + 615.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 609.0, + 1305.0, + 609.0, + 1305.0, + 645.0, + 393.0, + 645.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 392.0, + 639.0, + 1305.0, + 639.0, + 1305.0, + 676.0, + 392.0, + 676.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 669.0, + 1305.0, + 669.0, + 1305.0, + 706.0, + 393.0, + 706.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 698.0, + 1304.0, + 698.0, + 1304.0, + 737.0, + 393.0, + 737.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 731.0, + 1305.0, + 731.0, + 1305.0, + 764.0, + 393.0, + 764.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 395.0, + 762.0, + 1306.0, + 762.0, + 1306.0, + 794.0, + 395.0, + 794.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 395.0, + 791.0, + 1305.0, + 791.0, + 1305.0, + 826.0, + 395.0, + 826.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 394.0, + 822.0, + 1306.0, + 822.0, + 1306.0, + 857.0, + 394.0, + 857.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 396.0, + 852.0, + 1305.0, + 852.0, + 1305.0, + 885.0, + 396.0, + 885.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 
15, + "poly": [ + 393.0, + 882.0, + 1306.0, + 882.0, + 1306.0, + 916.0, + 393.0, + 916.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 394.0, + 913.0, + 1305.0, + 913.0, + 1305.0, + 946.0, + 394.0, + 946.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 395.0, + 944.0, + 1305.0, + 944.0, + 1305.0, + 976.0, + 395.0, + 976.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 973.0, + 1306.0, + 973.0, + 1306.0, + 1009.0, + 393.0, + 1009.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 1003.0, + 1306.0, + 1003.0, + 1306.0, + 1037.0, + 393.0, + 1037.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 1034.0, + 1305.0, + 1034.0, + 1305.0, + 1067.0, + 393.0, + 1067.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 394.0, + 1064.0, + 1305.0, + 1064.0, + 1305.0, + 1098.0, + 394.0, + 1098.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 1094.0, + 1305.0, + 1094.0, + 1305.0, + 1129.0, + 393.0, + 1129.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 395.0, + 1126.0, + 1305.0, + 1126.0, + 1305.0, + 1158.0, + 395.0, + 1158.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 395.0, + 1155.0, + 1298.0, + 1155.0, + 1298.0, + 1188.0, + 395.0, + 1188.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1321.0, + 826.0, + 1321.0, + 826.0, + 1357.0, + 295.0, + 1357.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1353.0, + 827.0, + 1353.0, + 827.0, + 1389.0, + 295.0, + 1389.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1383.0, + 828.0, + 1383.0, + 828.0, + 1416.0, + 293.0, + 1416.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 
1412.0, + 826.0, + 1412.0, + 826.0, + 1447.0, + 294.0, + 1447.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1444.0, + 828.0, + 1444.0, + 828.0, + 1478.0, + 294.0, + 1478.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1473.0, + 828.0, + 1473.0, + 828.0, + 1508.0, + 293.0, + 1508.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1506.0, + 826.0, + 1506.0, + 826.0, + 1537.0, + 294.0, + 1537.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1535.0, + 828.0, + 1535.0, + 828.0, + 1570.0, + 294.0, + 1570.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1565.0, + 828.0, + 1565.0, + 828.0, + 1599.0, + 293.0, + 1599.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1595.0, + 828.0, + 1595.0, + 828.0, + 1630.0, + 294.0, + 1630.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1625.0, + 828.0, + 1625.0, + 828.0, + 1661.0, + 293.0, + 1661.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1655.0, + 827.0, + 1655.0, + 827.0, + 1691.0, + 294.0, + 1691.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1687.0, + 827.0, + 1687.0, + 827.0, + 1718.0, + 296.0, + 1718.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1717.0, + 827.0, + 1717.0, + 827.0, + 1751.0, + 294.0, + 1751.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1749.0, + 826.0, + 1749.0, + 826.0, + 1779.0, + 295.0, + 1779.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1776.0, + 1405.0, + 1776.0, + 1405.0, + 1812.0, + 293.0, + 1812.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1808.0, + 1405.0, + 1808.0, + 
1405.0, + 1843.0, + 295.0, + 1843.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1836.0, + 1405.0, + 1836.0, + 1405.0, + 1874.0, + 293.0, + 1874.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1868.0, + 1405.0, + 1868.0, + 1405.0, + 1904.0, + 293.0, + 1904.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1899.0, + 1405.0, + 1899.0, + 1405.0, + 1933.0, + 293.0, + 1933.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1930.0, + 1406.0, + 1930.0, + 1406.0, + 1964.0, + 295.0, + 1964.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1960.0, + 1302.0, + 1960.0, + 1302.0, + 1996.0, + 293.0, + 1996.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 368.0, + 388.0, + 889.0, + 388.0, + 889.0, + 429.0, + 368.0, + 429.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 960.0, + 388.0, + 1068.0, + 388.0, + 1068.0, + 429.0, + 960.0, + 429.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1142.0, + 388.0, + 1335.0, + 388.0, + 1335.0, + 429.0, + 1142.0, + 429.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 482.0, + 420.0, + 907.0, + 420.0, + 907.0, + 460.0, + 482.0, + 460.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 952.0, + 424.0, + 1218.0, + 424.0, + 1218.0, + 460.0, + 952.0, + 460.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 533.0, + 456.0, + 1167.0, + 456.0, + 1167.0, + 491.0, + 533.0, + 491.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 1202.0, + 936.0, + 1202.0, + 936.0, + 1250.0, + 393.0, + 1250.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 2031.0, + 933.0, + 2031.0, + 933.0, + 2066.0, + 296.0, + 2066.0 + ], 
+ "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 0, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 298, + 459, + 1404, + 459, + 1404, + 856, + 298, + 856 + ], + "score": 0.985 + }, + { + "category_id": 1, + "poly": [ + 297, + 868, + 1404, + 868, + 1404, + 1145, + 297, + 1145 + ], + "score": 0.983 + }, + { + "category_id": 1, + "poly": [ + 297, + 201, + 1404, + 201, + 1404, + 448, + 297, + 448 + ], + "score": 0.979 + }, + { + "category_id": 1, + "poly": [ + 297, + 1196, + 1404, + 1196, + 1404, + 1410, + 297, + 1410 + ], + "score": 0.977 + }, + { + "category_id": 1, + "poly": [ + 298, + 1853, + 1403, + 1853, + 1403, + 2008, + 298, + 2008 + ], + "score": 0.972 + }, + { + "category_id": 1, + "poly": [ + 297, + 1712, + 1404, + 1712, + 1404, + 1840, + 297, + 1840 + ], + "score": 0.947 + }, + { + "category_id": 1, + "poly": [ + 295, + 1421, + 1405, + 1421, + 1405, + 1636, + 295, + 1636 + ], + "score": 0.943 + }, + { + "category_id": 0, + "poly": [ + 297, + 1157, + 967, + 1157, + 967, + 1193, + 297, + 1193 + ], + "score": 0.932 + }, + { + "category_id": 8, + "poly": [ + 280, + 1635, + 1354, + 1635, + 1354, + 1694, + 280, + 1694 + ], + "score": 0.813 + }, + { + "category_id": 2, + "poly": [ + 841, + 2061, + 859, + 2061, + 859, + 2085, + 841, + 2085 + ], + "score": 0.719 + }, + { + "category_id": 9, + "poly": [ + 1388, + 1648, + 1418, + 1648, + 1418, + 1680, + 1388, + 1680 + ], + "score": 0.58 + }, + { + "category_id": 2, + "poly": [ + 841, + 2061, + 859, + 2061, + 859, + 2085, + 841, + 2085 + ], + "score": 0.168 + }, + { + "category_id": 14, + "poly": [ + 278, + 1631, + 1380, + 1631, + 1380, + 1695, + 278, + 1695 + ], + "score": 0.93, + "latex": "\\begin{array} { r l } { \\underset { \\theta } { \\mathrm { m i n } } \\ : \\ : \\ : } & { { } \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : 
\\ : \\ : \\ : \\ : \\ : \\ : } \\\\ { \\mathrm { m i n } \\ : \\ : \\ : \\ : \\ : } & { { } \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : \\ : } & { \\mathrm { ~ \\ : ~ \\ : \\ : \\ : \\ : } \\ : \\ : \\ : } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 553, + 1454, + 643, + 1454, + 643, + 1487, + 553, + 1487 + ], + "score": 0.93, + "latex": "\\pi _ { \\phi } ( \\mathbf { a } | \\mathbf { s } )" + }, + { + "category_id": 13, + "poly": [ + 1294, + 1574, + 1393, + 1574, + 1393, + 1608, + 1294, + 1608 + ], + "score": 0.92, + "latex": "Q _ { \\boldsymbol { \\theta } } ( \\mathbf { s } , \\mathbf { a } )" + }, + { + "category_id": 13, + "poly": [ + 433, + 1574, + 509, + 1574, + 509, + 1607, + 433, + 1607 + ], + "score": 0.92, + "latex": "\\mu ( \\mathbf { a } | \\mathbf { s } )" + }, + { + "category_id": 13, + "poly": [ + 1005, + 1746, + 1081, + 1746, + 1081, + 1779, + 1005, + 1779 + ], + "score": 0.92, + "latex": "\\mu ( \\mathbf { a } | \\mathbf { s } )" + }, + { + "category_id": 13, + "poly": [ + 297, + 1226, + 374, + 1226, + 374, + 1259, + 297, + 1259 + ], + "score": 0.92, + "latex": "r ( s , a )" + }, + { + "category_id": 13, + "poly": [ + 1110, + 1454, + 1349, + 1454, + 1349, + 1489, + 1110, + 1489 + ], + "score": 0.92, + "latex": "\\begin{array} { r } { \\mathbb { E } _ { \\mathbf { s } \\sim \\mathcal { D } , \\mathbf { a } \\sim \\pi _ { \\phi } } \\left[ Q _ { \\theta } ( \\mathbf { s } , \\mathbf { a } ) \\right] } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 359, + 1513, + 442, + 1513, + 442, + 1547, + 359, + 1547 + ], + "score": 0.92, + "latex": "{ \\mathcal { L } } _ { \\mathrm { T D } } ( \\theta )" + }, + { + "category_id": 13, + "poly": [ + 371, + 1712, + 490, + 1712, + 490, + 1747, + 371, + 1747 + ], + "score": 0.92, + "latex": "B ^ { \\pi } \\bar { Q } ( \\mathbf { s } , \\mathbf { a } )" + }, + { + "category_id": 13, + "poly": [ + 923, + 1514, + 983, + 1514, + 983, + 1547, + 923, + 1547 + ], 
+ "score": 0.91, + "latex": "{ \\mathcal { R } } ( \\theta )" + }, + { + "category_id": 13, + "poly": [ + 298, + 1744, + 672, + 1744, + 672, + 1781, + 298, + 1781 + ], + "score": 0.91, + "latex": "r ( \\mathbf { s } , \\mathbf { a } ) + \\gamma \\mathbb { E } _ { \\mathbf { a ^ { \\prime } } \\sim \\pi ( \\mathbf { a ^ { \\prime } } | \\mathbf { s ^ { \\prime } } ) } [ \\bar { Q } ( \\mathbf { s ^ { \\prime } } , \\mathbf { a ^ { \\prime } } ) ]" + }, + { + "category_id": 13, + "poly": [ + 1061, + 1193, + 1317, + 1193, + 1317, + 1231, + 1061, + 1231 + ], + "score": 0.91, + "latex": "\\begin{array} { r } { R = \\sum _ { t = 0 } ^ { \\infty } \\gamma ^ { t } r ( \\mathbf { s } _ { t } , \\mathbf { a } _ { t } ) } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 1302, + 1422, + 1400, + 1422, + 1400, + 1455, + 1302, + 1455 + ], + "score": 0.9, + "latex": "Q _ { \\boldsymbol { \\theta } } ( \\mathbf { s } , \\mathbf { a } )" + }, + { + "category_id": 13, + "poly": [ + 850, + 1256, + 1127, + 1256, + 1127, + 1290, + 850, + 1290 + ], + "score": 0.9, + "latex": "\\mathcal { D } = \\{ ( \\mathbf { s } , \\mathbf { a } , r ( \\mathbf { s } , \\mathbf { a } ) , \\mathbf { s } ^ { \\prime } ) \\}" + }, + { + "category_id": 13, + "poly": [ + 414, + 1781, + 503, + 1781, + 503, + 1813, + 414, + 1813 + ], + "score": 0.88, + "latex": "\\pi _ { \\phi } ( \\mathbf { a } | \\mathbf { s } )" + }, + { + "category_id": 13, + "poly": [ + 885, + 1289, + 974, + 1289, + 974, + 1320, + 885, + 1320 + ], + "score": 0.88, + "latex": "\\pi _ { \\beta } ( \\mathbf { a } | \\mathbf { s } )" + }, + { + "category_id": 13, + "poly": [ + 1244, + 1113, + 1301, + 1113, + 1301, + 1142, + 1244, + 1142 + ], + "score": 0.87, + "latex": "70 \\%" + }, + { + "category_id": 13, + "poly": [ + 857, + 1319, + 881, + 1319, + 881, + 1344, + 857, + 1344 + ], + "score": 0.82, + "latex": "\\mathcal { D }" + }, + { + "category_id": 13, + "poly": [ + 1056, + 1228, + 1119, + 1228, + 1119, + 1257, + 1056, + 1257 + 
], + "score": 0.7, + "latex": "( \\mathbf { s } , \\mathbf { a } )" + }, + { + "category_id": 13, + "poly": [ + 1239, + 1711, + 1405, + 1711, + 1405, + 1748, + 1239, + 1748 + ], + "score": 0.69, + "latex": "B ^ { \\pi } \\bar { Q } ( { \\bf s } , { \\bf a } ) : =" + }, + { + "category_id": 13, + "poly": [ + 1205, + 1711, + 1230, + 1711, + 1230, + 1747, + 1205, + 1747 + ], + "score": 0.54, + "latex": "\\bar { Q }" + }, + { + "category_id": 13, + "poly": [ + 1206, + 1546, + 1231, + 1546, + 1231, + 1576, + 1206, + 1576 + ], + "score": 0.39, + "latex": "\\mathrm { Q }" + }, + { + "category_id": 13, + "poly": [ + 1011, + 1455, + 1036, + 1455, + 1036, + 1485, + 1011, + 1485 + ], + "score": 0.33, + "latex": "\\mathrm { Q }" + }, + { + "category_id": 13, + "poly": [ + 380, + 1547, + 404, + 1547, + 404, + 1576, + 380, + 1576 + ], + "score": 0.29, + "latex": "\\mathrm { Q }" + }, + { + "category_id": 13, + "poly": [ + 871, + 1348, + 895, + 1348, + 895, + 1379, + 871, + 1379 + ], + "score": 0.26, + "latex": "\\mathrm { Q }" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1154.0, + 968.0, + 1154.0, + 968.0, + 1199.0, + 291.0, + 1199.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 838.0, + 2058.0, + 862.0, + 2058.0, + 862.0, + 2093.0, + 838.0, + 2093.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 838.0, + 2058.0, + 862.0, + 2058.0, + 862.0, + 2093.0, + 838.0, + 2093.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 458.0, + 1406.0, + 458.0, + 1406.0, + 496.0, + 292.0, + 496.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 492.0, + 1406.0, + 492.0, + 1406.0, + 527.0, + 293.0, + 527.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 520.0, + 1406.0, + 520.0, + 1406.0, + 558.0, + 292.0, + 558.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 550.0, + 1408.0, 
+ 550.0, + 1408.0, + 590.0, + 293.0, + 590.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 582.0, + 1406.0, + 582.0, + 1406.0, + 619.0, + 293.0, + 619.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 608.0, + 1405.0, + 608.0, + 1405.0, + 652.0, + 292.0, + 652.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 643.0, + 1405.0, + 643.0, + 1405.0, + 678.0, + 292.0, + 678.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 672.0, + 1405.0, + 672.0, + 1405.0, + 708.0, + 294.0, + 708.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 701.0, + 1406.0, + 701.0, + 1406.0, + 739.0, + 292.0, + 739.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 731.0, + 1405.0, + 731.0, + 1405.0, + 769.0, + 292.0, + 769.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 758.0, + 1405.0, + 758.0, + 1405.0, + 804.0, + 291.0, + 804.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 792.0, + 1406.0, + 792.0, + 1406.0, + 830.0, + 293.0, + 830.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 826.0, + 1228.0, + 826.0, + 1228.0, + 858.0, + 296.0, + 858.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 868.0, + 1404.0, + 868.0, + 1404.0, + 904.0, + 295.0, + 904.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 898.0, + 1404.0, + 898.0, + 1404.0, + 936.0, + 294.0, + 936.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 928.0, + 1404.0, + 928.0, + 1404.0, + 967.0, + 294.0, + 967.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 959.0, + 1406.0, + 959.0, + 1406.0, + 996.0, + 295.0, + 996.0 + ], + "score": 
1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 991.0, + 1406.0, + 991.0, + 1406.0, + 1027.0, + 294.0, + 1027.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1020.0, + 1404.0, + 1020.0, + 1404.0, + 1058.0, + 292.0, + 1058.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1052.0, + 1402.0, + 1052.0, + 1402.0, + 1085.0, + 296.0, + 1085.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1080.0, + 1405.0, + 1080.0, + 1405.0, + 1118.0, + 292.0, + 1118.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1111.0, + 1243.0, + 1111.0, + 1243.0, + 1147.0, + 294.0, + 1147.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1302.0, + 1111.0, + 1316.0, + 1111.0, + 1316.0, + 1147.0, + 1302.0, + 1147.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 203.0, + 1404.0, + 203.0, + 1404.0, + 237.0, + 295.0, + 237.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 234.0, + 1402.0, + 234.0, + 1402.0, + 264.0, + 296.0, + 264.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 261.0, + 1405.0, + 261.0, + 1405.0, + 300.0, + 291.0, + 300.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 293.0, + 1404.0, + 293.0, + 1404.0, + 328.0, + 292.0, + 328.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 323.0, + 1406.0, + 323.0, + 1406.0, + 361.0, + 294.0, + 361.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 355.0, + 1405.0, + 355.0, + 1405.0, + 388.0, + 295.0, + 388.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 385.0, + 1406.0, + 385.0, + 1406.0, + 417.0, + 292.0, + 417.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 296.0, + 418.0, + 959.0, + 418.0, + 959.0, + 449.0, + 296.0, + 449.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 286.0, + 1182.0, + 1060.0, + 1182.0, + 1060.0, + 1243.0, + 286.0, + 1243.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1318.0, + 1182.0, + 1414.0, + 1182.0, + 1414.0, + 1243.0, + 1318.0, + 1243.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 375.0, + 1224.0, + 1055.0, + 1224.0, + 1055.0, + 1263.0, + 375.0, + 1263.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1120.0, + 1224.0, + 1404.0, + 1224.0, + 1404.0, + 1263.0, + 1120.0, + 1263.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1251.0, + 849.0, + 1251.0, + 849.0, + 1293.0, + 291.0, + 1293.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1128.0, + 1251.0, + 1405.0, + 1251.0, + 1405.0, + 1293.0, + 1128.0, + 1293.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1285.0, + 884.0, + 1285.0, + 884.0, + 1323.0, + 294.0, + 1323.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 975.0, + 1285.0, + 1405.0, + 1285.0, + 1405.0, + 1323.0, + 975.0, + 1323.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1314.0, + 856.0, + 1314.0, + 856.0, + 1352.0, + 292.0, + 1352.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 882.0, + 1314.0, + 1406.0, + 1314.0, + 1406.0, + 1352.0, + 882.0, + 1352.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1346.0, + 870.0, + 1346.0, + 870.0, + 1380.0, + 292.0, + 1380.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 896.0, + 1346.0, + 1404.0, + 1346.0, + 1404.0, + 1380.0, + 896.0, + 1380.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, 
+ "poly": [ + 295.0, + 1378.0, + 1397.0, + 1378.0, + 1397.0, + 1412.0, + 295.0, + 1412.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1855.0, + 1404.0, + 1855.0, + 1404.0, + 1888.0, + 297.0, + 1888.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1885.0, + 1404.0, + 1885.0, + 1404.0, + 1919.0, + 296.0, + 1919.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1915.0, + 1405.0, + 1915.0, + 1405.0, + 1950.0, + 293.0, + 1950.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1942.0, + 1405.0, + 1942.0, + 1405.0, + 1982.0, + 293.0, + 1982.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1977.0, + 1404.0, + 1977.0, + 1404.0, + 2010.0, + 296.0, + 2010.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1709.0, + 370.0, + 1709.0, + 370.0, + 1749.0, + 294.0, + 1749.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 491.0, + 1709.0, + 1204.0, + 1709.0, + 1204.0, + 1749.0, + 491.0, + 1749.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1231.0, + 1709.0, + 1238.0, + 1709.0, + 1238.0, + 1749.0, + 1231.0, + 1749.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1743.0, + 297.0, + 1743.0, + 297.0, + 1783.0, + 294.0, + 1783.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 673.0, + 1743.0, + 1004.0, + 1743.0, + 1004.0, + 1783.0, + 673.0, + 1783.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1082.0, + 1743.0, + 1406.0, + 1743.0, + 1406.0, + 1783.0, + 1082.0, + 1783.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1778.0, + 413.0, + 1778.0, + 413.0, + 1812.0, + 295.0, + 1812.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ 
+ 504.0, + 1778.0, + 1404.0, + 1778.0, + 1404.0, + 1812.0, + 504.0, + 1812.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1808.0, + 1219.0, + 1808.0, + 1219.0, + 1841.0, + 294.0, + 1841.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1420.0, + 1301.0, + 1420.0, + 1301.0, + 1458.0, + 295.0, + 1458.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1401.0, + 1420.0, + 1405.0, + 1420.0, + 1405.0, + 1458.0, + 1401.0, + 1458.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1451.0, + 552.0, + 1451.0, + 552.0, + 1493.0, + 293.0, + 1493.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 644.0, + 1451.0, + 1010.0, + 1451.0, + 1010.0, + 1493.0, + 644.0, + 1493.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1037.0, + 1451.0, + 1109.0, + 1451.0, + 1109.0, + 1493.0, + 1037.0, + 1493.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1350.0, + 1451.0, + 1407.0, + 1451.0, + 1407.0, + 1493.0, + 1350.0, + 1493.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1483.0, + 1403.0, + 1483.0, + 1403.0, + 1517.0, + 295.0, + 1517.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1514.0, + 358.0, + 1514.0, + 358.0, + 1548.0, + 294.0, + 1548.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 443.0, + 1514.0, + 922.0, + 1514.0, + 922.0, + 1548.0, + 443.0, + 1548.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 984.0, + 1514.0, + 1405.0, + 1514.0, + 1405.0, + 1548.0, + 984.0, + 1548.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1542.0, + 379.0, + 1542.0, + 379.0, + 1579.0, + 293.0, + 1579.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 405.0, + 
1542.0, + 1205.0, + 1542.0, + 1205.0, + 1579.0, + 405.0, + 1579.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1232.0, + 1542.0, + 1407.0, + 1542.0, + 1407.0, + 1579.0, + 1232.0, + 1579.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1572.0, + 432.0, + 1572.0, + 432.0, + 1611.0, + 293.0, + 1611.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 510.0, + 1572.0, + 1293.0, + 1572.0, + 1293.0, + 1611.0, + 510.0, + 1611.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1394.0, + 1572.0, + 1407.0, + 1572.0, + 1407.0, + 1611.0, + 1394.0, + 1611.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1605.0, + 1243.0, + 1605.0, + 1243.0, + 1639.0, + 295.0, + 1639.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 1, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 5, + "poly": [ + 642, + 425, + 1383, + 425, + 1383, + 621, + 642, + 621 + ], + "score": 0.979, + "html": "
QuantitySupervised LearningConservative Offline RL
Test errorLoss L evaluated on test data,DtestPerformance of policy,J(Ο€)
Train errorLoss L evaluated on train data,DtrainObjective in Equations 2,1
OverfittingL(Dtrain) low,L(Dval) high,Dval is a validation set drawn i.i.d.as DtrainTraining objective in Equation l is ex- tremely low,low value of J(Ο€)
Underfittinghigh value of train error L(Dtrain)Training objective in Equation 1 is ex- tremely high,low value of J(Ο€)
" + }, + { + "category_id": 1, + "poly": [ + 297, + 1187, + 1405, + 1187, + 1405, + 1370, + 297, + 1370 + ], + "score": 0.977 + }, + { + "category_id": 1, + "poly": [ + 296, + 752, + 1405, + 752, + 1405, + 1117, + 296, + 1117 + ], + "score": 0.977 + }, + { + "category_id": 1, + "poly": [ + 299, + 1883, + 1403, + 1883, + 1403, + 2007, + 299, + 2007 + ], + "score": 0.972 + }, + { + "category_id": 1, + "poly": [ + 297, + 1628, + 1403, + 1628, + 1403, + 1871, + 297, + 1871 + ], + "score": 0.972 + }, + { + "category_id": 1, + "poly": [ + 298, + 1384, + 1100, + 1384, + 1100, + 1626, + 298, + 1626 + ], + "score": 0.97 + }, + { + "category_id": 1, + "poly": [ + 297, + 419, + 604, + 419, + 604, + 752, + 297, + 752 + ], + "score": 0.963 + }, + { + "category_id": 3, + "poly": [ + 1121, + 1400, + 1401, + 1400, + 1401, + 1621, + 1121, + 1621 + ], + "score": 0.941 + }, + { + "category_id": 0, + "poly": [ + 297, + 1139, + 1286, + 1139, + 1286, + 1177, + 297, + 1177 + ], + "score": 0.935 + }, + { + "category_id": 1, + "poly": [ + 292, + 202, + 1402, + 202, + 1402, + 266, + 292, + 266 + ], + "score": 0.919 + }, + { + "category_id": 1, + "poly": [ + 298, + 278, + 1400, + 278, + 1400, + 341, + 298, + 341 + ], + "score": 0.912 + }, + { + "category_id": 9, + "poly": [ + 1366, + 349, + 1400, + 349, + 1400, + 377, + 1366, + 377 + ], + "score": 0.812 + }, + { + "category_id": 8, + "poly": [ + 369, + 345, + 788, + 345, + 788, + 386, + 369, + 386 + ], + "score": 0.715 + }, + { + "category_id": 6, + "poly": [ + 626, + 628, + 1401, + 628, + 1401, + 741, + 626, + 741 + ], + "score": 0.701 + }, + { + "category_id": 2, + "poly": [ + 841, + 2061, + 858, + 2061, + 858, + 2085, + 841, + 2085 + ], + "score": 0.64 + }, + { + "category_id": 2, + "poly": [ + 841, + 2061, + 859, + 2061, + 859, + 2085, + 841, + 2085 + ], + "score": 0.39 + }, + { + "category_id": 1, + "poly": [ + 860, + 347, + 1152, + 347, + 1152, + 378, + 860, + 378 + ], + "score": 0.326 + }, + { + "category_id": 1, + "poly": [ + 626, + 
628, + 1401, + 628, + 1401, + 741, + 626, + 741 + ], + "score": 0.181 + }, + { + "category_id": 13, + "poly": [ + 297, + 419, + 373, + 419, + 373, + 453, + 297, + 453 + ], + "score": 0.95, + "latex": "J _ { \\mathcal { D } } ( \\pi )" + }, + { + "category_id": 13, + "poly": [ + 692, + 1884, + 888, + 1884, + 888, + 1918, + 692, + 1918 + ], + "score": 0.94, + "latex": "\\mathbb { E } _ { \\mathbf { s } , \\mathbf { a } \\sim \\mathcal { D } } [ Q _ { \\theta } ( \\mathbf { s } , \\mathbf { a } ) ]" + }, + { + "category_id": 13, + "poly": [ + 694, + 1778, + 891, + 1778, + 891, + 1812, + 694, + 1812 + ], + "score": 0.93, + "latex": "\\mathbb { E } _ { \\mathbf { s } , \\mathbf { a } \\sim \\mathcal { D } } [ Q _ { \\theta } ( \\mathbf { s } , \\mathbf { a } ) ]" + }, + { + "category_id": 13, + "poly": [ + 612, + 1688, + 689, + 1688, + 689, + 1720, + 612, + 1720 + ], + "score": 0.92, + "latex": "\\mu ( \\mathbf { a } | \\mathbf { s } )" + }, + { + "category_id": 13, + "poly": [ + 495, + 540, + 602, + 540, + 602, + 575, + 495, + 575 + ], + "score": 0.92, + "latex": "D ( \\pi , \\pi _ { \\beta } )" + }, + { + "category_id": 13, + "poly": [ + 296, + 812, + 761, + 812, + 761, + 848, + 296, + 848 + ], + "score": 0.92, + "latex": "\\begin{array} { r } { D _ { \\mathrm { C Q L } } ( p , \\bar { q } ) : = \\sum _ { \\mathbf { x } } p ( \\mathbf { x } ) ( p ( \\mathbf { x } ) / q ( \\mathbf { x } ) - \\bar { 1 } ) } \\end{array}" + }, + { + "category_id": 13, + "poly": [ + 298, + 1505, + 480, + 1505, + 480, + 1539, + 298, + 1539 + ], + "score": 0.92, + "latex": "( \\mathbf { s } , \\mathbf { a } , r , \\mathbf { s } ^ { \\prime } ) \\in \\mathcal { D }" + }, + { + "category_id": 13, + "poly": [ + 526, + 874, + 585, + 874, + 585, + 907, + 526, + 907 + ], + "score": 0.92, + "latex": "J ( \\pi )" + }, + { + "category_id": 13, + "poly": [ + 944, + 1718, + 1066, + 1718, + 1066, + 1750, + 944, + 1750 + ], + "score": 0.92, + "latex": "\\mathbf { a } \\sim \\pi ( \\cdot | \\mathbf { s } 
)" + }, + { + "category_id": 13, + "poly": [ + 1019, + 1248, + 1078, + 1248, + 1078, + 1281, + 1019, + 1281 + ], + "score": 0.91, + "latex": "J ( \\pi )" + }, + { + "category_id": 13, + "poly": [ + 604, + 1445, + 663, + 1445, + 663, + 1479, + 604, + 1479 + ], + "score": 0.91, + "latex": "J ( \\pi )" + }, + { + "category_id": 13, + "poly": [ + 1039, + 1884, + 1097, + 1884, + 1097, + 1917, + 1039, + 1917 + ], + "score": 0.86, + "latex": "J ( \\pi )" + }, + { + "category_id": 13, + "poly": [ + 954, + 566, + 1030, + 566, + 1030, + 590, + 954, + 590 + ], + "score": 0.86, + "latex": "\\mathcal { L } ( \\mathcal { D } _ { \\mathrm { t r a i n } } )" + }, + { + "category_id": 14, + "poly": [ + 363, + 342, + 790, + 342, + 790, + 391, + 363, + 391 + ], + "score": 0.85, + "latex": "\\pi ^ { * } : = \\arg \\operatorname* { m a x } _ { \\pi } ~ J _ { \\mathcal { D } } ( \\pi ) - \\alpha D ( \\pi , \\pi _ { \\beta } )" + }, + { + "category_id": 13, + "poly": [ + 1360, + 1950, + 1393, + 1950, + 1393, + 1980, + 1360, + 1980 + ], + "score": 0.85, + "latex": "\\pi _ { \\beta }" + }, + { + "category_id": 13, + "poly": [ + 711, + 1719, + 766, + 1719, + 766, + 1746, + 711, + 1746 + ], + "score": 0.85, + "latex": "\\sim \\mathcal { D }" + }, + { + "category_id": 13, + "poly": [ + 1277, + 545, + 1323, + 545, + 1323, + 569, + 1277, + 569 + ], + "score": 0.85, + "latex": "J ( \\pi )" + }, + { + "category_id": 13, + "poly": [ + 885, + 523, + 954, + 523, + 954, + 547, + 885, + 547 + ], + "score": 0.84, + "latex": "\\mathcal { L } ( \\mathcal { D } _ { \\mathrm { v a l } } )" + }, + { + "category_id": 13, + "poly": [ + 1004, + 525, + 1041, + 525, + 1041, + 545, + 1004, + 545 + ], + "score": 0.82, + "latex": "\\mathcal { D } _ { \\mathrm { v a l } }" + }, + { + "category_id": 13, + "poly": [ + 1001, + 547, + 1046, + 547, + 1046, + 567, + 1001, + 567 + ], + "score": 0.81, + "latex": "\\mathcal { D } _ { \\mathrm { t r a i n } }" + }, + { + "category_id": 13, + "poly": [ + 764, + 523, + 842, + 
523, + 842, + 547, + 764, + 547 + ], + "score": 0.81, + "latex": "\\mathcal { L } ( \\mathcal { D } _ { \\mathrm { t r a i n } } )" + }, + { + "category_id": 13, + "poly": [ + 1002, + 470, + 1042, + 470, + 1042, + 490, + 1002, + 490 + ], + "score": 0.8, + "latex": "\\mathcal { D } _ { \\mathrm { t e s t } }" + }, + { + "category_id": 13, + "poly": [ + 1284, + 588, + 1330, + 588, + 1330, + 612, + 1284, + 612 + ], + "score": 0.77, + "latex": "J ( \\pi )" + }, + { + "category_id": 13, + "poly": [ + 591, + 1886, + 615, + 1886, + 615, + 1917, + 591, + 1917 + ], + "score": 0.74, + "latex": "\\varrho" + }, + { + "category_id": 13, + "poly": [ + 1184, + 1950, + 1205, + 1950, + 1205, + 1973, + 1184, + 1973 + ], + "score": 0.74, + "latex": "\\pi" + }, + { + "category_id": 13, + "poly": [ + 390, + 541, + 416, + 541, + 416, + 569, + 390, + 569 + ], + "score": 0.74, + "latex": "\\mathcal { D }" + }, + { + "category_id": 13, + "poly": [ + 497, + 455, + 520, + 455, + 520, + 478, + 497, + 478 + ], + "score": 0.73, + "latex": "\\pi" + }, + { + "category_id": 13, + "poly": [ + 1011, + 491, + 1056, + 491, + 1056, + 511, + 1011, + 511 + ], + "score": 0.69, + "latex": "{ \\mathcal { D } } _ { \\operatorname { t r a i n } }" + }, + { + "category_id": 13, + "poly": [ + 1381, + 762, + 1401, + 762, + 1401, + 780, + 1381, + 780 + ], + "score": 0.67, + "latex": "\\pi" + }, + { + "category_id": 13, + "poly": [ + 1256, + 470, + 1302, + 470, + 1302, + 492, + 1256, + 492 + ], + "score": 0.59, + "latex": "J ( \\pi )" + }, + { + "category_id": 13, + "poly": [ + 806, + 470, + 823, + 470, + 823, + 488, + 806, + 488 + ], + "score": 0.41, + "latex": "\\mathcal { L }" + }, + { + "category_id": 13, + "poly": [ + 616, + 1947, + 641, + 1947, + 641, + 1977, + 616, + 1977 + ], + "score": 0.3, + "latex": "\\mathrm { Q }" + }, + { + "category_id": 13, + "poly": [ + 741, + 1659, + 766, + 1659, + 766, + 1689, + 741, + 1689 + ], + "score": 0.29, + "latex": "\\mathbf { Q }" + }, + { + "category_id": 13, + "poly": 
[ + 805, + 490, + 822, + 490, + 822, + 509, + 805, + 509 + ], + "score": 0.28, + "latex": "\\mathcal { L }" + }, + { + "category_id": 15, + "poly": [ + 1180.0, + 1395.0, + 1298.0, + 1395.0, + 1298.0, + 1433.0, + 1180.0, + 1433.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1260.0, + 1430.0, + 1359.0, + 1430.0, + 1359.0, + 1459.0, + 1260.0, + 1459.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1122.0, + 1448.0, + 1145.0, + 1448.0, + 1145.0, + 1568.0, + 1122.0, + 1568.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1262.0, + 1448.0, + 1384.0, + 1448.0, + 1384.0, + 1477.0, + 1262.0, + 1477.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1246.0, + 1559.0, + 1254.0, + 1559.0, + 1254.0, + 1568.0, + 1246.0, + 1568.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1274.0, + 1579.0, + 1406.0, + 1579.0, + 1406.0, + 1608.0, + 1274.0, + 1608.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1147.0, + 1563.5, + 1165.0, + 1563.5, + 1165.0, + 1570.5, + 1147.0, + 1570.5 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1263.0, + 1573.5, + 1271.0, + 1573.5, + 1271.0, + 1581.5, + 1263.0, + 1581.5 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 289.0, + 1138.0, + 1292.0, + 1138.0, + 1292.0, + 1183.0, + 289.0, + 1183.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 625.0, + 625.0, + 1404.0, + 625.0, + 1404.0, + 662.0, + 625.0, + 662.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 626.0, + 657.0, + 1403.0, + 657.0, + 1403.0, + 686.0, + 626.0, + 686.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 626.0, + 684.0, + 1404.0, + 684.0, + 1404.0, + 715.0, + 626.0, + 715.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 624.0, 
+ 709.0, + 1314.0, + 709.0, + 1314.0, + 744.0, + 624.0, + 744.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 839.0, + 2058.0, + 860.0, + 2058.0, + 860.0, + 2089.0, + 839.0, + 2089.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 838.0, + 2058.0, + 862.0, + 2058.0, + 862.0, + 2091.0, + 838.0, + 2091.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1186.0, + 1405.0, + 1186.0, + 1405.0, + 1222.0, + 294.0, + 1222.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1217.0, + 1408.0, + 1217.0, + 1408.0, + 1252.0, + 294.0, + 1252.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1248.0, + 1018.0, + 1248.0, + 1018.0, + 1284.0, + 296.0, + 1284.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1079.0, + 1248.0, + 1406.0, + 1248.0, + 1406.0, + 1284.0, + 1079.0, + 1284.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1279.0, + 1406.0, + 1279.0, + 1406.0, + 1311.0, + 292.0, + 1311.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1309.0, + 1402.0, + 1309.0, + 1402.0, + 1340.0, + 296.0, + 1340.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1338.0, + 1309.0, + 1338.0, + 1309.0, + 1373.0, + 295.0, + 1373.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 748.0, + 1380.0, + 748.0, + 1380.0, + 789.0, + 293.0, + 789.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1402.0, + 748.0, + 1406.0, + 748.0, + 1406.0, + 789.0, + 1402.0, + 789.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 779.0, + 1405.0, + 779.0, + 1405.0, + 819.0, + 293.0, + 819.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 762.0, + 811.0, + 1410.0, + 
811.0, + 1410.0, + 852.0, + 762.0, + 852.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 842.0, + 1406.0, + 842.0, + 1406.0, + 879.0, + 291.0, + 879.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 872.0, + 525.0, + 872.0, + 525.0, + 909.0, + 293.0, + 909.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 586.0, + 872.0, + 1405.0, + 872.0, + 1405.0, + 909.0, + 586.0, + 909.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 902.0, + 1405.0, + 902.0, + 1405.0, + 939.0, + 293.0, + 939.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 932.0, + 1403.0, + 932.0, + 1403.0, + 970.0, + 293.0, + 970.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 959.0, + 1406.0, + 959.0, + 1406.0, + 1002.0, + 293.0, + 1002.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 995.0, + 1405.0, + 995.0, + 1405.0, + 1029.0, + 294.0, + 1029.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1025.0, + 1405.0, + 1025.0, + 1405.0, + 1059.0, + 294.0, + 1059.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1056.0, + 1406.0, + 1056.0, + 1406.0, + 1088.0, + 293.0, + 1088.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1085.0, + 1314.0, + 1085.0, + 1314.0, + 1119.0, + 293.0, + 1119.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1880.0, + 590.0, + 1880.0, + 590.0, + 1922.0, + 293.0, + 1922.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 616.0, + 1880.0, + 691.0, + 1880.0, + 691.0, + 1922.0, + 616.0, + 1922.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 889.0, + 1880.0, + 1038.0, + 1880.0, + 1038.0, + 1922.0, + 889.0, + 
1922.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1098.0, + 1880.0, + 1407.0, + 1880.0, + 1407.0, + 1922.0, + 1098.0, + 1922.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1915.0, + 1405.0, + 1915.0, + 1405.0, + 1947.0, + 295.0, + 1947.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1941.0, + 615.0, + 1941.0, + 615.0, + 1984.0, + 293.0, + 1984.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 642.0, + 1941.0, + 1183.0, + 1941.0, + 1183.0, + 1984.0, + 642.0, + 1984.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1206.0, + 1941.0, + 1359.0, + 1941.0, + 1359.0, + 1984.0, + 1206.0, + 1984.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1394.0, + 1941.0, + 1404.0, + 1941.0, + 1404.0, + 1984.0, + 1394.0, + 1984.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1977.0, + 1404.0, + 1977.0, + 1404.0, + 2009.0, + 295.0, + 2009.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1623.0, + 1405.0, + 1623.0, + 1405.0, + 1661.0, + 294.0, + 1661.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1655.0, + 740.0, + 1655.0, + 740.0, + 1694.0, + 291.0, + 1694.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 767.0, + 1655.0, + 1403.0, + 1655.0, + 1403.0, + 1694.0, + 767.0, + 1694.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1685.0, + 611.0, + 1685.0, + 611.0, + 1723.0, + 294.0, + 1723.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 690.0, + 1685.0, + 1405.0, + 1685.0, + 1405.0, + 1723.0, + 690.0, + 1723.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1715.0, + 710.0, + 1715.0, + 710.0, + 1753.0, + 295.0, + 1753.0 + ], 
+ "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 767.0, + 1715.0, + 943.0, + 1715.0, + 943.0, + 1753.0, + 767.0, + 1753.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1067.0, + 1715.0, + 1405.0, + 1715.0, + 1405.0, + 1753.0, + 1067.0, + 1753.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1748.0, + 1405.0, + 1748.0, + 1405.0, + 1782.0, + 295.0, + 1782.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1776.0, + 693.0, + 1776.0, + 693.0, + 1816.0, + 292.0, + 1816.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 892.0, + 1776.0, + 1404.0, + 1776.0, + 1404.0, + 1816.0, + 892.0, + 1816.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1808.0, + 1407.0, + 1808.0, + 1407.0, + 1845.0, + 293.0, + 1845.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1840.0, + 1350.0, + 1840.0, + 1350.0, + 1873.0, + 293.0, + 1873.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1382.0, + 1103.0, + 1382.0, + 1103.0, + 1420.0, + 294.0, + 1420.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1414.0, + 1103.0, + 1414.0, + 1103.0, + 1449.0, + 294.0, + 1449.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1444.0, + 603.0, + 1444.0, + 603.0, + 1480.0, + 294.0, + 1480.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 664.0, + 1444.0, + 1103.0, + 1444.0, + 1103.0, + 1480.0, + 664.0, + 1480.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1474.0, + 1102.0, + 1474.0, + 1102.0, + 1509.0, + 294.0, + 1509.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 481.0, + 1504.0, + 1103.0, + 1504.0, + 1103.0, + 1540.0, + 481.0, + 1540.0 + ], + "score": 
1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1536.0, + 1103.0, + 1536.0, + 1103.0, + 1569.0, + 295.0, + 1569.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1564.0, + 1103.0, + 1564.0, + 1103.0, + 1600.0, + 294.0, + 1600.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1594.0, + 1103.0, + 1594.0, + 1103.0, + 1630.0, + 294.0, + 1630.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 374.0, + 416.0, + 608.0, + 416.0, + 608.0, + 455.0, + 374.0, + 455.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 449.0, + 496.0, + 449.0, + 496.0, + 483.0, + 295.0, + 483.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 521.0, + 449.0, + 607.0, + 449.0, + 607.0, + 483.0, + 521.0, + 483.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 479.0, + 606.0, + 479.0, + 606.0, + 513.0, + 295.0, + 513.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 511.0, + 607.0, + 511.0, + 607.0, + 541.0, + 296.0, + 541.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 535.0, + 389.0, + 535.0, + 389.0, + 576.0, + 293.0, + 576.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 417.0, + 535.0, + 494.0, + 535.0, + 494.0, + 576.0, + 417.0, + 576.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 603.0, + 535.0, + 607.0, + 535.0, + 607.0, + 576.0, + 603.0, + 576.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 571.0, + 607.0, + 571.0, + 607.0, + 601.0, + 296.0, + 601.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 602.0, + 606.0, + 602.0, + 606.0, + 633.0, + 296.0, + 633.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 
628.0, + 608.0, + 628.0, + 608.0, + 667.0, + 292.0, + 667.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 660.0, + 606.0, + 660.0, + 606.0, + 691.0, + 295.0, + 691.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 689.0, + 607.0, + 689.0, + 607.0, + 725.0, + 294.0, + 725.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 723.0, + 605.0, + 723.0, + 605.0, + 754.0, + 297.0, + 754.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 203.0, + 1404.0, + 203.0, + 1404.0, + 235.0, + 297.0, + 235.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 233.0, + 1264.0, + 233.0, + 1264.0, + 269.0, + 295.0, + 269.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 278.0, + 1402.0, + 278.0, + 1402.0, + 313.0, + 296.0, + 313.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 308.0, + 1013.0, + 308.0, + 1013.0, + 344.0, + 294.0, + 344.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 859.0, + 344.0, + 1156.0, + 344.0, + 1156.0, + 383.0, + 859.0, + 383.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 625.0, + 625.0, + 1404.0, + 625.0, + 1404.0, + 662.0, + 625.0, + 662.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 626.0, + 657.0, + 1403.0, + 657.0, + 1403.0, + 686.0, + 626.0, + 686.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 626.0, + 684.0, + 1404.0, + 684.0, + 1404.0, + 715.0, + 626.0, + 715.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 624.0, + 709.0, + 1314.0, + 709.0, + 1314.0, + 744.0, + 624.0, + 744.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 2, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + 
"category_id": 1, + "poly": [ + 297, + 202, + 1405, + 202, + 1405, + 479, + 297, + 479 + ], + "score": 0.979 + }, + { + "category_id": 1, + "poly": [ + 297, + 490, + 1404, + 490, + 1404, + 765, + 297, + 765 + ], + "score": 0.979 + }, + { + "category_id": 1, + "poly": [ + 297, + 1528, + 1405, + 1528, + 1405, + 1864, + 297, + 1864 + ], + "score": 0.978 + }, + { + "category_id": 1, + "poly": [ + 297, + 1044, + 1402, + 1044, + 1402, + 1197, + 297, + 1197 + ], + "score": 0.974 + }, + { + "category_id": 3, + "poly": [ + 1122, + 1207, + 1392, + 1207, + 1392, + 1422, + 1122, + 1422 + ], + "score": 0.958 + }, + { + "category_id": 1, + "poly": [ + 297, + 1211, + 1104, + 1211, + 1104, + 1424, + 297, + 1424 + ], + "score": 0.951 + }, + { + "category_id": 1, + "poly": [ + 296, + 1426, + 1394, + 1426, + 1394, + 1515, + 296, + 1515 + ], + "score": 0.928 + }, + { + "category_id": 1, + "poly": [ + 330, + 1887, + 1370, + 1887, + 1370, + 1985, + 330, + 1985 + ], + "score": 0.924 + }, + { + "category_id": 1, + "poly": [ + 313, + 944, + 1371, + 944, + 1371, + 1009, + 313, + 1009 + ], + "score": 0.922 + }, + { + "category_id": 1, + "poly": [ + 324, + 789, + 1373, + 789, + 1373, + 853, + 324, + 853 + ], + "score": 0.909 + }, + { + "category_id": 1, + "poly": [ + 296, + 888, + 1372, + 888, + 1372, + 922, + 296, + 922 + ], + "score": 0.898 + }, + { + "category_id": 2, + "poly": [ + 841, + 2062, + 858, + 2062, + 858, + 2084, + 841, + 2084 + ], + "score": 0.765 + }, + { + "category_id": 13, + "poly": [ + 583, + 446, + 660, + 446, + 660, + 479, + 583, + 479 + ], + "score": 0.94, + "latex": "\\pi ( \\mathbf { a } | \\mathbf { s } )" + }, + { + "category_id": 13, + "poly": [ + 894, + 1771, + 1069, + 1771, + 1069, + 1805, + 894, + 1805 + ], + "score": 0.92, + "latex": "\\dot { \\mathcal { L } } _ { \\mathrm { T D } } ( \\theta ) / ( 1 - \\gamma )" + }, + { + "category_id": 13, + "poly": [ + 985, + 446, + 1075, + 446, + 1075, + 479, + 985, + 479 + ], + "score": 0.92, + "latex": "\\pi _ { \\beta } 
( \\mathbf { a } | \\mathbf { s } )" + }, + { + "category_id": 13, + "poly": [ + 760, + 1453, + 851, + 1453, + 851, + 1486, + 760, + 1486 + ], + "score": 0.92, + "latex": "{ \\mathcal { L } } _ { \\mathrm { T D } } ( \\theta )" + }, + { + "category_id": 13, + "poly": [ + 1317, + 357, + 1393, + 357, + 1393, + 387, + 1317, + 387 + ], + "score": 0.9, + "latex": "\\mu ( \\mathbf { a } | \\mathbf { s } )" + }, + { + "category_id": 13, + "poly": [ + 946, + 791, + 1142, + 791, + 1142, + 824, + 946, + 824 + ], + "score": 0.9, + "latex": "\\mathbb { E } _ { \\mathbf { s } , \\mathbf { a } \\sim \\mathcal { D } } [ Q _ { \\theta } ( \\mathbf { s } , \\mathbf { a } ) ]" + }, + { + "category_id": 13, + "poly": [ + 1147, + 1454, + 1208, + 1454, + 1208, + 1486, + 1147, + 1486 + ], + "score": 0.9, + "latex": "{ \\mathcal { R } } ( \\theta )" + }, + { + "category_id": 13, + "poly": [ + 473, + 1741, + 572, + 1741, + 572, + 1775, + 473, + 1775 + ], + "score": 0.9, + "latex": "\\left( \\mathcal { L } _ { \\mathrm { T D } } ( \\theta ) \\right)" + }, + { + "category_id": 13, + "poly": [ + 396, + 1363, + 458, + 1363, + 458, + 1395, + 396, + 1395 + ], + "score": 0.9, + "latex": "{ \\mathcal { R } } ( \\theta )" + }, + { + "category_id": 13, + "poly": [ + 808, + 1742, + 932, + 1742, + 932, + 1774, + 808, + 1774 + ], + "score": 0.89, + "latex": "( 1 / ( 1 - \\gamma ) )" + }, + { + "category_id": 13, + "poly": [ + 1113, + 447, + 1186, + 447, + 1186, + 474, + 1113, + 474 + ], + "score": 0.89, + "latex": "\\mathbf { s } \\in \\mathcal { D }" + }, + { + "category_id": 13, + "poly": [ + 1160, + 1889, + 1251, + 1889, + 1251, + 1923, + 1160, + 1923 + ], + "score": 0.88, + "latex": "{ \\mathcal { L } } _ { \\mathrm { T D } } ( \\theta )" + }, + { + "category_id": 13, + "poly": [ + 1026, + 386, + 1099, + 386, + 1099, + 413, + 1026, + 413 + ], + "score": 0.88, + "latex": "\\mathbf { s } \\in \\mathcal { D }" + }, + { + "category_id": 13, + "poly": [ + 1311, + 324, + 1404, + 324, + 1404, + 356, + 
1311, + 356 + ], + "score": 0.87, + "latex": "( \\mathbf { s } , \\mathbf { a } ) \\in" + }, + { + "category_id": 13, + "poly": [ + 298, + 357, + 323, + 357, + 323, + 382, + 298, + 382 + ], + "score": 0.79, + "latex": "\\mathcal { D }" + }, + { + "category_id": 13, + "poly": [ + 465, + 1919, + 526, + 1919, + 526, + 1953, + 465, + 1953 + ], + "score": 0.75, + "latex": "{ \\mathcal { R } } ( \\theta )" + }, + { + "category_id": 13, + "poly": [ + 1045, + 1891, + 1085, + 1891, + 1085, + 1918, + 1045, + 1918 + ], + "score": 0.54, + "latex": "T D" + }, + { + "category_id": 13, + "poly": [ + 849, + 793, + 872, + 793, + 872, + 822, + 849, + 822 + ], + "score": 0.52, + "latex": "Q" + }, + { + "category_id": 13, + "poly": [ + 725, + 978, + 748, + 978, + 748, + 1008, + 725, + 1008 + ], + "score": 0.37, + "latex": "Q" + }, + { + "category_id": 13, + "poly": [ + 616, + 1530, + 656, + 1530, + 656, + 1558, + 616, + 1558 + ], + "score": 0.36, + "latex": "\\mathbf { \\nabla } ^ { T D }" + }, + { + "category_id": 13, + "poly": [ + 1179, + 326, + 1204, + 326, + 1204, + 356, + 1179, + 356 + ], + "score": 0.34, + "latex": "\\mathrm { Q }" + }, + { + "category_id": 13, + "poly": [ + 564, + 644, + 589, + 644, + 589, + 674, + 564, + 674 + ], + "score": 0.29, + "latex": "\\mathrm { Q }" + }, + { + "category_id": 13, + "poly": [ + 582, + 1424, + 607, + 1424, + 607, + 1455, + 582, + 1455 + ], + "score": 0.28, + "latex": "\\mathrm { Q }" + }, + { + "category_id": 15, + "poly": [ + 1177.0, + 1200.0, + 1316.0, + 1200.0, + 1316.0, + 1239.0, + 1177.0, + 1239.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1122.0, + 1229.0, + 1166.0, + 1229.0, + 1166.0, + 1392.0, + 1122.0, + 1392.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1201.0, + 1233.0, + 1305.0, + 1233.0, + 1305.0, + 1261.0, + 1201.0, + 1261.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1197.0, + 1243.0, + 1209.0, + 1243.0, + 1209.0, + 
1251.0, + 1197.0, + 1251.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1195.0, + 1254.0, + 1394.0, + 1254.0, + 1394.0, + 1279.0, + 1195.0, + 1279.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1151.0, + 1293.0, + 1159.0, + 1293.0, + 1159.0, + 1301.0, + 1151.0, + 1301.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1189.0, + 1272.0, + 1317.0, + 1272.0, + 1317.0, + 1300.0, + 1189.0, + 1300.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1260.0, + 1398.0, + 1384.0, + 1398.0, + 1384.0, + 1427.0, + 1260.0, + 1427.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1158.0, + 1303.5, + 1174.0, + 1303.5, + 1174.0, + 1314.5, + 1158.0, + 1314.5 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 838.0, + 2059.0, + 862.0, + 2059.0, + 862.0, + 2091.0, + 838.0, + 2091.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 202.0, + 1405.0, + 202.0, + 1405.0, + 238.0, + 295.0, + 238.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 231.0, + 1405.0, + 231.0, + 1405.0, + 270.0, + 292.0, + 270.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 262.0, + 1403.0, + 262.0, + 1403.0, + 299.0, + 295.0, + 299.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 293.0, + 1405.0, + 293.0, + 1405.0, + 330.0, + 295.0, + 330.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 323.0, + 1178.0, + 323.0, + 1178.0, + 360.0, + 294.0, + 360.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1205.0, + 323.0, + 1310.0, + 323.0, + 1310.0, + 360.0, + 1205.0, + 360.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 351.0, + 297.0, + 351.0, + 297.0, + 390.0, + 292.0, + 390.0 + 
], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 351.0, + 1316.0, + 351.0, + 1316.0, + 390.0, + 324.0, + 390.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1394.0, + 351.0, + 1403.0, + 351.0, + 1403.0, + 390.0, + 1394.0, + 390.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 384.0, + 1025.0, + 384.0, + 1025.0, + 420.0, + 295.0, + 420.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1100.0, + 384.0, + 1405.0, + 384.0, + 1405.0, + 420.0, + 1100.0, + 420.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 412.0, + 1406.0, + 412.0, + 1406.0, + 450.0, + 295.0, + 450.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 444.0, + 582.0, + 444.0, + 582.0, + 481.0, + 294.0, + 481.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 661.0, + 444.0, + 984.0, + 444.0, + 984.0, + 481.0, + 661.0, + 481.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1076.0, + 444.0, + 1112.0, + 444.0, + 1112.0, + 481.0, + 1076.0, + 481.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1187.0, + 444.0, + 1197.0, + 444.0, + 1197.0, + 481.0, + 1187.0, + 481.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 487.0, + 1406.0, + 487.0, + 1406.0, + 530.0, + 294.0, + 530.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 521.0, + 1406.0, + 521.0, + 1406.0, + 559.0, + 292.0, + 559.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 553.0, + 1402.0, + 553.0, + 1402.0, + 586.0, + 296.0, + 586.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 582.0, + 1406.0, + 582.0, + 1406.0, + 618.0, + 295.0, + 618.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 
15, + "poly": [ + 296.0, + 613.0, + 1406.0, + 613.0, + 1406.0, + 650.0, + 296.0, + 650.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 642.0, + 563.0, + 642.0, + 563.0, + 678.0, + 295.0, + 678.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 590.0, + 642.0, + 1405.0, + 642.0, + 1405.0, + 678.0, + 590.0, + 678.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 673.0, + 1405.0, + 673.0, + 1405.0, + 706.0, + 295.0, + 706.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 702.0, + 1406.0, + 702.0, + 1406.0, + 742.0, + 292.0, + 742.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 736.0, + 1024.0, + 736.0, + 1024.0, + 768.0, + 295.0, + 768.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1528.0, + 615.0, + 1528.0, + 615.0, + 1566.0, + 294.0, + 1566.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 657.0, + 1528.0, + 1406.0, + 1528.0, + 1406.0, + 1566.0, + 657.0, + 1566.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1560.0, + 1406.0, + 1560.0, + 1406.0, + 1594.0, + 294.0, + 1594.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1588.0, + 1406.0, + 1588.0, + 1406.0, + 1628.0, + 292.0, + 1628.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1619.0, + 1408.0, + 1619.0, + 1408.0, + 1658.0, + 294.0, + 1658.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1646.0, + 1406.0, + 1646.0, + 1406.0, + 1689.0, + 292.0, + 1689.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1680.0, + 1405.0, + 1680.0, + 1405.0, + 1717.0, + 292.0, + 1717.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1712.0, + 
1405.0, + 1712.0, + 1405.0, + 1747.0, + 295.0, + 1747.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1739.0, + 472.0, + 1739.0, + 472.0, + 1777.0, + 294.0, + 1777.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 573.0, + 1739.0, + 807.0, + 1739.0, + 807.0, + 1777.0, + 573.0, + 1777.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 933.0, + 1739.0, + 1406.0, + 1739.0, + 1406.0, + 1777.0, + 933.0, + 1777.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1770.0, + 893.0, + 1770.0, + 893.0, + 1809.0, + 294.0, + 1809.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1070.0, + 1770.0, + 1406.0, + 1770.0, + 1406.0, + 1809.0, + 1070.0, + 1809.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1803.0, + 1406.0, + 1803.0, + 1406.0, + 1836.0, + 292.0, + 1836.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1830.0, + 1348.0, + 1830.0, + 1348.0, + 1869.0, + 292.0, + 1869.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1043.0, + 1403.0, + 1043.0, + 1403.0, + 1078.0, + 295.0, + 1078.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1073.0, + 1404.0, + 1073.0, + 1404.0, + 1109.0, + 295.0, + 1109.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1105.0, + 1404.0, + 1105.0, + 1404.0, + 1142.0, + 295.0, + 1142.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1132.0, + 1406.0, + 1132.0, + 1406.0, + 1172.0, + 295.0, + 1172.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1164.0, + 991.0, + 1164.0, + 991.0, + 1202.0, + 293.0, + 1202.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1209.0, + 1102.0, + 
1209.0, + 1102.0, + 1247.0, + 294.0, + 1247.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1240.0, + 1102.0, + 1240.0, + 1102.0, + 1275.0, + 295.0, + 1275.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1270.0, + 1101.0, + 1270.0, + 1101.0, + 1305.0, + 295.0, + 1305.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1301.0, + 1103.0, + 1301.0, + 1103.0, + 1337.0, + 295.0, + 1337.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1330.0, + 1102.0, + 1330.0, + 1102.0, + 1369.0, + 293.0, + 1369.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1362.0, + 395.0, + 1362.0, + 395.0, + 1395.0, + 295.0, + 1395.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 459.0, + 1362.0, + 1102.0, + 1362.0, + 1102.0, + 1395.0, + 459.0, + 1395.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1393.0, + 1101.0, + 1393.0, + 1101.0, + 1426.0, + 295.0, + 1426.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1421.0, + 581.0, + 1421.0, + 581.0, + 1460.0, + 291.0, + 1460.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 608.0, + 1421.0, + 1400.0, + 1421.0, + 1400.0, + 1460.0, + 608.0, + 1460.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1453.0, + 759.0, + 1453.0, + 759.0, + 1489.0, + 295.0, + 1489.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 852.0, + 1453.0, + 1146.0, + 1453.0, + 1146.0, + 1489.0, + 852.0, + 1489.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1209.0, + 1453.0, + 1402.0, + 1453.0, + 1402.0, + 1489.0, + 1209.0, + 1489.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1485.0, + 1298.0, + 1485.0, + 
1298.0, + 1518.0, + 297.0, + 1518.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 325.0, + 1884.0, + 1044.0, + 1884.0, + 1044.0, + 1926.0, + 325.0, + 1926.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1086.0, + 1884.0, + 1159.0, + 1884.0, + 1159.0, + 1926.0, + 1086.0, + 1926.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1252.0, + 1884.0, + 1374.0, + 1884.0, + 1374.0, + 1926.0, + 1252.0, + 1926.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 326.0, + 1916.0, + 464.0, + 1916.0, + 464.0, + 1957.0, + 326.0, + 1957.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 527.0, + 1916.0, + 1369.0, + 1916.0, + 1369.0, + 1957.0, + 527.0, + 1957.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 325.0, + 1946.0, + 1373.0, + 1946.0, + 1373.0, + 1990.0, + 325.0, + 1990.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 327.0, + 939.0, + 1375.0, + 939.0, + 1375.0, + 983.0, + 327.0, + 983.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 327.0, + 971.0, + 724.0, + 971.0, + 724.0, + 1011.0, + 327.0, + 1011.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 749.0, + 971.0, + 1198.0, + 971.0, + 1198.0, + 1011.0, + 749.0, + 1011.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 783.0, + 848.0, + 783.0, + 848.0, + 829.0, + 321.0, + 829.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 873.0, + 783.0, + 945.0, + 783.0, + 945.0, + 829.0, + 873.0, + 829.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1143.0, + 783.0, + 1375.0, + 783.0, + 1375.0, + 829.0, + 1143.0, + 829.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 816.0, + 1305.0, + 816.0, + 1305.0, + 859.0, + 324.0, + 859.0 
+ ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 884.0, + 1378.0, + 884.0, + 1378.0, + 929.0, + 293.0, + 929.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 3, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 297, + 923, + 1405, + 923, + 1405, + 1320, + 297, + 1320 + ], + "score": 0.981 + }, + { + "category_id": 1, + "poly": [ + 297, + 405, + 1405, + 405, + 1405, + 747, + 297, + 747 + ], + "score": 0.98 + }, + { + "category_id": 1, + "poly": [ + 298, + 1863, + 1403, + 1863, + 1403, + 2016, + 298, + 2016 + ], + "score": 0.973 + }, + { + "category_id": 3, + "poly": [ + 957, + 1580, + 1401, + 1580, + 1401, + 1783, + 957, + 1783 + ], + "score": 0.972 + }, + { + "category_id": 1, + "poly": [ + 298, + 240, + 1404, + 240, + 1404, + 392, + 298, + 392 + ], + "score": 0.97 + }, + { + "category_id": 1, + "poly": [ + 299, + 1577, + 934, + 1577, + 934, + 1836, + 299, + 1836 + ], + "score": 0.963 + }, + { + "category_id": 1, + "poly": [ + 325, + 1410, + 1373, + 1410, + 1373, + 1505, + 325, + 1505 + ], + "score": 0.929 + }, + { + "category_id": 0, + "poly": [ + 297, + 1533, + 1277, + 1533, + 1277, + 1571, + 297, + 1571 + ], + "score": 0.924 + }, + { + "category_id": 0, + "poly": [ + 296, + 198, + 1308, + 198, + 1308, + 237, + 296, + 237 + ], + "score": 0.912 + }, + { + "category_id": 1, + "poly": [ + 322, + 823, + 1372, + 823, + 1372, + 888, + 322, + 888 + ], + "score": 0.911 + }, + { + "category_id": 9, + "poly": [ + 1366, + 1346, + 1400, + 1346, + 1400, + 1376, + 1366, + 1376 + ], + "score": 0.857 + }, + { + "category_id": 8, + "poly": [ + 367, + 1339, + 858, + 1339, + 858, + 1387, + 367, + 1387 + ], + "score": 0.843 + }, + { + "category_id": 8, + "poly": [ + 346, + 752, + 1418, + 752, + 1418, + 797, + 346, + 797 + ], + "score": 0.799 + }, + { + "category_id": 9, + "poly": [ + 1387, + 760, + 1421, + 760, + 1421, + 790, + 1387, + 790 + ], + "score": 0.768 + }, 
+ { + "category_id": 1, + "poly": [ + 305, + 1820, + 1399, + 1820, + 1399, + 1849, + 305, + 1849 + ], + "score": 0.751 + }, + { + "category_id": 2, + "poly": [ + 841, + 2061, + 858, + 2061, + 858, + 2085, + 841, + 2085 + ], + "score": 0.706 + }, + { + "category_id": 8, + "poly": [ + 358, + 1337, + 1188, + 1337, + 1188, + 1385, + 358, + 1385 + ], + "score": 0.562 + }, + { + "category_id": 2, + "poly": [ + 841, + 2061, + 859, + 2061, + 859, + 2085, + 841, + 2085 + ], + "score": 0.128 + }, + { + "category_id": 1, + "poly": [ + 346, + 752, + 1418, + 752, + 1418, + 797, + 346, + 797 + ], + "score": 0.117 + }, + { + "category_id": 13, + "poly": [ + 866, + 652, + 1014, + 652, + 1014, + 686, + 866, + 686 + ], + "score": 0.93, + "latex": "\\phi _ { \\Sigma } ( \\mathbf { s } ) \\in \\mathbb { R } ^ { d }" + }, + { + "category_id": 13, + "poly": [ + 1030, + 1256, + 1183, + 1256, + 1183, + 1290, + 1030, + 1290 + ], + "score": 0.93, + "latex": "( \\mathbf { s } , \\mathbf { a } , \\mathbf { s } ^ { \\prime } ) \\in \\mathcal { D }" + }, + { + "category_id": 13, + "poly": [ + 457, + 466, + 533, + 466, + 533, + 499, + 457, + 499 + ], + "score": 0.93, + "latex": "\\mu ( \\mathbf { a } | \\mathbf { s } )" + }, + { + "category_id": 13, + "poly": [ + 656, + 651, + 809, + 651, + 809, + 687, + 656, + 687 + ], + "score": 0.92, + "latex": "\\phi _ { m } ( \\mathbf { s } ) \\in \\mathbb { R } ^ { d }" + }, + { + "category_id": 13, + "poly": [ + 793, + 1257, + 853, + 1257, + 853, + 1290, + 793, + 1290 + ], + "score": 0.92, + "latex": "\\phi ( \\mathbf { s } ^ { \\prime } )" + }, + { + "category_id": 13, + "poly": [ + 841, + 619, + 964, + 619, + 964, + 653, + 841, + 653 + ], + "score": 0.92, + "latex": "\\phi ( \\mathbf { s } ) \\in \\mathbb { R } ^ { d }" + }, + { + "category_id": 13, + "poly": [ + 1065, + 716, + 1153, + 716, + 1153, + 748, + 1065, + 748 + ], + "score": 0.91, + "latex": "\\mathcal { N } ( 0 , \\mathbb { I } )" + }, + { + "category_id": 13, + "poly": [ + 688, + 1257, + 
741, + 1257, + 741, + 1290, + 688, + 1290 + ], + "score": 0.91, + "latex": "\\phi ( \\mathbf { s } )" + }, + { + "category_id": 13, + "poly": [ + 945, + 587, + 998, + 587, + 998, + 620, + 945, + 620 + ], + "score": 0.9, + "latex": "\\phi ( \\mathbf { s } )" + }, + { + "category_id": 13, + "poly": [ + 803, + 686, + 839, + 686, + 839, + 717, + 803, + 717 + ], + "score": 0.89, + "latex": "\\phi _ { \\Sigma }" + }, + { + "category_id": 13, + "poly": [ + 1167, + 588, + 1230, + 588, + 1230, + 621, + 1167, + 621 + ], + "score": 0.89, + "latex": "( \\mathbf { s } , \\mathbf { a } )" + }, + { + "category_id": 13, + "poly": [ + 1167, + 653, + 1220, + 653, + 1220, + 686, + 1167, + 686 + ], + "score": 0.88, + "latex": "\\phi ( \\mathbf { s } )" + }, + { + "category_id": 13, + "poly": [ + 566, + 686, + 607, + 686, + 607, + 715, + 566, + 715 + ], + "score": 0.88, + "latex": "\\phi _ { m }" + }, + { + "category_id": 13, + "poly": [ + 907, + 684, + 1268, + 684, + 1268, + 717, + 907, + 717 + ], + "score": 0.87, + "latex": "\\phi ( \\mathbf { s } ) \\sim \\mathcal { N } ( \\phi _ { m } ( \\mathbf { s } ) , \\mathrm { d i a g } ( \\phi _ { \\Sigma } ( \\mathbf { s } ) )" + }, + { + "category_id": 14, + "poly": [ + 359, + 1336, + 1190, + 1336, + 1190, + 1391, + 359, + 1391 + ], + "score": 0.8, + "latex": "\\operatorname* { m i n } _ { \\theta } \\ \\mathcal { L } _ { \\mathrm { C Q L } } ( \\theta ) + \\beta \\mathbb { E } _ { { \\mathbf s } , { \\mathbf a } , { \\mathbf s } ^ { \\prime } \\sim \\mathcal { D } } \\left[ \\left| \\phi ( { \\mathbf s } ) ^ { \\top } \\phi ( { \\mathbf s } ^ { \\prime } ) \\right| \\right] \\qquad ( { \\mathrm { D R 3 ~ r e g u l a r i z e r ~ } } [ 2 2 ] ) ," + }, + { + "category_id": 14, + "poly": [ + 357, + 752, + 1367, + 752, + 1367, + 798, + 357, + 798 + ], + "score": 0.77, + "latex": "\\operatorname* { m i n } _ { \\theta } \\ \\mathcal { L } _ { \\mathrm { C Q L } } ( \\theta ) + \\beta \\mathbb { E } _ { \\mathrm { s } \\sim \\mathcal { D } } 
\\left[ \\mathrm { D } _ { \\mathrm { K L } } \\left( \\mathcal { N } ( \\phi _ { m } ( \\mathbf { s } ) , \\mathrm { d i a g } ( \\phi _ { \\Sigma } ( \\mathbf { s } ) ) ) \\ | | \\mathcal { N } ( 0 , \\mathbb { I } ) \\right) \\right] \\quad ( \\mathrm { V I B ~ r e g u l a r i z e r } ) ," + }, + { + "category_id": 13, + "poly": [ + 425, + 714, + 686, + 714, + 686, + 748, + 425, + 748 + ], + "score": 0.62, + "latex": "\\mathcal { N } ( \\phi _ { m } ( \\mathbf { s } ) , \\mathrm { d i a g } ( \\phi _ { \\Sigma } ( \\mathbf { s } ) )" + }, + { + "category_id": 13, + "poly": [ + 426, + 714, + 537, + 714, + 537, + 748, + 426, + 748 + ], + "score": 0.44, + "latex": "\\mathcal { N } ( \\phi _ { m } ( \\mathbf { s } )" + }, + { + "category_id": 13, + "poly": [ + 604, + 715, + 683, + 715, + 683, + 748, + 604, + 748 + ], + "score": 0.33, + "latex": "\\left( \\phi _ { \\Sigma } ( \\mathbf { s } ) \\right)" + }, + { + "category_id": 13, + "poly": [ + 548, + 437, + 572, + 437, + 572, + 466, + 548, + 466 + ], + "score": 0.3, + "latex": "\\mathbf { Q }" + }, + { + "category_id": 13, + "poly": [ + 1175, + 437, + 1201, + 437, + 1201, + 467, + 1175, + 467 + ], + "score": 0.28, + "latex": "\\mathrm { Q }" + }, + { + "category_id": 13, + "poly": [ + 569, + 857, + 592, + 857, + 592, + 887, + 569, + 887 + ], + "score": 0.26, + "latex": "Q" + }, + { + "category_id": 15, + "poly": [ + 1005.0, + 1751.0, + 1013.0, + 1751.0, + 1013.0, + 1758.0, + 1005.0, + 1758.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1531.0, + 1279.0, + 1531.0, + 1279.0, + 1577.0, + 291.0, + 1577.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 196.0, + 1315.0, + 196.0, + 1315.0, + 241.0, + 290.0, + 241.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 839.0, + 2058.0, + 861.0, + 2058.0, + 861.0, + 2093.0, + 839.0, + 2093.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 
838.0, + 2058.0, + 862.0, + 2058.0, + 862.0, + 2092.0, + 838.0, + 2092.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 922.0, + 1406.0, + 922.0, + 1406.0, + 960.0, + 296.0, + 960.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 951.0, + 1403.0, + 951.0, + 1403.0, + 991.0, + 294.0, + 991.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 984.0, + 1405.0, + 984.0, + 1405.0, + 1021.0, + 295.0, + 1021.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1014.0, + 1403.0, + 1014.0, + 1403.0, + 1050.0, + 295.0, + 1050.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1041.0, + 1406.0, + 1041.0, + 1406.0, + 1083.0, + 292.0, + 1083.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1075.0, + 1406.0, + 1075.0, + 1406.0, + 1110.0, + 292.0, + 1110.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1106.0, + 1405.0, + 1106.0, + 1405.0, + 1140.0, + 291.0, + 1140.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1134.0, + 1405.0, + 1134.0, + 1405.0, + 1169.0, + 292.0, + 1169.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1168.0, + 1405.0, + 1168.0, + 1405.0, + 1200.0, + 296.0, + 1200.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1197.0, + 1403.0, + 1197.0, + 1403.0, + 1233.0, + 295.0, + 1233.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1226.0, + 1403.0, + 1226.0, + 1403.0, + 1263.0, + 295.0, + 1263.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1256.0, + 687.0, + 1256.0, + 687.0, + 1292.0, + 295.0, + 1292.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 742.0, + 1256.0, + 
792.0, + 1256.0, + 792.0, + 1292.0, + 742.0, + 1292.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 854.0, + 1256.0, + 1029.0, + 1256.0, + 1029.0, + 1292.0, + 854.0, + 1292.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1184.0, + 1256.0, + 1405.0, + 1256.0, + 1405.0, + 1292.0, + 1184.0, + 1292.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1286.0, + 696.0, + 1286.0, + 696.0, + 1325.0, + 294.0, + 1325.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 406.0, + 1403.0, + 406.0, + 1403.0, + 438.0, + 297.0, + 438.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 435.0, + 547.0, + 435.0, + 547.0, + 471.0, + 295.0, + 471.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 573.0, + 435.0, + 1174.0, + 435.0, + 1174.0, + 471.0, + 573.0, + 471.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1202.0, + 435.0, + 1405.0, + 435.0, + 1405.0, + 471.0, + 1202.0, + 471.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 464.0, + 456.0, + 464.0, + 456.0, + 503.0, + 294.0, + 503.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 534.0, + 464.0, + 1406.0, + 464.0, + 1406.0, + 503.0, + 534.0, + 503.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 494.0, + 1405.0, + 494.0, + 1405.0, + 535.0, + 292.0, + 535.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 526.0, + 1405.0, + 526.0, + 1405.0, + 560.0, + 294.0, + 560.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 556.0, + 1406.0, + 556.0, + 1406.0, + 595.0, + 292.0, + 595.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 587.0, + 944.0, + 587.0, + 944.0, + 622.0, + 295.0, + 622.0 
+ ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 999.0, + 587.0, + 1166.0, + 587.0, + 1166.0, + 622.0, + 999.0, + 622.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1231.0, + 587.0, + 1405.0, + 587.0, + 1405.0, + 622.0, + 1231.0, + 622.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 620.0, + 840.0, + 620.0, + 840.0, + 655.0, + 295.0, + 655.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 965.0, + 620.0, + 1405.0, + 620.0, + 1405.0, + 655.0, + 965.0, + 655.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 649.0, + 655.0, + 649.0, + 655.0, + 689.0, + 291.0, + 689.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 810.0, + 649.0, + 865.0, + 649.0, + 865.0, + 689.0, + 810.0, + 689.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1015.0, + 649.0, + 1166.0, + 649.0, + 1166.0, + 689.0, + 1015.0, + 689.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1221.0, + 649.0, + 1407.0, + 649.0, + 1407.0, + 689.0, + 1221.0, + 689.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 683.0, + 565.0, + 683.0, + 565.0, + 722.0, + 292.0, + 722.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 608.0, + 683.0, + 802.0, + 683.0, + 802.0, + 722.0, + 608.0, + 722.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 840.0, + 683.0, + 906.0, + 683.0, + 906.0, + 722.0, + 840.0, + 722.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1269.0, + 683.0, + 1406.0, + 683.0, + 1406.0, + 722.0, + 1269.0, + 722.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 713.0, + 424.0, + 713.0, + 424.0, + 752.0, + 294.0, + 752.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + 
"poly": [ + 687.0, + 713.0, + 1064.0, + 713.0, + 1064.0, + 752.0, + 687.0, + 752.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1154.0, + 713.0, + 1167.0, + 713.0, + 1167.0, + 752.0, + 1154.0, + 752.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1862.0, + 1404.0, + 1862.0, + 1404.0, + 1897.0, + 293.0, + 1897.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1892.0, + 1405.0, + 1892.0, + 1405.0, + 1929.0, + 294.0, + 1929.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1924.0, + 1407.0, + 1924.0, + 1407.0, + 1958.0, + 293.0, + 1958.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1953.0, + 1407.0, + 1953.0, + 1407.0, + 1990.0, + 294.0, + 1990.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1982.0, + 1405.0, + 1982.0, + 1405.0, + 2020.0, + 291.0, + 2020.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 237.0, + 1405.0, + 237.0, + 1405.0, + 274.0, + 296.0, + 274.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 266.0, + 1405.0, + 266.0, + 1405.0, + 308.0, + 292.0, + 308.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 299.0, + 1406.0, + 299.0, + 1406.0, + 334.0, + 293.0, + 334.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 328.0, + 1406.0, + 328.0, + 1406.0, + 366.0, + 293.0, + 366.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 359.0, + 1087.0, + 359.0, + 1087.0, + 397.0, + 295.0, + 397.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1575.0, + 935.0, + 1575.0, + 935.0, + 1609.0, + 295.0, + 1609.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1603.0, + 933.0, 
+ 1603.0, + 933.0, + 1639.0, + 296.0, + 1639.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1635.0, + 935.0, + 1635.0, + 935.0, + 1668.0, + 295.0, + 1668.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1668.0, + 936.0, + 1668.0, + 936.0, + 1697.0, + 296.0, + 1697.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1697.0, + 937.0, + 1697.0, + 937.0, + 1728.0, + 295.0, + 1728.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1729.0, + 936.0, + 1729.0, + 936.0, + 1761.0, + 296.0, + 1761.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1758.0, + 936.0, + 1758.0, + 936.0, + 1789.0, + 294.0, + 1789.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1788.0, + 936.0, + 1788.0, + 936.0, + 1819.0, + 295.0, + 1819.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1817.0, + 938.0, + 1817.0, + 938.0, + 1843.0, + 294.0, + 1843.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 1404.0, + 1373.0, + 1404.0, + 1373.0, + 1449.0, + 324.0, + 1449.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 326.0, + 1437.0, + 1370.0, + 1437.0, + 1370.0, + 1478.0, + 326.0, + 1478.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 1467.0, + 1203.0, + 1467.0, + 1203.0, + 1510.0, + 323.0, + 1510.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 324.0, + 817.0, + 1374.0, + 817.0, + 1374.0, + 865.0, + 324.0, + 865.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 323.0, + 849.0, + 568.0, + 849.0, + 568.0, + 890.0, + 323.0, + 890.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 593.0, + 849.0, + 1374.0, + 849.0, + 1374.0, + 890.0, + 593.0, 
+ 890.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1812.0, + 1267.0, + 1812.0, + 1267.0, + 1856.0, + 296.0, + 1856.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 746.0, + 356.0, + 746.0, + 356.0, + 806.0, + 353.0, + 806.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1368.0, + 746.0, + 1434.0, + 746.0, + 1434.0, + 806.0, + 1368.0, + 806.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 4, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 298, + 201, + 934, + 201, + 934, + 810, + 298, + 810 + ], + "score": 0.978 + }, + { + "category_id": 1, + "poly": [ + 298, + 949, + 879, + 949, + 879, + 1461, + 298, + 1461 + ], + "score": 0.977 + }, + { + "category_id": 1, + "poly": [ + 297, + 1733, + 1403, + 1733, + 1403, + 1978, + 297, + 1978 + ], + "score": 0.976 + }, + { + "category_id": 4, + "poly": [ + 901, + 1389, + 1402, + 1389, + 1402, + 1557, + 901, + 1557 + ], + "score": 0.967 + }, + { + "category_id": 1, + "poly": [ + 296, + 824, + 1403, + 824, + 1403, + 947, + 296, + 947 + ], + "score": 0.967 + }, + { + "category_id": 3, + "poly": [ + 978, + 214, + 1381, + 214, + 1381, + 593, + 978, + 593 + ], + "score": 0.966 + }, + { + "category_id": 4, + "poly": [ + 957, + 603, + 1402, + 603, + 1402, + 798, + 957, + 798 + ], + "score": 0.964 + }, + { + "category_id": 3, + "poly": [ + 913, + 956, + 1387, + 956, + 1387, + 1381, + 913, + 1381 + ], + "score": 0.964 + }, + { + "category_id": 1, + "poly": [ + 297, + 1566, + 1403, + 1566, + 1403, + 1720, + 297, + 1720 + ], + "score": 0.963 + }, + { + "category_id": 1, + "poly": [ + 297, + 1477, + 879, + 1477, + 879, + 1566, + 297, + 1566 + ], + "score": 0.903 + }, + { + "category_id": 2, + "poly": [ + 840, + 2062, + 858, + 2062, + 858, + 2085, + 840, + 2085 + ], + "score": 0.783 + }, + { + "category_id": 13, + "poly": [ + 717, + 915, + 914, + 
915, + 914, + 949, + 717, + 949 + ], + "score": 0.93, + "latex": "\\mathbb { E } _ { \\mathbf { s } , \\mathbf { a } \\sim \\mathcal { D } } [ Q _ { \\theta } ( \\mathbf { s } , \\mathbf { a } ) ]" + }, + { + "category_id": 13, + "poly": [ + 738, + 629, + 833, + 629, + 833, + 656, + 738, + 656 + ], + "score": 0.89, + "latex": "\\alpha = 1 . 0" + }, + { + "category_id": 13, + "poly": [ + 705, + 567, + 797, + 567, + 797, + 596, + 705, + 596 + ], + "score": 0.88, + "latex": "3 0 { - } 4 0 \\%" + }, + { + "category_id": 13, + "poly": [ + 452, + 325, + 508, + 325, + 508, + 354, + 452, + 354 + ], + "score": 0.87, + "latex": "40 \\%" + }, + { + "category_id": 13, + "poly": [ + 1326, + 1658, + 1354, + 1658, + 1354, + 1689, + 1326, + 1689 + ], + "score": 0.87, + "latex": "\\ell _ { 1 }" + }, + { + "category_id": 13, + "poly": [ + 378, + 294, + 434, + 294, + 434, + 324, + 378, + 324 + ], + "score": 0.87, + "latex": "3 5 \\%" + }, + { + "category_id": 13, + "poly": [ + 408, + 628, + 463, + 628, + 463, + 657, + 408, + 657 + ], + "score": 0.86, + "latex": "40 \\%" + }, + { + "category_id": 13, + "poly": [ + 297, + 1689, + 325, + 1689, + 325, + 1720, + 297, + 1720 + ], + "score": 0.85, + "latex": "\\ell _ { 2 }" + }, + { + "category_id": 13, + "poly": [ + 602, + 205, + 636, + 205, + 636, + 232, + 602, + 232 + ], + "score": 0.83, + "latex": "+ 1" + }, + { + "category_id": 13, + "poly": [ + 639, + 538, + 672, + 538, + 672, + 565, + 639, + 565 + ], + "score": 0.81, + "latex": "+ 1" + }, + { + "category_id": 13, + "poly": [ + 400, + 723, + 424, + 723, + 424, + 747, + 400, + 747 + ], + "score": 0.78, + "latex": "\\alpha" + }, + { + "category_id": 13, + "poly": [ + 685, + 754, + 707, + 754, + 707, + 776, + 685, + 776 + ], + "score": 0.75, + "latex": "\\alpha" + }, + { + "category_id": 13, + "poly": [ + 1133, + 715, + 1156, + 715, + 1156, + 743, + 1133, + 743 + ], + "score": 0.31, + "latex": "\\mathrm { Q }" + }, + { + "category_id": 13, + "poly": [ + 1308, + 1474, + 1330, + 1474, + 
1330, + 1501, + 1308, + 1501 + ], + "score": 0.25, + "latex": "\\mathrm { Q }" + }, + { + "category_id": 15, + "poly": [ + 900.0, + 1386.0, + 1404.0, + 1386.0, + 1404.0, + 1423.0, + 900.0, + 1423.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 899.0, + 1416.0, + 1404.0, + 1416.0, + 1404.0, + 1448.0, + 899.0, + 1448.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 899.0, + 1445.0, + 1405.0, + 1445.0, + 1405.0, + 1476.0, + 899.0, + 1476.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 899.0, + 1475.0, + 1307.0, + 1475.0, + 1307.0, + 1502.0, + 899.0, + 1502.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1331.0, + 1475.0, + 1403.0, + 1475.0, + 1403.0, + 1502.0, + 1331.0, + 1502.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 899.0, + 1498.0, + 1404.0, + 1498.0, + 1404.0, + 1531.0, + 899.0, + 1531.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 901.0, + 1526.0, + 1157.0, + 1526.0, + 1157.0, + 1562.0, + 901.0, + 1562.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1026.0, + 215.0, + 1045.0, + 215.0, + 1045.0, + 231.0, + 1026.0, + 231.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1097.0, + 216.0, + 1121.0, + 216.0, + 1121.0, + 230.0, + 1097.0, + 230.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1174.0, + 216.0, + 1197.0, + 216.0, + 1197.0, + 230.0, + 1174.0, + 230.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1246.0, + 216.0, + 1343.0, + 216.0, + 1343.0, + 232.0, + 1246.0, + 232.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1012.0, + 230.0, + 1198.0, + 230.0, + 1198.0, + 249.0, + 1012.0, + 249.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1253.0, + 231.0, + 1340.0, + 231.0, + 1340.0, + 
250.0, + 1253.0, + 250.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1196.0, + 250.0, + 1214.0, + 250.0, + 1214.0, + 266.0, + 1196.0, + 266.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1009.0, + 258.0, + 1026.0, + 258.0, + 1026.0, + 274.0, + 1009.0, + 274.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1196.0, + 273.0, + 1214.0, + 273.0, + 1214.0, + 289.0, + 1196.0, + 289.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1010.0, + 279.0, + 1025.0, + 279.0, + 1025.0, + 296.0, + 1010.0, + 296.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 994.0, + 283.0, + 1013.0, + 283.0, + 1013.0, + 361.0, + 994.0, + 361.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1010.0, + 296.0, + 1025.0, + 296.0, + 1025.0, + 316.0, + 1010.0, + 316.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1198.0, + 298.0, + 1212.0, + 298.0, + 1212.0, + 311.0, + 1198.0, + 311.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1010.0, + 321.0, + 1025.0, + 321.0, + 1025.0, + 340.0, + 1010.0, + 340.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1197.0, + 318.0, + 1214.0, + 318.0, + 1214.0, + 334.0, + 1197.0, + 334.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1010.0, + 340.0, + 1025.0, + 340.0, + 1025.0, + 358.0, + 1010.0, + 358.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1198.0, + 343.0, + 1212.0, + 343.0, + 1212.0, + 356.0, + 1198.0, + 356.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1014.0, + 366.0, + 1025.0, + 366.0, + 1025.0, + 379.0, + 1014.0, + 379.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1197.0, + 365.0, + 1214.0, + 365.0, + 1214.0, + 380.0, + 1197.0, + 380.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1015.0, + 390.0, + 1024.0, + 390.0, + 1024.0, + 399.0, + 1015.0, + 399.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1203.0, + 390.0, + 1213.0, + 390.0, + 1213.0, + 398.0, + 1203.0, + 398.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 976.0, + 399.0, + 1001.0, + 399.0, + 1001.0, + 570.0, + 976.0, + 570.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1008.0, + 405.0, + 1025.0, + 405.0, + 1025.0, + 421.0, + 1008.0, + 421.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1203.0, + 413.0, + 1213.0, + 413.0, + 1213.0, + 421.0, + 1203.0, + 421.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1012.0, + 429.0, + 1025.0, + 429.0, + 1025.0, + 442.0, + 1012.0, + 442.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1219.0, + 438.0, + 1228.0, + 438.0, + 1228.0, + 446.0, + 1219.0, + 446.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1006.0, + 449.0, + 1026.0, + 449.0, + 1026.0, + 468.0, + 1006.0, + 468.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1188.0, + 445.0, + 1217.0, + 445.0, + 1217.0, + 463.0, + 1188.0, + 463.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1002.0, + 472.0, + 1025.0, + 472.0, + 1025.0, + 487.0, + 1002.0, + 487.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1189.0, + 481.0, + 1217.0, + 481.0, + 1217.0, + 499.0, + 1189.0, + 499.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1002.0, + 495.0, + 1026.0, + 495.0, + 1026.0, + 510.0, + 1002.0, + 510.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1002.0, + 517.0, + 1026.0, + 517.0, + 1026.0, + 532.0, + 1002.0, + 532.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 1001.0, + 540.0, + 1025.0, + 540.0, + 1025.0, + 555.0, + 1001.0, + 555.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1188.0, + 556.0, + 1220.0, + 556.0, + 1220.0, + 573.0, + 1188.0, + 573.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1011.0, + 568.0, + 1042.0, + 568.0, + 1042.0, + 585.0, + 1011.0, + 585.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1056.0, + 568.0, + 1087.0, + 568.0, + 1087.0, + 585.0, + 1056.0, + 585.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1101.0, + 569.0, + 1131.0, + 569.0, + 1131.0, + 583.0, + 1101.0, + 583.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1147.0, + 569.0, + 1176.0, + 569.0, + 1176.0, + 583.0, + 1147.0, + 583.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1199.0, + 567.0, + 1231.0, + 567.0, + 1231.0, + 584.0, + 1199.0, + 584.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1245.0, + 566.0, + 1278.0, + 566.0, + 1278.0, + 583.0, + 1245.0, + 583.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1292.0, + 566.0, + 1324.0, + 566.0, + 1324.0, + 583.0, + 1292.0, + 583.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1340.0, + 567.0, + 1370.0, + 567.0, + 1370.0, + 584.0, + 1340.0, + 584.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1068.0, + 581.0, + 1142.0, + 581.0, + 1142.0, + 597.0, + 1068.0, + 597.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1259.0, + 578.0, + 1332.0, + 578.0, + 1332.0, + 596.0, + 1259.0, + 596.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1189.25, + 518.0, + 1214.25, + 518.0, + 1214.25, + 536.0, + 1189.25, + 536.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 
955.0, + 601.0, + 1405.0, + 601.0, + 1405.0, + 636.0, + 955.0, + 636.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 954.0, + 631.0, + 1403.0, + 631.0, + 1403.0, + 662.0, + 954.0, + 662.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 955.0, + 657.0, + 1403.0, + 657.0, + 1403.0, + 690.0, + 955.0, + 690.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 957.0, + 687.0, + 1403.0, + 687.0, + 1403.0, + 716.0, + 957.0, + 716.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 954.0, + 715.0, + 1132.0, + 715.0, + 1132.0, + 744.0, + 954.0, + 744.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1157.0, + 715.0, + 1405.0, + 715.0, + 1405.0, + 744.0, + 1157.0, + 744.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 955.0, + 742.0, + 1403.0, + 742.0, + 1403.0, + 774.0, + 955.0, + 774.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 954.0, + 770.0, + 1218.0, + 770.0, + 1218.0, + 802.0, + 954.0, + 802.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 964.0, + 964.0, + 1085.0, + 964.0, + 1085.0, + 984.0, + 964.0, + 984.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1171.0, + 964.0, + 1380.0, + 964.0, + 1380.0, + 984.0, + 1171.0, + 984.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 993.0, + 987.0, + 1084.0, + 987.0, + 1084.0, + 1006.0, + 993.0, + 1006.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1255.0, + 996.0, + 1343.0, + 996.0, + 1343.0, + 1016.0, + 1255.0, + 1016.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1075.0, + 1009.0, + 1096.0, + 1009.0, + 1096.0, + 1025.0, + 1075.0, + 1025.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 941.0, + 1020.0, + 963.0, + 1020.0, + 963.0, + 
1035.0, + 941.0, + 1035.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1197.0, + 1023.0, + 1214.0, + 1023.0, + 1214.0, + 1038.0, + 1197.0, + 1038.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1076.0, + 1024.0, + 1100.0, + 1024.0, + 1100.0, + 1039.0, + 1076.0, + 1039.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 918.0, + 1029.0, + 940.0, + 1029.0, + 940.0, + 1135.0, + 918.0, + 1135.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1076.0, + 1036.0, + 1100.0, + 1036.0, + 1100.0, + 1052.0, + 1076.0, + 1052.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 941.0, + 1042.0, + 962.0, + 1042.0, + 962.0, + 1058.0, + 941.0, + 1058.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1075.0, + 1050.0, + 1110.0, + 1050.0, + 1110.0, + 1067.0, + 1075.0, + 1067.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1184.0, + 1052.0, + 1203.0, + 1052.0, + 1203.0, + 1134.0, + 1184.0, + 1134.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 941.0, + 1066.0, + 963.0, + 1066.0, + 963.0, + 1081.0, + 941.0, + 1081.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1198.0, + 1070.0, + 1214.0, + 1070.0, + 1214.0, + 1090.0, + 1198.0, + 1090.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 939.0, + 1087.0, + 962.0, + 1087.0, + 962.0, + 1103.0, + 939.0, + 1103.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1198.0, + 1099.0, + 1215.0, + 1099.0, + 1215.0, + 1115.0, + 1198.0, + 1115.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 938.0, + 1111.0, + 962.0, + 1111.0, + 962.0, + 1125.0, + 938.0, + 1125.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1202.0, + 1125.0, + 1214.0, + 1125.0, + 1214.0, + 
1139.0, + 1202.0, + 1139.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 938.0, + 1134.0, + 962.0, + 1134.0, + 962.0, + 1149.0, + 938.0, + 1149.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1202.0, + 1152.0, + 1215.0, + 1152.0, + 1215.0, + 1165.0, + 1202.0, + 1165.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 948.0, + 1160.0, + 980.0, + 1160.0, + 980.0, + 1175.0, + 948.0, + 1175.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 985.0, + 1160.0, + 1016.0, + 1160.0, + 1016.0, + 1175.0, + 985.0, + 1175.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1022.0, + 1160.0, + 1053.0, + 1160.0, + 1053.0, + 1175.0, + 1022.0, + 1175.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1060.0, + 1160.0, + 1091.0, + 1160.0, + 1091.0, + 1175.0, + 1060.0, + 1175.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1099.0, + 1160.0, + 1128.0, + 1160.0, + 1128.0, + 1175.0, + 1099.0, + 1175.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1157.0, + 1164.0, + 1179.0, + 1164.0, + 1179.0, + 1343.0, + 1157.0, + 1343.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1201.0, + 1170.0, + 1213.0, + 1170.0, + 1213.0, + 1184.0, + 1201.0, + 1184.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 942.0, + 1179.0, + 1133.0, + 1179.0, + 1133.0, + 1199.0, + 942.0, + 1199.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1077.0, + 1202.0, + 1097.0, + 1202.0, + 1097.0, + 1219.0, + 1077.0, + 1219.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1184.0, + 1198.0, + 1215.0, + 1198.0, + 1215.0, + 1217.0, + 1184.0, + 1217.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 939.0, + 1215.0, + 962.0, + 1215.0, + 962.0, + 
1230.0, + 939.0, + 1230.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1077.0, + 1217.0, + 1101.0, + 1217.0, + 1101.0, + 1232.0, + 1077.0, + 1232.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 919.0, + 1224.0, + 938.0, + 1224.0, + 938.0, + 1326.0, + 919.0, + 1326.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1076.0, + 1228.0, + 1102.0, + 1228.0, + 1102.0, + 1246.0, + 1076.0, + 1246.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1184.0, + 1231.0, + 1214.0, + 1231.0, + 1214.0, + 1245.0, + 1184.0, + 1245.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 939.0, + 1240.0, + 963.0, + 1240.0, + 963.0, + 1259.0, + 939.0, + 1259.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1077.0, + 1244.0, + 1111.0, + 1244.0, + 1111.0, + 1259.0, + 1077.0, + 1259.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1190.0, + 1262.0, + 1213.0, + 1262.0, + 1213.0, + 1277.0, + 1190.0, + 1277.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 940.0, + 1268.0, + 962.0, + 1268.0, + 962.0, + 1283.0, + 940.0, + 1283.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1220.0, + 1269.0, + 1228.0, + 1269.0, + 1228.0, + 1282.0, + 1220.0, + 1282.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1325.0, + 1276.0, + 1359.0, + 1276.0, + 1359.0, + 1298.0, + 1325.0, + 1298.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 939.0, + 1295.0, + 961.0, + 1295.0, + 961.0, + 1310.0, + 939.0, + 1310.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1183.0, + 1293.0, + 1213.0, + 1293.0, + 1213.0, + 1307.0, + 1183.0, + 1307.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1312.0, + 1299.0, + 1379.0, + 1299.0, + 1379.0, + 
1318.0, + 1312.0, + 1318.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 935.0, + 1321.0, + 961.0, + 1321.0, + 961.0, + 1336.0, + 935.0, + 1336.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1177.0, + 1322.0, + 1214.0, + 1322.0, + 1214.0, + 1340.0, + 1177.0, + 1340.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1198.0, + 1343.0, + 1232.0, + 1343.0, + 1232.0, + 1360.0, + 1198.0, + 1360.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1244.0, + 1343.0, + 1279.0, + 1343.0, + 1279.0, + 1359.0, + 1244.0, + 1359.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1293.0, + 1343.0, + 1327.0, + 1343.0, + 1327.0, + 1360.0, + 1293.0, + 1360.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1341.0, + 1343.0, + 1375.0, + 1343.0, + 1375.0, + 1360.0, + 1341.0, + 1360.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 947.0, + 1353.0, + 976.0, + 1353.0, + 976.0, + 1367.0, + 947.0, + 1367.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 985.0, + 1353.0, + 1090.0, + 1353.0, + 1090.0, + 1382.0, + 985.0, + 1382.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1100.0, + 1353.0, + 1129.0, + 1353.0, + 1129.0, + 1367.0, + 1100.0, + 1367.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1258.0, + 1354.0, + 1340.0, + 1354.0, + 1340.0, + 1376.0, + 1258.0, + 1376.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1193.0, + 1047.5, + 1217.0, + 1047.5, + 1217.0, + 1064.0, + 1193.0, + 1064.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 840.0, + 2060.0, + 861.0, + 2060.0, + 861.0, + 2091.0, + 840.0, + 2091.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 202.0, + 601.0, + 202.0, + 601.0, + 
237.0, + 296.0, + 237.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 637.0, + 202.0, + 936.0, + 202.0, + 936.0, + 237.0, + 637.0, + 237.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 233.0, + 936.0, + 233.0, + 936.0, + 266.0, + 296.0, + 266.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 261.0, + 937.0, + 261.0, + 937.0, + 298.0, + 294.0, + 298.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 292.0, + 377.0, + 292.0, + 377.0, + 329.0, + 294.0, + 329.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 435.0, + 292.0, + 937.0, + 292.0, + 937.0, + 329.0, + 435.0, + 329.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 321.0, + 451.0, + 321.0, + 451.0, + 362.0, + 294.0, + 362.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 509.0, + 321.0, + 937.0, + 321.0, + 937.0, + 362.0, + 509.0, + 362.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 353.0, + 937.0, + 353.0, + 937.0, + 388.0, + 294.0, + 388.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 384.0, + 936.0, + 384.0, + 936.0, + 417.0, + 295.0, + 417.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 416.0, + 937.0, + 416.0, + 937.0, + 449.0, + 294.0, + 449.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 445.0, + 937.0, + 445.0, + 937.0, + 479.0, + 295.0, + 479.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 476.0, + 936.0, + 476.0, + 936.0, + 508.0, + 294.0, + 508.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 505.0, + 937.0, + 505.0, + 937.0, + 539.0, + 294.0, + 539.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, 
+ "poly": [ + 295.0, + 537.0, + 638.0, + 537.0, + 638.0, + 568.0, + 295.0, + 568.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 673.0, + 537.0, + 936.0, + 537.0, + 936.0, + 568.0, + 673.0, + 568.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 566.0, + 704.0, + 566.0, + 704.0, + 599.0, + 295.0, + 599.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 798.0, + 566.0, + 936.0, + 566.0, + 936.0, + 599.0, + 798.0, + 599.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 599.0, + 935.0, + 599.0, + 935.0, + 630.0, + 296.0, + 630.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 627.0, + 407.0, + 627.0, + 407.0, + 662.0, + 296.0, + 662.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 464.0, + 627.0, + 737.0, + 627.0, + 737.0, + 662.0, + 464.0, + 662.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 834.0, + 627.0, + 936.0, + 627.0, + 936.0, + 662.0, + 834.0, + 662.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 659.0, + 936.0, + 659.0, + 936.0, + 690.0, + 295.0, + 690.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 689.0, + 938.0, + 689.0, + 938.0, + 723.0, + 295.0, + 723.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 716.0, + 399.0, + 716.0, + 399.0, + 753.0, + 293.0, + 753.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 425.0, + 716.0, + 936.0, + 716.0, + 936.0, + 753.0, + 425.0, + 753.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 746.0, + 684.0, + 746.0, + 684.0, + 783.0, + 293.0, + 783.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 708.0, + 746.0, + 935.0, + 746.0, + 935.0, + 783.0, + 708.0, + 783.0 + ], 
+ "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 778.0, + 881.0, + 778.0, + 881.0, + 813.0, + 294.0, + 813.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 944.0, + 881.0, + 944.0, + 881.0, + 978.0, + 295.0, + 978.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 975.0, + 881.0, + 975.0, + 881.0, + 1009.0, + 294.0, + 1009.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1006.0, + 882.0, + 1006.0, + 882.0, + 1040.0, + 295.0, + 1040.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1036.0, + 880.0, + 1036.0, + 880.0, + 1069.0, + 296.0, + 1069.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1066.0, + 881.0, + 1066.0, + 881.0, + 1099.0, + 295.0, + 1099.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1098.0, + 881.0, + 1098.0, + 881.0, + 1128.0, + 295.0, + 1128.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1127.0, + 881.0, + 1127.0, + 881.0, + 1159.0, + 296.0, + 1159.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1158.0, + 881.0, + 1158.0, + 881.0, + 1189.0, + 296.0, + 1189.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1189.0, + 881.0, + 1189.0, + 881.0, + 1220.0, + 295.0, + 1220.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1220.0, + 881.0, + 1220.0, + 881.0, + 1250.0, + 294.0, + 1250.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1249.0, + 882.0, + 1249.0, + 882.0, + 1279.0, + 293.0, + 1279.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1279.0, + 882.0, + 1279.0, + 882.0, + 1313.0, + 294.0, + 1313.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 295.0, + 1309.0, + 881.0, + 1309.0, + 881.0, + 1342.0, + 295.0, + 1342.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1341.0, + 881.0, + 1341.0, + 881.0, + 1372.0, + 295.0, + 1372.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1370.0, + 881.0, + 1370.0, + 881.0, + 1401.0, + 296.0, + 1401.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1398.0, + 882.0, + 1398.0, + 882.0, + 1436.0, + 295.0, + 1436.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1431.0, + 840.0, + 1431.0, + 840.0, + 1464.0, + 292.0, + 1464.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1732.0, + 1405.0, + 1732.0, + 1405.0, + 1768.0, + 294.0, + 1768.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1763.0, + 1404.0, + 1763.0, + 1404.0, + 1797.0, + 295.0, + 1797.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1794.0, + 1403.0, + 1794.0, + 1403.0, + 1828.0, + 293.0, + 1828.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1826.0, + 1404.0, + 1826.0, + 1404.0, + 1859.0, + 293.0, + 1859.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1855.0, + 1405.0, + 1855.0, + 1405.0, + 1889.0, + 293.0, + 1889.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1883.0, + 1407.0, + 1883.0, + 1407.0, + 1921.0, + 291.0, + 1921.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1914.0, + 1405.0, + 1914.0, + 1405.0, + 1950.0, + 292.0, + 1950.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1943.0, + 1407.0, + 1943.0, + 1407.0, + 1983.0, + 295.0, + 1983.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + 
"poly": [ + 295.0, + 823.0, + 1405.0, + 823.0, + 1405.0, + 856.0, + 295.0, + 856.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 855.0, + 1405.0, + 855.0, + 1405.0, + 888.0, + 296.0, + 888.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 887.0, + 1404.0, + 887.0, + 1404.0, + 919.0, + 295.0, + 919.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 915.0, + 716.0, + 915.0, + 716.0, + 951.0, + 290.0, + 951.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 915.0, + 915.0, + 1405.0, + 915.0, + 1405.0, + 951.0, + 915.0, + 951.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1565.0, + 1405.0, + 1565.0, + 1405.0, + 1602.0, + 295.0, + 1602.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1598.0, + 1404.0, + 1598.0, + 1404.0, + 1631.0, + 295.0, + 1631.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1624.0, + 1405.0, + 1624.0, + 1405.0, + 1663.0, + 294.0, + 1663.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1656.0, + 1325.0, + 1656.0, + 1325.0, + 1693.0, + 295.0, + 1693.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1355.0, + 1656.0, + 1405.0, + 1656.0, + 1405.0, + 1693.0, + 1355.0, + 1693.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1688.0, + 296.0, + 1688.0, + 296.0, + 1724.0, + 293.0, + 1724.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 326.0, + 1688.0, + 758.0, + 1688.0, + 758.0, + 1724.0, + 326.0, + 1724.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1474.0, + 880.0, + 1474.0, + 880.0, + 1510.0, + 295.0, + 1510.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1506.0, + 
881.0, + 1506.0, + 881.0, + 1540.0, + 295.0, + 1540.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1534.0, + 882.0, + 1534.0, + 882.0, + 1573.0, + 293.0, + 1573.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 5, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 297, + 202, + 1404, + 202, + 1404, + 415, + 297, + 415 + ], + "score": 0.981 + }, + { + "category_id": 1, + "poly": [ + 298, + 1036, + 880, + 1036, + 880, + 1310, + 298, + 1310 + ], + "score": 0.979 + }, + { + "category_id": 1, + "poly": [ + 296, + 1356, + 1405, + 1356, + 1405, + 1537, + 296, + 1537 + ], + "score": 0.975 + }, + { + "category_id": 1, + "poly": [ + 298, + 822, + 1406, + 822, + 1406, + 975, + 298, + 975 + ], + "score": 0.973 + }, + { + "category_id": 3, + "poly": [ + 860, + 1541, + 1390, + 1541, + 1390, + 1797, + 860, + 1797 + ], + "score": 0.966 + }, + { + "category_id": 4, + "poly": [ + 305, + 677, + 1098, + 677, + 1098, + 817, + 305, + 817 + ], + "score": 0.963 + }, + { + "category_id": 3, + "poly": [ + 322, + 435, + 1076, + 435, + 1076, + 669, + 322, + 669 + ], + "score": 0.963 + }, + { + "category_id": 4, + "poly": [ + 845, + 1807, + 1402, + 1807, + 1402, + 1947, + 845, + 1947 + ], + "score": 0.962 + }, + { + "category_id": 4, + "poly": [ + 901, + 1226, + 1403, + 1226, + 1403, + 1339, + 901, + 1339 + ], + "score": 0.958 + }, + { + "category_id": 3, + "poly": [ + 904, + 1048, + 1399, + 1048, + 1399, + 1211, + 904, + 1211 + ], + "score": 0.958 + }, + { + "category_id": 1, + "poly": [ + 299, + 1554, + 823, + 1554, + 823, + 1999, + 299, + 1999 + ], + "score": 0.948 + }, + { + "category_id": 0, + "poly": [ + 297, + 992, + 1078, + 992, + 1078, + 1030, + 297, + 1030 + ], + "score": 0.933 + }, + { + "category_id": 4, + "poly": [ + 1130, + 704, + 1391, + 704, + 1391, + 789, + 1130, + 789 + ], + "score": 0.926 + }, + { + "category_id": 3, + "poly": [ + 1146, + 444, + 1394, + 
444, + 1394, + 697, + 1146, + 697 + ], + "score": 0.925 + }, + { + "category_id": 1, + "poly": [ + 295, + 1947, + 1405, + 1947, + 1405, + 2008, + 295, + 2008 + ], + "score": 0.884 + }, + { + "category_id": 2, + "poly": [ + 841, + 2061, + 858, + 2061, + 858, + 2084, + 841, + 2084 + ], + "score": 0.71 + }, + { + "category_id": 1, + "poly": [ + 297, + 1323, + 879, + 1323, + 879, + 1355, + 297, + 1355 + ], + "score": 0.448 + }, + { + "category_id": 0, + "poly": [ + 297, + 1323, + 879, + 1323, + 879, + 1355, + 297, + 1355 + ], + "score": 0.323 + }, + { + "category_id": 2, + "poly": [ + 841, + 2061, + 859, + 2061, + 859, + 2084, + 841, + 2084 + ], + "score": 0.126 + }, + { + "category_id": 13, + "poly": [ + 767, + 235, + 806, + 235, + 806, + 263, + 767, + 263 + ], + "score": 0.33, + "latex": "_ { 0 / 1 }" + }, + { + "category_id": 15, + "poly": [ + 866.0, + 1545.0, + 1122.0, + 1545.0, + 1122.0, + 1571.0, + 866.0, + 1571.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1153.0, + 1534.0, + 1397.0, + 1534.0, + 1397.0, + 1571.0, + 1153.0, + 1571.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 880.0, + 1573.0, + 905.0, + 1573.0, + 905.0, + 1591.0, + 880.0, + 1591.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 934.0, + 1571.0, + 1061.0, + 1571.0, + 1061.0, + 1595.0, + 934.0, + 1595.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1222.0, + 1566.0, + 1353.0, + 1566.0, + 1353.0, + 1588.0, + 1222.0, + 1588.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1223.0, + 1585.0, + 1284.0, + 1585.0, + 1284.0, + 1605.0, + 1223.0, + 1605.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 860.0, + 1590.0, + 886.0, + 1590.0, + 886.0, + 1615.0, + 860.0, + 1615.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 936.0, + 1591.0, + 995.0, + 1591.0, + 995.0, + 1611.0, + 936.0, + 
1611.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1126.0, + 1591.0, + 1161.0, + 1591.0, + 1161.0, + 1726.0, + 1126.0, + 1726.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1151.0, + 1592.0, + 1183.0, + 1592.0, + 1183.0, + 1610.0, + 1151.0, + 1610.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 922.0, + 1596.0, + 934.0, + 1596.0, + 934.0, + 1603.0, + 922.0, + 1603.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 879.0, + 1597.0, + 902.0, + 1597.0, + 902.0, + 1616.0, + 879.0, + 1616.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1222.0, + 1601.0, + 1376.0, + 1601.0, + 1376.0, + 1622.0, + 1222.0, + 1622.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 858.0, + 1606.0, + 885.0, + 1606.0, + 885.0, + 1701.0, + 858.0, + 1701.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 936.0, + 1606.0, + 1083.0, + 1606.0, + 1083.0, + 1629.0, + 936.0, + 1629.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 881.0, + 1626.0, + 900.0, + 1626.0, + 900.0, + 1642.0, + 881.0, + 1642.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 937.0, + 1625.0, + 1011.0, + 1625.0, + 1011.0, + 1645.0, + 937.0, + 1645.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1223.0, + 1617.0, + 1301.0, + 1617.0, + 1301.0, + 1643.0, + 1223.0, + 1643.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1152.0, + 1633.0, + 1183.0, + 1633.0, + 1183.0, + 1651.0, + 1152.0, + 1651.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 886.0, + 1652.0, + 899.0, + 1652.0, + 899.0, + 1665.0, + 886.0, + 1665.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 887.0, + 1679.0, + 899.0, + 1679.0, + 899.0, + 1691.0, + 887.0, + 1691.0 + ], 
+ "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1153.0, + 1672.0, + 1183.0, + 1672.0, + 1183.0, + 1689.0, + 1153.0, + 1689.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1210.0, + 1680.0, + 1227.0, + 1680.0, + 1227.0, + 1718.0, + 1210.0, + 1718.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1294.0, + 1680.0, + 1299.0, + 1680.0, + 1299.0, + 1685.0, + 1294.0, + 1685.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 865.0, + 1689.0, + 897.0, + 1689.0, + 897.0, + 1739.0, + 865.0, + 1739.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1217.0, + 1687.0, + 1289.0, + 1687.0, + 1289.0, + 1719.0, + 1217.0, + 1719.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1152.0, + 1711.0, + 1183.0, + 1711.0, + 1183.0, + 1730.0, + 1152.0, + 1730.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 888.0, + 1732.0, + 897.0, + 1732.0, + 897.0, + 1740.0, + 888.0, + 1740.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 887.0, + 1754.0, + 919.0, + 1754.0, + 919.0, + 1779.0, + 887.0, + 1779.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 937.0, + 1759.0, + 969.0, + 1759.0, + 969.0, + 1780.0, + 937.0, + 1780.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 980.0, + 1759.0, + 1023.0, + 1759.0, + 1023.0, + 1779.0, + 980.0, + 1779.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1027.0, + 1761.0, + 1060.0, + 1761.0, + 1060.0, + 1778.0, + 1027.0, + 1778.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1153.0, + 1750.0, + 1205.0, + 1750.0, + 1205.0, + 1778.0, + 1153.0, + 1778.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1223.0, + 1757.0, + 1354.0, + 1757.0, + 1354.0, + 1779.0, + 1223.0, + 1779.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 934.0, + 1770.0, + 1054.0, + 1770.0, + 1054.0, + 1800.0, + 934.0, + 1800.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1222.0, + 1769.0, + 1345.0, + 1769.0, + 1345.0, + 1799.0, + 1222.0, + 1799.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 300.0, + 674.0, + 1103.0, + 674.0, + 1103.0, + 710.0, + 300.0, + 710.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 301.0, + 704.0, + 1103.0, + 704.0, + 1103.0, + 737.0, + 301.0, + 737.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 300.0, + 732.0, + 1102.0, + 732.0, + 1102.0, + 764.0, + 300.0, + 764.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 299.0, + 760.0, + 1103.0, + 760.0, + 1103.0, + 794.0, + 299.0, + 794.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 300.0, + 789.0, + 990.0, + 789.0, + 990.0, + 821.0, + 300.0, + 821.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 321.0, + 435.0, + 573.0, + 435.0, + 573.0, + 464.0, + 321.0, + 464.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 609.0, + 432.0, + 818.0, + 432.0, + 818.0, + 460.0, + 609.0, + 460.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 410.0, + 472.0, + 421.0, + 472.0, + 421.0, + 485.0, + 410.0, + 485.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 599.0, + 472.0, + 624.0, + 472.0, + 624.0, + 490.0, + 599.0, + 490.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 316.0, + 475.0, + 348.0, + 475.0, + 348.0, + 612.0, + 316.0, + 612.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 843.0, + 477.0, + 864.0, + 477.0, + 864.0, + 500.0, + 843.0, + 500.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": 
[ + 340.0, + 478.0, + 364.0, + 478.0, + 364.0, + 498.0, + 340.0, + 498.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 860.0, + 480.0, + 884.0, + 480.0, + 884.0, + 499.0, + 860.0, + 499.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 403.0, + 481.0, + 430.0, + 481.0, + 430.0, + 550.0, + 403.0, + 550.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 837.0, + 487.0, + 870.0, + 487.0, + 870.0, + 618.0, + 837.0, + 618.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 600.0, + 500.0, + 623.0, + 500.0, + 623.0, + 515.0, + 600.0, + 515.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 583.0, + 504.0, + 607.0, + 504.0, + 607.0, + 558.0, + 583.0, + 558.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 342.0, + 506.0, + 363.0, + 506.0, + 363.0, + 528.0, + 342.0, + 528.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 863.0, + 509.0, + 883.0, + 509.0, + 883.0, + 526.0, + 863.0, + 526.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 341.0, + 537.0, + 363.0, + 537.0, + 363.0, + 584.0, + 341.0, + 584.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 409.0, + 539.0, + 430.0, + 539.0, + 430.0, + 560.0, + 409.0, + 560.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 583.0, + 526.0, + 624.0, + 526.0, + 624.0, + 573.0, + 583.0, + 573.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 662.0, + 561.0, + 674.0, + 561.0, + 674.0, + 576.0, + 662.0, + 576.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 862.0, + 535.0, + 883.0, + 535.0, + 883.0, + 582.0, + 862.0, + 582.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1026.0, + 531.0, + 1051.0, + 531.0, + 1051.0, + 584.0, + 1026.0, + 584.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 349.0, + 599.0, + 361.0, + 599.0, + 361.0, + 614.0, + 349.0, + 614.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 598.0, + 582.0, + 626.0, + 582.0, + 626.0, + 630.0, + 598.0, + 630.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 658.0, + 576.0, + 683.0, + 576.0, + 683.0, + 626.0, + 658.0, + 626.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 868.0, + 594.0, + 884.0, + 594.0, + 884.0, + 611.0, + 868.0, + 611.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1026.0, + 582.0, + 1051.0, + 582.0, + 1051.0, + 622.0, + 1026.0, + 622.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 868.0, + 622.0, + 887.0, + 622.0, + 887.0, + 639.0, + 868.0, + 639.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 344.0, + 630.0, + 384.0, + 630.0, + 384.0, + 652.0, + 344.0, + 652.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 399.0, + 630.0, + 438.0, + 630.0, + 438.0, + 651.0, + 399.0, + 651.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 454.0, + 630.0, + 493.0, + 630.0, + 493.0, + 651.0, + 454.0, + 651.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 508.0, + 632.0, + 547.0, + 632.0, + 547.0, + 650.0, + 508.0, + 650.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 607.0, + 634.0, + 642.0, + 634.0, + 642.0, + 653.0, + 607.0, + 653.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 663.0, + 632.0, + 701.0, + 632.0, + 701.0, + 654.0, + 663.0, + 654.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 724.0, + 633.0, + 758.0, + 633.0, + 758.0, + 652.0, + 724.0, + 652.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 781.0, + 
634.0, + 817.0, + 634.0, + 817.0, + 653.0, + 781.0, + 653.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 865.0, + 630.0, + 905.0, + 630.0, + 905.0, + 652.0, + 865.0, + 652.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 922.0, + 630.0, + 962.0, + 630.0, + 962.0, + 651.0, + 922.0, + 651.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 980.0, + 630.0, + 1019.0, + 630.0, + 1019.0, + 651.0, + 980.0, + 651.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1037.0, + 630.0, + 1077.0, + 630.0, + 1077.0, + 652.0, + 1037.0, + 652.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 390.0, + 644.0, + 504.0, + 644.0, + 504.0, + 671.0, + 390.0, + 671.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 656.0, + 648.0, + 770.0, + 648.0, + 770.0, + 672.0, + 656.0, + 672.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 913.0, + 643.0, + 1029.0, + 643.0, + 1029.0, + 672.0, + 913.0, + 672.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 855.0, + 428.0, + 1085.0, + 428.0, + 1085.0, + 472.0, + 855.0, + 472.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 844.0, + 1804.0, + 1406.0, + 1804.0, + 1406.0, + 1840.0, + 844.0, + 1840.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 846.0, + 1836.0, + 1404.0, + 1836.0, + 1404.0, + 1865.0, + 846.0, + 1865.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 844.0, + 1862.0, + 1404.0, + 1862.0, + 1404.0, + 1896.0, + 844.0, + 1896.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 845.0, + 1890.0, + 1405.0, + 1890.0, + 1405.0, + 1922.0, + 845.0, + 1922.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 844.0, + 1914.0, + 1333.0, + 1914.0, + 1333.0, + 1951.0, + 
844.0, + 1951.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 901.0, + 1224.0, + 1405.0, + 1224.0, + 1405.0, + 1257.0, + 901.0, + 1257.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 901.0, + 1253.0, + 1404.0, + 1253.0, + 1404.0, + 1282.0, + 901.0, + 1282.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 900.0, + 1281.0, + 1405.0, + 1281.0, + 1405.0, + 1312.0, + 900.0, + 1312.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 902.0, + 1310.0, + 1328.0, + 1310.0, + 1328.0, + 1341.0, + 902.0, + 1341.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 902.0, + 1059.0, + 959.0, + 1059.0, + 959.0, + 1080.0, + 902.0, + 1080.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 909.0, + 1070.0, + 958.0, + 1070.0, + 958.0, + 1097.0, + 909.0, + 1097.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 909.0, + 1108.0, + 952.0, + 1108.0, + 952.0, + 1136.0, + 909.0, + 1136.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 907.0, + 1123.0, + 957.0, + 1123.0, + 957.0, + 1147.0, + 907.0, + 1147.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 913.0, + 1155.0, + 949.0, + 1155.0, + 949.0, + 1182.0, + 913.0, + 1182.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 913.0, + 1172.0, + 947.0, + 1172.0, + 947.0, + 1193.0, + 913.0, + 1193.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 909.0, + 1180.0, + 953.0, + 1180.0, + 953.0, + 1209.0, + 909.0, + 1209.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 989.0, + 1081.0, + 989.0, + 1081.0, + 1040.0, + 292.0, + 1040.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1128.0, + 699.0, + 1392.0, + 699.0, + 1392.0, + 735.0, + 1128.0, + 735.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1127.0, + 729.0, + 1392.0, + 729.0, + 1392.0, + 765.0, + 1127.0, + 765.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1129.0, + 759.0, + 1383.0, + 759.0, + 1383.0, + 790.0, + 1129.0, + 790.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1177.0, + 444.0, + 1397.0, + 444.0, + 1397.0, + 472.0, + 1177.0, + 472.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1165.0, + 485.0, + 1191.0, + 485.0, + 1191.0, + 508.0, + 1165.0, + 508.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1142.0, + 489.0, + 1174.0, + 489.0, + 1174.0, + 635.0, + 1142.0, + 635.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1167.0, + 535.0, + 1190.0, + 535.0, + 1190.0, + 555.0, + 1167.0, + 555.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1257.0, + 575.0, + 1292.0, + 575.0, + 1292.0, + 597.0, + 1257.0, + 597.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1174.0, + 587.0, + 1189.0, + 587.0, + 1189.0, + 604.0, + 1174.0, + 604.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1256.0, + 592.0, + 1327.0, + 592.0, + 1327.0, + 615.0, + 1256.0, + 615.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1257.0, + 610.0, + 1339.0, + 610.0, + 1339.0, + 630.0, + 1257.0, + 630.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1176.0, + 637.0, + 1187.0, + 637.0, + 1187.0, + 650.0, + 1176.0, + 650.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1256.0, + 627.0, + 1376.0, + 627.0, + 1376.0, + 650.0, + 1256.0, + 650.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1171.0, + 656.0, + 1400.0, + 656.0, + 1400.0, + 678.0, + 1171.0, + 678.0 + ], + "score": 1.0, + "text": "" + }, + { 
+ "category_id": 15, + "poly": [ + 1223.0, + 672.0, + 1349.0, + 672.0, + 1349.0, + 700.0, + 1223.0, + 700.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 839.0, + 2059.0, + 860.0, + 2059.0, + 860.0, + 2091.0, + 839.0, + 2091.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1321.0, + 886.0, + 1321.0, + 886.0, + 1360.0, + 293.0, + 1360.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 840.0, + 2059.0, + 860.0, + 2059.0, + 860.0, + 2092.0, + 840.0, + 2092.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 203.0, + 1405.0, + 203.0, + 1405.0, + 238.0, + 295.0, + 238.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 234.0, + 766.0, + 234.0, + 766.0, + 266.0, + 293.0, + 266.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 807.0, + 234.0, + 1405.0, + 234.0, + 1405.0, + 266.0, + 807.0, + 266.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 263.0, + 1407.0, + 263.0, + 1407.0, + 299.0, + 292.0, + 299.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 295.0, + 1404.0, + 295.0, + 1404.0, + 329.0, + 295.0, + 329.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 321.0, + 1405.0, + 321.0, + 1405.0, + 360.0, + 292.0, + 360.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 354.0, + 1404.0, + 354.0, + 1404.0, + 390.0, + 294.0, + 390.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 384.0, + 898.0, + 384.0, + 898.0, + 419.0, + 295.0, + 419.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1035.0, + 878.0, + 1035.0, + 878.0, + 1068.0, + 295.0, + 1068.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1067.0, + 
881.0, + 1067.0, + 881.0, + 1097.0, + 296.0, + 1097.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1096.0, + 881.0, + 1096.0, + 881.0, + 1128.0, + 295.0, + 1128.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1127.0, + 881.0, + 1127.0, + 881.0, + 1157.0, + 294.0, + 1157.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1158.0, + 882.0, + 1158.0, + 882.0, + 1189.0, + 294.0, + 1189.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1187.0, + 880.0, + 1187.0, + 880.0, + 1221.0, + 295.0, + 1221.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1216.0, + 881.0, + 1216.0, + 881.0, + 1250.0, + 294.0, + 1250.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1248.0, + 879.0, + 1248.0, + 879.0, + 1281.0, + 295.0, + 1281.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1279.0, + 818.0, + 1279.0, + 818.0, + 1310.0, + 292.0, + 1310.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1352.0, + 1405.0, + 1352.0, + 1405.0, + 1390.0, + 294.0, + 1390.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1384.0, + 1405.0, + 1384.0, + 1405.0, + 1418.0, + 293.0, + 1418.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1413.0, + 1405.0, + 1413.0, + 1405.0, + 1449.0, + 293.0, + 1449.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1445.0, + 1405.0, + 1445.0, + 1405.0, + 1480.0, + 294.0, + 1480.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1475.0, + 1407.0, + 1475.0, + 1407.0, + 1510.0, + 294.0, + 1510.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1505.0, + 1322.0, + 1505.0, + 
1322.0, + 1541.0, + 296.0, + 1541.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 818.0, + 1404.0, + 818.0, + 1404.0, + 859.0, + 295.0, + 859.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 848.0, + 1404.0, + 848.0, + 1404.0, + 888.0, + 293.0, + 888.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 881.0, + 1406.0, + 881.0, + 1406.0, + 918.0, + 293.0, + 918.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 912.0, + 1406.0, + 912.0, + 1406.0, + 949.0, + 295.0, + 949.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 942.0, + 1357.0, + 942.0, + 1357.0, + 979.0, + 295.0, + 979.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 298.0, + 1552.0, + 825.0, + 1552.0, + 825.0, + 1583.0, + 298.0, + 1583.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1577.0, + 827.0, + 1577.0, + 827.0, + 1619.0, + 294.0, + 1619.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1609.0, + 825.0, + 1609.0, + 825.0, + 1645.0, + 296.0, + 1645.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1642.0, + 825.0, + 1642.0, + 825.0, + 1678.0, + 294.0, + 1678.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1671.0, + 825.0, + 1671.0, + 825.0, + 1705.0, + 295.0, + 1705.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1702.0, + 825.0, + 1702.0, + 825.0, + 1735.0, + 295.0, + 1735.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1731.0, + 825.0, + 1731.0, + 825.0, + 1769.0, + 295.0, + 1769.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1764.0, + 826.0, + 1764.0, + 826.0, + 1796.0, + 295.0, + 1796.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1794.0, + 826.0, + 1794.0, + 826.0, + 1826.0, + 294.0, + 1826.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1825.0, + 826.0, + 1825.0, + 826.0, + 1856.0, + 296.0, + 1856.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1853.0, + 826.0, + 1853.0, + 826.0, + 1886.0, + 295.0, + 1886.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1885.0, + 826.0, + 1885.0, + 826.0, + 1916.0, + 296.0, + 1916.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1915.0, + 826.0, + 1915.0, + 826.0, + 1946.0, + 295.0, + 1946.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1945.0, + 825.0, + 1945.0, + 825.0, + 1978.0, + 295.0, + 1978.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1976.0, + 824.0, + 1976.0, + 824.0, + 2006.0, + 296.0, + 2006.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1945.0, + 1403.0, + 1945.0, + 1403.0, + 1980.0, + 295.0, + 1980.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1975.0, + 1403.0, + 1975.0, + 1403.0, + 2010.0, + 294.0, + 2010.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1321.0, + 886.0, + 1321.0, + 886.0, + 1360.0, + 293.0, + 1360.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 6, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 297, + 1620, + 1404, + 1620, + 1404, + 2015, + 297, + 2015 + ], + "score": 0.984 + }, + { + "category_id": 1, + "poly": [ + 297, + 774, + 1404, + 774, + 1404, + 1079, + 297, + 1079 + ], + "score": 0.979 + }, + { + "category_id": 1, + "poly": [ + 299, + 1093, + 824, + 1093, + 824, + 1366, + 299, + 1366 + ], + 
"score": 0.974 + }, + { + "category_id": 4, + "poly": [ + 296, + 451, + 1404, + 451, + 1404, + 647, + 296, + 647 + ], + "score": 0.974 + }, + { + "category_id": 1, + "poly": [ + 298, + 1367, + 1404, + 1367, + 1404, + 1549, + 298, + 1549 + ], + "score": 0.972 + }, + { + "category_id": 5, + "poly": [ + 854, + 1089, + 1387, + 1089, + 1387, + 1210, + 854, + 1210 + ], + "score": 0.971, + "html": "
Real-world WidowX pick and place
MethodEpoch5075100200
CQL7/94/94/92/9
CQL + VIB3/98/97/97/9
" + }, + { + "category_id": 3, + "poly": [ + 352, + 206, + 1338, + 206, + 1338, + 436, + 352, + 436 + ], + "score": 0.965 + }, + { + "category_id": 1, + "poly": [ + 300, + 669, + 1399, + 669, + 1399, + 761, + 300, + 761 + ], + "score": 0.96 + }, + { + "category_id": 0, + "poly": [ + 298, + 1572, + 500, + 1572, + 500, + 1608, + 298, + 1608 + ], + "score": 0.905 + }, + { + "category_id": 2, + "poly": [ + 840, + 2062, + 858, + 2062, + 858, + 2085, + 840, + 2085 + ], + "score": 0.793 + }, + { + "category_id": 1, + "poly": [ + 846, + 1218, + 1401, + 1218, + 1401, + 1359, + 846, + 1359 + ], + "score": 0.518 + }, + { + "category_id": 6, + "poly": [ + 846, + 1218, + 1401, + 1218, + 1401, + 1359, + 846, + 1359 + ], + "score": 0.426 + }, + { + "category_id": 13, + "poly": [ + 860, + 836, + 915, + 836, + 915, + 865, + 860, + 865 + ], + "score": 0.87, + "latex": "3 5 \\%" + }, + { + "category_id": 13, + "poly": [ + 732, + 1048, + 800, + 1048, + 800, + 1078, + 732, + 1078 + ], + "score": 0.86, + "latex": "\\leq 4 / 9" + }, + { + "category_id": 13, + "poly": [ + 671, + 730, + 749, + 730, + 749, + 759, + 671, + 759 + ], + "score": 0.85, + "latex": "7 0 . 
8 \\%" + }, + { + "category_id": 13, + "poly": [ + 1008, + 1366, + 1071, + 1366, + 1071, + 1397, + 1008, + 1397 + ], + "score": 0.82, + "latex": "\\geq 7 / 9" + }, + { + "category_id": 13, + "poly": [ + 974, + 1247, + 1098, + 1247, + 1098, + 1274, + 974, + 1274 + ], + "score": 0.7, + "latex": "\\mathrm { C Q L + V I B }" + }, + { + "category_id": 13, + "poly": [ + 928, + 1181, + 948, + 1181, + 948, + 1200, + 928, + 1200 + ], + "score": 0.37, + "latex": "^ +" + }, + { + "category_id": 13, + "poly": [ + 1178, + 1176, + 1214, + 1176, + 1214, + 1203, + 1178, + 1203 + ], + "score": 0.26, + "latex": "\\mathbf { \\overline { { 8 / 9 } } }" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 447.0, + 1405.0, + 447.0, + 1405.0, + 486.0, + 294.0, + 486.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 479.0, + 1405.0, + 479.0, + 1405.0, + 512.0, + 294.0, + 512.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 507.0, + 1405.0, + 507.0, + 1405.0, + 540.0, + 294.0, + 540.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 534.0, + 1404.0, + 534.0, + 1404.0, + 567.0, + 295.0, + 567.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 562.0, + 1405.0, + 562.0, + 1405.0, + 595.0, + 295.0, + 595.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 586.0, + 1406.0, + 586.0, + 1406.0, + 626.0, + 293.0, + 626.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 618.0, + 948.0, + 618.0, + 948.0, + 649.0, + 293.0, + 649.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 469.0, + 206.0, + 587.0, + 206.0, + 587.0, + 232.0, + 469.0, + 232.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 783.0, + 206.0, + 900.0, + 206.0, + 900.0, + 232.0, + 783.0, + 232.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 1059.0, + 206.0, + 1313.0, + 206.0, + 1313.0, + 232.0, + 1059.0, + 232.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 348.0, + 238.0, + 382.0, + 238.0, + 382.0, + 390.0, + 348.0, + 390.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 389.0, + 226.0, + 405.0, + 226.0, + 405.0, + 279.0, + 389.0, + 279.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 422.0, + 230.0, + 602.0, + 230.0, + 602.0, + 252.0, + 422.0, + 252.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 684.0, + 226.0, + 723.0, + 226.0, + 723.0, + 389.0, + 684.0, + 389.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 804.0, + 247.0, + 813.0, + 247.0, + 813.0, + 255.0, + 804.0, + 255.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 938.0, + 261.0, + 949.0, + 261.0, + 949.0, + 272.0, + 938.0, + 272.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 957.0, + 221.0, + 1007.0, + 221.0, + 1007.0, + 366.0, + 957.0, + 366.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1022.0, + 238.0, + 1055.0, + 238.0, + 1055.0, + 348.0, + 1022.0, + 348.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1062.0, + 249.0, + 1078.0, + 249.0, + 1078.0, + 264.0, + 1062.0, + 264.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1161.0, + 228.0, + 1293.0, + 228.0, + 1293.0, + 287.0, + 1161.0, + 287.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1294.0, + 222.0, + 1342.0, + 222.0, + 1342.0, + 367.0, + 1294.0, + 367.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 714.0, + 283.0, + 722.0, + 283.0, + 722.0, + 292.0, + 714.0, + 292.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1119.0, + 274.0, + 1128.0, + 
274.0, + 1128.0, + 287.0, + 1119.0, + 287.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1184.0, + 278.0, + 1293.0, + 278.0, + 1293.0, + 303.0, + 1184.0, + 303.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 960.0, + 289.0, + 984.0, + 289.0, + 984.0, + 307.0, + 960.0, + 307.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1060.0, + 290.0, + 1075.0, + 290.0, + 1075.0, + 304.0, + 1060.0, + 304.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1294.0, + 288.0, + 1321.0, + 288.0, + 1321.0, + 308.0, + 1294.0, + 308.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 383.0, + 298.0, + 404.0, + 298.0, + 404.0, + 315.0, + 383.0, + 315.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1268.0, + 298.0, + 1288.0, + 298.0, + 1288.0, + 315.0, + 1268.0, + 315.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 712.0, + 310.0, + 723.0, + 310.0, + 723.0, + 320.0, + 712.0, + 320.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 373.0, + 334.0, + 405.0, + 334.0, + 405.0, + 390.0, + 373.0, + 390.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 511.0, + 350.0, + 614.0, + 350.0, + 614.0, + 397.0, + 511.0, + 397.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 713.0, + 338.0, + 718.0, + 338.0, + 718.0, + 344.0, + 713.0, + 344.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 850.0, + 354.0, + 919.0, + 354.0, + 919.0, + 385.0, + 850.0, + 385.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 958.0, + 320.0, + 985.0, + 320.0, + 985.0, + 374.0, + 958.0, + 374.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1029.0, + 328.0, + 1079.0, + 328.0, + 1079.0, + 389.0, + 1029.0, + 389.0 + ], + "score": 1.0, + 
"text": "" + }, + { + "category_id": 15, + "poly": [ + 1108.0, + 345.0, + 1123.0, + 345.0, + 1123.0, + 380.0, + 1108.0, + 380.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1295.0, + 320.0, + 1321.0, + 320.0, + 1321.0, + 374.0, + 1295.0, + 374.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 390.0, + 397.0, + 417.0, + 397.0, + 417.0, + 417.0, + 390.0, + 417.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 437.0, + 397.0, + 469.0, + 397.0, + 469.0, + 417.0, + 437.0, + 417.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 485.0, + 397.0, + 523.0, + 397.0, + 523.0, + 417.0, + 485.0, + 417.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 529.0, + 395.0, + 574.0, + 395.0, + 574.0, + 418.0, + 529.0, + 418.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 584.0, + 397.0, + 621.0, + 397.0, + 621.0, + 417.0, + 584.0, + 417.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 632.0, + 397.0, + 671.0, + 397.0, + 671.0, + 417.0, + 632.0, + 417.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 707.0, + 386.0, + 741.0, + 386.0, + 741.0, + 420.0, + 707.0, + 420.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 765.0, + 397.0, + 796.0, + 397.0, + 796.0, + 417.0, + 765.0, + 417.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 818.0, + 397.0, + 854.0, + 397.0, + 854.0, + 417.0, + 818.0, + 417.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 872.0, + 397.0, + 908.0, + 397.0, + 908.0, + 417.0, + 872.0, + 417.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 926.0, + 389.0, + 983.0, + 389.0, + 983.0, + 417.0, + 926.0, + 417.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1069.0, + 398.0, + 
1092.0, + 398.0, + 1092.0, + 416.0, + 1069.0, + 416.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1145.0, + 397.0, + 1181.0, + 397.0, + 1181.0, + 417.0, + 1145.0, + 417.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1227.0, + 397.0, + 1265.0, + 397.0, + 1265.0, + 417.0, + 1227.0, + 417.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1293.0, + 387.0, + 1321.0, + 387.0, + 1321.0, + 409.0, + 1293.0, + 409.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 470.0, + 409.0, + 586.0, + 409.0, + 586.0, + 438.0, + 470.0, + 438.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 784.0, + 409.0, + 901.0, + 409.0, + 901.0, + 438.0, + 784.0, + 438.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1130.0, + 408.0, + 1245.0, + 408.0, + 1245.0, + 438.0, + 1130.0, + 438.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 447.0, + 256.0, + 534.0, + 256.0, + 534.0, + 276.0, + 447.0, + 276.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1121.0, + 237.0, + 1152.0, + 237.0, + 1152.0, + 253.5, + 1121.0, + 253.5 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 736.0, + 273.5, + 953.0, + 273.5, + 953.0, + 315.0, + 736.0, + 315.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 589.75, + 295.0, + 622.75, + 295.0, + 622.75, + 311.0, + 589.75, + 311.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1566.0, + 505.0, + 1566.0, + 505.0, + 1616.0, + 292.0, + 1616.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 838.0, + 2058.0, + 862.0, + 2058.0, + 862.0, + 2090.0, + 838.0, + 2090.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 844.0, + 1214.0, + 1404.0, + 1214.0, + 1404.0, + 1251.0, + 844.0, + 
1251.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 845.0, + 1246.0, + 973.0, + 1246.0, + 973.0, + 1275.0, + 845.0, + 1275.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1099.0, + 1246.0, + 1404.0, + 1246.0, + 1404.0, + 1275.0, + 1099.0, + 1275.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 845.0, + 1275.0, + 1405.0, + 1275.0, + 1405.0, + 1305.0, + 845.0, + 1305.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 844.0, + 1302.0, + 1406.0, + 1302.0, + 1406.0, + 1334.0, + 844.0, + 1334.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 845.0, + 1330.0, + 1361.0, + 1330.0, + 1361.0, + 1359.0, + 845.0, + 1359.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1618.0, + 1404.0, + 1618.0, + 1404.0, + 1654.0, + 295.0, + 1654.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1649.0, + 1406.0, + 1649.0, + 1406.0, + 1686.0, + 292.0, + 1686.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1680.0, + 1404.0, + 1680.0, + 1404.0, + 1714.0, + 291.0, + 1714.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1707.0, + 1405.0, + 1707.0, + 1405.0, + 1748.0, + 291.0, + 1748.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1740.0, + 1404.0, + 1740.0, + 1404.0, + 1776.0, + 294.0, + 1776.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1772.0, + 1404.0, + 1772.0, + 1404.0, + 1805.0, + 296.0, + 1805.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1801.0, + 1406.0, + 1801.0, + 1406.0, + 1837.0, + 295.0, + 1837.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1830.0, + 1406.0, + 1830.0, + 1406.0, + 1866.0, + 295.0, + 1866.0 + 
], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1864.0, + 1405.0, + 1864.0, + 1405.0, + 1896.0, + 295.0, + 1896.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1891.0, + 1405.0, + 1891.0, + 1405.0, + 1927.0, + 294.0, + 1927.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1921.0, + 1405.0, + 1921.0, + 1405.0, + 1957.0, + 295.0, + 1957.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1949.0, + 1404.0, + 1949.0, + 1404.0, + 1992.0, + 292.0, + 1992.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1984.0, + 1404.0, + 1984.0, + 1404.0, + 2020.0, + 295.0, + 2020.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 772.0, + 1407.0, + 772.0, + 1407.0, + 810.0, + 294.0, + 810.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 800.0, + 1405.0, + 800.0, + 1405.0, + 843.0, + 292.0, + 843.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 835.0, + 859.0, + 835.0, + 859.0, + 868.0, + 292.0, + 868.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 916.0, + 835.0, + 1405.0, + 835.0, + 1405.0, + 868.0, + 916.0, + 868.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 866.0, + 1406.0, + 866.0, + 1406.0, + 901.0, + 292.0, + 901.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 896.0, + 1406.0, + 896.0, + 1406.0, + 932.0, + 295.0, + 932.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 927.0, + 1406.0, + 927.0, + 1406.0, + 962.0, + 294.0, + 962.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 956.0, + 1406.0, + 956.0, + 1406.0, + 992.0, + 292.0, + 992.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 295.0, + 987.0, + 1405.0, + 987.0, + 1405.0, + 1023.0, + 295.0, + 1023.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1015.0, + 1406.0, + 1015.0, + 1406.0, + 1053.0, + 294.0, + 1053.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1049.0, + 731.0, + 1049.0, + 731.0, + 1081.0, + 296.0, + 1081.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 801.0, + 1049.0, + 877.0, + 1049.0, + 877.0, + 1081.0, + 801.0, + 1081.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1093.0, + 826.0, + 1093.0, + 826.0, + 1127.0, + 297.0, + 1127.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1124.0, + 825.0, + 1124.0, + 825.0, + 1157.0, + 296.0, + 1157.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1154.0, + 826.0, + 1154.0, + 826.0, + 1187.0, + 295.0, + 1187.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1184.0, + 826.0, + 1184.0, + 826.0, + 1217.0, + 294.0, + 1217.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1214.0, + 826.0, + 1214.0, + 826.0, + 1247.0, + 295.0, + 1247.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1244.0, + 827.0, + 1244.0, + 827.0, + 1277.0, + 295.0, + 1277.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1275.0, + 827.0, + 1275.0, + 827.0, + 1308.0, + 294.0, + 1308.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1306.0, + 827.0, + 1306.0, + 827.0, + 1337.0, + 294.0, + 1337.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1335.0, + 826.0, + 1335.0, + 826.0, + 1370.0, + 295.0, + 1370.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 
291.0, + 1365.0, + 1007.0, + 1365.0, + 1007.0, + 1401.0, + 291.0, + 1401.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1072.0, + 1365.0, + 1407.0, + 1365.0, + 1407.0, + 1401.0, + 1072.0, + 1401.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1396.0, + 1404.0, + 1396.0, + 1404.0, + 1432.0, + 293.0, + 1432.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1427.0, + 1406.0, + 1427.0, + 1406.0, + 1462.0, + 293.0, + 1462.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1456.0, + 1407.0, + 1456.0, + 1407.0, + 1492.0, + 291.0, + 1492.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1486.0, + 1402.0, + 1486.0, + 1402.0, + 1521.0, + 294.0, + 1521.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1517.0, + 1405.0, + 1517.0, + 1405.0, + 1552.0, + 294.0, + 1552.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 668.0, + 1407.0, + 668.0, + 1407.0, + 704.0, + 292.0, + 704.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 701.0, + 1404.0, + 701.0, + 1404.0, + 734.0, + 295.0, + 734.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 727.0, + 670.0, + 727.0, + 670.0, + 768.0, + 294.0, + 768.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 750.0, + 727.0, + 1027.0, + 727.0, + 1027.0, + 768.0, + 750.0, + 768.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 844.0, + 1214.0, + 1404.0, + 1214.0, + 1404.0, + 1251.0, + 844.0, + 1251.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 845.0, + 1246.0, + 973.0, + 1246.0, + 973.0, + 1275.0, + 845.0, + 1275.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1099.0, + 1246.0, + 1404.0, 
+ 1246.0, + 1404.0, + 1275.0, + 1099.0, + 1275.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 845.0, + 1275.0, + 1405.0, + 1275.0, + 1405.0, + 1305.0, + 845.0, + 1305.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 844.0, + 1302.0, + 1406.0, + 1302.0, + 1406.0, + 1334.0, + 844.0, + 1334.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 845.0, + 1330.0, + 1361.0, + 1330.0, + 1361.0, + 1359.0, + 845.0, + 1359.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 7, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 298, + 254, + 1404, + 254, + 1404, + 467, + 298, + 467 + ], + "score": 0.976 + }, + { + "category_id": 1, + "poly": [ + 306, + 560, + 1407, + 560, + 1407, + 2006, + 306, + 2006 + ], + "score": 0.964 + }, + { + "category_id": 0, + "poly": [ + 300, + 199, + 575, + 199, + 575, + 236, + 300, + 236 + ], + "score": 0.913 + }, + { + "category_id": 0, + "poly": [ + 299, + 512, + 454, + 512, + 454, + 548, + 299, + 548 + ], + "score": 0.904 + }, + { + "category_id": 2, + "poly": [ + 840, + 2061, + 859, + 2061, + 859, + 2084, + 840, + 2084 + ], + "score": 0.769 + }, + { + "category_id": 15, + "poly": [ + 294.0, + 194.0, + 579.0, + 194.0, + 579.0, + 244.0, + 294.0, + 244.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 508.0, + 460.0, + 508.0, + 460.0, + 554.0, + 295.0, + 554.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 839.0, + 2060.0, + 861.0, + 2060.0, + 861.0, + 2090.0, + 839.0, + 2090.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 254.0, + 1401.0, + 254.0, + 1401.0, + 288.0, + 296.0, + 288.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 283.0, + 1406.0, + 283.0, + 1406.0, + 321.0, + 294.0, + 321.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 294.0, + 315.0, + 1402.0, + 315.0, + 1402.0, + 349.0, + 294.0, + 349.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 346.0, + 1406.0, + 346.0, + 1406.0, + 380.0, + 296.0, + 380.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 375.0, + 1404.0, + 375.0, + 1404.0, + 409.0, + 296.0, + 409.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 405.0, + 1403.0, + 405.0, + 1403.0, + 441.0, + 293.0, + 441.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 435.0, + 542.0, + 435.0, + 542.0, + 469.0, + 295.0, + 469.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 305.0, + 565.0, + 1408.0, + 565.0, + 1408.0, + 610.0, + 305.0, + 610.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 352.0, + 597.0, + 1406.0, + 597.0, + 1406.0, + 637.0, + 352.0, + 637.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 351.0, + 628.0, + 742.0, + 628.0, + 742.0, + 666.0, + 351.0, + 666.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 308.0, + 682.0, + 1403.0, + 682.0, + 1403.0, + 723.0, + 308.0, + 723.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 351.0, + 715.0, + 977.0, + 715.0, + 977.0, + 751.0, + 351.0, + 751.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 307.0, + 768.0, + 1406.0, + 768.0, + 1406.0, + 808.0, + 307.0, + 808.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 349.0, + 798.0, + 1393.0, + 798.0, + 1393.0, + 840.0, + 349.0, + 840.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 305.0, + 851.0, + 1406.0, + 851.0, + 1406.0, + 895.0, + 305.0, + 895.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 351.0, + 882.0, + 1406.0, + 882.0, 
+ 1406.0, + 924.0, + 351.0, + 924.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 352.0, + 914.0, + 1096.0, + 914.0, + 1096.0, + 954.0, + 352.0, + 954.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 305.0, + 967.0, + 1406.0, + 967.0, + 1406.0, + 1011.0, + 305.0, + 1011.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 352.0, + 1000.0, + 1406.0, + 1000.0, + 1406.0, + 1040.0, + 352.0, + 1040.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 351.0, + 1029.0, + 1333.0, + 1029.0, + 1333.0, + 1074.0, + 351.0, + 1074.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 305.0, + 1082.0, + 1408.0, + 1082.0, + 1408.0, + 1128.0, + 305.0, + 1128.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 352.0, + 1114.0, + 834.0, + 1114.0, + 834.0, + 1154.0, + 352.0, + 1154.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 305.0, + 1167.0, + 1405.0, + 1167.0, + 1405.0, + 1215.0, + 305.0, + 1215.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 352.0, + 1201.0, + 940.0, + 1201.0, + 940.0, + 1241.0, + 352.0, + 1241.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 307.0, + 1256.0, + 1403.0, + 1256.0, + 1403.0, + 1296.0, + 307.0, + 1296.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 350.0, + 1288.0, + 700.0, + 1288.0, + 700.0, + 1326.0, + 350.0, + 1326.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 307.0, + 1339.0, + 1406.0, + 1339.0, + 1406.0, + 1383.0, + 307.0, + 1383.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 351.0, + 1370.0, + 1406.0, + 1370.0, + 1406.0, + 1413.0, + 351.0, + 1413.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 352.0, + 1401.0, + 643.0, + 1401.0, + 643.0, + 1441.0, + 
352.0, + 1441.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1454.0, + 1408.0, + 1454.0, + 1408.0, + 1500.0, + 297.0, + 1500.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 350.0, + 1488.0, + 642.0, + 1488.0, + 642.0, + 1524.0, + 350.0, + 1524.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 300.0, + 1542.0, + 1406.0, + 1542.0, + 1406.0, + 1583.0, + 300.0, + 1583.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 351.0, + 1571.0, + 1175.0, + 1571.0, + 1175.0, + 1616.0, + 351.0, + 1616.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 302.0, + 1629.0, + 1405.0, + 1629.0, + 1405.0, + 1669.0, + 302.0, + 1669.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 351.0, + 1655.0, + 1098.0, + 1655.0, + 1098.0, + 1700.0, + 351.0, + 1700.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 300.0, + 1711.0, + 1405.0, + 1711.0, + 1405.0, + 1752.0, + 300.0, + 1752.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 349.0, + 1745.0, + 740.0, + 1745.0, + 740.0, + 1780.0, + 349.0, + 1780.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1792.0, + 1410.0, + 1792.0, + 1410.0, + 1847.0, + 295.0, + 1847.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 351.0, + 1826.0, + 1403.0, + 1826.0, + 1403.0, + 1872.0, + 351.0, + 1872.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 351.0, + 1855.0, + 1405.0, + 1855.0, + 1405.0, + 1903.0, + 351.0, + 1903.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 356.0, + 1890.0, + 910.0, + 1890.0, + 910.0, + 1930.0, + 356.0, + 1930.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 298.0, + 1943.0, + 1406.0, + 1943.0, + 1406.0, + 1987.0, + 298.0, + 1987.0 
+ ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 351.0, + 1976.0, + 1214.0, + 1976.0, + 1214.0, + 2016.0, + 351.0, + 2016.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 8, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 2, + "poly": [ + 836, + 2061, + 866, + 2061, + 866, + 2087, + 836, + 2087 + ], + "score": 0.837 + }, + { + "category_id": 1, + "poly": [ + 293, + 112, + 1409, + 112, + 1409, + 2011, + 293, + 2011 + ], + "score": 0.687 + }, + { + "category_id": 15, + "poly": [ + 832.0, + 2057.0, + 870.0, + 2057.0, + 870.0, + 2095.0, + 832.0, + 2095.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 204.0, + 1406.0, + 204.0, + 1406.0, + 243.0, + 294.0, + 243.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 346.0, + 231.0, + 745.0, + 231.0, + 745.0, + 274.0, + 346.0, + 274.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 287.0, + 1404.0, + 287.0, + 1404.0, + 329.0, + 292.0, + 329.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 320.0, + 1404.0, + 320.0, + 1404.0, + 360.0, + 353.0, + 360.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 372.0, + 1406.0, + 372.0, + 1406.0, + 418.0, + 292.0, + 418.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 406.0, + 984.0, + 406.0, + 984.0, + 445.0, + 353.0, + 445.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 462.0, + 1404.0, + 462.0, + 1404.0, + 503.0, + 292.0, + 503.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 493.0, + 1394.0, + 493.0, + 1394.0, + 533.0, + 353.0, + 533.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 549.0, + 1408.0, + 549.0, + 1408.0, + 589.0, + 296.0, + 589.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 355.0, + 583.0, + 1235.0, + 583.0, + 1235.0, + 616.0, + 355.0, + 616.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 635.0, + 1406.0, + 635.0, + 1406.0, + 674.0, + 294.0, + 674.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 666.0, + 1404.0, + 666.0, + 1404.0, + 705.0, + 353.0, + 705.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 351.0, + 697.0, + 1254.0, + 697.0, + 1254.0, + 735.0, + 351.0, + 735.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 751.0, + 1406.0, + 751.0, + 1406.0, + 793.0, + 292.0, + 793.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 782.0, + 1406.0, + 782.0, + 1406.0, + 822.0, + 353.0, + 822.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 355.0, + 814.0, + 1404.0, + 814.0, + 1404.0, + 853.0, + 355.0, + 853.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 845.0, + 1305.0, + 845.0, + 1305.0, + 885.0, + 353.0, + 885.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 899.0, + 1406.0, + 899.0, + 1406.0, + 939.0, + 296.0, + 939.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 351.0, + 930.0, + 1406.0, + 930.0, + 1406.0, + 972.0, + 351.0, + 972.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 357.0, + 964.0, + 910.0, + 964.0, + 910.0, + 997.0, + 357.0, + 997.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1016.0, + 1406.0, + 1016.0, + 1406.0, + 1055.0, + 296.0, + 1055.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 1049.0, + 1406.0, + 1049.0, + 1406.0, + 1084.0, + 353.0, + 1084.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, 
+ "poly": [ + 353.0, + 1078.0, + 606.0, + 1078.0, + 606.0, + 1113.0, + 353.0, + 1113.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1134.0, + 1404.0, + 1134.0, + 1404.0, + 1174.0, + 296.0, + 1174.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 348.0, + 1166.0, + 813.0, + 1166.0, + 813.0, + 1203.0, + 348.0, + 1203.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1216.0, + 1404.0, + 1216.0, + 1404.0, + 1264.0, + 294.0, + 1264.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 351.0, + 1251.0, + 1199.0, + 1251.0, + 1199.0, + 1288.0, + 351.0, + 1288.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1305.0, + 1406.0, + 1305.0, + 1406.0, + 1347.0, + 291.0, + 1347.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 349.0, + 1336.0, + 1408.0, + 1336.0, + 1408.0, + 1382.0, + 349.0, + 1382.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 355.0, + 1368.0, + 429.0, + 1368.0, + 429.0, + 1405.0, + 355.0, + 1405.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1424.0, + 1406.0, + 1424.0, + 1406.0, + 1463.0, + 296.0, + 1463.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 1455.0, + 1189.0, + 1455.0, + 1189.0, + 1495.0, + 353.0, + 1495.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 289.0, + 1507.0, + 1404.0, + 1507.0, + 1404.0, + 1555.0, + 289.0, + 1555.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 1543.0, + 809.0, + 1543.0, + 809.0, + 1582.0, + 353.0, + 1582.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1599.0, + 1406.0, + 1599.0, + 1406.0, + 1638.0, + 294.0, + 1638.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 
353.0, + 1628.0, + 1147.0, + 1628.0, + 1147.0, + 1667.0, + 353.0, + 1667.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1684.0, + 1406.0, + 1684.0, + 1406.0, + 1724.0, + 294.0, + 1724.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 351.0, + 1715.0, + 1406.0, + 1715.0, + 1406.0, + 1755.0, + 351.0, + 1755.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 1744.0, + 1242.0, + 1744.0, + 1242.0, + 1784.0, + 353.0, + 1784.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 289.0, + 1797.0, + 1408.0, + 1797.0, + 1408.0, + 1844.0, + 289.0, + 1844.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 355.0, + 1836.0, + 1328.0, + 1836.0, + 1328.0, + 1869.0, + 355.0, + 1869.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1888.0, + 1404.0, + 1888.0, + 1404.0, + 1928.0, + 294.0, + 1928.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1944.0, + 1406.0, + 1944.0, + 1406.0, + 1984.0, + 294.0, + 1984.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 357.0, + 1978.0, + 663.0, + 1978.0, + 663.0, + 2011.0, + 357.0, + 2011.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 9, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 293, + 156, + 1410, + 156, + 1410, + 2023, + 293, + 2023 + ], + "score": 0.893 + }, + { + "category_id": 2, + "poly": [ + 835, + 2060, + 864, + 2060, + 864, + 2087, + 835, + 2087 + ], + "score": 0.835 + }, + { + "category_id": 15, + "poly": [ + 831.0, + 2058.0, + 869.0, + 2058.0, + 869.0, + 2097.0, + 831.0, + 2097.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 202.0, + 1404.0, + 202.0, + 1404.0, + 241.0, + 295.0, + 241.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 351.0, + 233.0, + 1406.0, + 233.0, + 1406.0, + 272.0, + 351.0, + 272.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 260.0, + 585.0, + 260.0, + 585.0, + 299.0, + 353.0, + 299.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 315.0, + 1406.0, + 315.0, + 1406.0, + 354.0, + 295.0, + 354.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 346.0, + 1046.0, + 346.0, + 1046.0, + 385.0, + 353.0, + 385.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 397.0, + 1408.0, + 397.0, + 1408.0, + 436.0, + 295.0, + 436.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 355.0, + 430.0, + 974.0, + 430.0, + 974.0, + 463.0, + 355.0, + 463.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 479.0, + 1408.0, + 479.0, + 1408.0, + 518.0, + 295.0, + 518.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 510.0, + 1216.0, + 510.0, + 1216.0, + 549.0, + 353.0, + 549.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 563.0, + 1404.0, + 563.0, + 1404.0, + 596.0, + 297.0, + 596.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 355.0, + 596.0, + 1138.0, + 596.0, + 1138.0, + 628.0, + 355.0, + 628.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 639.0, + 1406.0, + 639.0, + 1406.0, + 686.0, + 291.0, + 686.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 355.0, + 676.0, + 1338.0, + 676.0, + 1338.0, + 708.0, + 355.0, + 708.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 725.0, + 1406.0, + 725.0, + 1406.0, + 764.0, + 293.0, + 764.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 347.0, + 751.0, + 1410.0, + 
751.0, + 1410.0, + 799.0, + 347.0, + 799.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 355.0, + 788.0, + 641.0, + 788.0, + 641.0, + 821.0, + 355.0, + 821.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 837.0, + 1404.0, + 837.0, + 1404.0, + 876.0, + 293.0, + 876.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 347.0, + 866.0, + 1360.0, + 866.0, + 1360.0, + 909.0, + 347.0, + 909.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 915.0, + 1408.0, + 915.0, + 1408.0, + 962.0, + 291.0, + 962.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 954.0, + 1402.0, + 954.0, + 1402.0, + 987.0, + 353.0, + 987.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 351.0, + 979.0, + 431.0, + 979.0, + 431.0, + 1020.0, + 351.0, + 1020.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 289.0, + 1024.0, + 1410.0, + 1024.0, + 1410.0, + 1079.0, + 289.0, + 1079.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 1063.0, + 1340.0, + 1063.0, + 1340.0, + 1102.0, + 353.0, + 1102.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1114.0, + 1406.0, + 1114.0, + 1406.0, + 1153.0, + 295.0, + 1153.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 1145.0, + 980.0, + 1145.0, + 980.0, + 1184.0, + 353.0, + 1184.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1194.0, + 1408.0, + 1194.0, + 1408.0, + 1237.0, + 291.0, + 1237.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 351.0, + 1227.0, + 1382.0, + 1227.0, + 1382.0, + 1266.0, + 351.0, + 1266.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1278.0, + 1406.0, + 1278.0, + 1406.0, + 1317.0, + 295.0, + 
1317.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 347.0, + 1307.0, + 877.0, + 1307.0, + 877.0, + 1350.0, + 347.0, + 1350.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1362.0, + 1404.0, + 1362.0, + 1404.0, + 1401.0, + 293.0, + 1401.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 1391.0, + 809.0, + 1391.0, + 809.0, + 1430.0, + 353.0, + 1430.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1446.0, + 1404.0, + 1446.0, + 1404.0, + 1479.0, + 297.0, + 1479.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 355.0, + 1477.0, + 1404.0, + 1477.0, + 1404.0, + 1510.0, + 355.0, + 1510.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 351.0, + 1501.0, + 1406.0, + 1501.0, + 1406.0, + 1544.0, + 351.0, + 1544.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 1536.0, + 639.0, + 1536.0, + 639.0, + 1569.0, + 353.0, + 1569.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1585.0, + 1406.0, + 1585.0, + 1406.0, + 1624.0, + 295.0, + 1624.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 1616.0, + 1042.0, + 1616.0, + 1042.0, + 1655.0, + 353.0, + 1655.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1667.0, + 1404.0, + 1667.0, + 1404.0, + 1706.0, + 295.0, + 1706.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 349.0, + 1696.0, + 739.0, + 1696.0, + 739.0, + 1739.0, + 349.0, + 1739.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1749.0, + 1404.0, + 1749.0, + 1404.0, + 1788.0, + 295.0, + 1788.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 351.0, + 1780.0, + 894.0, + 1780.0, + 894.0, + 1819.0, + 351.0, + 1819.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1831.0, + 1408.0, + 1831.0, + 1408.0, + 1870.0, + 295.0, + 1870.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 1864.0, + 1406.0, + 1864.0, + 1406.0, + 1903.0, + 353.0, + 1903.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 355.0, + 1893.0, + 595.0, + 1893.0, + 595.0, + 1932.0, + 355.0, + 1932.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1944.0, + 1406.0, + 1944.0, + 1406.0, + 1983.0, + 295.0, + 1983.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 1975.0, + 1290.0, + 1975.0, + 1290.0, + 2014.0, + 353.0, + 2014.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 10, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 292, + 198, + 1409, + 198, + 1409, + 1366, + 292, + 1366 + ], + "score": 0.928 + }, + { + "category_id": 2, + "poly": [ + 836, + 2061, + 866, + 2061, + 866, + 2086, + 836, + 2086 + ], + "score": 0.809 + }, + { + "category_id": 2, + "poly": [ + 836, + 2061, + 865, + 2061, + 865, + 2086, + 836, + 2086 + ], + "score": 0.113 + }, + { + "category_id": 15, + "poly": [ + 832.0, + 2058.0, + 869.0, + 2058.0, + 869.0, + 2097.0, + 832.0, + 2097.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 832.0, + 2058.0, + 870.0, + 2058.0, + 870.0, + 2097.0, + 832.0, + 2097.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 201.0, + 1405.0, + 201.0, + 1405.0, + 243.0, + 294.0, + 243.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 351.0, + 231.0, + 1407.0, + 231.0, + 1407.0, + 276.0, + 351.0, + 276.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 263.0, + 895.0, + 263.0, + 895.0, + 304.0, + 353.0, + 304.0 + ], + "score": 1.0, + 
"text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 316.0, + 1405.0, + 316.0, + 1405.0, + 357.0, + 294.0, + 357.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 355.0, + 347.0, + 988.0, + 347.0, + 988.0, + 384.0, + 355.0, + 384.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 396.0, + 1405.0, + 396.0, + 1405.0, + 437.0, + 293.0, + 437.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 352.0, + 431.0, + 1077.0, + 431.0, + 1077.0, + 468.0, + 352.0, + 468.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 480.0, + 1407.0, + 480.0, + 1407.0, + 520.0, + 294.0, + 520.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 513.0, + 1118.0, + 513.0, + 1118.0, + 550.0, + 353.0, + 550.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 564.0, + 1404.0, + 564.0, + 1404.0, + 601.0, + 294.0, + 601.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 352.0, + 594.0, + 1033.0, + 594.0, + 1033.0, + 631.0, + 352.0, + 631.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 645.0, + 1407.0, + 645.0, + 1407.0, + 690.0, + 293.0, + 690.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 355.0, + 679.0, + 1300.0, + 679.0, + 1300.0, + 716.0, + 355.0, + 716.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 730.0, + 1404.0, + 730.0, + 1404.0, + 767.0, + 296.0, + 767.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 352.0, + 761.0, + 949.0, + 761.0, + 949.0, + 799.0, + 352.0, + 799.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 810.0, + 1407.0, + 810.0, + 1407.0, + 853.0, + 293.0, + 853.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 
844.0, + 1118.0, + 844.0, + 1118.0, + 881.0, + 353.0, + 881.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 896.0, + 1405.0, + 896.0, + 1405.0, + 933.0, + 296.0, + 933.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 352.0, + 922.0, + 1407.0, + 922.0, + 1407.0, + 967.0, + 352.0, + 967.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 959.0, + 635.0, + 959.0, + 635.0, + 993.0, + 353.0, + 993.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1009.0, + 1405.0, + 1009.0, + 1405.0, + 1046.0, + 296.0, + 1046.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 1040.0, + 1405.0, + 1040.0, + 1405.0, + 1077.0, + 353.0, + 1077.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 1069.0, + 428.0, + 1069.0, + 428.0, + 1104.0, + 353.0, + 1104.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1119.0, + 1405.0, + 1119.0, + 1405.0, + 1164.0, + 293.0, + 1164.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 351.0, + 1151.0, + 1262.0, + 1151.0, + 1262.0, + 1190.0, + 351.0, + 1190.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1202.0, + 1407.0, + 1202.0, + 1407.0, + 1244.0, + 293.0, + 1244.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 352.0, + 1234.0, + 1290.0, + 1234.0, + 1290.0, + 1273.0, + 352.0, + 1273.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1285.0, + 1407.0, + 1285.0, + 1407.0, + 1330.0, + 293.0, + 1330.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 351.0, + 1320.0, + 739.0, + 1320.0, + 739.0, + 1352.0, + 351.0, + 1352.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 11, + "width": 1700, + "height": 2200 + } + } 
+] \ No newline at end of file diff --git a/parse/train/wK2fDDJ5VcF/wK2fDDJ5VcF.md b/parse/train/wK2fDDJ5VcF/wK2fDDJ5VcF.md new file mode 100644 index 0000000000000000000000000000000000000000..2cb4b74de683c55101ff90c3a8910dd4e5da079c --- /dev/null +++ b/parse/train/wK2fDDJ5VcF/wK2fDDJ5VcF.md @@ -0,0 +1,163 @@ +# Learning to Walk in Minutes Using Massively Parallel Deep Reinforcement Learning + +Nikita Rudin ETH Zurich and NVIDIA rudinn@ethz.ch + +David Hoeller ETH Zurich and NVIDIA dhoeller@ethz.ch + +Philipp Reist NVIDIA preist@nvidia.com + +Marco Hutter ETH Zurich mahutter@ethz.com + +Abstract: In this work, we present and study a training set-up that achieves fast policy generation for real-world robotic tasks by using massive parallelism on a single workstation GPU. We analyze and discuss the impact of different training algorithm components in the massively parallel regime on the final policy performance and training times. In addition, we present a novel game-inspired curriculum that is well suited for training with thousands of simulated robots in parallel. We evaluate the approach by training the quadrupedal robot ANYmal to walk on challenging terrain. The parallel approach allows training policies for flat terrain in under four minutes, and in twenty minutes for uneven terrain. This represents a speedup of multiple orders of magnitude compared to previous work. Finally, we transfer the policies to the real robot to validate the approach. We open-source our training code to help accelerate further research in the field of learned legged locomotion: https://leggedrobotics.github.io/legged_gym/. + +Keywords: Reinforcement Learning, Legged Robots, Sim-to-real + +![](images/b6480bd30beb6d1b9762af85125a32c3c8067d4548699e684f5c674891d50b92.jpg) +Figure 1: Thousands of robots learning to walk in simulation. + +# 1 Introduction + +Deep reinforcement learning (DRL) is proving to be a powerful tool for robotics. 
Tasks such as legged locomotion [1], manipulation [2], and navigation [3], have been solved using these new tools, and research continues to keep adding more and more challenging tasks to the list. The amount of data required to train a policy increases with the task complexity. For this reason, most work focuses on training in simulation before transferring to real robots. We have reached a point where multiple days or even weeks are needed to fully train an agent with current simulators. For example, OpenAI’s block reorientation task was trained for up to 14 days and their Rubik’s cube solving policy took several months to train [4]. The problem is exacerbated by the fact that deep reinforcement learning requires hyper-parameter tuning to obtain a suitable solution which requires sequentially rerunning time-consuming training. Reducing training times using massively parallel approaches such as presented here can therefore help improve the quality and time-to-deployment of DRL policies, as a training setup can be iterated on more often in the same time frame. + +In this paper, we examine the effects of massive parallelism for on-policy DRL algorithms and present considerations in how the standard RL formulation and the most commonly used hyperparameters should be adapted to learn efficiently in the highly parallel regime. Additionally, we present a novel game-inspired curriculum which automatically adapts the task difficulty to the performance of the policy. The proposed curriculum architecture is straightforward to implement, does not require tuning, and is well suited for the massively parallel regime. Common robotic simulators such as Mujoco [5], Bullet [6], or Raisim [7] feature efficient multi-body dynamics implementations. However, they have been developed to run on CPUs with only a reduced amount of parallelism. 
In this work, we use NVIDIA’s Isaac Gym simulation environment [8], which runs both the simulation and training on the GPU and is capable of simulating thousands of robots in parallel. + +The massively parallel training regime has been explored before [4, 9] in the context of distributed systems with a network of thousands of CPUs each running a separate instance of the simulation. The parallelization was achieved by averaging the gradients between the different workers without reducing the number of samples provided by each agent. This results in large batch sizes of millions of samples for each policy update which improves the learning dynamics, but does not optimize the overall training time. In parallel, recent works have aimed to increase the simulation throughput and reduce training times of standard DRL benchmark tasks. A framework combining parallel simulation with multi-GPU training [10] was proposed to achieve fast training using hundreds of parallel agents. In the context of visual navigation, large batch simulation has been used to increase the training throughput [11]. Furthermore, GPU accelerated physics simulation has been shown to significantly improve the training time of the Humanoid running task [12]. A differentiable simulator running on Google’s TPUs has also been shown to greatly accelerate the training of multiple tasks [13]. We build upon [10, 12] by pushing the parallelization further, optimizing the training algorithm, and applying the approach to a challenging real-world robotics task. + +Perceptive and dynamic locomotion for legged robots in unstructured environments is a demanding task that, until recently, had only been partially demonstrated with complex model-based approaches [14, 15]. Learning-based approaches are emerging as a promising alternative. For quadrupeds, DRL has been used to train blind policies robust to highly uneven ground [16] (12 hours of training). 
Perceptive locomotion over challenging terrain has been achieved by combining learning with optimal control techniques [17, 18] (82 and 88 hours of training) and recently, a fully learned approach has shown great robustness in this setting [19] (120 hours of training). Similarly, bipedal robots have also been trained to walk blindly on stairs [20] (training time not reported). With our approach we can train a perceptive policy in under 20 minutes on a single GPU, with the complexity of simto-real transfer to the hardware, which increases the performance and robustness requirements and provides clear validation of the overall approach. Training such behaviors in minutes opens up new exciting possibilities ranging from automatic tuning to customized training using scans of particular environments. + +# 2 Massively Parallel Reinforcement Learning + +Current (on-policy) reinforcement learning algorithms are divided into two parts: data collection and policy update. The policy update, which corresponds to back-propagation for neural networks, is easily performed in parallel on the GPU. Parallelizing data collection is not as straightforward. Each step consists of policy inference, simulation, reward, and observation calculation. Current popular pipelines have the simulation and reward/observation calculation computed on the CPU, making the GPU unsuitable for policy inference because of communication bottle-necks. Data transfer over PCIe is known to be the weakest link of GPU acceleration, and can be as much as 50 times slower than the GPU processing time alone [21]. Furthermore, with CPU data collection, a large amount of data must be sent to the GPU for each policy update, slowing down the overall process. Limited parallelization can be achieved by using multiple CPU cores and spawning many processes, each running the simulation for one agent. However, the number of agents is quickly limited by the number of cores and other issues such as memory usage. 
We explore the potential of massive parallelism with Isaac Gym’s end-to-end data collection and policy updates on the GPU, significantly reducing data copying and improving simulation throughput. + +# 2.1 Simulation Throughput + +The main factor affecting the total simulation throughput is the number of robots simulated in parallel. Modern GPUs can handle tens of thousands of parallel instructions. Similarly, IsaacGym’s PhysX engine can process thousands of robots in a single simulation and all other computations of our pipeline are vectorized to scale favorably with the number of robots. Using a single simulation with thousands of robots presents some new challenges. For example, a single common terrain mesh must be used, and it cannot be easily changed at each reset. We circumvent this problem by creating the whole mesh with all terrain types and levels tiled side by side. We change the terrain level of the robots by physically moving them on the mesh. In supplementary material, we show the computational time of different parts of the pipeline, examine how these times scale with the number of robots, and provide other techniques to optimize the simulation throughput. + +# 2.2 DRL Algorithm + +We build upon a custom implementation of the Proximal Policy Optimization (PPO) algorithm [22]. Our implementation is designed to perform every operation and store all the data on the GPU. In order to efficiently learn from thousands of robots in parallel, we perform some essential modifications to the algorithm and change some of the commonly used hyper-parameter values. + +# 2.2.1 Hyper-Parameters Modification + +In an on-policy algorithm such as PPO, a fixed policy collects a selected amount of data before doing the next policy update. This batch size, $B$ , is a crucial hyper-parameter for successful learning. With too little data, the gradients will be too noisy, and the algorithm will not learn effectively. 
With too much data, the samples become repetitive, and the algorithm cannot extract more information from them. These samples represent wasted simulation time and slow down the overall training. We have $B = n _ { r o b o t s } n _ { s t e p s }$ , where $ { n _ { s t e p s } }$ is the number of steps each robot takes per policy update and $n _ { r o b o t s }$ the number of robots simulated in parallel. Since we increase $n _ { r o b o t s }$ by a few orders of magnitude, we must choose a small $n _ { s t e p s }$ to keep $B$ reasonable and hence optimize training times, which is a setting that has not been extensively explored for on-policy reinforcement learning algorithms. It turns out that we can not choose $n _ { s t e p s }$ to be arbitrarily low. The algorithm requires trajectories with coherent temporal information to learn effectively. Even though, in theory, information of single steps could be used, we find that the algorithm fails to converge to the optimal solution below a certain threshold. This can be explained by the fact that we use Generalized Advantage Estimation (GAE) [23], which requires rewards from multiple time steps to be effective. For our task, we find that the algorithm struggles when we provide fewer than 25 consecutive steps, corresponding to $0 . 5 \mathrm { s }$ of simulated time. It is important to distinguish $ { n _ { s t e p s } }$ from the maximum episode length leading to a time-out and a reset, which we define as $2 0 \mathrm { s }$ . The environments are reset when they reach this maximum length and not after each iteration, meaning that a single episode can cover many policy updates. This limits the total number of robots training in parallel, and consequently, prohibits us from using the full computational capabilities of the GPU. + +The mini-batch size represents the size of the chunks in which the batch size is split to perform backpropagation. 
We find that having mini-batch sizes much larger than what is usually considered best practice is beneficial for our massively parallel use case. We use mini-batches of tens of thousands of samples and observe that it stabilizes the learning process without increasing the total training time. + +# 2.2.2 Reset Handling + +During training, the robots must be reset whenever they fall, and also after some time to keep them exploring new trajectories and terrains. The PPO algorithm includes a critic predicting an infinite horizon sum of future discounted rewards. Resets break this infinite horizon assumption and can lead to inferior critic performance if not handled carefully. Resets based on failure or reaching a goal are not a problem because the critic can predict them. However, a reset based on a time out can not be predicted (we do not provide episode time in the observations). The solution is to distinguish the two termination modes and augment the reward with the expected infinite sum of discounted future rewards in a time-out case. In other words, we bootstrap the target of the critic with its own prediction. This solution has been discussed in [24], but interestingly, this distinction is not part of the widely used Gym environment interface [25] and is ignored by popular implementations such as Stable-Baselines $[ 2 6 ] ^ { 1 }$ . After investigating multiple implementations, we conclude that this important detail is often avoided by assuming that the environments either never time out or only on the very last step of a batch collection. In our case, with few robot steps per batch, we can not make such an assumption since a meaningful episode length covers the collection of many batches. We modify the standard Gym interface to detect time-outs and implement the bootstrapping solution. In supplementary material, we show the effect of this solution on the total reward as well as the critic loss. 
+ +![](images/9b48083b251353c43d2e71ff8968b79f99600e9e05461c3dd723d12ace8ceddd.jpg) +Figure 2: Terrain types used for training and testing in simulation. (a) Randomly rough terrain with variations of $0 . 1 \mathrm { m }$ . (b) Sloped terrain with an inclination of $2 5 \mathrm { d e g }$ . (c) Stairs with a width of $0 . 3 \mathrm { m }$ and height of $\mathrm { 0 . 2 m }$ . (d) Randomized, discrete obstacles with heights of up to $\pm 0 . 2 \mathrm { m }$ . + +# 3 Task Description + +A quadruped robot must learn to walk across challenging terrain, including uneven surfaces, slopes, stairs, and obstacles, while following base-heading and linear-velocity commands. We conduct most of the simulation and real-world deployment experiments on the ANYbotics ANYmal C robot. However, in simulation, we demonstrate the broader applicability of the approach by additionally training policies for ANYmal B, ANYmal C with an attached arm, and the Unitree A1 robots. + +# 3.1 Game-Inspired Curriculum + +The terrains are selected to be representative of real-world environments. We create five types of procedurally generated terrains presented in Fig. 2: flat, sloped, randomly rough, discrete obstacles, and stairs. The terrains are tiled squares with $8 \mathrm { m }$ sides. The robots start at the center of the terrain and are given randomized heading and velocity commands (kept constant for the duration of an episode) pushing them to walk across the terrain. Slopes and stairs are organized in pyramids to allow traversability in all directions. + +Previous works have shown the benefits of using an automated curriculum of task difficulty to learn complex locomotion policies [28, 29, 16]. Similarly, we find that it is essential to first train the policy on less challenging terrain before progressively increasing the complexity. We adopt a solution inspired by [16], but replace the particle filter approach with a new game-inspired automatic curriculum. 
All robots are assigned a terrain type and a level that represents the difficulty of that terrain. For stairs and randomized obstacles, we gradually increase the step height from $5 \mathrm { c m }$ to $2 0 \mathrm { c m }$ . Sloped terrain inclination is increased from 0 deg to 25 deg. If a robot manages to walk past the borders of its terrain, its level is increased, and at the next reset, it will start on more difficult terrain. However, if at the end of an episode it moved by less than half of the distance required by its target velocity, its level is reduced again. Robots solving the highest level are looped back to a randomly selected level to increase the diversity and avoid catastrophic forgetting. This approach has the advantage of training the robots at a level of difficulty tailored to their performance without requiring any external tuning. It adapts the difficulty level for each terrain type individually and provides us with visual and quantitative feedback on the progress of the training. When the robots have reached the final level and are evenly spread across all terrains due to looping back, we can conclude they have fully learned to solve the task. + +![](images/ecaf8198256af450c96e9b482c8d3d3a06909d2addf9a2530c761fb4193fd419.jpg) +Figure 3: 4000 robots progressing through the terrains with automatic curriculum, after 500 (top) and 1000 (bottom) policy updates. The robots start the training session on the first row (closest to the camera) and progressively reach harder terrains. + +The proposed curriculum structure is well suited for the massively parallel regime. With thousands of robots we can directly use their current progress in the curriculum as the distribution of the policy’s performance, and do not need learn it with a generator network [30]. Furthermore, our method doesn’t require tuning and is straightforward to implement in a parallel manner with nearzero processing cost. 
We remove the computational overhead of re-sampling and re-generating new terrains needed for the particle filter approach. + +Fig. 3 shows robots progressing through the terrains at two different stages of the training process. On complex terrain types, the robots require more training iterations to reach the highest levels. The distribution of robots after 500 iterations shows that while the policy is able to cross sloped terrains and to go down stairs, climbing stairs and traversing obstacles requires more training iterations. However, after 1000 iterations, the robots have reached the most challenging level for all terrain types and are spread across the map. We train for a total for 1500 iterations to let the policy converge to its highest performance. + +# 3.2 Observations, Actions, and Rewards + +The policy receives proprioceptive measurements of the robot as well as terrain information around the robot’s base. The observations are composed of: base linear and angular velocities, measurement of the gravity vector, joint positions and velocities, the previous actions selected by the policy, and finally, 108 measurements of the terrain sampled from a grid around the robot’s base. Each measurement is the distance from the terrain surface to the robot’s base height. + +The total reward is a weighted sum of nine terms, detailed in supplementary material. The main terms encourage the robot to follow the commanded velocities while avoiding undesired base velocities along other axes. In order to create a smoother, more natural motion, we also penalize joint torques, joint accelerations, joint target changes, and collisions. Contacts with the knees, shanks or between the feet and a vertical surface are considered collisions, while contacts with the base are considered crashes and lead to resets. Finally, we add an additional reward term encouraging the robot to take longer steps, which results in a more visually appealing behavior. 
We train a single policy with the same rewards for all terrains. + +The actions are interpreted as desired joint positions sent to the motors. There, a PD controller produces motor torques. In contrast to other works [16, 20], neither the reward function nor the action space has any gait-dependent elements. + +# 3.3 Sim-to-Real Additions + +In order to make the trained policies amenable for sim-to-real transfer, we randomize the friction of the ground, add noise to the observations and randomly push the robots during the episode to teach them a more stable stance. Each robot has a friction coefficient sampled uniformly in [0.5, 1.25]. The pushes happen every $1 0 \mathrm { s }$ . The robots’ base is accelerated up to $\pm 1 \mathrm { m } / \mathrm { s }$ in both $\mathbf { X }$ and y directions. The amount of noise is based on real data measured on the robot and is detailed in supplementary material. + +The ANYmal robot uses series elastic actuators with fairly complex dynamics, which are hard to model in simulation. For this reason and following the methodology of previous work [1], we use a neural network to compute torques from joint position commands. However, we simplify the inputs of the model. Instead of concatenating past measurements at fixed time steps and sending all of that information to a standard feed-forward network, we only provide the current measurements to an LSTM network. A potential drawback of this set-up is that the policy does not have the temporal information of the actuators as in previous work. We have experimented with various ways of providing that information through memory mechanisms for the policy but found that it does not improve the final performance. + +# 4 Results + +# 4.1 Effects of Massive Parallelism + +In this section, we study the effects of the number of parallel robots on the final performance of the policy. 
In order to use the total reward as a single representative metric, we have to remove the curriculum, otherwise a more performant policy sees its task difficulty increase and consequently a decrease in the total reward. As such, we simplify the task by reducing the maximum step size of stairs and obstacles and directly train robots on the full range of difficulties. + +We begin by setting a baseline with $n _ { r o b o t s } = 2 0 0 0 0$ and $n _ { s t e p s } = 5 0$ , resulting in a batch size of 1M samples. Using this very large batch size results in the best policy but at the cost of a relatively long training time. + +We then conduct experiments in which we increase the number of robots while keeping the batch size constant. As a result, the number of steps each robot takes per policy update decreases. In this case, the training time decreases with a higher number of robots, but the policy performance drops if that number is too high. We start from 128 robots corresponding to the level of parallelization of previous CPU implementations and increase that number up to 16384, which is close to the maximum amount of robots we could simulate on rough terrain with Isaac Gym running on a single workstation GPU. + +In Fig. 4, we compare these results with the baseline, which allows us to select the most favorable trade-off between policy performance and training time. We see two interesting effects at play. First, when the number of robots is too high, the performance drops sharply, which can be explained by the time horizon of each robot becoming too small. As expected, with larger batch sizes, the overall reward is higher, and the time horizon effect is shifted, meaning that we can use more robots before seeing the drop. On the other hand, below a certain threshold, we see a slow decrease in performance with fewer robots. 
We believe this is explained by the fact that the samples are very similar with many steps per robot because of the relatively small time steps between them. This means that for the same amount of samples, there is less diversity in the data. In other words, with a low number of robots, we are further from the standard assumption that the samples are independent and identically distributed, which seems to have a noticeable effect on the training process. In terms of training time, we see a nearly linear scaling up to 4000 robots, after which simulation throughput gains slow down. As such, we can conclude that increasing the number of robots is beneficial for both final performance and training time, but there is an upper limit on this number after which an on-policy algorithm cannot learn effectively. Increasing the batch size to values much larger than what is typically used in similar works seems highly beneficial. Unfortunately, it also scales the training time so it is a trade-off that must be balanced. From the third plot we can conclude that using 2048 to 4096 robots with a batch size of $\approx 1 0 0 k$ or $\approx 2 0 0 k$ provides the best trade-off for this specific task. + +![](images/b511ee0c2aca987f5fa8e39a0d14f3ee54098cf431491e0a271f167d3bdcff6f.jpg) +Figure 4: (a) Average and standard deviation (over 5 runs) of the total reward of an episode after 1500 policy updates for different number of robots and 3 different batch sizes. The ideal case of a batch size of 1M samples with 20000 robots is shown in red. (b) Total training time for the same experiments. (c) Reward dependency on total training time. Colors represent the number of robots, while shapes show the batch size (circles: 49152, crosses: 98304, triangles: 196608). Points in the upper left part of the graph (highlighted in green) represent the most desirable configuration. 
+ +![](images/f15e005bc1e3d3db64d82f1f7796d15a227032608677590845862fc30d3d7da3.jpg) +Figure 5: Success rate of the tested policy on increasing terrain complexities. Robots start in the center of the terrain and are given a forward velocity command of $0 . 7 5 \mathrm { m } / \mathrm { s }$ , and a side velocity command randomized within $[ - 0 . 1 , 0 . 1 ] \mathrm { m } / \mathrm { s }$ . (a) Success rate for climbing stairs, descending stairs and traversing discrete obstacles. (b) Success rate for climbing and descending sloped terrains. + +![](images/4eab7c74f6cac548026b3ff28e7f10ab6832f65e03b74ce577a9f1ea90202882.jpg) +Figure 6: ANYmal C with a fixed arm, ANYmal B, A1 and Cassie in simulation. + +# 4.2 Simulation + +For our simulation and deployment experiments, we use a policy trained with 4096 robots and a batch size of 98304, which we train for 1500 policy updates in under 20 minutes2. We begin by measuring the performance of our trained policy in simulation. To that end, we perform robustness and traversability tests. For each terrain type, we command the robots to traverse the representative difficulty of the terrain at high forward velocity and measure the success rate. A success is defined as managing to cross the terrain while avoiding any contacts on the robot’s base. Fig. 5 shows the results for the different terrains. For stairs, we see a nearly $1 0 0 \%$ success rate for steps up to $\mathrm { 0 . 2 m }$ , which is the hardest stair difficulty we train on and close to the kinematic limits of our robot. Randomized obstacles seem to be more demanding, with the success rate decreasing steadily. We must note that in this case, the largest step is double the reported height since neighboring obstacles can have positive and negative heights. In the case of slopes, we can observe that after $2 5 \mathrm { d e g }$ the robots are not able to climb anymore but still learn to slide down with a moderate success rate. 
+ +Given our relatively simple rewards and action space, the policy is free to adopt any gait and behavior. Interestingly, it always converges to a trotting gait, but there are often artifacts in the behavior, such as a dragging leg or unreasonably high or low base heights. After tuning of the reward weights, we can obtain a policy that respects all our constraints and can be transferred to the physical robot. + +To verify the generalizability of the approach, we train policies for multiple robots with the same set-up. We use the ANYmal C robot with a fixed robotic arm, which adds about $2 0 \%$ of additional weight, and the ANYmal B robot, which has comparable dimensions but modified kinematic and dynamic properties. In these two cases, we can retrain a policy without any modifications to the rewards or algorithm hyper-parameters and obtain a very similar performance. Next, we use the Unitree A1 robot, which has smaller dimensions, four times lower weight, and a different leg configuration. In this case, we remove the actuator model of the ANYdrive motors, reduce PD gains and the torque penalties, and change the default joint configurations. We can train a dynamic policy that learns to solve the same terrains even with the reduced size of the robot. Finally, we apply our approach to Agility Robotics’ bipedal robot Cassie. We find that an additional reward encouraging standing on a single foot is necessary to achieve a walking gait. With this addition, we are able to train the robot on the same terrains as its quadrupedal counterparts. Fig. 6 shows the different robots. + +![](images/f0dd8a9ddc78c32773c604659ba9f56cbd659b68c879ccbb91f00fe6ccb27b59.jpg) +Figure 7: Locomotion policy, trained in under $2 0 \mathrm { { m i n } }$ , deployed on the physical robot. + +# 4.3 Sim-to-real Transfer + +On the physical robot, our policy is fixed. 
We compute the observations from the robot’s sensors, feed them to the policy, and directly send the produced actions as target joint positions to the motors. We do not apply any additional filtering or constraint satisfaction checks. The terrain height measurements are queried from an elevation map that the robot is building from Lidar scans. + +Unfortunately, this height map is far from perfect, which results in a decrease in robustness between simulation and reality. We observe that these issues mainly occur at high velocities and therefore reduce the maximum linear velocity commands to $0 . 6 \mathrm { m } / \mathrm { s }$ for policies deployed on the hardware. The robot can walk up and down stairs and handles obstacles in a dynamic manner. We show samples of these experiments in Fig. 7 and in the supplementary video. To overcome issues with imperfect terrain mapping or state estimation drift, the authors of [19] implemented a teacher-student set-up, which provided outstanding robustness even in adverse conditions. As part of future work, we plan to merge the two approaches. + +# 5 Conclusion + +In this work, we demonstrated that a complex real-world robotics task can be trained in minutes with an on-policy deep reinforcement learning algorithm. Using an end-to-end GPU pipeline with thousands of robots simulated in parallel, combined with our proposed curriculum structure, we showed that the training time can be reduced by multiple orders of magnitude compared to previous work. We discussed multiple modifications to the learning algorithm and the standard hyper-parameters required to use the massively parallel regime effectively. Using our fast training pipeline, we performed many training runs, simplified the set-up, and kept only essential components. We showed that the task can be solved using simple observation and action spaces as well as relatively straightforward rewards without encouraging particular gaits or providing motion primitives. 
+ +The purpose of this work is not to obtain the absolute best-performing policy with the highest robustness. For that use case, many other techniques can be incorporated into the pipeline. We aim to show that a policy can be trained in record time with our set-up while still being usable on the real hardware. We wish to shift other researchers’ perspective on the required training time for a real-world application, and hope that our work can serve as a reference for future research. We expect many other tasks to benefit from the massively parallel regime. By reducing the training time of these future robotic tasks, we can greatly accelerate the developments in this field. + +# Acknowledgments + +We would like to thank Mayank Mittal, Joonho Lee, Takahiro Miki, and Peter Werner for their valuable suggestions and help with hardware experiments as well as the Isaac Gym and PhysX teams for their continuous support. + +# References + +[1] J. Hwangbo, J. Lee, A. Dosovitskiy, D. Bellicoso, V. Tsounis, V. Koltun, and M. Hutter. Learning agile and dynamic motor skills for legged robots. Science Robotics, 4(26), 2019. +[2] S. Gu, E. Holly, T. Lillicrap, and S. Levine. Deep reinforcement learning for robotic manipulation with asynchronous off-policy updates. In IEEE International Conference on Robotics and Automation (ICRA), May 2017. +[3] G. Kahn, A. Villaflor, B. Ding, P. Abbeel, and S. Levine. Self-supervised deep reinforcement learning with generalized computation graphs for robot navigation. In IEEE International Conference on Robotics and Automation (ICRA), 2018. +[4] OpenAI, I. Akkaya, M. Andrychowicz, M. Chociej, M. Litwin, B. McGrew, A. Petron, A. Paino, M. Plappert, G. Powell, R. Ribas, J. Schneider, N. Tezak, J. Tworek, P. Welinder, L. Weng, Q. Yuan, W. Zaremba, and L. Zhang. Solving rubik’s cube with a robot hand, 2019. +[5] E. Todorov, T. Erez, and Y. Tassa. Mujoco: A physics engine for model-based control. 
In IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), 2012. +[6] E. Coumans and Y. Bai. Pybullet, a python module for physics simulation for games, robotics and machine learning. http://pybullet.org, 2016–2021. +[7] J. Hwangbo, J. Lee, and M. Hutter. Per-contact iteration method for solving contact dynamics. IEEE Robotics and Automation Letters, 3(2), 2018. URL www.raisim.com. +[8] V. Makoviychuk, L. Wawrzyniak, Y. Guo, M. Lu, K. Storey, M. Macklin, D. Hoeller, N. Rudin, A. Allshire, A. Handa, and G. State. Isaac gym: High performance GPU based physics simulation for robot learning. In Conference on Neural Information Processing Systems (NeurIPS) Datasets and Benchmarks Track, 2021. +[9] N. Heess, D. TB, S. Sriram, J. Lemmon, J. Merel, G. Wayne, Y. Tassa, T. Erez, Z. Wang, S. M. A. Eslami, M. A. Riedmiller, and D. Silver. Emergence of locomotion behaviours in rich environments. CoRR, abs/1707.02286, 2017. +[10] A. Stooke and P. Abbeel. Accelerated methods for deep reinforcement learning. CoRR, abs/1803.02811, 2018. +[11] B. Shacklett, E. Wijmans, A. Petrenko, M. Savva, D. Batra, V. Koltun, and K. Fatahalian. Large batch simulation for deep reinforcement learning. In International Conference on Learning Representations (ICLR), 2021. +[12] J. Liang, V. Makoviychuk, A. Handa, N. Chentanez, M. Macklin, and D. Fox. Gpu-accelerated robotic simulation for distributed reinforcement learning. In Conference on Robot Learning (CoRL), 2018. +[13] C. D. Freeman, E. Frey, A. Raichuk, S. Girgin, I. Mordatch, and O. Bachem. Brax - a differentiable physics engine for large scale rigid body simulation. In 35th Conference on Neural Information Processing Systems (NeurIPS) Datasets and Benchmarks Track, 2021. +[14] A. Bouman, M. F. Ginting, N. Alatur, M. Palieri, D. D. Fan, T. Touma, T. Pailevanian, S.- K. Kim, K. Otsu, J. Burdick, and A.-a. Agha-Mohammadi. Autonomous spot: Long-range autonomous exploration of extreme environments with legged locomotion. 
In IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), 2020. +[15] C. Gehring, P. Fankhauser, L. Isler, R. Diethelm, S. Bachmann, M. Potz, L. Gerstenberg, and M. Hutter. Anymal in the field: Solving industrial inspection of an offshore hvdc platform with a quadrupedal robot. In Field and Service Robotics, 2021. +[16] J. Lee, J. Hwangbo, L. Wellhausen, V. Koltun, and M. Hutter. Learning quadrupedal locomotion over challenging terrain. Science Robotics, 5(47), 2020. +[17] V. Tsounis, M. Alge, J. Lee, F. Farshidian, and M. Hutter. Deepgait: Planning and control of quadrupedal gaits using deep reinforcement learning. IEEE Robotics and Automation Letters, PP, 03 2020. +[18] S. Gangapurwala, M. Geisert, R. Orsolino, M. Fallon, and I. Havoutis. Real-time trajectory adaptation for quadrupedal locomotion using deep reinforcement learning. In IEEE International Conference on Robotics and Automation (ICRA), 2021. +[19] T. Miki, J. Lee, L. Wellhausen, V. Koltun, and M. Hutter. Wild anymal: Robust zero-shot perceptive locomotion. Submitted to Science Robotics, 2021. +[20] J. Siekmann, K. Green, J. Warila, A. Fern, and J. W. Hurst. Blind bipedal stair traversal via sim-to-real reinforcement learning. CoRR, abs/2105.08328, 2021. +[21] C. Gregg and K. Hazelwood. Where is the data? why you cannot debate cpu vs. gpu performance without the answer. In IEEE International Symposium on Performance Analysis of Systems and Software (ISPASS), 2011. +[22] J. Schulman, F. Wolski, P. Dhariwal, A. Radford, and O. Klimov. Proximal policy optimization algorithms. CoRR, abs/1707.06347, 2017. +[23] J. Schulman, P. Moritz, S. Levine, M. Jordan, and P. Abbeel. High-dimensional continuous control using generalized advantage estimation. In Proceedings of the International Conference on Learning Representations (ICLR), 2016. +[24] F. Pardo, A. Tavakoli, V. Levdik, and P. Kormushev. Time limits in reinforcement learning. CoRR, abs/1712.00378, 2017. +[25] G. Brockman, V. 
Cheung, L. Pettersson, J. Schneider, J. Schulman, J. Tang, and W. Zaremba. Openai gym, 2016. +[26] A. Hill, A. Raffin, M. Ernestus, A. Gleave, A. Kanervisto, R. Traore, P. Dhariwal, C. Hesse, O. Klimov, A. Nichol, M. Plappert, A. Radford, J. Schulman, S. Sidor, and Y. Wu. Stable baselines. https://github.com/hill-a/stable-baselines, 2018. +[27] J. Achiam. Spinning up in deep reinforcement learning, 2018. URL https://spinningup. openai.com/en/latest/. +[28] R. Wang, J. Lehman, J. Clune, and K. O. Stanley. Paired open-ended trailblazer (POET): endlessly generating increasingly complex and diverse learning environments and their solutions. CoRR, abs/1901.01753, 2019. +[29] Z. Xie, H. Y. Ling, N. H. Kim, and M. van de Panne. Allsteps: Curriculum-driven learning of stepping stone skills. Proceedings of ACM SIGGRAPH / Eurographics Symposium on Computer Animation, 2020. +[30] C. Florensa, D. Held, X. Geng, and P. Abbeel. Automatic goal generation for reinforcement learning agents. In Proceedings of the 35th International Conference on Machine Learning (ICML), volume 80 of Proceedings of Machine Learning Research, 2018. 
\ No newline at end of file diff --git a/parse/train/wK2fDDJ5VcF/wK2fDDJ5VcF_content_list.json b/parse/train/wK2fDDJ5VcF/wK2fDDJ5VcF_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..4ef860b83732c98ab3c76ae59a0483d0a6155290 --- /dev/null +++ b/parse/train/wK2fDDJ5VcF/wK2fDDJ5VcF_content_list.json @@ -0,0 +1,829 @@ +[ + { + "type": "text", + "text": "Learning to Walk in Minutes Using Massively Parallel Deep Reinforcement Learning ", + "text_level": 1, + "bbox": [ + 223, + 102, + 774, + 151 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Nikita Rudin ETH Zurich and NVIDIA rudinn@ethz.ch ", + "bbox": [ + 209, + 178, + 380, + 219 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "David Hoeller ETH Zurich and NVIDIA dhoeller@ethz.ch ", + "bbox": [ + 424, + 178, + 596, + 218 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Philipp Reist NVIDIA preist@nvidia.com ", + "bbox": [ + 637, + 178, + 787, + 219 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Marco Hutter ETH Zurich mahutter@ethz.com ", + "bbox": [ + 423, + 241, + 573, + 282 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract: In this work, we present and study a training set-up that achieves fast policy generation for real-world robotic tasks by using massive parallelism on a single workstation GPU. We analyze and discuss the impact of different training algorithm components in the massively parallel regime on the final policy performance and training times. In addition, we present a novel game-inspired curriculum that is well suited for training with thousands of simulated robots in parallel. We evaluate the approach by training the quadrupedal robot ANYmal to walk on challenging terrain. The parallel approach allows training policies for flat terrain in under four minutes, and in twenty minutes for uneven terrain. This represents a speedup of multiple orders of magnitude compared to previous work. 
Finally, we transfer the policies to the real robot to validate the approach. We open-source our training code to help accelerate further research in the field of learned legged locomotion: https://leggedrobotics.github.io/legged_gym/. ", + "bbox": [ + 232, + 325, + 766, + 506 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords: Reinforcement Learning, Legged Robots, Sim-to-real ", + "bbox": [ + 233, + 517, + 665, + 532 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/b6480bd30beb6d1b9762af85125a32c3c8067d4548699e684f5c674891d50b92.jpg", + "image_caption": [ + "Figure 1: Thousands of robots learning to walk in simulation. " + ], + "image_footnote": [], + "bbox": [ + 191, + 550, + 808, + 728 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction ", + "text_level": 1, + "bbox": [ + 174, + 760, + 312, + 776 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Deep reinforcement learning (DRL) is proving to be a powerful tool for robotics. Tasks such as legged locomotion [1], manipulation [2], and navigation [3], have been solved using these new tools, and research continues to keep adding more and more challenging tasks to the list. The amount of data required to train a policy increases with the task complexity. For this reason, most work focuses on training in simulation before transferring to real robots. We have reached a point where multiple days or even weeks are needed to fully train an agent with current simulators. For example, OpenAI’s block reorientation task was trained for up to 14 days and their Rubik’s cube solving policy took several months to train [4]. The problem is exacerbated by the fact that deep reinforcement learning requires hyper-parameter tuning to obtain a suitable solution which requires sequentially rerunning time-consuming training. 
Reducing training times using massively parallel approaches such as presented here can therefore help improve the quality and time-to-deployment of DRL policies, as a training setup can be iterated on more often in the same time frame. ", + "bbox": [ + 174, + 791, + 825, + 902 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "", + "bbox": [ + 174, + 92, + 823, + 146 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper, we examine the effects of massive parallelism for on-policy DRL algorithms and present considerations in how the standard RL formulation and the most commonly used hyperparameters should be adapted to learn efficiently in the highly parallel regime. Additionally, we present a novel game-inspired curriculum which automatically adapts the task difficulty to the performance of the policy. The proposed curriculum architecture is straightforward to implement, does not require tuning, and is well suited for the massively parallel regime. Common robotic simulators such as Mujoco [5], Bullet [6], or Raisim [7] feature efficient multi-body dynamics implementations. However, they have been developed to run on CPUs with only a reduced amount of parallelism. In this work, we use NVIDIA’s Isaac Gym simulation environment [8], which runs both the simulation and training on the GPU and is capable of simulating thousands of robots in parallel. ", + "bbox": [ + 174, + 154, + 825, + 291 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The massively parallel training regime has been explored before [4, 9] in the context of distributed systems with a network of thousands of CPUs each running a separate instance of the simulation. The parallelization was achieved by averaging the gradients between the different workers without reducing the number of samples provided by each agent. 
This results in large batch sizes of millions of samples for each policy update which improves the learning dynamics, but does not optimize the overall training time. In parallel, recent works have aimed to increase the simulation throughput and reduce training times of standard DRL benchmark tasks. A framework combining parallel simulation with multi-GPU training [10] was proposed to achieve fast training using hundreds of parallel agents. In the context of visual navigation, large batch simulation has been used to increase the training throughput [11]. Furthermore, GPU accelerated physics simulation has been shown to significantly improve the training time of the Humanoid running task [12]. A differentiable simulator running on Google’s TPUs has also been shown to greatly accelerate the training of multiple tasks [13]. We build upon [10, 12] by pushing the parallelization further, optimizing the training algorithm, and applying the approach to a challenging real-world robotics task. ", + "bbox": [ + 174, + 297, + 825, + 491 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Perceptive and dynamic locomotion for legged robots in unstructured environments is a demanding task that, until recently, had only been partially demonstrated with complex model-based approaches [14, 15]. Learning-based approaches are emerging as a promising alternative. For quadrupeds, DRL has been used to train blind policies robust to highly uneven ground [16] (12 hours of training). Perceptive locomotion over challenging terrain has been achieved by combining learning with optimal control techniques [17, 18] (82 and 88 hours of training) and recently, a fully learned approach has shown great robustness in this setting [19] (120 hours of training). Similarly, bipedal robots have also been trained to walk blindly on stairs [20] (training time not reported). 
With our approach we can train a perceptive policy in under 20 minutes on a single GPU, with the complexity of simto-real transfer to the hardware, which increases the performance and robustness requirements and provides clear validation of the overall approach. Training such behaviors in minutes opens up new exciting possibilities ranging from automatic tuning to customized training using scans of particular environments. ", + "bbox": [ + 174, + 497, + 825, + 676 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Massively Parallel Reinforcement Learning ", + "text_level": 1, + "bbox": [ + 173, + 707, + 566, + 724 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Current (on-policy) reinforcement learning algorithms are divided into two parts: data collection and policy update. The policy update, which corresponds to back-propagation for neural networks, is easily performed in parallel on the GPU. Parallelizing data collection is not as straightforward. Each step consists of policy inference, simulation, reward, and observation calculation. Current popular pipelines have the simulation and reward/observation calculation computed on the CPU, making the GPU unsuitable for policy inference because of communication bottle-necks. Data transfer over PCIe is known to be the weakest link of GPU acceleration, and can be as much as 50 times slower than the GPU processing time alone [21]. Furthermore, with CPU data collection, a large amount of data must be sent to the GPU for each policy update, slowing down the overall process. Limited parallelization can be achieved by using multiple CPU cores and spawning many processes, each running the simulation for one agent. However, the number of agents is quickly limited by the number of cores and other issues such as memory usage. 
We explore the potential of massive parallelism with Isaac Gym’s end-to-end data collection and policy updates on the GPU, significantly reducing data copying and improving simulation throughput. ", + "bbox": [ + 174, + 744, + 825, + 911 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "", + "bbox": [ + 173, + 92, + 823, + 119 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1 Simulation Throughput ", + "text_level": 1, + "bbox": [ + 174, + 136, + 379, + 151 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The main factor affecting the total simulation throughput is the number of robots simulated in parallel. Modern GPUs can handle tens of thousands of parallel instructions. Similarly, IsaacGym’s PhysX engine can process thousands of robots in a single simulation and all other computations of our pipeline are vectorized to scale favorably with the number of robots. Using a single simulation with thousands of robots presents some new challenges. For example, a single common terrain mesh must be used, and it cannot be easily changed at each reset. We circumvent this problem by creating the whole mesh with all terrain types and levels tiled side by side. We change the terrain level of the robots by physically moving them on the mesh. In supplementary material, we show the computational time of different parts of the pipeline, examine how these times scale with the number of robots, and provide other techniques to optimize the simulation throughput. ", + "bbox": [ + 174, + 161, + 825, + 300 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2 DRL Algorithm ", + "text_level": 1, + "bbox": [ + 174, + 316, + 323, + 332 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We build upon a custom implementation of the Proximal Policy Optimization (PPO) algorithm [22]. Our implementation is designed to perform every operation and store all the data on the GPU. 
In order to efficiently learn from thousands of robots in parallel, we perform some essential modifications to the algorithm and change some of the commonly used hyper-parameter values. ", + "bbox": [ + 174, + 342, + 825, + 398 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2.1 Hyper-Parameters Modification ", + "text_level": 1, + "bbox": [ + 176, + 412, + 446, + 428 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In an on-policy algorithm such as PPO, a fixed policy collects a selected amount of data before doing the next policy update. This batch size, $B$ , is a crucial hyper-parameter for successful learning. With too little data, the gradients will be too noisy, and the algorithm will not learn effectively. With too much data, the samples become repetitive, and the algorithm cannot extract more information from them. These samples represent wasted simulation time and slow down the overall training. We have $B = n _ { r o b o t s } n _ { s t e p s }$ , where $ { n _ { s t e p s } }$ is the number of steps each robot takes per policy update and $n _ { r o b o t s }$ the number of robots simulated in parallel. Since we increase $n _ { r o b o t s }$ by a few orders of magnitude, we must choose a small $n _ { s t e p s }$ to keep $B$ reasonable and hence optimize training times, which is a setting that has not been extensively explored for on-policy reinforcement learning algorithms. It turns out that we can not choose $n _ { s t e p s }$ to be arbitrarily low. The algorithm requires trajectories with coherent temporal information to learn effectively. Even though, in theory, information of single steps could be used, we find that the algorithm fails to converge to the optimal solution below a certain threshold. This can be explained by the fact that we use Generalized Advantage Estimation (GAE) [23], which requires rewards from multiple time steps to be effective. 
For our task, we find that the algorithm struggles when we provide fewer than 25 consecutive steps, corresponding to $0 . 5 \\mathrm { s }$ of simulated time. It is important to distinguish $ { n _ { s t e p s } }$ from the maximum episode length leading to a time-out and a reset, which we define as $2 0 \\mathrm { s }$ . The environments are reset when they reach this maximum length and not after each iteration, meaning that a single episode can cover many policy updates. This limits the total number of robots training in parallel, and consequently, prohibits us from using the full computational capabilities of the GPU. ", + "bbox": [ + 173, + 436, + 825, + 713 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The mini-batch size represents the size of the chunks in which the batch size is split to perform backpropagation. We find that having mini-batch sizes much larger than what is usually considered best practice is beneficial for our massively parallel use case. We use mini-batches of tens of thousands of samples and observe that it stabilizes the learning process without increasing the total training time. ", + "bbox": [ + 174, + 719, + 823, + 787 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2.2 Reset Handling ", + "text_level": 1, + "bbox": [ + 174, + 804, + 333, + 819 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "During training, the robots must be reset whenever they fall, and also after some time to keep them exploring new trajectories and terrains. The PPO algorithm includes a critic predicting an infinite horizon sum of future discounted rewards. Resets break this infinite horizon assumption and can lead to inferior critic performance if not handled carefully. Resets based on failure or reaching a goal are not a problem because the critic can predict them. However, a reset based on a time out can not be predicted (we do not provide episode time in the observations). 
The solution is to distinguish the two termination modes and augment the reward with the expected infinite sum of discounted future rewards in a time-out case. In other words, we bootstrap the target of the critic with its own prediction. This solution has been discussed in [24], but interestingly, this distinction is not part of the widely used Gym environment interface [25] and is ignored by popular implementations such as Stable-Baselines $[ 2 6 ] ^ { 1 }$ . After investigating multiple implementations, we conclude that this important detail is often avoided by assuming that the environments either never time out or only on the very last step of a batch collection. In our case, with few robot steps per batch, we can not make such an assumption since a meaningful episode length covers the collection of many batches. We modify the standard Gym interface to detect time-outs and implement the bootstrapping solution. In supplementary material, we show the effect of this solution on the total reward as well as the critic loss. ", + "bbox": [ + 174, + 828, + 823, + 911 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/9b48083b251353c43d2e71ff8968b79f99600e9e05461c3dd723d12ace8ceddd.jpg", + "image_caption": [ + "Figure 2: Terrain types used for training and testing in simulation. (a) Randomly rough terrain with variations of $0 . 1 \\mathrm { m }$ . (b) Sloped terrain with an inclination of $2 5 \\mathrm { d e g }$ . (c) Stairs with a width of $0 . 3 \\mathrm { m }$ and height of $\\mathrm { 0 . 2 m }$ . (d) Randomized, discrete obstacles with heights of up to $\\pm 0 . 2 \\mathrm { m }$ . 
" + ], + "image_footnote": [], + "bbox": [ + 178, + 101, + 820, + 186 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "", + "bbox": [ + 174, + 247, + 825, + 398 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 Task Description ", + "text_level": 1, + "bbox": [ + 174, + 420, + 346, + 436 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "A quadruped robot must learn to walk across challenging terrain, including uneven surfaces, slopes, stairs, and obstacles, while following base-heading and linear-velocity commands. We conduct most of the simulation and real-world deployment experiments on the ANYbotics ANYmal C robot. However, in simulation, we demonstrate the broader applicability of the approach by additionally training policies for ANYmal B, ANYmal C with an attached arm, and the Unitree A1 robots. ", + "bbox": [ + 174, + 452, + 825, + 521 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 Game-Inspired Curriculum ", + "text_level": 1, + "bbox": [ + 176, + 537, + 405, + 553 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The terrains are selected to be representative of real-world environments. We create five types of procedurally generated terrains presented in Fig. 2: flat, sloped, randomly rough, discrete obstacles, and stairs. The terrains are tiled squares with $8 \\mathrm { m }$ sides. The robots start at the center of the terrain and are given randomized heading and velocity commands (kept constant for the duration of an episode) pushing them to walk across the terrain. Slopes and stairs are organized in pyramids to allow traversability in all directions. ", + "bbox": [ + 174, + 564, + 825, + 647 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Previous works have shown the benefits of using an automated curriculum of task difficulty to learn complex locomotion policies [28, 29, 16]. 
Similarly, we find that it is essential to first train the policy on less challenging terrain before progressively increasing the complexity. We adopt a solution inspired by [16], but replace the particle filter approach with a new game-inspired automatic curriculum. All robots are assigned a terrain type and a level that represents the difficulty of that terrain. For stairs and randomized obstacles, we gradually increase the step height from $5 \\mathrm { c m }$ to $2 0 \\mathrm { c m }$ . Sloped terrain inclination is increased from 0 deg to 25 deg. If a robot manages to walk past the borders of its terrain, its level is increased, and at the next reset, it will start on more difficult terrain. However, if at the end of an episode it moved by less than half of the distance required by its target velocity, its level is reduced again. Robots solving the highest level are looped back to a randomly selected level to increase the diversity and avoid catastrophic forgetting. This approach has the advantage of training the robots at a level of difficulty tailored to their performance without requiring any external tuning. It adapts the difficulty level for each terrain type individually and provides us with visual and quantitative feedback on the progress of the training. When the robots have reached the final level and are evenly spread across all terrains due to looping back, we can conclude they have fully learned to solve the task. ", + "bbox": [ + 174, + 655, + 825, + 875 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/ecaf8198256af450c96e9b482c8d3d3a06909d2addf9a2530c761fb4193fd419.jpg", + "image_caption": [ + "Figure 3: 4000 robots progressing through the terrains with automatic curriculum, after 500 (top) and 1000 (bottom) policy updates. The robots start the training session on the first row (closest to the camera) and progressively reach harder terrains. 
" + ], + "image_footnote": [], + "bbox": [ + 174, + 89, + 823, + 320 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The proposed curriculum structure is well suited for the massively parallel regime. With thousands of robots we can directly use their current progress in the curriculum as the distribution of the policy’s performance, and do not need learn it with a generator network [30]. Furthermore, our method doesn’t require tuning and is straightforward to implement in a parallel manner with nearzero processing cost. We remove the computational overhead of re-sampling and re-generating new terrains needed for the particle filter approach. ", + "bbox": [ + 174, + 377, + 823, + 462 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Fig. 3 shows robots progressing through the terrains at two different stages of the training process. On complex terrain types, the robots require more training iterations to reach the highest levels. The distribution of robots after 500 iterations shows that while the policy is able to cross sloped terrains and to go down stairs, climbing stairs and traversing obstacles requires more training iterations. However, after 1000 iterations, the robots have reached the most challenging level for all terrain types and are spread across the map. We train for a total for 1500 iterations to let the policy converge to its highest performance. ", + "bbox": [ + 174, + 468, + 825, + 565 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2 Observations, Actions, and Rewards ", + "text_level": 1, + "bbox": [ + 178, + 582, + 464, + 595 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The policy receives proprioceptive measurements of the robot as well as terrain information around the robot’s base. 
The observations are composed of: base linear and angular velocities, measurement of the gravity vector, joint positions and velocities, the previous actions selected by the policy, and finally, 108 measurements of the terrain sampled from a grid around the robot’s base. Each measurement is the distance from the terrain surface to the robot’s base height. ", + "bbox": [ + 174, + 606, + 825, + 676 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The total reward is a weighted sum of nine terms, detailed in supplementary material. The main terms encourage the robot to follow the commanded velocities while avoiding undesired base velocities along other axes. In order to create a smoother, more natural motion, we also penalize joint torques, joint accelerations, joint target changes, and collisions. Contacts with the knees, shanks or between the feet and a vertical surface are considered collisions, while contacts with the base are considered crashes and lead to resets. Finally, we add an additional reward term encouraging the robot to take longer steps, which results in a more visually appealing behavior. We train a single policy with the same rewards for all terrains. ", + "bbox": [ + 174, + 683, + 823, + 792 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The actions are interpreted as desired joint positions sent to the motors. There, a PD controller produces motor torques. In contrast to other works [16, 20], neither the reward function nor the action space has any gait-dependent elements. 
", + "bbox": [ + 174, + 800, + 825, + 840 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3 Sim-to-Real Additions ", + "text_level": 1, + "bbox": [ + 174, + 857, + 367, + 872 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In order to make the trained policies amenable for sim-to-real transfer, we randomize the friction of the ground, add noise to the observations and randomly push the robots during the episode to teach them a more stable stance. Each robot has a friction coefficient sampled uniformly in [0.5, 1.25]. The pushes happen every $1 0 \\mathrm { s }$ . The robots’ base is accelerated up to $\\pm 1 \\mathrm { m } / \\mathrm { s }$ in both $\\mathbf { X }$ and y directions. The amount of noise is based on real data measured on the robot and is detailed in supplementary material. ", + "bbox": [ + 174, + 883, + 821, + 911 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "", + "bbox": [ + 174, + 92, + 825, + 147 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The ANYmal robot uses series elastic actuators with fairly complex dynamics, which are hard to model in simulation. For this reason and following the methodology of previous work [1], we use a neural network to compute torques from joint position commands. However, we simplify the inputs of the model. Instead of concatenating past measurements at fixed time steps and sending all of that information to a standard feed-forward network, we only provide the current measurements to an LSTM network. A potential drawback of this set-up is that the policy does not have the temporal information of the actuators as in previous work. We have experimented with various ways of providing that information through memory mechanisms for the policy but found that it does not improve the final performance. 
", + "bbox": [ + 173, + 154, + 825, + 279 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4 Results ", + "text_level": 1, + "bbox": [ + 174, + 297, + 266, + 315 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1 Effects of Massive Parallelism ", + "text_level": 1, + "bbox": [ + 176, + 329, + 419, + 344 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this section, we study the effects of the number of parallel robots on the final performance of the policy. In order to use the total reward as a single representative metric, we have to remove the curriculum, otherwise a more performant policy sees its task difficulty increase and consequently a decrease in the total reward. As such, we simplify the task by reducing the maximum step size of stairs and obstacles and directly train robots on the full range of difficulties. ", + "bbox": [ + 174, + 356, + 825, + 425 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We begin by setting a baseline with $n _ { r o b o t s } = 2 0 0 0 0$ and $n _ { s t e p s } = 5 0$ , resulting in a batch size of 1M samples. Using this very large batch size results in the best policy but at the cost of a relatively long training time. ", + "bbox": [ + 176, + 431, + 825, + 473 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We then conduct experiments in which we increase the number of robots while keeping the batch size constant. As a result, the number of steps each robot takes per policy update decreases. In this case, the training time decreases with a higher number of robots, but the policy performance drops if that number is too high. We start from 128 robots corresponding to the level of parallelization of previous CPU implementations and increase that number up to 16384, which is close to the maximum amount of robots we could simulate on rough terrain with Isaac Gym running on a single workstation GPU. 
", + "bbox": [ + 173, + 479, + 825, + 577 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In Fig. 4, we compare these results with the baseline, which allows us to select the most favorable trade-off between policy performance and training time. We see two interesting effects at play. First, when the number of robots is too high, the performance drops sharply, which can be explained by the time horizon of each robot becoming too small. As expected, with larger batch sizes, the overall reward is higher, and the time horizon effect is shifted, meaning that we can use more robots before seeing the drop. On the other hand, below a certain threshold, we see a slow decrease in performance with fewer robots. We believe this is explained by the fact that the samples are very similar with many steps per robot because of the relatively small time steps between them. This means that for the same amount of samples, there is less diversity in the data. In other words, with a low number of robots, we are further from the standard assumption that the samples are independent and identically distributed, which seems to have a noticeable effect on the training process. In terms of training time, we see a nearly linear scaling up to 4000 robots, after which simulation throughput gains slow down. As such, we can conclude that increasing the number of robots is beneficial for both final performance and training time, but there is an upper limit on this number after which an on-policy algorithm cannot learn effectively. Increasing the batch size to values much larger than what is typically used in similar works seems highly beneficial. Unfortunately, it also scales the training time so it is a trade-off that must be balanced. From the third plot we can conclude that using 2048 to 4096 robots with a batch size of $\\approx 1 0 0 k$ or $\\approx 2 0 0 k$ provides the best trade-off for this specific task. 
", + "bbox": [ + 173, + 583, + 825, + 666 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/b511ee0c2aca987f5fa8e39a0d14f3ee54098cf431491e0a271f167d3bdcff6f.jpg", + "image_caption": [ + "Figure 4: (a) Average and standard deviation (over 5 runs) of the total reward of an episode after 1500 policy updates for different number of robots and 3 different batch sizes. The ideal case of a batch size of 1M samples with 20000 robots is shown in red. (b) Total training time for the same experiments. (c) Reward dependency on total training time. Colors represent the number of robots, while shapes show the batch size (circles: 49152, crosses: 98304, triangles: 196608). Points in the upper left part of the graph (highlighted in green) represent the most desirable configuration. " + ], + "image_footnote": [], + "bbox": [ + 178, + 690, + 818, + 834 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/f15e005bc1e3d3db64d82f1f7796d15a227032608677590845862fc30d3d7da3.jpg", + "image_caption": [ + "Figure 5: Success rate of the tested policy on increasing terrain complexities. Robots start in the center of the terrain and are given a forward velocity command of $0 . 7 5 \\mathrm { m } / \\mathrm { s }$ , and a side velocity command randomized within $[ - 0 . 1 , 0 . 1 ] \\mathrm { m } / \\mathrm { s }$ . (a) Success rate for climbing stairs, descending stairs and traversing discrete obstacles. (b) Success rate for climbing and descending sloped terrains. " + ], + "image_footnote": [], + "bbox": [ + 214, + 87, + 784, + 213 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/4eab7c74f6cac548026b3ff28e7f10ab6832f65e03b74ce577a9f1ea90202882.jpg", + "image_caption": [ + "Figure 6: ANYmal C with a fixed arm, ANYmal B, A1 and Cassie in simulation. 
" + ], + "image_footnote": [], + "bbox": [ + 178, + 287, + 821, + 357 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "", + "bbox": [ + 173, + 388, + 825, + 568 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2 Simulation ", + "text_level": 1, + "bbox": [ + 174, + 585, + 289, + 599 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "For our simulation and deployment experiments, we use a policy trained with 4096 robots and a batch size of 98304, which we train for 1500 policy updates in under 20 minutes2. We begin by measuring the performance of our trained policy in simulation. To that end, we perform robustness and traversability tests. For each terrain type, we command the robots to traverse the representative difficulty of the terrain at high forward velocity and measure the success rate. A success is defined as managing to cross the terrain while avoiding any contacts on the robot’s base. Fig. 5 shows the results for the different terrains. For stairs, we see a nearly $1 0 0 \\%$ success rate for steps up to $\\mathrm { 0 . 2 m }$ , which is the hardest stair difficulty we train on and close to the kinematic limits of our robot. Randomized obstacles seem to be more demanding, with the success rate decreasing steadily. We must note that in this case, the largest step is double the reported height since neighboring obstacles can have positive and negative heights. In the case of slopes, we can observe that after $2 5 \\mathrm { d e g }$ the robots are not able to climb anymore but still learn to slide down with a moderate success rate. ", + "bbox": [ + 174, + 611, + 825, + 776 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Given our relatively simple rewards and action space, the policy is free to adopt any gait and behavior. Interestingly, it always converges to a trotting gait, but there are often artifacts in the behavior, such as a dragging leg or unreasonably high or low base heights. 
After tuning of the reward weights, we can obtain a policy that respects all our constraints and can be transferred to the physical robot. ", + "bbox": [ + 174, + 784, + 825, + 838 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To verify the generalizability of the approach, we train policies for multiple robots with the same set-up. We use the ANYmal C robot with a fixed robotic arm, which adds about $2 0 \\%$ of additional weight, and the ANYmal B robot, which has comparable dimensions but modified kinematic and dynamic properties. In these two cases, we can retrain a policy without any modifications to the rewards or algorithm hyper-parameters and obtain a very similar performance. Next, we use the Unitree A1 robot, which has smaller dimensions, four times lower weight, and a different leg configuration. In this case, we remove the actuator model of the ANYdrive motors, reduce PD gains and the torque penalties, and change the default joint configurations. We can train a dynamic policy that learns to solve the same terrains even with the reduced size of the robot. Finally, we apply our approach to Agility Robotics’ bipedal robot Cassie. We find that an additional reward encouraging standing on a single foot is necessary to achieve a walking gait. With this addition, we are able to train the robot on the same terrains as its quadrupedal counterparts. Fig. 6 shows the different robots. ", + "bbox": [ + 176, + 844, + 821, + 873 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/f0dd8a9ddc78c32773c604659ba9f56cbd659b68c879ccbb91f00fe6ccb27b59.jpg", + "image_caption": [ + "Figure 7: Locomotion policy, trained in under $2 0 \\mathrm { { m i n } }$ , deployed on the physical robot. 
" + ], + "image_footnote": [], + "bbox": [ + 178, + 101, + 820, + 195 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "", + "bbox": [ + 174, + 227, + 825, + 378 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.3 Sim-to-real Transfer ", + "text_level": 1, + "bbox": [ + 174, + 395, + 354, + 409 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "On the physical robot, our policy is fixed. We compute the observations from the robot’s sensors, feed them to the policy, and directly send the produced actions as target joint positions to the motors. We do not apply any additional filtering or constraint satisfaction checks. The terrain height measurements are queried from an elevation map that the robot is building from Lidar scans. ", + "bbox": [ + 174, + 420, + 823, + 476 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Unfortunately, this height map is far from perfect, which results in a decrease in robustness between simulation and reality. We observe that these issues mainly occur at high velocities and therefore reduce the maximum linear velocity commands to $0 . 6 \\mathrm { m } / \\mathrm { s }$ for policies deployed on the hardware. The robot can walk up and down stairs and handles obstacles in a dynamic manner. We show samples of these experiments in Fig. 7 and in the supplementary video. To overcome issues with imperfect terrain mapping or state estimation drift, the authors of [19] implemented a teacher-student set-up, which provided outstanding robustness even in adverse conditions. As part of future work, we plan to merge the two approaches. 
", + "bbox": [ + 174, + 482, + 825, + 593 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5 Conclusion ", + "text_level": 1, + "bbox": [ + 174, + 612, + 299, + 628 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this work, we demonstrated that a complex real-world robotics task can be trained in minutes with an on-policy deep reinforcement learning algorithm. Using an end-to-end GPU pipeline with thousands of robots simulated in parallel, combined with our proposed curriculum structure, we showed that the training time can be reduced by multiple orders of magnitude compared to previous work. We discussed multiple modifications to the learning algorithm and the standard hyper-parameters required to use the massively parallel regime effectively. Using our fast training pipeline, we performed many training runs, simplified the set-up, and kept only essential components. We showed that the task can be solved using simple observation and action spaces as well as relatively straightforward rewards without encouraging particular gaits or providing motion primitives. ", + "bbox": [ + 174, + 643, + 825, + 768 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The purpose of this work is not to obtain the absolute best-performing policy with the highest robustness. For that use case, many other techniques can be incorporated into the pipeline. We aim to show that a policy can be trained in record time with our set-up while still being usable on the real hardware. We wish to shift other researchers’ perspective on the required training time for a real-world application, and hope that our work can serve as a reference for future research. We expect many other tasks to benefit from the massively parallel regime. By reducing the training time of these future robotic tasks, we can greatly accelerate the developments in this field. 
", + "bbox": [ + 174, + 775, + 823, + 872 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgments ", + "text_level": 1, + "bbox": [ + 174, + 92, + 303, + 106 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We would like to thank Mayank Mittal, Joonho Lee, Takahiro Miki, and Peter Werner for their valuable suggestions and help with hardware experiments as well as the Isaac Gym and PhysX teams for their continuous support. ", + "bbox": [ + 176, + 114, + 821, + 157 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References ", + "text_level": 1, + "bbox": [ + 174, + 176, + 266, + 191 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "[1] J. Hwangbo, J. Lee, A. Dosovitskiy, D. Bellicoso, V. Tsounis, V. Koltun, and M. Hutter. Learning agile and dynamic motor skills for legged robots. Science Robotics, 4(26), 2019. \n[2] S. Gu, E. Holly, T. Lillicrap, and S. Levine. Deep reinforcement learning for robotic manipulation with asynchronous off-policy updates. In IEEE International Conference on Robotics and Automation (ICRA), May 2017. \n[3] G. Kahn, A. Villaflor, B. Ding, P. Abbeel, and S. Levine. Self-supervised deep reinforcement learning with generalized computation graphs for robot navigation. In IEEE International Conference on Robotics and Automation (ICRA), 2018. \n[4] OpenAI, I. Akkaya, M. Andrychowicz, M. Chociej, M. Litwin, B. McGrew, A. Petron, A. Paino, M. Plappert, G. Powell, R. Ribas, J. Schneider, N. Tezak, J. Tworek, P. Welinder, L. Weng, Q. Yuan, W. Zaremba, and L. Zhang. Solving rubik’s cube with a robot hand, 2019. \n[5] E. Todorov, T. Erez, and Y. Tassa. Mujoco: A physics engine for model-based control. In IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), 2012. \n[6] E. Coumans and Y. Bai. Pybullet, a python module for physics simulation for games, robotics and machine learning. http://pybullet.org, 2016–2021. \n[7] J. Hwangbo, J. Lee, and M. Hutter. 
Per-contact iteration method for solving contact dynamics. IEEE Robotics and Automation Letters, 3(2), 2018. URL www.raisim.com. \n[8] V. Makoviychuk, L. Wawrzyniak, Y. Guo, M. Lu, K. Storey, M. Macklin, D. Hoeller, N. Rudin, A. Allshire, A. Handa, and G. State. Isaac gym: High performance GPU based physics simulation for robot learning. In Conference on Neural Information Processing Systems (NeurIPS) Datasets and Benchmarks Track, 2021. \n[9] N. Heess, D. TB, S. Sriram, J. Lemmon, J. Merel, G. Wayne, Y. Tassa, T. Erez, Z. Wang, S. M. A. Eslami, M. A. Riedmiller, and D. Silver. Emergence of locomotion behaviours in rich environments. CoRR, abs/1707.02286, 2017. \n[10] A. Stooke and P. Abbeel. Accelerated methods for deep reinforcement learning. CoRR, abs/1803.02811, 2018. \n[11] B. Shacklett, E. Wijmans, A. Petrenko, M. Savva, D. Batra, V. Koltun, and K. Fatahalian. Large batch simulation for deep reinforcement learning. In International Conference on Learning Representations (ICLR), 2021. \n[12] J. Liang, V. Makoviychuk, A. Handa, N. Chentanez, M. Macklin, and D. Fox. Gpu-accelerated robotic simulation for distributed reinforcement learning. In Conference on Robot Learning (CoRL), 2018. \n[13] C. D. Freeman, E. Frey, A. Raichuk, S. Girgin, I. Mordatch, and O. Bachem. Brax - a differentiable physics engine for large scale rigid body simulation. In 35th Conference on Neural Information Processing Systems (NeurIPS) Datasets and Benchmarks Track, 2021. \n[14] A. Bouman, M. F. Ginting, N. Alatur, M. Palieri, D. D. Fan, T. Touma, T. Pailevanian, S.- K. Kim, K. Otsu, J. Burdick, and A.-a. Agha-Mohammadi. Autonomous spot: Long-range autonomous exploration of extreme environments with legged locomotion. In IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), 2020. \n[15] C. Gehring, P. Fankhauser, L. Isler, R. Diethelm, S. Bachmann, M. Potz, L. Gerstenberg, and M. Hutter. 
Anymal in the field: Solving industrial inspection of an offshore hvdc platform with a quadrupedal robot. In Field and Service Robotics, 2021. \n[16] J. Lee, J. Hwangbo, L. Wellhausen, V. Koltun, and M. Hutter. Learning quadrupedal locomotion over challenging terrain. Science Robotics, 5(47), 2020. \n[17] V. Tsounis, M. Alge, J. Lee, F. Farshidian, and M. Hutter. Deepgait: Planning and control of quadrupedal gaits using deep reinforcement learning. IEEE Robotics and Automation Letters, PP, 03 2020. \n[18] S. Gangapurwala, M. Geisert, R. Orsolino, M. Fallon, and I. Havoutis. Real-time trajectory adaptation for quadrupedal locomotion using deep reinforcement learning. In IEEE International Conference on Robotics and Automation (ICRA), 2021. \n[19] T. Miki, J. Lee, L. Wellhausen, V. Koltun, and M. Hutter. Wild anymal: Robust zero-shot perceptive locomotion. Submitted to Science Robotics, 2021. \n[20] J. Siekmann, K. Green, J. Warila, A. Fern, and J. W. Hurst. Blind bipedal stair traversal via sim-to-real reinforcement learning. CoRR, abs/2105.08328, 2021. \n[21] C. Gregg and K. Hazelwood. Where is the data? why you cannot debate cpu vs. gpu performance without the answer. In IEEE International Symposium on Performance Analysis of Systems and Software (ISPASS), 2011. \n[22] J. Schulman, F. Wolski, P. Dhariwal, A. Radford, and O. Klimov. Proximal policy optimization algorithms. CoRR, abs/1707.06347, 2017. \n[23] J. Schulman, P. Moritz, S. Levine, M. Jordan, and P. Abbeel. High-dimensional continuous control using generalized advantage estimation. In Proceedings of the International Conference on Learning Representations (ICLR), 2016. \n[24] F. Pardo, A. Tavakoli, V. Levdik, and P. Kormushev. Time limits in reinforcement learning. CoRR, abs/1712.00378, 2017. \n[25] G. Brockman, V. Cheung, L. Pettersson, J. Schneider, J. Schulman, J. Tang, and W. Zaremba. Openai gym, 2016. \n[26] A. Hill, A. Raffin, M. Ernestus, A. Gleave, A. Kanervisto, R. Traore, P. Dhariwal, C. 
Hesse, O. Klimov, A. Nichol, M. Plappert, A. Radford, J. Schulman, S. Sidor, and Y. Wu. Stable baselines. https://github.com/hill-a/stable-baselines, 2018. \n[27] J. Achiam. Spinning up in deep reinforcement learning, 2018. URL https://spinningup. openai.com/en/latest/. \n[28] R. Wang, J. Lehman, J. Clune, and K. O. Stanley. Paired open-ended trailblazer (POET): endlessly generating increasingly complex and diverse learning environments and their solutions. CoRR, abs/1901.01753, 2019. \n[29] Z. Xie, H. Y. Ling, N. H. Kim, and M. van de Panne. Allsteps: Curriculum-driven learning of stepping stone skills. Proceedings of ACM SIGGRAPH / Eurographics Symposium on Computer Animation, 2020. \n[30] C. Florensa, D. Held, X. Geng, and P. Abbeel. Automatic goal generation for reinforcement learning agents. In Proceedings of the 35th International Conference on Machine Learning (ICML), volume 80 of Proceedings of Machine Learning Research, 2018. ", + "bbox": [ + 173, + 196, + 826, + 917 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "", + "bbox": [ + 171, + 90, + 826, + 765 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/parse/train/wK2fDDJ5VcF/wK2fDDJ5VcF_middle.json b/parse/train/wK2fDDJ5VcF/wK2fDDJ5VcF_middle.json new file mode 100644 index 0000000000000000000000000000000000000000..72e90388ae4f6b84ffced6151f2d2fb50ced06ce --- /dev/null +++ b/parse/train/wK2fDDJ5VcF/wK2fDDJ5VcF_middle.json @@ -0,0 +1,24132 @@ +{ + "pdf_info": [ + { + "preproc_blocks": [ + { + "type": "title", + "bbox": [ + 137, + 81, + 474, + 120 + ], + "lines": [ + { + "bbox": [ + 135, + 79, + 475, + 103 + ], + "spans": [ + { + "bbox": [ + 135, + 79, + 475, + 103 + ], + "score": 1.0, + "content": "Learning to Walk in Minutes Using Massively", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 162, + 99, + 450, + 123 + ], + "spans": [ + { + "bbox": [ + 162, + 99, + 450, + 123 + ], + "score": 1.0, + "content": "Parallel Deep Reinforcement Learning", + "type": "text" 
+ } + ], + "index": 1 + } + ], + "index": 0.5 + }, + { + "type": "text", + "bbox": [ + 128, + 141, + 233, + 174 + ], + "lines": [ + { + "bbox": [ + 153, + 141, + 211, + 152 + ], + "spans": [ + { + "bbox": [ + 153, + 141, + 211, + 152 + ], + "score": 1.0, + "content": "Nikita Rudin", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 128, + 153, + 234, + 163 + ], + "spans": [ + { + "bbox": [ + 128, + 153, + 234, + 163 + ], + "score": 1.0, + "content": "ETH Zurich and NVIDIA", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 143, + 164, + 219, + 174 + ], + "spans": [ + { + "bbox": [ + 143, + 164, + 219, + 174 + ], + "score": 1.0, + "content": "rudinn@ethz.ch", + "type": "text" + } + ], + "index": 8 + } + ], + "index": 5 + }, + { + "type": "text", + "bbox": [ + 260, + 141, + 365, + 173 + ], + "lines": [ + { + "bbox": [ + 281, + 140, + 344, + 153 + ], + "spans": [ + { + "bbox": [ + 281, + 140, + 344, + 153 + ], + "score": 1.0, + "content": "David Hoeller", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 259, + 152, + 366, + 163 + ], + "spans": [ + { + "bbox": [ + 259, + 152, + 366, + 163 + ], + "score": 1.0, + "content": "ETH Zurich and NVIDIA", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 270, + 162, + 356, + 174 + ], + "spans": [ + { + "bbox": [ + 270, + 162, + 356, + 174 + ], + "score": 1.0, + "content": "dhoeller@ethz.ch", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 6 + }, + { + "type": "text", + "bbox": [ + 390, + 141, + 482, + 174 + ], + "lines": [ + { + "bbox": [ + 407, + 140, + 466, + 153 + ], + "spans": [ + { + "bbox": [ + 407, + 140, + 466, + 153 + ], + "score": 1.0, + "content": "Philipp Reist", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 417, + 151, + 457, + 163 + ], + "spans": [ + { + "bbox": [ + 417, + 151, + 457, + 163 + ], + "score": 1.0, + "content": "NVIDIA", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 389, + 163, + 483, + 174 + ], + "spans": [ + 
{ + "bbox": [ + 389, + 163, + 483, + 174 + ], + "score": 1.0, + "content": "preist@nvidia.com", + "type": "text" + } + ], + "index": 10 + } + ], + "index": 7 + }, + { + "type": "text", + "bbox": [ + 259, + 191, + 351, + 224 + ], + "lines": [ + { + "bbox": [ + 274, + 190, + 337, + 203 + ], + "spans": [ + { + "bbox": [ + 274, + 190, + 337, + 203 + ], + "score": 1.0, + "content": "Marco Hutter", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 280, + 202, + 331, + 213 + ], + "spans": [ + { + "bbox": [ + 280, + 202, + 331, + 213 + ], + "score": 1.0, + "content": "ETH Zurich", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 259, + 214, + 351, + 224 + ], + "spans": [ + { + "bbox": [ + 259, + 214, + 351, + 224 + ], + "score": 1.0, + "content": "mahutter@ethz.com", + "type": "text" + } + ], + "index": 13 + } + ], + "index": 12 + }, + { + "type": "text", + "bbox": [ + 142, + 258, + 469, + 401 + ], + "lines": [ + { + "bbox": [ + 142, + 258, + 469, + 271 + ], + "spans": [ + { + "bbox": [ + 142, + 258, + 469, + 271 + ], + "score": 1.0, + "content": "Abstract: In this work, we present and study a training set-up that achieves fast", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 141, + 269, + 470, + 283 + ], + "spans": [ + { + "bbox": [ + 141, + 269, + 470, + 283 + ], + "score": 1.0, + "content": "policy generation for real-world robotic tasks by using massive parallelism on a", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 141, + 280, + 469, + 294 + ], + "spans": [ + { + "bbox": [ + 141, + 280, + 469, + 294 + ], + "score": 1.0, + "content": "single workstation GPU. 
We analyze and discuss the impact of different training", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 141, + 291, + 469, + 304 + ], + "spans": [ + { + "bbox": [ + 141, + 291, + 469, + 304 + ], + "score": 1.0, + "content": "algorithm components in the massively parallel regime on the final policy perfor-", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 141, + 303, + 469, + 314 + ], + "spans": [ + { + "bbox": [ + 141, + 303, + 469, + 314 + ], + "score": 1.0, + "content": "mance and training times. In addition, we present a novel game-inspired curricu-", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 141, + 313, + 470, + 326 + ], + "spans": [ + { + "bbox": [ + 141, + 313, + 470, + 326 + ], + "score": 1.0, + "content": "lum that is well suited for training with thousands of simulated robots in parallel.", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 141, + 323, + 469, + 336 + ], + "spans": [ + { + "bbox": [ + 141, + 323, + 469, + 336 + ], + "score": 1.0, + "content": "We evaluate the approach by training the quadrupedal robot ANYmal to walk on", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 141, + 334, + 470, + 348 + ], + "spans": [ + { + "bbox": [ + 141, + 334, + 470, + 348 + ], + "score": 1.0, + "content": "challenging terrain. The parallel approach allows training policies for flat terrain", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 141, + 346, + 469, + 358 + ], + "spans": [ + { + "bbox": [ + 141, + 346, + 469, + 358 + ], + "score": 1.0, + "content": "in under four minutes, and in twenty minutes for uneven terrain. This represents", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 141, + 357, + 469, + 370 + ], + "spans": [ + { + "bbox": [ + 141, + 357, + 469, + 370 + ], + "score": 1.0, + "content": "a speedup of multiple orders of magnitude compared to previous work. 
Finally,", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 141, + 367, + 470, + 380 + ], + "spans": [ + { + "bbox": [ + 141, + 367, + 470, + 380 + ], + "score": 1.0, + "content": "we transfer the policies to the real robot to validate the approach. We open-source", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 141, + 378, + 469, + 391 + ], + "spans": [ + { + "bbox": [ + 141, + 378, + 469, + 391 + ], + "score": 1.0, + "content": "our training code to help accelerate further research in the field of learned legged", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 141, + 390, + 429, + 403 + ], + "spans": [ + { + "bbox": [ + 141, + 390, + 429, + 403 + ], + "score": 1.0, + "content": "locomotion: https://leggedrobotics.github.io/legged_gym/.", + "type": "text" + } + ], + "index": 26 + } + ], + "index": 20 + }, + { + "type": "text", + "bbox": [ + 143, + 410, + 407, + 422 + ], + "lines": [ + { + "bbox": [ + 142, + 410, + 407, + 424 + ], + "spans": [ + { + "bbox": [ + 142, + 410, + 407, + 424 + ], + "score": 1.0, + "content": "Keywords: Reinforcement Learning, Legged Robots, Sim-to-real", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 117, + 436, + 495, + 577 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 117, + 436, + 495, + 577 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 117, + 436, + 495, + 577 + ], + "spans": [ + { + "bbox": [ + 117, + 436, + 495, + 577 + ], + "score": 0.969, + "type": "image", + "image_path": "b6480bd30beb6d1b9762af85125a32c3c8067d4548699e684f5c674891d50b92.jpg" + } + ] + } + ], + "index": 29, + "virtual_lines": [ + { + "bbox": [ + 117, + 436, + 495, + 483.0 + ], + "spans": [], + "index": 28 + }, + { + "bbox": [ + 117, + 483.0, + 495, + 530.0 + ], + "spans": [], + "index": 29 + }, + { + "bbox": [ + 117, + 530.0, + 495, + 577.0 + ], + "spans": [], + "index": 30 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 182, + 
583, + 428, + 594 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 181, + 582, + 429, + 595 + ], + "spans": [ + { + "bbox": [ + 181, + 582, + 429, + 595 + ], + "score": 1.0, + "content": "Figure 1: Thousands of robots learning to walk in simulation.", + "type": "text" + } + ], + "index": 31 + } + ], + "index": 31 + } + ], + "index": 30.0 + }, + { + "type": "title", + "bbox": [ + 107, + 602, + 191, + 615 + ], + "lines": [ + { + "bbox": [ + 104, + 600, + 192, + 618 + ], + "spans": [ + { + "bbox": [ + 104, + 600, + 192, + 618 + ], + "score": 1.0, + "content": "1 Introduction", + "type": "text" + } + ], + "index": 32 + } + ], + "index": 32 + }, + { + "type": "text", + "bbox": [ + 107, + 627, + 505, + 715 + ], + "lines": [ + { + "bbox": [ + 105, + 626, + 506, + 640 + ], + "spans": [ + { + "bbox": [ + 105, + 626, + 506, + 640 + ], + "score": 1.0, + "content": "Deep reinforcement learning (DRL) is proving to be a powerful tool for robotics. Tasks such as", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 638, + 505, + 650 + ], + "spans": [ + { + "bbox": [ + 105, + 638, + 505, + 650 + ], + "score": 1.0, + "content": "legged locomotion [1], manipulation [2], and navigation [3], have been solved using these new", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 649, + 505, + 661 + ], + "spans": [ + { + "bbox": [ + 105, + 649, + 505, + 661 + ], + "score": 1.0, + "content": "tools, and research continues to keep adding more and more challenging tasks to the list. The", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 660, + 506, + 672 + ], + "spans": [ + { + "bbox": [ + 105, + 660, + 506, + 672 + ], + "score": 1.0, + "content": "amount of data required to train a policy increases with the task complexity. 
For this reason, most", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 106, + 671, + 506, + 683 + ], + "spans": [ + { + "bbox": [ + 106, + 671, + 506, + 683 + ], + "score": 1.0, + "content": "work focuses on training in simulation before transferring to real robots. We have reached a point", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 681, + 505, + 694 + ], + "spans": [ + { + "bbox": [ + 105, + 681, + 505, + 694 + ], + "score": 1.0, + "content": "where multiple days or even weeks are needed to fully train an agent with current simulators. For", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 692, + 506, + 704 + ], + "spans": [ + { + "bbox": [ + 105, + 692, + 506, + 704 + ], + "score": 1.0, + "content": "example, OpenAI’s block reorientation task was trained for up to 14 days and their Rubik’s cube", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 702, + 505, + 717 + ], + "spans": [ + { + "bbox": [ + 105, + 702, + 505, + 717 + ], + "score": 1.0, + "content": "solving policy took several months to train [4]. 
The problem is exacerbated by the fact that deep", + "type": "text" + } + ], + "index": 40 + } + ], + "index": 36.5 + } + ], + "page_idx": 0, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 107, + 731, + 335, + 741 + ], + "lines": [ + { + "bbox": [ + 106, + 730, + 336, + 743 + ], + "spans": [ + { + "bbox": [ + 106, + 730, + 336, + 743 + ], + "score": 1.0, + "content": "5th Conference on Robot Learning (CoRL 2021), London, UK.", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "title", + "bbox": [ + 137, + 81, + 474, + 120 + ], + "lines": [ + { + "bbox": [ + 135, + 79, + 475, + 103 + ], + "spans": [ + { + "bbox": [ + 135, + 79, + 475, + 103 + ], + "score": 1.0, + "content": "Learning to Walk in Minutes Using Massively", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 162, + 99, + 450, + 123 + ], + "spans": [ + { + "bbox": [ + 162, + 99, + 450, + 123 + ], + "score": 1.0, + "content": "Parallel Deep Reinforcement Learning", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 0.5 + }, + { + "type": "text", + "bbox": [ + 128, + 141, + 233, + 174 + ], + "lines": [ + { + "bbox": [ + 153, + 141, + 211, + 152 + ], + "spans": [ + { + "bbox": [ + 153, + 141, + 211, + 152 + ], + "score": 1.0, + "content": "Nikita Rudin", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 128, + 153, + 234, + 163 + ], + "spans": [ + { + "bbox": [ + 128, + 153, + 234, + 163 + ], + "score": 1.0, + "content": "ETH Zurich and NVIDIA", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 143, + 164, + 219, + 174 + ], + "spans": [ + { + "bbox": [ + 143, + 164, + 219, + 174 + ], + "score": 1.0, + "content": "rudinn@ethz.ch", + "type": "text" + } + ], + "index": 8 + } + ], + "index": 5, + "bbox_fs": [ + 128, + 141, + 234, + 174 + ] + }, + { + "type": "text", + "bbox": [ + 260, + 141, + 365, + 173 + ], + "lines": [ + { + "bbox": [ + 281, + 140, + 344, + 153 + ], + "spans": [ + { + 
"bbox": [ + 281, + 140, + 344, + 153 + ], + "score": 1.0, + "content": "David Hoeller", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 259, + 152, + 366, + 163 + ], + "spans": [ + { + "bbox": [ + 259, + 152, + 366, + 163 + ], + "score": 1.0, + "content": "ETH Zurich and NVIDIA", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 270, + 162, + 356, + 174 + ], + "spans": [ + { + "bbox": [ + 270, + 162, + 356, + 174 + ], + "score": 1.0, + "content": "dhoeller@ethz.ch", + "type": "text" + } + ], + "index": 9 + } + ], + "index": 6, + "bbox_fs": [ + 259, + 140, + 366, + 174 + ] + }, + { + "type": "text", + "bbox": [ + 390, + 141, + 482, + 174 + ], + "lines": [ + { + "bbox": [ + 407, + 140, + 466, + 153 + ], + "spans": [ + { + "bbox": [ + 407, + 140, + 466, + 153 + ], + "score": 1.0, + "content": "Philipp Reist", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 417, + 151, + 457, + 163 + ], + "spans": [ + { + "bbox": [ + 417, + 151, + 457, + 163 + ], + "score": 1.0, + "content": "NVIDIA", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 389, + 163, + 483, + 174 + ], + "spans": [ + { + "bbox": [ + 389, + 163, + 483, + 174 + ], + "score": 1.0, + "content": "preist@nvidia.com", + "type": "text" + } + ], + "index": 10 + } + ], + "index": 7, + "bbox_fs": [ + 389, + 140, + 483, + 174 + ] + }, + { + "type": "text", + "bbox": [ + 259, + 191, + 351, + 224 + ], + "lines": [ + { + "bbox": [ + 274, + 190, + 337, + 203 + ], + "spans": [ + { + "bbox": [ + 274, + 190, + 337, + 203 + ], + "score": 1.0, + "content": "Marco Hutter", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 280, + 202, + 331, + 213 + ], + "spans": [ + { + "bbox": [ + 280, + 202, + 331, + 213 + ], + "score": 1.0, + "content": "ETH Zurich", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 259, + 214, + 351, + 224 + ], + "spans": [ + { + "bbox": [ + 259, + 214, + 351, + 224 + ], + "score": 1.0, + "content": "mahutter@ethz.com", + "type": 
"text" + } + ], + "index": 13 + } + ], + "index": 12, + "bbox_fs": [ + 259, + 190, + 351, + 224 + ] + }, + { + "type": "text", + "bbox": [ + 142, + 258, + 469, + 401 + ], + "lines": [ + { + "bbox": [ + 142, + 258, + 469, + 271 + ], + "spans": [ + { + "bbox": [ + 142, + 258, + 469, + 271 + ], + "score": 1.0, + "content": "Abstract: In this work, we present and study a training set-up that achieves fast", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 141, + 269, + 470, + 283 + ], + "spans": [ + { + "bbox": [ + 141, + 269, + 470, + 283 + ], + "score": 1.0, + "content": "policy generation for real-world robotic tasks by using massive parallelism on a", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 141, + 280, + 469, + 294 + ], + "spans": [ + { + "bbox": [ + 141, + 280, + 469, + 294 + ], + "score": 1.0, + "content": "single workstation GPU. We analyze and discuss the impact of different training", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 141, + 291, + 469, + 304 + ], + "spans": [ + { + "bbox": [ + 141, + 291, + 469, + 304 + ], + "score": 1.0, + "content": "algorithm components in the massively parallel regime on the final policy perfor-", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 141, + 303, + 469, + 314 + ], + "spans": [ + { + "bbox": [ + 141, + 303, + 469, + 314 + ], + "score": 1.0, + "content": "mance and training times. 
In addition, we present a novel game-inspired curricu-", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 141, + 313, + 470, + 326 + ], + "spans": [ + { + "bbox": [ + 141, + 313, + 470, + 326 + ], + "score": 1.0, + "content": "lum that is well suited for training with thousands of simulated robots in parallel.", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 141, + 323, + 469, + 336 + ], + "spans": [ + { + "bbox": [ + 141, + 323, + 469, + 336 + ], + "score": 1.0, + "content": "We evaluate the approach by training the quadrupedal robot ANYmal to walk on", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 141, + 334, + 470, + 348 + ], + "spans": [ + { + "bbox": [ + 141, + 334, + 470, + 348 + ], + "score": 1.0, + "content": "challenging terrain. The parallel approach allows training policies for flat terrain", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 141, + 346, + 469, + 358 + ], + "spans": [ + { + "bbox": [ + 141, + 346, + 469, + 358 + ], + "score": 1.0, + "content": "in under four minutes, and in twenty minutes for uneven terrain. This represents", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 141, + 357, + 469, + 370 + ], + "spans": [ + { + "bbox": [ + 141, + 357, + 469, + 370 + ], + "score": 1.0, + "content": "a speedup of multiple orders of magnitude compared to previous work. Finally,", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 141, + 367, + 470, + 380 + ], + "spans": [ + { + "bbox": [ + 141, + 367, + 470, + 380 + ], + "score": 1.0, + "content": "we transfer the policies to the real robot to validate the approach. 
We open-source", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 141, + 378, + 469, + 391 + ], + "spans": [ + { + "bbox": [ + 141, + 378, + 469, + 391 + ], + "score": 1.0, + "content": "our training code to help accelerate further research in the field of learned legged", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 141, + 390, + 429, + 403 + ], + "spans": [ + { + "bbox": [ + 141, + 390, + 429, + 403 + ], + "score": 1.0, + "content": "locomotion: https://leggedrobotics.github.io/legged_gym/.", + "type": "text" + } + ], + "index": 26 + } + ], + "index": 20, + "bbox_fs": [ + 141, + 258, + 470, + 403 + ] + }, + { + "type": "text", + "bbox": [ + 143, + 410, + 407, + 422 + ], + "lines": [ + { + "bbox": [ + 142, + 410, + 407, + 424 + ], + "spans": [ + { + "bbox": [ + 142, + 410, + 407, + 424 + ], + "score": 1.0, + "content": "Keywords: Reinforcement Learning, Legged Robots, Sim-to-real", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 27, + "bbox_fs": [ + 142, + 410, + 407, + 424 + ] + }, + { + "type": "image", + "bbox": [ + 117, + 436, + 495, + 577 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 117, + 436, + 495, + 577 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 117, + 436, + 495, + 577 + ], + "spans": [ + { + "bbox": [ + 117, + 436, + 495, + 577 + ], + "score": 0.969, + "type": "image", + "image_path": "b6480bd30beb6d1b9762af85125a32c3c8067d4548699e684f5c674891d50b92.jpg" + } + ] + } + ], + "index": 29, + "virtual_lines": [ + { + "bbox": [ + 117, + 436, + 495, + 483.0 + ], + "spans": [], + "index": 28 + }, + { + "bbox": [ + 117, + 483.0, + 495, + 530.0 + ], + "spans": [], + "index": 29 + }, + { + "bbox": [ + 117, + 530.0, + 495, + 577.0 + ], + "spans": [], + "index": 30 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 182, + 583, + 428, + 594 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 181, + 582, + 429, + 595 + ], + "spans": [ + { + "bbox": [ + 181, + 582, + 429, + 595 + ], + 
"score": 1.0, + "content": "Figure 1: Thousands of robots learning to walk in simulation.", + "type": "text" + } + ], + "index": 31 + } + ], + "index": 31 + } + ], + "index": 30.0 + }, + { + "type": "title", + "bbox": [ + 107, + 602, + 191, + 615 + ], + "lines": [ + { + "bbox": [ + 104, + 600, + 192, + 618 + ], + "spans": [ + { + "bbox": [ + 104, + 600, + 192, + 618 + ], + "score": 1.0, + "content": "1 Introduction", + "type": "text" + } + ], + "index": 32 + } + ], + "index": 32 + }, + { + "type": "text", + "bbox": [ + 107, + 627, + 505, + 715 + ], + "lines": [ + { + "bbox": [ + 105, + 626, + 506, + 640 + ], + "spans": [ + { + "bbox": [ + 105, + 626, + 506, + 640 + ], + "score": 1.0, + "content": "Deep reinforcement learning (DRL) is proving to be a powerful tool for robotics. Tasks such as", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 638, + 505, + 650 + ], + "spans": [ + { + "bbox": [ + 105, + 638, + 505, + 650 + ], + "score": 1.0, + "content": "legged locomotion [1], manipulation [2], and navigation [3], have been solved using these new", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 649, + 505, + 661 + ], + "spans": [ + { + "bbox": [ + 105, + 649, + 505, + 661 + ], + "score": 1.0, + "content": "tools, and research continues to keep adding more and more challenging tasks to the list. The", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 660, + 506, + 672 + ], + "spans": [ + { + "bbox": [ + 105, + 660, + 506, + 672 + ], + "score": 1.0, + "content": "amount of data required to train a policy increases with the task complexity. For this reason, most", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 106, + 671, + 506, + 683 + ], + "spans": [ + { + "bbox": [ + 106, + 671, + 506, + 683 + ], + "score": 1.0, + "content": "work focuses on training in simulation before transferring to real robots. 
We have reached a point", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 681, + 505, + 694 + ], + "spans": [ + { + "bbox": [ + 105, + 681, + 505, + 694 + ], + "score": 1.0, + "content": "where multiple days or even weeks are needed to fully train an agent with current simulators. For", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 692, + 506, + 704 + ], + "spans": [ + { + "bbox": [ + 105, + 692, + 506, + 704 + ], + "score": 1.0, + "content": "example, OpenAI’s block reorientation task was trained for up to 14 days and their Rubik’s cube", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 702, + 505, + 717 + ], + "spans": [ + { + "bbox": [ + 105, + 702, + 505, + 717 + ], + "score": 1.0, + "content": "solving policy took several months to train [4]. The problem is exacerbated by the fact that deep", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 72, + 506, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 506, + 85 + ], + "score": 1.0, + "content": "reinforcement learning requires hyper-parameter tuning to obtain a suitable solution which requires", + "type": "text", + "cross_page": true + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 84, + 506, + 96 + ], + "spans": [ + { + "bbox": [ + 105, + 84, + 506, + 96 + ], + "score": 1.0, + "content": "sequentially rerunning time-consuming training. 
Reducing training times using massively parallel", + "type": "text", + "cross_page": true + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 95, + 506, + 107 + ], + "spans": [ + { + "bbox": [ + 105, + 95, + 506, + 107 + ], + "score": 1.0, + "content": "approaches such as presented here can therefore help improve the quality and time-to-deployment", + "type": "text", + "cross_page": true + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 106, + 466, + 118 + ], + "spans": [ + { + "bbox": [ + 106, + 106, + 466, + 118 + ], + "score": 1.0, + "content": "of DRL policies, as a training setup can be iterated on more often in the same time frame.", + "type": "text", + "cross_page": true + } + ], + "index": 3 + } + ], + "index": 36.5, + "bbox_fs": [ + 105, + 626, + 506, + 717 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 107, + 73, + 504, + 116 + ], + "lines": [ + { + "bbox": [ + 105, + 72, + 506, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 506, + 85 + ], + "score": 1.0, + "content": "reinforcement learning requires hyper-parameter tuning to obtain a suitable solution which requires", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 84, + 506, + 96 + ], + "spans": [ + { + "bbox": [ + 105, + 84, + 506, + 96 + ], + "score": 1.0, + "content": "sequentially rerunning time-consuming training. 
Reducing training times using massively parallel", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 95, + 506, + 107 + ], + "spans": [ + { + "bbox": [ + 105, + 95, + 506, + 107 + ], + "score": 1.0, + "content": "approaches such as presented here can therefore help improve the quality and time-to-deployment", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 106, + 466, + 118 + ], + "spans": [ + { + "bbox": [ + 106, + 106, + 466, + 118 + ], + "score": 1.0, + "content": "of DRL policies, as a training setup can be iterated on more often in the same time frame.", + "type": "text" + } + ], + "index": 3 + } + ], + "index": 1.5 + }, + { + "type": "text", + "bbox": [ + 107, + 122, + 505, + 231 + ], + "lines": [ + { + "bbox": [ + 105, + 122, + 505, + 135 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 505, + 135 + ], + "score": 1.0, + "content": "In this paper, we examine the effects of massive parallelism for on-policy DRL algorithms and", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 132, + 506, + 146 + ], + "spans": [ + { + "bbox": [ + 105, + 132, + 506, + 146 + ], + "score": 1.0, + "content": "present considerations in how the standard RL formulation and the most commonly used hyper-", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 144, + 506, + 157 + ], + "spans": [ + { + "bbox": [ + 104, + 144, + 506, + 157 + ], + "score": 1.0, + "content": "parameters should be adapted to learn efficiently in the highly parallel regime. 
Additionally, we", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 155, + 505, + 168 + ], + "spans": [ + { + "bbox": [ + 105, + 155, + 505, + 168 + ], + "score": 1.0, + "content": "present a novel game-inspired curriculum which automatically adapts the task difficulty to the per-", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 165, + 505, + 178 + ], + "spans": [ + { + "bbox": [ + 105, + 165, + 505, + 178 + ], + "score": 1.0, + "content": "formance of the policy. The proposed curriculum architecture is straightforward to implement, does", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 176, + 505, + 189 + ], + "spans": [ + { + "bbox": [ + 105, + 176, + 505, + 189 + ], + "score": 1.0, + "content": "not require tuning, and is well suited for the massively parallel regime. Common robotic simulators", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 187, + 505, + 200 + ], + "spans": [ + { + "bbox": [ + 105, + 187, + 505, + 200 + ], + "score": 1.0, + "content": "such as Mujoco [5], Bullet [6], or Raisim [7] feature efficient multi-body dynamics implementations.", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 198, + 505, + 211 + ], + "spans": [ + { + "bbox": [ + 105, + 198, + 505, + 211 + ], + "score": 1.0, + "content": "However, they have been developed to run on CPUs with only a reduced amount of parallelism. 
In", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 210, + 505, + 221 + ], + "spans": [ + { + "bbox": [ + 106, + 210, + 505, + 221 + ], + "score": 1.0, + "content": "this work, we use NVIDIA’s Isaac Gym simulation environment [8], which runs both the simulation", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 220, + 446, + 232 + ], + "spans": [ + { + "bbox": [ + 105, + 220, + 446, + 232 + ], + "score": 1.0, + "content": "and training on the GPU and is capable of simulating thousands of robots in parallel.", + "type": "text" + } + ], + "index": 13 + } + ], + "index": 8.5 + }, + { + "type": "text", + "bbox": [ + 107, + 236, + 505, + 389 + ], + "lines": [ + { + "bbox": [ + 105, + 236, + 505, + 248 + ], + "spans": [ + { + "bbox": [ + 105, + 236, + 505, + 248 + ], + "score": 1.0, + "content": "The massively parallel training regime has been explored before [4, 9] in the context of distributed", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 247, + 505, + 260 + ], + "spans": [ + { + "bbox": [ + 105, + 247, + 505, + 260 + ], + "score": 1.0, + "content": "systems with a network of thousands of CPUs each running a separate instance of the simulation.", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 258, + 506, + 271 + ], + "spans": [ + { + "bbox": [ + 106, + 258, + 506, + 271 + ], + "score": 1.0, + "content": "The parallelization was achieved by averaging the gradients between the different workers without", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 269, + 505, + 282 + ], + "spans": [ + { + "bbox": [ + 105, + 269, + 505, + 282 + ], + "score": 1.0, + "content": "reducing the number of samples provided by each agent. 
This results in large batch sizes of millions", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 280, + 505, + 293 + ], + "spans": [ + { + "bbox": [ + 105, + 280, + 505, + 293 + ], + "score": 1.0, + "content": "of samples for each policy update which improves the learning dynamics, but does not optimize", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 291, + 505, + 304 + ], + "spans": [ + { + "bbox": [ + 105, + 291, + 505, + 304 + ], + "score": 1.0, + "content": "the overall training time. In parallel, recent works have aimed to increase the simulation through-", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 301, + 506, + 315 + ], + "spans": [ + { + "bbox": [ + 104, + 301, + 506, + 315 + ], + "score": 1.0, + "content": "put and reduce training times of standard DRL benchmark tasks. A framework combining parallel", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 312, + 506, + 325 + ], + "spans": [ + { + "bbox": [ + 105, + 312, + 506, + 325 + ], + "score": 1.0, + "content": "simulation with multi-GPU training [10] was proposed to achieve fast training using hundreds of", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 324, + 506, + 336 + ], + "spans": [ + { + "bbox": [ + 105, + 324, + 506, + 336 + ], + "score": 1.0, + "content": "parallel agents. In the context of visual navigation, large batch simulation has been used to increase", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 335, + 506, + 347 + ], + "spans": [ + { + "bbox": [ + 105, + 335, + 506, + 347 + ], + "score": 1.0, + "content": "the training throughput [11]. 
Furthermore, GPU accelerated physics simulation has been shown to", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 345, + 506, + 358 + ], + "spans": [ + { + "bbox": [ + 105, + 345, + 506, + 358 + ], + "score": 1.0, + "content": "significantly improve the training time of the Humanoid running task [12]. A differentiable simu-", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 356, + 504, + 369 + ], + "spans": [ + { + "bbox": [ + 105, + 356, + 504, + 369 + ], + "score": 1.0, + "content": "lator running on Google’s TPUs has also been shown to greatly accelerate the training of multiple", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 366, + 506, + 381 + ], + "spans": [ + { + "bbox": [ + 105, + 366, + 506, + 381 + ], + "score": 1.0, + "content": "tasks [13]. We build upon [10, 12] by pushing the parallelization further, optimizing the training", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 378, + 424, + 390 + ], + "spans": [ + { + "bbox": [ + 105, + 378, + 424, + 390 + ], + "score": 1.0, + "content": "algorithm, and applying the approach to a challenging real-world robotics task.", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 20.5 + }, + { + "type": "text", + "bbox": [ + 107, + 394, + 505, + 536 + ], + "lines": [ + { + "bbox": [ + 105, + 394, + 505, + 407 + ], + "spans": [ + { + "bbox": [ + 105, + 394, + 505, + 407 + ], + "score": 1.0, + "content": "Perceptive and dynamic locomotion for legged robots in unstructured environments is a demanding", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 405, + 506, + 419 + ], + "spans": [ + { + "bbox": [ + 105, + 405, + 506, + 419 + ], + "score": 1.0, + "content": "task that, until recently, had only been partially demonstrated with complex model-based approaches", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 416, + 506, + 430 + ], + "spans": [ + { + "bbox": [ + 105, + 416, + 
506, + 430 + ], + "score": 1.0, + "content": "[14, 15]. Learning-based approaches are emerging as a promising alternative. For quadrupeds, DRL", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 426, + 505, + 441 + ], + "spans": [ + { + "bbox": [ + 105, + 426, + 505, + 441 + ], + "score": 1.0, + "content": "has been used to train blind policies robust to highly uneven ground [16] (12 hours of training). Per-", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 438, + 505, + 451 + ], + "spans": [ + { + "bbox": [ + 105, + 438, + 505, + 451 + ], + "score": 1.0, + "content": "ceptive locomotion over challenging terrain has been achieved by combining learning with optimal", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 449, + 505, + 462 + ], + "spans": [ + { + "bbox": [ + 105, + 449, + 505, + 462 + ], + "score": 1.0, + "content": "control techniques [17, 18] (82 and 88 hours of training) and recently, a fully learned approach has", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 459, + 506, + 474 + ], + "spans": [ + { + "bbox": [ + 105, + 459, + 506, + 474 + ], + "score": 1.0, + "content": "shown great robustness in this setting [19] (120 hours of training). Similarly, bipedal robots have", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 471, + 505, + 484 + ], + "spans": [ + { + "bbox": [ + 105, + 471, + 505, + 484 + ], + "score": 1.0, + "content": "also been trained to walk blindly on stairs [20] (training time not reported). 
With our approach we", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 482, + 505, + 495 + ], + "spans": [ + { + "bbox": [ + 105, + 482, + 505, + 495 + ], + "score": 1.0, + "content": "can train a perceptive policy in under 20 minutes on a single GPU, with the complexity of sim-", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 106, + 493, + 505, + 505 + ], + "spans": [ + { + "bbox": [ + 106, + 493, + 505, + 505 + ], + "score": 1.0, + "content": "to-real transfer to the hardware, which increases the performance and robustness requirements and", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 504, + 505, + 517 + ], + "spans": [ + { + "bbox": [ + 105, + 504, + 505, + 517 + ], + "score": 1.0, + "content": "provides clear validation of the overall approach. Training such behaviors in minutes opens up new", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 514, + 505, + 527 + ], + "spans": [ + { + "bbox": [ + 105, + 514, + 505, + 527 + ], + "score": 1.0, + "content": "exciting possibilities ranging from automatic tuning to customized training using scans of particular", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 526, + 165, + 538 + ], + "spans": [ + { + "bbox": [ + 105, + 526, + 165, + 538 + ], + "score": 1.0, + "content": "environments.", + "type": "text" + } + ], + "index": 40 + } + ], + "index": 34 + }, + { + "type": "title", + "bbox": [ + 106, + 560, + 347, + 574 + ], + "lines": [ + { + "bbox": [ + 104, + 559, + 349, + 578 + ], + "spans": [ + { + "bbox": [ + 104, + 559, + 349, + 578 + ], + "score": 1.0, + "content": "2 Massively Parallel Reinforcement Learning", + "type": "text" + } + ], + "index": 41 + } + ], + "index": 41 + }, + { + "type": "text", + "bbox": [ + 107, + 590, + 505, + 722 + ], + "lines": [ + { + "bbox": [ + 106, + 591, + 505, + 603 + ], + "spans": [ + { + "bbox": [ + 106, + 591, + 505, + 603 + ], + "score": 1.0, + "content": "Current (on-policy) 
reinforcement learning algorithms are divided into two parts: data collection and", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 601, + 506, + 615 + ], + "spans": [ + { + "bbox": [ + 105, + 601, + 506, + 615 + ], + "score": 1.0, + "content": "policy update. The policy update, which corresponds to back-propagation for neural networks, is", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 612, + 505, + 624 + ], + "spans": [ + { + "bbox": [ + 105, + 612, + 505, + 624 + ], + "score": 1.0, + "content": "easily performed in parallel on the GPU. Parallelizing data collection is not as straightforward. Each", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 623, + 506, + 637 + ], + "spans": [ + { + "bbox": [ + 105, + 623, + 506, + 637 + ], + "score": 1.0, + "content": "step consists of policy inference, simulation, reward, and observation calculation. Current popular", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 634, + 505, + 647 + ], + "spans": [ + { + "bbox": [ + 105, + 634, + 505, + 647 + ], + "score": 1.0, + "content": "pipelines have the simulation and reward/observation calculation computed on the CPU, making the", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 105, + 645, + 506, + 657 + ], + "spans": [ + { + "bbox": [ + 105, + 645, + 506, + 657 + ], + "score": 1.0, + "content": "GPU unsuitable for policy inference because of communication bottle-necks. 
Data transfer over", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 105, + 655, + 506, + 668 + ], + "spans": [ + { + "bbox": [ + 105, + 655, + 506, + 668 + ], + "score": 1.0, + "content": "PCIe is known to be the weakest link of GPU acceleration, and can be as much as 50 times slower", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 105, + 667, + 505, + 680 + ], + "spans": [ + { + "bbox": [ + 105, + 667, + 505, + 680 + ], + "score": 1.0, + "content": "than the GPU processing time alone [21]. Furthermore, with CPU data collection, a large amount", + "type": "text" + } + ], + "index": 49 + }, + { + "bbox": [ + 105, + 678, + 505, + 690 + ], + "spans": [ + { + "bbox": [ + 105, + 678, + 505, + 690 + ], + "score": 1.0, + "content": "of data must be sent to the GPU for each policy update, slowing down the overall process. Limited", + "type": "text" + } + ], + "index": 50 + }, + { + "bbox": [ + 105, + 688, + 505, + 702 + ], + "spans": [ + { + "bbox": [ + 105, + 688, + 505, + 702 + ], + "score": 1.0, + "content": "parallelization can be achieved by using multiple CPU cores and spawning many processes, each", + "type": "text" + } + ], + "index": 51 + }, + { + "bbox": [ + 105, + 700, + 505, + 713 + ], + "spans": [ + { + "bbox": [ + 105, + 700, + 505, + 713 + ], + "score": 1.0, + "content": "running the simulation for one agent. However, the number of agents is quickly limited by the num-", + "type": "text" + } + ], + "index": 52 + }, + { + "bbox": [ + 105, + 711, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 711, + 505, + 723 + ], + "score": 1.0, + "content": "ber of cores and other issues such as memory usage. 
We explore the potential of massive parallelism", + "type": "text" + } + ], + "index": 53 + } + ], + "index": 47.5 + } + ], + "page_idx": 1, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 302, + 742, + 309, + 750 + ], + "lines": [ + { + "bbox": [ + 301, + 741, + 310, + 753 + ], + "spans": [ + { + "bbox": [ + 301, + 741, + 310, + 753 + ], + "score": 1.0, + "content": "2", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 107, + 73, + 504, + 116 + ], + "lines": [], + "index": 1.5, + "bbox_fs": [ + 105, + 72, + 506, + 118 + ], + "lines_deleted": true + }, + { + "type": "text", + "bbox": [ + 107, + 122, + 505, + 231 + ], + "lines": [ + { + "bbox": [ + 105, + 122, + 505, + 135 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 505, + 135 + ], + "score": 1.0, + "content": "In this paper, we examine the effects of massive parallelism for on-policy DRL algorithms and", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 132, + 506, + 146 + ], + "spans": [ + { + "bbox": [ + 105, + 132, + 506, + 146 + ], + "score": 1.0, + "content": "present considerations in how the standard RL formulation and the most commonly used hyper-", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 144, + 506, + 157 + ], + "spans": [ + { + "bbox": [ + 104, + 144, + 506, + 157 + ], + "score": 1.0, + "content": "parameters should be adapted to learn efficiently in the highly parallel regime. 
Additionally, we", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 155, + 505, + 168 + ], + "spans": [ + { + "bbox": [ + 105, + 155, + 505, + 168 + ], + "score": 1.0, + "content": "present a novel game-inspired curriculum which automatically adapts the task difficulty to the per-", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 165, + 505, + 178 + ], + "spans": [ + { + "bbox": [ + 105, + 165, + 505, + 178 + ], + "score": 1.0, + "content": "formance of the policy. The proposed curriculum architecture is straightforward to implement, does", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 176, + 505, + 189 + ], + "spans": [ + { + "bbox": [ + 105, + 176, + 505, + 189 + ], + "score": 1.0, + "content": "not require tuning, and is well suited for the massively parallel regime. Common robotic simulators", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 187, + 505, + 200 + ], + "spans": [ + { + "bbox": [ + 105, + 187, + 505, + 200 + ], + "score": 1.0, + "content": "such as Mujoco [5], Bullet [6], or Raisim [7] feature efficient multi-body dynamics implementations.", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 198, + 505, + 211 + ], + "spans": [ + { + "bbox": [ + 105, + 198, + 505, + 211 + ], + "score": 1.0, + "content": "However, they have been developed to run on CPUs with only a reduced amount of parallelism. 
In", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 210, + 505, + 221 + ], + "spans": [ + { + "bbox": [ + 106, + 210, + 505, + 221 + ], + "score": 1.0, + "content": "this work, we use NVIDIA’s Isaac Gym simulation environment [8], which runs both the simulation", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 220, + 446, + 232 + ], + "spans": [ + { + "bbox": [ + 105, + 220, + 446, + 232 + ], + "score": 1.0, + "content": "and training on the GPU and is capable of simulating thousands of robots in parallel.", + "type": "text" + } + ], + "index": 13 + } + ], + "index": 8.5, + "bbox_fs": [ + 104, + 122, + 506, + 232 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 236, + 505, + 389 + ], + "lines": [ + { + "bbox": [ + 105, + 236, + 505, + 248 + ], + "spans": [ + { + "bbox": [ + 105, + 236, + 505, + 248 + ], + "score": 1.0, + "content": "The massively parallel training regime has been explored before [4, 9] in the context of distributed", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 247, + 505, + 260 + ], + "spans": [ + { + "bbox": [ + 105, + 247, + 505, + 260 + ], + "score": 1.0, + "content": "systems with a network of thousands of CPUs each running a separate instance of the simulation.", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 258, + 506, + 271 + ], + "spans": [ + { + "bbox": [ + 106, + 258, + 506, + 271 + ], + "score": 1.0, + "content": "The parallelization was achieved by averaging the gradients between the different workers without", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 269, + 505, + 282 + ], + "spans": [ + { + "bbox": [ + 105, + 269, + 505, + 282 + ], + "score": 1.0, + "content": "reducing the number of samples provided by each agent. 
This results in large batch sizes of millions", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 280, + 505, + 293 + ], + "spans": [ + { + "bbox": [ + 105, + 280, + 505, + 293 + ], + "score": 1.0, + "content": "of samples for each policy update which improves the learning dynamics, but does not optimize", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 291, + 505, + 304 + ], + "spans": [ + { + "bbox": [ + 105, + 291, + 505, + 304 + ], + "score": 1.0, + "content": "the overall training time. In parallel, recent works have aimed to increase the simulation through-", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 104, + 301, + 506, + 315 + ], + "spans": [ + { + "bbox": [ + 104, + 301, + 506, + 315 + ], + "score": 1.0, + "content": "put and reduce training times of standard DRL benchmark tasks. A framework combining parallel", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 312, + 506, + 325 + ], + "spans": [ + { + "bbox": [ + 105, + 312, + 506, + 325 + ], + "score": 1.0, + "content": "simulation with multi-GPU training [10] was proposed to achieve fast training using hundreds of", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 324, + 506, + 336 + ], + "spans": [ + { + "bbox": [ + 105, + 324, + 506, + 336 + ], + "score": 1.0, + "content": "parallel agents. In the context of visual navigation, large batch simulation has been used to increase", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 335, + 506, + 347 + ], + "spans": [ + { + "bbox": [ + 105, + 335, + 506, + 347 + ], + "score": 1.0, + "content": "the training throughput [11]. 
Furthermore, GPU accelerated physics simulation has been shown to", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 345, + 506, + 358 + ], + "spans": [ + { + "bbox": [ + 105, + 345, + 506, + 358 + ], + "score": 1.0, + "content": "significantly improve the training time of the Humanoid running task [12]. A differentiable simu-", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 356, + 504, + 369 + ], + "spans": [ + { + "bbox": [ + 105, + 356, + 504, + 369 + ], + "score": 1.0, + "content": "lator running on Google’s TPUs has also been shown to greatly accelerate the training of multiple", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 366, + 506, + 381 + ], + "spans": [ + { + "bbox": [ + 105, + 366, + 506, + 381 + ], + "score": 1.0, + "content": "tasks [13]. We build upon [10, 12] by pushing the parallelization further, optimizing the training", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 378, + 424, + 390 + ], + "spans": [ + { + "bbox": [ + 105, + 378, + 424, + 390 + ], + "score": 1.0, + "content": "algorithm, and applying the approach to a challenging real-world robotics task.", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 20.5, + "bbox_fs": [ + 104, + 236, + 506, + 390 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 394, + 505, + 536 + ], + "lines": [ + { + "bbox": [ + 105, + 394, + 505, + 407 + ], + "spans": [ + { + "bbox": [ + 105, + 394, + 505, + 407 + ], + "score": 1.0, + "content": "Perceptive and dynamic locomotion for legged robots in unstructured environments is a demanding", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 405, + 506, + 419 + ], + "spans": [ + { + "bbox": [ + 105, + 405, + 506, + 419 + ], + "score": 1.0, + "content": "task that, until recently, had only been partially demonstrated with complex model-based approaches", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 416, + 506, + 430 + ], 
+ "spans": [ + { + "bbox": [ + 105, + 416, + 506, + 430 + ], + "score": 1.0, + "content": "[14, 15]. Learning-based approaches are emerging as a promising alternative. For quadrupeds, DRL", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 426, + 505, + 441 + ], + "spans": [ + { + "bbox": [ + 105, + 426, + 505, + 441 + ], + "score": 1.0, + "content": "has been used to train blind policies robust to highly uneven ground [16] (12 hours of training). Per-", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 438, + 505, + 451 + ], + "spans": [ + { + "bbox": [ + 105, + 438, + 505, + 451 + ], + "score": 1.0, + "content": "ceptive locomotion over challenging terrain has been achieved by combining learning with optimal", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 449, + 505, + 462 + ], + "spans": [ + { + "bbox": [ + 105, + 449, + 505, + 462 + ], + "score": 1.0, + "content": "control techniques [17, 18] (82 and 88 hours of training) and recently, a fully learned approach has", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 459, + 506, + 474 + ], + "spans": [ + { + "bbox": [ + 105, + 459, + 506, + 474 + ], + "score": 1.0, + "content": "shown great robustness in this setting [19] (120 hours of training). Similarly, bipedal robots have", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 471, + 505, + 484 + ], + "spans": [ + { + "bbox": [ + 105, + 471, + 505, + 484 + ], + "score": 1.0, + "content": "also been trained to walk blindly on stairs [20] (training time not reported). 
With our approach we", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 482, + 505, + 495 + ], + "spans": [ + { + "bbox": [ + 105, + 482, + 505, + 495 + ], + "score": 1.0, + "content": "can train a perceptive policy in under 20 minutes on a single GPU, with the complexity of sim-", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 106, + 493, + 505, + 505 + ], + "spans": [ + { + "bbox": [ + 106, + 493, + 505, + 505 + ], + "score": 1.0, + "content": "to-real transfer to the hardware, which increases the performance and robustness requirements and", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 504, + 505, + 517 + ], + "spans": [ + { + "bbox": [ + 105, + 504, + 505, + 517 + ], + "score": 1.0, + "content": "provides clear validation of the overall approach. Training such behaviors in minutes opens up new", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 514, + 505, + 527 + ], + "spans": [ + { + "bbox": [ + 105, + 514, + 505, + 527 + ], + "score": 1.0, + "content": "exciting possibilities ranging from automatic tuning to customized training using scans of particular", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 526, + 165, + 538 + ], + "spans": [ + { + "bbox": [ + 105, + 526, + 165, + 538 + ], + "score": 1.0, + "content": "environments.", + "type": "text" + } + ], + "index": 40 + } + ], + "index": 34, + "bbox_fs": [ + 105, + 394, + 506, + 538 + ] + }, + { + "type": "title", + "bbox": [ + 106, + 560, + 347, + 574 + ], + "lines": [ + { + "bbox": [ + 104, + 559, + 349, + 578 + ], + "spans": [ + { + "bbox": [ + 104, + 559, + 349, + 578 + ], + "score": 1.0, + "content": "2 Massively Parallel Reinforcement Learning", + "type": "text" + } + ], + "index": 41 + } + ], + "index": 41 + }, + { + "type": "text", + "bbox": [ + 107, + 590, + 505, + 722 + ], + "lines": [ + { + "bbox": [ + 106, + 591, + 505, + 603 + ], + "spans": [ + { + "bbox": [ + 106, + 591, + 505, + 603 + ], + 
"score": 1.0, + "content": "Current (on-policy) reinforcement learning algorithms are divided into two parts: data collection and", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 601, + 506, + 615 + ], + "spans": [ + { + "bbox": [ + 105, + 601, + 506, + 615 + ], + "score": 1.0, + "content": "policy update. The policy update, which corresponds to back-propagation for neural networks, is", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 612, + 505, + 624 + ], + "spans": [ + { + "bbox": [ + 105, + 612, + 505, + 624 + ], + "score": 1.0, + "content": "easily performed in parallel on the GPU. Parallelizing data collection is not as straightforward. Each", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 623, + 506, + 637 + ], + "spans": [ + { + "bbox": [ + 105, + 623, + 506, + 637 + ], + "score": 1.0, + "content": "step consists of policy inference, simulation, reward, and observation calculation. Current popular", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 634, + 505, + 647 + ], + "spans": [ + { + "bbox": [ + 105, + 634, + 505, + 647 + ], + "score": 1.0, + "content": "pipelines have the simulation and reward/observation calculation computed on the CPU, making the", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 105, + 645, + 506, + 657 + ], + "spans": [ + { + "bbox": [ + 105, + 645, + 506, + 657 + ], + "score": 1.0, + "content": "GPU unsuitable for policy inference because of communication bottle-necks. 
Data transfer over", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 105, + 655, + 506, + 668 + ], + "spans": [ + { + "bbox": [ + 105, + 655, + 506, + 668 + ], + "score": 1.0, + "content": "PCIe is known to be the weakest link of GPU acceleration, and can be as much as 50 times slower", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 105, + 667, + 505, + 680 + ], + "spans": [ + { + "bbox": [ + 105, + 667, + 505, + 680 + ], + "score": 1.0, + "content": "than the GPU processing time alone [21]. Furthermore, with CPU data collection, a large amount", + "type": "text" + } + ], + "index": 49 + }, + { + "bbox": [ + 105, + 678, + 505, + 690 + ], + "spans": [ + { + "bbox": [ + 105, + 678, + 505, + 690 + ], + "score": 1.0, + "content": "of data must be sent to the GPU for each policy update, slowing down the overall process. Limited", + "type": "text" + } + ], + "index": 50 + }, + { + "bbox": [ + 105, + 688, + 505, + 702 + ], + "spans": [ + { + "bbox": [ + 105, + 688, + 505, + 702 + ], + "score": 1.0, + "content": "parallelization can be achieved by using multiple CPU cores and spawning many processes, each", + "type": "text" + } + ], + "index": 51 + }, + { + "bbox": [ + 105, + 700, + 505, + 713 + ], + "spans": [ + { + "bbox": [ + 105, + 700, + 505, + 713 + ], + "score": 1.0, + "content": "running the simulation for one agent. However, the number of agents is quickly limited by the num-", + "type": "text" + } + ], + "index": 52 + }, + { + "bbox": [ + 105, + 711, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 711, + 505, + 723 + ], + "score": 1.0, + "content": "ber of cores and other issues such as memory usage. 
We explore the potential of massive parallelism", + "type": "text" + } + ], + "index": 53 + }, + { + "bbox": [ + 105, + 70, + 506, + 87 + ], + "spans": [ + { + "bbox": [ + 105, + 70, + 506, + 87 + ], + "score": 1.0, + "content": "with Isaac Gym’s end-to-end data collection and policy updates on the GPU, significantly reducing", + "type": "text", + "cross_page": true + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 84, + 313, + 96 + ], + "spans": [ + { + "bbox": [ + 106, + 84, + 313, + 96 + ], + "score": 1.0, + "content": "data copying and improving simulation throughput.", + "type": "text", + "cross_page": true + } + ], + "index": 1 + } + ], + "index": 47.5, + "bbox_fs": [ + 105, + 591, + 506, + 723 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 73, + 504, + 95 + ], + "lines": [ + { + "bbox": [ + 105, + 70, + 506, + 87 + ], + "spans": [ + { + "bbox": [ + 105, + 70, + 506, + 87 + ], + "score": 1.0, + "content": "with Isaac Gym’s end-to-end data collection and policy updates on the GPU, significantly reducing", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 84, + 313, + 96 + ], + "spans": [ + { + "bbox": [ + 106, + 84, + 313, + 96 + ], + "score": 1.0, + "content": "data copying and improving simulation throughput.", + "type": "text" + } + ], + "index": 1 + } + ], + "index": 0.5 + }, + { + "type": "title", + "bbox": [ + 107, + 108, + 232, + 120 + ], + "lines": [ + { + "bbox": [ + 105, + 106, + 232, + 122 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 232, + 122 + ], + "score": 1.0, + "content": "2.1 Simulation Throughput", + "type": "text" + } + ], + "index": 2 + } + ], + "index": 2 + }, + { + "type": "text", + "bbox": [ + 107, + 128, + 505, + 238 + ], + "lines": [ + { + "bbox": [ + 106, + 128, + 505, + 141 + ], + "spans": [ + { + "bbox": [ + 106, + 128, + 505, + 141 + ], + "score": 1.0, + "content": "The main factor affecting the total simulation throughput is the number of robots simulated in par-", 
+ "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 139, + 505, + 151 + ], + "spans": [ + { + "bbox": [ + 106, + 139, + 505, + 151 + ], + "score": 1.0, + "content": "allel. Modern GPUs can handle tens of thousands of parallel instructions. Similarly, IsaacGym’s", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 150, + 506, + 163 + ], + "spans": [ + { + "bbox": [ + 105, + 150, + 506, + 163 + ], + "score": 1.0, + "content": "PhysX engine can process thousands of robots in a single simulation and all other computations of", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 162, + 505, + 174 + ], + "spans": [ + { + "bbox": [ + 105, + 162, + 505, + 174 + ], + "score": 1.0, + "content": "our pipeline are vectorized to scale favorably with the number of robots. Using a single simulation", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 171, + 506, + 186 + ], + "spans": [ + { + "bbox": [ + 105, + 171, + 506, + 186 + ], + "score": 1.0, + "content": "with thousands of robots presents some new challenges. For example, a single common terrain mesh", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 181, + 506, + 197 + ], + "spans": [ + { + "bbox": [ + 104, + 181, + 506, + 197 + ], + "score": 1.0, + "content": "must be used, and it cannot be easily changed at each reset. We circumvent this problem by creating", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 195, + 505, + 206 + ], + "spans": [ + { + "bbox": [ + 106, + 195, + 505, + 206 + ], + "score": 1.0, + "content": "the whole mesh with all terrain types and levels tiled side by side. We change the terrain level of", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 204, + 506, + 217 + ], + "spans": [ + { + "bbox": [ + 105, + 204, + 506, + 217 + ], + "score": 1.0, + "content": "the robots by physically moving them on the mesh. 
In supplementary material, we show the com-", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 215, + 506, + 228 + ], + "spans": [ + { + "bbox": [ + 105, + 215, + 506, + 228 + ], + "score": 1.0, + "content": "putational time of different parts of the pipeline, examine how these times scale with the number of", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 227, + 410, + 240 + ], + "spans": [ + { + "bbox": [ + 105, + 227, + 410, + 240 + ], + "score": 1.0, + "content": "robots, and provide other techniques to optimize the simulation throughput.", + "type": "text" + } + ], + "index": 12 + } + ], + "index": 7.5 + }, + { + "type": "title", + "bbox": [ + 107, + 251, + 198, + 263 + ], + "lines": [ + { + "bbox": [ + 105, + 250, + 199, + 265 + ], + "spans": [ + { + "bbox": [ + 105, + 250, + 199, + 265 + ], + "score": 1.0, + "content": "2.2 DRL Algorithm", + "type": "text" + } + ], + "index": 13 + } + ], + "index": 13 + }, + { + "type": "text", + "bbox": [ + 107, + 271, + 505, + 316 + ], + "lines": [ + { + "bbox": [ + 105, + 271, + 505, + 284 + ], + "spans": [ + { + "bbox": [ + 105, + 271, + 505, + 284 + ], + "score": 1.0, + "content": "We build upon a custom implementation of the Proximal Policy Optimization (PPO) algorithm [22].", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 282, + 506, + 295 + ], + "spans": [ + { + "bbox": [ + 105, + 282, + 506, + 295 + ], + "score": 1.0, + "content": "Our implementation is designed to perform every operation and store all the data on the GPU. 
In or-", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 294, + 505, + 306 + ], + "spans": [ + { + "bbox": [ + 105, + 294, + 505, + 306 + ], + "score": 1.0, + "content": "der to efficiently learn from thousands of robots in parallel, we perform some essential modifications", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 305, + 433, + 316 + ], + "spans": [ + { + "bbox": [ + 105, + 305, + 433, + 316 + ], + "score": 1.0, + "content": "to the algorithm and change some of the commonly used hyper-parameter values.", + "type": "text" + } + ], + "index": 17 + } + ], + "index": 15.5 + }, + { + "type": "title", + "bbox": [ + 108, + 327, + 273, + 339 + ], + "lines": [ + { + "bbox": [ + 105, + 326, + 275, + 340 + ], + "spans": [ + { + "bbox": [ + 105, + 326, + 275, + 340 + ], + "score": 1.0, + "content": "2.2.1 Hyper-Parameters Modification", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 18 + }, + { + "type": "text", + "bbox": [ + 106, + 346, + 505, + 565 + ], + "lines": [ + { + "bbox": [ + 105, + 347, + 505, + 359 + ], + "spans": [ + { + "bbox": [ + 105, + 347, + 505, + 359 + ], + "score": 1.0, + "content": "In an on-policy algorithm such as PPO, a fixed policy collects a selected amount of data before do-", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 357, + 504, + 370 + ], + "spans": [ + { + "bbox": [ + 105, + 357, + 282, + 370 + ], + "score": 1.0, + "content": "ing the next policy update. 
This batch size,", + "type": "text" + }, + { + "bbox": [ + 282, + 358, + 290, + 367 + ], + "score": 0.54, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 291, + 357, + 504, + 370 + ], + "score": 1.0, + "content": ", is a crucial hyper-parameter for successful learning.", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 368, + 505, + 380 + ], + "spans": [ + { + "bbox": [ + 106, + 368, + 505, + 380 + ], + "score": 1.0, + "content": "With too little data, the gradients will be too noisy, and the algorithm will not learn effectively. With", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 380, + 505, + 392 + ], + "spans": [ + { + "bbox": [ + 105, + 380, + 505, + 392 + ], + "score": 1.0, + "content": "too much data, the samples become repetitive, and the algorithm cannot extract more information", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 106, + 390, + 505, + 403 + ], + "spans": [ + { + "bbox": [ + 106, + 390, + 505, + 403 + ], + "score": 1.0, + "content": "from them. These samples represent wasted simulation time and slow down the overall training. 
We", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 104, + 401, + 506, + 416 + ], + "spans": [ + { + "bbox": [ + 104, + 401, + 128, + 416 + ], + "score": 1.0, + "content": "have", + "type": "text" + }, + { + "bbox": [ + 128, + 402, + 207, + 413 + ], + "score": 0.9, + "content": "B = n _ { r o b o t s } n _ { s t e p s }", + "type": "inline_equation" + }, + { + "bbox": [ + 208, + 401, + 240, + 416 + ], + "score": 1.0, + "content": ", where", + "type": "text" + }, + { + "bbox": [ + 240, + 403, + 266, + 413 + ], + "score": 0.9, + "content": " { n _ { s t e p s } }", + "type": "inline_equation" + }, + { + "bbox": [ + 266, + 401, + 506, + 416 + ], + "score": 1.0, + "content": "is the number of steps each robot takes per policy update", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 412, + 506, + 425 + ], + "spans": [ + { + "bbox": [ + 105, + 412, + 123, + 425 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 124, + 414, + 154, + 424 + ], + "score": 0.88, + "content": "n _ { r o b o t s }", + "type": "inline_equation" + }, + { + "bbox": [ + 154, + 412, + 407, + 425 + ], + "score": 1.0, + "content": "the number of robots simulated in parallel. 
Since we increase", + "type": "text" + }, + { + "bbox": [ + 408, + 414, + 438, + 424 + ], + "score": 0.88, + "content": "n _ { r o b o t s }", + "type": "inline_equation" + }, + { + "bbox": [ + 438, + 412, + 506, + 425 + ], + "score": 1.0, + "content": "by a few orders", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 422, + 506, + 437 + ], + "spans": [ + { + "bbox": [ + 105, + 422, + 268, + 437 + ], + "score": 1.0, + "content": "of magnitude, we must choose a small", + "type": "text" + }, + { + "bbox": [ + 269, + 424, + 295, + 435 + ], + "score": 0.87, + "content": "n _ { s t e p s }", + "type": "inline_equation" + }, + { + "bbox": [ + 295, + 422, + 330, + 437 + ], + "score": 1.0, + "content": "to keep", + "type": "text" + }, + { + "bbox": [ + 331, + 423, + 340, + 433 + ], + "score": 0.75, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 341, + 422, + 506, + 437 + ], + "score": 1.0, + "content": "reasonable and hence optimize training", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 432, + 506, + 447 + ], + "spans": [ + { + "bbox": [ + 105, + 432, + 506, + 447 + ], + "score": 1.0, + "content": "times, which is a setting that has not been extensively explored for on-policy reinforcement learning", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 104, + 443, + 506, + 460 + ], + "spans": [ + { + "bbox": [ + 104, + 443, + 297, + 460 + ], + "score": 1.0, + "content": "algorithms. It turns out that we can not choose", + "type": "text" + }, + { + "bbox": [ + 297, + 446, + 323, + 457 + ], + "score": 0.91, + "content": "n _ { s t e p s }", + "type": "inline_equation" + }, + { + "bbox": [ + 324, + 443, + 506, + 460 + ], + "score": 1.0, + "content": "to be arbitrarily low. 
The algorithm requires", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 456, + 505, + 469 + ], + "spans": [ + { + "bbox": [ + 105, + 456, + 505, + 469 + ], + "score": 1.0, + "content": "trajectories with coherent temporal information to learn effectively. Even though, in theory, informa-", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 106, + 466, + 505, + 479 + ], + "spans": [ + { + "bbox": [ + 106, + 466, + 505, + 479 + ], + "score": 1.0, + "content": "tion of single steps could be used, we find that the algorithm fails to converge to the optimal solution", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 104, + 476, + 505, + 491 + ], + "spans": [ + { + "bbox": [ + 104, + 476, + 505, + 491 + ], + "score": 1.0, + "content": "below a certain threshold. This can be explained by the fact that we use Generalized Advantage", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 488, + 506, + 501 + ], + "spans": [ + { + "bbox": [ + 105, + 488, + 506, + 501 + ], + "score": 1.0, + "content": "Estimation (GAE) [23], which requires rewards from multiple time steps to be effective. For our", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 499, + 505, + 512 + ], + "spans": [ + { + "bbox": [ + 105, + 499, + 505, + 512 + ], + "score": 1.0, + "content": "task, we find that the algorithm struggles when we provide fewer than 25 consecutive steps, corre-", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 510, + 506, + 524 + ], + "spans": [ + { + "bbox": [ + 105, + 510, + 155, + 524 + ], + "score": 1.0, + "content": "sponding to", + "type": "text" + }, + { + "bbox": [ + 155, + 510, + 176, + 521 + ], + "score": 0.43, + "content": "0 . 5 \\mathrm { s }", + "type": "inline_equation" + }, + { + "bbox": [ + 176, + 510, + 366, + 524 + ], + "score": 1.0, + "content": "of simulated time. 
It is important to distinguish", + "type": "text" + }, + { + "bbox": [ + 366, + 511, + 392, + 522 + ], + "score": 0.88, + "content": " { n _ { s t e p s } }", + "type": "inline_equation" + }, + { + "bbox": [ + 392, + 510, + 506, + 524 + ], + "score": 1.0, + "content": "from the maximum episode", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 106, + 521, + 506, + 533 + ], + "spans": [ + { + "bbox": [ + 106, + 521, + 348, + 533 + ], + "score": 1.0, + "content": "length leading to a time-out and a reset, which we define as", + "type": "text" + }, + { + "bbox": [ + 348, + 522, + 365, + 532 + ], + "score": 0.59, + "content": "2 0 \\mathrm { s }", + "type": "inline_equation" + }, + { + "bbox": [ + 366, + 521, + 506, + 533 + ], + "score": 1.0, + "content": ". The environments are reset when", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 106, + 532, + 505, + 545 + ], + "spans": [ + { + "bbox": [ + 106, + 532, + 505, + 545 + ], + "score": 1.0, + "content": "they reach this maximum length and not after each iteration, meaning that a single episode can cover", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 543, + 505, + 556 + ], + "spans": [ + { + "bbox": [ + 105, + 543, + 505, + 556 + ], + "score": 1.0, + "content": "many policy updates. 
This limits the total number of robots training in parallel, and consequently,", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 106, + 554, + 391, + 566 + ], + "spans": [ + { + "bbox": [ + 106, + 554, + 391, + 566 + ], + "score": 1.0, + "content": "prohibits us from using the full computational capabilities of the GPU.", + "type": "text" + } + ], + "index": 38 + } + ], + "index": 28.5 + }, + { + "type": "text", + "bbox": [ + 107, + 570, + 504, + 624 + ], + "lines": [ + { + "bbox": [ + 107, + 570, + 505, + 582 + ], + "spans": [ + { + "bbox": [ + 107, + 570, + 505, + 582 + ], + "score": 1.0, + "content": "The mini-batch size represents the size of the chunks in which the batch size is split to perform back-", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 581, + 506, + 593 + ], + "spans": [ + { + "bbox": [ + 105, + 581, + 506, + 593 + ], + "score": 1.0, + "content": "propagation. We find that having mini-batch sizes much larger than what is usually considered best", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 592, + 506, + 604 + ], + "spans": [ + { + "bbox": [ + 105, + 592, + 506, + 604 + ], + "score": 1.0, + "content": "practice is beneficial for our massively parallel use case. 
We use mini-batches of tens of thousands", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 601, + 506, + 617 + ], + "spans": [ + { + "bbox": [ + 105, + 601, + 506, + 617 + ], + "score": 1.0, + "content": "of samples and observe that it stabilizes the learning process without increasing the total training", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 613, + 129, + 626 + ], + "spans": [ + { + "bbox": [ + 105, + 613, + 129, + 626 + ], + "score": 1.0, + "content": "time.", + "type": "text" + } + ], + "index": 43 + } + ], + "index": 41 + }, + { + "type": "title", + "bbox": [ + 107, + 637, + 204, + 649 + ], + "lines": [ + { + "bbox": [ + 105, + 634, + 205, + 652 + ], + "spans": [ + { + "bbox": [ + 105, + 634, + 205, + 652 + ], + "score": 1.0, + "content": "2.2.2 Reset Handling", + "type": "text" + } + ], + "index": 44 + } + ], + "index": 44 + }, + { + "type": "text", + "bbox": [ + 107, + 656, + 504, + 722 + ], + "lines": [ + { + "bbox": [ + 106, + 655, + 505, + 668 + ], + "spans": [ + { + "bbox": [ + 106, + 655, + 505, + 668 + ], + "score": 1.0, + "content": "During training, the robots must be reset whenever they fall, and also after some time to keep them", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 667, + 506, + 680 + ], + "spans": [ + { + "bbox": [ + 105, + 667, + 506, + 680 + ], + "score": 1.0, + "content": "exploring new trajectories and terrains. The PPO algorithm includes a critic predicting an infinite", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 105, + 677, + 506, + 691 + ], + "spans": [ + { + "bbox": [ + 105, + 677, + 506, + 691 + ], + "score": 1.0, + "content": "horizon sum of future discounted rewards. 
Resets break this infinite horizon assumption and can", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 105, + 688, + 506, + 702 + ], + "spans": [ + { + "bbox": [ + 105, + 688, + 506, + 702 + ], + "score": 1.0, + "content": "lead to inferior critic performance if not handled carefully. Resets based on failure or reaching a", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 105, + 699, + 506, + 713 + ], + "spans": [ + { + "bbox": [ + 105, + 699, + 506, + 713 + ], + "score": 1.0, + "content": "goal are not a problem because the critic can predict them. However, a reset based on a time out can", + "type": "text" + } + ], + "index": 49 + }, + { + "bbox": [ + 105, + 711, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 711, + 505, + 723 + ], + "score": 1.0, + "content": "not be predicted (we do not provide episode time in the observations). The solution is to distinguish", + "type": "text" + } + ], + "index": 50 + } + ], + "index": 47.5 + } + ], + "page_idx": 2, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 302, + 741, + 309, + 750 + ], + "lines": [ + { + "bbox": [ + 301, + 740, + 309, + 752 + ], + "spans": [ + { + "bbox": [ + 301, + 740, + 309, + 752 + ], + "score": 1.0, + "content": "3", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 106, + 73, + 504, + 95 + ], + "lines": [], + "index": 0.5, + "bbox_fs": [ + 105, + 70, + 506, + 96 + ], + "lines_deleted": true + }, + { + "type": "title", + "bbox": [ + 107, + 108, + 232, + 120 + ], + "lines": [ + { + "bbox": [ + 105, + 106, + 232, + 122 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 232, + 122 + ], + "score": 1.0, + "content": "2.1 Simulation Throughput", + "type": "text" + } + ], + "index": 2 + } + ], + "index": 2 + }, + { + "type": "text", + "bbox": [ + 107, + 128, + 505, + 238 + ], + "lines": [ + { + "bbox": [ + 106, + 128, + 505, + 141 + ], + "spans": [ + { + "bbox": [ + 106, 
+ 128, + 505, + 141 + ], + "score": 1.0, + "content": "The main factor affecting the total simulation throughput is the number of robots simulated in par-", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 139, + 505, + 151 + ], + "spans": [ + { + "bbox": [ + 106, + 139, + 505, + 151 + ], + "score": 1.0, + "content": "allel. Modern GPUs can handle tens of thousands of parallel instructions. Similarly, IsaacGym’s", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 150, + 506, + 163 + ], + "spans": [ + { + "bbox": [ + 105, + 150, + 506, + 163 + ], + "score": 1.0, + "content": "PhysX engine can process thousands of robots in a single simulation and all other computations of", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 162, + 505, + 174 + ], + "spans": [ + { + "bbox": [ + 105, + 162, + 505, + 174 + ], + "score": 1.0, + "content": "our pipeline are vectorized to scale favorably with the number of robots. Using a single simulation", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 171, + 506, + 186 + ], + "spans": [ + { + "bbox": [ + 105, + 171, + 506, + 186 + ], + "score": 1.0, + "content": "with thousands of robots presents some new challenges. For example, a single common terrain mesh", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 181, + 506, + 197 + ], + "spans": [ + { + "bbox": [ + 104, + 181, + 506, + 197 + ], + "score": 1.0, + "content": "must be used, and it cannot be easily changed at each reset. We circumvent this problem by creating", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 195, + 505, + 206 + ], + "spans": [ + { + "bbox": [ + 106, + 195, + 505, + 206 + ], + "score": 1.0, + "content": "the whole mesh with all terrain types and levels tiled side by side. 
We change the terrain level of", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 204, + 506, + 217 + ], + "spans": [ + { + "bbox": [ + 105, + 204, + 506, + 217 + ], + "score": 1.0, + "content": "the robots by physically moving them on the mesh. In supplementary material, we show the com-", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 215, + 506, + 228 + ], + "spans": [ + { + "bbox": [ + 105, + 215, + 506, + 228 + ], + "score": 1.0, + "content": "putational time of different parts of the pipeline, examine how these times scale with the number of", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 227, + 410, + 240 + ], + "spans": [ + { + "bbox": [ + 105, + 227, + 410, + 240 + ], + "score": 1.0, + "content": "robots, and provide other techniques to optimize the simulation throughput.", + "type": "text" + } + ], + "index": 12 + } + ], + "index": 7.5, + "bbox_fs": [ + 104, + 128, + 506, + 240 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 251, + 198, + 263 + ], + "lines": [ + { + "bbox": [ + 105, + 250, + 199, + 265 + ], + "spans": [ + { + "bbox": [ + 105, + 250, + 199, + 265 + ], + "score": 1.0, + "content": "2.2 DRL Algorithm", + "type": "text" + } + ], + "index": 13 + } + ], + "index": 13 + }, + { + "type": "text", + "bbox": [ + 107, + 271, + 505, + 316 + ], + "lines": [ + { + "bbox": [ + 105, + 271, + 505, + 284 + ], + "spans": [ + { + "bbox": [ + 105, + 271, + 505, + 284 + ], + "score": 1.0, + "content": "We build upon a custom implementation of the Proximal Policy Optimization (PPO) algorithm [22].", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 282, + 506, + 295 + ], + "spans": [ + { + "bbox": [ + 105, + 282, + 506, + 295 + ], + "score": 1.0, + "content": "Our implementation is designed to perform every operation and store all the data on the GPU. 
In or-", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 294, + 505, + 306 + ], + "spans": [ + { + "bbox": [ + 105, + 294, + 505, + 306 + ], + "score": 1.0, + "content": "der to efficiently learn from thousands of robots in parallel, we perform some essential modifications", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 305, + 433, + 316 + ], + "spans": [ + { + "bbox": [ + 105, + 305, + 433, + 316 + ], + "score": 1.0, + "content": "to the algorithm and change some of the commonly used hyper-parameter values.", + "type": "text" + } + ], + "index": 17 + } + ], + "index": 15.5, + "bbox_fs": [ + 105, + 271, + 506, + 316 + ] + }, + { + "type": "title", + "bbox": [ + 108, + 327, + 273, + 339 + ], + "lines": [ + { + "bbox": [ + 105, + 326, + 275, + 340 + ], + "spans": [ + { + "bbox": [ + 105, + 326, + 275, + 340 + ], + "score": 1.0, + "content": "2.2.1 Hyper-Parameters Modification", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 18 + }, + { + "type": "text", + "bbox": [ + 106, + 346, + 505, + 565 + ], + "lines": [ + { + "bbox": [ + 105, + 347, + 505, + 359 + ], + "spans": [ + { + "bbox": [ + 105, + 347, + 505, + 359 + ], + "score": 1.0, + "content": "In an on-policy algorithm such as PPO, a fixed policy collects a selected amount of data before do-", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 357, + 504, + 370 + ], + "spans": [ + { + "bbox": [ + 105, + 357, + 282, + 370 + ], + "score": 1.0, + "content": "ing the next policy update. 
This batch size,", + "type": "text" + }, + { + "bbox": [ + 282, + 358, + 290, + 367 + ], + "score": 0.54, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 291, + 357, + 504, + 370 + ], + "score": 1.0, + "content": ", is a crucial hyper-parameter for successful learning.", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 368, + 505, + 380 + ], + "spans": [ + { + "bbox": [ + 106, + 368, + 505, + 380 + ], + "score": 1.0, + "content": "With too little data, the gradients will be too noisy, and the algorithm will not learn effectively. With", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 380, + 505, + 392 + ], + "spans": [ + { + "bbox": [ + 105, + 380, + 505, + 392 + ], + "score": 1.0, + "content": "too much data, the samples become repetitive, and the algorithm cannot extract more information", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 106, + 390, + 505, + 403 + ], + "spans": [ + { + "bbox": [ + 106, + 390, + 505, + 403 + ], + "score": 1.0, + "content": "from them. These samples represent wasted simulation time and slow down the overall training. 
We", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 104, + 401, + 506, + 416 + ], + "spans": [ + { + "bbox": [ + 104, + 401, + 128, + 416 + ], + "score": 1.0, + "content": "have", + "type": "text" + }, + { + "bbox": [ + 128, + 402, + 207, + 413 + ], + "score": 0.9, + "content": "B = n _ { r o b o t s } n _ { s t e p s }", + "type": "inline_equation" + }, + { + "bbox": [ + 208, + 401, + 240, + 416 + ], + "score": 1.0, + "content": ", where", + "type": "text" + }, + { + "bbox": [ + 240, + 403, + 266, + 413 + ], + "score": 0.9, + "content": " { n _ { s t e p s } }", + "type": "inline_equation" + }, + { + "bbox": [ + 266, + 401, + 506, + 416 + ], + "score": 1.0, + "content": "is the number of steps each robot takes per policy update", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 412, + 506, + 425 + ], + "spans": [ + { + "bbox": [ + 105, + 412, + 123, + 425 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 124, + 414, + 154, + 424 + ], + "score": 0.88, + "content": "n _ { r o b o t s }", + "type": "inline_equation" + }, + { + "bbox": [ + 154, + 412, + 407, + 425 + ], + "score": 1.0, + "content": "the number of robots simulated in parallel. 
Since we increase", + "type": "text" + }, + { + "bbox": [ + 408, + 414, + 438, + 424 + ], + "score": 0.88, + "content": "n _ { r o b o t s }", + "type": "inline_equation" + }, + { + "bbox": [ + 438, + 412, + 506, + 425 + ], + "score": 1.0, + "content": "by a few orders", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 422, + 506, + 437 + ], + "spans": [ + { + "bbox": [ + 105, + 422, + 268, + 437 + ], + "score": 1.0, + "content": "of magnitude, we must choose a small", + "type": "text" + }, + { + "bbox": [ + 269, + 424, + 295, + 435 + ], + "score": 0.87, + "content": "n _ { s t e p s }", + "type": "inline_equation" + }, + { + "bbox": [ + 295, + 422, + 330, + 437 + ], + "score": 1.0, + "content": "to keep", + "type": "text" + }, + { + "bbox": [ + 331, + 423, + 340, + 433 + ], + "score": 0.75, + "content": "B", + "type": "inline_equation" + }, + { + "bbox": [ + 341, + 422, + 506, + 437 + ], + "score": 1.0, + "content": "reasonable and hence optimize training", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 432, + 506, + 447 + ], + "spans": [ + { + "bbox": [ + 105, + 432, + 506, + 447 + ], + "score": 1.0, + "content": "times, which is a setting that has not been extensively explored for on-policy reinforcement learning", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 104, + 443, + 506, + 460 + ], + "spans": [ + { + "bbox": [ + 104, + 443, + 297, + 460 + ], + "score": 1.0, + "content": "algorithms. It turns out that we can not choose", + "type": "text" + }, + { + "bbox": [ + 297, + 446, + 323, + 457 + ], + "score": 0.91, + "content": "n _ { s t e p s }", + "type": "inline_equation" + }, + { + "bbox": [ + 324, + 443, + 506, + 460 + ], + "score": 1.0, + "content": "to be arbitrarily low. 
The algorithm requires", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 456, + 505, + 469 + ], + "spans": [ + { + "bbox": [ + 105, + 456, + 505, + 469 + ], + "score": 1.0, + "content": "trajectories with coherent temporal information to learn effectively. Even though, in theory, informa-", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 106, + 466, + 505, + 479 + ], + "spans": [ + { + "bbox": [ + 106, + 466, + 505, + 479 + ], + "score": 1.0, + "content": "tion of single steps could be used, we find that the algorithm fails to converge to the optimal solution", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 104, + 476, + 505, + 491 + ], + "spans": [ + { + "bbox": [ + 104, + 476, + 505, + 491 + ], + "score": 1.0, + "content": "below a certain threshold. This can be explained by the fact that we use Generalized Advantage", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 488, + 506, + 501 + ], + "spans": [ + { + "bbox": [ + 105, + 488, + 506, + 501 + ], + "score": 1.0, + "content": "Estimation (GAE) [23], which requires rewards from multiple time steps to be effective. For our", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 499, + 505, + 512 + ], + "spans": [ + { + "bbox": [ + 105, + 499, + 505, + 512 + ], + "score": 1.0, + "content": "task, we find that the algorithm struggles when we provide fewer than 25 consecutive steps, corre-", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 510, + 506, + 524 + ], + "spans": [ + { + "bbox": [ + 105, + 510, + 155, + 524 + ], + "score": 1.0, + "content": "sponding to", + "type": "text" + }, + { + "bbox": [ + 155, + 510, + 176, + 521 + ], + "score": 0.43, + "content": "0 . 5 \\mathrm { s }", + "type": "inline_equation" + }, + { + "bbox": [ + 176, + 510, + 366, + 524 + ], + "score": 1.0, + "content": "of simulated time. 
It is important to distinguish", + "type": "text" + }, + { + "bbox": [ + 366, + 511, + 392, + 522 + ], + "score": 0.88, + "content": " { n _ { s t e p s } }", + "type": "inline_equation" + }, + { + "bbox": [ + 392, + 510, + 506, + 524 + ], + "score": 1.0, + "content": "from the maximum episode", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 106, + 521, + 506, + 533 + ], + "spans": [ + { + "bbox": [ + 106, + 521, + 348, + 533 + ], + "score": 1.0, + "content": "length leading to a time-out and a reset, which we define as", + "type": "text" + }, + { + "bbox": [ + 348, + 522, + 365, + 532 + ], + "score": 0.59, + "content": "2 0 \\mathrm { s }", + "type": "inline_equation" + }, + { + "bbox": [ + 366, + 521, + 506, + 533 + ], + "score": 1.0, + "content": ". The environments are reset when", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 106, + 532, + 505, + 545 + ], + "spans": [ + { + "bbox": [ + 106, + 532, + 505, + 545 + ], + "score": 1.0, + "content": "they reach this maximum length and not after each iteration, meaning that a single episode can cover", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 543, + 505, + 556 + ], + "spans": [ + { + "bbox": [ + 105, + 543, + 505, + 556 + ], + "score": 1.0, + "content": "many policy updates. 
This limits the total number of robots training in parallel, and consequently,", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 106, + 554, + 391, + 566 + ], + "spans": [ + { + "bbox": [ + 106, + 554, + 391, + 566 + ], + "score": 1.0, + "content": "prohibits us from using the full computational capabilities of the GPU.", + "type": "text" + } + ], + "index": 38 + } + ], + "index": 28.5, + "bbox_fs": [ + 104, + 347, + 506, + 566 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 570, + 504, + 624 + ], + "lines": [ + { + "bbox": [ + 107, + 570, + 505, + 582 + ], + "spans": [ + { + "bbox": [ + 107, + 570, + 505, + 582 + ], + "score": 1.0, + "content": "The mini-batch size represents the size of the chunks in which the batch size is split to perform back-", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 581, + 506, + 593 + ], + "spans": [ + { + "bbox": [ + 105, + 581, + 506, + 593 + ], + "score": 1.0, + "content": "propagation. We find that having mini-batch sizes much larger than what is usually considered best", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 592, + 506, + 604 + ], + "spans": [ + { + "bbox": [ + 105, + 592, + 506, + 604 + ], + "score": 1.0, + "content": "practice is beneficial for our massively parallel use case. 
We use mini-batches of tens of thousands", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 601, + 506, + 617 + ], + "spans": [ + { + "bbox": [ + 105, + 601, + 506, + 617 + ], + "score": 1.0, + "content": "of samples and observe that it stabilizes the learning process without increasing the total training", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 613, + 129, + 626 + ], + "spans": [ + { + "bbox": [ + 105, + 613, + 129, + 626 + ], + "score": 1.0, + "content": "time.", + "type": "text" + } + ], + "index": 43 + } + ], + "index": 41, + "bbox_fs": [ + 105, + 570, + 506, + 626 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 637, + 204, + 649 + ], + "lines": [ + { + "bbox": [ + 105, + 634, + 205, + 652 + ], + "spans": [ + { + "bbox": [ + 105, + 634, + 205, + 652 + ], + "score": 1.0, + "content": "2.2.2 Reset Handling", + "type": "text" + } + ], + "index": 44 + } + ], + "index": 44 + }, + { + "type": "text", + "bbox": [ + 107, + 656, + 504, + 722 + ], + "lines": [ + { + "bbox": [ + 106, + 655, + 505, + 668 + ], + "spans": [ + { + "bbox": [ + 106, + 655, + 505, + 668 + ], + "score": 1.0, + "content": "During training, the robots must be reset whenever they fall, and also after some time to keep them", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 105, + 667, + 506, + 680 + ], + "spans": [ + { + "bbox": [ + 105, + 667, + 506, + 680 + ], + "score": 1.0, + "content": "exploring new trajectories and terrains. The PPO algorithm includes a critic predicting an infinite", + "type": "text" + } + ], + "index": 46 + }, + { + "bbox": [ + 105, + 677, + 506, + 691 + ], + "spans": [ + { + "bbox": [ + 105, + 677, + 506, + 691 + ], + "score": 1.0, + "content": "horizon sum of future discounted rewards. 
Resets break this infinite horizon assumption and can", + "type": "text" + } + ], + "index": 47 + }, + { + "bbox": [ + 105, + 688, + 506, + 702 + ], + "spans": [ + { + "bbox": [ + 105, + 688, + 506, + 702 + ], + "score": 1.0, + "content": "lead to inferior critic performance if not handled carefully. Resets based on failure or reaching a", + "type": "text" + } + ], + "index": 48 + }, + { + "bbox": [ + 105, + 699, + 506, + 713 + ], + "spans": [ + { + "bbox": [ + 105, + 699, + 506, + 713 + ], + "score": 1.0, + "content": "goal are not a problem because the critic can predict them. However, a reset based on a time out can", + "type": "text" + } + ], + "index": 49 + }, + { + "bbox": [ + 105, + 711, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 711, + 505, + 723 + ], + "score": 1.0, + "content": "not be predicted (we do not provide episode time in the observations). The solution is to distinguish", + "type": "text" + } + ], + "index": 50 + }, + { + "bbox": [ + 106, + 196, + 505, + 208 + ], + "spans": [ + { + "bbox": [ + 106, + 196, + 505, + 208 + ], + "score": 1.0, + "content": "the two termination modes and augment the reward with the expected infinite sum of discounted", + "type": "text", + "cross_page": true + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 207, + 505, + 219 + ], + "spans": [ + { + "bbox": [ + 105, + 207, + 505, + 219 + ], + "score": 1.0, + "content": "future rewards in a time-out case. In other words, we bootstrap the target of the critic with its", + "type": "text", + "cross_page": true + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 217, + 505, + 231 + ], + "spans": [ + { + "bbox": [ + 105, + 217, + 505, + 231 + ], + "score": 1.0, + "content": "own prediction. 
This solution has been discussed in [24], but interestingly, this distinction is not", + "type": "text", + "cross_page": true + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 228, + 506, + 242 + ], + "spans": [ + { + "bbox": [ + 105, + 228, + 506, + 242 + ], + "score": 1.0, + "content": "part of the widely used Gym environment interface [25] and is ignored by popular implementations", + "type": "text", + "cross_page": true + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 239, + 506, + 253 + ], + "spans": [ + { + "bbox": [ + 105, + 239, + 207, + 253 + ], + "score": 1.0, + "content": "such as Stable-Baselines", + "type": "text", + "cross_page": true + }, + { + "bbox": [ + 208, + 239, + 229, + 251 + ], + "score": 0.44, + "content": "[ 2 6 ] ^ { 1 }", + "type": "inline_equation", + "cross_page": true + }, + { + "bbox": [ + 229, + 239, + 506, + 253 + ], + "score": 1.0, + "content": ". After investigating multiple implementations, we conclude that this", + "type": "text", + "cross_page": true + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 252, + 505, + 263 + ], + "spans": [ + { + "bbox": [ + 106, + 252, + 505, + 263 + ], + "score": 1.0, + "content": "important detail is often avoided by assuming that the environments either never time out or only on", + "type": "text", + "cross_page": true + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 261, + 505, + 274 + ], + "spans": [ + { + "bbox": [ + 106, + 261, + 505, + 274 + ], + "score": 1.0, + "content": "the very last step of a batch collection. In our case, with few robot steps per batch, we can not make", + "type": "text", + "cross_page": true + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 272, + 505, + 285 + ], + "spans": [ + { + "bbox": [ + 106, + 272, + 505, + 285 + ], + "score": 1.0, + "content": "such an assumption since a meaningful episode length covers the collection of many batches. 
We", + "type": "text", + "cross_page": true + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 284, + 505, + 295 + ], + "spans": [ + { + "bbox": [ + 106, + 284, + 505, + 295 + ], + "score": 1.0, + "content": "modify the standard Gym interface to detect time-outs and implement the bootstrapping solution. In", + "type": "text", + "cross_page": true + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 295, + 505, + 306 + ], + "spans": [ + { + "bbox": [ + 105, + 295, + 505, + 306 + ], + "score": 1.0, + "content": "supplementary material, we show the effect of this solution on the total reward as well as the critic", + "type": "text", + "cross_page": true + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 306, + 128, + 317 + ], + "spans": [ + { + "bbox": [ + 105, + 306, + 128, + 317 + ], + "score": 1.0, + "content": "loss.", + "type": "text", + "cross_page": true + } + ], + "index": 16 + } + ], + "index": 47.5, + "bbox_fs": [ + 105, + 655, + 506, + 723 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "image", + "bbox": [ + 109, + 80, + 502, + 148 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 109, + 80, + 502, + 148 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 109, + 80, + 502, + 148 + ], + "spans": [ + { + "bbox": [ + 109, + 80, + 502, + 148 + ], + "score": 0.957, + "type": "image", + "image_path": "9b48083b251353c43d2e71ff8968b79f99600e9e05461c3dd723d12ace8ceddd.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 109, + 80, + 502, + 102.66666666666667 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 109, + 102.66666666666667, + 502, + 125.33333333333334 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 109, + 125.33333333333334, + 502, + 148.0 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 107, + 155, + 505, + 187 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 154, + 505, + 167 + ], + "spans": [ + { + "bbox": [ + 105, + 154, + 505, + 167 + ], 
+ "score": 1.0, + "content": "Figure 2: Terrain types used for training and testing in simulation. (a) Randomly rough terrain with", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 165, + 504, + 177 + ], + "spans": [ + { + "bbox": [ + 106, + 165, + 158, + 177 + ], + "score": 1.0, + "content": "variations of", + "type": "text" + }, + { + "bbox": [ + 159, + 166, + 182, + 176 + ], + "score": 0.67, + "content": "0 . 1 \\mathrm { m }", + "type": "inline_equation" + }, + { + "bbox": [ + 183, + 165, + 346, + 177 + ], + "score": 1.0, + "content": ". (b) Sloped terrain with an inclination of", + "type": "text" + }, + { + "bbox": [ + 347, + 165, + 375, + 177 + ], + "score": 0.27, + "content": "2 5 \\mathrm { d e g }", + "type": "inline_equation" + }, + { + "bbox": [ + 375, + 165, + 479, + 177 + ], + "score": 1.0, + "content": ". (c) Stairs with a width of", + "type": "text" + }, + { + "bbox": [ + 480, + 165, + 504, + 176 + ], + "score": 0.74, + "content": "0 . 3 \\mathrm { m }", + "type": "inline_equation" + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 176, + 452, + 188 + ], + "spans": [ + { + "bbox": [ + 106, + 176, + 161, + 188 + ], + "score": 1.0, + "content": "and height of", + "type": "text" + }, + { + "bbox": [ + 162, + 177, + 186, + 187 + ], + "score": 0.61, + "content": "\\mathrm { 0 . 2 m }", + "type": "inline_equation" + }, + { + "bbox": [ + 186, + 176, + 416, + 188 + ], + "score": 1.0, + "content": ". (d) Randomized, discrete obstacles with heights of up to", + "type": "text" + }, + { + "bbox": [ + 417, + 176, + 448, + 187 + ], + "score": 0.89, + "content": "\\pm 0 . 
2 \\mathrm { m }", + "type": "inline_equation" + }, + { + "bbox": [ + 448, + 176, + 452, + 188 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 4 + } + ], + "index": 2.5 + }, + { + "type": "text", + "bbox": [ + 107, + 196, + 505, + 316 + ], + "lines": [ + { + "bbox": [ + 106, + 196, + 505, + 208 + ], + "spans": [ + { + "bbox": [ + 106, + 196, + 505, + 208 + ], + "score": 1.0, + "content": "the two termination modes and augment the reward with the expected infinite sum of discounted", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 207, + 505, + 219 + ], + "spans": [ + { + "bbox": [ + 105, + 207, + 505, + 219 + ], + "score": 1.0, + "content": "future rewards in a time-out case. In other words, we bootstrap the target of the critic with its", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 217, + 505, + 231 + ], + "spans": [ + { + "bbox": [ + 105, + 217, + 505, + 231 + ], + "score": 1.0, + "content": "own prediction. This solution has been discussed in [24], but interestingly, this distinction is not", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 228, + 506, + 242 + ], + "spans": [ + { + "bbox": [ + 105, + 228, + 506, + 242 + ], + "score": 1.0, + "content": "part of the widely used Gym environment interface [25] and is ignored by popular implementations", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 239, + 506, + 253 + ], + "spans": [ + { + "bbox": [ + 105, + 239, + 207, + 253 + ], + "score": 1.0, + "content": "such as Stable-Baselines", + "type": "text" + }, + { + "bbox": [ + 208, + 239, + 229, + 251 + ], + "score": 0.44, + "content": "[ 2 6 ] ^ { 1 }", + "type": "inline_equation" + }, + { + "bbox": [ + 229, + 239, + 506, + 253 + ], + "score": 1.0, + "content": ". 
After investigating multiple implementations, we conclude that this", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 252, + 505, + 263 + ], + "spans": [ + { + "bbox": [ + 106, + 252, + 505, + 263 + ], + "score": 1.0, + "content": "important detail is often avoided by assuming that the environments either never time out or only on", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 261, + 505, + 274 + ], + "spans": [ + { + "bbox": [ + 106, + 261, + 505, + 274 + ], + "score": 1.0, + "content": "the very last step of a batch collection. In our case, with few robot steps per batch, we can not make", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 272, + 505, + 285 + ], + "spans": [ + { + "bbox": [ + 106, + 272, + 505, + 285 + ], + "score": 1.0, + "content": "such an assumption since a meaningful episode length covers the collection of many batches. We", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 284, + 505, + 295 + ], + "spans": [ + { + "bbox": [ + 106, + 284, + 505, + 295 + ], + "score": 1.0, + "content": "modify the standard Gym interface to detect time-outs and implement the bootstrapping solution. 
In", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 295, + 505, + 306 + ], + "spans": [ + { + "bbox": [ + 105, + 295, + 505, + 306 + ], + "score": 1.0, + "content": "supplementary material, we show the effect of this solution on the total reward as well as the critic", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 306, + 128, + 317 + ], + "spans": [ + { + "bbox": [ + 105, + 306, + 128, + 317 + ], + "score": 1.0, + "content": "loss.", + "type": "text" + } + ], + "index": 16 + } + ], + "index": 11 + }, + { + "type": "title", + "bbox": [ + 107, + 333, + 212, + 346 + ], + "lines": [ + { + "bbox": [ + 104, + 331, + 213, + 349 + ], + "spans": [ + { + "bbox": [ + 104, + 331, + 213, + 349 + ], + "score": 1.0, + "content": "3 Task Description", + "type": "text" + } + ], + "index": 17 + } + ], + "index": 17 + }, + { + "type": "text", + "bbox": [ + 107, + 358, + 505, + 413 + ], + "lines": [ + { + "bbox": [ + 105, + 358, + 505, + 371 + ], + "spans": [ + { + "bbox": [ + 105, + 358, + 505, + 371 + ], + "score": 1.0, + "content": "A quadruped robot must learn to walk across challenging terrain, including uneven surfaces, slopes,", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 369, + 505, + 380 + ], + "spans": [ + { + "bbox": [ + 105, + 369, + 505, + 380 + ], + "score": 1.0, + "content": "stairs, and obstacles, while following base-heading and linear-velocity commands. 
We conduct", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 380, + 505, + 392 + ], + "spans": [ + { + "bbox": [ + 105, + 380, + 505, + 392 + ], + "score": 1.0, + "content": "most of the simulation and real-world deployment experiments on the ANYbotics ANYmal C robot.", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 390, + 504, + 403 + ], + "spans": [ + { + "bbox": [ + 105, + 390, + 504, + 403 + ], + "score": 1.0, + "content": "However, in simulation, we demonstrate the broader applicability of the approach by additionally", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 401, + 483, + 415 + ], + "spans": [ + { + "bbox": [ + 105, + 401, + 483, + 415 + ], + "score": 1.0, + "content": "training policies for ANYmal B, ANYmal C with an attached arm, and the Unitree A1 robots.", + "type": "text" + } + ], + "index": 22 + } + ], + "index": 20 + }, + { + "type": "title", + "bbox": [ + 108, + 426, + 248, + 438 + ], + "lines": [ + { + "bbox": [ + 105, + 425, + 249, + 442 + ], + "spans": [ + { + "bbox": [ + 105, + 425, + 249, + 442 + ], + "score": 1.0, + "content": "3.1 Game-Inspired Curriculum", + "type": "text" + } + ], + "index": 23 + } + ], + "index": 23 + }, + { + "type": "text", + "bbox": [ + 107, + 447, + 505, + 513 + ], + "lines": [ + { + "bbox": [ + 106, + 448, + 505, + 459 + ], + "spans": [ + { + "bbox": [ + 106, + 448, + 505, + 459 + ], + "score": 1.0, + "content": "The terrains are selected to be representative of real-world environments. We create five types of", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 458, + 506, + 471 + ], + "spans": [ + { + "bbox": [ + 105, + 458, + 506, + 471 + ], + "score": 1.0, + "content": "procedurally generated terrains presented in Fig. 
2: flat, sloped, randomly rough, discrete obstacles,", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 469, + 505, + 482 + ], + "spans": [ + { + "bbox": [ + 105, + 469, + 290, + 482 + ], + "score": 1.0, + "content": "and stairs. The terrains are tiled squares with", + "type": "text" + }, + { + "bbox": [ + 291, + 470, + 305, + 480 + ], + "score": 0.34, + "content": "8 \\mathrm { m }", + "type": "inline_equation" + }, + { + "bbox": [ + 306, + 469, + 505, + 482 + ], + "score": 1.0, + "content": "sides. The robots start at the center of the terrain", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 480, + 505, + 493 + ], + "spans": [ + { + "bbox": [ + 105, + 480, + 505, + 493 + ], + "score": 1.0, + "content": "and are given randomized heading and velocity commands (kept constant for the duration of an", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 490, + 506, + 504 + ], + "spans": [ + { + "bbox": [ + 105, + 490, + 506, + 504 + ], + "score": 1.0, + "content": "episode) pushing them to walk across the terrain. Slopes and stairs are organized in pyramids to", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 502, + 252, + 514 + ], + "spans": [ + { + "bbox": [ + 106, + 502, + 252, + 514 + ], + "score": 1.0, + "content": "allow traversability in all directions.", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 26.5 + }, + { + "type": "text", + "bbox": [ + 107, + 519, + 505, + 693 + ], + "lines": [ + { + "bbox": [ + 106, + 519, + 505, + 531 + ], + "spans": [ + { + "bbox": [ + 106, + 519, + 505, + 531 + ], + "score": 1.0, + "content": "Previous works have shown the benefits of using an automated curriculum of task difficulty to learn", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 529, + 505, + 542 + ], + "spans": [ + { + "bbox": [ + 105, + 529, + 505, + 542 + ], + "score": 1.0, + "content": "complex locomotion policies [28, 29, 16]. 
Similarly, we find that it is essential to first train the pol-", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 104, + 540, + 506, + 554 + ], + "spans": [ + { + "bbox": [ + 104, + 540, + 506, + 554 + ], + "score": 1.0, + "content": "icy on less challenging terrain before progressively increasing the complexity. We adopt a solution", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 551, + 504, + 564 + ], + "spans": [ + { + "bbox": [ + 105, + 551, + 504, + 564 + ], + "score": 1.0, + "content": "inspired by [16], but replace the particle filter approach with a new game-inspired automatic curricu-", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 562, + 506, + 575 + ], + "spans": [ + { + "bbox": [ + 105, + 562, + 506, + 575 + ], + "score": 1.0, + "content": "lum. All robots are assigned a terrain type and a level that represents the difficulty of that terrain. For", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 573, + 506, + 586 + ], + "spans": [ + { + "bbox": [ + 105, + 573, + 411, + 586 + ], + "score": 1.0, + "content": "stairs and randomized obstacles, we gradually increase the step height from", + "type": "text" + }, + { + "bbox": [ + 412, + 574, + 433, + 584 + ], + "score": 0.28, + "content": "5 \\mathrm { c m }", + "type": "inline_equation" + }, + { + "bbox": [ + 433, + 573, + 444, + 586 + ], + "score": 1.0, + "content": "to", + "type": "text" + }, + { + "bbox": [ + 444, + 573, + 470, + 584 + ], + "score": 0.5, + "content": "2 0 \\mathrm { c m }", + "type": "inline_equation" + }, + { + "bbox": [ + 470, + 573, + 506, + 586 + ], + "score": 1.0, + "content": ". Sloped", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 584, + 506, + 597 + ], + "spans": [ + { + "bbox": [ + 105, + 584, + 506, + 597 + ], + "score": 1.0, + "content": "terrain inclination is increased from 0 deg to 25 deg. 
If a robot manages to walk past the borders of", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 594, + 506, + 608 + ], + "spans": [ + { + "bbox": [ + 105, + 594, + 506, + 608 + ], + "score": 1.0, + "content": "its terrain, its level is increased, and at the next reset, it will start on more difficult terrain. However,", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 605, + 505, + 619 + ], + "spans": [ + { + "bbox": [ + 105, + 605, + 505, + 619 + ], + "score": 1.0, + "content": "if at the end of an episode it moved by less than half of the distance required by its target velocity,", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 617, + 505, + 629 + ], + "spans": [ + { + "bbox": [ + 105, + 617, + 505, + 629 + ], + "score": 1.0, + "content": "its level is reduced again. Robots solving the highest level are looped back to a randomly selected", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 627, + 506, + 641 + ], + "spans": [ + { + "bbox": [ + 105, + 627, + 506, + 641 + ], + "score": 1.0, + "content": "level to increase the diversity and avoid catastrophic forgetting. This approach has the advantage of", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 638, + 506, + 651 + ], + "spans": [ + { + "bbox": [ + 105, + 638, + 506, + 651 + ], + "score": 1.0, + "content": "training the robots at a level of difficulty tailored to their performance without requiring any external", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 106, + 650, + 505, + 662 + ], + "spans": [ + { + "bbox": [ + 106, + 650, + 505, + 662 + ], + "score": 1.0, + "content": "tuning. 
It adapts the difficulty level for each terrain type individually and provides us with visual", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 660, + 506, + 673 + ], + "spans": [ + { + "bbox": [ + 105, + 660, + 506, + 673 + ], + "score": 1.0, + "content": "and quantitative feedback on the progress of the training. When the robots have reached the final", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 670, + 505, + 684 + ], + "spans": [ + { + "bbox": [ + 105, + 670, + 505, + 684 + ], + "score": 1.0, + "content": "level and are evenly spread across all terrains due to looping back, we can conclude they have fully", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 682, + 207, + 694 + ], + "spans": [ + { + "bbox": [ + 105, + 682, + 207, + 694 + ], + "score": 1.0, + "content": "learned to solve the task.", + "type": "text" + } + ], + "index": 45 + } + ], + "index": 37.5 + } + ], + "page_idx": 3, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 106, + 701, + 505, + 722 + ], + "lines": [ + { + "bbox": [ + 118, + 699, + 506, + 714 + ], + "spans": [ + { + "bbox": [ + 118, + 699, + 506, + 714 + ], + "score": 1.0, + "content": "1The Spinning-up [27] implementation of PPO uses the same bootstrapping solution by keeping track of", + "type": "text" + } + ] + }, + { + "bbox": [ + 105, + 711, + 442, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 711, + 442, + 723 + ], + "score": 1.0, + "content": "episode lengths within the algorithm, thus circumventing the limitation of the Gym interface.", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 302, + 742, + 308, + 750 + ], + "lines": [ + { + "bbox": [ + 301, + 741, + 310, + 752 + ], + "spans": [ + { + "bbox": [ + 301, + 741, + 310, + 752 + ], + "score": 1.0, + "content": "", + "type": "text", + "height": 11, + "width": 9 + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "image", 
+ "bbox": [ + 109, + 80, + 502, + 148 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 109, + 80, + 502, + 148 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 109, + 80, + 502, + 148 + ], + "spans": [ + { + "bbox": [ + 109, + 80, + 502, + 148 + ], + "score": 0.957, + "type": "image", + "image_path": "9b48083b251353c43d2e71ff8968b79f99600e9e05461c3dd723d12ace8ceddd.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 109, + 80, + 502, + 102.66666666666667 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 109, + 102.66666666666667, + 502, + 125.33333333333334 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 109, + 125.33333333333334, + 502, + 148.0 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 107, + 155, + 505, + 187 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 154, + 505, + 167 + ], + "spans": [ + { + "bbox": [ + 105, + 154, + 505, + 167 + ], + "score": 1.0, + "content": "Figure 2: Terrain types used for training and testing in simulation. (a) Randomly rough terrain with", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 165, + 504, + 177 + ], + "spans": [ + { + "bbox": [ + 106, + 165, + 158, + 177 + ], + "score": 1.0, + "content": "variations of", + "type": "text" + }, + { + "bbox": [ + 159, + 166, + 182, + 176 + ], + "score": 0.67, + "content": "0 . 1 \\mathrm { m }", + "type": "inline_equation" + }, + { + "bbox": [ + 183, + 165, + 346, + 177 + ], + "score": 1.0, + "content": ". (b) Sloped terrain with an inclination of", + "type": "text" + }, + { + "bbox": [ + 347, + 165, + 375, + 177 + ], + "score": 0.27, + "content": "2 5 \\mathrm { d e g }", + "type": "inline_equation" + }, + { + "bbox": [ + 375, + 165, + 479, + 177 + ], + "score": 1.0, + "content": ". (c) Stairs with a width of", + "type": "text" + }, + { + "bbox": [ + 480, + 165, + 504, + 176 + ], + "score": 0.74, + "content": "0 . 
3 \\mathrm { m }", + "type": "inline_equation" + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 176, + 452, + 188 + ], + "spans": [ + { + "bbox": [ + 106, + 176, + 161, + 188 + ], + "score": 1.0, + "content": "and height of", + "type": "text" + }, + { + "bbox": [ + 162, + 177, + 186, + 187 + ], + "score": 0.61, + "content": "\\mathrm { 0 . 2 m }", + "type": "inline_equation" + }, + { + "bbox": [ + 186, + 176, + 416, + 188 + ], + "score": 1.0, + "content": ". (d) Randomized, discrete obstacles with heights of up to", + "type": "text" + }, + { + "bbox": [ + 417, + 176, + 448, + 187 + ], + "score": 0.89, + "content": "\\pm 0 . 2 \\mathrm { m }", + "type": "inline_equation" + }, + { + "bbox": [ + 448, + 176, + 452, + 188 + ], + "score": 1.0, + "content": ".", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 4 + } + ], + "index": 2.5 + }, + { + "type": "text", + "bbox": [ + 107, + 196, + 505, + 316 + ], + "lines": [], + "index": 11, + "bbox_fs": [ + 105, + 196, + 506, + 317 + ], + "lines_deleted": true + }, + { + "type": "title", + "bbox": [ + 107, + 333, + 212, + 346 + ], + "lines": [ + { + "bbox": [ + 104, + 331, + 213, + 349 + ], + "spans": [ + { + "bbox": [ + 104, + 331, + 213, + 349 + ], + "score": 1.0, + "content": "3 Task Description", + "type": "text" + } + ], + "index": 17 + } + ], + "index": 17 + }, + { + "type": "text", + "bbox": [ + 107, + 358, + 505, + 413 + ], + "lines": [ + { + "bbox": [ + 105, + 358, + 505, + 371 + ], + "spans": [ + { + "bbox": [ + 105, + 358, + 505, + 371 + ], + "score": 1.0, + "content": "A quadruped robot must learn to walk across challenging terrain, including uneven surfaces, slopes,", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 369, + 505, + 380 + ], + "spans": [ + { + "bbox": [ + 105, + 369, + 505, + 380 + ], + "score": 1.0, + "content": "stairs, and obstacles, while following base-heading and linear-velocity commands. 
We conduct", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 380, + 505, + 392 + ], + "spans": [ + { + "bbox": [ + 105, + 380, + 505, + 392 + ], + "score": 1.0, + "content": "most of the simulation and real-world deployment experiments on the ANYbotics ANYmal C robot.", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 390, + 504, + 403 + ], + "spans": [ + { + "bbox": [ + 105, + 390, + 504, + 403 + ], + "score": 1.0, + "content": "However, in simulation, we demonstrate the broader applicability of the approach by additionally", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 401, + 483, + 415 + ], + "spans": [ + { + "bbox": [ + 105, + 401, + 483, + 415 + ], + "score": 1.0, + "content": "training policies for ANYmal B, ANYmal C with an attached arm, and the Unitree A1 robots.", + "type": "text" + } + ], + "index": 22 + } + ], + "index": 20, + "bbox_fs": [ + 105, + 358, + 505, + 415 + ] + }, + { + "type": "title", + "bbox": [ + 108, + 426, + 248, + 438 + ], + "lines": [ + { + "bbox": [ + 105, + 425, + 249, + 442 + ], + "spans": [ + { + "bbox": [ + 105, + 425, + 249, + 442 + ], + "score": 1.0, + "content": "3.1 Game-Inspired Curriculum", + "type": "text" + } + ], + "index": 23 + } + ], + "index": 23 + }, + { + "type": "text", + "bbox": [ + 107, + 447, + 505, + 513 + ], + "lines": [ + { + "bbox": [ + 106, + 448, + 505, + 459 + ], + "spans": [ + { + "bbox": [ + 106, + 448, + 505, + 459 + ], + "score": 1.0, + "content": "The terrains are selected to be representative of real-world environments. We create five types of", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 458, + 506, + 471 + ], + "spans": [ + { + "bbox": [ + 105, + 458, + 506, + 471 + ], + "score": 1.0, + "content": "procedurally generated terrains presented in Fig. 
2: flat, sloped, randomly rough, discrete obstacles,", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 469, + 505, + 482 + ], + "spans": [ + { + "bbox": [ + 105, + 469, + 290, + 482 + ], + "score": 1.0, + "content": "and stairs. The terrains are tiled squares with", + "type": "text" + }, + { + "bbox": [ + 291, + 470, + 305, + 480 + ], + "score": 0.34, + "content": "8 \\mathrm { m }", + "type": "inline_equation" + }, + { + "bbox": [ + 306, + 469, + 505, + 482 + ], + "score": 1.0, + "content": "sides. The robots start at the center of the terrain", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 480, + 505, + 493 + ], + "spans": [ + { + "bbox": [ + 105, + 480, + 505, + 493 + ], + "score": 1.0, + "content": "and are given randomized heading and velocity commands (kept constant for the duration of an", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 490, + 506, + 504 + ], + "spans": [ + { + "bbox": [ + 105, + 490, + 506, + 504 + ], + "score": 1.0, + "content": "episode) pushing them to walk across the terrain. 
Slopes and stairs are organized in pyramids to", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 502, + 252, + 514 + ], + "spans": [ + { + "bbox": [ + 106, + 502, + 252, + 514 + ], + "score": 1.0, + "content": "allow traversability in all directions.", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 26.5, + "bbox_fs": [ + 105, + 448, + 506, + 514 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 519, + 505, + 693 + ], + "lines": [ + { + "bbox": [ + 106, + 519, + 505, + 531 + ], + "spans": [ + { + "bbox": [ + 106, + 519, + 505, + 531 + ], + "score": 1.0, + "content": "Previous works have shown the benefits of using an automated curriculum of task difficulty to learn", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 529, + 505, + 542 + ], + "spans": [ + { + "bbox": [ + 105, + 529, + 505, + 542 + ], + "score": 1.0, + "content": "complex locomotion policies [28, 29, 16]. Similarly, we find that it is essential to first train the pol-", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 104, + 540, + 506, + 554 + ], + "spans": [ + { + "bbox": [ + 104, + 540, + 506, + 554 + ], + "score": 1.0, + "content": "icy on less challenging terrain before progressively increasing the complexity. We adopt a solution", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 551, + 504, + 564 + ], + "spans": [ + { + "bbox": [ + 105, + 551, + 504, + 564 + ], + "score": 1.0, + "content": "inspired by [16], but replace the particle filter approach with a new game-inspired automatic curricu-", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 562, + 506, + 575 + ], + "spans": [ + { + "bbox": [ + 105, + 562, + 506, + 575 + ], + "score": 1.0, + "content": "lum. All robots are assigned a terrain type and a level that represents the difficulty of that terrain. 
For", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 573, + 506, + 586 + ], + "spans": [ + { + "bbox": [ + 105, + 573, + 411, + 586 + ], + "score": 1.0, + "content": "stairs and randomized obstacles, we gradually increase the step height from", + "type": "text" + }, + { + "bbox": [ + 412, + 574, + 433, + 584 + ], + "score": 0.28, + "content": "5 \\mathrm { c m }", + "type": "inline_equation" + }, + { + "bbox": [ + 433, + 573, + 444, + 586 + ], + "score": 1.0, + "content": "to", + "type": "text" + }, + { + "bbox": [ + 444, + 573, + 470, + 584 + ], + "score": 0.5, + "content": "2 0 \\mathrm { c m }", + "type": "inline_equation" + }, + { + "bbox": [ + 470, + 573, + 506, + 586 + ], + "score": 1.0, + "content": ". Sloped", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 584, + 506, + 597 + ], + "spans": [ + { + "bbox": [ + 105, + 584, + 506, + 597 + ], + "score": 1.0, + "content": "terrain inclination is increased from 0 deg to 25 deg. If a robot manages to walk past the borders of", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 594, + 506, + 608 + ], + "spans": [ + { + "bbox": [ + 105, + 594, + 506, + 608 + ], + "score": 1.0, + "content": "its terrain, its level is increased, and at the next reset, it will start on more difficult terrain. However,", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 605, + 505, + 619 + ], + "spans": [ + { + "bbox": [ + 105, + 605, + 505, + 619 + ], + "score": 1.0, + "content": "if at the end of an episode it moved by less than half of the distance required by its target velocity,", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 617, + 505, + 629 + ], + "spans": [ + { + "bbox": [ + 105, + 617, + 505, + 629 + ], + "score": 1.0, + "content": "its level is reduced again. 
Robots solving the highest level are looped back to a randomly selected", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 627, + 506, + 641 + ], + "spans": [ + { + "bbox": [ + 105, + 627, + 506, + 641 + ], + "score": 1.0, + "content": "level to increase the diversity and avoid catastrophic forgetting. This approach has the advantage of", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 638, + 506, + 651 + ], + "spans": [ + { + "bbox": [ + 105, + 638, + 506, + 651 + ], + "score": 1.0, + "content": "training the robots at a level of difficulty tailored to their performance without requiring any external", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 106, + 650, + 505, + 662 + ], + "spans": [ + { + "bbox": [ + 106, + 650, + 505, + 662 + ], + "score": 1.0, + "content": "tuning. It adapts the difficulty level for each terrain type individually and provides us with visual", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 660, + 506, + 673 + ], + "spans": [ + { + "bbox": [ + 105, + 660, + 506, + 673 + ], + "score": 1.0, + "content": "and quantitative feedback on the progress of the training. 
When the robots have reached the final", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 670, + 505, + 684 + ], + "spans": [ + { + "bbox": [ + 105, + 670, + 505, + 684 + ], + "score": 1.0, + "content": "level and are evenly spread across all terrains due to looping back, we can conclude they have fully", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 105, + 682, + 207, + 694 + ], + "spans": [ + { + "bbox": [ + 105, + 682, + 207, + 694 + ], + "score": 1.0, + "content": "learned to solve the task.", + "type": "text" + } + ], + "index": 45 + } + ], + "index": 37.5, + "bbox_fs": [ + 104, + 519, + 506, + 694 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 71, + 504, + 254 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 107, + 71, + 504, + 254 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 107, + 71, + 504, + 254 + ], + "spans": [ + { + "bbox": [ + 107, + 71, + 504, + 254 + ], + "score": 0.975, + "type": "image", + "image_path": "ecaf8198256af450c96e9b482c8d3d3a06909d2addf9a2530c761fb4193fd419.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 107, + 71, + 504, + 132.0 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 107, + 132.0, + 504, + 193.0 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 107, + 193.0, + 504, + 254.0 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 107, + 259, + 504, + 293 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 258, + 505, + 272 + ], + "spans": [ + { + "bbox": [ + 105, + 258, + 505, + 272 + ], + "score": 1.0, + "content": "Figure 3: 4000 robots progressing through the terrains with automatic curriculum, after 500 (top)", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 270, + 506, + 282 + ], + "spans": [ + { + "bbox": [ + 106, + 270, + 506, + 282 + ], + "score": 1.0, + "content": "and 1000 (bottom) policy updates. 
The robots start the training session on the first row (closest to", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 281, + 316, + 294 + ], + "spans": [ + { + "bbox": [ + 106, + 281, + 316, + 294 + ], + "score": 1.0, + "content": "the camera) and progressively reach harder terrains.", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 4 + } + ], + "index": 2.5 + }, + { + "type": "text", + "bbox": [ + 107, + 299, + 504, + 366 + ], + "lines": [ + { + "bbox": [ + 106, + 300, + 505, + 312 + ], + "spans": [ + { + "bbox": [ + 106, + 300, + 505, + 312 + ], + "score": 1.0, + "content": "The proposed curriculum structure is well suited for the massively parallel regime. With thousands", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 311, + 506, + 324 + ], + "spans": [ + { + "bbox": [ + 105, + 311, + 506, + 324 + ], + "score": 1.0, + "content": "of robots we can directly use their current progress in the curriculum as the distribution of the", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 321, + 506, + 335 + ], + "spans": [ + { + "bbox": [ + 105, + 321, + 506, + 335 + ], + "score": 1.0, + "content": "policy’s performance, and do not need learn it with a generator network [30]. Furthermore, our", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 333, + 505, + 345 + ], + "spans": [ + { + "bbox": [ + 106, + 333, + 505, + 345 + ], + "score": 1.0, + "content": "method doesn’t require tuning and is straightforward to implement in a parallel manner with near-", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 344, + 505, + 357 + ], + "spans": [ + { + "bbox": [ + 105, + 344, + 505, + 357 + ], + "score": 1.0, + "content": "zero processing cost. 
We remove the computational overhead of re-sampling and re-generating new", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 355, + 293, + 367 + ], + "spans": [ + { + "bbox": [ + 106, + 355, + 293, + 367 + ], + "score": 1.0, + "content": "terrains needed for the particle filter approach.", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 8.5 + }, + { + "type": "text", + "bbox": [ + 107, + 371, + 505, + 448 + ], + "lines": [ + { + "bbox": [ + 105, + 370, + 505, + 384 + ], + "spans": [ + { + "bbox": [ + 105, + 370, + 505, + 384 + ], + "score": 1.0, + "content": "Fig. 3 shows robots progressing through the terrains at two different stages of the training process.", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 381, + 505, + 394 + ], + "spans": [ + { + "bbox": [ + 106, + 381, + 505, + 394 + ], + "score": 1.0, + "content": "On complex terrain types, the robots require more training iterations to reach the highest levels. The", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 392, + 505, + 406 + ], + "spans": [ + { + "bbox": [ + 105, + 392, + 505, + 406 + ], + "score": 1.0, + "content": "distribution of robots after 500 iterations shows that while the policy is able to cross sloped terrains", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 403, + 505, + 416 + ], + "spans": [ + { + "bbox": [ + 105, + 403, + 505, + 416 + ], + "score": 1.0, + "content": "and to go down stairs, climbing stairs and traversing obstacles requires more training iterations.", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 415, + 505, + 427 + ], + "spans": [ + { + "bbox": [ + 105, + 415, + 505, + 427 + ], + "score": 1.0, + "content": "However, after 1000 iterations, the robots have reached the most challenging level for all terrain", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 425, + 505, + 439 + ], + "spans": [ + { + "bbox": [ + 105, + 425, + 505, + 
439 + ], + "score": 1.0, + "content": "types and are spread across the map. We train for a total for 1500 iterations to let the policy converge", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 437, + 215, + 450 + ], + "spans": [ + { + "bbox": [ + 105, + 437, + 215, + 450 + ], + "score": 1.0, + "content": "to its highest performance.", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 15 + }, + { + "type": "title", + "bbox": [ + 109, + 461, + 284, + 472 + ], + "lines": [ + { + "bbox": [ + 106, + 460, + 286, + 474 + ], + "spans": [ + { + "bbox": [ + 106, + 460, + 286, + 474 + ], + "score": 1.0, + "content": "3.2 Observations, Actions, and Rewards", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 19 + }, + { + "type": "text", + "bbox": [ + 107, + 480, + 505, + 536 + ], + "lines": [ + { + "bbox": [ + 106, + 479, + 505, + 493 + ], + "spans": [ + { + "bbox": [ + 106, + 479, + 505, + 493 + ], + "score": 1.0, + "content": "The policy receives proprioceptive measurements of the robot as well as terrain information around", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 492, + 505, + 504 + ], + "spans": [ + { + "bbox": [ + 106, + 492, + 505, + 504 + ], + "score": 1.0, + "content": "the robot’s base. The observations are composed of: base linear and angular velocities, measurement", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 501, + 506, + 516 + ], + "spans": [ + { + "bbox": [ + 105, + 501, + 506, + 516 + ], + "score": 1.0, + "content": "of the gravity vector, joint positions and velocities, the previous actions selected by the policy,", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 513, + 505, + 525 + ], + "spans": [ + { + "bbox": [ + 105, + 513, + 505, + 525 + ], + "score": 1.0, + "content": "and finally, 108 measurements of the terrain sampled from a grid around the robot’s base. 
Each", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 524, + 422, + 537 + ], + "spans": [ + { + "bbox": [ + 105, + 524, + 422, + 537 + ], + "score": 1.0, + "content": "measurement is the distance from the terrain surface to the robot’s base height.", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 22 + }, + { + "type": "text", + "bbox": [ + 107, + 541, + 504, + 628 + ], + "lines": [ + { + "bbox": [ + 105, + 540, + 505, + 553 + ], + "spans": [ + { + "bbox": [ + 105, + 540, + 505, + 553 + ], + "score": 1.0, + "content": "The total reward is a weighted sum of nine terms, detailed in supplementary material. The main", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 551, + 506, + 564 + ], + "spans": [ + { + "bbox": [ + 105, + 551, + 506, + 564 + ], + "score": 1.0, + "content": "terms encourage the robot to follow the commanded velocities while avoiding undesired base ve-", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 563, + 505, + 575 + ], + "spans": [ + { + "bbox": [ + 105, + 563, + 505, + 575 + ], + "score": 1.0, + "content": "locities along other axes. In order to create a smoother, more natural motion, we also penalize joint", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 574, + 506, + 585 + ], + "spans": [ + { + "bbox": [ + 105, + 574, + 506, + 585 + ], + "score": 1.0, + "content": "torques, joint accelerations, joint target changes, and collisions. 
Contacts with the knees, shanks or", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 585, + 506, + 596 + ], + "spans": [ + { + "bbox": [ + 106, + 585, + 506, + 596 + ], + "score": 1.0, + "content": "between the feet and a vertical surface are considered collisions, while contacts with the base are", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 595, + 506, + 608 + ], + "spans": [ + { + "bbox": [ + 105, + 595, + 506, + 608 + ], + "score": 1.0, + "content": "considered crashes and lead to resets. Finally, we add an additional reward term encouraging the", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 606, + 506, + 619 + ], + "spans": [ + { + "bbox": [ + 105, + 606, + 506, + 619 + ], + "score": 1.0, + "content": "robot to take longer steps, which results in a more visually appealing behavior. We train a single", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 617, + 287, + 629 + ], + "spans": [ + { + "bbox": [ + 105, + 617, + 287, + 629 + ], + "score": 1.0, + "content": "policy with the same rewards for all terrains.", + "type": "text" + } + ], + "index": 32 + } + ], + "index": 28.5 + }, + { + "type": "text", + "bbox": [ + 107, + 634, + 505, + 666 + ], + "lines": [ + { + "bbox": [ + 106, + 633, + 506, + 646 + ], + "spans": [ + { + "bbox": [ + 106, + 633, + 506, + 646 + ], + "score": 1.0, + "content": "The actions are interpreted as desired joint positions sent to the motors. There, a PD controller", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 645, + 504, + 656 + ], + "spans": [ + { + "bbox": [ + 105, + 645, + 504, + 656 + ], + "score": 1.0, + "content": "produces motor torques. 
In contrast to other works [16, 20], neither the reward function nor the", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 655, + 293, + 668 + ], + "spans": [ + { + "bbox": [ + 105, + 655, + 293, + 668 + ], + "score": 1.0, + "content": "action space has any gait-dependent elements.", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 34 + }, + { + "type": "title", + "bbox": [ + 107, + 679, + 225, + 691 + ], + "lines": [ + { + "bbox": [ + 105, + 678, + 227, + 693 + ], + "spans": [ + { + "bbox": [ + 105, + 678, + 227, + 693 + ], + "score": 1.0, + "content": "3.3 Sim-to-Real Additions", + "type": "text" + } + ], + "index": 36 + } + ], + "index": 36 + }, + { + "type": "text", + "bbox": [ + 107, + 700, + 503, + 722 + ], + "lines": [ + { + "bbox": [ + 106, + 699, + 505, + 712 + ], + "spans": [ + { + "bbox": [ + 106, + 699, + 505, + 712 + ], + "score": 1.0, + "content": "In order to make the trained policies amenable for sim-to-real transfer, we randomize the friction", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 710, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 710, + 505, + 723 + ], + "score": 1.0, + "content": "of the ground, add noise to the observations and randomly push the robots during the episode to", + "type": "text" + } + ], + "index": 38 + } + ], + "index": 37.5 + } + ], + "page_idx": 4, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 302, + 742, + 308, + 750 + ], + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 752 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 752 + ], + "score": 1.0, + "content": "5", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 71, + 504, + 254 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 107, + 71, + 504, + 254 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 107, + 71, + 504, + 254 + ], + "spans": [ + { + "bbox": [ + 107, + 71, 
+ 504, + 254 + ], + "score": 0.975, + "type": "image", + "image_path": "ecaf8198256af450c96e9b482c8d3d3a06909d2addf9a2530c761fb4193fd419.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 107, + 71, + 504, + 132.0 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 107, + 132.0, + 504, + 193.0 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 107, + 193.0, + 504, + 254.0 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 107, + 259, + 504, + 293 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 258, + 505, + 272 + ], + "spans": [ + { + "bbox": [ + 105, + 258, + 505, + 272 + ], + "score": 1.0, + "content": "Figure 3: 4000 robots progressing through the terrains with automatic curriculum, after 500 (top)", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 270, + 506, + 282 + ], + "spans": [ + { + "bbox": [ + 106, + 270, + 506, + 282 + ], + "score": 1.0, + "content": "and 1000 (bottom) policy updates. The robots start the training session on the first row (closest to", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 281, + 316, + 294 + ], + "spans": [ + { + "bbox": [ + 106, + 281, + 316, + 294 + ], + "score": 1.0, + "content": "the camera) and progressively reach harder terrains.", + "type": "text" + } + ], + "index": 5 + } + ], + "index": 4 + } + ], + "index": 2.5 + }, + { + "type": "text", + "bbox": [ + 107, + 299, + 504, + 366 + ], + "lines": [ + { + "bbox": [ + 106, + 300, + 505, + 312 + ], + "spans": [ + { + "bbox": [ + 106, + 300, + 505, + 312 + ], + "score": 1.0, + "content": "The proposed curriculum structure is well suited for the massively parallel regime. 
With thousands", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 311, + 506, + 324 + ], + "spans": [ + { + "bbox": [ + 105, + 311, + 506, + 324 + ], + "score": 1.0, + "content": "of robots we can directly use their current progress in the curriculum as the distribution of the", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 321, + 506, + 335 + ], + "spans": [ + { + "bbox": [ + 105, + 321, + 506, + 335 + ], + "score": 1.0, + "content": "policy’s performance, and do not need learn it with a generator network [30]. Furthermore, our", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 333, + 505, + 345 + ], + "spans": [ + { + "bbox": [ + 106, + 333, + 505, + 345 + ], + "score": 1.0, + "content": "method doesn’t require tuning and is straightforward to implement in a parallel manner with near-", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 344, + 505, + 357 + ], + "spans": [ + { + "bbox": [ + 105, + 344, + 505, + 357 + ], + "score": 1.0, + "content": "zero processing cost. We remove the computational overhead of re-sampling and re-generating new", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 355, + 293, + 367 + ], + "spans": [ + { + "bbox": [ + 106, + 355, + 293, + 367 + ], + "score": 1.0, + "content": "terrains needed for the particle filter approach.", + "type": "text" + } + ], + "index": 11 + } + ], + "index": 8.5, + "bbox_fs": [ + 105, + 300, + 506, + 367 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 371, + 505, + 448 + ], + "lines": [ + { + "bbox": [ + 105, + 370, + 505, + 384 + ], + "spans": [ + { + "bbox": [ + 105, + 370, + 505, + 384 + ], + "score": 1.0, + "content": "Fig. 
3 shows robots progressing through the terrains at two different stages of the training process.", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 381, + 505, + 394 + ], + "spans": [ + { + "bbox": [ + 106, + 381, + 505, + 394 + ], + "score": 1.0, + "content": "On complex terrain types, the robots require more training iterations to reach the highest levels. The", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 392, + 505, + 406 + ], + "spans": [ + { + "bbox": [ + 105, + 392, + 505, + 406 + ], + "score": 1.0, + "content": "distribution of robots after 500 iterations shows that while the policy is able to cross sloped terrains", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 403, + 505, + 416 + ], + "spans": [ + { + "bbox": [ + 105, + 403, + 505, + 416 + ], + "score": 1.0, + "content": "and to go down stairs, climbing stairs and traversing obstacles requires more training iterations.", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 415, + 505, + 427 + ], + "spans": [ + { + "bbox": [ + 105, + 415, + 505, + 427 + ], + "score": 1.0, + "content": "However, after 1000 iterations, the robots have reached the most challenging level for all terrain", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 425, + 505, + 439 + ], + "spans": [ + { + "bbox": [ + 105, + 425, + 505, + 439 + ], + "score": 1.0, + "content": "types and are spread across the map. 
We train for a total for 1500 iterations to let the policy converge", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 437, + 215, + 450 + ], + "spans": [ + { + "bbox": [ + 105, + 437, + 215, + 450 + ], + "score": 1.0, + "content": "to its highest performance.", + "type": "text" + } + ], + "index": 18 + } + ], + "index": 15, + "bbox_fs": [ + 105, + 370, + 505, + 450 + ] + }, + { + "type": "title", + "bbox": [ + 109, + 461, + 284, + 472 + ], + "lines": [ + { + "bbox": [ + 106, + 460, + 286, + 474 + ], + "spans": [ + { + "bbox": [ + 106, + 460, + 286, + 474 + ], + "score": 1.0, + "content": "3.2 Observations, Actions, and Rewards", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 19 + }, + { + "type": "text", + "bbox": [ + 107, + 480, + 505, + 536 + ], + "lines": [ + { + "bbox": [ + 106, + 479, + 505, + 493 + ], + "spans": [ + { + "bbox": [ + 106, + 479, + 505, + 493 + ], + "score": 1.0, + "content": "The policy receives proprioceptive measurements of the robot as well as terrain information around", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 492, + 505, + 504 + ], + "spans": [ + { + "bbox": [ + 106, + 492, + 505, + 504 + ], + "score": 1.0, + "content": "the robot’s base. The observations are composed of: base linear and angular velocities, measurement", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 501, + 506, + 516 + ], + "spans": [ + { + "bbox": [ + 105, + 501, + 506, + 516 + ], + "score": 1.0, + "content": "of the gravity vector, joint positions and velocities, the previous actions selected by the policy,", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 513, + 505, + 525 + ], + "spans": [ + { + "bbox": [ + 105, + 513, + 505, + 525 + ], + "score": 1.0, + "content": "and finally, 108 measurements of the terrain sampled from a grid around the robot’s base. 
Each", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 524, + 422, + 537 + ], + "spans": [ + { + "bbox": [ + 105, + 524, + 422, + 537 + ], + "score": 1.0, + "content": "measurement is the distance from the terrain surface to the robot’s base height.", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 22, + "bbox_fs": [ + 105, + 479, + 506, + 537 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 541, + 504, + 628 + ], + "lines": [ + { + "bbox": [ + 105, + 540, + 505, + 553 + ], + "spans": [ + { + "bbox": [ + 105, + 540, + 505, + 553 + ], + "score": 1.0, + "content": "The total reward is a weighted sum of nine terms, detailed in supplementary material. The main", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 551, + 506, + 564 + ], + "spans": [ + { + "bbox": [ + 105, + 551, + 506, + 564 + ], + "score": 1.0, + "content": "terms encourage the robot to follow the commanded velocities while avoiding undesired base ve-", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 563, + 505, + 575 + ], + "spans": [ + { + "bbox": [ + 105, + 563, + 505, + 575 + ], + "score": 1.0, + "content": "locities along other axes. In order to create a smoother, more natural motion, we also penalize joint", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 574, + 506, + 585 + ], + "spans": [ + { + "bbox": [ + 105, + 574, + 506, + 585 + ], + "score": 1.0, + "content": "torques, joint accelerations, joint target changes, and collisions. 
Contacts with the knees, shanks or", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 585, + 506, + 596 + ], + "spans": [ + { + "bbox": [ + 106, + 585, + 506, + 596 + ], + "score": 1.0, + "content": "between the feet and a vertical surface are considered collisions, while contacts with the base are", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 595, + 506, + 608 + ], + "spans": [ + { + "bbox": [ + 105, + 595, + 506, + 608 + ], + "score": 1.0, + "content": "considered crashes and lead to resets. Finally, we add an additional reward term encouraging the", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 606, + 506, + 619 + ], + "spans": [ + { + "bbox": [ + 105, + 606, + 506, + 619 + ], + "score": 1.0, + "content": "robot to take longer steps, which results in a more visually appealing behavior. We train a single", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 617, + 287, + 629 + ], + "spans": [ + { + "bbox": [ + 105, + 617, + 287, + 629 + ], + "score": 1.0, + "content": "policy with the same rewards for all terrains.", + "type": "text" + } + ], + "index": 32 + } + ], + "index": 28.5, + "bbox_fs": [ + 105, + 540, + 506, + 629 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 634, + 505, + 666 + ], + "lines": [ + { + "bbox": [ + 106, + 633, + 506, + 646 + ], + "spans": [ + { + "bbox": [ + 106, + 633, + 506, + 646 + ], + "score": 1.0, + "content": "The actions are interpreted as desired joint positions sent to the motors. There, a PD controller", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 645, + 504, + 656 + ], + "spans": [ + { + "bbox": [ + 105, + 645, + 504, + 656 + ], + "score": 1.0, + "content": "produces motor torques. 
In contrast to other works [16, 20], neither the reward function nor the", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 655, + 293, + 668 + ], + "spans": [ + { + "bbox": [ + 105, + 655, + 293, + 668 + ], + "score": 1.0, + "content": "action space has any gait-dependent elements.", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 34, + "bbox_fs": [ + 105, + 633, + 506, + 668 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 679, + 225, + 691 + ], + "lines": [ + { + "bbox": [ + 105, + 678, + 227, + 693 + ], + "spans": [ + { + "bbox": [ + 105, + 678, + 227, + 693 + ], + "score": 1.0, + "content": "3.3 Sim-to-Real Additions", + "type": "text" + } + ], + "index": 36 + } + ], + "index": 36 + }, + { + "type": "text", + "bbox": [ + 107, + 700, + 503, + 722 + ], + "lines": [ + { + "bbox": [ + 106, + 699, + 505, + 712 + ], + "spans": [ + { + "bbox": [ + 106, + 699, + 505, + 712 + ], + "score": 1.0, + "content": "In order to make the trained policies amenable for sim-to-real transfer, we randomize the friction", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 710, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 710, + 505, + 723 + ], + "score": 1.0, + "content": "of the ground, add noise to the observations and randomly push the robots during the episode to", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 72, + 505, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 505, + 85 + ], + "score": 1.0, + "content": "teach them a more stable stance. Each robot has a friction coefficient sampled uniformly in [0.5,", + "type": "text", + "cross_page": true + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 83, + 506, + 96 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 239, + 96 + ], + "score": 1.0, + "content": "1.25]. 
The pushes happen every", + "type": "text", + "cross_page": true + }, + { + "bbox": [ + 240, + 84, + 257, + 94 + ], + "score": 0.46, + "content": "1 0 \\mathrm { s }", + "type": "inline_equation", + "cross_page": true + }, + { + "bbox": [ + 257, + 83, + 414, + 96 + ], + "score": 1.0, + "content": ". The robots’ base is accelerated up to", + "type": "text", + "cross_page": true + }, + { + "bbox": [ + 414, + 84, + 447, + 96 + ], + "score": 0.9, + "content": "\\pm 1 \\mathrm { m } / \\mathrm { s }", + "type": "inline_equation", + "cross_page": true + }, + { + "bbox": [ + 447, + 83, + 479, + 96 + ], + "score": 1.0, + "content": "in both", + "type": "text", + "cross_page": true + }, + { + "bbox": [ + 480, + 85, + 487, + 94 + ], + "score": 0.36, + "content": "\\mathbf { X }", + "type": "inline_equation", + "cross_page": true + }, + { + "bbox": [ + 487, + 83, + 506, + 96 + ], + "score": 1.0, + "content": "and", + "type": "text", + "cross_page": true + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 94, + 506, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 94, + 506, + 106 + ], + "score": 1.0, + "content": "y directions. The amount of noise is based on real data measured on the robot and is detailed in", + "type": "text", + "cross_page": true + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 106, + 205, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 205, + 118 + ], + "score": 1.0, + "content": "supplementary material.", + "type": "text", + "cross_page": true + } + ], + "index": 3 + } + ], + "index": 37.5, + "bbox_fs": [ + 105, + 699, + 505, + 723 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 107, + 73, + 505, + 117 + ], + "lines": [ + { + "bbox": [ + 105, + 72, + 505, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 505, + 85 + ], + "score": 1.0, + "content": "teach them a more stable stance. 
Each robot has a friction coefficient sampled uniformly in [0.5,", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 83, + 506, + 96 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 239, + 96 + ], + "score": 1.0, + "content": "1.25]. The pushes happen every", + "type": "text" + }, + { + "bbox": [ + 240, + 84, + 257, + 94 + ], + "score": 0.46, + "content": "1 0 \\mathrm { s }", + "type": "inline_equation" + }, + { + "bbox": [ + 257, + 83, + 414, + 96 + ], + "score": 1.0, + "content": ". The robots’ base is accelerated up to", + "type": "text" + }, + { + "bbox": [ + 414, + 84, + 447, + 96 + ], + "score": 0.9, + "content": "\\pm 1 \\mathrm { m } / \\mathrm { s }", + "type": "inline_equation" + }, + { + "bbox": [ + 447, + 83, + 479, + 96 + ], + "score": 1.0, + "content": "in both", + "type": "text" + }, + { + "bbox": [ + 480, + 85, + 487, + 94 + ], + "score": 0.36, + "content": "\\mathbf { X }", + "type": "inline_equation" + }, + { + "bbox": [ + 487, + 83, + 506, + 96 + ], + "score": 1.0, + "content": "and", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 94, + 506, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 94, + 506, + 106 + ], + "score": 1.0, + "content": "y directions. 
The amount of noise is based on real data measured on the robot and is detailed in", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 106, + 205, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 205, + 118 + ], + "score": 1.0, + "content": "supplementary material.", + "type": "text" + } + ], + "index": 3 + } + ], + "index": 1.5 + }, + { + "type": "text", + "bbox": [ + 106, + 122, + 505, + 221 + ], + "lines": [ + { + "bbox": [ + 106, + 121, + 505, + 133 + ], + "spans": [ + { + "bbox": [ + 106, + 121, + 505, + 133 + ], + "score": 1.0, + "content": "The ANYmal robot uses series elastic actuators with fairly complex dynamics, which are hard to", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 132, + 506, + 145 + ], + "spans": [ + { + "bbox": [ + 105, + 132, + 506, + 145 + ], + "score": 1.0, + "content": "model in simulation. For this reason and following the methodology of previous work [1], we use a", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 144, + 505, + 156 + ], + "spans": [ + { + "bbox": [ + 106, + 144, + 505, + 156 + ], + "score": 1.0, + "content": "neural network to compute torques from joint position commands. However, we simplify the inputs", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 154, + 506, + 167 + ], + "spans": [ + { + "bbox": [ + 106, + 154, + 506, + 167 + ], + "score": 1.0, + "content": "of the model. 
Instead of concatenating past measurements at fixed time steps and sending all of that", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 165, + 505, + 178 + ], + "spans": [ + { + "bbox": [ + 105, + 165, + 505, + 178 + ], + "score": 1.0, + "content": "information to a standard feed-forward network, we only provide the current measurements to an", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 176, + 506, + 189 + ], + "spans": [ + { + "bbox": [ + 105, + 176, + 506, + 189 + ], + "score": 1.0, + "content": "LSTM network. A potential drawback of this set-up is that the policy does not have the temporal", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 187, + 506, + 200 + ], + "spans": [ + { + "bbox": [ + 105, + 187, + 506, + 200 + ], + "score": 1.0, + "content": "information of the actuators as in previous work. We have experimented with various ways of", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 199, + 505, + 210 + ], + "spans": [ + { + "bbox": [ + 106, + 199, + 505, + 210 + ], + "score": 1.0, + "content": "providing that information through memory mechanisms for the policy but found that it does not", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 209, + 231, + 223 + ], + "spans": [ + { + "bbox": [ + 106, + 209, + 231, + 223 + ], + "score": 1.0, + "content": "improve the final performance.", + "type": "text" + } + ], + "index": 12 + } + ], + "index": 8 + }, + { + "type": "title", + "bbox": [ + 107, + 236, + 163, + 250 + ], + "lines": [ + { + "bbox": [ + 105, + 235, + 165, + 252 + ], + "spans": [ + { + "bbox": [ + 105, + 235, + 165, + 252 + ], + "score": 1.0, + "content": "4 Results", + "type": "text" + } + ], + "index": 13 + } + ], + "index": 13 + }, + { + "type": "title", + "bbox": [ + 108, + 261, + 257, + 273 + ], + "lines": [ + { + "bbox": [ + 106, + 262, + 258, + 273 + ], + "spans": [ + { + "bbox": [ + 106, + 262, + 258, + 273 + ], + "score": 1.0, + 
"content": "4.1 Effects of Massive Parallelism", + "type": "text" + } + ], + "index": 14 + } + ], + "index": 14 + }, + { + "type": "text", + "bbox": [ + 107, + 282, + 505, + 337 + ], + "lines": [ + { + "bbox": [ + 106, + 282, + 505, + 294 + ], + "spans": [ + { + "bbox": [ + 106, + 282, + 505, + 294 + ], + "score": 1.0, + "content": "In this section, we study the effects of the number of parallel robots on the final performance of", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 293, + 505, + 305 + ], + "spans": [ + { + "bbox": [ + 106, + 293, + 505, + 305 + ], + "score": 1.0, + "content": "the policy. In order to use the total reward as a single representative metric, we have to remove the", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 303, + 506, + 317 + ], + "spans": [ + { + "bbox": [ + 105, + 303, + 506, + 317 + ], + "score": 1.0, + "content": "curriculum, otherwise a more performant policy sees its task difficulty increase and consequently a", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 315, + 506, + 328 + ], + "spans": [ + { + "bbox": [ + 105, + 315, + 506, + 328 + ], + "score": 1.0, + "content": "decrease in the total reward. 
As such, we simplify the task by reducing the maximum step size of", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 106, + 326, + 410, + 338 + ], + "spans": [ + { + "bbox": [ + 106, + 326, + 410, + 338 + ], + "score": 1.0, + "content": "stairs and obstacles and directly train robots on the full range of difficulties.", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 17 + }, + { + "type": "text", + "bbox": [ + 108, + 342, + 505, + 375 + ], + "lines": [ + { + "bbox": [ + 105, + 340, + 507, + 357 + ], + "spans": [ + { + "bbox": [ + 105, + 340, + 253, + 357 + ], + "score": 1.0, + "content": "We begin by setting a baseline with", + "type": "text" + }, + { + "bbox": [ + 253, + 342, + 322, + 354 + ], + "score": 0.89, + "content": "n _ { r o b o t s } = 2 0 0 0 0", + "type": "inline_equation" + }, + { + "bbox": [ + 322, + 340, + 341, + 357 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 341, + 343, + 392, + 354 + ], + "score": 0.9, + "content": "n _ { s t e p s } = 5 0", + "type": "inline_equation" + }, + { + "bbox": [ + 392, + 340, + 507, + 357 + ], + "score": 1.0, + "content": ", resulting in a batch size of", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 352, + 505, + 366 + ], + "spans": [ + { + "bbox": [ + 106, + 352, + 505, + 366 + ], + "score": 1.0, + "content": "1M samples. 
Using this very large batch size results in the best policy but at the cost of a relatively", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 363, + 184, + 378 + ], + "spans": [ + { + "bbox": [ + 105, + 363, + 184, + 378 + ], + "score": 1.0, + "content": "long training time.", + "type": "text" + } + ], + "index": 22 + } + ], + "index": 21 + }, + { + "type": "text", + "bbox": [ + 106, + 380, + 505, + 457 + ], + "lines": [ + { + "bbox": [ + 106, + 381, + 505, + 392 + ], + "spans": [ + { + "bbox": [ + 106, + 381, + 505, + 392 + ], + "score": 1.0, + "content": "We then conduct experiments in which we increase the number of robots while keeping the batch", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 392, + 506, + 404 + ], + "spans": [ + { + "bbox": [ + 105, + 392, + 506, + 404 + ], + "score": 1.0, + "content": "size constant. As a result, the number of steps each robot takes per policy update decreases. In this", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 402, + 505, + 415 + ], + "spans": [ + { + "bbox": [ + 105, + 402, + 505, + 415 + ], + "score": 1.0, + "content": "case, the training time decreases with a higher number of robots, but the policy performance drops", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 413, + 505, + 426 + ], + "spans": [ + { + "bbox": [ + 105, + 413, + 505, + 426 + ], + "score": 1.0, + "content": "if that number is too high. 
We start from 128 robots corresponding to the level of parallelization", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 106, + 424, + 505, + 437 + ], + "spans": [ + { + "bbox": [ + 106, + 424, + 505, + 437 + ], + "score": 1.0, + "content": "of previous CPU implementations and increase that number up to 16384, which is close to the", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 435, + 505, + 447 + ], + "spans": [ + { + "bbox": [ + 105, + 435, + 505, + 447 + ], + "score": 1.0, + "content": "maximum amount of robots we could simulate on rough terrain with Isaac Gym running on a single", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 446, + 181, + 457 + ], + "spans": [ + { + "bbox": [ + 106, + 446, + 181, + 457 + ], + "score": 1.0, + "content": "workstation GPU.", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 26 + }, + { + "type": "text", + "bbox": [ + 106, + 462, + 505, + 528 + ], + "lines": [ + { + "bbox": [ + 105, + 462, + 505, + 475 + ], + "spans": [ + { + "bbox": [ + 105, + 462, + 505, + 475 + ], + "score": 1.0, + "content": "In Fig. 4, we compare these results with the baseline, which allows us to select the most favorable", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 471, + 506, + 487 + ], + "spans": [ + { + "bbox": [ + 105, + 471, + 506, + 487 + ], + "score": 1.0, + "content": "trade-off between policy performance and training time. 
We see two interesting effects at play.", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 483, + 505, + 496 + ], + "spans": [ + { + "bbox": [ + 105, + 483, + 505, + 496 + ], + "score": 1.0, + "content": "First, when the number of robots is too high, the performance drops sharply, which can be explained", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 495, + 505, + 507 + ], + "spans": [ + { + "bbox": [ + 105, + 495, + 505, + 507 + ], + "score": 1.0, + "content": "by the time horizon of each robot becoming too small. As expected, with larger batch sizes, the", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 106, + 506, + 505, + 518 + ], + "spans": [ + { + "bbox": [ + 106, + 506, + 505, + 518 + ], + "score": 1.0, + "content": "overall reward is higher, and the time horizon effect is shifted, meaning that we can use more robots", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 106, + 516, + 506, + 529 + ], + "spans": [ + { + "bbox": [ + 106, + 516, + 506, + 529 + ], + "score": 1.0, + "content": "before seeing the drop. 
On the other hand, below a certain threshold, we see a slow decrease in", + "type": "text" + } + ], + "index": 35 + } + ], + "index": 32.5 + }, + { + "type": "image", + "bbox": [ + 109, + 547, + 501, + 661 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 109, + 547, + 501, + 661 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 109, + 547, + 501, + 661 + ], + "spans": [ + { + "bbox": [ + 109, + 547, + 501, + 661 + ], + "score": 0.972, + "type": "image", + "image_path": "b511ee0c2aca987f5fa8e39a0d14f3ee54098cf431491e0a271f167d3bdcff6f.jpg" + } + ] + } + ], + "index": 37, + "virtual_lines": [ + { + "bbox": [ + 109, + 547, + 501, + 585.0 + ], + "spans": [], + "index": 36 + }, + { + "bbox": [ + 109, + 585.0, + 501, + 623.0 + ], + "spans": [], + "index": 37 + }, + { + "bbox": [ + 109, + 623.0, + 501, + 661.0 + ], + "spans": [], + "index": 38 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 667, + 506, + 733 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 667, + 506, + 680 + ], + "spans": [ + { + "bbox": [ + 105, + 667, + 506, + 680 + ], + "score": 1.0, + "content": "Figure 4: (a) Average and standard deviation (over 5 runs) of the total reward of an episode after", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 106, + 678, + 506, + 690 + ], + "spans": [ + { + "bbox": [ + 106, + 678, + 506, + 690 + ], + "score": 1.0, + "content": "1500 policy updates for different number of robots and 3 different batch sizes. The ideal case of a", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 688, + 506, + 702 + ], + "spans": [ + { + "bbox": [ + 105, + 688, + 506, + 702 + ], + "score": 1.0, + "content": "batch size of 1M samples with 20000 robots is shown in red. (b) Total training time for the same", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 700, + 505, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 700, + 505, + 712 + ], + "score": 1.0, + "content": "experiments. 
(c) Reward dependency on total training time. Colors represent the number of robots,", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 106, + 711, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 106, + 711, + 505, + 722 + ], + "score": 1.0, + "content": "while shapes show the batch size (circles: 49152, crosses: 98304, triangles: 196608). Points in the", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 106, + 721, + 477, + 734 + ], + "spans": [ + { + "bbox": [ + 106, + 721, + 477, + 734 + ], + "score": 1.0, + "content": "upper left part of the graph (highlighted in green) represent the most desirable configuration.", + "type": "text" + } + ], + "index": 44 + } + ], + "index": 41.5 + } + ], + "index": 39.25 + } + ], + "page_idx": 5, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 302, + 742, + 309, + 750 + ], + "lines": [ + { + "bbox": [ + 302, + 741, + 310, + 752 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 310, + 752 + ], + "score": 1.0, + "content": "6", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "text", + "bbox": [ + 107, + 73, + 505, + 117 + ], + "lines": [], + "index": 1.5, + "bbox_fs": [ + 105, + 72, + 506, + 118 + ], + "lines_deleted": true + }, + { + "type": "text", + "bbox": [ + 106, + 122, + 505, + 221 + ], + "lines": [ + { + "bbox": [ + 106, + 121, + 505, + 133 + ], + "spans": [ + { + "bbox": [ + 106, + 121, + 505, + 133 + ], + "score": 1.0, + "content": "The ANYmal robot uses series elastic actuators with fairly complex dynamics, which are hard to", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 132, + 506, + 145 + ], + "spans": [ + { + "bbox": [ + 105, + 132, + 506, + 145 + ], + "score": 1.0, + "content": "model in simulation. 
For this reason and following the methodology of previous work [1], we use a", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 144, + 505, + 156 + ], + "spans": [ + { + "bbox": [ + 106, + 144, + 505, + 156 + ], + "score": 1.0, + "content": "neural network to compute torques from joint position commands. However, we simplify the inputs", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 154, + 506, + 167 + ], + "spans": [ + { + "bbox": [ + 106, + 154, + 506, + 167 + ], + "score": 1.0, + "content": "of the model. Instead of concatenating past measurements at fixed time steps and sending all of that", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 165, + 505, + 178 + ], + "spans": [ + { + "bbox": [ + 105, + 165, + 505, + 178 + ], + "score": 1.0, + "content": "information to a standard feed-forward network, we only provide the current measurements to an", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 176, + 506, + 189 + ], + "spans": [ + { + "bbox": [ + 105, + 176, + 506, + 189 + ], + "score": 1.0, + "content": "LSTM network. A potential drawback of this set-up is that the policy does not have the temporal", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 187, + 506, + 200 + ], + "spans": [ + { + "bbox": [ + 105, + 187, + 506, + 200 + ], + "score": 1.0, + "content": "information of the actuators as in previous work. 
We have experimented with various ways of", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 199, + 505, + 210 + ], + "spans": [ + { + "bbox": [ + 106, + 199, + 505, + 210 + ], + "score": 1.0, + "content": "providing that information through memory mechanisms for the policy but found that it does not", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 209, + 231, + 223 + ], + "spans": [ + { + "bbox": [ + 106, + 209, + 231, + 223 + ], + "score": 1.0, + "content": "improve the final performance.", + "type": "text" + } + ], + "index": 12 + } + ], + "index": 8, + "bbox_fs": [ + 105, + 121, + 506, + 223 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 236, + 163, + 250 + ], + "lines": [ + { + "bbox": [ + 105, + 235, + 165, + 252 + ], + "spans": [ + { + "bbox": [ + 105, + 235, + 165, + 252 + ], + "score": 1.0, + "content": "4 Results", + "type": "text" + } + ], + "index": 13 + } + ], + "index": 13 + }, + { + "type": "title", + "bbox": [ + 108, + 261, + 257, + 273 + ], + "lines": [ + { + "bbox": [ + 106, + 262, + 258, + 273 + ], + "spans": [ + { + "bbox": [ + 106, + 262, + 258, + 273 + ], + "score": 1.0, + "content": "4.1 Effects of Massive Parallelism", + "type": "text" + } + ], + "index": 14 + } + ], + "index": 14 + }, + { + "type": "text", + "bbox": [ + 107, + 282, + 505, + 337 + ], + "lines": [ + { + "bbox": [ + 106, + 282, + 505, + 294 + ], + "spans": [ + { + "bbox": [ + 106, + 282, + 505, + 294 + ], + "score": 1.0, + "content": "In this section, we study the effects of the number of parallel robots on the final performance of", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 293, + 505, + 305 + ], + "spans": [ + { + "bbox": [ + 106, + 293, + 505, + 305 + ], + "score": 1.0, + "content": "the policy. 
In order to use the total reward as a single representative metric, we have to remove the", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 303, + 506, + 317 + ], + "spans": [ + { + "bbox": [ + 105, + 303, + 506, + 317 + ], + "score": 1.0, + "content": "curriculum, otherwise a more performant policy sees its task difficulty increase and consequently a", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 315, + 506, + 328 + ], + "spans": [ + { + "bbox": [ + 105, + 315, + 506, + 328 + ], + "score": 1.0, + "content": "decrease in the total reward. As such, we simplify the task by reducing the maximum step size of", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 106, + 326, + 410, + 338 + ], + "spans": [ + { + "bbox": [ + 106, + 326, + 410, + 338 + ], + "score": 1.0, + "content": "stairs and obstacles and directly train robots on the full range of difficulties.", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 17, + "bbox_fs": [ + 105, + 282, + 506, + 338 + ] + }, + { + "type": "text", + "bbox": [ + 108, + 342, + 505, + 375 + ], + "lines": [ + { + "bbox": [ + 105, + 340, + 507, + 357 + ], + "spans": [ + { + "bbox": [ + 105, + 340, + 253, + 357 + ], + "score": 1.0, + "content": "We begin by setting a baseline with", + "type": "text" + }, + { + "bbox": [ + 253, + 342, + 322, + 354 + ], + "score": 0.89, + "content": "n _ { r o b o t s } = 2 0 0 0 0", + "type": "inline_equation" + }, + { + "bbox": [ + 322, + 340, + 341, + 357 + ], + "score": 1.0, + "content": "and", + "type": "text" + }, + { + "bbox": [ + 341, + 343, + 392, + 354 + ], + "score": 0.9, + "content": "n _ { s t e p s } = 5 0", + "type": "inline_equation" + }, + { + "bbox": [ + 392, + 340, + 507, + 357 + ], + "score": 1.0, + "content": ", resulting in a batch size of", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 352, + 505, + 366 + ], + "spans": [ + { + "bbox": [ + 106, + 352, + 505, + 366 + ], + "score": 1.0, + 
"content": "1M samples. Using this very large batch size results in the best policy but at the cost of a relatively", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 363, + 184, + 378 + ], + "spans": [ + { + "bbox": [ + 105, + 363, + 184, + 378 + ], + "score": 1.0, + "content": "long training time.", + "type": "text" + } + ], + "index": 22 + } + ], + "index": 21, + "bbox_fs": [ + 105, + 340, + 507, + 378 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 380, + 505, + 457 + ], + "lines": [ + { + "bbox": [ + 106, + 381, + 505, + 392 + ], + "spans": [ + { + "bbox": [ + 106, + 381, + 505, + 392 + ], + "score": 1.0, + "content": "We then conduct experiments in which we increase the number of robots while keeping the batch", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 392, + 506, + 404 + ], + "spans": [ + { + "bbox": [ + 105, + 392, + 506, + 404 + ], + "score": 1.0, + "content": "size constant. As a result, the number of steps each robot takes per policy update decreases. In this", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 402, + 505, + 415 + ], + "spans": [ + { + "bbox": [ + 105, + 402, + 505, + 415 + ], + "score": 1.0, + "content": "case, the training time decreases with a higher number of robots, but the policy performance drops", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 413, + 505, + 426 + ], + "spans": [ + { + "bbox": [ + 105, + 413, + 505, + 426 + ], + "score": 1.0, + "content": "if that number is too high. 
We start from 128 robots corresponding to the level of parallelization", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 106, + 424, + 505, + 437 + ], + "spans": [ + { + "bbox": [ + 106, + 424, + 505, + 437 + ], + "score": 1.0, + "content": "of previous CPU implementations and increase that number up to 16384, which is close to the", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 435, + 505, + 447 + ], + "spans": [ + { + "bbox": [ + 105, + 435, + 505, + 447 + ], + "score": 1.0, + "content": "maximum amount of robots we could simulate on rough terrain with Isaac Gym running on a single", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 446, + 181, + 457 + ], + "spans": [ + { + "bbox": [ + 106, + 446, + 181, + 457 + ], + "score": 1.0, + "content": "workstation GPU.", + "type": "text" + } + ], + "index": 29 + } + ], + "index": 26, + "bbox_fs": [ + 105, + 381, + 506, + 457 + ] + }, + { + "type": "text", + "bbox": [ + 106, + 462, + 505, + 528 + ], + "lines": [ + { + "bbox": [ + 105, + 462, + 505, + 475 + ], + "spans": [ + { + "bbox": [ + 105, + 462, + 505, + 475 + ], + "score": 1.0, + "content": "In Fig. 4, we compare these results with the baseline, which allows us to select the most favorable", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 471, + 506, + 487 + ], + "spans": [ + { + "bbox": [ + 105, + 471, + 506, + 487 + ], + "score": 1.0, + "content": "trade-off between policy performance and training time. 
We see two interesting effects at play.", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 483, + 505, + 496 + ], + "spans": [ + { + "bbox": [ + 105, + 483, + 505, + 496 + ], + "score": 1.0, + "content": "First, when the number of robots is too high, the performance drops sharply, which can be explained", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 495, + 505, + 507 + ], + "spans": [ + { + "bbox": [ + 105, + 495, + 505, + 507 + ], + "score": 1.0, + "content": "by the time horizon of each robot becoming too small. As expected, with larger batch sizes, the", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 106, + 506, + 505, + 518 + ], + "spans": [ + { + "bbox": [ + 106, + 506, + 505, + 518 + ], + "score": 1.0, + "content": "overall reward is higher, and the time horizon effect is shifted, meaning that we can use more robots", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 106, + 516, + 506, + 529 + ], + "spans": [ + { + "bbox": [ + 106, + 516, + 506, + 529 + ], + "score": 1.0, + "content": "before seeing the drop. On the other hand, below a certain threshold, we see a slow decrease in", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 104, + 307, + 506, + 322 + ], + "spans": [ + { + "bbox": [ + 104, + 307, + 506, + 322 + ], + "score": 1.0, + "content": "performance with fewer robots. We believe this is explained by the fact that the samples are very", + "type": "text", + "cross_page": true + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 320, + 505, + 331 + ], + "spans": [ + { + "bbox": [ + 106, + 320, + 505, + 331 + ], + "score": 1.0, + "content": "similar with many steps per robot because of the relatively small time steps between them. 
This", + "type": "text", + "cross_page": true + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 330, + 506, + 342 + ], + "spans": [ + { + "bbox": [ + 106, + 330, + 506, + 342 + ], + "score": 1.0, + "content": "means that for the same amount of samples, there is less diversity in the data. In other words, with a", + "type": "text", + "cross_page": true + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 340, + 505, + 353 + ], + "spans": [ + { + "bbox": [ + 105, + 340, + 505, + 353 + ], + "score": 1.0, + "content": "low number of robots, we are further from the standard assumption that the samples are independent", + "type": "text", + "cross_page": true + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 351, + 505, + 365 + ], + "spans": [ + { + "bbox": [ + 105, + 351, + 505, + 365 + ], + "score": 1.0, + "content": "and identically distributed, which seems to have a noticeable effect on the training process. In terms", + "type": "text", + "cross_page": true + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 363, + 505, + 374 + ], + "spans": [ + { + "bbox": [ + 106, + 363, + 505, + 374 + ], + "score": 1.0, + "content": "of training time, we see a nearly linear scaling up to 4000 robots, after which simulation throughput", + "type": "text", + "cross_page": true + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 373, + 506, + 386 + ], + "spans": [ + { + "bbox": [ + 104, + 373, + 506, + 386 + ], + "score": 1.0, + "content": "gains slow down. 
As such, we can conclude that increasing the number of robots is beneficial for", + "type": "text", + "cross_page": true + } + ], + "index": 17 + }, + { + "bbox": [ + 106, + 385, + 505, + 397 + ], + "spans": [ + { + "bbox": [ + 106, + 385, + 505, + 397 + ], + "score": 1.0, + "content": "both final performance and training time, but there is an upper limit on this number after which an", + "type": "text", + "cross_page": true + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 395, + 506, + 408 + ], + "spans": [ + { + "bbox": [ + 105, + 395, + 506, + 408 + ], + "score": 1.0, + "content": "on-policy algorithm cannot learn effectively. Increasing the batch size to values much larger than", + "type": "text", + "cross_page": true + } + ], + "index": 19 + }, + { + "bbox": [ + 106, + 406, + 505, + 419 + ], + "spans": [ + { + "bbox": [ + 106, + 406, + 505, + 419 + ], + "score": 1.0, + "content": "what is typically used in similar works seems highly beneficial. Unfortunately, it also scales the", + "type": "text", + "cross_page": true + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 418, + 506, + 429 + ], + "spans": [ + { + "bbox": [ + 106, + 418, + 506, + 429 + ], + "score": 1.0, + "content": "training time so it is a trade-off that must be balanced. 
From the third plot we can conclude that", + "type": "text", + "cross_page": true + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 428, + 506, + 440 + ], + "spans": [ + { + "bbox": [ + 105, + 428, + 291, + 440 + ], + "score": 1.0, + "content": "using 2048 to 4096 robots with a batch size of", + "type": "text", + "cross_page": true + }, + { + "bbox": [ + 291, + 428, + 323, + 439 + ], + "score": 0.88, + "content": "\\approx 1 0 0 k", + "type": "inline_equation", + "cross_page": true + }, + { + "bbox": [ + 324, + 428, + 334, + 440 + ], + "score": 1.0, + "content": "or", + "type": "text", + "cross_page": true + }, + { + "bbox": [ + 335, + 428, + 367, + 439 + ], + "score": 0.84, + "content": "\\approx 2 0 0 k", + "type": "inline_equation", + "cross_page": true + }, + { + "bbox": [ + 367, + 428, + 506, + 440 + ], + "score": 1.0, + "content": "provides the best trade-off for this", + "type": "text", + "cross_page": true + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 439, + 161, + 451 + ], + "spans": [ + { + "bbox": [ + 105, + 439, + 161, + 451 + ], + "score": 1.0, + "content": "specific task.", + "type": "text", + "cross_page": true + } + ], + "index": 23 + } + ], + "index": 32.5, + "bbox_fs": [ + 105, + 462, + 506, + 529 + ] + }, + { + "type": "image", + "bbox": [ + 109, + 547, + 501, + 661 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 109, + 547, + 501, + 661 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 109, + 547, + 501, + 661 + ], + "spans": [ + { + "bbox": [ + 109, + 547, + 501, + 661 + ], + "score": 0.972, + "type": "image", + "image_path": "b511ee0c2aca987f5fa8e39a0d14f3ee54098cf431491e0a271f167d3bdcff6f.jpg" + } + ] + } + ], + "index": 37, + "virtual_lines": [ + { + "bbox": [ + 109, + 547, + 501, + 585.0 + ], + "spans": [], + "index": 36 + }, + { + "bbox": [ + 109, + 585.0, + 501, + 623.0 + ], + "spans": [], + "index": 37 + }, + { + "bbox": [ + 109, + 623.0, + 501, + 661.0 + ], + "spans": [], + "index": 38 + } + ] + }, + { + 
"type": "image_caption", + "bbox": [ + 106, + 667, + 506, + 733 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 105, + 667, + 506, + 680 + ], + "spans": [ + { + "bbox": [ + 105, + 667, + 506, + 680 + ], + "score": 1.0, + "content": "Figure 4: (a) Average and standard deviation (over 5 runs) of the total reward of an episode after", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 106, + 678, + 506, + 690 + ], + "spans": [ + { + "bbox": [ + 106, + 678, + 506, + 690 + ], + "score": 1.0, + "content": "1500 policy updates for different number of robots and 3 different batch sizes. The ideal case of a", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 688, + 506, + 702 + ], + "spans": [ + { + "bbox": [ + 105, + 688, + 506, + 702 + ], + "score": 1.0, + "content": "batch size of 1M samples with 20000 robots is shown in red. (b) Total training time for the same", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 700, + 505, + 712 + ], + "spans": [ + { + "bbox": [ + 105, + 700, + 505, + 712 + ], + "score": 1.0, + "content": "experiments. (c) Reward dependency on total training time. Colors represent the number of robots,", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 106, + 711, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 106, + 711, + 505, + 722 + ], + "score": 1.0, + "content": "while shapes show the batch size (circles: 49152, crosses: 98304, triangles: 196608). 
Points in the", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 106, + 721, + 477, + 734 + ], + "spans": [ + { + "bbox": [ + 106, + 721, + 477, + 734 + ], + "score": 1.0, + "content": "upper left part of the graph (highlighted in green) represent the most desirable configuration.", + "type": "text" + } + ], + "index": 44 + } + ], + "index": 41.5 + } + ], + "index": 39.25 + } + ] + }, + { + "preproc_blocks": [ + { + "type": "image", + "bbox": [ + 131, + 69, + 480, + 169 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 131, + 69, + 480, + 169 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 131, + 69, + 480, + 169 + ], + "spans": [ + { + "bbox": [ + 131, + 69, + 480, + 169 + ], + "score": 0.971, + "type": "image", + "image_path": "f15e005bc1e3d3db64d82f1f7796d15a227032608677590845862fc30d3d7da3.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 131, + 69, + 480, + 102.33333333333334 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 131, + 102.33333333333334, + 480, + 135.66666666666669 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 131, + 135.66666666666669, + 480, + 169.00000000000003 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 176, + 505, + 221 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 106, + 176, + 505, + 188 + ], + "spans": [ + { + "bbox": [ + 106, + 176, + 505, + 188 + ], + "score": 1.0, + "content": "Figure 5: Success rate of the tested policy on increasing terrain complexities. Robots start in the", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 187, + 505, + 200 + ], + "spans": [ + { + "bbox": [ + 105, + 187, + 382, + 200 + ], + "score": 1.0, + "content": "center of the terrain and are given a forward velocity command of", + "type": "text" + }, + { + "bbox": [ + 383, + 187, + 420, + 199 + ], + "score": 0.87, + "content": "0 . 
7 5 \\mathrm { m } / \\mathrm { s }", + "type": "inline_equation" + }, + { + "bbox": [ + 421, + 187, + 505, + 200 + ], + "score": 1.0, + "content": ", and a side velocity", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 198, + 505, + 210 + ], + "spans": [ + { + "bbox": [ + 106, + 199, + 225, + 210 + ], + "score": 1.0, + "content": "command randomized within", + "type": "text" + }, + { + "bbox": [ + 226, + 198, + 289, + 210 + ], + "score": 0.91, + "content": "[ - 0 . 1 , 0 . 1 ] \\mathrm { m } / \\mathrm { s }", + "type": "inline_equation" + }, + { + "bbox": [ + 289, + 199, + 505, + 210 + ], + "score": 1.0, + "content": ". (a) Success rate for climbing stairs, descending stairs", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 209, + 486, + 222 + ], + "spans": [ + { + "bbox": [ + 106, + 209, + 486, + 222 + ], + "score": 1.0, + "content": "and traversing discrete obstacles. (b) Success rate for climbing and descending sloped terrains.", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 4.5 + } + ], + "index": 2.75 + }, + { + "type": "image", + "bbox": [ + 109, + 228, + 503, + 283 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 109, + 228, + 503, + 283 + ], + "group_id": 1, + "lines": [ + { + "bbox": [ + 109, + 228, + 503, + 283 + ], + "spans": [ + { + "bbox": [ + 109, + 228, + 503, + 283 + ], + "score": 0.964, + "type": "image", + "image_path": "4eab7c74f6cac548026b3ff28e7f10ab6832f65e03b74ce577a9f1ea90202882.jpg" + } + ] + } + ], + "index": 8, + "virtual_lines": [ + { + "bbox": [ + 109, + 228, + 503, + 246.33333333333334 + ], + "spans": [], + "index": 7 + }, + { + "bbox": [ + 109, + 246.33333333333334, + 503, + 264.6666666666667 + ], + "spans": [], + "index": 8 + }, + { + "bbox": [ + 109, + 264.6666666666667, + 503, + 283.0 + ], + "spans": [], + "index": 9 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 141, + 288, + 467, + 300 + ], + "group_id": 1, + "lines": [ + { + "bbox": [ + 142, + 287, 
+ 469, + 301 + ], + "spans": [ + { + "bbox": [ + 142, + 287, + 469, + 301 + ], + "score": 1.0, + "content": "Figure 6: ANYmal C with a fixed arm, ANYmal B, A1 and Cassie in simulation.", + "type": "text" + } + ], + "index": 10 + } + ], + "index": 10 + } + ], + "index": 9.0 + }, + { + "type": "text", + "bbox": [ + 106, + 308, + 505, + 450 + ], + "lines": [ + { + "bbox": [ + 104, + 307, + 506, + 322 + ], + "spans": [ + { + "bbox": [ + 104, + 307, + 506, + 322 + ], + "score": 1.0, + "content": "performance with fewer robots. We believe this is explained by the fact that the samples are very", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 320, + 505, + 331 + ], + "spans": [ + { + "bbox": [ + 106, + 320, + 505, + 331 + ], + "score": 1.0, + "content": "similar with many steps per robot because of the relatively small time steps between them. This", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 330, + 506, + 342 + ], + "spans": [ + { + "bbox": [ + 106, + 330, + 506, + 342 + ], + "score": 1.0, + "content": "means that for the same amount of samples, there is less diversity in the data. In other words, with a", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 340, + 505, + 353 + ], + "spans": [ + { + "bbox": [ + 105, + 340, + 505, + 353 + ], + "score": 1.0, + "content": "low number of robots, we are further from the standard assumption that the samples are independent", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 351, + 505, + 365 + ], + "spans": [ + { + "bbox": [ + 105, + 351, + 505, + 365 + ], + "score": 1.0, + "content": "and identically distributed, which seems to have a noticeable effect on the training process. 
In terms", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 363, + 505, + 374 + ], + "spans": [ + { + "bbox": [ + 106, + 363, + 505, + 374 + ], + "score": 1.0, + "content": "of training time, we see a nearly linear scaling up to 4000 robots, after which simulation throughput", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 373, + 506, + 386 + ], + "spans": [ + { + "bbox": [ + 104, + 373, + 506, + 386 + ], + "score": 1.0, + "content": "gains slow down. As such, we can conclude that increasing the number of robots is beneficial for", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 106, + 385, + 505, + 397 + ], + "spans": [ + { + "bbox": [ + 106, + 385, + 505, + 397 + ], + "score": 1.0, + "content": "both final performance and training time, but there is an upper limit on this number after which an", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 395, + 506, + 408 + ], + "spans": [ + { + "bbox": [ + 105, + 395, + 506, + 408 + ], + "score": 1.0, + "content": "on-policy algorithm cannot learn effectively. Increasing the batch size to values much larger than", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 106, + 406, + 505, + 419 + ], + "spans": [ + { + "bbox": [ + 106, + 406, + 505, + 419 + ], + "score": 1.0, + "content": "what is typically used in similar works seems highly beneficial. Unfortunately, it also scales the", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 418, + 506, + 429 + ], + "spans": [ + { + "bbox": [ + 106, + 418, + 506, + 429 + ], + "score": 1.0, + "content": "training time so it is a trade-off that must be balanced. 
From the third plot we can conclude that", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 428, + 506, + 440 + ], + "spans": [ + { + "bbox": [ + 105, + 428, + 291, + 440 + ], + "score": 1.0, + "content": "using 2048 to 4096 robots with a batch size of", + "type": "text" + }, + { + "bbox": [ + 291, + 428, + 323, + 439 + ], + "score": 0.88, + "content": "\\approx 1 0 0 k", + "type": "inline_equation" + }, + { + "bbox": [ + 324, + 428, + 334, + 440 + ], + "score": 1.0, + "content": "or", + "type": "text" + }, + { + "bbox": [ + 335, + 428, + 367, + 439 + ], + "score": 0.84, + "content": "\\approx 2 0 0 k", + "type": "inline_equation" + }, + { + "bbox": [ + 367, + 428, + 506, + 440 + ], + "score": 1.0, + "content": "provides the best trade-off for this", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 439, + 161, + 451 + ], + "spans": [ + { + "bbox": [ + 105, + 439, + 161, + 451 + ], + "score": 1.0, + "content": "specific task.", + "type": "text" + } + ], + "index": 23 + } + ], + "index": 17 + }, + { + "type": "title", + "bbox": [ + 107, + 464, + 177, + 475 + ], + "lines": [ + { + "bbox": [ + 105, + 463, + 178, + 477 + ], + "spans": [ + { + "bbox": [ + 105, + 463, + 178, + 477 + ], + "score": 1.0, + "content": "4.2 Simulation", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 24 + }, + { + "type": "text", + "bbox": [ + 107, + 484, + 505, + 615 + ], + "lines": [ + { + "bbox": [ + 105, + 484, + 506, + 497 + ], + "spans": [ + { + "bbox": [ + 105, + 484, + 506, + 497 + ], + "score": 1.0, + "content": "For our simulation and deployment experiments, we use a policy trained with 4096 robots and a", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 493, + 505, + 508 + ], + "spans": [ + { + "bbox": [ + 105, + 493, + 505, + 508 + ], + "score": 1.0, + "content": "batch size of 98304, which we train for 1500 policy updates in under 20 minutes2. 
We begin by", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 506, + 505, + 519 + ], + "spans": [ + { + "bbox": [ + 105, + 506, + 505, + 519 + ], + "score": 1.0, + "content": "measuring the performance of our trained policy in simulation. To that end, we perform robustness", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 518, + 505, + 529 + ], + "spans": [ + { + "bbox": [ + 105, + 518, + 505, + 529 + ], + "score": 1.0, + "content": "and traversability tests. For each terrain type, we command the robots to traverse the representative", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 528, + 505, + 540 + ], + "spans": [ + { + "bbox": [ + 105, + 528, + 505, + 540 + ], + "score": 1.0, + "content": "difficulty of the terrain at high forward velocity and measure the success rate. A success is defined", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 539, + 506, + 551 + ], + "spans": [ + { + "bbox": [ + 105, + 539, + 506, + 551 + ], + "score": 1.0, + "content": "as managing to cross the terrain while avoiding any contacts on the robot’s base. Fig. 5 shows", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 549, + 505, + 563 + ], + "spans": [ + { + "bbox": [ + 105, + 549, + 365, + 563 + ], + "score": 1.0, + "content": "the results for the different terrains. For stairs, we see a nearly", + "type": "text" + }, + { + "bbox": [ + 365, + 550, + 391, + 560 + ], + "score": 0.89, + "content": "1 0 0 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 392, + 549, + 505, + 563 + ], + "score": 1.0, + "content": "success rate for steps up to", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 106, + 560, + 505, + 573 + ], + "spans": [ + { + "bbox": [ + 106, + 561, + 131, + 571 + ], + "score": 0.66, + "content": "\\mathrm { 0 . 
2 m }", + "type": "inline_equation" + }, + { + "bbox": [ + 131, + 560, + 505, + 573 + ], + "score": 1.0, + "content": ", which is the hardest stair difficulty we train on and close to the kinematic limits of our robot.", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 571, + 505, + 584 + ], + "spans": [ + { + "bbox": [ + 105, + 571, + 505, + 584 + ], + "score": 1.0, + "content": "Randomized obstacles seem to be more demanding, with the success rate decreasing steadily. We", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 583, + 505, + 595 + ], + "spans": [ + { + "bbox": [ + 105, + 583, + 505, + 595 + ], + "score": 1.0, + "content": "must note that in this case, the largest step is double the reported height since neighboring obstacles", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 594, + 505, + 606 + ], + "spans": [ + { + "bbox": [ + 105, + 594, + 461, + 606 + ], + "score": 1.0, + "content": "can have positive and negative heights. 
In the case of slopes, we can observe that after", + "type": "text" + }, + { + "bbox": [ + 461, + 594, + 489, + 605 + ], + "score": 0.37, + "content": "2 5 \\mathrm { d e g }", + "type": "inline_equation" + }, + { + "bbox": [ + 489, + 594, + 505, + 606 + ], + "score": 1.0, + "content": "the", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 604, + 486, + 617 + ], + "spans": [ + { + "bbox": [ + 105, + 604, + 486, + 617 + ], + "score": 1.0, + "content": "robots are not able to climb anymore but still learn to slide down with a moderate success rate.", + "type": "text" + } + ], + "index": 36 + } + ], + "index": 30.5 + }, + { + "type": "text", + "bbox": [ + 107, + 621, + 505, + 664 + ], + "lines": [ + { + "bbox": [ + 106, + 620, + 505, + 633 + ], + "spans": [ + { + "bbox": [ + 106, + 620, + 505, + 633 + ], + "score": 1.0, + "content": "Given our relatively simple rewards and action space, the policy is free to adopt any gait and behav-", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 631, + 505, + 644 + ], + "spans": [ + { + "bbox": [ + 105, + 631, + 505, + 644 + ], + "score": 1.0, + "content": "ior. Interestingly, it always converges to a trotting gait, but there are often artifacts in the behavior,", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 106, + 643, + 505, + 655 + ], + "spans": [ + { + "bbox": [ + 106, + 643, + 505, + 655 + ], + "score": 1.0, + "content": "such as a dragging leg or unreasonably high or low base heights. 
After tuning of the reward weights,", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 654, + 502, + 666 + ], + "spans": [ + { + "bbox": [ + 105, + 654, + 502, + 666 + ], + "score": 1.0, + "content": "we can obtain a policy that respects all our constraints and can be transferred to the physical robot.", + "type": "text" + } + ], + "index": 40 + } + ], + "index": 38.5 + }, + { + "type": "text", + "bbox": [ + 108, + 669, + 503, + 692 + ], + "lines": [ + { + "bbox": [ + 106, + 669, + 504, + 682 + ], + "spans": [ + { + "bbox": [ + 106, + 669, + 504, + 682 + ], + "score": 1.0, + "content": "To verify the generalizability of the approach, we train policies for multiple robots with the same", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 106, + 680, + 505, + 693 + ], + "spans": [ + { + "bbox": [ + 106, + 680, + 429, + 693 + ], + "score": 1.0, + "content": "set-up. We use the ANYmal C robot with a fixed robotic arm, which adds about", + "type": "text" + }, + { + "bbox": [ + 429, + 680, + 451, + 691 + ], + "score": 0.89, + "content": "2 0 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 451, + 680, + 505, + 693 + ], + "score": 1.0, + "content": "of additional", + "type": "text" + } + ], + "index": 42 + } + ], + "index": 41.5 + } + ], + "page_idx": 6, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 110, + 701, + 503, + 722 + ], + "lines": [ + { + "bbox": [ + 118, + 699, + 504, + 714 + ], + "spans": [ + { + "bbox": [ + 118, + 699, + 504, + 714 + ], + "score": 1.0, + "content": "2Trained on: i9-11900k CPU, NVIDIA RTX A6000 GPU. 
VRAM requirements are in the supplementary", + "type": "text" + } + ] + }, + { + "bbox": [ + 107, + 711, + 141, + 723 + ], + "spans": [ + { + "bbox": [ + 107, + 711, + 141, + 723 + ], + "score": 1.0, + "content": "material.", + "type": "text" + } + ] + } + ] + }, + { + "type": "discarded", + "bbox": [ + 302, + 741, + 308, + 750 + ], + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 752 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 752 + ], + "score": 1.0, + "content": "7", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "image", + "bbox": [ + 131, + 69, + 480, + 169 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 131, + 69, + 480, + 169 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 131, + 69, + 480, + 169 + ], + "spans": [ + { + "bbox": [ + 131, + 69, + 480, + 169 + ], + "score": 0.971, + "type": "image", + "image_path": "f15e005bc1e3d3db64d82f1f7796d15a227032608677590845862fc30d3d7da3.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 131, + 69, + 480, + 102.33333333333334 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 131, + 102.33333333333334, + 480, + 135.66666666666669 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 131, + 135.66666666666669, + 480, + 169.00000000000003 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 106, + 176, + 505, + 221 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 106, + 176, + 505, + 188 + ], + "spans": [ + { + "bbox": [ + 106, + 176, + 505, + 188 + ], + "score": 1.0, + "content": "Figure 5: Success rate of the tested policy on increasing terrain complexities. 
Robots start in the", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 187, + 505, + 200 + ], + "spans": [ + { + "bbox": [ + 105, + 187, + 382, + 200 + ], + "score": 1.0, + "content": "center of the terrain and are given a forward velocity command of", + "type": "text" + }, + { + "bbox": [ + 383, + 187, + 420, + 199 + ], + "score": 0.87, + "content": "0 . 7 5 \\mathrm { m } / \\mathrm { s }", + "type": "inline_equation" + }, + { + "bbox": [ + 421, + 187, + 505, + 200 + ], + "score": 1.0, + "content": ", and a side velocity", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 198, + 505, + 210 + ], + "spans": [ + { + "bbox": [ + 106, + 199, + 225, + 210 + ], + "score": 1.0, + "content": "command randomized within", + "type": "text" + }, + { + "bbox": [ + 226, + 198, + 289, + 210 + ], + "score": 0.91, + "content": "[ - 0 . 1 , 0 . 1 ] \\mathrm { m } / \\mathrm { s }", + "type": "inline_equation" + }, + { + "bbox": [ + 289, + 199, + 505, + 210 + ], + "score": 1.0, + "content": ". (a) Success rate for climbing stairs, descending stairs", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 209, + 486, + 222 + ], + "spans": [ + { + "bbox": [ + 106, + 209, + 486, + 222 + ], + "score": 1.0, + "content": "and traversing discrete obstacles. 
(b) Success rate for climbing and descending sloped terrains.", + "type": "text" + } + ], + "index": 6 + } + ], + "index": 4.5 + } + ], + "index": 2.75 + }, + { + "type": "image", + "bbox": [ + 109, + 228, + 503, + 283 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 109, + 228, + 503, + 283 + ], + "group_id": 1, + "lines": [ + { + "bbox": [ + 109, + 228, + 503, + 283 + ], + "spans": [ + { + "bbox": [ + 109, + 228, + 503, + 283 + ], + "score": 0.964, + "type": "image", + "image_path": "4eab7c74f6cac548026b3ff28e7f10ab6832f65e03b74ce577a9f1ea90202882.jpg" + } + ] + } + ], + "index": 8, + "virtual_lines": [ + { + "bbox": [ + 109, + 228, + 503, + 246.33333333333334 + ], + "spans": [], + "index": 7 + }, + { + "bbox": [ + 109, + 246.33333333333334, + 503, + 264.6666666666667 + ], + "spans": [], + "index": 8 + }, + { + "bbox": [ + 109, + 264.6666666666667, + 503, + 283.0 + ], + "spans": [], + "index": 9 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 141, + 288, + 467, + 300 + ], + "group_id": 1, + "lines": [ + { + "bbox": [ + 142, + 287, + 469, + 301 + ], + "spans": [ + { + "bbox": [ + 142, + 287, + 469, + 301 + ], + "score": 1.0, + "content": "Figure 6: ANYmal C with a fixed arm, ANYmal B, A1 and Cassie in simulation.", + "type": "text" + } + ], + "index": 10 + } + ], + "index": 10 + } + ], + "index": 9.0 + }, + { + "type": "text", + "bbox": [ + 106, + 308, + 505, + 450 + ], + "lines": [], + "index": 17, + "bbox_fs": [ + 104, + 307, + 506, + 451 + ], + "lines_deleted": true + }, + { + "type": "title", + "bbox": [ + 107, + 464, + 177, + 475 + ], + "lines": [ + { + "bbox": [ + 105, + 463, + 178, + 477 + ], + "spans": [ + { + "bbox": [ + 105, + 463, + 178, + 477 + ], + "score": 1.0, + "content": "4.2 Simulation", + "type": "text" + } + ], + "index": 24 + } + ], + "index": 24 + }, + { + "type": "text", + "bbox": [ + 107, + 484, + 505, + 615 + ], + "lines": [ + { + "bbox": [ + 105, + 484, + 506, + 497 + ], + "spans": [ + { + "bbox": [ + 105, + 484, + 
506, + 497 + ], + "score": 1.0, + "content": "For our simulation and deployment experiments, we use a policy trained with 4096 robots and a", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 493, + 505, + 508 + ], + "spans": [ + { + "bbox": [ + 105, + 493, + 505, + 508 + ], + "score": 1.0, + "content": "batch size of 98304, which we train for 1500 policy updates in under 20 minutes2. We begin by", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 506, + 505, + 519 + ], + "spans": [ + { + "bbox": [ + 105, + 506, + 505, + 519 + ], + "score": 1.0, + "content": "measuring the performance of our trained policy in simulation. To that end, we perform robustness", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 105, + 518, + 505, + 529 + ], + "spans": [ + { + "bbox": [ + 105, + 518, + 505, + 529 + ], + "score": 1.0, + "content": "and traversability tests. For each terrain type, we command the robots to traverse the representative", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 105, + 528, + 505, + 540 + ], + "spans": [ + { + "bbox": [ + 105, + 528, + 505, + 540 + ], + "score": 1.0, + "content": "difficulty of the terrain at high forward velocity and measure the success rate. A success is defined", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 539, + 506, + 551 + ], + "spans": [ + { + "bbox": [ + 105, + 539, + 506, + 551 + ], + "score": 1.0, + "content": "as managing to cross the terrain while avoiding any contacts on the robot’s base. Fig. 5 shows", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 549, + 505, + 563 + ], + "spans": [ + { + "bbox": [ + 105, + 549, + 365, + 563 + ], + "score": 1.0, + "content": "the results for the different terrains. 
For stairs, we see a nearly", + "type": "text" + }, + { + "bbox": [ + 365, + 550, + 391, + 560 + ], + "score": 0.89, + "content": "1 0 0 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 392, + 549, + 505, + 563 + ], + "score": 1.0, + "content": "success rate for steps up to", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 106, + 560, + 505, + 573 + ], + "spans": [ + { + "bbox": [ + 106, + 561, + 131, + 571 + ], + "score": 0.66, + "content": "\\mathrm { 0 . 2 m }", + "type": "inline_equation" + }, + { + "bbox": [ + 131, + 560, + 505, + 573 + ], + "score": 1.0, + "content": ", which is the hardest stair difficulty we train on and close to the kinematic limits of our robot.", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 571, + 505, + 584 + ], + "spans": [ + { + "bbox": [ + 105, + 571, + 505, + 584 + ], + "score": 1.0, + "content": "Randomized obstacles seem to be more demanding, with the success rate decreasing steadily. We", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 583, + 505, + 595 + ], + "spans": [ + { + "bbox": [ + 105, + 583, + 505, + 595 + ], + "score": 1.0, + "content": "must note that in this case, the largest step is double the reported height since neighboring obstacles", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 594, + 505, + 606 + ], + "spans": [ + { + "bbox": [ + 105, + 594, + 461, + 606 + ], + "score": 1.0, + "content": "can have positive and negative heights. 
In the case of slopes, we can observe that after", + "type": "text" + }, + { + "bbox": [ + 461, + 594, + 489, + 605 + ], + "score": 0.37, + "content": "2 5 \\mathrm { d e g }", + "type": "inline_equation" + }, + { + "bbox": [ + 489, + 594, + 505, + 606 + ], + "score": 1.0, + "content": "the", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 604, + 486, + 617 + ], + "spans": [ + { + "bbox": [ + 105, + 604, + 486, + 617 + ], + "score": 1.0, + "content": "robots are not able to climb anymore but still learn to slide down with a moderate success rate.", + "type": "text" + } + ], + "index": 36 + } + ], + "index": 30.5, + "bbox_fs": [ + 105, + 484, + 506, + 617 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 621, + 505, + 664 + ], + "lines": [ + { + "bbox": [ + 106, + 620, + 505, + 633 + ], + "spans": [ + { + "bbox": [ + 106, + 620, + 505, + 633 + ], + "score": 1.0, + "content": "Given our relatively simple rewards and action space, the policy is free to adopt any gait and behav-", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 105, + 631, + 505, + 644 + ], + "spans": [ + { + "bbox": [ + 105, + 631, + 505, + 644 + ], + "score": 1.0, + "content": "ior. Interestingly, it always converges to a trotting gait, but there are often artifacts in the behavior,", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 106, + 643, + 505, + 655 + ], + "spans": [ + { + "bbox": [ + 106, + 643, + 505, + 655 + ], + "score": 1.0, + "content": "such as a dragging leg or unreasonably high or low base heights. 
After tuning of the reward weights,", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 654, + 502, + 666 + ], + "spans": [ + { + "bbox": [ + 105, + 654, + 502, + 666 + ], + "score": 1.0, + "content": "we can obtain a policy that respects all our constraints and can be transferred to the physical robot.", + "type": "text" + } + ], + "index": 40 + } + ], + "index": 38.5, + "bbox_fs": [ + 105, + 620, + 505, + 666 + ] + }, + { + "type": "text", + "bbox": [ + 108, + 669, + 503, + 692 + ], + "lines": [ + { + "bbox": [ + 106, + 669, + 504, + 682 + ], + "spans": [ + { + "bbox": [ + 106, + 669, + 504, + 682 + ], + "score": 1.0, + "content": "To verify the generalizability of the approach, we train policies for multiple robots with the same", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 106, + 680, + 505, + 693 + ], + "spans": [ + { + "bbox": [ + 106, + 680, + 429, + 693 + ], + "score": 1.0, + "content": "set-up. We use the ANYmal C robot with a fixed robotic arm, which adds about", + "type": "text" + }, + { + "bbox": [ + 429, + 680, + 451, + 691 + ], + "score": 0.89, + "content": "2 0 \\%", + "type": "inline_equation" + }, + { + "bbox": [ + 451, + 680, + 505, + 693 + ], + "score": 1.0, + "content": "of additional", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 179, + 505, + 191 + ], + "spans": [ + { + "bbox": [ + 105, + 179, + 505, + 191 + ], + "score": 1.0, + "content": "weight, and the ANYmal B robot, which has comparable dimensions but modified kinematic and", + "type": "text", + "cross_page": true + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 191, + 504, + 202 + ], + "spans": [ + { + "bbox": [ + 106, + 191, + 504, + 202 + ], + "score": 1.0, + "content": "dynamic properties. 
In these two cases, we can retrain a policy without any modifications to the", + "type": "text", + "cross_page": true + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 202, + 505, + 213 + ], + "spans": [ + { + "bbox": [ + 105, + 202, + 505, + 213 + ], + "score": 1.0, + "content": "rewards or algorithm hyper-parameters and obtain a very similar performance. Next, we use the", + "type": "text", + "cross_page": true + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 212, + 505, + 225 + ], + "spans": [ + { + "bbox": [ + 105, + 212, + 505, + 225 + ], + "score": 1.0, + "content": "Unitree A1 robot, which has smaller dimensions, four times lower weight, and a different leg con-", + "type": "text", + "cross_page": true + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 222, + 505, + 236 + ], + "spans": [ + { + "bbox": [ + 105, + 222, + 505, + 236 + ], + "score": 1.0, + "content": "figuration. In this case, we remove the actuator model of the ANYdrive motors, reduce PD gains", + "type": "text", + "cross_page": true + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 234, + 505, + 247 + ], + "spans": [ + { + "bbox": [ + 105, + 234, + 505, + 247 + ], + "score": 1.0, + "content": "and the torque penalties, and change the default joint configurations. We can train a dynamic policy", + "type": "text", + "cross_page": true + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 245, + 505, + 258 + ], + "spans": [ + { + "bbox": [ + 105, + 245, + 505, + 258 + ], + "score": 1.0, + "content": "that learns to solve the same terrains even with the reduced size of the robot. Finally, we apply our", + "type": "text", + "cross_page": true + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 255, + 506, + 270 + ], + "spans": [ + { + "bbox": [ + 105, + 255, + 506, + 270 + ], + "score": 1.0, + "content": "approach to Agility Robotics’ bipedal robot Cassie. 
We find that an additional reward encouraging", + "type": "text", + "cross_page": true + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 267, + 505, + 279 + ], + "spans": [ + { + "bbox": [ + 105, + 267, + 505, + 279 + ], + "score": 1.0, + "content": "standing on a single foot is necessary to achieve a walking gait. With this addition, we are able", + "type": "text", + "cross_page": true + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 278, + 506, + 291 + ], + "spans": [ + { + "bbox": [ + 105, + 278, + 506, + 291 + ], + "score": 1.0, + "content": "to train the robot on the same terrains as its quadrupedal counterparts. Fig. 6 shows the different", + "type": "text", + "cross_page": true + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 289, + 137, + 300 + ], + "spans": [ + { + "bbox": [ + 105, + 289, + 137, + 300 + ], + "score": 1.0, + "content": "robots.", + "type": "text", + "cross_page": true + } + ], + "index": 14 + } + ], + "index": 41.5, + "bbox_fs": [ + 106, + 669, + 505, + 693 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "image", + "bbox": [ + 109, + 80, + 502, + 155 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 109, + 80, + 502, + 155 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 109, + 80, + 502, + 155 + ], + "spans": [ + { + "bbox": [ + 109, + 80, + 502, + 155 + ], + "score": 0.965, + "type": "image", + "image_path": "f0dd8a9ddc78c32773c604659ba9f56cbd659b68c879ccbb91f00fe6ccb27b59.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 109, + 80, + 502, + 105.0 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 109, + 105.0, + 502, + 130.0 + ], + "spans": [], + "index": 1 + }, + { + "bbox": [ + 109, + 130.0, + 502, + 155.0 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 132, + 160, + 476, + 172 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 133, + 160, + 478, + 174 + ], + "spans": [ + { + "bbox": [ + 133, + 160, + 319, + 174 + ], + "score": 
1.0, + "content": "Figure 7: Locomotion policy, trained in under", + "type": "text" + }, + { + "bbox": [ + 319, + 160, + 346, + 171 + ], + "score": 0.33, + "content": "2 0 \\mathrm { { m i n } }", + "type": "inline_equation" + }, + { + "bbox": [ + 346, + 160, + 478, + 174 + ], + "score": 1.0, + "content": ", deployed on the physical robot.", + "type": "text" + } + ], + "index": 3 + } + ], + "index": 3 + } + ], + "index": 2.0 + }, + { + "type": "text", + "bbox": [ + 107, + 180, + 505, + 300 + ], + "lines": [ + { + "bbox": [ + 105, + 179, + 505, + 191 + ], + "spans": [ + { + "bbox": [ + 105, + 179, + 505, + 191 + ], + "score": 1.0, + "content": "weight, and the ANYmal B robot, which has comparable dimensions but modified kinematic and", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 191, + 504, + 202 + ], + "spans": [ + { + "bbox": [ + 106, + 191, + 504, + 202 + ], + "score": 1.0, + "content": "dynamic properties. In these two cases, we can retrain a policy without any modifications to the", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 202, + 505, + 213 + ], + "spans": [ + { + "bbox": [ + 105, + 202, + 505, + 213 + ], + "score": 1.0, + "content": "rewards or algorithm hyper-parameters and obtain a very similar performance. Next, we use the", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 212, + 505, + 225 + ], + "spans": [ + { + "bbox": [ + 105, + 212, + 505, + 225 + ], + "score": 1.0, + "content": "Unitree A1 robot, which has smaller dimensions, four times lower weight, and a different leg con-", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 222, + 505, + 236 + ], + "spans": [ + { + "bbox": [ + 105, + 222, + 505, + 236 + ], + "score": 1.0, + "content": "figuration. 
In this case, we remove the actuator model of the ANYdrive motors, reduce PD gains", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 234, + 505, + 247 + ], + "spans": [ + { + "bbox": [ + 105, + 234, + 505, + 247 + ], + "score": 1.0, + "content": "and the torque penalties, and change the default joint configurations. We can train a dynamic policy", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 245, + 505, + 258 + ], + "spans": [ + { + "bbox": [ + 105, + 245, + 505, + 258 + ], + "score": 1.0, + "content": "that learns to solve the same terrains even with the reduced size of the robot. Finally, we apply our", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 255, + 506, + 270 + ], + "spans": [ + { + "bbox": [ + 105, + 255, + 506, + 270 + ], + "score": 1.0, + "content": "approach to Agility Robotics’ bipedal robot Cassie. We find that an additional reward encouraging", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 267, + 505, + 279 + ], + "spans": [ + { + "bbox": [ + 105, + 267, + 505, + 279 + ], + "score": 1.0, + "content": "standing on a single foot is necessary to achieve a walking gait. With this addition, we are able", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 278, + 506, + 291 + ], + "spans": [ + { + "bbox": [ + 105, + 278, + 506, + 291 + ], + "score": 1.0, + "content": "to train the robot on the same terrains as its quadrupedal counterparts. Fig. 
6 shows the different", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 289, + 137, + 300 + ], + "spans": [ + { + "bbox": [ + 105, + 289, + 137, + 300 + ], + "score": 1.0, + "content": "robots.", + "type": "text" + } + ], + "index": 14 + } + ], + "index": 9 + }, + { + "type": "title", + "bbox": [ + 107, + 313, + 217, + 324 + ], + "lines": [ + { + "bbox": [ + 106, + 313, + 218, + 326 + ], + "spans": [ + { + "bbox": [ + 106, + 313, + 218, + 326 + ], + "score": 1.0, + "content": "4.3 Sim-to-real Transfer", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 15 + }, + { + "type": "text", + "bbox": [ + 107, + 333, + 504, + 377 + ], + "lines": [ + { + "bbox": [ + 105, + 333, + 506, + 346 + ], + "spans": [ + { + "bbox": [ + 105, + 333, + 506, + 346 + ], + "score": 1.0, + "content": "On the physical robot, our policy is fixed. We compute the observations from the robot’s sensors,", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 344, + 506, + 357 + ], + "spans": [ + { + "bbox": [ + 105, + 344, + 506, + 357 + ], + "score": 1.0, + "content": "feed them to the policy, and directly send the produced actions as target joint positions to the mo-", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 355, + 506, + 368 + ], + "spans": [ + { + "bbox": [ + 105, + 355, + 506, + 368 + ], + "score": 1.0, + "content": "tors. We do not apply any additional filtering or constraint satisfaction checks. 
The terrain height", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 366, + 478, + 379 + ], + "spans": [ + { + "bbox": [ + 105, + 366, + 478, + 379 + ], + "score": 1.0, + "content": "measurements are queried from an elevation map that the robot is building from Lidar scans.", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 17.5 + }, + { + "type": "text", + "bbox": [ + 107, + 382, + 505, + 470 + ], + "lines": [ + { + "bbox": [ + 106, + 382, + 505, + 394 + ], + "spans": [ + { + "bbox": [ + 106, + 382, + 505, + 394 + ], + "score": 1.0, + "content": "Unfortunately, this height map is far from perfect, which results in a decrease in robustness between", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 393, + 505, + 406 + ], + "spans": [ + { + "bbox": [ + 106, + 393, + 505, + 406 + ], + "score": 1.0, + "content": "simulation and reality. We observe that these issues mainly occur at high velocities and therefore", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 106, + 404, + 505, + 417 + ], + "spans": [ + { + "bbox": [ + 106, + 405, + 312, + 417 + ], + "score": 1.0, + "content": "reduce the maximum linear velocity commands to", + "type": "text" + }, + { + "bbox": [ + 312, + 404, + 345, + 416 + ], + "score": 0.86, + "content": "0 . 6 \\mathrm { m } / \\mathrm { s }", + "type": "inline_equation" + }, + { + "bbox": [ + 346, + 405, + 505, + 417 + ], + "score": 1.0, + "content": "for policies deployed on the hardware.", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 107, + 416, + 505, + 426 + ], + "spans": [ + { + "bbox": [ + 107, + 416, + 505, + 426 + ], + "score": 1.0, + "content": "The robot can walk up and down stairs and handles obstacles in a dynamic manner. We show samples", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 426, + 505, + 438 + ], + "spans": [ + { + "bbox": [ + 106, + 426, + 505, + 438 + ], + "score": 1.0, + "content": "of these experiments in Fig. 
7 and in the supplementary video. To overcome issues with imperfect", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 436, + 505, + 451 + ], + "spans": [ + { + "bbox": [ + 105, + 436, + 505, + 451 + ], + "score": 1.0, + "content": "terrain mapping or state estimation drift, the authors of [19] implemented a teacher-student set-up,", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 447, + 505, + 461 + ], + "spans": [ + { + "bbox": [ + 105, + 447, + 505, + 461 + ], + "score": 1.0, + "content": "which provided outstanding robustness even in adverse conditions. As part of future work, we plan", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 106, + 459, + 226, + 471 + ], + "spans": [ + { + "bbox": [ + 106, + 459, + 226, + 471 + ], + "score": 1.0, + "content": "to merge the two approaches.", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 23.5 + }, + { + "type": "title", + "bbox": [ + 107, + 485, + 183, + 498 + ], + "lines": [ + { + "bbox": [ + 104, + 483, + 185, + 502 + ], + "spans": [ + { + "bbox": [ + 104, + 483, + 185, + 502 + ], + "score": 1.0, + "content": "5 Conclusion", + "type": "text" + } + ], + "index": 28 + } + ], + "index": 28 + }, + { + "type": "text", + "bbox": [ + 107, + 510, + 505, + 609 + ], + "lines": [ + { + "bbox": [ + 105, + 510, + 504, + 522 + ], + "spans": [ + { + "bbox": [ + 105, + 510, + 504, + 522 + ], + "score": 1.0, + "content": "In this work, we demonstrated that a complex real-world robotics task can be trained in minutes with", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 521, + 505, + 534 + ], + "spans": [ + { + "bbox": [ + 105, + 521, + 505, + 534 + ], + "score": 1.0, + "content": "an on-policy deep reinforcement learning algorithm. 
Using an end-to-end GPU pipeline with thou-", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 532, + 505, + 545 + ], + "spans": [ + { + "bbox": [ + 105, + 532, + 505, + 545 + ], + "score": 1.0, + "content": "sands of robots simulated in parallel, combined with our proposed curriculum structure, we showed", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 542, + 505, + 556 + ], + "spans": [ + { + "bbox": [ + 105, + 542, + 505, + 556 + ], + "score": 1.0, + "content": "that the training time can be reduced by multiple orders of magnitude compared to previous work.", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 106, + 553, + 506, + 567 + ], + "spans": [ + { + "bbox": [ + 106, + 553, + 506, + 567 + ], + "score": 1.0, + "content": "We discussed multiple modifications to the learning algorithm and the standard hyper-parameters", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 565, + 505, + 578 + ], + "spans": [ + { + "bbox": [ + 105, + 565, + 505, + 578 + ], + "score": 1.0, + "content": "required to use the massively parallel regime effectively. Using our fast training pipeline, we per-", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 576, + 505, + 588 + ], + "spans": [ + { + "bbox": [ + 105, + 576, + 505, + 588 + ], + "score": 1.0, + "content": "formed many training runs, simplified the set-up, and kept only essential components. 
We showed", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 586, + 505, + 599 + ], + "spans": [ + { + "bbox": [ + 105, + 586, + 505, + 599 + ], + "score": 1.0, + "content": "that the task can be solved using simple observation and action spaces as well as relatively straight-", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 597, + 448, + 610 + ], + "spans": [ + { + "bbox": [ + 105, + 597, + 448, + 610 + ], + "score": 1.0, + "content": "forward rewards without encouraging particular gaits or providing motion primitives.", + "type": "text" + } + ], + "index": 37 + } + ], + "index": 33 + }, + { + "type": "text", + "bbox": [ + 107, + 614, + 504, + 691 + ], + "lines": [ + { + "bbox": [ + 106, + 614, + 505, + 626 + ], + "spans": [ + { + "bbox": [ + 106, + 614, + 505, + 626 + ], + "score": 1.0, + "content": "The purpose of this work is not to obtain the absolute best-performing policy with the highest ro-", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 625, + 506, + 637 + ], + "spans": [ + { + "bbox": [ + 105, + 625, + 506, + 637 + ], + "score": 1.0, + "content": "bustness. For that use case, many other techniques can be incorporated into the pipeline. We aim", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 636, + 506, + 648 + ], + "spans": [ + { + "bbox": [ + 105, + 636, + 506, + 648 + ], + "score": 1.0, + "content": "to show that a policy can be trained in record time with our set-up while still being usable on the", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 647, + 506, + 659 + ], + "spans": [ + { + "bbox": [ + 105, + 647, + 506, + 659 + ], + "score": 1.0, + "content": "real hardware. 
We wish to shift other researchers’ perspective on the required training time for a", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 656, + 506, + 671 + ], + "spans": [ + { + "bbox": [ + 105, + 656, + 506, + 671 + ], + "score": 1.0, + "content": "real-world application, and hope that our work can serve as a reference for future research. We ex-", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 668, + 506, + 681 + ], + "spans": [ + { + "bbox": [ + 105, + 668, + 506, + 681 + ], + "score": 1.0, + "content": "pect many other tasks to benefit from the massively parallel regime. By reducing the training time", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 680, + 446, + 692 + ], + "spans": [ + { + "bbox": [ + 105, + 680, + 446, + 692 + ], + "score": 1.0, + "content": "of these future robotic tasks, we can greatly accelerate the developments in this field.", + "type": "text" + } + ], + "index": 44 + } + ], + "index": 41 + } + ], + "page_idx": 7, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 302, + 742, + 308, + 750 + ], + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 752 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 752 + ], + "score": 1.0, + "content": "8", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "image", + "bbox": [ + 109, + 80, + 502, + 155 + ], + "blocks": [ + { + "type": "image_body", + "bbox": [ + 109, + 80, + 502, + 155 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 109, + 80, + 502, + 155 + ], + "spans": [ + { + "bbox": [ + 109, + 80, + 502, + 155 + ], + "score": 0.965, + "type": "image", + "image_path": "f0dd8a9ddc78c32773c604659ba9f56cbd659b68c879ccbb91f00fe6ccb27b59.jpg" + } + ] + } + ], + "index": 1, + "virtual_lines": [ + { + "bbox": [ + 109, + 80, + 502, + 105.0 + ], + "spans": [], + "index": 0 + }, + { + "bbox": [ + 109, + 105.0, + 502, + 130.0 + ], + "spans": [], + "index": 1 + }, 
+ { + "bbox": [ + 109, + 130.0, + 502, + 155.0 + ], + "spans": [], + "index": 2 + } + ] + }, + { + "type": "image_caption", + "bbox": [ + 132, + 160, + 476, + 172 + ], + "group_id": 0, + "lines": [ + { + "bbox": [ + 133, + 160, + 478, + 174 + ], + "spans": [ + { + "bbox": [ + 133, + 160, + 319, + 174 + ], + "score": 1.0, + "content": "Figure 7: Locomotion policy, trained in under", + "type": "text" + }, + { + "bbox": [ + 319, + 160, + 346, + 171 + ], + "score": 0.33, + "content": "2 0 \\mathrm { { m i n } }", + "type": "inline_equation" + }, + { + "bbox": [ + 346, + 160, + 478, + 174 + ], + "score": 1.0, + "content": ", deployed on the physical robot.", + "type": "text" + } + ], + "index": 3 + } + ], + "index": 3 + } + ], + "index": 2.0 + }, + { + "type": "text", + "bbox": [ + 107, + 180, + 505, + 300 + ], + "lines": [], + "index": 9, + "bbox_fs": [ + 105, + 179, + 506, + 300 + ], + "lines_deleted": true + }, + { + "type": "title", + "bbox": [ + 107, + 313, + 217, + 324 + ], + "lines": [ + { + "bbox": [ + 106, + 313, + 218, + 326 + ], + "spans": [ + { + "bbox": [ + 106, + 313, + 218, + 326 + ], + "score": 1.0, + "content": "4.3 Sim-to-real Transfer", + "type": "text" + } + ], + "index": 15 + } + ], + "index": 15 + }, + { + "type": "text", + "bbox": [ + 107, + 333, + 504, + 377 + ], + "lines": [ + { + "bbox": [ + 105, + 333, + 506, + 346 + ], + "spans": [ + { + "bbox": [ + 105, + 333, + 506, + 346 + ], + "score": 1.0, + "content": "On the physical robot, our policy is fixed. 
We compute the observations from the robot’s sensors,", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 344, + 506, + 357 + ], + "spans": [ + { + "bbox": [ + 105, + 344, + 506, + 357 + ], + "score": 1.0, + "content": "feed them to the policy, and directly send the produced actions as target joint positions to the mo-", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 355, + 506, + 368 + ], + "spans": [ + { + "bbox": [ + 105, + 355, + 506, + 368 + ], + "score": 1.0, + "content": "tors. We do not apply any additional filtering or constraint satisfaction checks. The terrain height", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 366, + 478, + 379 + ], + "spans": [ + { + "bbox": [ + 105, + 366, + 478, + 379 + ], + "score": 1.0, + "content": "measurements are queried from an elevation map that the robot is building from Lidar scans.", + "type": "text" + } + ], + "index": 19 + } + ], + "index": 17.5, + "bbox_fs": [ + 105, + 333, + 506, + 379 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 382, + 505, + 470 + ], + "lines": [ + { + "bbox": [ + 106, + 382, + 505, + 394 + ], + "spans": [ + { + "bbox": [ + 106, + 382, + 505, + 394 + ], + "score": 1.0, + "content": "Unfortunately, this height map is far from perfect, which results in a decrease in robustness between", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 106, + 393, + 505, + 406 + ], + "spans": [ + { + "bbox": [ + 106, + 393, + 505, + 406 + ], + "score": 1.0, + "content": "simulation and reality. We observe that these issues mainly occur at high velocities and therefore", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 106, + 404, + 505, + 417 + ], + "spans": [ + { + "bbox": [ + 106, + 405, + 312, + 417 + ], + "score": 1.0, + "content": "reduce the maximum linear velocity commands to", + "type": "text" + }, + { + "bbox": [ + 312, + 404, + 345, + 416 + ], + "score": 0.86, + "content": "0 . 
6 \\mathrm { m } / \\mathrm { s }", + "type": "inline_equation" + }, + { + "bbox": [ + 346, + 405, + 505, + 417 + ], + "score": 1.0, + "content": "for policies deployed on the hardware.", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 107, + 416, + 505, + 426 + ], + "spans": [ + { + "bbox": [ + 107, + 416, + 505, + 426 + ], + "score": 1.0, + "content": "The robot can walk up and down stairs and handles obstacles in a dynamic manner. We show samples", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 106, + 426, + 505, + 438 + ], + "spans": [ + { + "bbox": [ + 106, + 426, + 505, + 438 + ], + "score": 1.0, + "content": "of these experiments in Fig. 7 and in the supplementary video. To overcome issues with imperfect", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 436, + 505, + 451 + ], + "spans": [ + { + "bbox": [ + 105, + 436, + 505, + 451 + ], + "score": 1.0, + "content": "terrain mapping or state estimation drift, the authors of [19] implemented a teacher-student set-up,", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 447, + 505, + 461 + ], + "spans": [ + { + "bbox": [ + 105, + 447, + 505, + 461 + ], + "score": 1.0, + "content": "which provided outstanding robustness even in adverse conditions. 
As part of future work, we plan", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 106, + 459, + 226, + 471 + ], + "spans": [ + { + "bbox": [ + 106, + 459, + 226, + 471 + ], + "score": 1.0, + "content": "to merge the two approaches.", + "type": "text" + } + ], + "index": 27 + } + ], + "index": 23.5, + "bbox_fs": [ + 105, + 382, + 505, + 471 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 485, + 183, + 498 + ], + "lines": [ + { + "bbox": [ + 104, + 483, + 185, + 502 + ], + "spans": [ + { + "bbox": [ + 104, + 483, + 185, + 502 + ], + "score": 1.0, + "content": "5 Conclusion", + "type": "text" + } + ], + "index": 28 + } + ], + "index": 28 + }, + { + "type": "text", + "bbox": [ + 107, + 510, + 505, + 609 + ], + "lines": [ + { + "bbox": [ + 105, + 510, + 504, + 522 + ], + "spans": [ + { + "bbox": [ + 105, + 510, + 504, + 522 + ], + "score": 1.0, + "content": "In this work, we demonstrated that a complex real-world robotics task can be trained in minutes with", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 105, + 521, + 505, + 534 + ], + "spans": [ + { + "bbox": [ + 105, + 521, + 505, + 534 + ], + "score": 1.0, + "content": "an on-policy deep reinforcement learning algorithm. 
Using an end-to-end GPU pipeline with thou-", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 532, + 505, + 545 + ], + "spans": [ + { + "bbox": [ + 105, + 532, + 505, + 545 + ], + "score": 1.0, + "content": "sands of robots simulated in parallel, combined with our proposed curriculum structure, we showed", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 542, + 505, + 556 + ], + "spans": [ + { + "bbox": [ + 105, + 542, + 505, + 556 + ], + "score": 1.0, + "content": "that the training time can be reduced by multiple orders of magnitude compared to previous work.", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 106, + 553, + 506, + 567 + ], + "spans": [ + { + "bbox": [ + 106, + 553, + 506, + 567 + ], + "score": 1.0, + "content": "We discussed multiple modifications to the learning algorithm and the standard hyper-parameters", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 565, + 505, + 578 + ], + "spans": [ + { + "bbox": [ + 105, + 565, + 505, + 578 + ], + "score": 1.0, + "content": "required to use the massively parallel regime effectively. Using our fast training pipeline, we per-", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 576, + 505, + 588 + ], + "spans": [ + { + "bbox": [ + 105, + 576, + 505, + 588 + ], + "score": 1.0, + "content": "formed many training runs, simplified the set-up, and kept only essential components. 
We showed", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 105, + 586, + 505, + 599 + ], + "spans": [ + { + "bbox": [ + 105, + 586, + 505, + 599 + ], + "score": 1.0, + "content": "that the task can be solved using simple observation and action spaces as well as relatively straight-", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 597, + 448, + 610 + ], + "spans": [ + { + "bbox": [ + 105, + 597, + 448, + 610 + ], + "score": 1.0, + "content": "forward rewards without encouraging particular gaits or providing motion primitives.", + "type": "text" + } + ], + "index": 37 + } + ], + "index": 33, + "bbox_fs": [ + 105, + 510, + 506, + 610 + ] + }, + { + "type": "text", + "bbox": [ + 107, + 614, + 504, + 691 + ], + "lines": [ + { + "bbox": [ + 106, + 614, + 505, + 626 + ], + "spans": [ + { + "bbox": [ + 106, + 614, + 505, + 626 + ], + "score": 1.0, + "content": "The purpose of this work is not to obtain the absolute best-performing policy with the highest ro-", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 105, + 625, + 506, + 637 + ], + "spans": [ + { + "bbox": [ + 105, + 625, + 506, + 637 + ], + "score": 1.0, + "content": "bustness. For that use case, many other techniques can be incorporated into the pipeline. We aim", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 636, + 506, + 648 + ], + "spans": [ + { + "bbox": [ + 105, + 636, + 506, + 648 + ], + "score": 1.0, + "content": "to show that a policy can be trained in record time with our set-up while still being usable on the", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 105, + 647, + 506, + 659 + ], + "spans": [ + { + "bbox": [ + 105, + 647, + 506, + 659 + ], + "score": 1.0, + "content": "real hardware. 
We wish to shift other researchers’ perspective on the required training time for a", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 105, + 656, + 506, + 671 + ], + "spans": [ + { + "bbox": [ + 105, + 656, + 506, + 671 + ], + "score": 1.0, + "content": "real-world application, and hope that our work can serve as a reference for future research. We ex-", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 105, + 668, + 506, + 681 + ], + "spans": [ + { + "bbox": [ + 105, + 668, + 506, + 681 + ], + "score": 1.0, + "content": "pect many other tasks to benefit from the massively parallel regime. By reducing the training time", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 680, + 446, + 692 + ], + "spans": [ + { + "bbox": [ + 105, + 680, + 446, + 692 + ], + "score": 1.0, + "content": "of these future robotic tasks, we can greatly accelerate the developments in this field.", + "type": "text" + } + ], + "index": 44 + } + ], + "index": 41, + "bbox_fs": [ + 105, + 614, + 506, + 692 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "title", + "bbox": [ + 107, + 73, + 186, + 84 + ], + "lines": [ + { + "bbox": [ + 106, + 71, + 187, + 86 + ], + "spans": [ + { + "bbox": [ + 106, + 71, + 187, + 86 + ], + "score": 1.0, + "content": "Acknowledgments", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 108, + 91, + 503, + 125 + ], + "lines": [ + { + "bbox": [ + 106, + 91, + 505, + 103 + ], + "spans": [ + { + "bbox": [ + 106, + 91, + 505, + 103 + ], + "score": 1.0, + "content": "We would like to thank Mayank Mittal, Joonho Lee, Takahiro Miki, and Peter Werner for their", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 102, + 505, + 114 + ], + "spans": [ + { + "bbox": [ + 106, + 102, + 505, + 114 + ], + "score": 1.0, + "content": "valuable suggestions and help with hardware experiments as well as the Isaac Gym and PhysX", + "type": "text" + } + ], + "index": 
2 + }, + { + "bbox": [ + 106, + 114, + 248, + 126 + ], + "spans": [ + { + "bbox": [ + 106, + 114, + 248, + 126 + ], + "score": 1.0, + "content": "teams for their continuous support.", + "type": "text" + } + ], + "index": 3 + } + ], + "index": 2 + }, + { + "type": "title", + "bbox": [ + 107, + 140, + 163, + 152 + ], + "lines": [ + { + "bbox": [ + 106, + 138, + 165, + 154 + ], + "spans": [ + { + "bbox": [ + 106, + 138, + 165, + 154 + ], + "score": 1.0, + "content": "References", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 4 + }, + { + "type": "text", + "bbox": [ + 106, + 156, + 506, + 727 + ], + "lines": [ + { + "bbox": [ + 109, + 158, + 505, + 172 + ], + "spans": [ + { + "bbox": [ + 109, + 158, + 505, + 172 + ], + "score": 1.0, + "content": "[1] J. Hwangbo, J. Lee, A. Dosovitskiy, D. Bellicoso, V. Tsounis, V. Koltun, and M. Hutter. Learn-", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 126, + 169, + 468, + 183 + ], + "spans": [ + { + "bbox": [ + 126, + 169, + 468, + 183 + ], + "score": 1.0, + "content": "ing agile and dynamic motor skills for legged robots. Science Robotics, 4(26), 2019.", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 188, + 505, + 202 + ], + "spans": [ + { + "bbox": [ + 111, + 188, + 505, + 202 + ], + "score": 1.0, + "content": "[2] S. Gu, E. Holly, T. Lillicrap, and S. Levine. Deep reinforcement learning for robotic manip-", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 126, + 199, + 506, + 213 + ], + "spans": [ + { + "bbox": [ + 126, + 199, + 506, + 213 + ], + "score": 1.0, + "content": "ulation with asynchronous off-policy updates. 
In IEEE International Conference on Robotics", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 127, + 209, + 275, + 223 + ], + "spans": [ + { + "bbox": [ + 127, + 209, + 275, + 223 + ], + "score": 1.0, + "content": "and Automation (ICRA), May 2017.", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 110, + 228, + 506, + 243 + ], + "spans": [ + { + "bbox": [ + 110, + 228, + 506, + 243 + ], + "score": 1.0, + "content": "[3] G. Kahn, A. Villaflor, B. Ding, P. Abbeel, and S. Levine. Self-supervised deep reinforcement", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 127, + 239, + 506, + 253 + ], + "spans": [ + { + "bbox": [ + 127, + 239, + 506, + 253 + ], + "score": 1.0, + "content": "learning with generalized computation graphs for robot navigation. In IEEE International", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 128, + 250, + 351, + 263 + ], + "spans": [ + { + "bbox": [ + 128, + 250, + 351, + 263 + ], + "score": 1.0, + "content": "Conference on Robotics and Automation (ICRA), 2018.", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 110, + 268, + 505, + 282 + ], + "spans": [ + { + "bbox": [ + 110, + 268, + 505, + 282 + ], + "score": 1.0, + "content": "[4] OpenAI, I. Akkaya, M. Andrychowicz, M. Chociej, M. Litwin, B. McGrew, A. Petron,", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 126, + 278, + 507, + 294 + ], + "spans": [ + { + "bbox": [ + 126, + 278, + 507, + 294 + ], + "score": 1.0, + "content": "A. Paino, M. Plappert, G. Powell, R. Ribas, J. Schneider, N. Tezak, J. Tworek, P. Welinder,", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 127, + 291, + 504, + 303 + ], + "spans": [ + { + "bbox": [ + 127, + 291, + 504, + 303 + ], + "score": 1.0, + "content": "L. Weng, Q. Yuan, W. Zaremba, and L. Zhang. 
Solving rubik’s cube with a robot hand, 2019.", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 110, + 308, + 506, + 322 + ], + "spans": [ + { + "bbox": [ + 110, + 308, + 506, + 322 + ], + "score": 1.0, + "content": "[5] E. Todorov, T. Erez, and Y. Tassa. Mujoco: A physics engine for model-based control. In", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 128, + 320, + 468, + 332 + ], + "spans": [ + { + "bbox": [ + 128, + 320, + 468, + 332 + ], + "score": 1.0, + "content": "IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), 2012.", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 110, + 338, + 505, + 352 + ], + "spans": [ + { + "bbox": [ + 110, + 338, + 505, + 352 + ], + "score": 1.0, + "content": "[6] E. Coumans and Y. Bai. Pybullet, a python module for physics simulation for games, robotics", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 127, + 348, + 374, + 362 + ], + "spans": [ + { + "bbox": [ + 127, + 348, + 374, + 362 + ], + "score": 1.0, + "content": "and machine learning. http://pybullet.org, 2016–2021.", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 110, + 367, + 505, + 381 + ], + "spans": [ + { + "bbox": [ + 110, + 367, + 505, + 381 + ], + "score": 1.0, + "content": "[7] J. Hwangbo, J. Lee, and M. Hutter. Per-contact iteration method for solving contact dynamics.", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 128, + 379, + 436, + 391 + ], + "spans": [ + { + "bbox": [ + 128, + 379, + 436, + 391 + ], + "score": 1.0, + "content": "IEEE Robotics and Automation Letters, 3(2), 2018. URL www.raisim.com.", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 110, + 397, + 505, + 410 + ], + "spans": [ + { + "bbox": [ + 110, + 397, + 505, + 410 + ], + "score": 1.0, + "content": "[8] V. Makoviychuk, L. Wawrzyniak, Y. Guo, M. Lu, K. Storey, M. Macklin, D. Hoeller, N. 
Rudin,", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 126, + 406, + 506, + 423 + ], + "spans": [ + { + "bbox": [ + 126, + 406, + 506, + 423 + ], + "score": 1.0, + "content": "A. Allshire, A. Handa, and G. State. Isaac gym: High performance GPU based physics simu-", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 127, + 419, + 505, + 433 + ], + "spans": [ + { + "bbox": [ + 127, + 419, + 505, + 433 + ], + "score": 1.0, + "content": "lation for robot learning. In Conference on Neural Information Processing Systems (NeurIPS)", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 127, + 429, + 287, + 443 + ], + "spans": [ + { + "bbox": [ + 127, + 429, + 287, + 443 + ], + "score": 1.0, + "content": "Datasets and Benchmarks Track, 2021.", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 109, + 446, + 506, + 463 + ], + "spans": [ + { + "bbox": [ + 109, + 446, + 506, + 463 + ], + "score": 1.0, + "content": "[9] N. Heess, D. TB, S. Sriram, J. Lemmon, J. Merel, G. Wayne, Y. Tassa, T. Erez, Z. Wang,", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 126, + 458, + 505, + 473 + ], + "spans": [ + { + "bbox": [ + 126, + 458, + 505, + 473 + ], + "score": 1.0, + "content": "S. M. A. Eslami, M. A. Riedmiller, and D. Silver. Emergence of locomotion behaviours in rich", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 126, + 469, + 310, + 483 + ], + "spans": [ + { + "bbox": [ + 126, + 469, + 310, + 483 + ], + "score": 1.0, + "content": "environments. CoRR, abs/1707.02286, 2017.", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 488, + 505, + 501 + ], + "spans": [ + { + "bbox": [ + 106, + 488, + 505, + 501 + ], + "score": 1.0, + "content": "[10] A. Stooke and P. Abbeel. Accelerated methods for deep reinforcement learning. 
CoRR,", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 127, + 499, + 222, + 511 + ], + "spans": [ + { + "bbox": [ + 127, + 499, + 222, + 511 + ], + "score": 1.0, + "content": "abs/1803.02811, 2018.", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 516, + 506, + 531 + ], + "spans": [ + { + "bbox": [ + 105, + 516, + 506, + 531 + ], + "score": 1.0, + "content": "[11] B. Shacklett, E. Wijmans, A. Petrenko, M. Savva, D. Batra, V. Koltun, and K. Fatahalian. Large", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 126, + 526, + 506, + 544 + ], + "spans": [ + { + "bbox": [ + 126, + 526, + 506, + 544 + ], + "score": 1.0, + "content": "batch simulation for deep reinforcement learning. In International Conference on Learning", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 127, + 540, + 252, + 551 + ], + "spans": [ + { + "bbox": [ + 127, + 540, + 252, + 551 + ], + "score": 1.0, + "content": "Representations (ICLR), 2021.", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 106, + 557, + 506, + 571 + ], + "spans": [ + { + "bbox": [ + 106, + 557, + 506, + 571 + ], + "score": 1.0, + "content": "[12] J. Liang, V. Makoviychuk, A. Handa, N. Chentanez, M. Macklin, and D. Fox. Gpu-accelerated", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 125, + 567, + 506, + 583 + ], + "spans": [ + { + "bbox": [ + 125, + 567, + 506, + 583 + ], + "score": 1.0, + "content": "robotic simulation for distributed reinforcement learning. In Conference on Robot Learning", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 127, + 579, + 187, + 592 + ], + "spans": [ + { + "bbox": [ + 127, + 579, + 187, + 592 + ], + "score": 1.0, + "content": "(CoRL), 2018.", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 105, + 597, + 505, + 612 + ], + "spans": [ + { + "bbox": [ + 105, + 597, + 505, + 612 + ], + "score": 1.0, + "content": "[13] C. D. Freeman, E. Frey, A. Raichuk, S. Girgin, I. 
Mordatch, and O. Bachem. Brax - a differ-", + "type": "text" + } + ], + "index": 37 + }, + { + "bbox": [ + 127, + 608, + 506, + 622 + ], + "spans": [ + { + "bbox": [ + 127, + 608, + 506, + 622 + ], + "score": 1.0, + "content": "entiable physics engine for large scale rigid body simulation. In 35th Conference on Neural", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 128, + 620, + 460, + 632 + ], + "spans": [ + { + "bbox": [ + 128, + 620, + 460, + 632 + ], + "score": 1.0, + "content": "Information Processing Systems (NeurIPS) Datasets and Benchmarks Track, 2021.", + "type": "text" + } + ], + "index": 39 + }, + { + "bbox": [ + 105, + 637, + 505, + 651 + ], + "spans": [ + { + "bbox": [ + 105, + 637, + 505, + 651 + ], + "score": 1.0, + "content": "[14] A. Bouman, M. F. Ginting, N. Alatur, M. Palieri, D. D. Fan, T. Touma, T. Pailevanian, S.-", + "type": "text" + } + ], + "index": 40 + }, + { + "bbox": [ + 126, + 648, + 505, + 663 + ], + "spans": [ + { + "bbox": [ + 126, + 648, + 505, + 663 + ], + "score": 1.0, + "content": "K. Kim, K. Otsu, J. Burdick, and A.-a. Agha-Mohammadi. Autonomous spot: Long-range", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 126, + 659, + 505, + 673 + ], + "spans": [ + { + "bbox": [ + 126, + 659, + 505, + 673 + ], + "score": 1.0, + "content": "autonomous exploration of extreme environments with legged locomotion. In IEEE/RSJ Inter-", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 126, + 669, + 407, + 685 + ], + "spans": [ + { + "bbox": [ + 126, + 669, + 407, + 685 + ], + "score": 1.0, + "content": "national Conference on Intelligent Robots and Systems (IROS), 2020.", + "type": "text" + } + ], + "index": 43 + }, + { + "bbox": [ + 105, + 689, + 506, + 702 + ], + "spans": [ + { + "bbox": [ + 105, + 689, + 506, + 702 + ], + "score": 1.0, + "content": "[15] C. Gehring, P. Fankhauser, L. Isler, R. Diethelm, S. Bachmann, M. Potz, L. 
Gerstenberg, and", + "type": "text" + } + ], + "index": 44 + }, + { + "bbox": [ + 126, + 700, + 506, + 714 + ], + "spans": [ + { + "bbox": [ + 126, + 700, + 506, + 714 + ], + "score": 1.0, + "content": "M. Hutter. Anymal in the field: Solving industrial inspection of an offshore hvdc platform with", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 126, + 711, + 362, + 724 + ], + "spans": [ + { + "bbox": [ + 126, + 711, + 362, + 724 + ], + "score": 1.0, + "content": "a quadrupedal robot. In Field and Service Robotics, 2021.", + "type": "text" + } + ], + "index": 46 + } + ], + "index": 25.5 + } + ], + "page_idx": 8, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 302, + 741, + 309, + 750 + ], + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 752 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 752 + ], + "score": 1.0, + "content": "9", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "title", + "bbox": [ + 107, + 73, + 186, + 84 + ], + "lines": [ + { + "bbox": [ + 106, + 71, + 187, + 86 + ], + "spans": [ + { + "bbox": [ + 106, + 71, + 187, + 86 + ], + "score": 1.0, + "content": "Acknowledgments", + "type": "text" + } + ], + "index": 0 + } + ], + "index": 0 + }, + { + "type": "text", + "bbox": [ + 108, + 91, + 503, + 125 + ], + "lines": [ + { + "bbox": [ + 106, + 91, + 505, + 103 + ], + "spans": [ + { + "bbox": [ + 106, + 91, + 505, + 103 + ], + "score": 1.0, + "content": "We would like to thank Mayank Mittal, Joonho Lee, Takahiro Miki, and Peter Werner for their", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 102, + 505, + 114 + ], + "spans": [ + { + "bbox": [ + 106, + 102, + 505, + 114 + ], + "score": 1.0, + "content": "valuable suggestions and help with hardware experiments as well as the Isaac Gym and PhysX", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 114, + 248, + 126 + ], + "spans": [ + { + "bbox": [ + 106, + 114, 
+ 248, + 126 + ], + "score": 1.0, + "content": "teams for their continuous support.", + "type": "text" + } + ], + "index": 3 + } + ], + "index": 2, + "bbox_fs": [ + 106, + 91, + 505, + 126 + ] + }, + { + "type": "title", + "bbox": [ + 107, + 140, + 163, + 152 + ], + "lines": [ + { + "bbox": [ + 106, + 138, + 165, + 154 + ], + "spans": [ + { + "bbox": [ + 106, + 138, + 165, + 154 + ], + "score": 1.0, + "content": "References", + "type": "text" + } + ], + "index": 4 + } + ], + "index": 4 + }, + { + "type": "list", + "bbox": [ + 106, + 156, + 506, + 727 + ], + "lines": [ + { + "bbox": [ + 109, + 158, + 505, + 172 + ], + "spans": [ + { + "bbox": [ + 109, + 158, + 505, + 172 + ], + "score": 1.0, + "content": "[1] J. Hwangbo, J. Lee, A. Dosovitskiy, D. Bellicoso, V. Tsounis, V. Koltun, and M. Hutter. Learn-", + "type": "text" + } + ], + "index": 5, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 169, + 468, + 183 + ], + "spans": [ + { + "bbox": [ + 126, + 169, + 468, + 183 + ], + "score": 1.0, + "content": "ing agile and dynamic motor skills for legged robots. Science Robotics, 4(26), 2019.", + "type": "text" + } + ], + "index": 6, + "is_list_end_line": true + }, + { + "bbox": [ + 111, + 188, + 505, + 202 + ], + "spans": [ + { + "bbox": [ + 111, + 188, + 505, + 202 + ], + "score": 1.0, + "content": "[2] S. Gu, E. Holly, T. Lillicrap, and S. Levine. Deep reinforcement learning for robotic manip-", + "type": "text" + } + ], + "index": 7, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 199, + 506, + 213 + ], + "spans": [ + { + "bbox": [ + 126, + 199, + 506, + 213 + ], + "score": 1.0, + "content": "ulation with asynchronous off-policy updates. 
In IEEE International Conference on Robotics", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 127, + 209, + 275, + 223 + ], + "spans": [ + { + "bbox": [ + 127, + 209, + 275, + 223 + ], + "score": 1.0, + "content": "and Automation (ICRA), May 2017.", + "type": "text" + } + ], + "index": 9, + "is_list_end_line": true + }, + { + "bbox": [ + 110, + 228, + 506, + 243 + ], + "spans": [ + { + "bbox": [ + 110, + 228, + 506, + 243 + ], + "score": 1.0, + "content": "[3] G. Kahn, A. Villaflor, B. Ding, P. Abbeel, and S. Levine. Self-supervised deep reinforcement", + "type": "text" + } + ], + "index": 10, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 239, + 506, + 253 + ], + "spans": [ + { + "bbox": [ + 127, + 239, + 506, + 253 + ], + "score": 1.0, + "content": "learning with generalized computation graphs for robot navigation. In IEEE International", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 128, + 250, + 351, + 263 + ], + "spans": [ + { + "bbox": [ + 128, + 250, + 351, + 263 + ], + "score": 1.0, + "content": "Conference on Robotics and Automation (ICRA), 2018.", + "type": "text" + } + ], + "index": 12, + "is_list_end_line": true + }, + { + "bbox": [ + 110, + 268, + 505, + 282 + ], + "spans": [ + { + "bbox": [ + 110, + 268, + 505, + 282 + ], + "score": 1.0, + "content": "[4] OpenAI, I. Akkaya, M. Andrychowicz, M. Chociej, M. Litwin, B. McGrew, A. Petron,", + "type": "text" + } + ], + "index": 13, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 278, + 507, + 294 + ], + "spans": [ + { + "bbox": [ + 126, + 278, + 507, + 294 + ], + "score": 1.0, + "content": "A. Paino, M. Plappert, G. Powell, R. Ribas, J. Schneider, N. Tezak, J. Tworek, P. Welinder,", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 127, + 291, + 504, + 303 + ], + "spans": [ + { + "bbox": [ + 127, + 291, + 504, + 303 + ], + "score": 1.0, + "content": "L. Weng, Q. Yuan, W. Zaremba, and L. Zhang. 
Solving rubik’s cube with a robot hand, 2019.", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 110, + 308, + 506, + 322 + ], + "spans": [ + { + "bbox": [ + 110, + 308, + 506, + 322 + ], + "score": 1.0, + "content": "[5] E. Todorov, T. Erez, and Y. Tassa. Mujoco: A physics engine for model-based control. In", + "type": "text" + } + ], + "index": 16, + "is_list_start_line": true + }, + { + "bbox": [ + 128, + 320, + 468, + 332 + ], + "spans": [ + { + "bbox": [ + 128, + 320, + 468, + 332 + ], + "score": 1.0, + "content": "IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), 2012.", + "type": "text" + } + ], + "index": 17, + "is_list_end_line": true + }, + { + "bbox": [ + 110, + 338, + 505, + 352 + ], + "spans": [ + { + "bbox": [ + 110, + 338, + 505, + 352 + ], + "score": 1.0, + "content": "[6] E. Coumans and Y. Bai. Pybullet, a python module for physics simulation for games, robotics", + "type": "text" + } + ], + "index": 18, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 348, + 374, + 362 + ], + "spans": [ + { + "bbox": [ + 127, + 348, + 374, + 362 + ], + "score": 1.0, + "content": "and machine learning. http://pybullet.org, 2016–2021.", + "type": "text" + } + ], + "index": 19, + "is_list_end_line": true + }, + { + "bbox": [ + 110, + 367, + 505, + 381 + ], + "spans": [ + { + "bbox": [ + 110, + 367, + 505, + 381 + ], + "score": 1.0, + "content": "[7] J. Hwangbo, J. Lee, and M. Hutter. Per-contact iteration method for solving contact dynamics.", + "type": "text" + } + ], + "index": 20, + "is_list_start_line": true + }, + { + "bbox": [ + 128, + 379, + 436, + 391 + ], + "spans": [ + { + "bbox": [ + 128, + 379, + 436, + 391 + ], + "score": 1.0, + "content": "IEEE Robotics and Automation Letters, 3(2), 2018. 
URL www.raisim.com.", + "type": "text" + } + ], + "index": 21, + "is_list_end_line": true + }, + { + "bbox": [ + 110, + 397, + 505, + 410 + ], + "spans": [ + { + "bbox": [ + 110, + 397, + 505, + 410 + ], + "score": 1.0, + "content": "[8] V. Makoviychuk, L. Wawrzyniak, Y. Guo, M. Lu, K. Storey, M. Macklin, D. Hoeller, N. Rudin,", + "type": "text" + } + ], + "index": 22, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 406, + 506, + 423 + ], + "spans": [ + { + "bbox": [ + 126, + 406, + 506, + 423 + ], + "score": 1.0, + "content": "A. Allshire, A. Handa, and G. State. Isaac gym: High performance GPU based physics simu-", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 127, + 419, + 505, + 433 + ], + "spans": [ + { + "bbox": [ + 127, + 419, + 505, + 433 + ], + "score": 1.0, + "content": "lation for robot learning. In Conference on Neural Information Processing Systems (NeurIPS)", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 127, + 429, + 287, + 443 + ], + "spans": [ + { + "bbox": [ + 127, + 429, + 287, + 443 + ], + "score": 1.0, + "content": "Datasets and Benchmarks Track, 2021.", + "type": "text" + } + ], + "index": 25, + "is_list_end_line": true + }, + { + "bbox": [ + 109, + 446, + 506, + 463 + ], + "spans": [ + { + "bbox": [ + 109, + 446, + 506, + 463 + ], + "score": 1.0, + "content": "[9] N. Heess, D. TB, S. Sriram, J. Lemmon, J. Merel, G. Wayne, Y. Tassa, T. Erez, Z. Wang,", + "type": "text" + } + ], + "index": 26, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 458, + 505, + 473 + ], + "spans": [ + { + "bbox": [ + 126, + 458, + 505, + 473 + ], + "score": 1.0, + "content": "S. M. A. Eslami, M. A. Riedmiller, and D. Silver. Emergence of locomotion behaviours in rich", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 126, + 469, + 310, + 483 + ], + "spans": [ + { + "bbox": [ + 126, + 469, + 310, + 483 + ], + "score": 1.0, + "content": "environments. 
CoRR, abs/1707.02286, 2017.", + "type": "text" + } + ], + "index": 28, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 488, + 505, + 501 + ], + "spans": [ + { + "bbox": [ + 106, + 488, + 505, + 501 + ], + "score": 1.0, + "content": "[10] A. Stooke and P. Abbeel. Accelerated methods for deep reinforcement learning. CoRR,", + "type": "text" + } + ], + "index": 29, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 499, + 222, + 511 + ], + "spans": [ + { + "bbox": [ + 127, + 499, + 222, + 511 + ], + "score": 1.0, + "content": "abs/1803.02811, 2018.", + "type": "text" + } + ], + "index": 30, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 516, + 506, + 531 + ], + "spans": [ + { + "bbox": [ + 105, + 516, + 506, + 531 + ], + "score": 1.0, + "content": "[11] B. Shacklett, E. Wijmans, A. Petrenko, M. Savva, D. Batra, V. Koltun, and K. Fatahalian. Large", + "type": "text" + } + ], + "index": 31, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 526, + 506, + 544 + ], + "spans": [ + { + "bbox": [ + 126, + 526, + 506, + 544 + ], + "score": 1.0, + "content": "batch simulation for deep reinforcement learning. In International Conference on Learning", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 127, + 540, + 252, + 551 + ], + "spans": [ + { + "bbox": [ + 127, + 540, + 252, + 551 + ], + "score": 1.0, + "content": "Representations (ICLR), 2021.", + "type": "text" + } + ], + "index": 33, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 557, + 506, + 571 + ], + "spans": [ + { + "bbox": [ + 106, + 557, + 506, + 571 + ], + "score": 1.0, + "content": "[12] J. Liang, V. Makoviychuk, A. Handa, N. Chentanez, M. Macklin, and D. Fox. Gpu-accelerated", + "type": "text" + } + ], + "index": 34, + "is_list_start_line": true + }, + { + "bbox": [ + 125, + 567, + 506, + 583 + ], + "spans": [ + { + "bbox": [ + 125, + 567, + 506, + 583 + ], + "score": 1.0, + "content": "robotic simulation for distributed reinforcement learning. 
In Conference on Robot Learning", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 127, + 579, + 187, + 592 + ], + "spans": [ + { + "bbox": [ + 127, + 579, + 187, + 592 + ], + "score": 1.0, + "content": "(CoRL), 2018.", + "type": "text" + } + ], + "index": 36, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 597, + 505, + 612 + ], + "spans": [ + { + "bbox": [ + 105, + 597, + 505, + 612 + ], + "score": 1.0, + "content": "[13] C. D. Freeman, E. Frey, A. Raichuk, S. Girgin, I. Mordatch, and O. Bachem. Brax - a differ-", + "type": "text" + } + ], + "index": 37, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 608, + 506, + 622 + ], + "spans": [ + { + "bbox": [ + 127, + 608, + 506, + 622 + ], + "score": 1.0, + "content": "entiable physics engine for large scale rigid body simulation. In 35th Conference on Neural", + "type": "text" + } + ], + "index": 38 + }, + { + "bbox": [ + 128, + 620, + 460, + 632 + ], + "spans": [ + { + "bbox": [ + 128, + 620, + 460, + 632 + ], + "score": 1.0, + "content": "Information Processing Systems (NeurIPS) Datasets and Benchmarks Track, 2021.", + "type": "text" + } + ], + "index": 39, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 637, + 505, + 651 + ], + "spans": [ + { + "bbox": [ + 105, + 637, + 505, + 651 + ], + "score": 1.0, + "content": "[14] A. Bouman, M. F. Ginting, N. Alatur, M. Palieri, D. D. Fan, T. Touma, T. Pailevanian, S.-", + "type": "text" + } + ], + "index": 40, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 648, + 505, + 663 + ], + "spans": [ + { + "bbox": [ + 126, + 648, + 505, + 663 + ], + "score": 1.0, + "content": "K. Kim, K. Otsu, J. Burdick, and A.-a. Agha-Mohammadi. Autonomous spot: Long-range", + "type": "text" + } + ], + "index": 41 + }, + { + "bbox": [ + 126, + 659, + 505, + 673 + ], + "spans": [ + { + "bbox": [ + 126, + 659, + 505, + 673 + ], + "score": 1.0, + "content": "autonomous exploration of extreme environments with legged locomotion. 
In IEEE/RSJ Inter-", + "type": "text" + } + ], + "index": 42 + }, + { + "bbox": [ + 126, + 669, + 407, + 685 + ], + "spans": [ + { + "bbox": [ + 126, + 669, + 407, + 685 + ], + "score": 1.0, + "content": "national Conference on Intelligent Robots and Systems (IROS), 2020.", + "type": "text" + } + ], + "index": 43, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 689, + 506, + 702 + ], + "spans": [ + { + "bbox": [ + 105, + 689, + 506, + 702 + ], + "score": 1.0, + "content": "[15] C. Gehring, P. Fankhauser, L. Isler, R. Diethelm, S. Bachmann, M. Potz, L. Gerstenberg, and", + "type": "text" + } + ], + "index": 44, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 700, + 506, + 714 + ], + "spans": [ + { + "bbox": [ + 126, + 700, + 506, + 714 + ], + "score": 1.0, + "content": "M. Hutter. Anymal in the field: Solving industrial inspection of an offshore hvdc platform with", + "type": "text" + } + ], + "index": 45 + }, + { + "bbox": [ + 126, + 711, + 362, + 724 + ], + "spans": [ + { + "bbox": [ + 126, + 711, + 362, + 724 + ], + "score": 1.0, + "content": "a quadrupedal robot. In Field and Service Robotics, 2021.", + "type": "text" + } + ], + "index": 46, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 72, + 505, + 87 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 505, + 87 + ], + "score": 1.0, + "content": "[16] J. Lee, J. Hwangbo, L. Wellhausen, V. Koltun, and M. Hutter. Learning quadrupedal locomo-", + "type": "text", + "cross_page": true + } + ], + "index": 0, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 84, + 371, + 97 + ], + "spans": [ + { + "bbox": [ + 127, + 84, + 371, + 97 + ], + "score": 1.0, + "content": "tion over challenging terrain. Science Robotics, 5(47), 2020.", + "type": "text", + "cross_page": true + } + ], + "index": 1, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 102, + 506, + 117 + ], + "spans": [ + { + "bbox": [ + 105, + 102, + 506, + 117 + ], + "score": 1.0, + "content": "[17] V. Tsounis, M. 
Alge, J. Lee, F. Farshidian, and M. Hutter. Deepgait: Planning and control of", + "type": "text", + "cross_page": true + } + ], + "index": 2, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 113, + 506, + 128 + ], + "spans": [ + { + "bbox": [ + 127, + 113, + 506, + 128 + ], + "score": 1.0, + "content": "quadrupedal gaits using deep reinforcement learning. IEEE Robotics and Automation Letters,", + "type": "text", + "cross_page": true + } + ], + "index": 3 + }, + { + "bbox": [ + 127, + 124, + 181, + 137 + ], + "spans": [ + { + "bbox": [ + 127, + 124, + 181, + 137 + ], + "score": 1.0, + "content": "PP, 03 2020.", + "type": "text", + "cross_page": true + } + ], + "index": 4, + "is_list_end_line": true + }, + { + "bbox": [ + 104, + 141, + 505, + 159 + ], + "spans": [ + { + "bbox": [ + 104, + 141, + 505, + 159 + ], + "score": 1.0, + "content": "[18] S. Gangapurwala, M. Geisert, R. Orsolino, M. Fallon, and I. Havoutis. Real-time trajectory", + "type": "text", + "cross_page": true + } + ], + "index": 5, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 155, + 504, + 168 + ], + "spans": [ + { + "bbox": [ + 127, + 155, + 504, + 168 + ], + "score": 1.0, + "content": "adaptation for quadrupedal locomotion using deep reinforcement learning. In IEEE Interna-", + "type": "text", + "cross_page": true + } + ], + "index": 6 + }, + { + "bbox": [ + 127, + 165, + 376, + 178 + ], + "spans": [ + { + "bbox": [ + 127, + 165, + 376, + 178 + ], + "score": 1.0, + "content": "tional Conference on Robotics and Automation (ICRA), 2021.", + "type": "text", + "cross_page": true + } + ], + "index": 7, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 184, + 505, + 197 + ], + "spans": [ + { + "bbox": [ + 106, + 184, + 505, + 197 + ], + "score": 1.0, + "content": "[19] T. Miki, J. Lee, L. Wellhausen, V. Koltun, and M. Hutter. 
Wild anymal: Robust zero-shot", + "type": "text", + "cross_page": true + } + ], + "index": 8, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 195, + 374, + 209 + ], + "spans": [ + { + "bbox": [ + 126, + 195, + 374, + 209 + ], + "score": 1.0, + "content": "perceptive locomotion. Submitted to Science Robotics, 2021.", + "type": "text", + "cross_page": true + } + ], + "index": 9, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 214, + 506, + 227 + ], + "spans": [ + { + "bbox": [ + 106, + 214, + 506, + 227 + ], + "score": 1.0, + "content": "[20] J. Siekmann, K. Green, J. Warila, A. Fern, and J. W. Hurst. Blind bipedal stair traversal via", + "type": "text", + "cross_page": true + } + ], + "index": 10, + "is_list_start_line": true + }, + { + "bbox": [ + 128, + 226, + 393, + 237 + ], + "spans": [ + { + "bbox": [ + 128, + 226, + 393, + 237 + ], + "score": 1.0, + "content": "sim-to-real reinforcement learning. CoRR, abs/2105.08328, 2021.", + "type": "text", + "cross_page": true + } + ], + "index": 11, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 243, + 506, + 258 + ], + "spans": [ + { + "bbox": [ + 105, + 243, + 506, + 258 + ], + "score": 1.0, + "content": "[21] C. Gregg and K. Hazelwood. Where is the data? why you cannot debate cpu vs. gpu per-", + "type": "text", + "cross_page": true + } + ], + "index": 12, + "is_list_start_line": true + }, + { + "bbox": [ + 125, + 253, + 506, + 268 + ], + "spans": [ + { + "bbox": [ + 125, + 253, + 506, + 268 + ], + "score": 1.0, + "content": "formance without the answer. 
In IEEE International Symposium on Performance Analysis of", + "type": "text", + "cross_page": true + } + ], + "index": 13 + }, + { + "bbox": [ + 127, + 266, + 284, + 279 + ], + "spans": [ + { + "bbox": [ + 127, + 266, + 284, + 279 + ], + "score": 1.0, + "content": "Systems and Software (ISPASS), 2011.", + "type": "text", + "cross_page": true + } + ], + "index": 14, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 283, + 506, + 298 + ], + "spans": [ + { + "bbox": [ + 106, + 283, + 506, + 298 + ], + "score": 1.0, + "content": "[22] J. Schulman, F. Wolski, P. Dhariwal, A. Radford, and O. Klimov. Proximal policy optimization", + "type": "text", + "cross_page": true + } + ], + "index": 15, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 295, + 299, + 308 + ], + "spans": [ + { + "bbox": [ + 127, + 295, + 299, + 308 + ], + "score": 1.0, + "content": "algorithms. CoRR, abs/1707.06347, 2017.", + "type": "text", + "cross_page": true + } + ], + "index": 16, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 314, + 505, + 327 + ], + "spans": [ + { + "bbox": [ + 106, + 314, + 505, + 327 + ], + "score": 1.0, + "content": "[23] J. Schulman, P. Moritz, S. Levine, M. Jordan, and P. Abbeel. High-dimensional continuous", + "type": "text", + "cross_page": true + } + ], + "index": 17, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 326, + 505, + 339 + ], + "spans": [ + { + "bbox": [ + 127, + 326, + 505, + 339 + ], + "score": 1.0, + "content": "control using generalized advantage estimation. 
In Proceedings of the International Confer-", + "type": "text", + "cross_page": true + } + ], + "index": 18 + }, + { + "bbox": [ + 126, + 336, + 325, + 349 + ], + "spans": [ + { + "bbox": [ + 126, + 336, + 325, + 349 + ], + "score": 1.0, + "content": "ence on Learning Representations (ICLR), 2016.", + "type": "text", + "cross_page": true + } + ], + "index": 19, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 353, + 505, + 370 + ], + "spans": [ + { + "bbox": [ + 105, + 353, + 505, + 370 + ], + "score": 1.0, + "content": "[24] F. Pardo, A. Tavakoli, V. Levdik, and P. Kormushev. Time limits in reinforcement learning.", + "type": "text", + "cross_page": true + } + ], + "index": 20, + "is_list_start_line": true + }, + { + "bbox": [ + 128, + 365, + 250, + 378 + ], + "spans": [ + { + "bbox": [ + 128, + 365, + 250, + 378 + ], + "score": 1.0, + "content": "CoRR, abs/1712.00378, 2017.", + "type": "text", + "cross_page": true + } + ], + "index": 21, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 385, + 505, + 398 + ], + "spans": [ + { + "bbox": [ + 106, + 385, + 505, + 398 + ], + "score": 1.0, + "content": "[25] G. Brockman, V. Cheung, L. Pettersson, J. Schneider, J. Schulman, J. Tang, and W. Zaremba.", + "type": "text", + "cross_page": true + } + ], + "index": 22, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 396, + 207, + 408 + ], + "spans": [ + { + "bbox": [ + 127, + 396, + 207, + 408 + ], + "score": 1.0, + "content": "Openai gym, 2016.", + "type": "text", + "cross_page": true + } + ], + "index": 23, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 413, + 506, + 428 + ], + "spans": [ + { + "bbox": [ + 105, + 413, + 506, + 428 + ], + "score": 1.0, + "content": "[26] A. Hill, A. Raffin, M. Ernestus, A. Gleave, A. Kanervisto, R. Traore, P. Dhariwal, C. 
Hesse,", + "type": "text", + "cross_page": true + } + ], + "index": 24, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 425, + 505, + 438 + ], + "spans": [ + { + "bbox": [ + 127, + 425, + 505, + 438 + ], + "score": 1.0, + "content": "O. Klimov, A. Nichol, M. Plappert, A. Radford, J. Schulman, S. Sidor, and Y. Wu. Stable", + "type": "text", + "cross_page": true + } + ], + "index": 25 + }, + { + "bbox": [ + 127, + 437, + 421, + 450 + ], + "spans": [ + { + "bbox": [ + 127, + 437, + 421, + 450 + ], + "score": 1.0, + "content": "baselines. https://github.com/hill-a/stable-baselines, 2018.", + "type": "text", + "cross_page": true + } + ], + "index": 26, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 453, + 504, + 470 + ], + "spans": [ + { + "bbox": [ + 105, + 453, + 504, + 470 + ], + "score": 1.0, + "content": "[27] J. Achiam. Spinning up in deep reinforcement learning, 2018. URL https://spinningup.", + "type": "text", + "cross_page": true + } + ], + "index": 27, + "is_list_start_line": true + }, + { + "bbox": [ + 126, + 466, + 244, + 480 + ], + "spans": [ + { + "bbox": [ + 126, + 466, + 244, + 480 + ], + "score": 1.0, + "content": "openai.com/en/latest/.", + "type": "text", + "cross_page": true + } + ], + "index": 28, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 484, + 505, + 499 + ], + "spans": [ + { + "bbox": [ + 106, + 484, + 505, + 499 + ], + "score": 1.0, + "content": "[28] R. Wang, J. Lehman, J. Clune, and K. O. Stanley. 
Paired open-ended trailblazer (POET): end-", + "type": "text", + "cross_page": true + } + ], + "index": 29, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 496, + 505, + 509 + ], + "spans": [ + { + "bbox": [ + 127, + 496, + 505, + 509 + ], + "score": 1.0, + "content": "lessly generating increasingly complex and diverse learning environments and their solutions.", + "type": "text", + "cross_page": true + } + ], + "index": 30 + }, + { + "bbox": [ + 127, + 507, + 250, + 520 + ], + "spans": [ + { + "bbox": [ + 127, + 507, + 250, + 520 + ], + "score": 1.0, + "content": "CoRR, abs/1901.01753, 2019.", + "type": "text", + "cross_page": true + } + ], + "index": 31, + "is_list_end_line": true + }, + { + "bbox": [ + 106, + 525, + 504, + 539 + ], + "spans": [ + { + "bbox": [ + 106, + 525, + 504, + 539 + ], + "score": 1.0, + "content": "[29] Z. Xie, H. Y. Ling, N. H. Kim, and M. van de Panne. Allsteps: Curriculum-driven learn-", + "type": "text", + "cross_page": true + } + ], + "index": 32, + "is_list_start_line": true + }, + { + "bbox": [ + 127, + 536, + 506, + 551 + ], + "spans": [ + { + "bbox": [ + 127, + 536, + 506, + 551 + ], + "score": 1.0, + "content": "ing of stepping stone skills. Proceedings of ACM SIGGRAPH / Eurographics Symposium on", + "type": "text", + "cross_page": true + } + ], + "index": 33 + }, + { + "bbox": [ + 128, + 547, + 241, + 561 + ], + "spans": [ + { + "bbox": [ + 128, + 547, + 241, + 561 + ], + "score": 1.0, + "content": "Computer Animation, 2020.", + "type": "text", + "cross_page": true + } + ], + "index": 34, + "is_list_end_line": true + }, + { + "bbox": [ + 105, + 565, + 506, + 580 + ], + "spans": [ + { + "bbox": [ + 105, + 565, + 506, + 580 + ], + "score": 1.0, + "content": "[30] C. Florensa, D. Held, X. Geng, and P. Abbeel. 
Automatic goal generation for reinforcement", + "type": "text", + "cross_page": true + } + ], + "index": 35, + "is_list_start_line": true + }, + { + "bbox": [ + 125, + 576, + 506, + 593 + ], + "spans": [ + { + "bbox": [ + 125, + 576, + 506, + 593 + ], + "score": 1.0, + "content": "learning agents. In Proceedings of the 35th International Conference on Machine Learning", + "type": "text", + "cross_page": true + } + ], + "index": 36 + }, + { + "bbox": [ + 127, + 588, + 424, + 601 + ], + "spans": [ + { + "bbox": [ + 127, + 588, + 424, + 601 + ], + "score": 1.0, + "content": "(ICML), volume 80 of Proceedings of Machine Learning Research, 2018.", + "type": "text", + "cross_page": true + } + ], + "index": 37, + "is_list_end_line": true + } + ], + "index": 25.5, + "bbox_fs": [ + 105, + 158, + 507, + 724 + ] + } + ] + }, + { + "preproc_blocks": [ + { + "type": "text", + "bbox": [ + 105, + 72, + 506, + 606 + ], + "lines": [ + { + "bbox": [ + 105, + 72, + 505, + 87 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 505, + 87 + ], + "score": 1.0, + "content": "[16] J. Lee, J. Hwangbo, L. Wellhausen, V. Koltun, and M. Hutter. Learning quadrupedal locomo-", + "type": "text" + } + ], + "index": 0 + }, + { + "bbox": [ + 127, + 84, + 371, + 97 + ], + "spans": [ + { + "bbox": [ + 127, + 84, + 371, + 97 + ], + "score": 1.0, + "content": "tion over challenging terrain. Science Robotics, 5(47), 2020.", + "type": "text" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 102, + 506, + 117 + ], + "spans": [ + { + "bbox": [ + 105, + 102, + 506, + 117 + ], + "score": 1.0, + "content": "[17] V. Tsounis, M. Alge, J. Lee, F. Farshidian, and M. Hutter. Deepgait: Planning and control of", + "type": "text" + } + ], + "index": 2 + }, + { + "bbox": [ + 127, + 113, + 506, + 128 + ], + "spans": [ + { + "bbox": [ + 127, + 113, + 506, + 128 + ], + "score": 1.0, + "content": "quadrupedal gaits using deep reinforcement learning. 
IEEE Robotics and Automation Letters,", + "type": "text" + } + ], + "index": 3 + }, + { + "bbox": [ + 127, + 124, + 181, + 137 + ], + "spans": [ + { + "bbox": [ + 127, + 124, + 181, + 137 + ], + "score": 1.0, + "content": "PP, 03 2020.", + "type": "text" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 141, + 505, + 159 + ], + "spans": [ + { + "bbox": [ + 104, + 141, + 505, + 159 + ], + "score": 1.0, + "content": "[18] S. Gangapurwala, M. Geisert, R. Orsolino, M. Fallon, and I. Havoutis. Real-time trajectory", + "type": "text" + } + ], + "index": 5 + }, + { + "bbox": [ + 127, + 155, + 504, + 168 + ], + "spans": [ + { + "bbox": [ + 127, + 155, + 504, + 168 + ], + "score": 1.0, + "content": "adaptation for quadrupedal locomotion using deep reinforcement learning. In IEEE Interna-", + "type": "text" + } + ], + "index": 6 + }, + { + "bbox": [ + 127, + 165, + 376, + 178 + ], + "spans": [ + { + "bbox": [ + 127, + 165, + 376, + 178 + ], + "score": 1.0, + "content": "tional Conference on Robotics and Automation (ICRA), 2021.", + "type": "text" + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 184, + 505, + 197 + ], + "spans": [ + { + "bbox": [ + 106, + 184, + 505, + 197 + ], + "score": 1.0, + "content": "[19] T. Miki, J. Lee, L. Wellhausen, V. Koltun, and M. Hutter. Wild anymal: Robust zero-shot", + "type": "text" + } + ], + "index": 8 + }, + { + "bbox": [ + 126, + 195, + 374, + 209 + ], + "spans": [ + { + "bbox": [ + 126, + 195, + 374, + 209 + ], + "score": 1.0, + "content": "perceptive locomotion. Submitted to Science Robotics, 2021.", + "type": "text" + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 214, + 506, + 227 + ], + "spans": [ + { + "bbox": [ + 106, + 214, + 506, + 227 + ], + "score": 1.0, + "content": "[20] J. Siekmann, K. Green, J. Warila, A. Fern, and J. W. Hurst. 
Blind bipedal stair traversal via", + "type": "text" + } + ], + "index": 10 + }, + { + "bbox": [ + 128, + 226, + 393, + 237 + ], + "spans": [ + { + "bbox": [ + 128, + 226, + 393, + 237 + ], + "score": 1.0, + "content": "sim-to-real reinforcement learning. CoRR, abs/2105.08328, 2021.", + "type": "text" + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 243, + 506, + 258 + ], + "spans": [ + { + "bbox": [ + 105, + 243, + 506, + 258 + ], + "score": 1.0, + "content": "[21] C. Gregg and K. Hazelwood. Where is the data? why you cannot debate cpu vs. gpu per-", + "type": "text" + } + ], + "index": 12 + }, + { + "bbox": [ + 125, + 253, + 506, + 268 + ], + "spans": [ + { + "bbox": [ + 125, + 253, + 506, + 268 + ], + "score": 1.0, + "content": "formance without the answer. In IEEE International Symposium on Performance Analysis of", + "type": "text" + } + ], + "index": 13 + }, + { + "bbox": [ + 127, + 266, + 284, + 279 + ], + "spans": [ + { + "bbox": [ + 127, + 266, + 284, + 279 + ], + "score": 1.0, + "content": "Systems and Software (ISPASS), 2011.", + "type": "text" + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 283, + 506, + 298 + ], + "spans": [ + { + "bbox": [ + 106, + 283, + 506, + 298 + ], + "score": 1.0, + "content": "[22] J. Schulman, F. Wolski, P. Dhariwal, A. Radford, and O. Klimov. Proximal policy optimization", + "type": "text" + } + ], + "index": 15 + }, + { + "bbox": [ + 127, + 295, + 299, + 308 + ], + "spans": [ + { + "bbox": [ + 127, + 295, + 299, + 308 + ], + "score": 1.0, + "content": "algorithms. CoRR, abs/1707.06347, 2017.", + "type": "text" + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 314, + 505, + 327 + ], + "spans": [ + { + "bbox": [ + 106, + 314, + 505, + 327 + ], + "score": 1.0, + "content": "[23] J. Schulman, P. Moritz, S. Levine, M. Jordan, and P. Abbeel. 
High-dimensional continuous", + "type": "text" + } + ], + "index": 17 + }, + { + "bbox": [ + 127, + 326, + 505, + 339 + ], + "spans": [ + { + "bbox": [ + 127, + 326, + 505, + 339 + ], + "score": 1.0, + "content": "control using generalized advantage estimation. In Proceedings of the International Confer-", + "type": "text" + } + ], + "index": 18 + }, + { + "bbox": [ + 126, + 336, + 325, + 349 + ], + "spans": [ + { + "bbox": [ + 126, + 336, + 325, + 349 + ], + "score": 1.0, + "content": "ence on Learning Representations (ICLR), 2016.", + "type": "text" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 353, + 505, + 370 + ], + "spans": [ + { + "bbox": [ + 105, + 353, + 505, + 370 + ], + "score": 1.0, + "content": "[24] F. Pardo, A. Tavakoli, V. Levdik, and P. Kormushev. Time limits in reinforcement learning.", + "type": "text" + } + ], + "index": 20 + }, + { + "bbox": [ + 128, + 365, + 250, + 378 + ], + "spans": [ + { + "bbox": [ + 128, + 365, + 250, + 378 + ], + "score": 1.0, + "content": "CoRR, abs/1712.00378, 2017.", + "type": "text" + } + ], + "index": 21 + }, + { + "bbox": [ + 106, + 385, + 505, + 398 + ], + "spans": [ + { + "bbox": [ + 106, + 385, + 505, + 398 + ], + "score": 1.0, + "content": "[25] G. Brockman, V. Cheung, L. Pettersson, J. Schneider, J. Schulman, J. Tang, and W. Zaremba.", + "type": "text" + } + ], + "index": 22 + }, + { + "bbox": [ + 127, + 396, + 207, + 408 + ], + "spans": [ + { + "bbox": [ + 127, + 396, + 207, + 408 + ], + "score": 1.0, + "content": "Openai gym, 2016.", + "type": "text" + } + ], + "index": 23 + }, + { + "bbox": [ + 105, + 413, + 506, + 428 + ], + "spans": [ + { + "bbox": [ + 105, + 413, + 506, + 428 + ], + "score": 1.0, + "content": "[26] A. Hill, A. Raffin, M. Ernestus, A. Gleave, A. Kanervisto, R. Traore, P. Dhariwal, C. Hesse,", + "type": "text" + } + ], + "index": 24 + }, + { + "bbox": [ + 127, + 425, + 505, + 438 + ], + "spans": [ + { + "bbox": [ + 127, + 425, + 505, + 438 + ], + "score": 1.0, + "content": "O. 
Klimov, A. Nichol, M. Plappert, A. Radford, J. Schulman, S. Sidor, and Y. Wu. Stable", + "type": "text" + } + ], + "index": 25 + }, + { + "bbox": [ + 127, + 437, + 421, + 450 + ], + "spans": [ + { + "bbox": [ + 127, + 437, + 421, + 450 + ], + "score": 1.0, + "content": "baselines. https://github.com/hill-a/stable-baselines, 2018.", + "type": "text" + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 453, + 504, + 470 + ], + "spans": [ + { + "bbox": [ + 105, + 453, + 504, + 470 + ], + "score": 1.0, + "content": "[27] J. Achiam. Spinning up in deep reinforcement learning, 2018. URL https://spinningup.", + "type": "text" + } + ], + "index": 27 + }, + { + "bbox": [ + 126, + 466, + 244, + 480 + ], + "spans": [ + { + "bbox": [ + 126, + 466, + 244, + 480 + ], + "score": 1.0, + "content": "openai.com/en/latest/.", + "type": "text" + } + ], + "index": 28 + }, + { + "bbox": [ + 106, + 484, + 505, + 499 + ], + "spans": [ + { + "bbox": [ + 106, + 484, + 505, + 499 + ], + "score": 1.0, + "content": "[28] R. Wang, J. Lehman, J. Clune, and K. O. Stanley. Paired open-ended trailblazer (POET): end-", + "type": "text" + } + ], + "index": 29 + }, + { + "bbox": [ + 127, + 496, + 505, + 509 + ], + "spans": [ + { + "bbox": [ + 127, + 496, + 505, + 509 + ], + "score": 1.0, + "content": "lessly generating increasingly complex and diverse learning environments and their solutions.", + "type": "text" + } + ], + "index": 30 + }, + { + "bbox": [ + 127, + 507, + 250, + 520 + ], + "spans": [ + { + "bbox": [ + 127, + 507, + 250, + 520 + ], + "score": 1.0, + "content": "CoRR, abs/1901.01753, 2019.", + "type": "text" + } + ], + "index": 31 + }, + { + "bbox": [ + 106, + 525, + 504, + 539 + ], + "spans": [ + { + "bbox": [ + 106, + 525, + 504, + 539 + ], + "score": 1.0, + "content": "[29] Z. Xie, H. Y. Ling, N. H. Kim, and M. van de Panne. 
Allsteps: Curriculum-driven learn-", + "type": "text" + } + ], + "index": 32 + }, + { + "bbox": [ + 127, + 536, + 506, + 551 + ], + "spans": [ + { + "bbox": [ + 127, + 536, + 506, + 551 + ], + "score": 1.0, + "content": "ing of stepping stone skills. Proceedings of ACM SIGGRAPH / Eurographics Symposium on", + "type": "text" + } + ], + "index": 33 + }, + { + "bbox": [ + 128, + 547, + 241, + 561 + ], + "spans": [ + { + "bbox": [ + 128, + 547, + 241, + 561 + ], + "score": 1.0, + "content": "Computer Animation, 2020.", + "type": "text" + } + ], + "index": 34 + }, + { + "bbox": [ + 105, + 565, + 506, + 580 + ], + "spans": [ + { + "bbox": [ + 105, + 565, + 506, + 580 + ], + "score": 1.0, + "content": "[30] C. Florensa, D. Held, X. Geng, and P. Abbeel. Automatic goal generation for reinforcement", + "type": "text" + } + ], + "index": 35 + }, + { + "bbox": [ + 125, + 576, + 506, + 593 + ], + "spans": [ + { + "bbox": [ + 125, + 576, + 506, + 593 + ], + "score": 1.0, + "content": "learning agents. 
In Proceedings of the 35th International Conference on Machine Learning", + "type": "text" + } + ], + "index": 36 + }, + { + "bbox": [ + 127, + 588, + 424, + 601 + ], + "spans": [ + { + "bbox": [ + 127, + 588, + 424, + 601 + ], + "score": 1.0, + "content": "(ICML), volume 80 of Proceedings of Machine Learning Research, 2018.", + "type": "text" + } + ], + "index": 37 + } + ], + "index": 18.5 + } + ], + "page_idx": 9, + "page_size": [ + 612, + 792 + ], + "discarded_blocks": [ + { + "type": "discarded", + "bbox": [ + 300, + 741, + 311, + 750 + ], + "lines": [ + { + "bbox": [ + 299, + 740, + 313, + 754 + ], + "spans": [ + { + "bbox": [ + 299, + 740, + 313, + 754 + ], + "score": 1.0, + "content": "10", + "type": "text" + } + ] + } + ] + } + ], + "para_blocks": [ + { + "type": "list", + "bbox": [ + 105, + 72, + 506, + 606 + ], + "lines": [], + "index": 18.5, + "bbox_fs": [ + 104, + 72, + 506, + 601 + ], + "lines_deleted": true + } + ] + } + ], + "_backend": "pipeline", + "_version_name": "2.2.2" +} \ No newline at end of file diff --git a/parse/train/wK2fDDJ5VcF/wK2fDDJ5VcF_model.json b/parse/train/wK2fDDJ5VcF/wK2fDDJ5VcF_model.json new file mode 100644 index 0000000000000000000000000000000000000000..35dfae8b102b414f6052decbdc412eaa7e8d9e1e --- /dev/null +++ b/parse/train/wK2fDDJ5VcF/wK2fDDJ5VcF_model.json @@ -0,0 +1,11712 @@ +[ + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 298, + 1742, + 1404, + 1742, + 1404, + 1987, + 298, + 1987 + ], + "score": 0.978 + }, + { + "category_id": 1, + "poly": [ + 397, + 719, + 1303, + 719, + 1303, + 1115, + 397, + 1115 + ], + "score": 0.978 + }, + { + "category_id": 3, + "poly": [ + 326, + 1213, + 1375, + 1213, + 1375, + 1605, + 326, + 1605 + ], + "score": 0.969 + }, + { + "category_id": 0, + "poly": [ + 382, + 225, + 1319, + 225, + 1319, + 336, + 382, + 336 + ], + "score": 0.965 + }, + { + "category_id": 1, + "poly": [ + 1086, + 392, + 1341, + 392, + 1341, + 485, + 1086, + 485 + ], + "score": 0.951 + }, + { + 
"category_id": 1, + "poly": [ + 723, + 392, + 1014, + 392, + 1014, + 483, + 723, + 483 + ], + "score": 0.948 + }, + { + "category_id": 1, + "poly": [ + 358, + 392, + 649, + 392, + 649, + 484, + 358, + 484 + ], + "score": 0.948 + }, + { + "category_id": 1, + "poly": [ + 722, + 531, + 976, + 531, + 976, + 623, + 722, + 623 + ], + "score": 0.941 + }, + { + "category_id": 4, + "poly": [ + 506, + 1620, + 1191, + 1620, + 1191, + 1651, + 506, + 1651 + ], + "score": 0.926 + }, + { + "category_id": 0, + "poly": [ + 298, + 1673, + 531, + 1673, + 531, + 1711, + 298, + 1711 + ], + "score": 0.913 + }, + { + "category_id": 1, + "poly": [ + 398, + 1141, + 1131, + 1141, + 1131, + 1174, + 398, + 1174 + ], + "score": 0.885 + }, + { + "category_id": 2, + "poly": [ + 298, + 2032, + 931, + 2032, + 931, + 2061, + 298, + 2061 + ], + "score": 0.771 + }, + { + "category_id": 15, + "poly": [ + 375.0, + 221.0, + 1320.0, + 221.0, + 1320.0, + 287.0, + 375.0, + 287.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 450.0, + 277.0, + 1250.0, + 277.0, + 1250.0, + 343.0, + 450.0, + 343.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 503.0, + 1617.0, + 1193.0, + 1617.0, + 1193.0, + 1655.0, + 503.0, + 1655.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1669.0, + 535.0, + 1669.0, + 535.0, + 1717.0, + 291.0, + 1717.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 2030.0, + 934.0, + 2030.0, + 934.0, + 2066.0, + 296.0, + 2066.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1741.0, + 1406.0, + 1741.0, + 1406.0, + 1778.0, + 293.0, + 1778.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1773.0, + 1405.0, + 1773.0, + 1405.0, + 1807.0, + 294.0, + 1807.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1803.0, + 1404.0, + 1803.0, + 1404.0, + 1837.0, + 
293.0, + 1837.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1835.0, + 1406.0, + 1835.0, + 1406.0, + 1868.0, + 294.0, + 1868.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1864.0, + 1406.0, + 1864.0, + 1406.0, + 1898.0, + 296.0, + 1898.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1892.0, + 1405.0, + 1892.0, + 1405.0, + 1928.0, + 293.0, + 1928.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1923.0, + 1406.0, + 1923.0, + 1406.0, + 1958.0, + 292.0, + 1958.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1952.0, + 1405.0, + 1952.0, + 1405.0, + 1993.0, + 292.0, + 1993.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 395.0, + 718.0, + 1305.0, + 718.0, + 1305.0, + 754.0, + 395.0, + 754.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 392.0, + 749.0, + 1308.0, + 749.0, + 1308.0, + 787.0, + 392.0, + 787.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 392.0, + 780.0, + 1305.0, + 780.0, + 1305.0, + 817.0, + 392.0, + 817.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 394.0, + 811.0, + 1304.0, + 811.0, + 1304.0, + 845.0, + 394.0, + 845.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 842.0, + 1304.0, + 842.0, + 1304.0, + 874.0, + 393.0, + 874.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 871.0, + 1306.0, + 871.0, + 1306.0, + 906.0, + 393.0, + 906.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 394.0, + 899.0, + 1305.0, + 899.0, + 1305.0, + 936.0, + 394.0, + 936.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 394.0, + 929.0, + 1306.0, + 929.0, + 1306.0, + 968.0, + 394.0, + 968.0 + ], + "score": 1.0, + 
"text": "" + }, + { + "category_id": 15, + "poly": [ + 392.0, + 962.0, + 1304.0, + 962.0, + 1304.0, + 996.0, + 392.0, + 996.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 992.0, + 1305.0, + 992.0, + 1305.0, + 1028.0, + 393.0, + 1028.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 394.0, + 1022.0, + 1306.0, + 1022.0, + 1306.0, + 1058.0, + 394.0, + 1058.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 394.0, + 1052.0, + 1305.0, + 1052.0, + 1305.0, + 1087.0, + 394.0, + 1087.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 1084.0, + 1192.0, + 1084.0, + 1192.0, + 1120.0, + 393.0, + 1120.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1133.0, + 391.0, + 1297.0, + 391.0, + 1297.0, + 427.0, + 1133.0, + 427.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1159.0, + 421.0, + 1270.0, + 421.0, + 1270.0, + 455.0, + 1159.0, + 455.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1083.0, + 454.0, + 1344.0, + 454.0, + 1344.0, + 486.0, + 1083.0, + 486.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 782.0, + 391.0, + 958.0, + 391.0, + 958.0, + 426.0, + 782.0, + 426.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 722.0, + 423.0, + 1017.0, + 423.0, + 1017.0, + 454.0, + 722.0, + 454.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 750.0, + 452.0, + 990.0, + 452.0, + 990.0, + 486.0, + 750.0, + 486.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 427.0, + 394.0, + 588.0, + 394.0, + 588.0, + 423.0, + 427.0, + 423.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 358.0, + 425.0, + 651.0, + 425.0, + 651.0, + 453.0, + 358.0, + 453.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": 
[ + 399.0, + 456.0, + 610.0, + 456.0, + 610.0, + 484.0, + 399.0, + 484.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 762.0, + 530.0, + 938.0, + 530.0, + 938.0, + 564.0, + 762.0, + 564.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 779.0, + 563.0, + 921.0, + 563.0, + 921.0, + 592.0, + 779.0, + 592.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 721.0, + 595.0, + 977.0, + 595.0, + 977.0, + 623.0, + 721.0, + 623.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 395.0, + 1139.0, + 1133.0, + 1139.0, + 1133.0, + 1178.0, + 395.0, + 1178.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 0, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 298, + 1097, + 1404, + 1097, + 1404, + 1491, + 298, + 1491 + ], + "score": 0.983 + }, + { + "category_id": 1, + "poly": [ + 298, + 657, + 1404, + 657, + 1404, + 1083, + 298, + 1083 + ], + "score": 0.982 + }, + { + "category_id": 1, + "poly": [ + 298, + 1641, + 1404, + 1641, + 1404, + 2006, + 298, + 2006 + ], + "score": 0.982 + }, + { + "category_id": 1, + "poly": [ + 298, + 340, + 1403, + 340, + 1403, + 644, + 298, + 644 + ], + "score": 0.982 + }, + { + "category_id": 1, + "poly": [ + 299, + 203, + 1402, + 203, + 1402, + 324, + 299, + 324 + ], + "score": 0.973 + }, + { + "category_id": 0, + "poly": [ + 297, + 1557, + 966, + 1557, + 966, + 1597, + 297, + 1597 + ], + "score": 0.931 + }, + { + "category_id": 2, + "poly": [ + 841, + 2062, + 858, + 2062, + 858, + 2084, + 841, + 2084 + ], + "score": 0.723 + }, + { + "category_id": 2, + "poly": [ + 841, + 2062, + 859, + 2062, + 859, + 2084, + 841, + 2084 + ], + "score": 0.111 + }, + { + "category_id": 15, + "poly": [ + 289.0, + 1553.0, + 971.0, + 1553.0, + 971.0, + 1607.0, + 289.0, + 1607.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 839.0, + 2059.0, + 
862.0, + 2059.0, + 862.0, + 2093.0, + 839.0, + 2093.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 838.0, + 2059.0, + 862.0, + 2059.0, + 862.0, + 2093.0, + 838.0, + 2093.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1096.0, + 1404.0, + 1096.0, + 1404.0, + 1133.0, + 293.0, + 1133.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1125.0, + 1406.0, + 1125.0, + 1406.0, + 1164.0, + 293.0, + 1164.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1156.0, + 1406.0, + 1156.0, + 1406.0, + 1196.0, + 293.0, + 1196.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1186.0, + 1404.0, + 1186.0, + 1404.0, + 1225.0, + 293.0, + 1225.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1218.0, + 1405.0, + 1218.0, + 1405.0, + 1254.0, + 294.0, + 1254.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1249.0, + 1404.0, + 1249.0, + 1404.0, + 1285.0, + 293.0, + 1285.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1276.0, + 1408.0, + 1276.0, + 1408.0, + 1317.0, + 292.0, + 1317.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1309.0, + 1405.0, + 1309.0, + 1405.0, + 1345.0, + 294.0, + 1345.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1340.0, + 1405.0, + 1340.0, + 1405.0, + 1376.0, + 293.0, + 1376.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1370.0, + 1404.0, + 1370.0, + 1404.0, + 1405.0, + 295.0, + 1405.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1401.0, + 1404.0, + 1401.0, + 1404.0, + 1437.0, + 294.0, + 1437.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1429.0, + 1405.0, + 
1429.0, + 1405.0, + 1466.0, + 293.0, + 1466.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1463.0, + 460.0, + 1463.0, + 460.0, + 1495.0, + 294.0, + 1495.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 658.0, + 1405.0, + 658.0, + 1405.0, + 690.0, + 293.0, + 690.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 688.0, + 1404.0, + 688.0, + 1404.0, + 723.0, + 292.0, + 723.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 718.0, + 1406.0, + 718.0, + 1406.0, + 754.0, + 296.0, + 754.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 749.0, + 1404.0, + 749.0, + 1404.0, + 784.0, + 294.0, + 784.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 778.0, + 1405.0, + 778.0, + 1405.0, + 816.0, + 293.0, + 816.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 809.0, + 1404.0, + 809.0, + 1404.0, + 845.0, + 294.0, + 845.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 837.0, + 1407.0, + 837.0, + 1407.0, + 875.0, + 291.0, + 875.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 869.0, + 1406.0, + 869.0, + 1406.0, + 904.0, + 294.0, + 904.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 900.0, + 1406.0, + 900.0, + 1406.0, + 936.0, + 294.0, + 936.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 931.0, + 1406.0, + 931.0, + 1406.0, + 966.0, + 294.0, + 966.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 960.0, + 1406.0, + 960.0, + 1406.0, + 996.0, + 292.0, + 996.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 990.0, + 1402.0, + 990.0, + 1402.0, + 1026.0, + 294.0, + 1026.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1017.0, + 1406.0, + 1017.0, + 1406.0, + 1061.0, + 292.0, + 1061.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1051.0, + 1178.0, + 1051.0, + 1178.0, + 1086.0, + 293.0, + 1086.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1643.0, + 1405.0, + 1643.0, + 1405.0, + 1677.0, + 296.0, + 1677.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1672.0, + 1408.0, + 1672.0, + 1408.0, + 1710.0, + 293.0, + 1710.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1702.0, + 1405.0, + 1702.0, + 1405.0, + 1736.0, + 293.0, + 1736.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1733.0, + 1408.0, + 1733.0, + 1408.0, + 1770.0, + 293.0, + 1770.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1763.0, + 1405.0, + 1763.0, + 1405.0, + 1799.0, + 292.0, + 1799.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1792.0, + 1406.0, + 1792.0, + 1406.0, + 1827.0, + 293.0, + 1827.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1822.0, + 1406.0, + 1822.0, + 1406.0, + 1857.0, + 293.0, + 1857.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1855.0, + 1405.0, + 1855.0, + 1405.0, + 1889.0, + 294.0, + 1889.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1885.0, + 1404.0, + 1885.0, + 1404.0, + 1919.0, + 294.0, + 1919.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1913.0, + 1404.0, + 1913.0, + 1404.0, + 1951.0, + 293.0, + 1951.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1945.0, + 1404.0, + 1945.0, + 1404.0, + 1981.0, + 292.0, + 1981.0 + ], + "score": 
1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1975.0, + 1404.0, + 1975.0, + 1404.0, + 2011.0, + 293.0, + 2011.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 339.0, + 1405.0, + 339.0, + 1405.0, + 375.0, + 294.0, + 375.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 368.0, + 1407.0, + 368.0, + 1407.0, + 407.0, + 293.0, + 407.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 400.0, + 1407.0, + 400.0, + 1407.0, + 438.0, + 291.0, + 438.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 431.0, + 1405.0, + 431.0, + 1405.0, + 467.0, + 293.0, + 467.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 459.0, + 1405.0, + 459.0, + 1405.0, + 496.0, + 293.0, + 496.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 491.0, + 1404.0, + 491.0, + 1404.0, + 526.0, + 294.0, + 526.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 521.0, + 1405.0, + 521.0, + 1405.0, + 556.0, + 294.0, + 556.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 551.0, + 1405.0, + 551.0, + 1405.0, + 587.0, + 294.0, + 587.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 584.0, + 1404.0, + 584.0, + 1404.0, + 616.0, + 296.0, + 616.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 612.0, + 1241.0, + 612.0, + 1241.0, + 646.0, + 293.0, + 646.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 202.0, + 1406.0, + 202.0, + 1406.0, + 238.0, + 294.0, + 238.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 234.0, + 1406.0, + 234.0, + 1406.0, + 267.0, + 293.0, + 267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": 
[ + 293.0, + 265.0, + 1406.0, + 265.0, + 1406.0, + 298.0, + 293.0, + 298.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 296.0, + 1295.0, + 296.0, + 1295.0, + 328.0, + 295.0, + 328.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 1, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 297, + 962, + 1405, + 962, + 1405, + 1571, + 297, + 1571 + ], + "score": 0.984 + }, + { + "category_id": 1, + "poly": [ + 298, + 358, + 1404, + 358, + 1404, + 663, + 298, + 663 + ], + "score": 0.983 + }, + { + "category_id": 1, + "poly": [ + 298, + 755, + 1403, + 755, + 1403, + 878, + 298, + 878 + ], + "score": 0.977 + }, + { + "category_id": 1, + "poly": [ + 299, + 1823, + 1402, + 1823, + 1402, + 2007, + 299, + 2007 + ], + "score": 0.977 + }, + { + "category_id": 1, + "poly": [ + 299, + 1585, + 1402, + 1585, + 1402, + 1736, + 299, + 1736 + ], + "score": 0.977 + }, + { + "category_id": 1, + "poly": [ + 296, + 203, + 1402, + 203, + 1402, + 265, + 296, + 265 + ], + "score": 0.951 + }, + { + "category_id": 0, + "poly": [ + 299, + 1770, + 567, + 1770, + 567, + 1804, + 299, + 1804 + ], + "score": 0.92 + }, + { + "category_id": 0, + "poly": [ + 299, + 698, + 552, + 698, + 552, + 731, + 299, + 731 + ], + "score": 0.914 + }, + { + "category_id": 0, + "poly": [ + 299, + 301, + 645, + 301, + 645, + 334, + 299, + 334 + ], + "score": 0.913 + }, + { + "category_id": 0, + "poly": [ + 301, + 910, + 760, + 910, + 760, + 943, + 301, + 943 + ], + "score": 0.901 + }, + { + "category_id": 2, + "poly": [ + 841, + 2061, + 858, + 2061, + 858, + 2084, + 841, + 2084 + ], + "score": 0.619 + }, + { + "category_id": 2, + "poly": [ + 842, + 2061, + 859, + 2061, + 859, + 2084, + 842, + 2084 + ], + "score": 0.363 + }, + { + "category_id": 13, + "poly": [ + 827, + 1241, + 899, + 1241, + 899, + 1271, + 827, + 1271 + ], + "score": 0.91, + "latex": "n _ { s t e p s }" + }, + { + "category_id": 13, + 
"poly": [ + 358, + 1117, + 577, + 1117, + 577, + 1148, + 358, + 1148 + ], + "score": 0.9, + "latex": "B = n _ { r o b o t s } n _ { s t e p s }" + }, + { + "category_id": 13, + "poly": [ + 668, + 1120, + 740, + 1120, + 740, + 1149, + 668, + 1149 + ], + "score": 0.9, + "latex": " { n _ { s t e p s } }" + }, + { + "category_id": 13, + "poly": [ + 1018, + 1422, + 1090, + 1422, + 1090, + 1452, + 1018, + 1452 + ], + "score": 0.88, + "latex": " { n _ { s t e p s } }" + }, + { + "category_id": 13, + "poly": [ + 1134, + 1150, + 1217, + 1150, + 1217, + 1178, + 1134, + 1178 + ], + "score": 0.88, + "latex": "n _ { r o b o t s }" + }, + { + "category_id": 13, + "poly": [ + 345, + 1151, + 428, + 1151, + 428, + 1178, + 345, + 1178 + ], + "score": 0.88, + "latex": "n _ { r o b o t s }" + }, + { + "category_id": 13, + "poly": [ + 748, + 1180, + 821, + 1180, + 821, + 1210, + 748, + 1210 + ], + "score": 0.87, + "latex": "n _ { s t e p s }" + }, + { + "category_id": 13, + "poly": [ + 920, + 1177, + 947, + 1177, + 947, + 1205, + 920, + 1205 + ], + "score": 0.75, + "latex": "B" + }, + { + "category_id": 13, + "poly": [ + 968, + 1450, + 1016, + 1450, + 1016, + 1478, + 968, + 1478 + ], + "score": 0.59, + "latex": "2 0 \\mathrm { s }" + }, + { + "category_id": 13, + "poly": [ + 786, + 997, + 808, + 997, + 808, + 1022, + 786, + 1022 + ], + "score": 0.54, + "latex": "B" + }, + { + "category_id": 13, + "poly": [ + 433, + 1419, + 489, + 1419, + 489, + 1448, + 433, + 1448 + ], + "score": 0.43, + "latex": "0 . 
5 \\mathrm { s }" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1762.0, + 572.0, + 1762.0, + 572.0, + 1813.0, + 293.0, + 1813.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 696.0, + 555.0, + 696.0, + 555.0, + 738.0, + 293.0, + 738.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 296.0, + 647.0, + 296.0, + 647.0, + 340.0, + 292.0, + 340.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 908.0, + 766.0, + 908.0, + 766.0, + 947.0, + 293.0, + 947.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 838.0, + 2058.0, + 861.0, + 2058.0, + 861.0, + 2091.0, + 838.0, + 2091.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 839.0, + 2059.0, + 860.0, + 2059.0, + 860.0, + 2090.0, + 839.0, + 2090.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 964.0, + 1403.0, + 964.0, + 1403.0, + 998.0, + 294.0, + 998.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 992.0, + 785.0, + 992.0, + 785.0, + 1030.0, + 292.0, + 1030.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 809.0, + 992.0, + 1402.0, + 992.0, + 1402.0, + 1030.0, + 809.0, + 1030.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1024.0, + 1405.0, + 1024.0, + 1405.0, + 1058.0, + 295.0, + 1058.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1056.0, + 1405.0, + 1056.0, + 1405.0, + 1090.0, + 294.0, + 1090.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1086.0, + 1405.0, + 1086.0, + 1405.0, + 1120.0, + 295.0, + 1120.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1114.0, + 357.0, + 1114.0, + 357.0, + 1156.0, + 291.0, + 1156.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 
15, + "poly": [ + 578.0, + 1114.0, + 667.0, + 1114.0, + 667.0, + 1156.0, + 578.0, + 1156.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 741.0, + 1114.0, + 1407.0, + 1114.0, + 1407.0, + 1156.0, + 741.0, + 1156.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1145.0, + 344.0, + 1145.0, + 344.0, + 1183.0, + 294.0, + 1183.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 429.0, + 1145.0, + 1133.0, + 1145.0, + 1133.0, + 1183.0, + 429.0, + 1183.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1218.0, + 1145.0, + 1406.0, + 1145.0, + 1406.0, + 1183.0, + 1218.0, + 1183.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1174.0, + 747.0, + 1174.0, + 747.0, + 1216.0, + 292.0, + 1216.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 822.0, + 1174.0, + 919.0, + 1174.0, + 919.0, + 1216.0, + 822.0, + 1216.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 948.0, + 1174.0, + 1407.0, + 1174.0, + 1407.0, + 1216.0, + 948.0, + 1216.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1202.0, + 1406.0, + 1202.0, + 1406.0, + 1244.0, + 292.0, + 1244.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1233.0, + 826.0, + 1233.0, + 826.0, + 1278.0, + 291.0, + 1278.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 900.0, + 1233.0, + 1407.0, + 1233.0, + 1407.0, + 1278.0, + 900.0, + 1278.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1268.0, + 1403.0, + 1268.0, + 1403.0, + 1303.0, + 294.0, + 1303.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1297.0, + 1405.0, + 1297.0, + 1405.0, + 1332.0, + 295.0, + 1332.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 
291.0, + 1324.0, + 1405.0, + 1324.0, + 1405.0, + 1365.0, + 291.0, + 1365.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1356.0, + 1406.0, + 1356.0, + 1406.0, + 1393.0, + 292.0, + 1393.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1387.0, + 1405.0, + 1387.0, + 1405.0, + 1424.0, + 292.0, + 1424.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1418.0, + 432.0, + 1418.0, + 432.0, + 1456.0, + 294.0, + 1456.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 490.0, + 1418.0, + 1017.0, + 1418.0, + 1017.0, + 1456.0, + 490.0, + 1456.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1091.0, + 1418.0, + 1406.0, + 1418.0, + 1406.0, + 1456.0, + 1091.0, + 1456.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1449.0, + 967.0, + 1449.0, + 967.0, + 1483.0, + 295.0, + 1483.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1017.0, + 1449.0, + 1406.0, + 1449.0, + 1406.0, + 1483.0, + 1017.0, + 1483.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1479.0, + 1405.0, + 1479.0, + 1405.0, + 1514.0, + 295.0, + 1514.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1509.0, + 1405.0, + 1509.0, + 1405.0, + 1547.0, + 294.0, + 1547.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1541.0, + 1087.0, + 1541.0, + 1087.0, + 1573.0, + 295.0, + 1573.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 356.0, + 1405.0, + 356.0, + 1405.0, + 393.0, + 295.0, + 393.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 388.0, + 1405.0, + 388.0, + 1405.0, + 422.0, + 295.0, + 422.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 417.0, + 
1406.0, + 417.0, + 1406.0, + 454.0, + 293.0, + 454.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 450.0, + 1405.0, + 450.0, + 1405.0, + 484.0, + 293.0, + 484.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 476.0, + 1408.0, + 476.0, + 1408.0, + 517.0, + 292.0, + 517.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 505.0, + 1408.0, + 505.0, + 1408.0, + 549.0, + 291.0, + 549.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 542.0, + 1405.0, + 542.0, + 1405.0, + 574.0, + 296.0, + 574.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 569.0, + 1406.0, + 569.0, + 1406.0, + 605.0, + 293.0, + 605.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 599.0, + 1406.0, + 599.0, + 1406.0, + 636.0, + 293.0, + 636.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 632.0, + 1139.0, + 632.0, + 1139.0, + 667.0, + 293.0, + 667.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 753.0, + 1404.0, + 753.0, + 1404.0, + 791.0, + 293.0, + 791.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 784.0, + 1407.0, + 784.0, + 1407.0, + 821.0, + 292.0, + 821.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 818.0, + 1404.0, + 818.0, + 1404.0, + 850.0, + 294.0, + 850.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 848.0, + 1204.0, + 848.0, + 1204.0, + 880.0, + 294.0, + 880.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1822.0, + 1404.0, + 1822.0, + 1404.0, + 1858.0, + 295.0, + 1858.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1854.0, + 1406.0, + 1854.0, + 1406.0, + 1890.0, + 294.0, + 1890.0 + 
], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1883.0, + 1406.0, + 1883.0, + 1406.0, + 1920.0, + 292.0, + 1920.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1912.0, + 1407.0, + 1912.0, + 1407.0, + 1951.0, + 293.0, + 1951.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1944.0, + 1407.0, + 1944.0, + 1407.0, + 1981.0, + 293.0, + 1981.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1976.0, + 1404.0, + 1976.0, + 1404.0, + 2010.0, + 293.0, + 2010.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 298.0, + 1586.0, + 1403.0, + 1586.0, + 1403.0, + 1619.0, + 298.0, + 1619.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1615.0, + 1406.0, + 1615.0, + 1406.0, + 1649.0, + 294.0, + 1649.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1646.0, + 1406.0, + 1646.0, + 1406.0, + 1680.0, + 293.0, + 1680.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1672.0, + 1407.0, + 1672.0, + 1407.0, + 1715.0, + 292.0, + 1715.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1703.0, + 361.0, + 1703.0, + 361.0, + 1741.0, + 294.0, + 1741.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 197.0, + 1406.0, + 197.0, + 1406.0, + 242.0, + 294.0, + 242.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 234.0, + 872.0, + 234.0, + 872.0, + 269.0, + 296.0, + 269.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 2, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 298, + 545, + 1405, + 545, + 1405, + 880, + 298, + 880 + ], + "score": 0.98 + }, + { + "category_id": 1, + "poly": [ + 298, + 1443, + 1404, + 1443, + 
1404, + 1926, + 298, + 1926 + ], + "score": 0.98 + }, + { + "category_id": 1, + "poly": [ + 298, + 995, + 1403, + 995, + 1403, + 1148, + 298, + 1148 + ], + "score": 0.98 + }, + { + "category_id": 1, + "poly": [ + 298, + 1244, + 1404, + 1244, + 1404, + 1425, + 298, + 1425 + ], + "score": 0.98 + }, + { + "category_id": 4, + "poly": [ + 298, + 431, + 1404, + 431, + 1404, + 522, + 298, + 522 + ], + "score": 0.96 + }, + { + "category_id": 3, + "poly": [ + 303, + 224, + 1397, + 224, + 1397, + 413, + 303, + 413 + ], + "score": 0.957 + }, + { + "category_id": 2, + "poly": [ + 297, + 1949, + 1403, + 1949, + 1403, + 2007, + 297, + 2007 + ], + "score": 0.938 + }, + { + "category_id": 0, + "poly": [ + 299, + 925, + 590, + 925, + 590, + 963, + 299, + 963 + ], + "score": 0.917 + }, + { + "category_id": 0, + "poly": [ + 300, + 1186, + 690, + 1186, + 690, + 1219, + 300, + 1219 + ], + "score": 0.905 + }, + { + "category_id": 2, + "poly": [ + 841, + 2062, + 858, + 2062, + 858, + 2084, + 841, + 2084 + ], + "score": 0.784 + }, + { + "category_id": 13, + "poly": [ + 1159, + 491, + 1246, + 491, + 1246, + 521, + 1159, + 521 + ], + "score": 0.89, + "latex": "\\pm 0 . 2 \\mathrm { m }" + }, + { + "category_id": 13, + "poly": [ + 1334, + 461, + 1402, + 461, + 1402, + 490, + 1334, + 490 + ], + "score": 0.74, + "latex": "0 . 3 \\mathrm { m }" + }, + { + "category_id": 13, + "poly": [ + 442, + 462, + 508, + 462, + 508, + 489, + 442, + 489 + ], + "score": 0.67, + "latex": "0 . 1 \\mathrm { m }" + }, + { + "category_id": 13, + "poly": [ + 450, + 492, + 517, + 492, + 517, + 520, + 450, + 520 + ], + "score": 0.61, + "latex": "\\mathrm { 0 . 
2 m }" + }, + { + "category_id": 13, + "poly": [ + 1236, + 1594, + 1307, + 1594, + 1307, + 1623, + 1236, + 1623 + ], + "score": 0.5, + "latex": "2 0 \\mathrm { c m }" + }, + { + "category_id": 13, + "poly": [ + 578, + 666, + 637, + 666, + 637, + 699, + 578, + 699 + ], + "score": 0.44, + "latex": "[ 2 6 ] ^ { 1 }" + }, + { + "category_id": 13, + "poly": [ + 809, + 1307, + 849, + 1307, + 849, + 1335, + 809, + 1335 + ], + "score": 0.34, + "latex": "8 \\mathrm { m }" + }, + { + "category_id": 13, + "poly": [ + 1145, + 1595, + 1204, + 1595, + 1204, + 1623, + 1145, + 1623 + ], + "score": 0.28, + "latex": "5 \\mathrm { c m }" + }, + { + "category_id": 13, + "poly": [ + 964, + 461, + 1042, + 461, + 1042, + 492, + 964, + 492 + ], + "score": 0.27, + "latex": "2 5 \\mathrm { d e g }" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 429.0, + 1404.0, + 429.0, + 1404.0, + 466.0, + 294.0, + 466.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 461.0, + 441.0, + 461.0, + 441.0, + 494.0, + 296.0, + 494.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 509.0, + 461.0, + 963.0, + 461.0, + 963.0, + 494.0, + 509.0, + 494.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1043.0, + 461.0, + 1333.0, + 461.0, + 1333.0, + 494.0, + 1043.0, + 494.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 491.0, + 449.0, + 491.0, + 449.0, + 524.0, + 296.0, + 524.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 518.0, + 491.0, + 1158.0, + 491.0, + 1158.0, + 524.0, + 518.0, + 524.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1247.0, + 491.0, + 1257.0, + 491.0, + 1257.0, + 524.0, + 1247.0, + 524.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 417.0, + 382.0, + 457.0, + 382.0, + 457.0, + 417.0, + 417.0, + 417.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 
15, + "poly": [ + 691.0, + 378.0, + 734.0, + 378.0, + 734.0, + 417.0, + 691.0, + 417.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 967.0, + 382.0, + 1008.0, + 382.0, + 1008.0, + 417.0, + 967.0, + 417.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1242.0, + 380.0, + 1284.0, + 380.0, + 1284.0, + 417.0, + 1242.0, + 417.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 330.0, + 1942.0, + 1408.0, + 1942.0, + 1408.0, + 1984.0, + 330.0, + 1984.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1976.0, + 1229.0, + 1976.0, + 1229.0, + 2011.0, + 292.0, + 2011.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 289.0, + 920.0, + 594.0, + 920.0, + 594.0, + 971.0, + 289.0, + 971.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1182.0, + 693.0, + 1182.0, + 693.0, + 1228.0, + 292.0, + 1228.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 838.0, + 2059.0, + 862.0, + 2059.0, + 862.0, + 2091.0, + 838.0, + 2091.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 547.0, + 1403.0, + 547.0, + 1403.0, + 578.0, + 296.0, + 578.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 575.0, + 1405.0, + 575.0, + 1405.0, + 611.0, + 292.0, + 611.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 605.0, + 1405.0, + 605.0, + 1405.0, + 642.0, + 293.0, + 642.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 636.0, + 1406.0, + 636.0, + 1406.0, + 673.0, + 292.0, + 673.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 664.0, + 577.0, + 664.0, + 577.0, + 704.0, + 292.0, + 704.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 638.0, + 664.0, + 1406.0, + 664.0, + 
1406.0, + 704.0, + 638.0, + 704.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 700.0, + 1403.0, + 700.0, + 1403.0, + 731.0, + 296.0, + 731.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 727.0, + 1405.0, + 727.0, + 1405.0, + 762.0, + 295.0, + 762.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 758.0, + 1405.0, + 758.0, + 1405.0, + 793.0, + 295.0, + 793.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 790.0, + 1403.0, + 790.0, + 1403.0, + 821.0, + 296.0, + 821.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 820.0, + 1405.0, + 820.0, + 1405.0, + 852.0, + 293.0, + 852.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 850.0, + 356.0, + 850.0, + 356.0, + 883.0, + 292.0, + 883.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1442.0, + 1404.0, + 1442.0, + 1404.0, + 1475.0, + 296.0, + 1475.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1470.0, + 1404.0, + 1470.0, + 1404.0, + 1506.0, + 293.0, + 1506.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1501.0, + 1406.0, + 1501.0, + 1406.0, + 1540.0, + 291.0, + 1540.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1532.0, + 1402.0, + 1532.0, + 1402.0, + 1569.0, + 294.0, + 1569.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1562.0, + 1406.0, + 1562.0, + 1406.0, + 1599.0, + 293.0, + 1599.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1592.0, + 1144.0, + 1592.0, + 1144.0, + 1629.0, + 293.0, + 1629.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1205.0, + 1592.0, + 1235.0, + 1592.0, + 1235.0, + 1629.0, + 1205.0, + 1629.0 
+ ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1308.0, + 1592.0, + 1406.0, + 1592.0, + 1406.0, + 1629.0, + 1308.0, + 1629.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1623.0, + 1406.0, + 1623.0, + 1406.0, + 1660.0, + 293.0, + 1660.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1651.0, + 1406.0, + 1651.0, + 1406.0, + 1691.0, + 292.0, + 1691.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1683.0, + 1405.0, + 1683.0, + 1405.0, + 1720.0, + 293.0, + 1720.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1716.0, + 1405.0, + 1716.0, + 1405.0, + 1748.0, + 294.0, + 1748.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1744.0, + 1406.0, + 1744.0, + 1406.0, + 1781.0, + 293.0, + 1781.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1774.0, + 1406.0, + 1774.0, + 1406.0, + 1811.0, + 293.0, + 1811.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1807.0, + 1405.0, + 1807.0, + 1405.0, + 1839.0, + 296.0, + 1839.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1835.0, + 1406.0, + 1835.0, + 1406.0, + 1872.0, + 293.0, + 1872.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1863.0, + 1403.0, + 1863.0, + 1403.0, + 1902.0, + 292.0, + 1902.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1897.0, + 577.0, + 1897.0, + 577.0, + 1929.0, + 294.0, + 1929.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 995.0, + 1404.0, + 995.0, + 1404.0, + 1031.0, + 294.0, + 1031.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1025.0, + 1403.0, + 1025.0, + 1403.0, + 1058.0, + 294.0, + 1058.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1057.0, + 1404.0, + 1057.0, + 1404.0, + 1090.0, + 293.0, + 1090.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1086.0, + 1402.0, + 1086.0, + 1402.0, + 1122.0, + 294.0, + 1122.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1116.0, + 1343.0, + 1116.0, + 1343.0, + 1153.0, + 294.0, + 1153.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1245.0, + 1405.0, + 1245.0, + 1405.0, + 1277.0, + 296.0, + 1277.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1273.0, + 1406.0, + 1273.0, + 1406.0, + 1311.0, + 292.0, + 1311.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1304.0, + 808.0, + 1304.0, + 808.0, + 1340.0, + 293.0, + 1340.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 850.0, + 1304.0, + 1404.0, + 1304.0, + 1404.0, + 1340.0, + 850.0, + 1340.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1336.0, + 1404.0, + 1336.0, + 1404.0, + 1370.0, + 293.0, + 1370.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1363.0, + 1407.0, + 1363.0, + 1407.0, + 1402.0, + 292.0, + 1402.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1397.0, + 702.0, + 1397.0, + 702.0, + 1429.0, + 296.0, + 1429.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 3, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 299, + 1503, + 1402, + 1503, + 1402, + 1746, + 299, + 1746 + ], + "score": 0.982 + }, + { + "category_id": 1, + "poly": [ + 298, + 1031, + 1404, + 1031, + 1404, + 1245, + 298, + 1245 + ], + "score": 0.979 + }, + { + "category_id": 1, + "poly": [ + 299, + 1336, + 1403, + 1336, + 1403, + 1489, + 299, 
+ 1489 + ], + "score": 0.977 + }, + { + "category_id": 1, + "poly": [ + 299, + 833, + 1402, + 833, + 1402, + 1018, + 299, + 1018 + ], + "score": 0.977 + }, + { + "category_id": 3, + "poly": [ + 299, + 198, + 1400, + 198, + 1400, + 706, + 299, + 706 + ], + "score": 0.975 + }, + { + "category_id": 1, + "poly": [ + 299, + 1762, + 1404, + 1762, + 1404, + 1852, + 299, + 1852 + ], + "score": 0.973 + }, + { + "category_id": 4, + "poly": [ + 299, + 721, + 1401, + 721, + 1401, + 814, + 299, + 814 + ], + "score": 0.956 + }, + { + "category_id": 1, + "poly": [ + 298, + 1945, + 1398, + 1945, + 1398, + 2007, + 298, + 2007 + ], + "score": 0.949 + }, + { + "category_id": 0, + "poly": [ + 303, + 1281, + 791, + 1281, + 791, + 1313, + 303, + 1313 + ], + "score": 0.921 + }, + { + "category_id": 0, + "poly": [ + 299, + 1888, + 627, + 1888, + 627, + 1921, + 299, + 1921 + ], + "score": 0.919 + }, + { + "category_id": 2, + "poly": [ + 841, + 2062, + 858, + 2062, + 858, + 2085, + 841, + 2085 + ], + "score": 0.724 + }, + { + "category_id": 15, + "poly": [ + 472.0, + 389.0, + 483.0, + 389.0, + 483.0, + 399.0, + 472.0, + 399.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 945.0, + 387.0, + 962.0, + 387.0, + 962.0, + 396.0, + 945.0, + 396.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 747.0, + 422.0, + 757.0, + 422.0, + 757.0, + 432.0, + 747.0, + 432.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1299.0, + 512.0, + 1312.0, + 512.0, + 1312.0, + 522.0, + 1299.0, + 522.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1255.0, + 565.0, + 1266.0, + 565.0, + 1266.0, + 577.0, + 1255.0, + 577.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1391.0, + 565.0, + 1400.0, + 565.0, + 1400.0, + 577.0, + 1391.0, + 577.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1372.0, + 570.0, + 1389.0, + 570.0, + 1389.0, + 586.0, + 
1372.0, + 586.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1312.0, + 571.0, + 1329.0, + 571.0, + 1329.0, + 586.0, + 1312.0, + 586.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1357.0, + 577.0, + 1367.0, + 577.0, + 1367.0, + 586.0, + 1357.0, + 586.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1344.0, + 579.0, + 1359.0, + 579.0, + 1359.0, + 594.0, + 1344.0, + 594.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 412.0, + 593.0, + 422.0, + 593.0, + 422.0, + 601.0, + 412.0, + 601.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1326.0, + 586.0, + 1342.0, + 586.0, + 1342.0, + 603.0, + 1326.0, + 603.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1217.0, + 603.0, + 1230.0, + 603.0, + 1230.0, + 612.0, + 1217.0, + 612.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 609.0, + 333.0, + 609.0, + 333.0, + 618.0, + 322.0, + 618.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 388.0, + 626.0, + 398.0, + 626.0, + 398.0, + 634.0, + 388.0, + 634.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 977.0, + 626.0, + 990.0, + 626.0, + 990.0, + 634.0, + 977.0, + 634.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 646.0, + 330.0, + 646.0, + 330.0, + 656.0, + 320.0, + 656.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 438.0, + 660.0, + 452.0, + 660.0, + 452.0, + 670.0, + 438.0, + 670.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 477.0, + 652.0, + 487.0, + 652.0, + 487.0, + 661.0, + 477.0, + 661.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 537.0, + 641.0, + 557.0, + 641.0, + 557.0, + 661.0, + 537.0, + 661.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 572.0, + 642.0, + 587.0, + 642.0, + 587.0, + 661.0, + 572.0, + 661.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 702.0, + 657.0, + 712.0, + 657.0, + 712.0, + 667.0, + 702.0, + 667.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 727.0, + 631.0, + 737.0, + 631.0, + 737.0, + 642.0, + 727.0, + 642.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 912.0, + 646.0, + 922.0, + 646.0, + 922.0, + 657.0, + 912.0, + 657.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 928.0, + 646.0, + 943.0, + 646.0, + 943.0, + 656.0, + 928.0, + 656.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 986.0, + 633.0, + 1011.0, + 633.0, + 1011.0, + 669.0, + 986.0, + 669.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1043.0, + 656.0, + 1060.0, + 656.0, + 1060.0, + 671.0, + 1043.0, + 671.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1112.0, + 649.0, + 1155.0, + 649.0, + 1155.0, + 674.0, + 1112.0, + 674.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1200.0, + 643.0, + 1212.0, + 643.0, + 1212.0, + 655.0, + 1200.0, + 655.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 643.0, + 671.0, + 659.0, + 671.0, + 659.0, + 686.0, + 643.0, + 686.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1180.0, + 670.0, + 1192.0, + 670.0, + 1192.0, + 680.0, + 1180.0, + 680.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 449.0, + 683.0, + 463.0, + 683.0, + 463.0, + 693.0, + 449.0, + 693.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1346.0, + 551.0, + 1370.0, + 551.0, + 1370.0, + 565.0, + 1346.0, + 565.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1320.0, + 566.5, + 1351.0, + 
566.5, + 1351.0, + 582.0, + 1320.0, + 582.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 652.0, + 653.0, + 656.0, + 653.0, + 656.0, + 663.0, + 652.0, + 663.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1192.0, + 656.0, + 1214.0, + 656.0, + 1214.0, + 673.5, + 1192.0, + 673.5 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 453.75, + 660.5, + 475.75, + 660.5, + 475.75, + 675.5, + 453.75, + 675.5 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 719.0, + 1405.0, + 719.0, + 1405.0, + 758.0, + 294.0, + 758.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 752.0, + 1406.0, + 752.0, + 1406.0, + 786.0, + 295.0, + 786.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 783.0, + 878.0, + 783.0, + 878.0, + 817.0, + 297.0, + 817.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1278.0, + 796.0, + 1278.0, + 796.0, + 1318.0, + 296.0, + 1318.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1886.0, + 631.0, + 1886.0, + 631.0, + 1925.0, + 293.0, + 1925.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 839.0, + 2060.0, + 860.0, + 2060.0, + 860.0, + 2091.0, + 839.0, + 2091.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1502.0, + 1404.0, + 1502.0, + 1404.0, + 1538.0, + 294.0, + 1538.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1533.0, + 1406.0, + 1533.0, + 1406.0, + 1567.0, + 293.0, + 1567.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1564.0, + 1404.0, + 1564.0, + 1404.0, + 1598.0, + 294.0, + 1598.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1595.0, + 1406.0, + 1595.0, + 1406.0, + 1627.0, + 293.0, + 
1627.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1627.0, + 1406.0, + 1627.0, + 1406.0, + 1657.0, + 297.0, + 1657.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1653.0, + 1407.0, + 1653.0, + 1407.0, + 1691.0, + 293.0, + 1691.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1686.0, + 1406.0, + 1686.0, + 1406.0, + 1720.0, + 294.0, + 1720.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1716.0, + 798.0, + 1716.0, + 798.0, + 1748.0, + 294.0, + 1748.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1029.0, + 1405.0, + 1029.0, + 1405.0, + 1068.0, + 292.0, + 1068.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1061.0, + 1404.0, + 1061.0, + 1404.0, + 1096.0, + 295.0, + 1096.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1090.0, + 1404.0, + 1090.0, + 1404.0, + 1128.0, + 293.0, + 1128.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1122.0, + 1404.0, + 1122.0, + 1404.0, + 1158.0, + 293.0, + 1158.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1153.0, + 1404.0, + 1153.0, + 1404.0, + 1187.0, + 294.0, + 1187.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1182.0, + 1405.0, + 1182.0, + 1405.0, + 1221.0, + 292.0, + 1221.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1215.0, + 598.0, + 1215.0, + 598.0, + 1250.0, + 294.0, + 1250.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1333.0, + 1404.0, + 1333.0, + 1404.0, + 1372.0, + 295.0, + 1372.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1368.0, + 1404.0, + 1368.0, + 1404.0, + 1401.0, + 297.0, + 1401.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1393.0, + 1407.0, + 1393.0, + 1407.0, + 1436.0, + 292.0, + 1436.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1426.0, + 1405.0, + 1426.0, + 1405.0, + 1461.0, + 293.0, + 1461.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1457.0, + 1173.0, + 1457.0, + 1173.0, + 1493.0, + 294.0, + 1493.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 835.0, + 1404.0, + 835.0, + 1404.0, + 867.0, + 297.0, + 867.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 864.0, + 1406.0, + 864.0, + 1406.0, + 900.0, + 294.0, + 900.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 894.0, + 1407.0, + 894.0, + 1407.0, + 932.0, + 292.0, + 932.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 927.0, + 1404.0, + 927.0, + 1404.0, + 959.0, + 295.0, + 959.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 956.0, + 1404.0, + 956.0, + 1404.0, + 992.0, + 294.0, + 992.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 988.0, + 814.0, + 988.0, + 814.0, + 1020.0, + 297.0, + 1020.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1760.0, + 1406.0, + 1760.0, + 1406.0, + 1796.0, + 295.0, + 1796.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1793.0, + 1402.0, + 1793.0, + 1402.0, + 1824.0, + 294.0, + 1824.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1820.0, + 816.0, + 1820.0, + 816.0, + 1858.0, + 294.0, + 1858.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1944.0, + 1404.0, + 1944.0, + 1404.0, + 1980.0, + 295.0, + 1980.0 + ], + "score": 1.0, + "text": "" + }, + { 
+ "category_id": 15, + "poly": [ + 294.0, + 1973.0, + 1404.0, + 1973.0, + 1404.0, + 2011.0, + 294.0, + 2011.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 4, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 297, + 339, + 1404, + 339, + 1404, + 614, + 297, + 614 + ], + "score": 0.98 + }, + { + "category_id": 1, + "poly": [ + 297, + 1058, + 1404, + 1058, + 1404, + 1270, + 297, + 1270 + ], + "score": 0.977 + }, + { + "category_id": 1, + "poly": [ + 299, + 784, + 1404, + 784, + 1404, + 938, + 299, + 938 + ], + "score": 0.977 + }, + { + "category_id": 1, + "poly": [ + 297, + 1284, + 1403, + 1284, + 1403, + 1468, + 297, + 1468 + ], + "score": 0.976 + }, + { + "category_id": 1, + "poly": [ + 298, + 203, + 1403, + 203, + 1403, + 325, + 298, + 325 + ], + "score": 0.973 + }, + { + "category_id": 3, + "poly": [ + 303, + 1520, + 1394, + 1520, + 1394, + 1837, + 303, + 1837 + ], + "score": 0.972 + }, + { + "category_id": 1, + "poly": [ + 300, + 951, + 1403, + 951, + 1403, + 1043, + 300, + 1043 + ], + "score": 0.971 + }, + { + "category_id": 4, + "poly": [ + 296, + 1854, + 1406, + 1854, + 1406, + 2038, + 296, + 2038 + ], + "score": 0.966 + }, + { + "category_id": 0, + "poly": [ + 300, + 727, + 716, + 727, + 716, + 760, + 300, + 760 + ], + "score": 0.915 + }, + { + "category_id": 0, + "poly": [ + 298, + 658, + 454, + 658, + 454, + 695, + 298, + 695 + ], + "score": 0.904 + }, + { + "category_id": 2, + "poly": [ + 840, + 2062, + 859, + 2062, + 859, + 2085, + 840, + 2085 + ], + "score": 0.709 + }, + { + "category_id": 2, + "poly": [ + 840, + 2062, + 859, + 2062, + 859, + 2086, + 840, + 2086 + ], + "score": 0.117 + }, + { + "category_id": 13, + "poly": [ + 1151, + 234, + 1243, + 234, + 1243, + 267, + 1151, + 267 + ], + "score": 0.9, + "latex": "\\pm 1 \\mathrm { m } / \\mathrm { s }" + }, + { + "category_id": 13, + "poly": [ + 949, + 953, + 1089, + 953, + 1089, + 986, + 949, + 986 + ], + "score": 0.9, + 
"latex": "n _ { s t e p s } = 5 0" + }, + { + "category_id": 13, + "poly": [ + 704, + 952, + 896, + 952, + 896, + 984, + 704, + 984 + ], + "score": 0.89, + "latex": "n _ { r o b o t s } = 2 0 0 0 0" + }, + { + "category_id": 13, + "poly": [ + 667, + 234, + 714, + 234, + 714, + 263, + 667, + 263 + ], + "score": 0.46, + "latex": "1 0 \\mathrm { s }" + }, + { + "category_id": 13, + "poly": [ + 1334, + 238, + 1354, + 238, + 1354, + 262, + 1334, + 262 + ], + "score": 0.36, + "latex": "\\mathbf { X }" + }, + { + "category_id": 15, + "poly": [ + 708.0, + 1523.0, + 741.0, + 1523.0, + 741.0, + 1569.0, + 708.0, + 1569.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 749.0, + 1530.0, + 760.0, + 1530.0, + 760.0, + 1542.0, + 749.0, + 1542.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1048.0, + 1521.0, + 1089.0, + 1521.0, + 1089.0, + 1544.0, + 1048.0, + 1544.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1221.0, + 1534.0, + 1232.0, + 1534.0, + 1232.0, + 1546.0, + 1221.0, + 1546.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1253.0, + 1542.0, + 1265.0, + 1542.0, + 1265.0, + 1553.0, + 1253.0, + 1553.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 322.0, + 1565.0, + 351.0, + 1565.0, + 351.0, + 1590.0, + 322.0, + 1590.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 665.0, + 1565.0, + 694.0, + 1565.0, + 694.0, + 1702.0, + 665.0, + 1702.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 711.0, + 1557.0, + 738.0, + 1557.0, + 738.0, + 1582.0, + 711.0, + 1582.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 749.0, + 1566.0, + 759.0, + 1566.0, + 759.0, + 1577.0, + 749.0, + 1577.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1048.0, + 1549.0, + 1090.0, + 1549.0, + 1090.0, + 1573.0, + 1048.0, + 1573.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1206.0, + 1560.0, + 1222.0, + 1560.0, + 1222.0, + 1574.0, + 1206.0, + 1574.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1247.0, + 1555.0, + 1257.0, + 1555.0, + 1257.0, + 1564.0, + 1247.0, + 1564.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1329.0, + 1564.0, + 1339.0, + 1564.0, + 1339.0, + 1573.0, + 1329.0, + 1573.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 303.0, + 1587.0, + 327.0, + 1587.0, + 327.0, + 1682.0, + 303.0, + 1682.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 402.0, + 1603.0, + 412.0, + 1603.0, + 412.0, + 1613.0, + 402.0, + 1613.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 710.0, + 1591.0, + 738.0, + 1591.0, + 738.0, + 1616.0, + 710.0, + 1616.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 748.0, + 1578.0, + 763.0, + 1578.0, + 763.0, + 1594.0, + 748.0, + 1594.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1029.0, + 1578.0, + 1088.0, + 1578.0, + 1088.0, + 1684.0, + 1029.0, + 1684.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1161.0, + 1590.0, + 1177.0, + 1590.0, + 1177.0, + 1628.0, + 1161.0, + 1628.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1283.0, + 1590.0, + 1295.0, + 1590.0, + 1295.0, + 1602.0, + 1283.0, + 1602.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1366.0, + 1599.0, + 1382.0, + 1599.0, + 1382.0, + 1615.0, + 1366.0, + 1615.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 320.0, + 1617.0, + 351.0, + 1617.0, + 351.0, + 1641.0, + 320.0, + 1641.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1049.0, + 1608.0, + 1088.0, + 1608.0, + 1088.0, + 1630.0, + 1049.0, + 1630.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1201.0, + 1611.0, + 1212.0, + 1611.0, + 1212.0, + 1622.0, + 1201.0, + 1622.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1238.0, + 1617.0, + 1248.0, + 1617.0, + 1248.0, + 1628.0, + 1238.0, + 1628.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1321.0, + 1615.0, + 1340.0, + 1615.0, + 1340.0, + 1630.0, + 1321.0, + 1630.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 712.0, + 1626.0, + 738.0, + 1626.0, + 738.0, + 1647.0, + 712.0, + 1647.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1056.0, + 1634.0, + 1090.0, + 1634.0, + 1090.0, + 1663.0, + 1056.0, + 1663.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1301.0, + 1630.0, + 1317.0, + 1630.0, + 1317.0, + 1646.0, + 1301.0, + 1646.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 331.0, + 1671.0, + 351.0, + 1671.0, + 351.0, + 1693.0, + 331.0, + 1693.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 375.0, + 1659.0, + 389.0, + 1659.0, + 389.0, + 1669.0, + 375.0, + 1669.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 397.0, + 1654.0, + 536.0, + 1654.0, + 536.0, + 1716.0, + 397.0, + 1716.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 681.0, + 1658.0, + 740.0, + 1658.0, + 740.0, + 1704.0, + 681.0, + 1704.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1056.0, + 1665.0, + 1089.0, + 1665.0, + 1089.0, + 1719.0, + 1056.0, + 1719.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1109.0, + 1662.0, + 1129.0, + 1662.0, + 1129.0, + 1715.0, + 1109.0, + 1715.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1131.0, + 1699.0, + 1143.0, + 1699.0, + 1143.0, + 1712.0, + 1131.0, + 1712.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1144.0, + 1656.0, + 1156.0, + 1656.0, + 1156.0, + 1695.0, + 1144.0, + 1695.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1206.0, + 1658.0, + 1222.0, + 1658.0, + 1222.0, + 1714.0, + 1206.0, + 1714.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1229.0, + 1652.0, + 1269.0, + 1652.0, + 1269.0, + 1718.0, + 1229.0, + 1718.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1305.0, + 1660.0, + 1317.0, + 1660.0, + 1317.0, + 1671.0, + 1305.0, + 1671.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1327.0, + 1651.0, + 1375.0, + 1651.0, + 1375.0, + 1718.0, + 1327.0, + 1718.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 397.0, + 1712.0, + 553.0, + 1712.0, + 553.0, + 1737.0, + 397.0, + 1737.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 686.0, + 1712.0, + 738.0, + 1712.0, + 738.0, + 1735.0, + 686.0, + 1735.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1205.0, + 1716.0, + 1222.0, + 1716.0, + 1222.0, + 1733.0, + 1205.0, + 1733.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1231.0, + 1711.0, + 1278.0, + 1711.0, + 1278.0, + 1737.0, + 1231.0, + 1737.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1303.0, + 1718.0, + 1319.0, + 1718.0, + 1319.0, + 1733.0, + 1303.0, + 1733.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1329.0, + 1711.0, + 1383.0, + 1711.0, + 1383.0, + 1737.0, + 1329.0, + 1737.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 331.0, + 1724.0, + 351.0, + 1724.0, + 351.0, + 1745.0, + 331.0, + 1745.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1057.0, + 1724.0, + 1093.0, + 1724.0, + 1093.0, + 1748.0, + 1057.0, + 1748.0 + 
], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 350.0, + 1746.0, + 667.0, + 1746.0, + 667.0, + 1771.0, + 350.0, + 1771.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 738.0, + 1748.0, + 1030.0, + 1748.0, + 1030.0, + 1768.0, + 738.0, + 1768.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1104.0, + 1745.0, + 1136.0, + 1745.0, + 1136.0, + 1770.0, + 1104.0, + 1770.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1254.0, + 1741.0, + 1291.0, + 1741.0, + 1291.0, + 1771.0, + 1254.0, + 1771.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 442.0, + 1767.0, + 568.0, + 1767.0, + 568.0, + 1788.0, + 442.0, + 1788.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 816.0, + 1767.0, + 943.0, + 1767.0, + 943.0, + 1788.0, + 816.0, + 1788.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1171.0, + 1763.0, + 1309.0, + 1763.0, + 1309.0, + 1792.0, + 1171.0, + 1792.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 466.0, + 1805.0, + 507.0, + 1805.0, + 507.0, + 1840.0, + 466.0, + 1840.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 829.0, + 1805.0, + 872.0, + 1805.0, + 872.0, + 1840.0, + 829.0, + 1840.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1192.0, + 1805.0, + 1234.0, + 1805.0, + 1234.0, + 1841.0, + 1192.0, + 1841.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1194.0, + 1536.0, + 1223.0, + 1536.0, + 1223.0, + 1551.0, + 1194.0, + 1551.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1168.25, + 1549.0, + 1201.25, + 1549.0, + 1201.25, + 1569.0, + 1168.25, + 1569.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1161.0, + 1563.0, + 1198.0, + 1563.0, + 1198.0, + 1588.0, + 1161.0, + 1588.0 + ], 
+ "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1142.0, + 1577.0, + 1179.0, + 1577.0, + 1179.0, + 1596.0, + 1142.0, + 1596.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1172.0, + 1592.5, + 1200.0, + 1592.5, + 1200.0, + 1601.5, + 1172.0, + 1601.5 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1104.0, + 1625.5, + 1170.0, + 1625.5, + 1170.0, + 1639.5, + 1104.0, + 1639.5 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1103.75, + 1710.0, + 1115.75, + 1710.0, + 1115.75, + 1742.0, + 1103.75, + 1742.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1853.0, + 1406.0, + 1853.0, + 1406.0, + 1889.0, + 294.0, + 1889.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1884.0, + 1408.0, + 1884.0, + 1408.0, + 1919.0, + 296.0, + 1919.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1912.0, + 1406.0, + 1912.0, + 1406.0, + 1950.0, + 293.0, + 1950.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1946.0, + 1404.0, + 1946.0, + 1404.0, + 1980.0, + 293.0, + 1980.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1976.0, + 1404.0, + 1976.0, + 1404.0, + 2008.0, + 296.0, + 2008.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 2004.0, + 1327.0, + 2004.0, + 1327.0, + 2041.0, + 295.0, + 2041.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 728.0, + 718.0, + 728.0, + 718.0, + 761.0, + 296.0, + 761.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 654.0, + 459.0, + 654.0, + 459.0, + 701.0, + 292.0, + 701.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 840.0, + 2060.0, + 862.0, + 2060.0, + 862.0, + 2091.0, + 840.0, + 2091.0 + ], + "score": 
1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 840.0, + 2059.0, + 862.0, + 2059.0, + 862.0, + 2090.0, + 840.0, + 2090.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 338.0, + 1405.0, + 338.0, + 1405.0, + 372.0, + 296.0, + 372.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 368.0, + 1407.0, + 368.0, + 1407.0, + 405.0, + 294.0, + 405.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 402.0, + 1405.0, + 402.0, + 1405.0, + 435.0, + 295.0, + 435.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 430.0, + 1406.0, + 430.0, + 1406.0, + 466.0, + 295.0, + 466.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 460.0, + 1405.0, + 460.0, + 1405.0, + 495.0, + 294.0, + 495.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 491.0, + 1406.0, + 491.0, + 1406.0, + 527.0, + 294.0, + 527.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 521.0, + 1407.0, + 521.0, + 1407.0, + 556.0, + 294.0, + 556.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 553.0, + 1404.0, + 553.0, + 1404.0, + 586.0, + 296.0, + 586.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 582.0, + 644.0, + 582.0, + 644.0, + 620.0, + 295.0, + 620.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1060.0, + 1404.0, + 1060.0, + 1404.0, + 1091.0, + 297.0, + 1091.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1090.0, + 1406.0, + 1090.0, + 1406.0, + 1124.0, + 294.0, + 1124.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1118.0, + 1405.0, + 1118.0, + 1405.0, + 1155.0, + 292.0, + 1155.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + 
"poly": [ + 294.0, + 1149.0, + 1405.0, + 1149.0, + 1405.0, + 1184.0, + 294.0, + 1184.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1180.0, + 1404.0, + 1180.0, + 1404.0, + 1214.0, + 295.0, + 1214.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1209.0, + 1404.0, + 1209.0, + 1404.0, + 1244.0, + 292.0, + 1244.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1241.0, + 503.0, + 1241.0, + 503.0, + 1270.0, + 295.0, + 1270.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 786.0, + 1405.0, + 786.0, + 1405.0, + 819.0, + 295.0, + 819.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 816.0, + 1404.0, + 816.0, + 1404.0, + 849.0, + 297.0, + 849.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 842.0, + 1408.0, + 842.0, + 1408.0, + 882.0, + 293.0, + 882.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 876.0, + 1406.0, + 876.0, + 1406.0, + 913.0, + 294.0, + 913.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 907.0, + 1140.0, + 907.0, + 1140.0, + 940.0, + 295.0, + 940.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1285.0, + 1405.0, + 1285.0, + 1405.0, + 1320.0, + 293.0, + 1320.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1311.0, + 1407.0, + 1311.0, + 1407.0, + 1353.0, + 292.0, + 1353.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1343.0, + 1405.0, + 1343.0, + 1405.0, + 1380.0, + 292.0, + 1380.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1376.0, + 1405.0, + 1376.0, + 1405.0, + 1410.0, + 293.0, + 1410.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1408.0, + 
1404.0, + 1408.0, + 1404.0, + 1439.0, + 295.0, + 1439.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1436.0, + 1407.0, + 1436.0, + 1407.0, + 1471.0, + 295.0, + 1471.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 200.0, + 1405.0, + 200.0, + 1405.0, + 237.0, + 292.0, + 237.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 232.0, + 666.0, + 232.0, + 666.0, + 267.0, + 294.0, + 267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 715.0, + 232.0, + 1150.0, + 232.0, + 1150.0, + 267.0, + 715.0, + 267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1244.0, + 232.0, + 1333.0, + 232.0, + 1333.0, + 267.0, + 1244.0, + 267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1355.0, + 232.0, + 1407.0, + 232.0, + 1407.0, + 267.0, + 1355.0, + 267.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 263.0, + 1406.0, + 263.0, + 1406.0, + 296.0, + 292.0, + 296.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 296.0, + 570.0, + 296.0, + 570.0, + 328.0, + 294.0, + 328.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 947.0, + 703.0, + 947.0, + 703.0, + 992.0, + 293.0, + 992.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 897.0, + 947.0, + 948.0, + 947.0, + 948.0, + 992.0, + 897.0, + 992.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1090.0, + 947.0, + 1409.0, + 947.0, + 1409.0, + 992.0, + 1090.0, + 992.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 979.0, + 1404.0, + 979.0, + 1404.0, + 1017.0, + 295.0, + 1017.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1009.0, + 512.0, + 1009.0, + 512.0, + 1050.0, + 293.0, + 1050.0 
+ ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 5, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 298, + 1346, + 1404, + 1346, + 1404, + 1710, + 298, + 1710 + ], + "score": 0.984 + }, + { + "category_id": 1, + "poly": [ + 297, + 857, + 1405, + 857, + 1405, + 1251, + 297, + 1251 + ], + "score": 0.982 + }, + { + "category_id": 1, + "poly": [ + 299, + 1726, + 1403, + 1726, + 1403, + 1847, + 299, + 1847 + ], + "score": 0.972 + }, + { + "category_id": 3, + "poly": [ + 365, + 194, + 1336, + 194, + 1336, + 472, + 365, + 472 + ], + "score": 0.971 + }, + { + "category_id": 3, + "poly": [ + 303, + 634, + 1398, + 634, + 1398, + 788, + 303, + 788 + ], + "score": 0.964 + }, + { + "category_id": 4, + "poly": [ + 296, + 489, + 1405, + 489, + 1405, + 614, + 296, + 614 + ], + "score": 0.957 + }, + { + "category_id": 1, + "poly": [ + 301, + 1861, + 1398, + 1861, + 1398, + 1924, + 301, + 1924 + ], + "score": 0.933 + }, + { + "category_id": 4, + "poly": [ + 393, + 802, + 1299, + 802, + 1299, + 834, + 393, + 834 + ], + "score": 0.926 + }, + { + "category_id": 2, + "poly": [ + 306, + 1948, + 1398, + 1948, + 1398, + 2007, + 306, + 2007 + ], + "score": 0.922 + }, + { + "category_id": 0, + "poly": [ + 298, + 1289, + 492, + 1289, + 492, + 1321, + 298, + 1321 + ], + "score": 0.896 + }, + { + "category_id": 2, + "poly": [ + 841, + 2061, + 858, + 2061, + 858, + 2084, + 841, + 2084 + ], + "score": 0.739 + }, + { + "category_id": 13, + "poly": [ + 628, + 550, + 803, + 550, + 803, + 585, + 628, + 585 + ], + "score": 0.91, + "latex": "[ - 0 . 1 , 0 . 
1 ] \\mathrm { m } / \\mathrm { s }" + }, + { + "category_id": 13, + "poly": [ + 1015, + 1528, + 1088, + 1528, + 1088, + 1558, + 1015, + 1558 + ], + "score": 0.89, + "latex": "1 0 0 \\%" + }, + { + "category_id": 13, + "poly": [ + 1194, + 1891, + 1253, + 1891, + 1253, + 1922, + 1194, + 1922 + ], + "score": 0.89, + "latex": "2 0 \\%" + }, + { + "category_id": 13, + "poly": [ + 810, + 1191, + 899, + 1191, + 899, + 1220, + 810, + 1220 + ], + "score": 0.88, + "latex": "\\approx 1 0 0 k" + }, + { + "category_id": 13, + "poly": [ + 1064, + 521, + 1169, + 521, + 1169, + 553, + 1064, + 553 + ], + "score": 0.87, + "latex": "0 . 7 5 \\mathrm { m } / \\mathrm { s }" + }, + { + "category_id": 13, + "poly": [ + 931, + 1191, + 1021, + 1191, + 1021, + 1220, + 931, + 1220 + ], + "score": 0.84, + "latex": "\\approx 2 0 0 k" + }, + { + "category_id": 13, + "poly": [ + 297, + 1559, + 364, + 1559, + 364, + 1588, + 297, + 1588 + ], + "score": 0.66, + "latex": "\\mathrm { 0 . 2 m }" + }, + { + "category_id": 13, + "poly": [ + 1282, + 1650, + 1360, + 1650, + 1360, + 1682, + 1282, + 1682 + ], + "score": 0.37, + "latex": "2 5 \\mathrm { d e g }" + }, + { + "category_id": 15, + "poly": [ + 383.0, + 197.0, + 431.0, + 197.0, + 431.0, + 223.0, + 383.0, + 223.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 437.0, + 205.0, + 451.0, + 205.0, + 451.0, + 218.0, + 437.0, + 218.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 477.0, + 207.0, + 486.0, + 207.0, + 486.0, + 218.0, + 477.0, + 218.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 877.0, + 197.0, + 925.0, + 197.0, + 925.0, + 223.0, + 877.0, + 223.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 932.0, + 205.0, + 945.0, + 205.0, + 945.0, + 218.0, + 932.0, + 218.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 987.0, + 206.0, + 997.0, + 206.0, + 997.0, + 217.0, + 987.0, + 217.0 + ], + 
"score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1037.0, + 204.0, + 1049.0, + 204.0, + 1049.0, + 218.0, + 1037.0, + 218.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1086.0, + 206.0, + 1096.0, + 206.0, + 1096.0, + 216.0, + 1086.0, + 216.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1132.0, + 211.0, + 1141.0, + 211.0, + 1141.0, + 219.0, + 1132.0, + 219.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 360.0, + 225.0, + 423.0, + 225.0, + 423.0, + 357.0, + 360.0, + 357.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 855.0, + 226.0, + 884.0, + 226.0, + 884.0, + 356.0, + 855.0, + 356.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 886.0, + 229.0, + 919.0, + 229.0, + 919.0, + 254.0, + 886.0, + 254.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1132.0, + 228.0, + 1141.0, + 228.0, + 1141.0, + 235.0, + 1132.0, + 235.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1174.0, + 242.0, + 1183.0, + 242.0, + 1183.0, + 249.0, + 1174.0, + 249.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 393.0, + 261.0, + 423.0, + 261.0, + 423.0, + 287.0, + 393.0, + 287.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 775.0, + 270.0, + 786.0, + 270.0, + 786.0, + 279.0, + 775.0, + 279.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 811.0, + 267.0, + 825.0, + 267.0, + 825.0, + 280.0, + 811.0, + 280.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 885.0, + 260.0, + 922.0, + 260.0, + 922.0, + 288.0, + 885.0, + 288.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1278.0, + 266.0, + 1288.0, + 266.0, + 1288.0, + 275.0, + 1278.0, + 275.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + 
"poly": [ + 1308.0, + 270.0, + 1317.0, + 270.0, + 1317.0, + 279.0, + 1308.0, + 279.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 392.0, + 293.0, + 423.0, + 293.0, + 423.0, + 319.0, + 392.0, + 319.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 444.0, + 309.0, + 461.0, + 309.0, + 461.0, + 322.0, + 444.0, + 322.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 469.0, + 304.0, + 608.0, + 304.0, + 608.0, + 328.0, + 469.0, + 328.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 884.0, + 292.0, + 924.0, + 292.0, + 924.0, + 319.0, + 884.0, + 319.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 391.0, + 325.0, + 423.0, + 325.0, + 423.0, + 351.0, + 391.0, + 351.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 469.0, + 323.0, + 556.0, + 323.0, + 556.0, + 351.0, + 469.0, + 351.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 884.0, + 324.0, + 925.0, + 324.0, + 925.0, + 351.0, + 884.0, + 351.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 962.0, + 325.0, + 996.0, + 325.0, + 996.0, + 351.0, + 962.0, + 351.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 446.0, + 351.0, + 460.0, + 351.0, + 460.0, + 363.0, + 446.0, + 363.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 470.0, + 345.0, + 573.0, + 345.0, + 573.0, + 370.0, + 470.0, + 370.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 938.0, + 351.0, + 952.0, + 351.0, + 952.0, + 364.0, + 938.0, + 364.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 963.0, + 345.0, + 1017.0, + 345.0, + 1017.0, + 371.0, + 963.0, + 371.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 400.0, + 357.0, + 430.0, + 357.0, + 430.0, + 382.0, + 400.0, + 382.0 + 
], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 894.0, + 357.0, + 924.0, + 357.0, + 924.0, + 381.0, + 894.0, + 381.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1211.0, + 357.0, + 1222.0, + 357.0, + 1222.0, + 366.0, + 1211.0, + 366.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 814.0, + 361.0, + 824.0, + 361.0, + 824.0, + 371.0, + 814.0, + 371.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1245.0, + 365.0, + 1257.0, + 365.0, + 1257.0, + 374.0, + 1245.0, + 374.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1308.0, + 365.0, + 1317.0, + 365.0, + 1317.0, + 374.0, + 1308.0, + 374.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 422.0, + 379.0, + 466.0, + 379.0, + 466.0, + 406.0, + 422.0, + 406.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 496.0, + 379.0, + 541.0, + 379.0, + 541.0, + 406.0, + 496.0, + 406.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 570.0, + 378.0, + 616.0, + 378.0, + 616.0, + 406.0, + 570.0, + 406.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 645.0, + 379.0, + 691.0, + 379.0, + 691.0, + 406.0, + 645.0, + 406.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 720.0, + 379.0, + 766.0, + 379.0, + 766.0, + 406.0, + 720.0, + 406.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 795.0, + 380.0, + 840.0, + 380.0, + 840.0, + 406.0, + 795.0, + 406.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 922.0, + 381.0, + 940.0, + 381.0, + 940.0, + 404.0, + 922.0, + 404.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 963.0, + 380.0, + 996.0, + 380.0, + 996.0, + 406.0, + 963.0, + 406.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 
1011.0, + 380.0, + 1042.0, + 380.0, + 1042.0, + 406.0, + 1011.0, + 406.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1058.0, + 379.0, + 1090.0, + 379.0, + 1090.0, + 405.0, + 1058.0, + 405.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1104.0, + 378.0, + 1138.0, + 378.0, + 1138.0, + 407.0, + 1104.0, + 407.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1153.0, + 380.0, + 1185.0, + 380.0, + 1185.0, + 405.0, + 1153.0, + 405.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1200.0, + 380.0, + 1233.0, + 380.0, + 1233.0, + 405.0, + 1200.0, + 405.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1247.0, + 380.0, + 1280.0, + 380.0, + 1280.0, + 405.0, + 1247.0, + 405.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1295.0, + 381.0, + 1326.0, + 381.0, + 1326.0, + 406.0, + 1295.0, + 406.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 570.0, + 401.0, + 690.0, + 401.0, + 690.0, + 425.0, + 570.0, + 425.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1066.0, + 399.0, + 1183.0, + 399.0, + 1183.0, + 426.0, + 1066.0, + 426.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 581.0, + 440.0, + 625.0, + 440.0, + 625.0, + 478.0, + 581.0, + 478.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1074.0, + 440.0, + 1120.0, + 440.0, + 1120.0, + 478.0, + 1074.0, + 478.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 754.0, + 348.5, + 780.0, + 348.5, + 780.0, + 362.0, + 754.0, + 362.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 829.0, + 641.0, + 1285.0, + 641.0, + 1285.0, + 802.0, + 829.0, + 802.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 643.0, + 679.0, + 760.0, + 679.0, + 760.0, + 
753.0, + 643.0, + 753.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 732.0, + 702.0, + 807.0, + 702.0, + 807.0, + 744.0, + 732.0, + 744.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 794.0, + 704.0, + 841.0, + 704.0, + 841.0, + 755.0, + 794.0, + 755.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 491.0, + 1405.0, + 491.0, + 1405.0, + 523.0, + 295.0, + 523.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 520.0, + 1063.0, + 520.0, + 1063.0, + 556.0, + 294.0, + 556.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1170.0, + 520.0, + 1405.0, + 520.0, + 1405.0, + 556.0, + 1170.0, + 556.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 553.0, + 627.0, + 553.0, + 627.0, + 585.0, + 295.0, + 585.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 804.0, + 553.0, + 1403.0, + 553.0, + 1403.0, + 585.0, + 804.0, + 585.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 581.0, + 1350.0, + 581.0, + 1350.0, + 617.0, + 295.0, + 617.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 395.0, + 799.0, + 1304.0, + 799.0, + 1304.0, + 838.0, + 395.0, + 838.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 329.0, + 1943.0, + 1402.0, + 1943.0, + 1402.0, + 1984.0, + 329.0, + 1984.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 299.0, + 1975.0, + 394.0, + 1975.0, + 394.0, + 2009.0, + 299.0, + 2009.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1287.0, + 496.0, + 1287.0, + 496.0, + 1327.0, + 293.0, + 1327.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 839.0, + 2059.0, + 860.0, + 2059.0, + 860.0, + 2091.0, + 839.0, + 2091.0 + ], + "score": 1.0, + "text": 
"" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1347.0, + 1406.0, + 1347.0, + 1406.0, + 1381.0, + 293.0, + 1381.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1372.0, + 1405.0, + 1372.0, + 1405.0, + 1413.0, + 292.0, + 1413.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1408.0, + 1404.0, + 1408.0, + 1404.0, + 1442.0, + 294.0, + 1442.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1439.0, + 1404.0, + 1439.0, + 1404.0, + 1472.0, + 294.0, + 1472.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1468.0, + 1405.0, + 1468.0, + 1405.0, + 1501.0, + 294.0, + 1501.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1499.0, + 1406.0, + 1499.0, + 1406.0, + 1532.0, + 292.0, + 1532.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1527.0, + 1014.0, + 1527.0, + 1014.0, + 1564.0, + 292.0, + 1564.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1089.0, + 1527.0, + 1405.0, + 1527.0, + 1405.0, + 1564.0, + 1089.0, + 1564.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1558.0, + 296.0, + 1558.0, + 296.0, + 1592.0, + 293.0, + 1592.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 365.0, + 1558.0, + 1404.0, + 1558.0, + 1404.0, + 1592.0, + 365.0, + 1592.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1587.0, + 1404.0, + 1587.0, + 1404.0, + 1623.0, + 293.0, + 1623.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1620.0, + 1404.0, + 1620.0, + 1404.0, + 1654.0, + 294.0, + 1654.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1650.0, + 1281.0, + 1650.0, + 1281.0, + 1684.0, + 294.0, + 1684.0 + ], + "score": 1.0, + "text": "" + }, + 
{ + "category_id": 15, + "poly": [ + 1361.0, + 1650.0, + 1404.0, + 1650.0, + 1404.0, + 1684.0, + 1361.0, + 1684.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1679.0, + 1350.0, + 1679.0, + 1350.0, + 1714.0, + 293.0, + 1714.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 855.0, + 1406.0, + 855.0, + 1406.0, + 896.0, + 291.0, + 896.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 889.0, + 1405.0, + 889.0, + 1405.0, + 921.0, + 295.0, + 921.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 919.0, + 1406.0, + 919.0, + 1406.0, + 951.0, + 295.0, + 951.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 945.0, + 1403.0, + 945.0, + 1403.0, + 982.0, + 294.0, + 982.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 977.0, + 1405.0, + 977.0, + 1405.0, + 1014.0, + 292.0, + 1014.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1009.0, + 1403.0, + 1009.0, + 1403.0, + 1041.0, + 295.0, + 1041.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1037.0, + 1407.0, + 1037.0, + 1407.0, + 1073.0, + 291.0, + 1073.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1071.0, + 1403.0, + 1071.0, + 1403.0, + 1103.0, + 295.0, + 1103.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1099.0, + 1406.0, + 1099.0, + 1406.0, + 1134.0, + 292.0, + 1134.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1129.0, + 1405.0, + 1129.0, + 1405.0, + 1165.0, + 295.0, + 1165.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1162.0, + 1406.0, + 1162.0, + 1406.0, + 1194.0, + 296.0, + 1194.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + 
"poly": [ + 292.0, + 1190.0, + 809.0, + 1190.0, + 809.0, + 1224.0, + 292.0, + 1224.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 900.0, + 1190.0, + 930.0, + 1190.0, + 930.0, + 1224.0, + 900.0, + 1224.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1022.0, + 1190.0, + 1406.0, + 1190.0, + 1406.0, + 1224.0, + 1022.0, + 1224.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1220.0, + 448.0, + 1220.0, + 448.0, + 1254.0, + 292.0, + 1254.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1723.0, + 1403.0, + 1723.0, + 1403.0, + 1760.0, + 295.0, + 1760.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1755.0, + 1404.0, + 1755.0, + 1404.0, + 1790.0, + 294.0, + 1790.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1788.0, + 1403.0, + 1788.0, + 1403.0, + 1820.0, + 295.0, + 1820.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1818.0, + 1395.0, + 1818.0, + 1395.0, + 1850.0, + 294.0, + 1850.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1859.0, + 1402.0, + 1859.0, + 1402.0, + 1895.0, + 297.0, + 1895.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1891.0, + 1193.0, + 1891.0, + 1193.0, + 1926.0, + 295.0, + 1926.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 1254.0, + 1891.0, + 1403.0, + 1891.0, + 1403.0, + 1926.0, + 1254.0, + 1926.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 6, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 299, + 1063, + 1403, + 1063, + 1403, + 1307, + 299, + 1307 + ], + "score": 0.983 + }, + { + "category_id": 1, + "poly": [ + 298, + 1418, + 1404, + 1418, + 1404, + 1693, + 298, + 1693 + ], + "score": 0.982 
+ }, + { + "category_id": 1, + "poly": [ + 298, + 500, + 1404, + 500, + 1404, + 834, + 298, + 834 + ], + "score": 0.982 + }, + { + "category_id": 1, + "poly": [ + 298, + 1706, + 1402, + 1706, + 1402, + 1920, + 298, + 1920 + ], + "score": 0.979 + }, + { + "category_id": 1, + "poly": [ + 299, + 927, + 1402, + 927, + 1402, + 1048, + 299, + 1048 + ], + "score": 0.975 + }, + { + "category_id": 3, + "poly": [ + 303, + 224, + 1396, + 224, + 1396, + 431, + 303, + 431 + ], + "score": 0.965 + }, + { + "category_id": 4, + "poly": [ + 367, + 447, + 1324, + 447, + 1324, + 480, + 367, + 480 + ], + "score": 0.922 + }, + { + "category_id": 0, + "poly": [ + 299, + 870, + 604, + 870, + 604, + 902, + 299, + 902 + ], + "score": 0.916 + }, + { + "category_id": 2, + "poly": [ + 841, + 2062, + 858, + 2062, + 858, + 2084, + 841, + 2084 + ], + "score": 0.793 + }, + { + "category_id": 0, + "poly": [ + 298, + 1349, + 509, + 1349, + 509, + 1386, + 298, + 1386 + ], + "score": 0.66 + }, + { + "category_id": 0, + "poly": [ + 298, + 1348, + 509, + 1348, + 509, + 1387, + 298, + 1387 + ], + "score": 0.23 + }, + { + "category_id": 13, + "poly": [ + 869, + 1124, + 961, + 1124, + 961, + 1157, + 869, + 1157 + ], + "score": 0.86, + "latex": "0 . 
6 \\mathrm { m } / \\mathrm { s }" + }, + { + "category_id": 13, + "poly": [ + 888, + 447, + 962, + 447, + 962, + 477, + 888, + 477 + ], + "score": 0.33, + "latex": "2 0 \\mathrm { { m i n } }" + }, + { + "category_id": 15, + "poly": [ + 687.0, + 251.0, + 971.0, + 251.0, + 971.0, + 437.0, + 687.0, + 437.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 371.0, + 445.0, + 887.0, + 445.0, + 887.0, + 484.0, + 371.0, + 484.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 963.0, + 445.0, + 1328.0, + 445.0, + 1328.0, + 484.0, + 963.0, + 484.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 870.0, + 608.0, + 870.0, + 608.0, + 906.0, + 295.0, + 906.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 839.0, + 2059.0, + 860.0, + 2059.0, + 860.0, + 2089.0, + 839.0, + 2089.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 291.0, + 1345.0, + 514.0, + 1345.0, + 514.0, + 1393.0, + 291.0, + 1393.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 1342.0, + 514.0, + 1342.0, + 514.0, + 1395.0, + 290.0, + 1395.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1063.0, + 1405.0, + 1063.0, + 1405.0, + 1097.0, + 295.0, + 1097.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1094.0, + 1405.0, + 1094.0, + 1405.0, + 1128.0, + 295.0, + 1128.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1125.0, + 868.0, + 1125.0, + 868.0, + 1159.0, + 295.0, + 1159.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 962.0, + 1125.0, + 1404.0, + 1125.0, + 1404.0, + 1159.0, + 962.0, + 1159.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 298.0, + 1156.0, + 1403.0, + 1156.0, + 1403.0, + 1186.0, + 298.0, + 1186.0 + ], + "score": 1.0, + "text": "" 
+ }, + { + "category_id": 15, + "poly": [ + 295.0, + 1185.0, + 1405.0, + 1185.0, + 1405.0, + 1218.0, + 295.0, + 1218.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1212.0, + 1404.0, + 1212.0, + 1404.0, + 1253.0, + 292.0, + 1253.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1244.0, + 1404.0, + 1244.0, + 1404.0, + 1282.0, + 294.0, + 1282.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1277.0, + 628.0, + 1277.0, + 628.0, + 1310.0, + 295.0, + 1310.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1419.0, + 1402.0, + 1419.0, + 1402.0, + 1451.0, + 294.0, + 1451.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1449.0, + 1404.0, + 1449.0, + 1404.0, + 1484.0, + 293.0, + 1484.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1479.0, + 1405.0, + 1479.0, + 1405.0, + 1515.0, + 293.0, + 1515.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1507.0, + 1404.0, + 1507.0, + 1404.0, + 1545.0, + 293.0, + 1545.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1537.0, + 1406.0, + 1537.0, + 1406.0, + 1576.0, + 295.0, + 1576.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1570.0, + 1404.0, + 1570.0, + 1404.0, + 1606.0, + 294.0, + 1606.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1600.0, + 1405.0, + 1600.0, + 1405.0, + 1636.0, + 293.0, + 1636.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1630.0, + 1404.0, + 1630.0, + 1404.0, + 1666.0, + 294.0, + 1666.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1660.0, + 1245.0, + 1660.0, + 1245.0, + 1697.0, + 292.0, + 1697.0 + ], + "score": 1.0, + "text": "" + }, + { + 
"category_id": 15, + "poly": [ + 294.0, + 499.0, + 1405.0, + 499.0, + 1405.0, + 533.0, + 294.0, + 533.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 533.0, + 1402.0, + 533.0, + 1402.0, + 563.0, + 297.0, + 563.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 563.0, + 1404.0, + 563.0, + 1404.0, + 594.0, + 294.0, + 594.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 589.0, + 1404.0, + 589.0, + 1404.0, + 626.0, + 293.0, + 626.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 619.0, + 1405.0, + 619.0, + 1405.0, + 658.0, + 292.0, + 658.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 651.0, + 1404.0, + 651.0, + 1404.0, + 688.0, + 293.0, + 688.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 681.0, + 1404.0, + 681.0, + 1404.0, + 718.0, + 293.0, + 718.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 710.0, + 1408.0, + 710.0, + 1408.0, + 752.0, + 292.0, + 752.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 743.0, + 1405.0, + 743.0, + 1405.0, + 777.0, + 293.0, + 777.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 774.0, + 1406.0, + 774.0, + 1406.0, + 809.0, + 293.0, + 809.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 803.0, + 382.0, + 803.0, + 382.0, + 836.0, + 293.0, + 836.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 1706.0, + 1404.0, + 1706.0, + 1404.0, + 1741.0, + 296.0, + 1741.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1737.0, + 1406.0, + 1737.0, + 1406.0, + 1771.0, + 294.0, + 1771.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1768.0, + 
1406.0, + 1768.0, + 1406.0, + 1802.0, + 294.0, + 1802.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1798.0, + 1407.0, + 1798.0, + 1407.0, + 1833.0, + 294.0, + 1833.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1824.0, + 1407.0, + 1824.0, + 1407.0, + 1865.0, + 292.0, + 1865.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1858.0, + 1406.0, + 1858.0, + 1406.0, + 1893.0, + 293.0, + 1893.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1889.0, + 1241.0, + 1889.0, + 1241.0, + 1923.0, + 294.0, + 1923.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 925.0, + 1406.0, + 925.0, + 1406.0, + 962.0, + 294.0, + 962.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 958.0, + 1406.0, + 958.0, + 1406.0, + 993.0, + 294.0, + 993.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 987.0, + 1406.0, + 987.0, + 1406.0, + 1023.0, + 294.0, + 1023.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 294.0, + 1017.0, + 1329.0, + 1017.0, + 1329.0, + 1053.0, + 294.0, + 1053.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 7, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 295, + 435, + 1407, + 435, + 1407, + 2020, + 295, + 2020 + ], + "score": 0.945 + }, + { + "category_id": 1, + "poly": [ + 300, + 254, + 1399, + 254, + 1399, + 348, + 300, + 348 + ], + "score": 0.938 + }, + { + "category_id": 0, + "poly": [ + 299, + 204, + 518, + 204, + 518, + 235, + 299, + 235 + ], + "score": 0.914 + }, + { + "category_id": 0, + "poly": [ + 299, + 390, + 455, + 390, + 455, + 424, + 299, + 424 + ], + "score": 0.902 + }, + { + "category_id": 2, + "poly": [ + 840, + 2061, + 859, + 2061, + 859, + 2084, + 840, + 2084 + ], + 
"score": 0.773 + }, + { + "category_id": 15, + "poly": [ + 295.0, + 199.0, + 522.0, + 199.0, + 522.0, + 240.0, + 295.0, + 240.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 386.0, + 460.0, + 386.0, + 460.0, + 430.0, + 295.0, + 430.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 839.0, + 2060.0, + 861.0, + 2060.0, + 861.0, + 2090.0, + 839.0, + 2090.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 305.0, + 441.0, + 1405.0, + 441.0, + 1405.0, + 480.0, + 305.0, + 480.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 350.0, + 471.0, + 1300.0, + 471.0, + 1300.0, + 511.0, + 350.0, + 511.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 309.0, + 524.0, + 1404.0, + 524.0, + 1404.0, + 562.0, + 309.0, + 562.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 352.0, + 554.0, + 1407.0, + 554.0, + 1407.0, + 592.0, + 352.0, + 592.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 583.0, + 765.0, + 583.0, + 765.0, + 622.0, + 353.0, + 622.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 307.0, + 636.0, + 1407.0, + 636.0, + 1407.0, + 675.0, + 307.0, + 675.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 666.0, + 1407.0, + 666.0, + 1407.0, + 704.0, + 353.0, + 704.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 357.0, + 697.0, + 977.0, + 697.0, + 977.0, + 731.0, + 357.0, + 731.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 307.0, + 747.0, + 1405.0, + 747.0, + 1405.0, + 785.0, + 307.0, + 785.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 350.0, + 773.0, + 1409.0, + 773.0, + 1409.0, + 819.0, + 350.0, + 819.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 
810.0, + 1400.0, + 810.0, + 1400.0, + 843.0, + 353.0, + 843.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 307.0, + 857.0, + 1407.0, + 857.0, + 1407.0, + 896.0, + 307.0, + 896.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 357.0, + 891.0, + 1300.0, + 891.0, + 1300.0, + 924.0, + 357.0, + 924.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 307.0, + 940.0, + 1405.0, + 940.0, + 1405.0, + 978.0, + 307.0, + 978.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 355.0, + 969.0, + 1040.0, + 969.0, + 1040.0, + 1008.0, + 355.0, + 1008.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 307.0, + 1022.0, + 1404.0, + 1022.0, + 1404.0, + 1061.0, + 307.0, + 1061.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 357.0, + 1054.0, + 1213.0, + 1054.0, + 1213.0, + 1087.0, + 357.0, + 1087.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 307.0, + 1103.0, + 1404.0, + 1103.0, + 1404.0, + 1141.0, + 307.0, + 1141.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 352.0, + 1129.0, + 1407.0, + 1129.0, + 1407.0, + 1175.0, + 352.0, + 1175.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 1164.0, + 1404.0, + 1164.0, + 1404.0, + 1203.0, + 353.0, + 1203.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 1192.0, + 799.0, + 1192.0, + 799.0, + 1231.0, + 353.0, + 1231.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 304.0, + 1240.0, + 1407.0, + 1240.0, + 1407.0, + 1287.0, + 304.0, + 1287.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 350.0, + 1273.0, + 1405.0, + 1273.0, + 1405.0, + 1314.0, + 350.0, + 1314.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 352.0, + 1305.0, + 863.0, + 1305.0, + 
863.0, + 1342.0, + 352.0, + 1342.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1356.0, + 1405.0, + 1356.0, + 1405.0, + 1394.0, + 295.0, + 1394.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 355.0, + 1387.0, + 617.0, + 1387.0, + 617.0, + 1421.0, + 355.0, + 1421.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1435.0, + 1407.0, + 1435.0, + 1407.0, + 1477.0, + 293.0, + 1477.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 350.0, + 1463.0, + 1407.0, + 1463.0, + 1407.0, + 1512.0, + 350.0, + 1512.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 1500.0, + 701.0, + 1500.0, + 701.0, + 1533.0, + 353.0, + 1533.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1549.0, + 1407.0, + 1549.0, + 1407.0, + 1587.0, + 295.0, + 1587.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 348.0, + 1575.0, + 1407.0, + 1575.0, + 1407.0, + 1622.0, + 348.0, + 1622.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 1610.0, + 522.0, + 1610.0, + 522.0, + 1645.0, + 353.0, + 1645.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1661.0, + 1405.0, + 1661.0, + 1405.0, + 1700.0, + 293.0, + 1700.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 1691.0, + 1407.0, + 1691.0, + 1407.0, + 1729.0, + 353.0, + 1729.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 357.0, + 1724.0, + 1280.0, + 1724.0, + 1280.0, + 1758.0, + 357.0, + 1758.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1772.0, + 1405.0, + 1772.0, + 1405.0, + 1810.0, + 293.0, + 1810.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 350.0, + 1800.0, + 1405.0, + 1800.0, + 1405.0, + 1844.0, 
+ 350.0, + 1844.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 352.0, + 1833.0, + 1404.0, + 1833.0, + 1404.0, + 1870.0, + 352.0, + 1870.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 350.0, + 1861.0, + 1133.0, + 1861.0, + 1133.0, + 1903.0, + 350.0, + 1903.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1914.0, + 1407.0, + 1914.0, + 1407.0, + 1952.0, + 293.0, + 1952.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 352.0, + 1945.0, + 1407.0, + 1945.0, + 1407.0, + 1984.0, + 352.0, + 1984.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 352.0, + 1975.0, + 1008.0, + 1975.0, + 1008.0, + 2012.0, + 352.0, + 2012.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 254.0, + 1404.0, + 254.0, + 1404.0, + 288.0, + 296.0, + 288.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 284.0, + 1403.0, + 284.0, + 1403.0, + 318.0, + 296.0, + 318.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 296.0, + 317.0, + 690.0, + 317.0, + 690.0, + 351.0, + 296.0, + 351.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 8, + "width": 1700, + "height": 2200 + } + }, + { + "layout_dets": [ + { + "category_id": 1, + "poly": [ + 292, + 202, + 1408, + 202, + 1408, + 1685, + 292, + 1685 + ], + "score": 0.815 + }, + { + "category_id": 2, + "poly": [ + 836, + 2061, + 866, + 2061, + 866, + 2086, + 836, + 2086 + ], + "score": 0.697 + }, + { + "category_id": 2, + "poly": [ + 836, + 2061, + 866, + 2061, + 866, + 2086, + 836, + 2086 + ], + "score": 0.388 + }, + { + "category_id": 15, + "poly": [ + 832.0, + 2058.0, + 870.0, + 2058.0, + 870.0, + 2096.0, + 832.0, + 2096.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 832.0, + 2058.0, + 870.0, + 2058.0, + 870.0, + 2096.0, + 832.0, + 2096.0 
+ ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 201.0, + 1403.0, + 201.0, + 1403.0, + 243.0, + 293.0, + 243.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 354.0, + 234.0, + 1033.0, + 234.0, + 1033.0, + 271.0, + 354.0, + 271.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 284.0, + 1408.0, + 284.0, + 1408.0, + 325.0, + 293.0, + 325.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 315.0, + 1407.0, + 315.0, + 1407.0, + 356.0, + 353.0, + 356.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 354.0, + 345.0, + 503.0, + 345.0, + 503.0, + 383.0, + 354.0, + 383.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 290.0, + 394.0, + 1405.0, + 394.0, + 1405.0, + 442.0, + 290.0, + 442.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 354.0, + 431.0, + 1402.0, + 431.0, + 1402.0, + 467.0, + 354.0, + 467.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 354.0, + 460.0, + 1045.0, + 460.0, + 1045.0, + 497.0, + 354.0, + 497.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 513.0, + 1403.0, + 513.0, + 1403.0, + 549.0, + 295.0, + 549.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 351.0, + 543.0, + 1040.0, + 543.0, + 1040.0, + 581.0, + 351.0, + 581.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 596.0, + 1407.0, + 596.0, + 1407.0, + 632.0, + 297.0, + 632.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 356.0, + 629.0, + 1093.0, + 629.0, + 1093.0, + 660.0, + 356.0, + 660.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 675.0, + 1407.0, + 675.0, + 1407.0, + 718.0, + 293.0, + 718.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, 
+ "poly": [ + 349.0, + 704.0, + 1408.0, + 704.0, + 1408.0, + 747.0, + 349.0, + 747.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 354.0, + 741.0, + 789.0, + 741.0, + 789.0, + 777.0, + 354.0, + 777.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 788.0, + 1407.0, + 788.0, + 1407.0, + 830.0, + 295.0, + 830.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 354.0, + 821.0, + 832.0, + 821.0, + 832.0, + 858.0, + 354.0, + 858.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 874.0, + 1403.0, + 874.0, + 1403.0, + 911.0, + 297.0, + 911.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 354.0, + 906.0, + 1405.0, + 906.0, + 1405.0, + 942.0, + 354.0, + 942.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 351.0, + 934.0, + 904.0, + 934.0, + 904.0, + 970.0, + 351.0, + 970.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 981.0, + 1405.0, + 981.0, + 1405.0, + 1028.0, + 293.0, + 1028.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 356.0, + 1016.0, + 696.0, + 1016.0, + 696.0, + 1052.0, + 356.0, + 1052.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1070.0, + 1405.0, + 1070.0, + 1405.0, + 1107.0, + 295.0, + 1107.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 354.0, + 1102.0, + 577.0, + 1102.0, + 577.0, + 1136.0, + 354.0, + 1136.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 292.0, + 1148.0, + 1407.0, + 1148.0, + 1407.0, + 1191.0, + 292.0, + 1191.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 354.0, + 1183.0, + 1405.0, + 1183.0, + 1405.0, + 1219.0, + 354.0, + 1219.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 354.0, + 1214.0, + 1172.0, + 1214.0, 
+ 1172.0, + 1250.0, + 354.0, + 1250.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1260.0, + 1402.0, + 1260.0, + 1402.0, + 1308.0, + 293.0, + 1308.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 351.0, + 1297.0, + 678.0, + 1297.0, + 678.0, + 1334.0, + 351.0, + 1334.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 295.0, + 1346.0, + 1403.0, + 1346.0, + 1403.0, + 1387.0, + 295.0, + 1387.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 354.0, + 1379.0, + 1405.0, + 1379.0, + 1405.0, + 1415.0, + 354.0, + 1415.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 354.0, + 1409.0, + 695.0, + 1409.0, + 695.0, + 1445.0, + 354.0, + 1445.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 297.0, + 1461.0, + 1402.0, + 1461.0, + 1402.0, + 1498.0, + 297.0, + 1498.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 353.0, + 1489.0, + 1407.0, + 1489.0, + 1407.0, + 1531.0, + 353.0, + 1531.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 356.0, + 1522.0, + 672.0, + 1522.0, + 672.0, + 1559.0, + 356.0, + 1559.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 293.0, + 1572.0, + 1407.0, + 1572.0, + 1407.0, + 1613.0, + 293.0, + 1613.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 349.0, + 1600.0, + 1408.0, + 1600.0, + 1408.0, + 1648.0, + 349.0, + 1648.0 + ], + "score": 1.0, + "text": "" + }, + { + "category_id": 15, + "poly": [ + 354.0, + 1636.0, + 1179.0, + 1636.0, + 1179.0, + 1672.0, + 354.0, + 1672.0 + ], + "score": 1.0, + "text": "" + } + ], + "page_info": { + "page_no": 9, + "width": 1700, + "height": 2200 + } + } +] \ No newline at end of file diff --git a/parse/train/wK2fDDJ5VcF/wK2fDDJ5VcF_span.pdf b/parse/train/wK2fDDJ5VcF/wK2fDDJ5VcF_span.pdf new file mode 100644 index 
0000000000000000000000000000000000000000..79974cd77f63f0979d88d39e1c3794b442141322 --- /dev/null +++ b/parse/train/wK2fDDJ5VcF/wK2fDDJ5VcF_span.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2a3d2cfd2fbaa5fca4545ef742ff330bf3109be247b6cf612e8a03787aeff08 +size 38242885 diff --git a/vlm/dev/3RBY8fKjHeu/1.png b/vlm/dev/3RBY8fKjHeu/1.png new file mode 100644 index 0000000000000000000000000000000000000000..a08e0485676157423c7894ec56b1e04092c328a7 --- /dev/null +++ b/vlm/dev/3RBY8fKjHeu/1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77967c795f8ef7cf8b60b6d6220cad5e310366f3c0bbf550150d6670520b0ccd +size 602073 diff --git a/vlm/dev/3RBY8fKjHeu/13.png b/vlm/dev/3RBY8fKjHeu/13.png new file mode 100644 index 0000000000000000000000000000000000000000..f1c4cc638ce343db6f59c7ab9b96ec69ac0090bb --- /dev/null +++ b/vlm/dev/3RBY8fKjHeu/13.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8cb8196e02c147421e01b23f7bdb448a80b1c05151861e4d585a3498ff7dada9 +size 612390 diff --git a/vlm/dev/3RBY8fKjHeu/3.png b/vlm/dev/3RBY8fKjHeu/3.png new file mode 100644 index 0000000000000000000000000000000000000000..095be9e7f0d44a6c17f57cf6534b1f4973eb526f --- /dev/null +++ b/vlm/dev/3RBY8fKjHeu/3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:64d0303cd7d93598ddf6651bd2b009ab2095b78ae69db70313934e1202cd7c35 +size 530690 diff --git a/vlm/dev/a0SRWViFYW/10.png b/vlm/dev/a0SRWViFYW/10.png new file mode 100644 index 0000000000000000000000000000000000000000..c7b6d325f430640885ae1e8a7cc106d2377f6115 --- /dev/null +++ b/vlm/dev/a0SRWViFYW/10.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e078bcd3f0181fd258a4fbeec2b7380881d5a0c75d9d6fac6e87aa94527a57d9 +size 550176 diff --git a/vlm/dev/a0SRWViFYW/12.png b/vlm/dev/a0SRWViFYW/12.png new file mode 100644 index 0000000000000000000000000000000000000000..8535e46b09231d51dca901a5b9d3ac398f8f46f8 --- /dev/null +++ 
b/vlm/dev/a0SRWViFYW/12.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c4e8f1a6004fd202725cd7b0da4ed195b31c31e4f42fb0c45136552af85cde23 +size 578914 diff --git a/vlm/dev/a0SRWViFYW/3.png b/vlm/dev/a0SRWViFYW/3.png new file mode 100644 index 0000000000000000000000000000000000000000..05942701a8ba8bd66a9b8193d0f4e603f755843f --- /dev/null +++ b/vlm/dev/a0SRWViFYW/3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c471a0fefd658a6720fa3e304ee23d3a936ad80a3c0e5640599e94e6e5565bf1 +size 614874 diff --git a/vlm/dev/a0SRWViFYW/33.png b/vlm/dev/a0SRWViFYW/33.png new file mode 100644 index 0000000000000000000000000000000000000000..4ca3c00db70cf2707c77695f8a1cc11385bee9bf --- /dev/null +++ b/vlm/dev/a0SRWViFYW/33.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b378a4a21c62e864845277d2e97d4dac2f1627add3391ddf9632524f882ef1eb +size 443053 diff --git a/vlm/dev/a0SRWViFYW/5.png b/vlm/dev/a0SRWViFYW/5.png new file mode 100644 index 0000000000000000000000000000000000000000..1e79aadc2989fbacb568a9288c635efe86cc176c --- /dev/null +++ b/vlm/dev/a0SRWViFYW/5.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:372fbad4a054ca37b21ca32d5ff5f159312221c9fb977bc440fde2b4ba6be336 +size 452567 diff --git a/vlm/dev/a0SRWViFYW/6.png b/vlm/dev/a0SRWViFYW/6.png new file mode 100644 index 0000000000000000000000000000000000000000..c9abe555e3e2794cadb5b964bd117503235c3ab6 --- /dev/null +++ b/vlm/dev/a0SRWViFYW/6.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:271b72cd588917acefad4c3d9e49847228879e831fcfc5b6471b7e394236477b +size 558508 diff --git a/vlm/test/TrloAXEJ2B/15.png b/vlm/test/TrloAXEJ2B/15.png new file mode 100644 index 0000000000000000000000000000000000000000..d40776ed554cd5248e2482cd998e09e78fcdd191 --- /dev/null +++ b/vlm/test/TrloAXEJ2B/15.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:b88c1af47cfd752bb234d82a7dc8f60cf8c273ec472284fec45ae893ed09b318 +size 355969 diff --git a/vlm/test/TrloAXEJ2B/5.png b/vlm/test/TrloAXEJ2B/5.png new file mode 100644 index 0000000000000000000000000000000000000000..286ac93fa877905aa8ba717f8e8b442eaeac8cac --- /dev/null +++ b/vlm/test/TrloAXEJ2B/5.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:85c7117e2cb3d72578e59f05d109c9b0d927baf0cb4030d90e69a51a1215591d +size 520061 diff --git a/vlm/test/TrloAXEJ2B/7.png b/vlm/test/TrloAXEJ2B/7.png new file mode 100644 index 0000000000000000000000000000000000000000..cfba0d27576f1ad3cafab1f553bc7d97a5e13ea6 --- /dev/null +++ b/vlm/test/TrloAXEJ2B/7.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ee062f3486a0bbd4c475704d4106321b5b02e8bcd5121936c0f9fdd57f3750d +size 541191 diff --git a/vlm/test/rzQGHXNReU/3.png b/vlm/test/rzQGHXNReU/3.png new file mode 100644 index 0000000000000000000000000000000000000000..a2db9e99952804b770c2135d3fb578113335ac61 --- /dev/null +++ b/vlm/test/rzQGHXNReU/3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f12e92a4f93895c3e44e49e6c5ab344ee9e0ecfae8dd6adf73b1bc8f4de1b1a +size 531030 diff --git a/vlm/train/fy4ZBWxYbIo/2.png b/vlm/train/fy4ZBWxYbIo/2.png new file mode 100644 index 0000000000000000000000000000000000000000..8a9fa63cce51c3d325c7752ecabe5538fe124b90 --- /dev/null +++ b/vlm/train/fy4ZBWxYbIo/2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e71801a877e5822a686f207d177db030f8ce0d176e8ee10c281754215636ed2e +size 667801 diff --git a/vlm/train/fy4ZBWxYbIo/9.png b/vlm/train/fy4ZBWxYbIo/9.png new file mode 100644 index 0000000000000000000000000000000000000000..65fa25ce1e855f9c90c9861e3b527f7a30715f52 --- /dev/null +++ b/vlm/train/fy4ZBWxYbIo/9.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a01de9c046dafa6ebc68e49bf59b47927e87b36384b8ea9ef5548cacb50d6f14 +size 542682 diff --git 
a/vlm/train/wK2fDDJ5VcF/3.png b/vlm/train/wK2fDDJ5VcF/3.png new file mode 100644 index 0000000000000000000000000000000000000000..ec43471b1b7b9bd73be8923f10628ed8716e3761 --- /dev/null +++ b/vlm/train/wK2fDDJ5VcF/3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8b01aa2109e19d29ab5673dea6f988093ae891a7c2d5ede60df62e13a6ff3a03 +size 641590