| { |
| "absolute_id": 124, |
| "persona": "Researcher", |
| "task": "The files `ScreenShot_2026-02-07_132218_499.png` and `ScreenShot_2026-02-07_132225_683.png` on my Desktop are screenshots I took while studying a paper. Please find that paper in the paper folder, extract its abstract, and output it as `output.txt`.", |
| "task_diff": "hard", |
| "output_files": [ |
| "output.txt" |
| ], |
| "rubrics": [ |
| "Is the paper title \"CL-Bench: A Benchmark for Context Learning\" included in the output.txt file?", |
| "Does the output.txt file contain the full abstract section title?", |
| "Does the summary extracted in output.txt start with \"Current language models (LMs) excel at reasoning over prompts using pre-trained knowledge\"?", |
| "Does the summary mention that CL-Bench contains 500 complex contexts, 1,899 tasks, and 31,607 validation criteria?", |
| "Does the summary mention the evaluation result that ten frontier language models solved only 17.2% of tasks on average?", |
| "Does the abstract mention that the best-performing model, GPT-5.1, solved only 23.7%?", |
| "Does the end of the summary mention CL-Bench as a step toward building context-learning-competent language models that advance their deployment in real-world scenarios?", |
| "Is the paper entitled CL-Bench: A Benchmark for Context Learning correctly identified from the two screenshots?", |
| "Was the correct paper file `CL-Bench.pdf` matched instead of another paper file?", |
| "Does the extracted abstract completely cover all paragraphs of the original abstract without missing key information?", |
| "Does the extracted abstract accurately state that context learning is a key capability naturally possessed by humans but largely overlooked by current research?", |
| "Does the abstract accurately state that solving tasks in CL-Bench requires the model to learn new knowledge, rule systems, and complex procedures from the context that are not found in the training data?", |
| "Were the two PNG screenshot files successfully read to identify the paper's information?", |
| "Was the `CL-Bench.pdf` paper file successfully read and was the abstract location correctly found?", |
| "Was the extracted summary successfully output in txt format?", |
| "Does the abstract clearly distinguish CL-Bench from traditional long-context and in-context learning tasks, noting that it goes far beyond them?", |
| "Does the summary mention that all tasks and assessment criteria are designed by experienced domain experts?" |
| ], |
| "rubric_types": [ |
| "Basic Evaluation", |
| "Basic Evaluation", |
| "Basic Evaluation", |
| "Outcome Evaluation", |
| "Outcome Evaluation", |
| "Outcome Evaluation", |
| "Outcome Evaluation", |
| "Process Evaluation", |
| "Process Evaluation", |
| "Outcome Evaluation", |
| "Outcome Evaluation", |
| "Outcome Evaluation", |
| "Process Evaluation", |
| "Process Evaluation", |
| "Basic Evaluation", |
| "Outcome Evaluation", |
| "Outcome Evaluation" |
| ], |
| "file_dep_graph": [ |
| { |
| "from": "ScreenShot_2026-02-07_132218_499.png", |
| "to": "CL-Bench.pdf" |
| }, |
| { |
| "from": "ScreenShot_2026-02-07_132225_683.png", |
| "to": "CL-Bench.pdf" |
| } |
| ], |
| "data_manifest": [ |
| { |
| "filename": "CL-Bench.pdf", |
| "stored_relpath": "data/bc80a4360a7d761b_CL-Bench.pdf" |
| }, |
| { |
| "filename": "ScreenShot_2026-02-07_132218_499.png", |
| "stored_relpath": "data/748bf0f4e01ae135_ScreenShot_2026-02-07_132218_499.png" |
| }, |
| { |
| "filename": "ScreenShot_2026-02-07_132225_683.png", |
| "stored_relpath": "data/8aa7f1d7b4d208b7_ScreenShot_2026-02-07_132225_683.png" |
| } |
| ], |
| "tested_capabilities": [ |
| "Workspace Exploration", |
| "Lineage Tracing", |
| "Semantic Content Relations Understanding", |
| "Heterogeneous File Understanding" |
| ] |
| } |
|
|