record_id stringlengths 12 33 | answers listlengths 2 55 | average_length float64 780 1.96M | average_length_without_law float64 0 1.86k | benchmark_name stringclasses 31
values | doc_length dict | doc_num int64 2 10 | input_doc listlengths 2 10 | language stringclasses 2
values | law sequencelengths 0 9 | law_list sequencelengths 0 7 | query stringclasses 56
values | ref_doc sequencelengths 0 5 | table_schema dict |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
academic_WikiText-103_1_en | [
{
"案件名": null,
"关键词": null,
"基本案情": null,
"关联索引": null,
"被告": null,
"罪名": null,
"刑期": null,
"缓刑": null,
"处罚金": null,
"剥夺政治权利": null,
"其他判决": null,
"入库编号": null,
"没收个人财产": null,
"文档名称": null,
"报表所属期": null,
"营业收入(元)": null,
"净利润(元)": null,
"净利润率... | 57,358 | 0 | WikiText-103 | {
"吴某强奸、故意伤害案": null,
"汪某故意杀人、敲诈勒索案": null,
"温某平故意伤害案": null,
"许某龙掩饰、隐瞒犯罪所得案": null,
"叶某林猥亵儿童、贩卖、传播淫秽物品牟利案": null,
"闫某华故意杀人、盗窃案": null,
"周某军故意杀人案": null,
"杨某凤、赵某等诈骗案": null,
"张某生故意杀人案": null,
"李某龙抢劫、盗窃案": null,
"中华人民共和国刑法": null,
"黄某树交通肇事案": null,
"马某危险驾驶案": null,
"汪某秋危险驾驶案": null,
"李某俊等重大... | 3 | [
{
"案件名": null,
"入库编号": null,
"基本案情": null,
"关联索引": null,
"关键词": null,
"判决结果": null,
"裁判要旨": null,
"裁判理由": null,
"filename": "Primal-Attention Self-attention through Asymmetric Kernel SVD in Primal Representation.md"
},
{
"案件名": null,
"入库编号": null,
"基本案情": null,
... | en | [] | [] | List the Test perplexity performance of the proposed methods in the paper on the WikiText-103 dataset. | [] | {
"columns": [
{
"name": "paper_name",
"about": "Paper Title",
"valid_type": null
},
{
"name": "method",
"about": "Proposed Method",
"valid_type": null
},
{
"name": "result",
"about": "Performance on Specific Metrics",
"valid_type": null
},... |
academic_WikiText-2_1_en | [
{
"案件名": null,
"关键词": null,
"基本案情": null,
"关联索引": null,
"被告": null,
"罪名": null,
"刑期": null,
"缓刑": null,
"处罚金": null,
"剥夺政治权利": null,
"其他判决": null,
"入库编号": null,
"没收个人财产": null,
"文档名称": null,
"报表所属期": null,
"营业收入(元)": null,
"净利润(元)": null,
"净利润率... | 53,973 | 0 | WikiText-2 | {
"吴某强奸、故意伤害案": null,
"汪某故意杀人、敲诈勒索案": null,
"温某平故意伤害案": null,
"许某龙掩饰、隐瞒犯罪所得案": null,
"叶某林猥亵儿童、贩卖、传播淫秽物品牟利案": null,
"闫某华故意杀人、盗窃案": null,
"周某军故意杀人案": null,
"杨某凤、赵某等诈骗案": null,
"张某生故意杀人案": null,
"李某龙抢劫、盗窃案": null,
"中华人民共和国刑法": null,
"黄某树交通肇事案": null,
"马某危险驾驶案": null,
"汪某秋危险驾驶案": null,
"李某俊等重大... | 2 | [
{
"案件名": null,
"入库编号": null,
"基本案情": null,
"关联索引": null,
"关键词": null,
"判决结果": null,
"裁判要旨": null,
"裁判理由": null,
"filename": "Language Models with Transformers.md"
},
{
"案件名": null,
"入库编号": null,
"基本案情": null,
"关联索引": null,
"关键词": null,
"判决结果": null,
... | en | [] | [] | List the Test perplexity performance of the proposed methods in the paper on the WikiText-2 dataset. | [] | {
"columns": [
{
"name": "paper_name",
"about": "Paper Title",
"valid_type": null
},
{
"name": "method",
"about": "Proposed Method",
"valid_type": null
},
{
"name": "result",
"about": "Performance on Specific Metrics",
"valid_type": null
},... |
academic_PIQA_1_en | [{"案件名":null,"关键词":null,"基本案情":null,"关联索引":null,"被告":null,"罪名":n(...TRUNCATED) | 68,163.333333 | 0 | PIQA | {"吴某强奸、故意伤害案":null,"汪某故意杀人、敲诈勒索案":null,"温某平故意(...TRUNCATED) | 3 | [{"案件名":null,"入库编号":null,"基本案情":null,"关联索引":null,"关键词":null,"判(...TRUNCATED) | en | [] | [] | List the accuracy performance of the proposed methods in the paper on the PIQA dataset. | [] | {"columns":[{"name":"paper_name","about":"Paper Title","valid_type":null},{"name":"method","about":"(...TRUNCATED) |
academic_TriviaQA_1_en | [{"案件名":null,"关键词":null,"基本案情":null,"关联索引":null,"被告":null,"罪名":n(...TRUNCATED) | 91,734 | 0 | TriviaQA | {"吴某强奸、故意伤害案":null,"汪某故意杀人、敲诈勒索案":null,"温某平故意(...TRUNCATED) | 3 | [{"案件名":null,"入库编号":null,"基本案情":null,"关联索引":null,"关键词":null,"判(...TRUNCATED) | en | [] | [] | List the EM performance of the proposed methods in the paper on the TriviaQA dataset. | [] | {"columns":[{"name":"paper_name","about":"Paper Title","valid_type":null},{"name":"method","about":"(...TRUNCATED) |
academic_PubMedQA_1_en | [{"案件名":null,"关键词":null,"基本案情":null,"关联索引":null,"被告":null,"罪名":n(...TRUNCATED) | 78,533.5 | 0 | PubMedQA | {"吴某强奸、故意伤害案":null,"汪某故意杀人、敲诈勒索案":null,"温某平故意(...TRUNCATED) | 2 | [{"案件名":null,"入库编号":null,"基本案情":null,"关联索引":null,"关键词":null,"判(...TRUNCATED) | en | [] | [] | List the accuracy performance of the proposed methods in the paper on the PubMedQA dataset. | [] | {"columns":[{"name":"paper_name","about":"Paper Title","valid_type":null},{"name":"method","about":"(...TRUNCATED) |
academic_BoolQ_1_en | [{"案件名":null,"关键词":null,"基本案情":null,"关联索引":null,"被告":null,"罪名":n(...TRUNCATED) | 70,868.333333 | 0 | BoolQ | {"吴某强奸、故意伤害案":null,"汪某故意杀人、敲诈勒索案":null,"温某平故意(...TRUNCATED) | 3 | [{"案件名":null,"入库编号":null,"基本案情":null,"关联索引":null,"关键词":null,"判(...TRUNCATED) | en | [] | [] | List the accuracy performance of the proposed methods in the paper on the BoolQ dataset. | [] | {"columns":[{"name":"paper_name","about":"Paper Title","valid_type":null},{"name":"method","about":"(...TRUNCATED) |
academic_OpenBookQA_1_en | [{"案件名":null,"关键词":null,"基本案情":null,"关联索引":null,"被告":null,"罪名":n(...TRUNCATED) | 62,004.666667 | 0 | OpenBookQA | {"吴某强奸、故意伤害案":null,"汪某故意杀人、敲诈勒索案":null,"温某平故意(...TRUNCATED) | 3 | [{"案件名":null,"入库编号":null,"基本案情":null,"关联索引":null,"关键词":null,"判(...TRUNCATED) | en | [] | [] | List the accuracy performance of the proposed methods in the paper on the OpenBookQA dataset. | [] | {"columns":[{"name":"paper_name","about":"Paper Title","valid_type":null},{"name":"method","about":"(...TRUNCATED) |
academic_MultiRC_1_en | [{"案件名":null,"关键词":null,"基本案情":null,"关联索引":null,"被告":null,"罪名":n(...TRUNCATED) | 64,903 | 0 | MultiRC | {"吴某强奸、故意伤害案":null,"汪某故意杀人、敲诈勒索案":null,"温某平故意(...TRUNCATED) | 2 | [{"案件名":null,"入库编号":null,"基本案情":null,"关联索引":null,"关键词":null,"判(...TRUNCATED) | en | [] | [] | List the F1 performance of the proposed methods in the paper on the MultiRC dataset. | [] | {"columns":[{"name":"paper_name","about":"Paper Title","valid_type":null},{"name":"method","about":"(...TRUNCATED) |
academic_WikiQA_1_en | [{"案件名":null,"关键词":null,"基本案情":null,"关联索引":null,"被告":null,"罪名":n(...TRUNCATED) | 47,249.333333 | 0 | WikiQA | {"吴某强奸、故意伤害案":null,"汪某故意杀人、敲诈勒索案":null,"温某平故意(...TRUNCATED) | 3 | [{"案件名":null,"入库编号":null,"基本案情":null,"关联索引":null,"关键词":null,"判(...TRUNCATED) | en | [] | [] | List the map performance of the proposed methods in the paper on the WikiQA dataset. | [] | {"columns":[{"name":"paper_name","about":"Paper Title","valid_type":null},{"name":"method","about":"(...TRUNCATED) |
academic_TimeQuestions_1_en | [{"案件名":null,"关键词":null,"基本案情":null,"关联索引":null,"被告":null,"罪名":n(...TRUNCATED) | 66,359 | 0 | TimeQuestions | {"吴某强奸、故意伤害案":null,"汪某故意杀人、敲诈勒索案":null,"温某平故意(...TRUNCATED) | 2 | [{"案件名":null,"入库编号":null,"基本案情":null,"关联索引":null,"关键词":null,"判(...TRUNCATED) | en | [] | [] | List the P@1 performance of the proposed methods in the paper on the TimeQuestions dataset. | [] | {"columns":[{"name":"paper_name","about":"Paper Title","valid_type":null},{"name":"method","about":"(...TRUNCATED) |
🏆 AOE: Arranged and Organized Extraction Benchmark
📚 For full reproducibility, all source code is available in our GitHub repository.
🎯 Challenge: Can AI models construct structured tables from complex, real-world documents? AOE tests this critical capability across legal, financial, and academic domains.
🚀 What is AOE?
The AOE (Arranged and Organized Extraction) Benchmark addresses a critical gap in existing text-to-table evaluation frameworks. Unlike synthetic benchmarks, AOE challenges modern LLMs with authentic, complex, and practically relevant data extraction tasks.
💥 Why "AOE"? Like Area of Effect damage in gaming that impacts everything within range, our benchmark reveals that current AI models struggle across all aspects of structured extraction - from basic parsing to complex reasoning. No model escapes unscathed!
🎯 Core Innovation
Beyond Isolated Information: AOE doesn't just test information retrieval—it evaluates models' ability to:
- 🧠 Understand complex task requirements and construct appropriate schemas
- 🔍 Locate scattered information across multiple lengthy documents
- 🏗️ Integrate diverse data points into coherent, structured tables
- 🧮 Perform numerical reasoning and cross-document analysis
📊 Key Statistics
| Metric | Value |
|---|---|
| Total Tasks | 373 benchmark instances |
| Domains | 3 (Legal, Financial, Academic) |
| Document Sources | 100% real-world, authentic content |
| Total Documents | 1,914 source documents |
| Languages | English & Chinese |
📈 Detailed Domain Statistics
| Domain | Language | Tables | Documents | Avg Tokens | Docs/Table |
|---|---|---|---|---|---|
| Academic | EN | 74 | 257 | 69k | 3.5/5 |
| Financial | ZH,EN | 224 | 944 | 437k | 4.2/5 |
| Legal | ZH | 75 | 713 | 7k | 9.6/13 |
📁 Dataset Structure
{
"record_id": "academic_10_0_en",
"query": "Identify possible citation relationships among the following articles...",
"doc_length": {
"paper_1.md": 141566, # Character count per document
"paper_2.md": 885505,
"paper_3.md": 48869,
"paper_4.md": 65430,
"paper_5.md": 53987
},
"table_schema": { # Dynamic schema definition
"columns": [
{"name": "Cited paper title", "about": "the name of the paper"},
{"name": "Referencing paper title", "about": "Referencing paper title"},
{"name": "Referenced content", "about": "the context of the cited paper"},
{"name": "Label", "about": "reference type: background/methodology/additional"}
]
},
"answers": [ # Ground truth structured output
{
"Cited paper title": "Large Language Model Is Not a Good Few-shot Information Extractor...",
"Referencing paper title": "What Makes Good In-Context Examples for GPT-3?",
"Referenced content": "(2) Sentence-embedding (Liu et al., 2022; Su et al., 2022): retrieving...",
"Label": "background"
}
]
}
🏭 Data Sources & Domains
Figure: AOE benchmark construction pipeline from raw documents to structured evaluation tasks
📚 Academic Domain
- Sources: Semantic Scholar, Papers With Code
- Content: Research papers, citation networks, performance leaderboards
- Tasks: Citation relationship extraction, methodology performance analysis
💰 Financial Domain
- Source: CNINFO (China's official financial disclosure platform)
- Content: Annual reports (2020-2023) from A-share listed companies
- Tasks: Longitudinal financial analysis, cross-company comparisons
⚖️ Legal Domain
- Sources: People's Court Case Library, National Legal Database
- Content: Chinese civil law judgments, official statutes
- Tasks: Legal provision retrieval, defendant verdict extraction
🎯 Benchmark Tasks Overview
📊 Task Categories
| Domain | Task ID | Description | Challenge Level |
|---|---|---|---|
| Academic | $Aca_0$ | Citation Context Extraction | 🔥🔥🔥 |
| Academic | $Aca_1$ | Methodology Performance Extraction | 🔥🔥 |
| Legal | $Legal_0$ | Legal Provision Retrieval | 🔥🔥🔥🔥 |
| Legal | $Legal_1$ | Defendant Verdict Extraction | 🔥🔥🔥 |
| Financial | $Fin_{0-3}$ | Single Company Longitudinal Analysis | 🔥🔥 |
| Financial | $Fin_{4-6}$ | Multi-Company Comparative Analysis | 🔥🔥🔥 |
🏗️ Data Processing Pipeline
- 📄 Document Preservation: Advanced parsing with
  markitdown, Marker, and OCR
- ✅ Quality Assurance: Multi-stage validation ensuring accuracy and completeness
💡 Example Tasks
⚖️ Legal Analysis Example
Task: Extract structured verdict information from complex trademark infringement cases
📋 View Ground Truth Table
Input Query: "作为法律文本分析专家,请按照指定格式从判决信息中准确提取每位被告的最终判决结果"
Source Documents: complex legal cases (678–2,391 tokens each)
案件名,被告,罪名,刑期,缓刑,处罚金,其他判决
刘某假冒注册商标案,刘某,假冒注册商标罪,有期徒刑四年,,处罚金人民币十五万元,扣押车辆、手机等变价抵作罚金
欧某辉、张某妹假冒注册商标案,欧某辉,假冒注册商标罪,有期徒刑五年六个月,,处罚金人民币六十五万元,追缴违法所得100.6583万元
谢某某甲等假冒注册商标案,谢某某甲,无罪,,,,
马某华等假冒注册商标案,马某华,假冒注册商标罪,有期徒刑六年,,处罚金人民币六百八十万元,
……
Challenge: Models must parse complex legal language from multiple case documents (avg 9.6 docs per table), handle joint defendant cases with up to 16 defendants per case, distinguish between different verdict outcomes (guilty vs. acquitted), and extract structured information from unstructured legal narratives involving trademark infringement worth millions.
📚 Academic Analysis Example
Task: Extract methodology performance from research papers on WikiText-103 dataset
📊 View Ground Truth Table
Input Query: "List the Test perplexity performance of the proposed methods in the paper on the WikiText-103 dataset."
Source Documents: research papers (36k-96k tokens each)
paper_name,method,result,models_and_settings
Primal-Attention: Self-attention through Asymmetric Kernel SVD,Primal.+Trans.,31,
Language Modeling with Gated Convolutional Networks,GCNN-8,44.9,
GATELOOP: FULLY DATA-CONTROLLED LINEAR RECURRENCE,GateLoop,13.4,
Challenge: Models must parse complex academic papers, identify specific methodologies, locate performance tables, and extract numerical results while handling various formatting styles.
🏦 Financial Analysis Example
Task: Extract and compare financial metrics across multiple company annual reports
📊 View Ground Truth Table
Company,Revenue (CNY),Net Profit (CNY),Operating Cash Flow (CNY)
Gree Electric,203979266387,29017387604,56398426354
Midea Group,372037280000,33719935000,57902611000
Haier Smart Home,261427783050,16596615046,25262376228
TCL Technology,174366657015,4781000000,25314756105
GONGNIU GROUP,15694755600,3870135376,4827282090
Challenge: Models must locate financial data scattered across lengthy annual reports (avg 437k tokens), handle different formatting conventions, and ensure numerical accuracy across multiple documents.
🔬 Research Applications
🎯 Ideal for Evaluating:
- Multi-document Understanding: Information synthesis across long-form texts
- Schema Construction: Dynamic table structure generation
- Domain Adaptation: Performance across specialized fields
- Numerical Reasoning: Financial calculations and quantitative analysis
- Cross-lingual Capabilities: English and Chinese document processing
📈 Benchmark Insights:
- Even SOTA models struggle: Best performers achieve only ~68% accuracy
- Domain specificity matters: Performance varies significantly across fields
- Length matters: Document complexity correlates with task difficulty
- RAG limitations revealed: Standard retrieval often fails for structured tasks
🚀 Getting Started
Quick Usage
from datasets import load_dataset
# Load the complete AOE benchmark from the Hugging Face Hub
# (downloads/caches the dataset on first call).
dataset = load_dataset("tianyumyum/AOE")
# Access specific splits — all 373 tasks live under the "all" split.
all_tasks = dataset["all"]
# Example task: each record carries the query, per-document lengths,
# the target table schema, and the ground-truth answer rows.
task = all_tasks[0]
print(f"Documents: {len(task['doc_length'])}")
print(f"Expected output: {task['answers']}")
📊 Evaluation Framework
AOE provides a comprehensive 3-tier evaluation system:
- 🎯 CSV Parsability: Basic structure compliance (Pass Rate)
- 🏆 Overall Quality: LLM-assessed holistic evaluation (0-100%)
- 🔬 Cell-Level Accuracy: Granular content precision (F1-Score)
🤝 Contributing & Support
- 🐛 Issues: GitHub Issues
- 💬 Discussions: GitHub Discussions
⭐ Star our GitHub repo if you find AOE useful! ⭐
Pushing the boundaries of structured knowledge extraction 🚀
- Downloads last month
- 35