Huayang committed on
Commit
91a8351
·
verified ·
1 Parent(s): ae3272d

Upload folder using huggingface_hub

Browse files
Files changed (2) hide show
  1. data.zip +3 -0
  2. long_bench.py +93 -0
data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f3f7659fd4834d9ceb5d72e2509ddbff9e9c75ca4aaffd02cc577957a3adaf59
3
+ size 26195705
long_bench.py ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import os
15
+
16
+ import datasets
17
+ import json
18
+
19
+
20
+ _DESCRIPTION = """\
21
+ LongBench is a comprehensive benchmark for multilingual and multi-task purposes, with the goal to fully measure and evaluate the ability of pre-trained language models to understand long text. This dataset consists of twenty different tasks, covering key long-text application scenarios such as multi-document QA, single-document QA, summarization, few-shot learning, synthetic tasks, and code completion.
22
+ """
23
+
24
+ _HOMEPAGE = "https://github.com/THUDM/LongBench"
25
+
26
+
27
+ _URL = r"https://huggingface.co/datasets/huayangli/long_bench/resolve/main/data.zip"
28
+
29
+ task_list = ["narrativeqa", "qasper", "multifieldqa_en", "2wikimqa", "musique", \
30
+ "dureader", "gov_report", "qmsum", "multi_news", "vcsum", "trec", "triviaqa", "samsum", "lsht"]
31
+
32
+
33
class LongBenchConfig(datasets.BuilderConfig):
    """BuilderConfig for one LongBench task.

    Every task shares the same fixed dataset version; only the config
    name (the task name) differs between instances.
    """

    def __init__(self, **kwargs):
        # Pin the version here so callers never have to pass it.
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
36
+
37
+
38
class LongBench(datasets.GeneratorBasedBuilder):
    """Loader for the LongBench long-context benchmark.

    Exposes one builder config per task in ``task_list``; each config
    reads a single ``data/<task>.jsonl`` file from the downloaded archive
    and publishes it as the TEST split.
    """

    # One config per task name from the module-level task list.
    BUILDER_CONFIGS = [LongBenchConfig(name=task) for task in task_list]

    def _info(self):
        """Return the record schema shared by every task."""
        schema = {
            "input": datasets.Value("string"),
            "context": datasets.Value("string"),
            "answers": [datasets.Value("string")],
            "length": datasets.Value("int32"),
            "dataset": datasets.Value("string"),
            "language": datasets.Value("string"),
            "all_classes": [datasets.Value("string")],
            "_id": datasets.Value("string"),
        }
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(schema),
            homepage=_HOMEPAGE,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and yield a single TEST split."""
        extracted_dir = dl_manager.download_and_extract(_URL)
        jsonl_path = os.path.join(extracted_dir, "data", f"{self.config.name}.jsonl")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": jsonl_path},
            )
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from the task's JSONL file.

        Keys are unique per config: ``"<task_name>-<row_index>"``.
        """
        # Field order mirrors the schema declared in _info().
        fields = (
            "input",
            "context",
            "answers",
            "length",
            "dataset",
            "language",
            "_id",
            "all_classes",
        )
        with open(filepath, encoding="utf-8") as handle:
            for row_idx, raw_line in enumerate(handle):
                record = json.loads(raw_line)
                yield f"{self.config.name}-{row_idx}", {
                    field: record[field] for field in fields
                }