| import json |
| import os |
| import datasets |
| from tqdm import tqdm |
|
|
|
|
# Feature-dictionary keys, shared between the schema declared in _info()
# and the example dicts yielded by _generate_examples().
_ARTICLE_ID = "article_id"
_ARTICLE_WORDS = "article_words"
_ARTICLE_BBOXES = "article_bboxes"
_ARTICLE_NORM_BBOXES = "article_norm_bboxes"
_ABSTRACT = "abstract"
_ARTICLE_PDF_URL = "article_pdf_url"
|
|
def normalize_bbox(bbox, size):
    """Rescale an absolute bounding box to the 0-1000 coordinate range.

    Args:
        bbox: four absolute coordinates ``[x0, y0, x1, y1]``.
        size: ``(page_width, page_height)`` of the page the box lies on.

    Returns:
        The box with x-coordinates scaled by ``1000 / width`` and
        y-coordinates by ``1000 / height``, truncated to ints.
    """
    width, height = size
    # x-coordinates (indices 0 and 2) divide by width, y-coordinates by height.
    divisors = (width, height, width, height)
    return [int(1000 * coord / dim) for coord, dim in zip(bbox, divisors)]
|
|
|
|
class SciELOSummarizationConfig(datasets.BuilderConfig):
    """BuilderConfig for SciELOSummarization."""

    def __init__(self, lang, **kwargs):
        """BuilderConfig for SciELOSummarization.

        (Fixed: the docstring previously referenced ArxivSummarization,
        a copy-paste leftover from another loading script.)

        Args:
            lang: language (`es` for Spanish, `pt` for Portuguese).
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
        self.lang = lang
| |
|
|
class SciELOSummarizationDataset(datasets.GeneratorBasedBuilder):
    """SciELO summarization dataset with word-level layout (bounding boxes)."""

    BUILDER_CONFIGS = [
        SciELOSummarizationConfig(
            name="scielo_es",
            version=datasets.Version("1.0.0"),
            description="SciELO dataset for summarization (Spanish)",
            lang="es",
        ),
        SciELOSummarizationConfig(
            name="scielo_pt",
            version=datasets.Version("1.0.0"),
            description="SciELO dataset for summarization (Portuguese)",
            lang="pt",
        ),
    ]

    def _info(self):
        """Declare the feature schema: per-article word and bbox sequences plus abstract."""
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    _ARTICLE_ID: datasets.Value("string"),
                    _ARTICLE_WORDS: datasets.Sequence(datasets.Value("string")),
                    _ARTICLE_BBOXES: datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                    _ARTICLE_NORM_BBOXES: datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                    _ABSTRACT: datasets.Value("string"),
                    _ARTICLE_PDF_URL: datasets.Value("string"),
                }
            ),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the per-split token archives and abstract files.

        Each split has a zip of per-article token files plus a JSON-lines
        abstract file.  NOTE the asymmetric upstream naming: the archive and
        extracted directory use ``_val`` while the abstract file uses
        ``_validation``.
        """
        lang = self.config.lang

        train_dir = os.path.join(
            dl_manager.download_and_extract(lang + "_train.zip"), lang + "_train"
        )
        val_dir = os.path.join(
            dl_manager.download_and_extract(lang + "_val.zip"), lang + "_val"
        )
        test_dir = os.path.join(
            dl_manager.download_and_extract(lang + "_test.zip"), lang + "_test"
        )

        # Distinct names for the downloaded local paths (the original reused
        # the remote-filename variables, which obscured the data flow).
        train_abstracts_path = dl_manager.download_and_extract(lang + "_train.txt")
        val_abstracts_path = dl_manager.download_and_extract(lang + "_validation.txt")
        test_abstracts_path = dl_manager.download_and_extract(lang + "_test.txt")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_path": train_dir, "abstract_path": train_abstracts_path},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data_path": val_dir, "abstract_path": val_abstracts_path},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data_path": test_dir, "abstract_path": test_abstracts_path},
            ),
        ]

    def _generate_examples(self, data_path, abstract_path):
        """Generate SciELOSummarization examples.

        Each line of ``abstract_path`` is a JSON record with ``id``,
        ``abstract`` and ``pdf_url`` fields; the matching ``<id>.txt`` file in
        ``data_path`` holds one token per line with tab-separated columns:
        word, x0, y0, x1, y1, page_width, page_height.
        """
        # Only used to give tqdm a total; records are driven by the abstract file.
        filenames = sorted(os.listdir(data_path))

        guid = 0
        # Fix: open with an explicit utf-8 encoding (abstracts contain
        # Spanish/Portuguese text), consistent with the per-article open below.
        with open(abstract_path, "r", encoding="utf-8") as abstract_file:
            for record_line in tqdm(abstract_file, total=len(filenames), desc=f"Reading files in {data_path}"):
                guid += 1
                item = json.loads(record_line)
                filepath = os.path.join(data_path, item["id"] + ".txt")

                words = []
                bboxes = []
                norm_bboxes = []

                with open(filepath, encoding="utf-8") as f:
                    # Fix: renamed from `line` to avoid shadowing the outer
                    # loop variable.
                    for token_line in f:
                        splits = token_line.split("\t")
                        word = splits[0]
                        bbox = [int(b) for b in splits[1:5]]
                        page_width, page_height = int(splits[5]), int(splits[6])
                        norm_bbox = normalize_bbox(bbox, (page_width, page_height))

                        words.append(word)
                        bboxes.append(bbox)
                        norm_bboxes.append(norm_bbox)

                # Sanity checks: the three parallel sequences must stay aligned.
                assert len(words) == len(bboxes)
                assert len(bboxes) == len(norm_bboxes)

                yield guid, {
                    _ARTICLE_ID: item["id"],
                    _ARTICLE_WORDS: words,
                    _ARTICLE_BBOXES: bboxes,
                    _ARTICLE_NORM_BBOXES: norm_bboxes,
                    _ABSTRACT: item["abstract"],
                    _ARTICLE_PDF_URL: item["pdf_url"],
                }
|
|
| |