| import json |
| import os |
|
|
| import datasets |
|
|
| _CITATION = """\ |
| @article{scialom2020mlsum, |
| title={MLSUM: The Multilingual Summarization Corpus}, |
| author={Scialom, Thomas and Dray, Paul-Alexis and Lamprier, Sylvain and Piwowarski, Benjamin and Staiano, Jacopo}, |
| journal={arXiv preprint arXiv:2004.14900}, |
| year={2020} |
| } |
| """ |
|
|
| _DESCRIPTION = """\ |
| This is the MLSUM subset of the GEM benchmark. MLSUM is the first large-scale MultiLingual SUMmarization dataset. |
| Obtained from online newspapers, it contains 1.5M+ article/summary pairs in five different languages -- namely, French, German, Spanish, Russian, Turkish. |
Together with English newspapers from the popular CNN/Daily Mail dataset, the collected data form a large-scale multilingual dataset which can enable new research directions for the text summarization community.
| We report cross-lingual comparative analyses based on state-of-the-art systems. |
| These highlight existing biases which motivate the use of a multi-lingual dataset. |
| """ |
_URL = "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/"
_BAD_IDS_URL = "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_mlsum_bad_ids_fixed.json"
_CHALLENGE_SET_URL = "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/"
_LANG = ["de", "es"]
# Per-language download URLs: the main splits come from the MLSUM mirror,
# the bad-ids list and the challenge sets from the GEM bucket.
_URLs = {
    lang: {
        "train": f"{_URL}{lang}_train.zip",
        "validation": f"{_URL}{lang}_val.zip",
        "test": f"{_URL}{lang}_test.zip",
        "bad_ids": _BAD_IDS_URL,
        "challenge_set": f"{_CHALLENGE_SET_URL}mlsum_{lang}.zip",
    }
    for lang in _LANG
| } |
|
|
|
|
| class Mlsum(datasets.GeneratorBasedBuilder): |
|
|
| BUILDER_CONFIGS = [ |
| datasets.BuilderConfig( |
| name=lang, |
| version=datasets.Version("1.0.0"), |
            description=f"MLSUM {lang} subset of the GEM benchmark",
| ) |
| for lang in _LANG |
| ] |
|
|
| def _info(self): |
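        """Specifies the features, citation, and description of the dataset."""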
| return datasets.DatasetInfo( |
| description=_DESCRIPTION, |
| features=datasets.Features( |
| { |
| "gem_id": datasets.Value("string"), |
| "gem_parent_id": datasets.Value("string"), |
| "text": datasets.Value("string"), |
| "topic": datasets.Value("string"), |
| "url": datasets.Value("string"), |
| "title": datasets.Value("string"), |
| "date": datasets.Value("string"), |
| "target": datasets.Value("string"), |
| "references": [datasets.Value("string")], |
| } |
| ), |
| supervised_keys=None, |
| homepage="", |
| citation=_CITATION, |
| ) |
|
|
| def _split_generators(self, dl_manager): |
| """Returns SplitGenerators.""" |
| dl_dir = dl_manager.download_and_extract(_URLs[self.config.name]) |
| lang = str(self.config.name) |
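        # Challenge splits and the filenames they correspond to inside the
        # extracted challenge-set archive.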
| challenge_sets = [ |
| ("challenge_train_sample", f"train_mlsum_{lang}_RandomSample500.json"), |
| ("challenge_validation_sample", f"validation_mlsum_{lang}_RandomSample500.json"), |
| ("challenge_test_covid", f"{lang}_test_covid19_cleaned.jsonl"), |
| ] |
| return [ |
| datasets.SplitGenerator( |
| name=datasets.Split.TRAIN, |
| gen_kwargs={ |
| "filepath": os.path.join(dl_dir["train"], lang + "_train.jsonl"), |
| "split": "train", |
| "lang": lang, |
| "filepaths": dl_dir["bad_ids"], |
| }, |
| ), |
| datasets.SplitGenerator( |
| name=datasets.Split.VALIDATION, |
| gen_kwargs={ |
| "filepath": os.path.join(dl_dir["validation"], lang + "_val.jsonl"), |
| "split": "validation", |
| "lang": lang, |
| "filepaths": dl_dir["bad_ids"], |
| }, |
| ), |
| datasets.SplitGenerator( |
| name=datasets.Split.TEST, |
| gen_kwargs={ |
| "filepath": os.path.join(dl_dir["test"], lang + "_test.jsonl"), |
| "split": "test", |
| "lang": lang, |
| "filepaths": dl_dir["bad_ids"], |
| }, |
| ), |
| ] + [ |
| datasets.SplitGenerator( |
| name=challenge_split, |
| gen_kwargs={ |
| "filepath": os.path.join(dl_dir["challenge_set"], f"mlsum_{self.config.name}", filename), |
| "split": challenge_split, |
| }, |
| ) |
| for challenge_split, filename in challenge_sets |
| ] |
|
|
| def _generate_examples(self, filepath, split, filepaths=None, lang=None): |
| """Yields examples.""" |
        if split in ["train", "validation", "test", "challenge_test_covid"]:
            if split == "challenge_test_covid":
                bad_ids = set()
            else:
                with open(filepaths, encoding="utf-8") as f:
                    bad_ids_dct = json.load(f)
                # Keep the bad URLs in a set for O(1) membership checks.
                bad_ids = {bad_url for _, bad_url in bad_ids_dct[f"{lang}-{split}"]}
| with open(filepath, encoding="utf-8") as f: |
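                # Renumber the kept examples consecutively so that gem_ids stay
                # contiguous after bad articles are filtered out.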
| id_ = -1 |
| for line in f: |
| data = json.loads(line) |
                    if data["url"] in bad_ids:
                        continue
                    id_ += 1
                    yield id_, {
                        "gem_id": f"mlsum_{self.config.name}-{split}-{id_}",
                        "gem_parent_id": f"mlsum_{self.config.name}-{split}-{id_}",
                        "text": data["text"],
                        "target": data["summary"],
                        # The train split has no references; elsewhere the gold
                        # summary doubles as the single reference.
                        "references": [] if split == "train" else [data["summary"]],
                        "topic": data["topic"],
                        "url": data["url"],
                        "title": data["title"],
                        "date": data["date"],
                    }
        else:
            # The remaining challenge sets are plain JSON files holding a list of
            # complete examples, possibly wrapped in a single-entry dict.
            with open(filepath, encoding="utf-8") as f:
                exples = json.load(f)
            if isinstance(exples, dict):
                assert len(exples) == 1, "multiple entries found"
                exples = list(exples.values())[0]
            for id_, exple in enumerate(exples):
                if len(exple) == 0:
                    continue  # skip empty placeholder entries
                # Keep the original id as the parent id and assign a split-local id.
                exple["gem_parent_id"] = exple["gem_id"]
                exple["gem_id"] = f"mlsum_{self.config.name}-{split}-{id_}"
                yield id_, exple
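

# A minimal usage sketch (an illustration, assuming this script is saved locally
# as "mlsum.py" and a `datasets` version that still supports loading scripts):
#
#     import datasets
#
#     ds = datasets.load_dataset("./mlsum.py", "de")
#     print(ds["train"][0]["title"])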
|
|