| import os |
| import pandas as pd |
| import datasets |
| from glob import glob |
| import zipfile |
|
|
class AraSum(datasets.GeneratorBasedBuilder):
    """Builder for the AraSum Arabic abstractive-summarization corpus.

    Downloads the corpus zip from GitHub, locates the tab-separated data
    files inside it, and yields one example per row with three string
    fields: ``index``, ``summary`` and ``article``.
    """

    def _info(self):
        """Declare the dataset schema: three string columns per example."""
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    'index': datasets.Value('string'),
                    'summary': datasets.Value('string'),
                    'article': datasets.Value('string'),
                }
            )
        )

    def extract_all(self, dir):
        """Recursively extract every ``*.zip`` archive found under *dir*.

        Each archive is extracted into its own parent directory.

        NOTE: the parameter name ``dir`` shadows the builtin but is kept
        for backward compatibility with keyword callers.
        """
        # os.path.join/dirname instead of hand-rolled '/' splitting so the
        # paths are correct on Windows as well as POSIX.
        zip_files = glob(os.path.join(dir, '**', '**.zip'), recursive=True)
        for file in zip_files:
            with zipfile.ZipFile(file) as archive:
                archive.extractall(os.path.dirname(file))

    def get_all_files(self, dir):
        """Return every data/media file found recursively under *dir*.

        Only files with one of the recognized extensions are returned.
        """
        files = []
        valid_file_ext = ['txt', 'csv', 'tsv', 'xlsx', 'xls', 'xml', 'json',
                          'jsonl', 'html', 'wav', 'mp3', 'jpg', 'png']
        for ext in valid_file_ext:
            files += glob(f"{dir}/**/**.{ext}", recursive=True)
        return files

    def _split_generators(self, dl_manager):
        """Download and extract the corpus, exposing a single TRAIN split."""
        url = ['https://raw.githubusercontent.com/ppke-nlpg/AraSum/main/AbsArSumCorpus_csv_v1.zip']
        downloaded_files = dl_manager.download_and_extract(url)
        files = self.get_all_files(downloaded_files[0])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={'filepaths': {'inputs': files}},
            )
        ]

    def _generate_examples(self, filepaths):
        """Yield ``(key, example)`` pairs from each tab-separated input file.

        Files that do not parse to exactly three columns are skipped
        entirely; malformed rows within a file are dropped rather than
        aborting the load.
        """
        _id = 0
        for filepath in filepaths['inputs']:
            # BUG FIX: `error_bad_lines` was deprecated in pandas 1.3 and
            # removed in pandas 2.0 (it raised TypeError there).
            # `on_bad_lines='skip'` is the supported replacement with the
            # same row-dropping behavior.  Also dropped the no-op
            # `skiprows=0` and the regex form `sep=r'\t'` (plain '\t' is
            # equivalent with the python engine).
            df = pd.read_csv(filepath, sep='\t', header=0,
                             on_bad_lines='skip', engine='python')
            if len(df.columns) != 3:
                continue
            df.columns = ['index', 'summary', 'article']
            for _, record in df.iterrows():
                yield str(_id), {
                    'index': record['index'],
                    'summary': record['summary'],
                    'article': record['article'],
                }
                _id += 1
|
|
|
|