| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| |
| """IIIT5K dataset.""" |
|
|
|
|
| import scipy.io |
|
|
| import datasets |
|
|
| import os |
| from pathlib import Path |
|
|
|
|
# BibTeX entry for the paper that introduced the dataset (Mishra et al., BMVC 2012).
_CITATION = """\
@InProceedings{MishraBMVC12,
author = "Mishra, A. and Alahari, K. and Jawahar, C.~V.",
title = "Scene Text Recognition using Higher Order Language Priors",
booktitle= "BMVC",
year = "2012"
}
"""

# Human-readable summary; surfaced through DatasetInfo.description below.
_DESCRIPTION = """\
The IIIT 5K-Word dataset is harvested from Google image search.
Query words like billboards, signboard, house numbers, house name plates, movie posters were used to collect images.
The dataset contains 5000 cropped word images from Scene Texts and born-digital images.
The dataset is divided into train and test parts.
This dataset can be used for large lexicon cropped word recognition.
We also provide a lexicon of more than 0.5 million dictionary words with this dataset.
"""

# Official project page for the dataset.
_HOMEPAGE = "http://cvit.iiit.ac.in/projects/SceneTextUnderstanding/IIIT5K.html"

# Direct download URL for the V3.0 archive (images plus .mat annotation files).
_DL_URL = "http://cvit.iiit.ac.in/projects/SceneTextUnderstanding/IIIT5K-Word_V3.0.tar.gz"
|
|
|
|
class IIIT5K(datasets.GeneratorBasedBuilder):
    """Builder for the IIIT-5K cropped-word scene-text recognition dataset."""

    def _info(self):
        """Return dataset metadata: feature schema, description, homepage, citation."""
        features = datasets.Features(
            {
                "image": datasets.Image(),
                "label": datasets.Value("string"),
                "small_lexicon": datasets.Sequence(datasets.Value("string")),
                "medium_lexicon": datasets.Sequence(datasets.Value("string")),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the archive, then define the train/test splits."""
        # The tarball unpacks into a single top-level "IIIT5K" directory;
        # hoist that root once instead of re-deriving it per split.
        root = Path(dl_manager.download_and_extract(_DL_URL)) / "IIIT5K"
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "split": "train",
                    "archive_path": root,
                    "info_path": root / "traindata.mat",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "split": "test",
                    "archive_path": root,
                    "info_path": root / "testdata.mat",
                },
            ),
        ]

    def _generate_examples(self, split, archive_path, info_path):
        """Yield (index, example) pairs from the split's MATLAB annotation file.

        Each record of the struct array holds, in order: the image path
        relative to the archive root, the ground-truth word, and the
        small (50-word) and medium (1000-word) candidate lexicons.
        """
        # loadmat returns a dict keyed by MATLAB variable name:
        # "traindata" for the train split, "testdata" for test.
        records = scipy.io.loadmat(info_path)[split + "data"][0]

        for idx, record in enumerate(records):
            yield idx, {
                # A string path here lets the Image feature decode the file lazily.
                "image": str(Path(archive_path) / str(record[0][0])),
                "label": str(record[1][0]),
                "small_lexicon": [str(word[0]) for word in record[2][0]],
                "medium_lexicon": [str(word[0]) for word in record[3][0]],
            }
|
|