# coding=utf-8
"""Loading script for the SLR dataset (document layout / token classification).

Each example pairs OCR words and their normalized bounding boxes with NER tags
and a 224x224 BGR image tensor, in the style of FUNSD-like layout datasets.
"""

import json
import os

import numpy as np
from PIL import Image

import datasets

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
title={SLR dataset},
}
"""

_DESCRIPTION = """\
#
"""


def load_image(image_path):
    """Load an image and prepare it as a model-ready array.

    Args:
        image_path: Path to the image file on disk.

    Returns:
        A tuple ``(image, (w, h))`` where ``image`` is a ``(3, 224, 224)``
        uint8 array in BGR channel order and ``(w, h)`` is the ORIGINAL
        image size before resizing (needed by ``normalize_bbox``).
    """
    image = Image.open(image_path).convert("RGB")
    w, h = image.size
    image = image.resize((224, 224))
    image = np.asarray(image)
    image = image[:, :, ::-1]  # flip color channels from RGB to BGR
    image = image.transpose(2, 0, 1)  # HWC -> CHW (channels first)
    return image, (w, h)


def normalize_bbox(bbox, size):
    """Scale a pixel-space box ``[x0, y0, x1, y1]`` into the 0-1000 range.

    Args:
        bbox: Box coordinates in pixels.
        size: Original image ``(width, height)``.

    Returns:
        The box with each coordinate rescaled to ``[0, 1000]`` as ints.
    """
    return [
        int(1000 * bbox[0] / size[0]),
        int(1000 * bbox[1] / size[1]),
        int(1000 * bbox[2] / size[0]),
        int(1000 * bbox[3] / size[1]),
    ]


class SLRConfig(datasets.BuilderConfig):
    """BuilderConfig for SLR."""

    def __init__(self, **kwargs):
        """BuilderConfig for SLR.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(SLRConfig, self).__init__(**kwargs)


class SLR(datasets.GeneratorBasedBuilder):
    """SLR dataset."""

    BUILDER_CONFIGS = [
        SLRConfig(name="SLR", version=datasets.Version("1.0.0"), description="SLR dataset"),
    ]

    def _info(self):
        """Declare the dataset's feature schema, homepage and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "words": datasets.Sequence(datasets.Value("string")),
                    "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=["DATEISSUED","LOANTERM","PURPOSE","PRODUCT","PROPERTY","LOANAMOUNT","INTERESTRATE","MONTHLYPR","PREPENALTY","BALLOONPAYMENT","ESTMONTHLY","ESTTAXES"]
                        )
                    ),
                    "image": datasets.Array3D(shape=(3, 224, 224), dtype="uint8"),
                    "image_path": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="#",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # NOTE(review): hard-coded Colab-local archive path — this script only
        # works where /content/SLR/SLR/SLR.zip exists; consider a URL/config.
        downloaded_file = dl_manager.download_and_extract("/content/SLR/SLR/SLR.zip")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": f"{downloaded_file}/dataset/training_data/"}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": f"{downloaded_file}/dataset/testing_data/"}
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(guid, example)`` pairs from one split directory.

        Expects ``filepath`` to contain an ``annotations/`` dir of JSON files
        and an ``images/`` dir of same-named PNG files.
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        ann_dir = os.path.join(filepath, "annotations")
        img_dir = os.path.join(filepath, "images")
        for guid, file in enumerate(sorted(os.listdir(ann_dir))):
            words = []
            bboxes = []
            ner_tags = []
            file_path = os.path.join(ann_dir, file)
            with open(file_path, "r", encoding="utf8") as f:
                data = json.load(f)
            # Bug fix: the old `image_path.replace("json", "png")` replaced
            # EVERY "json" substring in the path (breaking e.g. a parent dir
            # named "json_data"); swap only the file extension instead.
            image_path = os.path.join(img_dir, os.path.splitext(file)[0] + ".png")
            image, size = load_image(image_path)
            for state in data:
                for item in state['form']:
                    labels = item['label']
                    word = item['text']
                    ner_tags.append(labels)
                    words.append(word)
                    bboxes.append(normalize_bbox(item['box'], size))
            yield guid, {
                "id": str(guid),
                "words": words,
                "bboxes": bboxes,
                "ner_tags": ner_tags,
                "image_path": image_path,
                "image": image,
            }