# coding=utf-8
import json
import os
import logging

import datasets
import numpy as np
from PIL import Image
from transformers import AutoTokenizer

logger = datasets.logging.get_logger(__name__)
_CITATION = """\ | |
@article{Jaume2019FUNSDAD, | |
title={FUNSD: A Dataset for Form Understanding in Noisy Scanned Documents}, | |
author={Guillaume Jaume and H. K. Ekenel and J. Thiran}, | |
journal={2019 International Conference on Document Analysis and Recognition Workshops (ICDARW)}, | |
year={2019}, | |
volume={2}, | |
pages={1-6} | |
} | |
""" | |
_DESCRIPTION = """\ | |
https://guillaumejaume.github.io/FUNSD/ | |
""" | |


def load_image(image_path, size=None):
    image = Image.open(image_path).convert("RGB")
    w, h = image.size
    if size is not None:
        # resize image
        image = image.resize((size, size))
        image = np.asarray(image)
        image = image[:, :, ::-1]  # flip color channels from RGB to BGR
        image = image.transpose(2, 0, 1)  # move channels to first dimension
    return image, (w, h)


# collapse a box given as a flat [x, y, x, y, ...] list to [left, top, right, bottom]
def simplify_bbox(bbox):
    return [
        min(bbox[0::2]),
        min(bbox[1::2]),
        max(bbox[2::2]),
        max(bbox[3::2]),
    ]


# merge several [x0, y0, x1, y1] boxes into their bounding rectangle
def merge_bbox(bbox_list):
    x0, y0, x1, y1 = list(zip(*bbox_list))
    return [min(x0), min(y0), max(x1), max(y1)]


# scale pixel coordinates to the 0-1000 range relative to the page size (width, height)
def normalize_bbox(bbox, size):
    return [
        int(1000 * bbox[0] / size[0]),
        int(1000 * bbox[1] / size[1]),
        int(1000 * bbox[2] / size[0]),
        int(1000 * bbox[3] / size[1]),
    ]


class FunsdConfig(datasets.BuilderConfig):
    """BuilderConfig for FUNSD."""

    def __init__(self, **kwargs):
        """BuilderConfig for FUNSD.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(FunsdConfig, self).__init__(**kwargs)


class Funsd(datasets.GeneratorBasedBuilder):
    """FUNSD dataset."""

    BUILDER_CONFIGS = [
        FunsdConfig(name="funsd", version=datasets.Version("1.0.0"), description="FUNSD dataset"),
    ]
    tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "input_ids": datasets.Sequence(datasets.Value("int64")),
                    "bbox": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                    "labels": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=["O", "B-HEADER", "I-HEADER", "B-QUESTION", "I-QUESTION", "B-ANSWER", "I-ANSWER"]
                        )
                    ),
                    "image": datasets.Array3D(shape=(3, 224, 224), dtype="uint8"),
                    "entities": datasets.Sequence(
                        {
                            "start": datasets.Value("int64"),
                            "end": datasets.Value("int64"),
                            "label": datasets.ClassLabel(names=["HEADER", "QUESTION", "ANSWER"]),
                        }
                    ),
                    "original_image": datasets.features.Image(),
"entities": datasets.Sequence( | |
{ | |
"start": datasets.Value("int64"), | |
"end": datasets.Value("int64"), | |
"label": datasets.ClassLabel(names=["HEADER", "QUESTION", "ANSWER"]), | |
} | |
), | |
"relations": datasets.Sequence( | |
{ | |
"head": datasets.Value("int64"), | |
"tail": datasets.Value("int64"), | |
"start_index": datasets.Value("int64"), | |
"end_index": datasets.Value("int64"), | |
} | |
), | |
} | |
), | |
supervised_keys=None, | |
homepage="https://guillaumejaume.github.io/FUNSD/", | |
citation=_CITATION, | |
) | |

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        downloaded_file = dl_manager.download_and_extract("https://guillaumejaume.github.io/FUNSD/dataset.zip")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": f"{downloaded_file}/dataset/training_data/"}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": f"{downloaded_file}/dataset/testing_data/"}
            ),
        ]

    def _generate_examples(self, filepath):
        logger.info("⏳ Generating examples from = %s", filepath)
        ann_dir = os.path.join(filepath, "annotations")
        img_dir = os.path.join(filepath, "images")
        for guid, file in enumerate(sorted(os.listdir(ann_dir))):
            doc_id = file.split(".")[0]
            file_path = os.path.join(ann_dir, file)
            with open(file_path, "r", encoding="utf8") as f:
                document = json.load(f)
            image_path = os.path.join(img_dir, file)
            image_path = image_path.replace("json", "png")
            image, size = load_image(image_path, size=224)
            original_image, _ = load_image(image_path)
            document = document["form"]
            tokenized_doc = {"input_ids": [], "bbox": [], "labels": []}
            entities = []
            relations = []
            # annotation id to label dict
            id2label = {}
            entity_id_to_index_map = {}
            empty_entity = set()
            for line in document:
                # record entries with no text in empty_entity and skip them
                if len(line["text"]) == 0:
                    empty_entity.add(line["id"])
                    continue
                id2label[line["id"]] = line["label"]
                relations.extend([tuple(sorted(l)) for l in line["linking"]])
                tokenized_inputs = self.tokenizer(
                    line["text"],
                    add_special_tokens=False,
                    return_offsets_mapping=True,
                    return_attention_mask=False,
                )
                text_length = 0
                ocr_length = 0
                bbox = []
                last_box = None
                for token_id, offset in zip(tokenized_inputs["input_ids"], tokenized_inputs["offset_mapping"]):
                    if token_id == 6:
                        # this token maps to no OCR text of its own; leave its box as None and fill it in below
                        bbox.append(None)
                        continue
                    text_length += offset[1] - offset[0]
                    tmp_box = []
                    # consume OCR words until they cover the characters seen so far
                    while ocr_length < text_length:
                        if len(line["words"]) == 0:
                            break
                        ocr_word = line["words"].pop(0)
                        ocr_length += len(
                            self.tokenizer._tokenizer.normalizer.normalize_str(ocr_word["text"].strip())
                        )
                        tmp_box.append(simplify_bbox(ocr_word["box"]))
                    if len(tmp_box) == 0:
                        tmp_box = last_box
                    bbox.append(normalize_bbox(merge_bbox(tmp_box), size))
                    last_box = tmp_box
                # a None box is replaced by the top-left point of the following token's box
                bbox = [
                    [bbox[i + 1][0], bbox[i + 1][1], bbox[i + 1][0], bbox[i + 1][1]] if b is None else b
                    for i, b in enumerate(bbox)
                ]
if line["label"] == "other": | |
label = ["O"] * len(bbox) | |
else: | |
label = [f"I-{line['label'].upper()}"] * len(bbox) | |
label[0] = f"B-{line['label'].upper()}" | |
tokenized_inputs.update({"bbox": bbox, "labels": label}) | |
if label[0] != "O": | |
entity_id_to_index_map[line["id"]] = len(entities) | |
entities.append( # determine the number of tokens wiithin the text and their start and end index | |
{ | |
"start": len(tokenized_doc["input_ids"]), # start index of the token of text. eg for text hello world having token hello world, it is 0 | |
"end": len(tokenized_doc["input_ids"]) + len(tokenized_inputs["input_ids"]), # end index of the token of text. This will be 2 for hello world. | |
"label": line["label"].upper(), # label of the text | |
} | |
) | |
for i in tokenized_doc: | |
tokenized_doc[i] = tokenized_doc[i] + tokenized_inputs[i] | |
relations = list(set(relations)) | |
relations = [rel for rel in relations if rel[0] not in empty_entity and rel[1] not in empty_entity] | |
kvrelations = [] | |
for rel in relations: | |
pair = [id2label[rel[0]], id2label[rel[1]]] | |
if pair == ["question", "answer"]: | |
kvrelations.append( | |
{"head": entity_id_to_index_map[rel[0]], "tail": entity_id_to_index_map[rel[1]]} | |
) | |
elif pair == ["answer", "question"]: | |
kvrelations.append( | |
{"head": entity_id_to_index_map[rel[1]], "tail": entity_id_to_index_map[rel[0]]} | |
) | |
else: | |
continue | |
def get_relation_span(rel): | |
bound = [] | |
for entity_index in [rel["head"], rel["tail"]]: | |
bound.append(entities[entity_index]["start"]) | |
bound.append(entities[entity_index]["end"]) | |
return min(bound), max(bound) | |
relations = sorted( | |
[ | |
{ | |
"head": rel["head"], | |
"tail": rel["tail"], | |
"start_index": get_relation_span(rel)[0], | |
"end_index": get_relation_span(rel)[1], | |
} | |
for rel in kvrelations | |
], | |
key=lambda x: x["head"], | |
) | |
            chunk_size = 512
            for chunk_id, index in enumerate(range(0, len(tokenized_doc["input_ids"]), chunk_size)):
                item = {}
                for k in tokenized_doc:
                    item[k] = tokenized_doc[k][index : index + chunk_size]
                entities_in_this_span = []
                global_to_local_map = {}
                for entity_id, entity in enumerate(entities):
                    if (  # open question: what happens when part of an entity's text falls in one chunk and the rest in another?
                        index <= entity["start"] < index + chunk_size
                        and index <= entity["end"] < index + chunk_size
                    ):
                        entity["start"] = entity["start"] - index
                        entity["end"] = entity["end"] - index
                        global_to_local_map[entity_id] = len(entities_in_this_span)
                        entities_in_this_span.append(entity)
                relations_in_this_span = []
                for relation in relations:
                    if (  # open question: what happens when the question lands in one chunk and the answer in another?
                        index <= relation["start_index"] < index + chunk_size
                        and index <= relation["end_index"] < index + chunk_size
                    ):
                        relations_in_this_span.append(
                            {
                                "head": global_to_local_map[relation["head"]],
                                "tail": global_to_local_map[relation["tail"]],
                                "start_index": relation["start_index"] - index,
                                "end_index": relation["end_index"] - index,
                            }
                        )
                item.update(
                    {
                        "id": f"{doc_id}_{chunk_id}",
                        "image": image,
                        "original_image": original_image,
                        "entities": entities_in_this_span,
                        "relations": relations_in_this_span,
                    }
                )
                yield f"{doc_id}_{chunk_id}", item