import os

# Upgrade pip first, then install the pinned PyTorch build (CUDA 11.1 wheels).
os.system('python -m pip install --upgrade pip')
os.system('pip install -q torch==1.10.0+cu111 torchvision==0.11.0+cu111 -f https://download.pytorch.org/whl/torch_stable.html')
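
# Detectron2 provides the visual backbone required by LayoutLMv2/LayoutXLM.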
os.system('pip install git+https://github.com/facebookresearch/detectron2.git')

import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()

import gradio as gr
import re
import string
from operator import itemgetter
import collections

import pypdf
from pypdf import PdfReader
from pypdf.errors import PdfReadError
import pypdfium2 as pdfium

import langdetect
from langdetect import detect_langs

import pandas as pd
import numpy as np
import random
import tempfile
import itertools

from matplotlib import font_manager
from PIL import Image, ImageDraw, ImageFont
import cv2
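
# Project helpers (pdf_to_images, extraction_data_from_image, CustomDataset,
# the prediction and drawing utilities, plus max_imgboxes and image_blank)
# live in files/functions.py.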
import sys
sys.path.insert(0, 'files/')

import functions
from functions import *

import torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
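
# LayoutXLM shares the LayoutLMv2 architecture, so it loads through the LayoutLMv2 classes.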
from transformers import LayoutLMv2ForTokenClassification

model_id = "pierreguillou/layout-xlm-base-finetuned-with-DocLayNet-base-at-paragraphlevel-ml512"
model = LayoutLMv2ForTokenClassification.from_pretrained(model_id)
model.to(device)
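
# apply_ocr=False: the APP runs its own OCR engine (PyTesseract, via functions.py)
# and feeds the words and bounding boxes to the feature extractor itself.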
from transformers import LayoutLMv2FeatureExtractor
feature_extractor = LayoutLMv2FeatureExtractor(apply_ocr=False)
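
# LayoutXLM uses the XLM-RoBERTa vocabulary for its text embeddings.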
from transformers import AutoTokenizer
tokenizer_id = "xlm-roberta-base"
tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
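
# The fine-tuned checkpoint carries the 11 DocLayNet label mappings (Caption,
# Footnote, Formula, List-item, Page-footer, Page-header, Picture,
# Section-header, Table, Text, Title).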
id2label = model.config.id2label
label2id = model.config.label2id
num_labels = len(id2label)
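
# Main inference pipeline: PDF -> page images -> OCR -> 512-token chunks ->
# token-level predictions -> paragraph-level aggregation -> labeled images,
# CSV files and DataFrames (one set per displayed page).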
def app_outputs(uploaded_pdf):
    filename, msg, images = pdf_to_images(uploaded_pdf)
    num_images = len(images)

    if not msg.startswith("Error with the PDF"):

        # OCR and layout extraction on the page images
        dataset, texts_lines, texts_pars, texts_lines_par, row_indexes, par_boxes, line_boxes, lines_par_boxes = extraction_data_from_image(images)

        # Encode into 512-token chunks for inference
        encoded_dataset = dataset.map(prepare_inference_features_paragraph, batched=True, batch_size=64, remove_columns=dataset.column_names)
        custom_encoded_dataset = CustomDataset(encoded_dataset, tokenizer)

        # Token-level predictions, then aggregation to paragraph level
        outputs, images_ids_list, chunk_ids, input_ids, bboxes = predictions_token_level(images, custom_encoded_dataset)
        probs_bbox, bboxes_list_dict, input_ids_dict_dict, probs_dict_dict, df = predictions_paragraph_level(dataset, outputs, images_ids_list, chunk_ids, input_ids, bboxes)

        # Draw the labeled bounding boxes on the page images
        images = get_labeled_images(dataset, images_ids_list, bboxes_list_dict, probs_dict_dict)
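
        # Save one labeled image per page, then pad with blanks (or trim)
        # so that exactly max_imgboxes outputs are returned.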
        img_files = list()
        for i in range(num_images):
            if filename != "files/blank.png":
                img_file = f"img_{i}_" + filename.replace(".pdf", ".png")
            else:
                img_file = filename.replace(".pdf", ".png")
            img_file = img_file.replace("/", "_")
            images[i].save(img_file)
            img_files.append(img_file)

        if num_images < max_imgboxes:
            img_files += [image_blank] * (max_imgboxes - num_images)
            images += [Image.open(image_blank)] * (max_imgboxes - num_images)
            for count in range(max_imgboxes - num_images):
                df[num_images + count] = pd.DataFrame()
        else:
            img_files = img_files[:max_imgboxes]
            images = images[:max_imgboxes]
            df = dict(itertools.islice(df.items(), max_imgboxes))

        csv_files = list()
        for i in range(max_imgboxes):
            csv_file = f"csv_{i}_" + filename.replace(".pdf", ".csv")
            csv_file = csv_file.replace("/", "_")
            csv_files.append(gr.File.update(value=csv_file, visible=True))
            df[i].to_csv(csv_file, encoding="utf-8", index=False)
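
    # Error path: return blank images and an empty CSV (the APP processes only
    # the first 2 pages, so output slots 0 and 1 are filled).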
    else:
        img_files, images, csv_files = [""] * max_imgboxes, [""] * max_imgboxes, [""] * max_imgboxes
        img_files[0], img_files[1] = image_blank, image_blank
        images[0], images[1] = Image.open(image_blank), Image.open(image_blank)
        csv_file = "csv_wo_content.csv"
        csv_files[0], csv_files[1] = gr.File.update(value=csv_file, visible=True), gr.File.update(value=csv_file, visible=True)
        # DataFrame.to_csv returns None, so write the empty CSV first and
        # return the empty DataFrames themselves as the dataframe outputs.
        df, df_empty = dict(), pd.DataFrame()
        df_empty.to_csv(csv_file, encoding="utf-8", index=False)
        df[0], df[1] = df_empty, df_empty

    return msg, img_files[0], img_files[1], images[0], images[1], csv_files[0], csv_files[1], df[0], df[1]
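
# Gradio UI: a PDF input, Submit/Clear buttons, and per-page file, image,
# CSV and dataframe outputs.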
with gr.Blocks(title="Inference APP for Document Understanding at paragraph level (v2 - LayoutXLM base)", css=".gradio-container") as demo:
    gr.HTML("""
    <div style="font-family:'Times New Roman', 'Serif'; font-size:26pt; font-weight:bold; text-align:center;"><h1>Inference APP for Document Understanding at paragraph level (v2 - LayoutXLM base)</h1></div>
    <div style="margin-top: 40px"><p>(03/31/2023) This Inference APP uses the <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://huggingface.co/pierreguillou/layout-xlm-base-finetuned-with-DocLayNet-base-at-paragraphlevel-ml512" target="_blank">LayoutXLM base model combined with XLM-RoBERTa base and fine-tuned on the dataset DocLayNet base at paragraph level</a> (chunk size of 512 tokens).</p></div>
    <div><p><a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://arxiv.org/abs/2104.08836" target="_blank">LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding</a> is a Document Understanding model that uses both layout and text to detect the labels of bounding boxes. Combined with the model <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://huggingface.co/xlm-roberta-base" target="_blank">XLM-RoBERTa base</a>, this fine-tuned model can <b>understand any language</b>. Fine-tuned on the dataset <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://huggingface.co/datasets/pierreguillou/DocLayNet-base" target="_blank">DocLayNet base</a>, it can <b>classify any bounding box (and its OCR text) into 11 labels</b> (Caption, Footnote, Formula, List-item, Page-footer, Page-header, Picture, Section-header, Table, Text, Title).</p></div>
    <div><p>It relies on an external OCR engine to get words and bounding boxes from the document image. This APP therefore first runs an OCR engine (<a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://github.com/madmaze/pytesseract#python-tesseract" target="_blank">PyTesseract</a>) to get the bounding boxes, then runs LayoutXLM base (already fine-tuned on the dataset DocLayNet base at paragraph level) on the individual tokens, and finally visualizes the result at paragraph level!</p></div>
    <div><p><b>It lets you get all pages of any PDF (in any language) with bounding boxes labeled at paragraph level, along with the associated dataframes of labeled data (bounding boxes, texts, labels) :-)</b></p></div>
    <div><p>However, the inference time per page can be high when running the model on CPU due to the number of paragraph predictions to be made. Therefore, to avoid running this APP for too long, <b>only the first 2 pages are processed by this APP</b>. If you want to increase this limit, you can either clone this APP in Hugging Face Space (or run its <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://github.com/piegu/language-models/blob/master/Gradio_inference_on_LayoutXLM_base_model_finetuned_on_DocLayNet_base_in_any_language_at_levelparagraphs_ml512.ipynb" target="_blank">notebook</a> on your own platform) and change the value of the parameter <code>max_imgboxes</code>, or run the inference notebook "<a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://github.com/piegu/language-models/blob/master/inference_on_LayoutXLM_base_model_finetuned_on_DocLayNet_base_in_any_language_at_levelparagraphs_ml512.ipynb" target="_blank">Document AI | Inference at paragraph level with a Document Understanding model (LayoutXLM base fine-tuned on DocLayNet dataset)</a>" on your own platform, as it does not have this limit.</p></div>
    <div style="margin-top: 20px"><p>More information about the DocLayNet datasets, the fine-tuning of the model and this APP in the following blog posts:</p>
    <ul><li>(03/31/2023) <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://medium.com/@pierre_guillou/document-ai-inference-app-and-fine-tuning-notebook-for-document-understanding-at-paragraph-level-3507af80573d" target="_blank">Document AI | Inference APP and fine-tuning notebook for Document Understanding at paragraph level with LayoutXLM base</a></li><li>(03/25/2023) <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://medium.com/@pierre_guillou/document-ai-app-to-compare-the-document-understanding-lilt-and-layoutxlm-base-models-at-line-1c53eb481a15" target="_blank">Document AI | APP to compare the Document Understanding LiLT and LayoutXLM (base) models at line level</a></li><li>(03/05/2023) <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://medium.com/@pierre_guillou/document-ai-inference-app-and-fine-tuning-notebook-for-document-understanding-at-line-level-with-b08fdca5f4dc" target="_blank">Document AI | Inference APP and fine-tuning notebook for Document Understanding at line level with LayoutXLM base</a></li><li>(02/14/2023) <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://medium.com/@pierre_guillou/document-ai-inference-app-for-document-understanding-at-line-level-a35bbfa98893" target="_blank">Document AI | Inference APP for Document Understanding at line level</a></li><li>(02/10/2023) <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://medium.com/@pierre_guillou/document-ai-document-understanding-model-at-line-level-with-lilt-tesseract-and-doclaynet-dataset-347107a643b8" target="_blank">Document AI | Document Understanding model at line level with LiLT, Tesseract and DocLayNet dataset</a></li><li>(01/31/2023) <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://medium.com/@pierre_guillou/document-ai-doclaynet-image-viewer-app-3ac54c19956" target="_blank">Document AI | DocLayNet image viewer APP</a></li><li>(01/27/2023) <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://medium.com/@pierre_guillou/document-ai-processing-of-doclaynet-dataset-to-be-used-by-layout-models-of-the-hugging-face-hub-308d8bd81cdb" target="_blank">Document AI | Processing of DocLayNet dataset to be used by layout models of the Hugging Face hub (finetuning, inference)</a></li></ul></div>
    """)

    with gr.Row():
        pdf_file = gr.File(label="PDF")
    with gr.Row():
        submit_btn = gr.Button(f"Display first {max_imgboxes} labeled PDF pages")
        reset_btn = gr.Button(value="Clear")
    with gr.Row():
        output_msg = gr.Textbox(label="Output message")
    with gr.Row():
        fileboxes = []
        for num_page in range(max_imgboxes):
            file_path = gr.File(visible=True, label=f"Image file of the PDF page n°{num_page}")
            fileboxes.append(file_path)
    with gr.Row():
        imgboxes = []
        for num_page in range(max_imgboxes):
            img = gr.Image(type="pil", label=f"Image of the PDF page n°{num_page}")
            imgboxes.append(img)
    with gr.Row():
        csvboxes = []
        for num_page in range(max_imgboxes):
            csv = gr.File(visible=True, label=f"CSV file at paragraph level (page {num_page})")
            csvboxes.append(csv)
    with gr.Row():
        dfboxes = []
        for num_page in range(max_imgboxes):
            df = gr.Dataframe(
                headers=["bounding boxes", "texts", "labels"],
                datatype=["str", "str", "str"],
                col_count=(3, "fixed"),
                visible=True,
                label=f"Data of page {num_page}",
                type="pandas",
                wrap=True,
            )
            dfboxes.append(df)

    outputboxes = [output_msg] + fileboxes + imgboxes + csvboxes + dfboxes
    submit_btn.click(app_outputs, inputs=[pdf_file], outputs=outputboxes)
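
    # Reset every input and output component (Gradio 3.x update() API).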
    reset_btn.click(
        lambda: [pdf_file.update(value=None), output_msg.update(value=None)] + [filebox.update(value=None) for filebox in fileboxes] + [imgbox.update(value=None) for imgbox in imgboxes] + [csvbox.update(value=None) for csvbox in csvboxes] + [dfbox.update(value=None) for dfbox in dfboxes],
        inputs=[],
        outputs=[pdf_file, output_msg] + fileboxes + imgboxes + csvboxes + dfboxes,
    )

    gr.Examples(
        [["files/example.pdf"]],
        [pdf_file],
        outputboxes,
        fn=app_outputs,
        cache_examples=True,
    )

demo.launch()