# docquery/app.py
import os

# Silence the HuggingFace tokenizers fork-parallelism warning; set before
# docquery/transformers are imported.
os.environ["TOKENIZERS_PARALLELISM"] = "false"

from PIL import ImageDraw
import streamlit as st

# st.set_page_config must be the first Streamlit command in the script.
st.set_page_config(layout="wide")

import torch

from docquery.pipeline import get_pipeline
from docquery.document import load_bytes, load_document

def ensure_list(x):
    if isinstance(x, list):
        return x
    else:
        return [x]
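
# Construct the docquery pipeline once per process. st.experimental_singleton
# caches the instance across reruns and sessions; the pipeline runs on the GPU
# when one is available.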
@st.experimental_singleton
def construct_pipeline():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    ret = get_pipeline(device=device)
    return ret
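
# st.cache memoizes answers by the function's arguments, so repeating a
# question about the same document does not re-run the model.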
@st.cache
def run_pipeline(question, document, top_k):
    return construct_pipeline()(question=question, **document.context, top_k=top_k)
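
# Helpers for pulling word boxes out of a document and mapping them onto the
# page preview image.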
# TODO: Move into docquery
# TODO: Support words past the first page (or window?)
def lift_word_boxes(document):
    return document.context["image"][0][1]
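
# Merge a span of word boxes into a single enclosing box (still on LayoutLM's
# 0-1000 coordinate scale).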
def expand_bbox(word_boxes):
    if len(word_boxes) == 0:
        return None
    min_x, min_y, max_x, max_y = zip(*[x[1] for x in word_boxes])
    return [min(min_x), min(min_y), max(max_x), max(max_y)]

# LayoutLM boxes are normalized to 0, 1000
def normalize_bbox(box, width, height):
    pct = [c / 1000 for c in box]
    return [pct[0] * width, pct[1] * height, pct[2] * width, pct[3] * height]

st.markdown("# DocQuery: Query Documents w/ NLP")
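# The loaded document lives in session state so it survives Streamlit reruns.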
if "document" not in st.session_state:
st.session_state["document"] = None
input_col, model_col = st.columns([2, 1])
with input_col:
    input_type = st.radio("Pick an input type", ["Upload", "URL"], horizontal=True)
with model_col:
    # NOTE: model_type is not used anywhere below yet, and its options simply
    # mirror the input types.
    model_type = st.radio("Pick a model", ["Upload", "URL"], horizontal=True)
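
# Widget callbacks: parse the uploaded file / fetched URL into a docquery
# document and stash it in session state. Touching document.context forces the
# (potentially slow) parsing/OCR step to run inside the spinner.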
def load_file_cb():
    if st.session_state.file_input is None:
        return

    file = st.session_state.file_input
    with loading_placeholder:
        with st.spinner("Processing..."):
            document = load_bytes(file, file.name)
            _ = document.context
            st.session_state.document = document

def load_url_cb():
    if st.session_state.url_input is None:
        return

    url = st.session_state.url_input
    with loading_placeholder:
        with st.spinner("Downloading..."):
            document = load_document(url)
        with st.spinner("Processing..."):
            _ = document.context
            st.session_state.document = document
if input_type == "Upload":
file = st.file_uploader(
"Upload a PDF or Image document", key="file_input", on_change=load_file_cb
)
elif input_type == "URL":
# url = st.text_input("URL", "", on_change=load_url_callback, key="url_input")
url = st.text_input("URL", "", key="url_input", on_change=load_url_cb)
question = st.text_input("QUESTION", "")
document = st.session_state.document
loading_placeholder = st.empty()
if document is not None:
    col1, col2 = st.columns(2)
    image = document.preview
    colors = ["blue", "red", "green"]
if document is not None and question is not None and len(question) > 0:
    col2.header("Answers")
    with col2:
        answers_placeholder = st.empty()
        answers_loading_placeholder = st.empty()

        with answers_loading_placeholder:
            with st.spinner("Processing question..."):
                predictions = run_pipeline(question=question, document=document, top_k=1)

        with answers_placeholder:
            word_boxes = lift_word_boxes(document)
            image = image.copy()
            draw = ImageDraw.Draw(image)
            for i, p in enumerate(ensure_list(predictions)):
                col2.markdown(f"#### {p['answer']}: ({round(p['score'] * 100, 1)}%)")
                x1, y1, x2, y2 = normalize_bbox(
                    expand_bbox(word_boxes[p["start"] : p["end"] + 1]),
                    image.width,
                    image.height,
                )
                draw.rectangle(((x1, y1), (x2, y2)), outline=colors[i], width=3)

if document is not None:
    col1.image(image, use_column_width="auto")
"DocQuery uses LayoutLMv1 fine-tuned on DocVQA, a document visual question answering dataset, as well as SQuAD, which boosts its English-language comprehension. To use it, simply upload an image or PDF, type a question, and click 'submit', or click one of the examples to load them."
"[Github Repo](https://github.com/impira/docquery)"