Commit 74b7d9d by TusharGoel (parent: cff7a66): Update README.md

README.md:

---
license: mit
language:
- en
pipeline_tag: document-question-answering
---
This model was trained on a sample of 15,000 questions from the DocVQA dataset.

```python
import torch
from transformers import AutoTokenizer, AutoModelForDocumentQuestionAnswering
from datasets import load_dataset

model_checkpoint = "TusharGoel/LayoutLM-Finetuned-DocVQA"
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint, add_prefix_space=True)
model_predict = AutoModelForDocumentQuestionAnswering.from_pretrained(model_checkpoint)

model_predict.eval()
dataset = load_dataset("nielsr/funsd", split="train")
example = dataset[0]

question = "What's Licensee Number?"

words = example["words"]
boxes = example["bboxes"]

# Encode the question together with the (already split) document words.
encoding = tokenizer(question.split(), words,
    is_split_into_words=True, return_token_type_ids=True, return_tensors="pt")

# Build one bounding box per token: document tokens get their word's box,
# the [SEP] token gets the special [1000, 1000, 1000, 1000] box, everything else a dummy box.
bbox = []
for i, s, w in zip(encoding.input_ids[0], encoding.sequence_ids(0), encoding.word_ids(0)):
    if s == 1:
        bbox.append(boxes[w])
    elif i == tokenizer.sep_token_id:
        bbox.append([1000] * 4)
    else:
        bbox.append([0] * 4)
encoding["bbox"] = torch.tensor([bbox])

word_ids = encoding.word_ids(0)
outputs = model_predict(**encoding)

loss = outputs.loss  # None here, since no labels were passed
start_scores = outputs.start_logits
end_scores = outputs.end_logits

# Map the highest-scoring start/end tokens back to word indices and print the answer span.
start, end = word_ids[start_scores.argmax(-1).item()], word_ids[end_scores.argmax(-1).item()]
print(" ".join(words[start : end + 1]))
```
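The example above picks the answer span with a plain argmax over the start and end logits. If a rough confidence score is also useful, one possible extension is sketched below; it reuses the `outputs`, `word_ids`, and `words` variables from the example, and the scoring scheme (product of the best start and end probabilities) is an illustrative choice, not something prescribed by this model card.

```python
import torch

# Turn the logits into probabilities over token positions.
start_probs = torch.softmax(outputs.start_logits, dim=-1)[0]
end_probs = torch.softmax(outputs.end_logits, dim=-1)[0]

start_idx = start_probs.argmax().item()
end_idx = end_probs.argmax().item()

# Simple span score: product of the best start and end probabilities.
span_score = (start_probs[start_idx] * end_probs[end_idx]).item()

start_word, end_word = word_ids[start_idx], word_ids[end_idx]
print("Answer:", " ".join(words[start_word : end_word + 1]))
print(f"Score: {span_score:.3f}")
```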
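For end-to-end inference on a document image (OCR included), the Transformers `document-question-answering` pipeline may also work with this checkpoint. The sketch below is an assumption rather than a documented usage of this model: it needs Pillow and pytesseract installed, and `document.png` is a placeholder path.

```python
from transformers import pipeline

# Hypothetical pipeline usage; compatibility of this checkpoint with the
# document-question-answering pipeline has not been verified here.
doc_qa = pipeline(
    "document-question-answering",
    model="TusharGoel/LayoutLM-Finetuned-DocVQA",
)

# pytesseract runs OCR on the placeholder image path below.
result = doc_qa(image="document.png", question="What's Licensee Number?")
print(result)
```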