Upload 2 files
- handler.py +16 -20
- requirements.txt +4 -1
handler.py
CHANGED
@@ -1,20 +1,14 @@
 from typing import Dict, List, Any
-
-
+import base64
+from PIL import Image
+from io import BytesIO
+import numpy as np
+from paddleocr import PaddleOCR, draw_ocr
 
 class EndpointHandler():
     def __init__(self, path=""):
-
-
-        config = {'context_length' : 2048,'max_new_tokens': 656, 'repetition_penalty': 1.1,'temperature': 0.1, 'stream': True}
-        llm = AutoModelForCausalLM.from_pretrained(
-            model_id,
-            model_file=model_file,
-            model_type="mistral",
-            gpu_layers=50,#50 #110
-            **config
-        )
-        self.pipeline = llm
+
+        self.pipeline = PaddleOCR(lang="en", ocr_version="PP-OCRv4", show_log=False, use_gpu=True)
 
 
     def __call__(self, data: Any) -> List[List[Dict[str, float]]]:
@@ -27,13 +21,15 @@ class EndpointHandler():
         - "label": A string representing what the label/class is. There can be multiple labels.
         - "score": A score between 0 and 1 describing how confident the model is for this label/class.
         """
+
        inputs = data.pop("inputs", data)
-        parameters = data.pop("parameters", None)
-
+        #parameters = data.pop("parameters", None)
+
+        receipt_image = Image.open(BytesIO(base64.b64decode(inputs)))
+        receipt_image_array = np.array(receipt_image.convert('RGB'))
+        result = self.pipeline.ocr(receipt_image_array, cls=True)
+        txts = [line[1][0] for line in result[0]]
         # pass inputs with all kwargs in data
-
-            prediction = self.pipeline(inputs, stream=False)
-        else:
-            prediction = self.pipeline(inputs, stream=False)
+        extract = "".join(txts)
         # postprocess the prediction
-        return
+        return extract
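
For context, the new handler decodes a base64 image from the request payload, runs PaddleOCR on it, and returns the concatenated recognized text; in paddleocr 2.x, ocr() returns one list per input image whose entries look like [box, (text, confidence)], which is why the handler indexes line[1][0]. A minimal local sketch of how the committed handler might be exercised is below; the file name receipt.jpg is an assumption, and the payload shape {"inputs": <base64 string>} follows the data.pop("inputs", data) call in the diff:

import base64

from handler import EndpointHandler

handler = EndpointHandler(path="")

# Encode a sample image the same way a client would before POSTing it to the
# endpoint ("receipt.jpg" is a placeholder, not part of the commit).
with open("receipt.jpg", "rb") as f:
    payload = {"inputs": base64.b64encode(f.read()).decode("utf-8")}

# __call__ decodes the image, runs PaddleOCR, and joins the recognized lines.
extract = handler(payload)
print(extract)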
requirements.txt
CHANGED
@@ -1 +1,4 @@
-
+paddlepaddle-gpu -i https://pypi.tuna.tsinghua.edu.cn/simple
+"paddleocr>=2.0.1"
+pillow
+numpy