Update handler.py

handler.py (CHANGED, +7 -6)
@@ -1,6 +1,7 @@
 #
 from typing import Dict, List, Any
 from PIL import Image
+import base64
 import torch
 import os
 from io import BytesIO
@@ -34,16 +35,16 @@ class EndpointHandler():
         inputs = data.pop("inputs", data)
         parameters = data.pop("parameters", {})
 
-        raw_images = [Image.open(BytesIO(_img)) for _img in inputs]
+        raw_images = [Image.open(BytesIO(base64.b64decode(_img))) for _img in inputs]
 
-
-
-
+        processed_images = self.processor(images=raw_images, return_tensors="pt")
+        processed_images["pixel_values"] = processed_images["pixel_values"].to(device)
+        processed_images = {**processed_images, **parameters}
 
         with torch.no_grad():
             out = self.model.generate(
-                **
+                **processed_images
             )
         captions = self.processor.batch_decode(out, skip_special_tokens=True)
-
+
         return {"captions": captions}
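The added processed_images["pixel_values"].to(device) line references a bare device name that is not defined in either hunk, so it presumably comes from elsewhere in handler.py (a module-level variable or something set in __init__). A minimal sketch of how that part might look, assuming for illustration a BLIP-style captioning checkpoint; the actual model class and checkpoint are not shown in this diff:

import torch
from transformers import BlipForConditionalGeneration, BlipProcessor

# Module-level device, so the bare `device` used in __call__ resolves.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

class EndpointHandler():
    def __init__(self, path=""):
        # `path` points at the model repository the endpoint serves (assumed here).
        self.processor = BlipProcessor.from_pretrained(path)
        self.model = BlipForConditionalGeneration.from_pretrained(path).to(device)
        self.model.eval()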
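After this change, each entry of "inputs" must be a base64-encoded image rather than raw bytes, "parameters" is merged into the keyword arguments of model.generate, and the response has the shape {"captions": [...]}. A minimal client-side sketch under those assumptions; the endpoint URL, token, and image path below are placeholders:

import base64
import requests

ENDPOINT_URL = "https://<your-endpoint>.endpoints.huggingface.cloud"  # placeholder
HF_TOKEN = "hf_..."                                                   # placeholder

with open("example.jpg", "rb") as f:                                  # placeholder image
    encoded = base64.b64encode(f.read()).decode("utf-8")

payload = {
    "inputs": [encoded],                   # list of base64-encoded images
    "parameters": {"max_new_tokens": 30},  # forwarded to model.generate()
}

response = requests.post(
    ENDPOINT_URL,
    headers={"Authorization": f"Bearer {HF_TOKEN}"},
    json=payload,
)
print(response.json())  # expected shape: {"captions": ["..."]}

Note that base64.b64decode accepts both str and bytes, so sending the encoded string in a JSON payload works with the handler's base64.b64decode(_img) call.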