from typing import Dict, List, Any
from PIL import Image
import torch
import base64
from io import BytesIO
from transformers import CLIPProcessor, CLIPModel

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class EndpointHandler:
    def __init__(self, path=""):
        # Load the CLIP ViT-L/14 336px checkpoint once at startup; inference runs on GPU when available.
        self.model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14-336").to(device)
        self.processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14-336")

    def __call__(self, data: Dict[str, Any]) -> List[float]:
        inputs = data.pop("inputs", data)
        
        if "image" in inputs:
            # decode base64 image to PIL
            image = Image.open(BytesIO(base64.b64decode(inputs['image'])))
            inputs = self.processor(images=image, text=None, return_tensors="pt", padding=True).to(device)
            image_embeds = self.model.get_image_features(
                pixel_values=inputs["pixel_values"]
            )
    
            return image_embeds[0].tolist()
        if "text" in inputs:
            text = inputs['text']
            inputs = self.processor(images=None, text=text, return_tensors="pt", padding=True).to(device)
            
            text_embeds = self.model.get_text_features(
                input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"]
            )
    
            return text_embeds[0].tolist()

        raise ValueError("Payload must contain either an 'image' (base64 string) or a 'text' field.")
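

# Usage sketch (illustrative, with assumptions): a minimal local smoke test that
# calls the handler the way the Inference Endpoints runtime would, with a
# {"inputs": {...}} payload. The sample text and the synthetic blank image below
# are placeholders, not part of the handler contract.
if __name__ == "__main__":
    handler = EndpointHandler()

    # Text query -> 768-dimensional CLIP text embedding.
    text_embedding = handler({"inputs": {"text": "a photo of a cat"}})
    print(len(text_embedding))

    # Image query -> 768-dimensional CLIP image embedding. The image travels as a
    # base64 string, matching the decoding performed in __call__.
    buffer = BytesIO()
    Image.new("RGB", (336, 336), color="white").save(buffer, format="PNG")
    encoded = base64.b64encode(buffer.getvalue()).decode("utf-8")
    image_embedding = handler({"inputs": {"image": encoded}})
    print(len(image_embedding))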