from transformers import AutoModel, CLIPImageProcessor, CLIPTokenizer
import torch
import spaces  # Hugging Face Spaces ZeroGPU support (used via the @spaces.GPU decorator)


model_name_or_path = "BAAI/EVA-CLIP-8B"
image_size = 224  # input resolution expected by the model

def load_model():
    # Image preprocessing reuses the OpenAI CLIP ViT-L/14 processor.
    processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-large-patch14")

    # Load the weights in bfloat16 on the GPU; trust_remote_code is required because
    # the EVA-CLIP architecture ships as custom code in the model repository.
    model = AutoModel.from_pretrained(
        model_name_or_path,
        torch_dtype=torch.bfloat16,
        trust_remote_code=True).to('cuda').eval()

    tokenizer = CLIPTokenizer.from_pretrained(model_name_or_path)
    return model, tokenizer, processor

# Load everything once at startup and keep the handles for inference.
model, tokenizer, processor = load_model()
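
# --- Usage sketch (addition, not part of the original file) ---
# A minimal zero-shot image/text matching example with the objects returned by
# load_model(), assuming the EVA-CLIP remote code exposes encode_image/encode_text
# as in the model card example. "example.jpg" and the captions are placeholders.
from PIL import Image

captions = ["a photo of a cat", "a photo of a dog"]
image = Image.open("example.jpg")  # hypothetical local image

input_ids = tokenizer(captions, return_tensors="pt", padding=True).input_ids.to('cuda')
pixel_values = processor(images=image, return_tensors="pt").pixel_values.to('cuda', dtype=torch.bfloat16)

with torch.no_grad():
    image_features = model.encode_image(pixel_values)
    text_features = model.encode_text(input_ids)
    # Normalize embeddings so the dot product is a cosine similarity.
    image_features = image_features / image_features.norm(dim=-1, keepdim=True)
    text_features = text_features / text_features.norm(dim=-1, keepdim=True)
    probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)

print(probs)  # relative probability that each caption describes the image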