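"""Gradio demo for flamingo-mini image captioning.

Loads the pretrained dhansmair/flamingo-mini checkpoint and serves a small
web UI: upload an image, optionally edit the prompt, and get back a caption.
"""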
import os

import gradio as gr
import torch

from flamingo_mini import FlamingoModel, FlamingoProcessor



EXAMPLES_DIR = 'examples'
# "<image>" is the special token marking where the image sits in the text
# sequence; the model generates the caption after it
DEFAULT_PROMPT = "<image>"

# run on the GPU when one is available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# load the pretrained flamingo-mini checkpoint in inference mode
model = FlamingoModel.from_pretrained('dhansmair/flamingo-mini')
model.to(device)
model.eval()

# the processor handles tokenization; load_vision_processor=True also
# loads the image preprocessing pipeline
processor = FlamingoProcessor(model.config, load_vision_processor=True)

# set up example inputs from the examples/ directory, if present
examples = []
if os.path.isdir(EXAMPLES_DIR):
    for file in os.listdir(EXAMPLES_DIR):
        path = os.path.join(EXAMPLES_DIR, file)
        examples.append([path, DEFAULT_PROMPT])


def predict_caption(image, prompt):
    assert isinstance(prompt, str)

    # embed the image with the vision encoder, then generate a caption
    # conditioned on the visual features and the text prompt
    features = processor.extract_features(image).to(device)
    caption = model.generate_captions(
        processor,
        visual_features=features,
        prompt=prompt,
    )

    # generate_captions returns a list of captions; unwrap the single result
    if isinstance(caption, list):
        caption = caption[0]

    return caption


iface = gr.Interface(
    fn=predict_caption,
    inputs=[gr.Image(type="pil"), gr.Textbox(value=DEFAULT_PROMPT, label="Prompt")],
    examples=examples,
    outputs="text",
)

iface.launch()
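# Hypothetical local smoke test, bypassing the UI (the file name below is
# illustrative only):
#   from PIL import Image
#   print(predict_caption(Image.open('examples/demo.jpg'), DEFAULT_PROMPT))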