from transformers import (
    Blip2VisionConfig,
    Blip2QFormerConfig,
    OPTConfig,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2VisionModel,
    Blip2Processor,
    AutoProcessor,
)
from PIL import Image
import requests
import torch
import gradio as gr
# Build a default BLIP-2 config and a randomly initialized model from it
# (demonstration only; this model is replaced by the pretrained one below)
config = Blip2Config()
model = Blip2ForConditionalGeneration(config)
config = model.config

# The vision tower can likewise be configured and instantiated on its own
vis_config = Blip2VisionConfig()
model = Blip2VisionModel(vis_config)
config_2 = model.config
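# The three sub-configs can also be combined explicitly into one BLIP-2 config
# via the from_vision_qformer_text_configs helper. A minimal sketch, assuming
# default OPT settings; a model built from it is still randomly initialized:
composed_config = Blip2Config.from_vision_qformer_text_configs(
    vision_config=Blip2VisionConfig(),
    qformer_config=Blip2QFormerConfig(),
    text_config=OPTConfig(),
)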
# Load the pretrained BLIP-2 processor and captioning model.
# Blip2ForConditionalGeneration requires a BLIP-2 checkpoint such as
# 'Salesforce/blip2-opt-2.7b'; a BLIP (v1) checkpoint like
# 'Salesforce/blip-image-captioning-large' will not load with these classes.
processor = AutoProcessor.from_pretrained('Salesforce/blip2-opt-2.7b')
model = Blip2ForConditionalGeneration.from_pretrained('Salesforce/blip2-opt-2.7b')
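# Optional: run on GPU when one is available (assumption: enough memory for
# the ~2.7B-parameter checkpoint; generation on CPU works but is slow)
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)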
def captioning(image):
    # Preprocess the PIL image into pixel values on the model's device
    inputs = processor(images=image, return_tensors='pt').to(device)
    # Generate caption token ids and decode them back into a string
    generated_ids = model.generate(**inputs)
    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
    return image, generated_text
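# Optional local sanity check before wiring up the UI (assumptions: network
# access, and the stock demo image used in the transformers docs). Uncomment
# to try:
# url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
# test_image = Image.open(requests.get(url, stream=True).raw)
# print(captioning(test_image)[1])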
demo = gr.Interface(
    captioning,
    inputs=gr.Image(type="pil"),
    outputs=['image', 'text'],
)
demo.launch()
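# When running outside Spaces, a temporary public link can be requested
# instead: demo.launch(share=True)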