noamrot committed
Commit a3b147e
1 Parent(s): 5e1ec9b
Files changed (1)
  1. app.py +12 -3
app.py CHANGED
@@ -20,8 +20,8 @@ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
 image_size = 384
 transform = transforms.Compose([
-    transforms.Resize((image_size,image_size),interpolation=InterpolationMode.BICUBIC),
     transforms.ToTensor(),
+    transforms.Resize((image_size,image_size),interpolation=InterpolationMode.BICUBIC),
     transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
 ])
 
@@ -31,6 +31,7 @@ model.eval()
 model = model.to(device)
 
 def inference(raw_image):
+    # raw_image = torch.tensor(raw_image)
     image = transform(raw_image).unsqueeze(0).to(device)
     with torch.no_grad():
         caption = model.generate(image, sample=False, num_beams=1, max_length=60, min_length=5)
@@ -40,8 +41,16 @@ def inference(raw_image):
 inputs = [gr.Image(type='pil', interactive=False),]
 outputs = gr.outputs.Textbox(label="Caption")
 
-title = "FuseCap"
 description = "Gradio demo for FuseCap: Leveraging Large Language Models to Fuse Visual Data into Enriched Image Captions. This demo features a BLIP-based model, trained using FuseCap."
 
 article = "place holder"
-gr.Interface(inference, inputs, outputs, title=title, description=description, article=article, examples=[['birthday_dog.jpeg']]).launch(enable_queue=True)
+iface = gr.Interface(fn=inference,
+                     inputs="image",
+                     outputs="text",
+                     title="FuseCap",
+                     description=description,
+                     article=article,
+                     examples=[['birthday_dog.jpeg']],
+                     enable_queue=True)
+iface.launch()
+# gr.Interface(inference, inputs, outputs, title=title, description=description, article=article, examples=[['birthday_dog.jpeg']]).launch(enable_queue=True)
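For context, here is a minimal, self-contained sketch of the preprocessing path this commit ends up with. It is an illustration, not part of the commit: the imports and the stand-in input image are assumptions, and the BLIP model loaded earlier in app.py is omitted so only the transform is exercised. Note that torchvision's Resize also accepts tensor inputs (torchvision >= 0.8), which is why moving it after ToTensor still works.

import torch
from torchvision import transforms
from torchvision.transforms import InterpolationMode
from PIL import Image

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

image_size = 384
# Post-commit order: convert to a tensor first, then resize the tensor.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
    transforms.Normalize((0.48145466, 0.4578275, 0.40821073),
                         (0.26862954, 0.26130258, 0.27577711)),
])

raw_image = Image.new('RGB', (640, 480))   # assumption: stand-in for the Gradio input
image = transform(raw_image).unsqueeze(0).to(device)
print(image.shape)                         # torch.Size([1, 3, 384, 384])

Either ordering yields a normalized 384x384 batch; the pre-commit version resized the PIL image before conversion, while the post-commit version resizes the tensor, which can differ slightly in interpolation results but is otherwise equivalent here.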