sitammeur committed
Commit 5c41a43
1 Parent(s): c8cff61

Update app.py

Files changed (1):
  1. app.py +5 -33
app.py CHANGED
@@ -1,34 +1,6 @@
 # Importing the requirements
 import gradio as gr
-from transformers import BlipProcessor, BlipForQuestionAnswering
-
-# Load the model and processor
-processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base")
-model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
-
-
-# Function to answer the question
-def answer_question(image, text):
-    """
-    Generates an answer to a given question based on the provided image and text.
-
-    Args:
-        image (str): The path to the image file.
-        text (str): The question text.
-
-    Returns:
-        str: The generated answer to the question.
-    """
-
-    # Process the inputs and generate the ids
-    inputs = processor(images=image, text=text, return_tensors="pt")
-    generated_ids = model.generate(**inputs, max_length=50)
-
-    # Decode the generated IDs
-    generated_answer = processor.batch_decode(generated_ids, skip_special_tokens=True)
-
-    # Return the generated answer
-    return generated_answer[0]
+from model import answer_question
 
 
 # Image and text inputs for the interface
@@ -40,14 +12,14 @@ answer = gr.Textbox(label="Predicted answer")
 
 # Examples for the interface
 examples = [
-    ["cat.jpg", "How many cats are there?"],
-    ["dog.jpg", "What color is the dog?"],
-    ["bird.jpg", "What is the bird doing?"],
+    ["images/cat.jpg", "How many cats are there?"],
+    ["images/dog.jpg", "What color is the dog?"],
+    ["images/bird.jpg", "What is the bird doing?"],
 ]
 
 # Title, description, and article for the interface
 title = "Visual Question Answering"
-description = "Gradio Demo for the Salesforce BLIP VQA model. This model can answer questions about images in natural language. To use it, upload your photo, type a question, click 'submit', or click one of the examples to load them. Read more at the links below."
+description = "Gradio Demo for the Salesforce BLIP VQA model. This model can answer questions about images in natural language. To use it, simply upload your image and type a question and click 'submit', or click one of the examples to load them. Read more at the links below."
 article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2201.12086' target='_blank'>BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation</a> | <a href='https://huggingface.co/Salesforce/blip-vqa-base' target='_blank'>Model Page</a></p>"
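After this commit, app.py only imports answer_question from a new model module, so the model loading and inference logic deleted above presumably moved there. The model.py file itself is not part of this commit, so the following is only a minimal sketch of what it would contain if the removed lines were moved over unchanged:

# model.py -- assumed contents, reconstructed from the lines removed from app.py
from transformers import BlipProcessor, BlipForQuestionAnswering

# Load the BLIP VQA processor and model once at import time
processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base")
model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")


def answer_question(image, text):
    """
    Generates an answer to a given question based on the provided image and text.
    """
    # Process the inputs and generate the answer token ids
    inputs = processor(images=image, text=text, return_tensors="pt")
    generated_ids = model.generate(**inputs, max_length=50)

    # Decode the generated ids and return the answer string
    generated_answer = processor.batch_decode(generated_ids, skip_special_tokens=True)
    return generated_answer[0]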
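The hunk context also shows answer = gr.Textbox(label="Predicted answer") alongside the examples, title, description, and article variables, which are presumably passed to a gr.Interface further down in app.py. That part of the file is outside this diff, so the sketch below is only an assumption about the usual Gradio wiring; the input variable names image and question are invented for illustration:

# Hypothetical interface wiring -- this part of app.py is not shown in the diff
import gradio as gr
from model import answer_question  # helper assumed to live in model.py (see sketch above)

# Inputs and output; image/question are assumed names, answer appears in the hunk context
image = gr.Image(type="pil", label="Image")
question = gr.Textbox(label="Question")
answer = gr.Textbox(label="Predicted answer")

# Examples, title, description, and article as defined in the diff (description/article abridged)
examples = [
    ["images/cat.jpg", "How many cats are there?"],
    ["images/dog.jpg", "What color is the dog?"],
    ["images/bird.jpg", "What is the bird doing?"],
]
title = "Visual Question Answering"
description = "Gradio Demo for the Salesforce BLIP VQA model."
article = "<p style='text-align: center'>...</p>"

# Standard Gradio Interface: one callable, its inputs, its output, plus metadata
demo = gr.Interface(
    fn=answer_question,
    inputs=[image, question],
    outputs=answer,
    examples=examples,
    title=title,
    description=description,
    article=article,
)
demo.launch()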