vishnun committed on
Commit
9438682
·
1 Parent(s): 804925c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -5
app.py CHANGED
@@ -1,12 +1,17 @@
1
- import torch
2
  import streamlit as st
3
  import numpy as np
4
  from PIL import Image
5
  from transformers import CLIPProcessor, CLIPModel, YolosImageProcessor, YolosForObjectDetection
 
6
 
7
  st.title("CLIP & CROP")
8
  st.markdown("**Extract sections of images from your image by using OpenAI's CLIP and Facebooks Detr implemented on HuggingFace Transformers, if the similarity score is not so much, then please consider the prediction to be void.**")
9
 
 
 
 
 
 
10
  with st.spinner("Models are loading"):
11
  feature_extractor = YolosImageProcessor.from_pretrained("hustvl/yolos-tiny")
12
  dmodel = YolosForObjectDetection.from_pretrained('hustvl/yolos-tiny')
@@ -14,10 +19,6 @@ with st.spinner("Models are loading"):
14
  model = CLIPModel.from_pretrained("openai/clip-vit-base-patch16")
15
  processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch16")
16
 
17
- IMAGE_INPUT = st.file_uploader(type="pil", label="Input image")
18
- TEXT_INPUT = st.text_input(label="Description for section to extracted")
19
- NUMBER_INPUT = st.number_input(value=0.96, label="Threshold percentage score")
20
-
21
  SUBMIT_BUTTON = st.button("SUBMIT")
22
 
23
  def extract_image(image, text, prob, num=1):
 
 
1
  import streamlit as st
2
  import numpy as np
3
  from PIL import Image
4
  from transformers import CLIPProcessor, CLIPModel, YolosImageProcessor, YolosForObjectDetection
5
+ import torch
6
 
7
  st.title("CLIP & CROP")
8
  st.markdown("**Extract sections of images from your image by using OpenAI's CLIP and Facebooks Detr implemented on HuggingFace Transformers, if the similarity score is not so much, then please consider the prediction to be void.**")
9
 
10
+ IMAGE_INPUT = st.file_uploader(type=["jpg", "png"], label="Input image")
11
+ TEXT_INPUT = st.text_input(label="Description for section to extracted")
12
+ NUMBER_INPUT = st.number_input(value=0.96, label="Threshold percentage score")
13
+
14
+
15
  with st.spinner("Models are loading"):
16
  feature_extractor = YolosImageProcessor.from_pretrained("hustvl/yolos-tiny")
17
  dmodel = YolosForObjectDetection.from_pretrained('hustvl/yolos-tiny')
 
19
  model = CLIPModel.from_pretrained("openai/clip-vit-base-patch16")
20
  processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch16")
21
 
 
 
 
 
22
  SUBMIT_BUTTON = st.button("SUBMIT")
23
 
24
  def extract_image(image, text, prob, num=1):