Commit 1f10ad6 (parent: 2677815): adding app with CLIP image segmentation
app.py CHANGED
@@ -45,7 +45,7 @@ def detect_using_clip(image,prompts=[],threshould=0.4):
     detection = outputs.logits[0] # Assuming class index 0
     for i,prompt in enumerate(prompts):
         predicted_image = torch.sigmoid(preds[i][0]).detach().cpu().numpy()
-        predicted_image = np.where(predicted_image>threshould,255,0)
+        predicted_image = np.where(predicted_image>threshould,np.random.randint(128,255),0)
         # extract countours from the image
         lbl_0 = label(predicted_image)
         props = regionprops(lbl_0)
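The edited line is the mask-binarization step: CLIPSeg sigmoid scores above `threshould` (the app's spelling) were previously set to a flat 255, and now get `np.random.randint(128,255)`, a random gray level drawn once per prompt so each prompt's mask is visually distinguishable. A minimal standalone sketch of this threshold-then-label step, using a random array in place of the model's sigmoid output (an assumption for illustration; only the `label`/`regionprops` usage mirrors app.py):

import numpy as np
from skimage.measure import label, regionprops

threshould = 0.4  # spelling follows app.py
# Stand-in for torch.sigmoid(preds[i][0]).detach().cpu().numpy(): values in [0, 1].
predicted_image = np.random.rand(64, 64)

# Foreground pixels get one random gray level in [128, 255); background stays 0.
mask_value = np.random.randint(128, 255)
binary = np.where(predicted_image > threshould, mask_value, 0)

# Connected-component labelling, then one bounding box per region.
lbl_0 = label(binary)
props = regionprops(lbl_0)
for prop in props:
    print(prop.bbox)  # (min_row, min_col, max_row, max_col)

Note that `np.random.randint(128,255)` is evaluated once per `np.where` call, so all foreground pixels of a given prompt share the same intensity; only different prompts get different gray levels.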
@@ -69,7 +69,10 @@ def visualize_images(image,detections,predicted_images,prompt):
     return final_image
 
 def shot(image, labels_text,selected_categoty):
-
+    if "," in labels_text:
+        prompts = labels_text.split(',')
+    else:
+        prompts = [labels_text]
     prompts = list(map(lambda x: x.strip(),prompts))
     model_detections,predicted_images = detect_using_clip(image,prompts=prompts)
 
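The added branch guards the single-label case before splitting on commas. In plain Python, `str.split(',')` already returns a one-element list when no comma is present, so the branch is equivalent to an unconditional split; a standalone check of that behavior (the function name `parse_prompts` is hypothetical, not from app.py):

def parse_prompts(labels_text: str) -> list[str]:
    # Mirrors the added branch plus the strip() mapping that follows it.
    if "," in labels_text:
        prompts = labels_text.split(',')
    else:
        prompts = [labels_text]
    return [p.strip() for p in prompts]

assert parse_prompts("cat, dog") == ["cat", "dog"]
assert parse_prompts("cat") == ["cat"]
assert "cat".split(',') == ["cat"]  # the else-branch is defensive only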