Space status: Runtime error
Commit fc62531 · Parent(s): 1bb85f5
Upload app.py
app.py ADDED
@@ -0,0 +1,89 @@
+import cv2
+from cvzone.HandTrackingModule import HandDetector
+from cvzone.ClassificationModule import Classifier
+import numpy as np
+import math
+import gradio as gr
+
+#cap = cv2.VideoCapture(0)
+detector = HandDetector(maxHands=1)
+classifier = Classifier("Model/keras_model.h5", "Model/labels.txt")
+
+offset = 20
+imgSize = 300
+
+folder = "Data/C"
+counter = 0
+
+labels = ["A", "B"]
+
+
+def sign(img):
+    #img = cv2.imread("sign.jpg")
+    imgOutput = cv2.flip(img.copy(), 1)
+    hands, img = detector.findHands(cv2.flip(img[:, :, ::-1], 1))
+    if hands:
+        print('hand detected')
+        hand = hands[0]
+        x, y, w, h = hand['bbox']
+
+        imgWhite = np.ones((imgSize, imgSize, 3), np.uint8) * 255
+        imgCrop = img[y - offset:y + h + offset, x - offset:x + w + offset]
+
+        imgCropShape = imgCrop.shape
+
+        aspectRatio = h / w
+
+        if aspectRatio > 1:
+            k = imgSize / h
+            wCal = math.ceil(k * w)
+            imgResize = cv2.resize(imgCrop, (wCal, imgSize))
+            imgResizeShape = imgResize.shape
+            wGap = math.ceil((imgSize - wCal) / 2)
+            imgWhite[:, wGap:wCal + wGap] = imgResize
+            prediction, index = classifier.getPrediction(imgWhite, draw=False)
+            print(prediction, index)
+
+        else:
+            k = imgSize / w
+            hCal = math.ceil(k * h)
+            imgResize = cv2.resize(imgCrop, (imgSize, hCal))
+            imgResizeShape = imgResize.shape
+            hGap = math.ceil((imgSize - hCal) / 2)
+            imgWhite[hGap:hCal + hGap, :] = imgResize
+            prediction, index = classifier.getPrediction(imgWhite, draw=False)
+
+
+        cv2.rectangle(imgOutput, (x - offset, y - offset),
+                      (x + w + offset, y + h + offset), (255, 0, 255), 4)
+        imgOutput = cv2.flip(imgOutput, 1)
+        #cv2.rectangle(imgOutput, (x - offset, y - offset-50),
+        #              (x - offset+90, y - offset-50+50), (255, 0, 255), cv2.FILLED)
+        #cv2.putText(imgOutput, labels[index], (x, y -26), cv2.FONT_HERSHEY_COMPLEX, 1.7, (255, 255, 255), 2)
+        cv2.rectangle(imgOutput, (30, 30),
+                      (80, 80), (255, 0, 255), cv2.FILLED)
+        cv2.putText(imgOutput, labels[index], (30, 80), cv2.FONT_HERSHEY_COMPLEX, 1.7, (255, 255, 255), 2)
+
+
+    #cv2.imshow("ImageCrop", imgCrop)
+    #cv2.imshow("ImageWhite", imgWhite)
+
+    #cv2.imshow("Image", imgOutput)
+    return imgOutput
+
+with gr.Blocks() as demo:
+    with gr.Tabs():
+        with gr.TabItem('Webcam'):
+            with gr.Row():
+                with gr.Column():
+                    img_input2 = gr.Webcam()
+                    image_button2 = gr.Button("Submit")
+
+                with gr.Column():
+                    output2 = gr.outputs.Image()
+
+            image_button2.click(fn=sign,
+                                inputs=img_input2,
+                                outputs=output2)
+
+demo.launch(debug=True)
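The Space's "Runtime error" status is consistent with two weak points in this upload. First, the raw crop `img[y - offset:y + h + offset, x - offset:x + w + offset]` produces an empty array whenever the padded bounding box leaves the frame (a negative `y - offset` wraps around under NumPy slicing), and `cv2.resize` raises on an empty input. A minimal guard, as a sketch only — `safe_crop` is a hypothetical helper, not part of this commit:

import numpy as np

def safe_crop(img, x, y, w, h, offset=20):
    # Hypothetical helper: clamp the padded bbox to the frame so the
    # slice can never be empty or wrap around via negative indices.
    H, W = img.shape[:2]
    x1, y1 = max(x - offset, 0), max(y - offset, 0)
    x2, y2 = min(x + w + offset, W), min(y + h + offset, H)
    if x2 <= x1 or y2 <= y1:
        return None  # nothing usable to classify in this frame
    return img[y1:y2, x1:x2]

Inside `sign()`, setting `imgCrop = safe_crop(img, x, y, w, h, offset)` and returning `imgOutput` early when it is `None` would skip classification for such frames instead of crashing. Second, `gr.Webcam()` and `gr.outputs.Image()` are legacy Gradio shortcuts; if the Space resolves a recent Gradio release at build time, both are gone and the app fails at startup. Assuming a current Gradio, the rough equivalents would be `gr.Image(sources=["webcam"])` for the input and `gr.Image()` for the output.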