datasciencedojo committed
Commit 6308c7b · 1 Parent(s): 214f8c4

Update app.py

Files changed (1)
  1. app.py +47 -5
app.py CHANGED
@@ -5,9 +5,8 @@ import numpy as np
 import math
 import gradio as gr
 
-#cap = cv2.VideoCapture(0)
-detector = HandDetector(maxHands=1)
-classifier = Classifier("ModelFull/keras_model.h5", "ModelFull/labels.txt")
+detector = HandDetector(mode=True,maxHands=1)
+classifier = Classifier("Model/keras_model.h5", "Model/labels.txt")
 
 offset = 20
 imgSize = 300
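
Note: the hunk above doesn't show the imports, but HandDetector and Classifier are the cvzone wrappers from cvzone.HandTrackingModule and cvzone.ClassificationModule, and mode=True puts MediaPipe in static-image mode, which fits single uploaded frames better than the default video-tracking mode. A minimal sketch of how this pair is typically driven, assuming cvzone 1.5+ and the same Model/ files:

import cv2
from cvzone.HandTrackingModule import HandDetector
from cvzone.ClassificationModule import Classifier

detector = HandDetector(mode=True, maxHands=1)   # mode=True: static-image mode
classifier = Classifier("Model/keras_model.h5", "Model/labels.txt")

img = cv2.imread("ex2.jpg")
hands, img = detector.findHands(img)             # hand dicts ('bbox', 'lmList', ...) plus annotated image
if hands:
    prediction, index = classifier.getPrediction(img, draw=False)
    print(prediction, index)                     # per-class scores and the argmax index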
@@ -26,6 +25,30 @@ def sign(img):
         print('hand detected')
         hand = hands[0]
         x, y, w, h = hand['bbox']
+        imlist = hand['lmList']
+        print(imlist)
+        if ((imlist[10][0] < imlist[4][0] < imlist[6][0]) or (imlist[6][0] < imlist[4][0] < imlist[10][0])):
+            if ((imlist[4][1] < imlist[8][1]) and (imlist[4][1] < imlist[12][1])):
+                print('In T')
+                cv2.rectangle(imgOutput, (x-offset, y-offset),(x + w+offset, y + h+offset), (255, 0, 255), 4)
+                imgOutput = cv2.flip(imgOutput,1)
+                cv2.rectangle(imgOutput, (0,30),(80,80), (255, 0, 255), cv2.FILLED)
+                cv2.putText(imgOutput, 'T', (20, 75), cv2.FONT_HERSHEY_COMPLEX, 1.7, (255, 255, 255), 2)
+                return imgOutput
+            else:
+                print('In K')
+                cv2.rectangle(imgOutput, (x-offset, y-offset),(x + w+offset, y + h+offset), (255, 0, 255), 4)
+                imgOutput = cv2.flip(imgOutput,1)
+                cv2.rectangle(imgOutput, (0,30),(80,80), (255, 0, 255), cv2.FILLED)
+                cv2.putText(imgOutput, 'K', (20, 75), cv2.FONT_HERSHEY_COMPLEX, 1.7, (255, 255, 255), 2)
+                return imgOutput
+        if imlist[4][0]>imlist[8][0] and imlist[4][0]>imlist[12][0] and imlist[4][0]>imlist[16][0] and imlist[4][0]>imlist[20][0]:
+            print('In M')
+            cv2.rectangle(imgOutput, (x-offset, y-offset),(x + w+offset, y + h+offset), (255, 0, 255), 4)
+            imgOutput = cv2.flip(imgOutput,1)
+            cv2.rectangle(imgOutput, (0,30),(80,80), (255, 0, 255), cv2.FILLED)
+            cv2.putText(imgOutput, 'M', (20, 75), cv2.FONT_HERSHEY_COMPLEX, 1.7, (255, 255, 255), 2)
+            return imgOutput
 
         imgWhite = np.ones((imgSize, imgSize, 3), np.uint8) * 255
         imgCrop = img[y - offset:y + h + offset, x - offset:x + w + offset]
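
Note: hand['lmList'] is the 21-point MediaPipe hand-landmark list, so the new branch is a geometric pre-filter that runs before the Keras model: index 4 is the thumb tip, 6 and 8 the index-finger PIP joint and tip, 10 and 12 the middle-finger PIP joint and tip, 16 and 20 the ring and pinky tips, with y growing downward in image coordinates. Thumb tip horizontally between the two PIP joints means T (thumb tip above both fingertips) or K; thumb-tip x beyond all four fingertips means M. The same rule, factored into a hypothetical helper to make the geometry explicit:

# Same T/K/M pre-filter as the hunk above, as a helper (name is illustrative).
# MediaPipe indices: 4=thumb tip, 6=index PIP, 8=index tip,
# 10=middle PIP, 12=middle tip, 16=ring tip, 20=pinky tip.
def classify_tkm(lm):
    tx, ty = lm[4][0], lm[4][1]
    if lm[10][0] < tx < lm[6][0] or lm[6][0] < tx < lm[10][0]:
        # Smaller y means higher in the frame.
        return 'T' if ty < lm[8][1] and ty < lm[12][1] else 'K'
    if all(tx > lm[i][0] for i in (8, 12, 16, 20)):
        return 'M'
    return None  # no match: fall through to the CNN classifier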
@@ -53,7 +76,7 @@ def sign(img):
             imgWhite[hGap:hCal + hGap, :] = imgResize
             prediction, index = classifier.getPrediction(imgWhite, draw=False)
 
-
+        cv2.imwrite("check.jpg",imgWhite)
         cv2.rectangle(imgOutput, (x-offset, y-offset),
                       (x + w+offset, y + h+offset), (255, 0, 255), 4)
         imgOutput = cv2.flip(imgOutput,1)
@@ -71,8 +94,22 @@ def sign(img):
     #cv2.imshow("Image", imgOutput)
     return imgOutput
 
+def set_example_image(example: list) -> dict:
+    return gr.inputs.Image.update(value=example[0])
+
 with gr.Blocks() as demo:
     with gr.Tabs():
+        with gr.TabItem('Upload'):
+            with gr.Row():
+                with gr.Column():
+                    img_input = gr.Image(shape=(640,480))
+                    image_button = gr.Button("Submit")
+
+                with gr.Column():
+                    output = gr.Image(shape=(640,480))
+            with gr.Row():
+                example_images = gr.Dataset(components=[img_input],samples=[["ex2.jpg"]])
+
         with gr.TabItem('Webcam'):
             with gr.Row():
                 with gr.Column():
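
Note: gr.Image(shape=...) and gr.Dataset place this on the Gradio 3.x API, and gr.inputs.Image.update reaches the update helper through the deprecated 2.x-style namespace that 3.x still aliased; without the legacy path the handler would return gr.Image.update(value=example[0]). A stripped-down sketch of just the Dataset-to-input wiring, under that Gradio 3.x assumption:

import gradio as gr

def set_example_image(example: list) -> dict:
    # Same effect as the diff's gr.inputs.Image.update, minus the legacy namespace.
    return gr.Image.update(value=example[0])

with gr.Blocks() as demo:
    img_input = gr.Image(shape=(640, 480))
    examples = gr.Dataset(components=[img_input], samples=[["ex2.jpg"]])
    # Clicking a sample row passes that row to the handler; the return
    # value updates img_input.
    examples.click(fn=set_example_image, inputs=[examples], outputs=[img_input])

demo.launch()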
@@ -84,6 +121,11 @@ with gr.Blocks() as demo:
 
     image_button2.click(fn=sign,
                         inputs = img_input2,
-                        outputs = output2)
+                        outputs = output2)
+    image_button.click(fn=sign,
+                       inputs = img_input,
+                       outputs = output)
+    example_images.click(fn=set_example_image,inputs=[example_images],outputs=[img_input])
+
 
 demo.launch(debug=True)
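
Note: since sign() takes and returns a plain image array, the pipeline can be smoke-tested without launching the UI. A minimal check, assuming the ex2.jpg sample referenced above sits next to app.py (and remembering that cv2.imread yields BGR while Gradio feeds sign() RGB):

import cv2

img = cv2.imread("ex2.jpg")            # BGR; colors will differ from the Gradio path
out = sign(img)                        # annotated output frame
cv2.imwrite("out_annotated.jpg", out)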
 