Realtime Keypoint Detection | Data Science Dojo: a Gradio Space that runs MediaPipe Hands on streamed webcam frames and draws the detected hand landmarks.
import cv2
import gradio as gr
import mediapipe as mp

# MediaPipe drawing helpers and the Hands landmark model.
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_hands = mp.solutions.hands
def fun(img):
    # Gradio delivers each webcam frame as an RGB numpy array.
    with mp_hands.Hands(
            model_complexity=0,
            min_detection_confidence=0.5,
            min_tracking_confidence=0.5) as hands:
        # Mirror the frame for a selfie view and reverse the channel
        # order to BGR, following the usual OpenCV/MediaPipe example flow.
        image = cv2.flip(img[:, :, ::-1], 1)
        # Convert the BGR image back to RGB before processing, since
        # MediaPipe expects RGB input.
        results = hands.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        # Draw the detected landmarks and connections onto the frame.
        if results.multi_hand_landmarks:
            for hand_landmarks in results.multi_hand_landmarks:
                mp_drawing.draw_landmarks(
                    image,
                    hand_landmarks,
                    mp_hands.HAND_CONNECTIONS,
                    mp_drawing_styles.get_default_hand_landmarks_style(),
                    mp_drawing_styles.get_default_hand_connections_style())
        # Undo the mirror and restore RGB channel order for display.
        return cv2.flip(image[:, :, ::-1], 1)
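
# Note: `fun` builds a fresh Hands instance on every frame, which reloads
# the model each call. A sketch of an alternative for streaming (assuming
# the same MediaPipe API) keeps a single instance alive across frames,
# which also lets `min_tracking_confidence` act between consecutive frames:
#
#     hands = mp_hands.Hands(model_complexity=0,
#                            min_detection_confidence=0.5,
#                            min_tracking_confidence=0.5)
#
#     def fun_streaming(img):
#         image = cv2.flip(img[:, :, ::-1], 1)
#         results = hands.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
#         ...  # draw landmarks as above, then flip back and return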
with gr.Blocks(
        title="Realtime Keypoint Detection | Data Science Dojo",
        css="footer {display:none !important} .output-markdown{display:none !important}") as demo:
    with gr.Row():
        with gr.Column():
            # `streaming=True` sends webcam frames continuously; newer
            # Gradio releases use `sources=["webcam"]` instead of `source`.
            webcam = gr.Image(source="webcam", streaming=True)
        with gr.Column():
            output = gr.Image()
    # Re-run `fun` on every streamed frame and show the annotated result.
    webcam.stream(fn=fun, inputs=webcam, outputs=output)

demo.launch(debug=True)
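
# To try the demo locally (assuming the file is saved as app.py and the
# packages below are installed):
#
#     pip install gradio mediapipe opencv-python
#     python app.py
#
# `launch(debug=True)` blocks and surfaces exceptions raised inside `fun`
# in the console, which helps diagnose a Space stuck on "Runtime error".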