Spaces:
Running
Running
Add application file
Browse files
app.py
ADDED
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Gradio demo app: ASL sign-language action classification.

Feeds an uploaded sign-language video clip to ``r_holistic`` (from the local
``model`` module), which — per the description text below — returns a
landmark-annotated video plus the predicted sign label.
"""
import gradio as gr

from model import r_holistic

# UI text is intentionally in Chinese (the app's target audience);
# runtime strings are kept exactly as authored.
title = '手语动作分类'
description = "此分类模型可以识别250个[ASL](https://www.lifeprint.com/)手语动作\
并将其转化为特定的标签, 标签列表见链接[sign_to_prediction_index_map.json](sign_to_prediction_index_map.json), \
大家可以使用示例视频进行测试, 也可以根据列表下载或模拟相应的手语视频测试输出.\
\n工作流程:\
\n 1. landmark提取, 我使用了[ MediaPipe Holistic Solution](https://ai.google.dev/edge/mediapipe/solutions/vision/holistic_landmarker)进行landmark提取.\
\n 2. 利用landmark进行手语识别, 此部分模型是我自己搭建并训练的, 主体框架为cnn和transform,此模型在测试数据集上精度在90%以上."

# Output components: the landmark-annotated video and the predicted label.
output_video_file = gr.Video(label="landmark输出")
output_text = gr.Textbox(label="手语预测结果")

# NOTE(review): the original script also built two gr.Slider components
# (labels 'detection_confidence' and 'tracking_confidence') that were never
# passed to the Interface — dead code, removed here. Re-add them to `inputs`
# only if `r_holistic` is extended to accept those thresholds.

iface = gr.Interface(
    fn=r_holistic,
    inputs=[gr.Video(sources=None, label="手语视频片段")],
    outputs=[output_video_file, output_text],
    title=title,
    description=description,
    # Example clips expected to sit next to this file in the repo.
    examples=['book.mp4', 'book2.mp4', 'chair1.mp4', 'chair2.mp4'],
)

if __name__ == "__main__":
    # Guarded so importing this module doesn't start a server.
    # share=True requests a public Gradio link (a no-op on hosted Spaces).
    iface.launch(share=True)