import sys
import os

# Make the repository root importable before pulling in project modules.
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

import gradio as gr
import cv2
from Yolov5_Deepsort.demo import app_main
css = """
.video-container {
display: flex;
justify-content: center;
align-items: center;
}
"""


def show_video():
    """Return the path of the pre-rendered Manim explainer video."""
    return "DDMDeepsort1.mp4"


def process_video(video):
    """Convert a video to grayscale frame by frame (not wired into the UI below)."""
    cap = cv2.VideoCapture(video)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    out = cv2.VideoWriter('output.mp4', fourcc, 20.0, (width, height))
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        # Convert to grayscale, then back to 3-channel BGR so the writer accepts it.
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        out.write(cv2.cvtColor(gray_frame, cv2.COLOR_GRAY2BGR))
    cap.release()
    out.release()
    return 'output.mp4'
title = "Welcome to DDM DeepSort"
description = "Upload a video to process it using DDM and DeepSORT."

with gr.Blocks(css=css) as demo:
    gr.Interface(
        fn=app_main,
        inputs="video",
        outputs="video",
        title=title,
        description=description,
    )
gr.HTML("""
<h1>Welcome to My Neuroscience Project</h1>
<p>The author is a third-year undergraduate student at the School of Intelligent Science and Technology, Nanjing University, Suzhou Campus.</p>
<h2>Note</h2>
<div style="border: 2px solid rgba(0, 0, 0, 0.1); border-radius: 10px; background-color: rgba(255, 255, 255, 0.8); padding: 20px;">
<p>Since this project uses Hugging Face's free CPU, the processing speed is very slow. In the worst case, even a video with a dozen frames can take several minutes to process. Therefore, if possible, it is recommended to deploy on a device with a better GPU.</p>
<p>Although the YOLOv5 model supports up to 80 classes, my project is primarily focused on autonomous driving. Therefore, objects other than people and cars will be excluded after object detection.</p>
</div>
<div style="border: 2px solid rgba(0, 0, 0, 0.1); border-radius: 10px; background-color: rgba(255, 255, 255, 0.8); padding: 20px;">
<h3>Tips for First-Time Users:</h3>
<ul>
<li>Ensure that the video includes at least people and cars.</li>
<li>It's recommended that the video is not too long, ideally within 10 seconds.</li>
</ul>
</div>
<div style="border: 2px solid rgba(0, 0, 0, 0.1); border-radius: 10px; background-color: rgba(255, 255, 255, 0.8); padding: 20px;">
The following video is a short animation created by the author using Manim to explain the general process.
</div>
""")

    with gr.Row():
        # Raw opening/closing <div> tags in separate gr.HTML components cannot
        # wrap a Gradio component, so attach the CSS class directly instead.
        gr.Video(show_video(), label="Your Video", elem_classes="video-container")
demo.launch()