Update app.py
app.py CHANGED
@@ -13,23 +13,11 @@ from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip
title = "Scene Edit Detection"
description = "Gradio demo of PySceneDetect: automatically find every shot in a video sequence, then save each shot as a split mp4 video chunk to download"

+# —————————————————————————————————————————————————
+
# SET INPUTS
video_input = gr.Video(source="upload", format="mp4")

-# SET DATA AND COMPONENTS OUTPUTS
-# This would be filled like this:
-# data_outputs = [ [List from detection], "video_chunk_n0.mp4", "video_chunk_n1.mp4", ... , "video_chunk_n.mp4", [List of video filepath to download], [List of still images from each shot found] ]
-data_outputs = []
-
-# This would be filled like this:
-# gradio_components_outputs = [ "json", "video", "video", ... , "video", "file", "gallery" ]
-gradio_components_outputs = []
-
-# This would be nice if number of outputs could be set after Interface Launch:
-# gradio_components_outputs = [ "json", "video", "video", ... , "video", "file", "gallery" ]
-# outputs = gradio_components_outputs
-working_outputs = ["json", "file", "gallery"]
-
# —————————————————————————————————————————————————

def convert_to_tuple(list):
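The description and the removed comments describe the pipeline: PySceneDetect finds every shot in the uploaded video, and each shot is cut into its own mp4 chunk (the `video_chunk_n0.mp4`, `video_chunk_n1.mp4`, ... naming). A minimal sketch of that detection-and-split step, assuming the classic scenedetect `VideoManager`/`SceneManager` API and moviepy 1.x's `targetname` keyword for the `ffmpeg_extract_subclip` helper imported in the hunk header; `detect_and_split` and its naming are illustrative, not code from this Space:

```python
from scenedetect import SceneManager, VideoManager
from scenedetect.detectors import ContentDetector
from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip


def detect_and_split(video_path, threshold=27.0):
    # Detect shot boundaries with PySceneDetect's content detector.
    video_manager = VideoManager([video_path])
    scene_manager = SceneManager()
    scene_manager.add_detector(ContentDetector(threshold=threshold))
    video_manager.start()
    scene_manager.detect_scenes(frame_source=video_manager)
    scene_list = scene_manager.get_scene_list()  # list of (start, end) FrameTimecode pairs
    video_manager.release()

    # Cut one mp4 chunk per detected shot.
    chunk_paths = []
    for i, (start, end) in enumerate(scene_list):
        target = f"video_chunk_n{i}.mp4"  # follows the naming used in the removed comment
        ffmpeg_extract_subclip(video_path, start.get_seconds(), end.get_seconds(), targetname=target)
        chunk_paths.append(target)
    return scene_list, chunk_paths
```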
@@ -123,4 +111,26 @@ def find_scenes(video_path, threshold=27.0):

# —————————————————————————————————————————————————

-
+# SET DATA AND COMPONENTS OUTPUTS
+
+# This would be filled like this:
+# data_outputs = [ [List from detection], "video_chunk_n0.mp4", "video_chunk_n1.mp4", ... , "video_chunk_n.mp4", [List of video filepath to download], [List of still images from each shot found] ]
+data_outputs = []
+
+# This would be filled like this:
+# gradio_components_outputs = [ "json", "video", "video", ... , "video", "file", "gallery" ]
+gradio_components_outputs = []
+
+
+# SET OUTPUTS
+
+# This would be nice if the number of outputs could be set after Interface launch,
+# because we do not know how many shots will be detected:
+# gradio_components_outputs = [ "json", "video", "video", ... , "video", "file", "gallery" ]
+# outputs = gradio_components_outputs
+
+outputs = ["json", "file", "gallery"]
+
+# —————————————————————————————————————————————————
+
+gr.Interface(fn=find_scenes, inputs=video_input, outputs=outputs, title=title, description=description).launch()
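The added comments explain why the commit settles on a fixed outputs = ["json", "file", "gallery"]: the number of shots, and therefore of per-shot videos, is only known after detection runs. A hedged sketch of a return value that would line up with those three components; `build_outputs`, the cv2-based still extraction, and the `shot_<i>.png` naming are assumptions for illustration, not code from app.py:

```python
import cv2  # illustrative choice for grabbing one still frame per shot


def build_outputs(scene_list, chunk_paths):
    """Shape detection results for outputs = ["json", "file", "gallery"].

    scene_list: PySceneDetect (start, end) FrameTimecode pairs.
    chunk_paths: the mp4 chunks cut from the input video.
    """
    # "json": a serialisable summary of every detected shot.
    shots_json = [
        {"shot": i, "start": start.get_timecode(), "end": end.get_timecode()}
        for i, (start, end) in enumerate(scene_list)
    ]

    # "gallery": one still image per shot (here, the first frame of each chunk).
    stills = []
    for i, path in enumerate(chunk_paths):
        cap = cv2.VideoCapture(path)
        ok, frame = cap.read()
        cap.release()
        if ok:
            still_path = f"shot_{i}.png"
            cv2.imwrite(still_path, frame)
            stills.append(still_path)

    # "file": the list of video filepaths to download, per the comment in the diff.
    return shots_json, chunk_paths, stills
```

Whether the file component accepts a list of paths directly depends on the Gradio version in use.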