# Dependencies, see also requirements.txt ;)
import gradio as gr
import cv2
import numpy as np
import os
from scenedetect import open_video, SceneManager
from scenedetect.detectors import ContentDetector
from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip
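# NOTE: this assumes PySceneDetect >= 0.6 (the open_video / SceneManager API) and
# moviepy 1.x, where ffmpeg_extract_subclip still accepts the targetname= keyword.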
def convert_to_tuple(values):
    return tuple(values)
def clear_app():
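    # Reset the five bound components: video input, threshold slider (back to 27), JSON, file and gallery outputs.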
    return None, 27, None, None, None
def find_scenes(video_path, threshold):
    # file name without extension
    filename = os.path.splitext(os.path.basename(video_path))[0]

    # Open our video, create a scene manager, and add a detector.
    video = open_video(video_path)
    scene_manager = SceneManager()
    scene_manager.add_detector(
        ContentDetector(threshold=threshold))

    # Start detection
    scene_manager.detect_scenes(video, show_progress=True)
    scene_list = scene_manager.get_scene_list()
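    # scene_list is a list of (start, end) FrameTimecode pairs, one pair per detected shot.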
    # Push the list of scenes into data_outputs
    data_outputs.append(scene_list)
    gradio_components_outputs.append("json")
    #print(scene_list)

    timecodes = []
    if not scene_list:
        raise ValueError("No scenes were detected in this video.")
    timecodes.append({"title": filename + ".mp4", "fps": scene_list[0][0].get_framerate()})
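    # timecodes[0] is a header entry (file title + fps); one dict per detected shot follows.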
    shots = []
    stills = []

    # For each shot found, set entry and exit points as seconds from frame number
    # Then split video into chunks and store them into shots List
    # Then extract first frame of each shot as thumbnail for the gallery
    for i, shot in enumerate(scene_list):
        # STEP 1
        # Get timecode in seconds
        framerate = shot[0].get_framerate()
        shot_in = shot[0].get_frames() / framerate
        shot_out = shot[1].get_frames() / framerate

        tc_in = shot[0].get_timecode()
        tc_out = shot[1].get_timecode()

        frame_in = shot[0].get_frames()
        frame_out = shot[1].get_frames()

        timecode = {"tc_in": tc_in, "tc_out": tc_out, "frame_in": frame_in, "frame_out": frame_out}
        timecodes.append(timecode)
        # Set name template for each shot
        target_name = "shot_" + str(i+1) + "_" + str(filename) + ".mp4"

        # Split chunk
        ffmpeg_extract_subclip(video_path, shot_in, shot_out, targetname=target_name)
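        # NOTE: in moviepy 1.x this is a stream copy (no re-encode), so cut points may snap to the nearest keyframe.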
        # Push chunk into shots List
        shots.append(target_name)

        # Push each chunk into data_outputs
        data_outputs.append(target_name)
        gradio_components_outputs.append("video")
        # —————————————————————————————————————————————————
        # STEP 2
        # extract first frame of each shot with cv2
        vid = cv2.VideoCapture(video_path)
        fps = vid.get(cv2.CAP_PROP_FPS)
        print('frames per second =', fps)

        frame_id = shot[0].get_frames()  # value from scene_list from step 1
        vid.set(cv2.CAP_PROP_POS_FRAMES, frame_id)
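        # CAP_PROP_POS_FRAMES seeks to the shot's first frame; read() returns (success_flag, frame).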
        ret, frame = vid.read()
        vid.release()

        # Save frame as PNG file
        img = str(frame_id) + '_screenshot.png'
        cv2.imwrite(img, frame)

        # Push image into stills List
        stills.append((img, 'shot ' + str(i+1)))
    # Push the list of video shots into data_outputs for the Gradio file component
    data_outputs.append(shots)
    gradio_components_outputs.append("file")

    # Push the list of still images into data_outputs
    data_outputs.append(stills)
    gradio_components_outputs.append("gallery")

    # This tuple would have been used as the Gradio outputs,
    # if the number of outputs could be set after the interface launches.
    # That's not possible (yet?).
    results = convert_to_tuple(data_outputs)
    print(results)
    # Return the list of shots as JSON, the list of video chunks, and the list of still images.
    # It would be nice to return the full `results` tuple as outputs,
    # but the number of chunks found is not fixed:
    # return results
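    # Illustrative example (hypothetical values) of the JSON returned to the gr.JSON output:
    # [
    #   {"title": "my_clip.mp4", "fps": 25.0},
    #   {"tc_in": "00:00:00.000", "tc_out": "00:00:04.160", "frame_in": 0, "frame_out": 104},
    #   {"tc_in": "00:00:04.160", "tc_out": "00:00:09.000", "frame_in": 104, "frame_out": 225}
    # ]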
    return timecodes, shots, stills
# —————————————————————————————————————————————————
# SET DATA AND COMPONENT OUTPUTS
# This gets filled like this:
# data_outputs = [ [List from detection], "video_chunk_n0.mp4", "video_chunk_n1.mp4", ... , "video_chunk_n.mp4", [List of video filepaths to download], [List of still images from each shot found] ]
data_outputs = []

# This gets filled like this:
# gradio_components_outputs = [ "json", "video", "video", ... , "video", "file", "gallery" ]
gradio_components_outputs = []
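# NOTE: these module-level lists are never cleared between runs, so they accumulate
# results across calls to find_scenes(); they only feed the printed `results` tuple.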
# SET OUTPUTS
# It would be nice if the number of outputs could be set after the interface launches,
# because we do not know in advance how many shots will be detected:
# gradio_components_outputs = [ "json", "video", "video", ... , "video", "file", "gallery" ]
# outputs = gradio_components_outputs
# ANOTHER SOLUTION WOULD BE USING A (FUTURE ?) "VIDEO GALLERY" GRADIO COMPONENT BUILT FROM A LIST :)
with gr.Blocks() as demo:
    with gr.Column():
        gr.Markdown("""
        # Scene Edit Detection
        Copy of @fffiloni's Gradio demo of PySceneDetect.
        Automatically find all the shots in a video sequence.
        """)
        with gr.Row():
            with gr.Column():
                video_input = gr.Video(sources="upload", format="mp4", label="Video Sequence", mirror_webcam=False)
                threshold = gr.Slider(label="Threshold for frame-to-frame content comparison: if exceeded, a scene cut is triggered. Default: 27.0", minimum=15.0, maximum=40.0, value=27.0)
                with gr.Row():
                    clear_button = gr.Button(value="Clear")
                    run_button = gr.Button(value="Submit", variant="primary")
            with gr.Column():
                json_output = gr.JSON(label="Shots detected")
                file_output = gr.File(label="Downloadable Shots")
                gallery_output = gr.Gallery(label="Still Images from each shot", columns=3)
    run_button.click(fn=find_scenes, inputs=[video_input, threshold], outputs=[json_output, file_output, gallery_output])
    clear_button.click(fn=clear_app, inputs=None, outputs=[video_input, threshold, json_output, file_output, gallery_output])
if __name__ == "__main__":
    demo.launch(debug=True)
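# To run locally (assuming this file is saved as app.py and the dependencies from
# requirements.txt are installed):
#   python app.py
# debug=True keeps the launch call blocking and prints errors/tracebacks to the console.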