import torch
import clip
import cv2
import youtube_dl
from PIL import Image, ImageDraw, ImageFont
import os
from functools import partial
from multiprocessing.pool import Pool
import shutil
from pathlib import Path
import numpy as np
import datetime
import gradio as gr
# load model and preprocess
device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/32", device=device)
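# preprocess resizes and center-crops each frame to the 224x224 input
# resolution ViT-B/32 expects and normalizes it into a tensor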
def select_video_format(url, format_note='480p', ext='mp4'):
    # prefer 480p; if unavailable, fall back through progressively lower resolutions
    defaults = ['480p', '360p', '240p', '144p']
    ydl_opts = {}
    ydl = youtube_dl.YoutubeDL(ydl_opts)
    info_dict = ydl.extract_info(url, download=False)
    formats = info_dict.get('formats', None)
    available_format_notes = {f['format_note'] for f in formats}
    if format_note not in available_format_notes:
        format_note = [d for d in defaults if d in available_format_notes][0]
    formats = [f for f in formats if f['format_note'] == format_note and f['ext'] == ext]
    format = formats[0]
    format_id = format.get('format_id', None)
    fps = format.get('fps', None)
    print(f'format selected: {format}')
    return format_id, fps
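# Illustrative example (hypothetical values): for a typical YouTube video,
# select_video_format(url) might return something like ('135', 29.97), i.e.
# the format_id of the 480p mp4 stream and its frame rate.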
def download_video(url, format_id):
    # testing
    print(f"testing...all the files in local directory: {os.listdir('.')}")
    ydl_opts = {
        'format': format_id,
        'outtmpl': "%(id)s.%(ext)s"}
    save_location = None  # stays None if the download fails
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        try:
            ydl.cache.remove()
            meta = ydl.extract_info(url)
            save_location = meta['id'] + '.' + meta['ext']
        except youtube_dl.DownloadError as error:
            print(f'error with download_video function: {error}')
    return save_location
def process_video_parallel(video, skip_frames, dest_path, num_processes, process_number):
    # each worker handles one contiguous chunk of the video's frames
    cap = cv2.VideoCapture(video)
    frames_per_process = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) // num_processes
    count = frames_per_process * process_number
    # jump to this worker's starting frame so the chunks do not overlap
    cap.set(cv2.CAP_PROP_POS_FRAMES, count)
    print(f"worker: {process_number}, process frames {count} ~ {frames_per_process * (process_number + 1)} \n"
          f"total number of frames: {cap.get(cv2.CAP_PROP_FRAME_COUNT)} \n"
          f"video: {video}; isOpen? : {cap.isOpened()}")
    while count < frames_per_process * (process_number + 1):
        ret, frame = cap.read()
        if not ret:
            break
        count += 1
        # keep one frame every skip_frames frames
        if (count - frames_per_process * process_number) % skip_frames == 0:
            filename = f"{dest_path}/{count}.jpg"
            cv2.imwrite(filename, frame)
    cap.release()
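# Worked example of the partitioning above (illustrative numbers): for a
# 3000-frame video with num_processes=4 and skip_frames=30,
# frames_per_process = 750, so worker 2 seeks to frame 1500, reads frames
# 1500-2249, and writes every 30th one: 1530.jpg, 1560.jpg, ..., 2250.jpg.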
def vid2frames(url, sampling_interval=1, ext='mp4'):
    # create a folder for extracted frames - if it already exists, delete and recreate it
    dest_path = Path('frames')
    try:
        dest_path.mkdir(parents=True)
    except FileExistsError:
        shutil.rmtree(dest_path)
        dest_path.mkdir(parents=True)
    # figure out the format for download:
    # select 480p mp4 by default; otherwise fall back to the best available format
    format_id, fps = select_video_format(url, format_note='480p', ext=ext)
    # download the video
    video = download_video(url, format_id)
    # calculate skip_frames, the number of frames between two samples
    try:
        skip_frames = int(fps * sampling_interval)
    except TypeError:
        # fps not reported for this format; assume 30 fps
        skip_frames = int(30 * sampling_interval)
    print(f'video saved at: {video}, fps: {fps}, skip_frames: {skip_frames}')
    # extract video frames at the given sampling interval with multiprocessing
    print('extracting frames...')
    n_workers = os.cpu_count() or 1  # use all available cores; fall back to 1
    # testing..
    cap = cv2.VideoCapture(video)
    print(f'video: {video}; isOpen? : {cap.isOpened()}')
    cap.release()
    print(f'n_workers: {n_workers}')
    with Pool(n_workers) as pool:
        pool.map(partial(process_video_parallel, video, skip_frames, dest_path, n_workers), range(n_workers))
    # read the frames back in frame-number order
    original_images = []
    images = []
    filenames = sorted(dest_path.glob('*.jpg'), key=lambda p: int(p.stem))
    print(f"extracted {len(filenames)} frames")
    for filename in filenames:
        image = Image.open(filename).convert("RGB")
        original_images.append(image)
        images.append(preprocess(image))
    return original_images, images
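# Minimal usage sketch for vid2frames (hypothetical URL; kept as a comment
# so nothing runs at import time):
#
#   originals, tensors = vid2frames('https://youtu.be/<video_id>', sampling_interval=2)
#   # originals: PIL.Image frames, roughly one every 2 seconds
#   # tensors:   the same frames preprocessed into CLIP-ready tensors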
def captioned_strip(images, caption=None, times=None, rows=1):
    increased_h = 0 if caption is None else 30
    w, h = images[0].size[0], images[0].size[1]
    img = Image.new("RGB", (len(images) * w // rows, h * rows + increased_h))
    # paste images column-major: fill each column top-to-bottom, then move right
    for i, img_ in enumerate(images):
        img.paste(img_, (i // rows * w, increased_h + (i % rows) * h))
    if caption is not None:
        draw = ImageDraw.Draw(img)
        font = ImageFont.load_default()
        # a TrueType font (e.g. LiberationMono-Bold) could be substituted here
        draw.text((20, 3), caption, (255, 255, 255), font=font)
        # draw each timestamp on its own frame, matching the paste positions above
        for i, ts in enumerate(times):
            draw.text((
                i // rows * w + 40,    # column position
                (i % rows) * h + 33),  # row position
                ts,
                (255, 255, 255), font=font)
    return img
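# Layout example: with 4 images of size (w, h) and rows=2, the canvas is
# (2*w, 2*h + 30); image 0 is pasted at (0, 30), image 1 at (0, 30 + h),
# image 2 at (w, 30) and image 3 at (w, 30 + h) - columns fill
# top-to-bottom before moving right, and each timestamp lands on its frame.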
def run_inference(url, sampling_interval, search_query):
    original_images, images = vid2frames(url, sampling_interval)
    image_input = torch.tensor(np.stack(images)).to(device)
    print("testing.. created image_input")
    with torch.no_grad():
        image_features = model.encode_image(image_input)
        text_features = model.encode_text(clip.tokenize(search_query).to(device))
    # L2-normalize, then take the scaled dot product = cosine similarity per frame
    image_features /= image_features.norm(dim=-1, keepdim=True)
    text_features /= text_features.norm(dim=-1, keepdim=True)
    similarity = 100.0 * image_features @ text_features.T
    values, indices = similarity.topk(4, dim=0)
    print("testing.. selected best frames")
    best_frames = [original_images[ind] for ind in indices]
    # frame i in the sorted list was sampled roughly i * sampling_interval seconds in
    times = [f'{datetime.timedelta(seconds=ind[0].item() * sampling_interval)}' for ind in indices]
    print("testing... before captioned_strip func")
    image_output = captioned_strip(best_frames, search_query, times, 2)
    title = search_query
    print("testing... after captioned_strip func")
    return title, image_output
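# run_inference can also be exercised without the Gradio UI, e.g.
# (kept as a comment so it does not run on import):
#
#   title, strip = run_inference('https://youtu.be/v1rkzUIL8oc', 1,
#                                'James Cagney dancing down the stairs')
#   strip.save('result.jpg')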
inputs = [gr.inputs.Textbox(label="Give us the link to your youtube video!"),
          gr.inputs.Number(default=5, label='sampling interval (seconds)'),
          gr.inputs.Textbox(label="What do you want to search?")]
outputs = [
    gr.outputs.HTML(label=""),   # used as the title
    gr.outputs.Image(label=""),
]
gr.Interface(
    run_inference,
    inputs=inputs,
    outputs=outputs,
    title="It Happened One Frame",
    description='A CLIP-based app that searches video frames matching a text query',
    examples=[
        ['https://youtu.be/v1rkzUIL8oc', 1, "James Cagney dancing down the stairs"],
        ['https://youtu.be/k4R5wZs8cxI', 1, "James Cagney smashes a grapefruit into Mae Clarke's face"]
    ]
).launch(debug=True, enable_queue=True)