"""Gradio app: search YouTube video frames with CLIP.

Pipeline: pick a downloadable format -> download the video -> extract frames
at a fixed sampling interval (multiprocess) -> embed frames and the text
query with CLIP -> show the best-matching frames with timestamps.
"""
import datetime
import os
import shutil
from functools import partial
from multiprocessing.pool import Pool
from pathlib import Path

import cv2
import gradio as gr
import numpy as np
import torch
import youtube_dl
from PIL import Image, ImageDraw, ImageFont

import clip

# Load the CLIP model and its preprocessing transform once at startup.
device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/32")


def select_video_format(url, format_note='480p', ext='mp4'):
    """Choose a downloadable format id for *url*.

    Prefers *format_note* (e.g. '480p'); if unavailable, falls back to the
    first available entry in a fixed list of lower resolutions.

    Args:
        url: YouTube video URL.
        format_note: preferred resolution label.
        ext: required container extension.

    Returns:
        (format_id, fps) of the selected format; fps may be None if the
        extractor does not report it.

    Raises:
        ValueError: if no acceptable format exists.
    """
    defaults = ['480p', '360p', '240p', '144p']
    with youtube_dl.YoutubeDL({}) as ydl:
        info_dict = ydl.extract_info(url, download=False)
    formats = info_dict.get('formats') or []
    available_format_notes = {f.get('format_note') for f in formats}
    if format_note not in available_format_notes:
        fallbacks = [d for d in defaults if d in available_format_notes]
        if not fallbacks:
            # Previously this raised a bare IndexError; fail with context.
            raise ValueError(
                f'no suitable resolution found; available: {available_format_notes}')
        format_note = fallbacks[0]
    candidates = [
        f for f in formats
        if f.get('format_note') == format_note
        and f.get('ext') == ext
        # av01 (AV1) streams are often undecodable by OpenCV builds.
        and (f.get('vcodec') or '').split('.')[0] != 'av01'
    ]
    if not candidates:
        raise ValueError(f'no {ext} format with note {format_note!r} found')
    fmt = candidates[0]
    format_id = fmt.get('format_id', None)
    fps = fmt.get('fps', None)
    print(f'format selected: {fmt}')
    return (format_id, fps)


def download_video(url, format_id):
    """Download *url* in format *format_id*; return the local file name.

    Raises:
        youtube_dl.DownloadError: if the download fails. (The original code
        swallowed the error, then crashed with UnboundLocalError on the
        return value; re-raising preserves the real failure.)
    """
    # testing
    print(f"testing...all the files in local directory: {os.listdir('.')}")
    ydl_opts = {'format': format_id, 'outtmpl': "%(id)s.%(ext)s"}
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        try:
            ydl.cache.remove()
            meta = ydl.extract_info(url)
        except youtube_dl.DownloadError as error:
            print(f'error with download_video function: {error}')
            raise
    save_location = meta['id'] + '.' + meta['ext']
    return save_location


def process_video_parallel(video, skip_frames, dest_path, num_processes, process_number):
    """Worker: write every *skip_frames*-th frame of this worker's slice.

    Each of *num_processes* workers handles a contiguous slice of frames;
    worker *process_number* covers
    [frames_per_process * process_number, frames_per_process * (process_number + 1)).
    Frames are saved as <dest_path>/<frame_index>.jpg so a numeric sort
    restores chronological order.
    """
    cap = cv2.VideoCapture(video)
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    frames_per_process = total_frames // num_processes
    count = frames_per_process * process_number
    # Bug fix: seek to this worker's slice. Previously every worker decoded
    # from frame 0, so workers > 0 saved frames under the wrong indices.
    cap.set(cv2.CAP_PROP_POS_FRAMES, count)
    end = frames_per_process * (process_number + 1)
    print(f"worker: {process_number}, process frames {count} ~ {end} \n"
          f"total number of frames: {total_frames} \n"
          f"video: {video}; isOpen? : {cap.isOpened()}")
    while count < end:
        ret, frame = cap.read()
        if not ret:
            break
        count += 1
        if (count - frames_per_process * process_number) % skip_frames == 0:
            cv2.imwrite(f"{dest_path}/{count}.jpg", frame)
    cap.release()


def vid2frames(url, sampling_interval=1, ext='mp4'):
    """Download the video at *url* and extract one frame per *sampling_interval* seconds.

    Returns:
        (original_images, images): PIL images and their CLIP-preprocessed
        tensors, in chronological order.
    """
    # Recreate the extraction folder from scratch on every run.
    dest_path = Path('frames')
    if dest_path.exists():
        shutil.rmtree(dest_path)
    dest_path.mkdir(parents=True)

    # Figure out the format for download: 480p by default, otherwise the
    # best lower resolution available. (Bug fix: honor the ext parameter,
    # which was previously hard-coded to 'mp4'.)
    format_id, fps = select_video_format(url, format_note='480p', ext=ext)
    video = download_video(url, format_id)

    # One frame every sampling_interval seconds; assume 30 fps when the
    # extractor does not report a rate. Clamp to >= 1 so the worker's
    # modulo never divides by zero.
    if fps is None:
        fps = 30
    skip_frames = max(1, int(fps * sampling_interval))
    print(f'video saved at: {video}, fps:{fps}, skip_frames: {skip_frames}')

    # Extract frames at the sampling interval with multiprocessing.
    # Bug fix: min(os.cpu_count(), 1) was always 1, disabling parallelism;
    # os.cpu_count() may also return None.
    print('extracting frames...')
    n_workers = max(os.cpu_count() or 1, 1)
    # testing..
    cap = cv2.VideoCapture(video)
    print(f'video: {video}; isOpen? : {cap.isOpened()}')
    cap.release()  # was leaked before; only opened for the debug print
    print(f'n_workers: {n_workers}')
    with Pool(n_workers) as pool:
        pool.map(
            partial(process_video_parallel, video, skip_frames, dest_path, n_workers),
            range(n_workers))

    # Read frames back in chronological (numeric) order.
    original_images = []
    images = []
    filenames = sorted(dest_path.glob('*.jpg'), key=lambda p: int(p.stem))
    print(f"extracted {len(filenames)} frames")
    for filename in filenames:
        image = Image.open(filename).convert("RGB")
        original_images.append(image)
        images.append(preprocess(image))
    return original_images, images


def captioned_strip(images, caption=None, times=None, rows=1):
    """Lay out *images* in a column-major grid with an optional caption.

    Image i is pasted at column i // rows, row i % rows; each cell is
    labeled with its entry from *times*.
    """
    increased_h = 0 if caption is None else 30
    w, h = images[0].size[0], images[0].size[1]
    img = Image.new("RGB", (len(images) * w // rows, h * rows + increased_h))
    for i, img_ in enumerate(images):
        img.paste(img_, (i // rows * w, increased_h + (i % rows) * h))
    if caption is not None:
        draw = ImageDraw.Draw(img)
        font = ImageFont.load_default()
        draw.text((20, 3), caption, (255, 255, 255), font=font)
        for i, ts in enumerate(times):
            # Bug fix: the row/column terms were swapped relative to the
            # paste positions above, so timestamps landed on the wrong
            # frames. Match the paste layout (and its caption offset).
            draw.text(
                (i // rows * w + 40,                      # column position
                 increased_h + (i % rows) * h + 3),       # row position
                ts, (255, 255, 255), font=font)
    return img


def run_inference(url, sampling_interval, search_query):
    """Return (title, image strip) of the frames best matching *search_query*."""
    original_images, images = vid2frames(url, sampling_interval)
    image_input = torch.tensor(np.stack(images)).to(device)
    with torch.no_grad():
        image_features = model.encode_image(image_input)
        text_features = model.encode_text(clip.tokenize(search_query).to(device))
    # Cosine similarity between each frame and the query.
    image_features /= image_features.norm(dim=-1, keepdim=True)
    text_features /= text_features.norm(dim=-1, keepdim=True)
    similarity = (100.0 * image_features @ text_features.T)
    # Bug fix: guard against videos that yield fewer than 4 frames.
    k = min(4, similarity.shape[0])
    values, indices = similarity.topk(k, dim=0)
    best_frames = [original_images[ind] for ind in indices]
    # Frame i was sampled ~i * sampling_interval seconds into the video.
    times = [f'{datetime.timedelta(seconds=ind[0].item() * sampling_interval)}'
             for ind in indices]
    image_output = captioned_strip(best_frames, search_query, times, 2)
    title = search_query
    return (title, image_output)


inputs = [gr.inputs.Textbox(label="Give us the link to your youtube video!"),
          gr.Number(5, label='sampling interval (seconds)'),
          gr.inputs.Textbox(label="What do you want to search?")]
outputs = [
    gr.outputs.HTML(label=""),   # To be used as title
    gr.outputs.Image(label=""),
]

gr.Interface(
    run_inference,
    inputs=inputs,
    outputs=outputs,
    title="It Happened One Frame",
    description='A CLIP-based app that search video frame based on text',
    examples=[
        ['https://youtu.be/v1rkzUIL8oc', 1,
         "James Cagney dancing down the stairs"],
        ['https://youtu.be/k4R5wZs8cxI', 1,
         "James Cagney smashes a grapefruit into Mae Clarke's face"],
        ['https://youtu.be/0diCvgWv_ng', 1,
         "little Deborah practicing her ballet while wearing a tutu in empty restaurant"],
    ]
).launch(debug=True, enable_queue=True)