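# "It Happened One Frame": a Gradio demo that uses CLIP to find the frames of a
# YouTube video that best match a free-text search query.
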
import torch
import clip
import cv2, youtube_dl
from PIL import Image, ImageDraw, ImageFont
import os
from functools import partial
from multiprocessing.pool import Pool
import shutil
from pathlib import Path
import numpy as np
import datetime
import gradio as gr


# load model and preprocess
device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/32")


def select_video_format(url, format_note='240p', ext='mp4', max_size = 50000000):
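    # pick a downloadable .mp4 format no larger than max_size, preferring format_note
    # and falling back through the resolutions in `defaults`; returns (format, format_id, fps)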
    defaults = ['480p', '360p','240p','144p']
    ydl_opts = {}
    ydl = youtube_dl.YoutubeDL(ydl_opts)
    info_dict = ydl.extract_info(url, download=False)
    formats = info_dict.get('formats', None)
    # filter out formats we can't process
    formats = [f for f in formats if f['ext'] == ext 
               and f['vcodec'].split('.')[0] != 'av01' 
               and f['filesize'] is not None and f['filesize'] <= max_size]
    available_format_notes = set([f['format_note'] for f in formats])
    try:
        if format_note not in available_format_notes:
            format_note = [d for d in defaults if d in available_format_notes][0]
        formats = [f for f in formats if f['format_note'] == format_note]
    
        format = formats[0]
        format_id = format.get('format_id', None)
        fps = format.get('fps', None)
        print(f'format selected: {format}')
    except IndexError as err:
        print(f"can't find a suitable video format; we are not able to process videos larger than {max_size / 1e6:.0f} MB at the moment")
        format, format_id, fps = None, None, None
    return(format, format_id, fps)

# to-do: delete saved videos      
def download_video(url):
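    # download the selected format to videos/<id>.<ext> (pruning older downloads first);
    # returns (fps, save_location), where save_location is None if the download failed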
    # create a "videos" folder for saved videos
    path_videos = Path('videos')
    try:
      path_videos.mkdir(parents=True)
    except FileExistsError:
      pass
    # clear the "videos" folder 
    videos_to_keep = ['v1rkzUIL8oc', 'k4R5wZs8cxI','0diCvgWv_ng']
    if len(list(path_videos.glob('*'))) > 10:
        for path_video in path_videos.glob('*'):
            if path_video.stem not in set(videos_to_keep):
                path_video.unlink()
                print(f'removed video {path_video}')
    # select the format to download for the given video
    # by default select 240p and .mp4
    format, format_id, fps = select_video_format(url)
    save_location = None  # stays None if no suitable format is found or the download fails
    if format_id is not None:
        ydl_opts = {
        'format':format_id,
        'outtmpl': "videos/%(id)s.%(ext)s"}

        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            try:
                ydl.cache.remove()
                meta = ydl.extract_info(url)
                save_location = 'videos/' + meta['id'] + '.' + meta['ext']
            except youtube_dl.DownloadError as error:
                print(f'error with download_video function: {error}')
                save_location = None
    return(fps, save_location)

def process_video_parallel(video, skip_frames, dest_path, num_processes, process_number):
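    # each worker seeks to its own slice of the video and writes every skip_frames-th
    # frame in that slice to dest_path as <frame_index>.jpg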
    cap = cv2.VideoCapture(video)
    frames_per_process = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) // (num_processes)
    count = frames_per_process * process_number
    cap.set(cv2.CAP_PROP_POS_FRAMES, count)
    print(f"worker: {process_number}, process frames {count} ~ {frames_per_process * (process_number + 1)} \n total number of frames: {cap.get(cv2.CAP_PROP_FRAME_COUNT)} \n video: {video}; isOpen? : {cap.isOpened()}")
    while count < frames_per_process * (process_number + 1):
        ret, frame = cap.read()
        if not ret:
            break
        if count % skip_frames == 0:
            filename = f"{dest_path}/{count}.jpg"
            cv2.imwrite(filename, frame)
            # print(f"saved {filename}")
        count += 1
    cap.release()


def vid2frames(url, sampling_interval=1):
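    # download the video, then fan frame extraction out across n_workers processes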
    # create folder for extracted frames - if folder exists, delete and create a new one
    path_frames = Path('frames')
    try:
        path_frames.mkdir(parents=True)
    except FileExistsError:
        shutil.rmtree(path_frames)
        path_frames.mkdir(parents=True)
 
    # download the video
    fps, video = download_video(url)
    if video is not None:
        if fps is None:
            fps = 30
        skip_frames = int(fps * sampling_interval)
        print(f'video saved at: {video}, fps: {fps}, skip_frames: {skip_frames}')
        # extract video frames at the given sampling interval with multiprocessing
        n_workers = min(os.cpu_count(), 12)
        print(f'now extracting frames with {n_workers} processes...')

        with Pool(n_workers) as pool:
            pool.map(partial(process_video_parallel, video, skip_frames, path_frames, n_workers), range(n_workers))
    else:
        skip_frames, path_frames = None, None
    return skip_frames, path_frames


def captioned_strip(images, caption=None, times=None, rows=1):
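    # tile the given frames into a grid with `rows` rows, then overlay the caption
    # (the search query) at the top and each frame's timestamp on its tile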
    increased_h = 0 if caption is None else 30
    w, h = images[0].size[0], images[0].size[1]
    img = Image.new("RGB", (len(images) * w // rows, h * rows + increased_h))
    for i, img_ in enumerate(images):
        img.paste(img_, (i // rows * w, increased_h + (i % rows) * h))
    if caption is not None:
        draw = ImageDraw.Draw(img)
        font = ImageFont.truetype(
            "/usr/share/fonts/truetype/liberation2/LiberationMono-Bold.ttf", 16
        )
        font_small = ImageFont.truetype("/usr/share/fonts/truetype/liberation2/LiberationMono-Bold.ttf", 12)
        draw.text((60, 3), caption, (255, 255, 255), font=font)
        for i, ts in enumerate(times):
            draw.text(
                (i // rows * w + 40,     # column position (matches where the frame was pasted)
                 (i % rows) * h + 33),   # row position
                ts, (255, 255, 255), font=font_small)
    return img

def run_inference(url, sampling_interval, search_query, bs=526):
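    # pipeline: extract frames -> encode frames with CLIP in batches -> encode the text
    # query -> rank frames by similarity -> return a captioned strip of the top matches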
    skip_frames, path_frames = vid2frames(url, sampling_interval)
    if path_frames is not None:
        filenames = sorted(path_frames.glob('*.jpg'), key=lambda p: int(p.stem))
        n_frames = len(filenames)
        bs = min(n_frames, bs)
        print(f"extracted {n_frames} frames, now encoding images")
        # encode images one batch at a time, combining all batch outputs into image_features, size n_frames x 512
        image_features = torch.empty(size=(n_frames, 512), dtype=torch.float32).to(device)
        print(f"encoding images, batch size: {bs}; number of batches: {len(range(0, n_frames, bs))}")
        for b in range(0, n_frames, bs):
            images = []
            # loop through all frames in the batch -> create batch_image_input, size bs x 3 x 224 x 224
            for filename in filenames[b:b+bs]:
                image = Image.open(filename).convert("RGB")
                images.append(preprocess(image))
            batch_image_input = torch.tensor(np.stack(images)).to(device)
            # encode batch_image_input -> batch_image_features
            with torch.no_grad():
                batch_image_features = model.encode_image(batch_image_input)
                batch_image_features /= batch_image_features.norm(dim=-1, keepdim=True)
            # add the encoded image embeddings to image_features
            image_features[b:b+bs] = batch_image_features
        # encode the search query
        print('encoding search query')
        with torch.no_grad():
            text_features = model.encode_text(clip.tokenize(search_query).to(device)).to(dtype=torch.float32)
            text_features /= text_features.norm(dim=-1, keepdim=True)

        # rank frames by similarity to the query and keep the 4 best matches
        similarity = 100.0 * image_features @ text_features.T
        values, indices = similarity.topk(4, dim=0)

        best_frames = [Image.open(filenames[ind]).convert("RGB") for ind in indices]
        times = [f'{datetime.timedelta(seconds=ind[0].item() * sampling_interval)}' for ind in indices]
        image_output = captioned_strip(best_frames, search_query, times, 2)
        title = search_query
        print('task complete')
    else:
        title = "not able to download video"
        image_output = None
    return title, image_output

inputs = [gr.inputs.Textbox(label="Give us the link to your YouTube video! (note that downloading might be slow, e.g. it will take a few minutes to process a 10-minute video)"),
          gr.Number(5,label='sampling interval (seconds)'),
          gr.inputs.Textbox(label="What do you want to search?")]
outputs = [
    gr.outputs.HTML(label=""),  # To be used as title
    gr.outputs.Image(label=""),
]

example_videos = ['v1rkzUIL8oc', 'k4R5wZs8cxI','0diCvgWv_ng']

gr.Interface(
    run_inference,
    inputs=inputs,
    outputs=outputs,
    title="It Happened One Frame",
    description='A CLIP-based app that searches video frames based on a text query',
    examples=[
        ['https://youtu.be/v1rkzUIL8oc', 1, "James Cagney dancing down the stairs"],
        ['https://youtu.be/k4R5wZs8cxI', 1, "James Cagney smashes a grapefruit into Mae Clarke's face"],
        ['https://youtu.be/0diCvgWv_ng', 1, "little Deborah practicing her ballet while wearing a tutu in empty restaurant"]
    ]
).launch(debug=True, enable_queue=True)