import os

os.system("git clone https://github.com/google-research/frame-interpolation")

import sys

sys.path.append("frame-interpolation")

import cv2
import numpy as np
import tensorflow as tf
import mediapy
import gradio as gr
from PIL import Image
from eval import interpolator, util
from huggingface_hub import snapshot_download
from image_tools.sizes import resize_and_crop

# Download the pretrained FILM (film-style) checkpoint and build the interpolator.
model = snapshot_download(repo_id="akhaliq/frame-interpolation-film-style")
interpolator = interpolator.Interpolator(model, None)

ffmpeg_path = util.get_ffmpeg_path()
mediapy.set_ffmpeg(ffmpeg_path)


def resize(width, img):
    """Resize an image file to the given width, preserving its aspect ratio."""
    basewidth = width
    img = Image.open(img)
    wpercent = basewidth / float(img.size[0])
    hsize = int(float(img.size[1]) * wpercent)
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
    img = img.resize((basewidth, hsize), Image.LANCZOS)
    return img


def resize_img(img1, img2):
    """Resize and center-crop img2 to match img1's dimensions."""
    img_target_size = Image.open(img1)
    img_to_resize = resize_and_crop(
        img2,
        (img_target_size.size[0], img_target_size.size[1]),  # match img1's width and height
        crop_origin="middle",
    )
    img_to_resize.save("resized_img2.png")


# Sketch canvases; Gradio hands the drawings to predict as file paths.
sketch1 = gr.Image(image_mode="RGB", source="canvas", type="filepath")
sketch2 = gr.Image(image_mode="RGB", source="canvas", type="filepath")
slider = gr.inputs.Slider(minimum=2, maximum=4, step=1)


def predict(frame1, frame2, times_to_interpolate):
    # Normalize both sketches to a 256-pixel width, then crop the second
    # to match the first so the interpolator receives same-sized inputs.
    frame1 = resize(256, frame1)
    frame2 = resize(256, frame2)
    frame1.save("test1.png")
    frame2.save("test2.png")
    resize_img("test1.png", "test2.png")
    input_frames = ["test1.png", "resized_img2.png"]

    frames = list(
        util.interpolate_recursively_from_files(
            input_frames, times_to_interpolate, interpolator))
    print(f"Interpolated {len(frames)} frames")

    mediapy.write_video("out.mp4", frames, fps=30)

    # Read the rendered video back into a (frames, height, width, 3) uint8
    # array for the carousel output.
    filename = "out.mp4"
    cap = cv2.VideoCapture(filename)
    frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frameHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    videoFPS = int(cap.get(cv2.CAP_PROP_FPS))

    print(f"frameCount: {frameCount}")
    print(f"frameWidth: {frameWidth}")
    print(f"frameHeight: {frameHeight}")
    print(f"videoFPS: {videoFPS}")

    buf = np.empty((frameCount, frameHeight, frameWidth, 3), dtype=np.uint8)
    fc = 0
    while fc < frameCount:
        ret, frame = cap.read()
        if not ret:
            break
        # OpenCV decodes frames as BGR; convert to RGB for display.
        buf[fc] = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        fc += 1
    cap.release()
    videoArray = buf[:fc]

    print(f"DURATION: {frameCount / videoFPS}")
    return "out.mp4", videoArray

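
# Why the clips are short: util.interpolate_recursively_from_files halves each
# interval between consecutive inputs once per pass, so n input frames after
# t passes become (n - 1) * 2**t + 1 frames. A minimal sketch of that
# arithmetic (expected_frame_count is a hypothetical helper, not part of the
# FILM API):
def expected_frame_count(num_inputs: int, times_to_interpolate: int) -> int:
    # (num_inputs - 1) intervals, each split in half times_to_interpolate times
    return (num_inputs - 1) * 2 ** times_to_interpolate + 1


# Two sketches at the slider maximum of 4 give expected_frame_count(2, 4) == 17
# frames, i.e. roughly half a second at 30 fps.
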
title = "sketch-frame-interpolation"

description = (
    "This is a fork of the Gradio demo for FILM: Frame Interpolation for "
    "Large Motion from @akhaliq, but using sketches instead of images. This "
    "could be very useful for the animation industry :)\n"
    "To use it, simply draw your two sketches and set the number of times to "
    "interpolate. Read more at the links below."
)

article = (
    "<p style='text-align: center'>"
    "<a href='https://github.com/google-research/frame-interpolation' "
    "target='_blank'>FILM: Frame Interpolation for Large Motion | Github Repo</a>"
    "</p>"
)

custom_css = "style.css"

gr.Interface(
    predict,
    [sketch1, sketch2, slider],
    outputs=["playable_video", "carousel"],
    title=title,
    description=description,
    article=article,
    css=custom_css,
).launch(enable_queue=True)