# Requires OpenCV: pip install opencv-python
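# Clone the FILM reference implementation at startup and put it on the import
# path so that `eval.interpolator` and `eval.util` can be imported below.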
import os
os.system("git clone https://github.com/google-research/frame-interpolation")
import sys
sys.path.append("frame-interpolation")
import cv2
import numpy as np
import tensorflow as tf
import mediapy
from PIL import Image
from eval import interpolator, util
import gradio as gr
from huggingface_hub import snapshot_download
from image_tools.sizes import resize_and_crop
# Download the pretrained FILM model weights from the Hugging Face Hub and
# build the interpolator (renamed so it does not shadow the imported module).
model = snapshot_download(repo_id="akhaliq/frame-interpolation-film-style")
film_interpolator = interpolator.Interpolator(model, None)

# Point mediapy at the ffmpeg binary used to encode the output video.
ffmpeg_path = util.get_ffmpeg_path()
mediapy.set_ffmpeg(ffmpeg_path)
def resize(width, img):
    # Scale an input image to a fixed width, keeping its aspect ratio.
    basewidth = width
    img = Image.open(img)
    wpercent = basewidth / float(img.size[0])
    hsize = int(float(img.size[1]) * wpercent)
    # Image.LANCZOS replaces the Image.ANTIALIAS alias removed in newer Pillow.
    img = img.resize((basewidth, hsize), Image.LANCZOS)
    return img
def resize_img(img1, img2):
    # Resize and center-crop img2 so its dimensions match img1, then save it.
    img_target_size = Image.open(img1)
    img_to_resize = resize_and_crop(
        img2,
        (img_target_size.size[0], img_target_size.size[1]),  # match img1's width and height
        crop_origin="middle",
    )
    img_to_resize.save("resized_img2.png")
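# Two drawing canvases for the start and end sketches, plus a slider for the
# number of recursive interpolation steps (each step roughly doubles the
# number of in-between frames).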
sketch1 = gr.Image(image_mode="RGB",
                   source="canvas",
                   type="filepath",
                   shape=None,
                   invert_colors=False)
sketch2 = gr.Image(image_mode="RGB",
                   source="canvas",
                   type="filepath",
                   shape=None,
                   invert_colors=False)
slider = gr.inputs.Slider(minimum=2, maximum=4, step=1)
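# The prediction pipeline: resize both sketches to a common size, run FILM
# recursively to synthesise the in-between frames, and encode them as a video.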
def predict(frame1, frame2, times_to_interpolate):
    # Bring both sketches to a 256-pixel width, then crop the second one to
    # exactly match the first so FILM receives same-sized inputs.
    frame1 = resize(256, frame1)
    frame2 = resize(256, frame2)
    frame1.save("test1.png")
    frame2.save("test2.png")
    resize_img("test1.png", "test2.png")
    input_frames = ["test1.png", "resized_img2.png"]

    # Recursively interpolate between the two sketches with the FILM model.
    times_to_interpolate = int(times_to_interpolate)  # slider values may arrive as floats
    frames = list(
        util.interpolate_recursively_from_files(
            input_frames, times_to_interpolate, film_interpolator))
    print(frames)
    mediapy.write_video("out.mp4", frames, fps=30)

    # Read the rendered video back with OpenCV, purely to log its properties.
    filename = "out.mp4"
    cap = cv2.VideoCapture(filename)
    cap.set(cv2.CAP_PROP_POS_AVI_RATIO, 0)
    frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frameHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    videoFPS = int(cap.get(cv2.CAP_PROP_FPS))

    print(f"frameCount: {frameCount}")
    print(f"frameWidth: {frameWidth}")
    print(f"frameHeight: {frameHeight}")
    print(f"videoFPS: {videoFPS}")

    buf = np.empty((frameCount, frameHeight, frameWidth, 3), np.dtype('uint8'))
    fc = 0
    while fc < frameCount:
        ret, buf[fc] = cap.read()
        fc += 1
    cap.release()

    print(f"DURATION: {frameCount / videoFPS}")
    return "out.mp4"
title = "sketch-frame-interpolation"
description = "This is a fork of @akhaliq's Gradio demo for FILM: Frame Interpolation for Large Motion, but using sketches instead of images. This could be very useful for the animation industry :) <br /> To use it, simply draw your two sketches and set the number of interpolation steps. Read more at the links below."
article = "<p style='text-align: center'><a href='https://film-net.github.io/' target='_blank'>FILM: Frame Interpolation for Large Motion</a> | <a href='https://github.com/google-research/frame-interpolation' target='_blank'>Github Repo</a></p>"
custom_css = "style.css"
gr.Interface(predict,
             [sketch1, sketch2, slider],
             'playable_video',
             title=title,
             description=description,
             article=article,
             css=custom_css).launch(enable_queue=True)