Create app
app
ADDED
@@ -0,0 +1,108 @@
import tempfile

import ffmpegio
import gradio as gr
import numpy as np
import omegaconf
import tensorflow as tf
from pyprojroot.pyprojroot import here
from huggingface_hub import hf_hub_download

from ganime.model.vqgan_clean.experimental.net2net_v3 import Net2Net

IMAGE_SHAPE = (64, 128, 3)

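# Fetch the pretrained VQGAN (first-stage) weights from the Hugging Face Hub.
# A TensorFlow checkpoint spans three files (checkpoint, .index, .data-*), so
# all three are downloaded before the model restores from them.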
hf_hub_download(repo_id="Kurokabe/VQGAN_Kimetsu-no-yaiba_Tensorflow", filename="checkpoint.data-00000-of-00001", subfolder="vqgan_kny_image_full")
hf_hub_download(repo_id="Kurokabe/VQGAN_Kimetsu-no-yaiba_Tensorflow", filename="checkpoint.index", subfolder="vqgan_kny_image_full")
vqgan_path = hf_hub_download(repo_id="Kurokabe/VQGAN_Kimetsu-no-yaiba_Tensorflow", filename="checkpoint", subfolder="vqgan_kny_image_full")

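# Fetch the second-stage transformer (GPT-2) weights used for video generation.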
hf_hub_download(repo_id="Kurokabe/GANime_Kimetsu-no-yaiba_Tensorflow", filename="checkpoint.data-00000-of-00001", subfolder="ganime_kny_video_full")
hf_hub_download(repo_id="Kurokabe/GANime_Kimetsu-no-yaiba_Tensorflow", filename="checkpoint.index", subfolder="ganime_kny_video_full")
gpt_path = hf_hub_download(repo_id="Kurokabe/GANime_Kimetsu-no-yaiba_Tensorflow", filename="checkpoint", subfolder="ganime_kny_video_full")

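# Point the config at the downloaded weights. hf_hub_download returns the local
# path of the downloaded file itself, so vqgan_path and gpt_path already end in
# ".../checkpoint", the prefix TensorFlow resolves against the adjacent
# checkpoint.index and checkpoint.data-* files.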
cfg = omegaconf.OmegaConf.load(here("configs/kny_video_gpt2_large_gradio.yaml"))
cfg["model"]["first_stage_config"]["checkpoint_path"] = vqgan_path
cfg["model"]["transformer_config"]["checkpoint_path"] = gpt_path

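# Instantiate the model and build the first-stage VQGAN on a fixed batch shape
# so its variables exist before the checkpoint weights are applied.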
model = Net2Net(**cfg["model"], trainer_config=cfg["train"], num_replicas=1)
model.first_stage_model.build((20, *IMAGE_SHAPE))

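# Encode the generated clip to a temporary mp4 at 20 fps and return its path,
# which Gradio's video output component accepts.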
def save_video(video):
    output_file = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
    filename = output_file.name
    # The model returns one batched clip of floats in [0, 1] with shape
    # (batch, frames, height, width, channels).
    video = video.numpy()
    video = (video * 255).astype(np.uint8)
    # ffmpegio expects (frames, height, width, channels), so drop the batch axis.
    ffmpegio.video.write(filename, 20, video[0], overwrite=True)
    return filename


def resize_if_necessary(image):
    # The model expects 64x128 inputs; resize if either dimension differs.
    if image.shape[0] != 64 or image.shape[1] != 128:
        image = tf.image.resize(image, (64, 128))
    return image


def normalize(image):
    # Map uint8 pixels from [0, 255] to [-1, 1].
    image = (tf.cast(image, tf.float32) / 127.5) - 1
    return image


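# Gradio callback: preprocess the two endpoint frames, then predict the
# in-between frames. "remaining_frames" holds a countdown
# (n_frames - 1, ..., 0), one entry per frame to generate.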
def generate(first, last, n_frames):
    n_frames = int(n_frames)
    first = resize_if_necessary(first)
    last = resize_if_necessary(last)
    first = normalize(first)
    last = normalize(last)
    data = {
        "first_frame": np.expand_dims(first, axis=0),
        "last_frame": np.expand_dims(last, axis=0),
        "y": None,
        "n_frames": [n_frames],
        "remaining_frames": [list(reversed(range(n_frames)))],
    }
    generated = model.predict(data)

    return save_video(generated)


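# Wire up the UI: two image inputs, a frame-count slider, and a video output.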
gr.Interface(
    generate,
    inputs=[
        gr.Image(label="Upload the first image"),
        gr.Image(label="Upload the last image"),
        gr.Slider(
            label="Number of frames to generate",
            minimum=15,
            maximum=100,
            value=15,
            step=1,
        ),
    ],
    outputs="video",
    title="Generate a video from the first and last frame",
).launch(share=True)