Update app.py
app.py
CHANGED
@@ -12,23 +12,7 @@ from ganime.model.vqgan_clean.experimental.net2net_v3 import Net2Net
 
 IMAGE_SHAPE = (64, 128, 3)
 
-
-hf_hub_download(repo_id="Kurokabe/VQGAN_Kimetsu-no-yaiba_Tensorflow", filename="checkpoint.data-00000-of-00001", subfolder="vqgan_kny_image_full")
-hf_hub_download(repo_id="Kurokabe/VQGAN_Kimetsu-no-yaiba_Tensorflow", filename="checkpoint.index", subfolder="vqgan_kny_image_full")
-vqgan_path = hf_hub_download(repo_id="Kurokabe/VQGAN_Kimetsu-no-yaiba_Tensorflow", filename="checkpoint", subfolder="vqgan_kny_image_full")
-
-
-
-hf_hub_download(repo_id="Kurokabe/GANime_Kimetsu-no-yaiba_Tensorflow", filename="checkpoint.data-00000-of-00001", subfolder="ganime_kny_video_full")
-hf_hub_download(repo_id="Kurokabe/GANime_Kimetsu-no-yaiba_Tensorflow", filename="checkpoint.index", subfolder="ganime_kny_video_full")
-gpt_path = hf_hub_download(repo_id="Kurokabe/GANime_Kimetsu-no-yaiba_Tensorflow", filename="checkpoint", subfolder="ganime_kny_video_full")
-
-cfg = omegaconf.OmegaConf.load(here("configs/kny_video_gpt2_large_gradio.yaml"))
-cfg["model"]["first_stage_config"]["checkpoint_path"] = vqgan_path
-cfg["model"]["transformer_config"]["checkpoint_path"] = gpt_path
-
-model = Net2Net(**cfg["model"], trainer_config=cfg["train"], num_replicas=1)
-model.first_stage_model.build((20, *IMAGE_SHAPE))
+model = None
 
 
 # def save_video(video):
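The block deleted above is not dropped: the next hunk moves it into a load_model() helper, leaving only the model = None placeholder at import time. As a side note on the download calls themselves, here is a minimal sketch (assuming the standard huggingface_hub hf_hub_download API; the loop is a rephrasing for illustration, not the app's code) of why each model needs three calls: a TensorFlow checkpoint is a family of files, so the .data-*/.index shards have to land in the same cache folder as the "checkpoint" file whose local path is what the config actually consumes.

from huggingface_hub import hf_hub_download

# Fetch the shard files first so they sit next to the checkpoint prefix
# in the same cached snapshot; their return paths are not needed.
for fname in ("checkpoint.data-00000-of-00001", "checkpoint.index"):
    hf_hub_download(
        repo_id="Kurokabe/VQGAN_Kimetsu-no-yaiba_Tensorflow",
        filename=fname,
        subfolder="vqgan_kny_image_full",
    )

# Keep only the path of the prefix file; TensorFlow is expected to resolve
# the sibling .data/.index shards from the same directory when restoring.
vqgan_path = hf_hub_download(
    repo_id="Kurokabe/VQGAN_Kimetsu-no-yaiba_Tensorflow",
    filename="checkpoint",
    subfolder="vqgan_kny_image_full",
)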
@@ -70,8 +54,29 @@ def normalize(image):
 
     return image
 
+def load_model():
+    hf_hub_download(repo_id="Kurokabe/VQGAN_Kimetsu-no-yaiba_Tensorflow", filename="checkpoint.data-00000-of-00001", subfolder="vqgan_kny_image_full")
+    hf_hub_download(repo_id="Kurokabe/VQGAN_Kimetsu-no-yaiba_Tensorflow", filename="checkpoint.index", subfolder="vqgan_kny_image_full")
+    vqgan_path = hf_hub_download(repo_id="Kurokabe/VQGAN_Kimetsu-no-yaiba_Tensorflow", filename="checkpoint", subfolder="vqgan_kny_image_full")
+
+    hf_hub_download(repo_id="Kurokabe/GANime_Kimetsu-no-yaiba_Tensorflow", filename="checkpoint.data-00000-of-00001", subfolder="ganime_kny_video_full")
+    hf_hub_download(repo_id="Kurokabe/GANime_Kimetsu-no-yaiba_Tensorflow", filename="checkpoint.index", subfolder="ganime_kny_video_full")
+    gpt_path = hf_hub_download(repo_id="Kurokabe/GANime_Kimetsu-no-yaiba_Tensorflow", filename="checkpoint", subfolder="ganime_kny_video_full")
+
+    cfg = omegaconf.OmegaConf.load(here("configs/kny_video_gpt2_large_gradio.yaml"))
+    cfg["model"]["first_stage_config"]["checkpoint_path"] = vqgan_path
+    cfg["model"]["transformer_config"]["checkpoint_path"] = gpt_path
+
+    model = Net2Net(**cfg["model"], trainer_config=cfg["train"], num_replicas=1)
+    model.first_stage_model.build((20, *IMAGE_SHAPE))
+    return model
+
 
 def generate(first, last, n_frames):
+    global model
+
+    if model is None:
+        model = load_model()
     # n_frames = 20
     n_frames = int(n_frames)
     first = resize_if_necessary(first)
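Taken together, the two hunks switch app.py from eager, import-time model construction to lazy initialization: the first call to generate() downloads the checkpoints, builds the Net2Net model, and caches it in the module-level model global, presumably so the Gradio interface can come up before any weights are fetched. A self-contained sketch of that pattern (load_model here is a trivial hypothetical stand-in for the real one above):

model = None  # module-level placeholder, as added in the first hunk

def load_model():
    # Stand-in for the expensive setup (checkpoint downloads, Net2Net build).
    print("loading model once ...")
    return object()

def generate(first, last, n_frames):
    global model               # rebind the module-level name, not a local
    if model is None:          # only the first call pays the startup cost
        model = load_model()
    return model

generate(None, None, 20)  # prints "loading model once ..."
generate(None, None, 20)  # silent: reuses the cached instance

One thing the pattern does not give for free is thread safety: two overlapping first requests could each see model is None and build the model twice. Here that costs only wasted work, but guarding load_model() with a lock would close the gap.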