Space: nova-d48w1024-osp480
Files changed:
- .flake8 +21 -0
- .gitignore +55 -0
- README.md +5 -4
- app.py +184 -0
- requirements.txt +7 -0
.flake8
ADDED
@@ -0,0 +1,21 @@
+[flake8]
+max-line-length = 100
+ignore =
+    # whitespace before ':' (conflicts with Black)
+    E203,
+    # ambiguous variable name
+    E741,
+    # 'from module import *' used; unable to detect undefined names
+    F403,
+    # name may be undefined, or defined from star imports: module
+    F405,
+    # redefinition of unused name from line N
+    F811,
+    # undefined name
+    F821,
+    # line break before binary operator
+    W503,
+    # line break after binary operator
+    W504
+# module imported but unused
+per-file-ignores = __init__.py: F401
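
Note: E203 is ignored because Black and flake8 disagree about slice whitespace. Black puts spaces around ":" when the slice bounds are compound expressions, which E203 ("whitespace before ':'") would flag. A hypothetical illustration, not part of this commit:

    # Black's output for a slice over compound expressions; flake8 would
    # report E203 here without the ignore above.
    ham = list(range(10))
    lower, upper, offset = 2, 8, 1
    piece = ham[lower + offset : upper + offset]
    print(piece)  # [3, 4, 5, 6, 7, 8]
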
.gitignore
ADDED
@@ -0,0 +1,55 @@
+# Compiled Object files
+*.slo
+*.lo
+*.o
+*.cuo
+
+# Compiled Dynamic libraries
+*.so
+*.dll
+*.dylib
+
+# Compiled Static libraries
+*.lai
+*.la
+*.a
+*.lib
+
+# Compiled python
+*.pyc
+__pycache__
+
+# Compiled MATLAB
+*.mex*
+
+# IPython notebook checkpoints
+.ipynb_checkpoints
+
+# Editor temporaries
+*.swp
+*~
+
+# Sublime Text settings
+*.sublime-workspace
+*.sublime-project
+
+# Eclipse Project settings
+*.*project
+.settings
+
+# QtCreator files
+*.user
+
+# VSCode files
+.vscode
+
+# IDEA files
+.idea
+
+# OSX dir files
+.DS_Store
+
+# Android files
+.gradle
+*.iml
+local.properties
README.md
CHANGED
@@ -1,13 +1,14 @@
 ---
-title:
-emoji:
-colorFrom:
-colorTo:
+title: nova-d48w1024-osp480
+emoji: 📉
+colorFrom: purple
+colorTo: gray
 sdk: gradio
 sdk_version: 5.9.1
 app_file: app.py
 pinned: false
 license: apache-2.0
+short_description: 'NOVA Text-to-Video APP'
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,184 @@
+# Copyright (c) 2024-present, BAAI. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##############################################################################
+"""NOVA T2V application."""
+
+import argparse
+import os
+
+import gradio as gr
+import numpy as np
+import PIL.Image
+import spaces
+import torch
+
+from diffnext.pipelines import NOVAPipeline
+from diffnext.utils import export_to_video
+
+# Fix tokenizer fork issue.
+os.environ["TOKENIZERS_PARALLELISM"] = "true"
+# Switch to the allocator optimized for dynamic shape.
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+
+
+def parse_args():
+    """Parse arguments."""
+    parser = argparse.ArgumentParser(description="Serve NOVA T2V application")
+    parser.add_argument("--model", default="BAAI/nova-d48w1024-osp480", help="model path")
+    parser.add_argument("--device", type=int, default=0, help="device index")
+    parser.add_argument("--precision", default="float16", help="compute precision")
+    return parser.parse_args()
+
+
+def crop_image(image, target_h, target_w):
+    """Center crop image to target size."""
+    h, w = image.height, image.width
+    aspect_ratio_target, aspect_ratio = target_w / target_h, w / h
+    if aspect_ratio > aspect_ratio_target:
+        new_w = int(h * aspect_ratio_target)
+        x_start = (w - new_w) // 2
+        image = image.crop((x_start, 0, x_start + new_w, h))
+    else:
+        new_h = int(w / aspect_ratio_target)
+        y_start = (h - new_h) // 2
+        image = image.crop((0, y_start, w, y_start + new_h))
+    return np.array(image.resize((target_w, target_h), PIL.Image.Resampling.BILINEAR))
+
+
+@spaces.GPU(duration=75)
+def generate_video(
+    prompt,
+    negative_prompt,
+    image_prompt,
+    motion_flow,
+    preset,
+    seed,
+    randomize_seed,
+    guidance_scale,
+    num_inference_steps,
+    num_diffusion_steps,
+    progress=gr.Progress(track_tqdm=True),
+):
+    """Generate a video."""
+    args = locals()
+    preset = [p for p in video_presets if p["label"] == preset][0]
+    args["max_latent_length"] = preset["#latents"]
+    args["image"] = crop_image(image_prompt, preset["h"], preset["w"]) if image_prompt else None
+    seed = np.random.randint(2147483647) if randomize_seed else seed
+    device = getattr(pipe, "_offload_device", pipe.device)
+    generator = torch.Generator(device=device).manual_seed(seed)
+    frames = pipe(generator=generator, **args).frames[0]
+    return export_to_video(frames, fps=12), seed
+
+
+title = "Autoregressive Video Generation without Vector Quantization"
+abbr = "<strong>NO</strong>n-quantized <strong>V</strong>ideo <strong>A</strong>utoregressive"
+header = (
+    "<div align='center'>"
+    "<h2>Autoregressive Video Generation without Vector Quantization</h2>"
+    "<h3><a href='https://arxiv.org/abs/2412.14169' target='_blank' rel='noopener'>[paper]</a>"
+    "<a href='https://github.com/baaivision/NOVA' target='_blank' rel='noopener'>[code]</a></h3>"
+    "</div>"
+)
+header2 = f"<div align='center'><h3>🎞️ A {abbr} model for continuous visual generation</h3></div>"
+
+video_presets = [
+    {"label": "33x768x480", "w": 768, "h": 480, "#latents": 9},
+    {"label": "17x768x480", "w": 768, "h": 480, "#latents": 5},
+    {"label": "1x768x480", "w": 768, "h": 480, "#latents": 1},
+]
+
+
+prompts = [
+    "Niagara falls with colorful paint instead of water.",
+    "Many spotted jellyfish pulsating under water. Their bodies are transparent and glowing in deep ocean.",  # noqa
+    "An intense close-up of a soldier’s face, covered in dirt and sweat, his eyes filled with determination as he surveys the battlefield.",  # noqa
+    "a close-up shot of a woman standing in a dimly lit room. she is wearing a traditional chinese outfit, which includes a red and gold dress with intricate designs and a matching headpiece. the woman has her hair styled in an updo, adorned with a gold accessory. her makeup is done in a way that accentuates her features, with red lipstick and dark eyeshadow. she is looking directly at the camera with a neutral expression. the room has a rustic feel, with wooden beams and a stone wall visible in the background. the lighting in the room is soft and warm, creating a contrast with the woman's vibrant attire. there are no texts or other objects in the video. the style of the video is a portrait, focusing on the woman and her attire.",  # noqa
+    "The camera slowly rotates around a massive stack of vintage televisions that are placed within a large New York museum gallery. Each of the televisions is showing a different program. There are 1950s sci-fi movies with their distinctive visuals, horror movies with their creepy scenes, news broadcasts with moving images and words, static on some screens, and a 1970s sitcom with its characteristic look. The televisions are of various sizes and designs, some with rounded edges and others with more angular shapes. The gallery is well-lit, with light falling on the stack of televisions and highlighting the different programs being shown. There are no people visible in the immediate vicinity, only the stack of televisions and the surrounding gallery space.",  # noqa
+]
+motion_flows = [5, 5, 5, 5, 5]
+videos = ["", "", "", "", ""]
+examples = [list(x) for x in zip(prompts, motion_flows)]
+
+
+if __name__ == "__main__":
+    args = parse_args()
+
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu", args.device)
+    model_args = {"torch_dtype": getattr(torch, args.precision.lower()), "trust_remote_code": True}
+    pipe = NOVAPipeline.from_pretrained(args.model, **model_args).to(device)
+
+    # Application.
+    app = gr.Blocks(theme="origin").__enter__()
+    container = gr.Column(elem_id="col-container").__enter__()
+    _, main_row = gr.Markdown(header), gr.Row().__enter__()
+
+    # Input.
+    input_col = gr.Column().__enter__()
+    prompt = gr.Text(
+        label="Prompt",
+        placeholder="Describe the video you want to generate",
+        value="Niagara falls with colorful paint instead of water.",
+        lines=5,
+    )
+    negative_prompt = gr.Text(
+        label="Negative Prompt",
+        value="low quality, deformed, distorted, disfigured, fused fingers, bad anatomy, weird hand, motion smear, motion artifacts",  # noqa
+        lines=1,
+    )
+    image_prompt = gr.Image(label="Image Prompt (Optional)", type="pil")
+    # fmt: off
+    adv_opt = gr.Accordion("Advanced Options", open=False).__enter__()
+    seed = gr.Slider(label="Seed", maximum=2147483647, step=1, value=0)
+    randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+    guidance_scale = gr.Slider(label="Guidance scale", minimum=1, maximum=10.0, step=0.1, value=7.0)
+    with gr.Row():
+        num_inference_steps = gr.Slider(label="Inference steps", minimum=1, maximum=128, step=1, value=128)  # noqa
+        num_diffusion_steps = gr.Slider(label="Diffusion steps", minimum=1, maximum=100, step=1, value=100)  # noqa
+    adv_opt.__exit__()
+    generate = gr.Button("Generate Video", variant="primary", size="lg")
+    input_col.__exit__()
+
+    # Results.
+    result_col, _ = gr.Column().__enter__(), gr.Markdown(header2)
+    preset = gr.Dropdown([p["label"] for p in video_presets], label="Video Preset", value=video_presets[0]["label"])  # noqa
+    motion_flow = gr.Slider(label="Motion Flow", minimum=1, maximum=10, step=1, value=5)
+    result = gr.Video(label="Result", show_label=False, autoplay=True)
+    result_col.__exit__(), main_row.__exit__()
+    # fmt: on
+
+    # Examples.
+    with gr.Row():
+        gr.Examples(examples=examples, inputs=[prompt, motion_flow])
+
+    # Events.
+    container.__exit__()
+    gr.on(
+        triggers=[generate.click, prompt.submit, negative_prompt.submit],
+        fn=generate_video,
+        inputs=[
+            prompt,
+            negative_prompt,
+            image_prompt,
+            motion_flow,
+            preset,
+            seed,
+            randomize_seed,
+            guidance_scale,
+            num_inference_steps,
+            num_diffusion_steps,
+        ],
+        outputs=[result, seed],
+    )
+    app.__exit__(), app.launch(share=False)
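
Note: generate_video() forwards its entire locals() dict to pipe(), so UI-only keys (preset, image_prompt, randomize_seed, progress) are passed along with the real generation arguments, which implies the pipeline tolerates extra keyword arguments. A minimal standalone sketch of the same call path, assuming the diffnext API used above; values mirror the app defaults, and the 33x768x480 preset maps to max_latent_length=9 per video_presets:

    import torch
    from diffnext.pipelines import NOVAPipeline
    from diffnext.utils import export_to_video

    # Load the pipeline exactly as app.py does, then call it without Gradio.
    pipe = NOVAPipeline.from_pretrained(
        "BAAI/nova-d48w1024-osp480", torch_dtype=torch.float16, trust_remote_code=True
    ).to("cuda")
    frames = pipe(
        prompt="Niagara falls with colorful paint instead of water.",
        max_latent_length=9,  # the 33x768x480 preset from video_presets
        motion_flow=5,
        guidance_scale=7.0,
        num_inference_steps=128,
        num_diffusion_steps=100,
        generator=torch.Generator("cuda").manual_seed(0),
    ).frames[0]
    video_path = export_to_video(frames, fps=12)  # returns a file path, as in generate_video()

The explicit gr.Blocks(...).__enter__() / .__exit__() calls are the unrolled form of Gradio's usual `with gr.Blocks():` layout nesting; writing them out keeps every component at module scope.
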
requirements.txt
ADDED
@@ -0,0 +1,7 @@
+einops
+torch
+diffusers
+transformers
+accelerate
+imageio[ffmpeg]
+git+https://github.com/baaivision/NOVA.git
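
A quick import sanity check for this list (a sketch; diffnext is the package installed by the git+ NOVA requirement, as imported in app.py):

    import importlib

    # Verify each dependency resolves; print its version when it exposes one.
    for name in ("einops", "torch", "diffusers", "transformers", "accelerate", "imageio", "diffnext"):
        mod = importlib.import_module(name)
        print(name, getattr(mod, "__version__", "ok"))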