Commit 492fffc
Parent(s): decf237
Update app.py
app.py CHANGED
@@ -72,9 +72,9 @@ def sample(
     input_path: str = "assets/test_image.png", # Can either be image file or folder with image files
     seed: Optional[int] = None,
     randomize_seed: bool = True,
-    version: str = "svd_xt",
-    fps_id: int = 6,
     motion_bucket_id: int = 127,
+    fps_id: int = 6,
+    version: str = "svd_xt",
     cond_aug: float = 0.02,
     decoding_t: int = 7, # Number of frames decoded at a time! This eats most VRAM. Reduce if necessary.
     device: str = "cuda",
@@ -300,8 +300,8 @@ def resize_image(image_path, output_size=(1024, 576)):
     return cropped_image
 
 with gr.Blocks() as demo:
-    gr.Markdown('''# Stable Video Diffusion - Img2Vid - XT ([model](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt), [paper](https://stability.ai/research/stable-video-diffusion-scaling-latent-video-diffusion-models-to-large-datasets))
-    #### Research release ([_non-commercial_](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt/blob/main/LICENSE)): generate `25 frames` of video from a single image at `6 fps`. Each generation takes ~60s on the A100. [Join the waitlist](https://stability.ai/contact) for
+    gr.Markdown('''# Unofficial demo for Stable Video Diffusion - Img2Vid - XT ([model](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt), [paper](https://stability.ai/research/stable-video-diffusion-scaling-latent-video-diffusion-models-to-large-datasets))
+    #### Research release ([_non-commercial_](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt/blob/main/LICENSE)): generate `25 frames` of video from a single image at `6 fps`. Each generation takes ~60s on the A100. [Join the waitlist](https://stability.ai/contact) for Stability's upcoming web experience.
     ''')
     with gr.Row():
         with gr.Column():
@@ -311,9 +311,11 @@ with gr.Blocks() as demo:
     with gr.Accordion("Advanced options", open=False):
         seed = gr.Slider(label="Seed", value=42, randomize=True, minimum=0, maximum=max_64_bit_int, step=1)
         randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+        motion_bucket_id = gr.Slider(label="Motion bucket id", info="Controls how much motion to add/remove from the image", value=127, minimum=1, maximum=255)
+        fps_id = gr.Slider(label="Frames per second", info="The length of your video in seconds will be 25/fps", value=6, minimum=5, maximum=30)
 
     image.upload(fn=resize_image, inputs=image, outputs=image, queue=False)
-    generate_btn.click(fn=sample, inputs=[image, seed, randomize_seed], outputs=[video, seed], api_name="video")
+    generate_btn.click(fn=sample, inputs=[image, seed, randomize_seed, motion_bucket_id, fps_id], outputs=[video, seed], api_name="video")
 
 if __name__ == "__main__":
     demo.queue(max_size=20)
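
Note on the parameter reorder in the first hunk: Gradio binds the components in `inputs` to the event handler's parameters by position, so `sample()`'s signature must list `motion_bucket_id` and `fps_id` in the same order as the new `inputs=[image, seed, randomize_seed, motion_bucket_id, fps_id]` list; with the old order, the motion value would have landed in `version`. A minimal sketch of the pattern (the `sample` body here is a hypothetical stand-in, not the app's actual diffusion code):

import gradio as gr

def sample(image_path, seed, randomize_seed, motion_bucket_id, fps_id):
    # Hypothetical stand-in: the real app runs Stable Video Diffusion here.
    return f"{image_path}: seed={seed}, randomize={randomize_seed}, motion={motion_bucket_id}, fps={fps_id}"

with gr.Blocks() as demo:
    image = gr.Image(type="filepath")
    seed = gr.Slider(label="Seed", value=42, minimum=0, maximum=100, step=1)
    randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
    motion_bucket_id = gr.Slider(label="Motion bucket id", value=127, minimum=1, maximum=255)
    fps_id = gr.Slider(label="Frames per second", value=6, minimum=5, maximum=30)
    out = gr.Textbox()
    generate_btn = gr.Button("Generate")
    # `inputs` are bound to sample()'s parameters by position, left to right.
    generate_btn.click(fn=sample, inputs=[image, seed, randomize_seed, motion_bucket_id, fps_id], outputs=out, api_name="video")

if __name__ == "__main__":
    demo.launch()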
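Because the click handler is registered with `api_name="video"`, the Space also exposes it as a named API endpoint, so the two new sliders become extra positional arguments for API callers too. A sketch of calling it with `gradio_client` (the Space id below is a placeholder, and newer client versions may require wrapping the image path with `gradio_client.handle_file`):

from gradio_client import Client

client = Client("user/stable-video-diffusion")  # placeholder Space id

# Arguments follow the wired `inputs` order: image, seed, randomize_seed,
# motion_bucket_id, fps_id; the endpoint returns (video, seed).
video, seed = client.predict(
    "assets/test_image.png",  # image
    42,                       # seed
    True,                     # randomize_seed
    127,                      # motion_bucket_id
    6,                        # fps_id
    api_name="/video",
)
print(video, seed)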