Update app.py
app.py
CHANGED
@@ -1,6 +1,6 @@
 import torch
 import gradio as gr
-from diffusers import AnimateDiffSparseControlNetPipeline, AutoencoderKL, MotionAdapter, SparseControlNetModel
+from diffusers import AnimateDiffSparseControlNetPipeline, AutoencoderKL, MotionAdapter, SparseControlNetModel, AnimateDiffPipeline, EulerAncestralDiscreteScheduler
 from diffusers.schedulers import DPMSolverMultistepScheduler
 from diffusers.utils import export_to_gif, load_image
 
@@ -45,7 +45,31 @@ def generate_video(prompt, negative_prompt, num_inference_steps, conditioning_fr
     export_to_gif(video, "output.gif")
     return "output.gif"
 
-demo = gr.Interface(
+def generate_simple_video(prompt):
+    adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16).to(device)
+    pipe = AnimateDiffPipeline.from_pretrained("SG161222/Realistic_Vision_V6.0_B1_noVAE", motion_adapter=adapter, torch_dtype=torch.float16).to(device)
+    pipe.scheduler = EulerAncestralDiscreteScheduler(
+        beta_schedule="linear",
+        beta_start=0.00085,
+        beta_end=0.012,
+    )
+
+    pipe.enable_free_noise()
+    pipe.vae.enable_slicing()
+    pipe.enable_model_cpu_offload()
+
+    frames = pipe(
+        prompt,
+        num_frames=64,
+        num_inference_steps=20,
+        guidance_scale=7.0,
+        decode_chunk_size=2,
+    ).frames[0]
+
+    export_to_gif(frames, "simple_output.gif")
+    return "simple_output.gif"
+
+demo1 = gr.Interface(
     fn=generate_video,
     inputs=[
         gr.Textbox(label="Prompt", default="an aerial view of a cyberpunk city, night time, neon lights, masterpiece, high quality"),
@@ -55,8 +79,19 @@ demo = gr.Interface(
         gr.Slider(label="ControlNet Conditioning Scale", minimum=0.1, maximum=2.0, step=0.1, default=1.0)
     ],
     outputs=gr.Image(label="Generated Video"),
-    title="Generate Video with
+    title="Generate Video with AnimateDiffSparseControlNetPipeline",
     description="Generate a video using the AnimateDiffSparseControlNetPipeline."
 )
 
-demo.launch(server_name="0.0.0.0", server_port=7910)
+demo2 = gr.Interface(
+    fn=generate_simple_video,
+    inputs=gr.Textbox(label="Prompt", default="An astronaut riding a horse on Mars."),
+    outputs=gr.Image(label="Generated Simple Video"),
+    title="Generate Simple Video with AnimateDiff",
+    description="Generate a simple video using the AnimateDiffPipeline."
+)
+
+demo = gr.TabbedInterface([demo1, demo2], ["Advanced Video Generation", "Simple Video Generation"])
+
+demo.launch()
+#demo.launch(server_name="0.0.0.0", server_port=7910)
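Two notes on the new generate_simple_video path. It references a module-level `device` defined in an unchanged part of app.py that this diff does not show, and it calls `.to(device)` before `pipe.enable_model_cpu_offload()`, which manages device placement itself and can conflict with a pipeline already moved to the GPU. Below is a self-contained sketch of the same FreeNoise recipe, assuming a recent diffusers release (one that ships `enable_free_noise()`) and the model IDs from the commit; it drops the explicit `.to(device)` calls for that reason.

import torch
from diffusers import AnimateDiffPipeline, EulerAncestralDiscreteScheduler, MotionAdapter
from diffusers.utils import export_to_gif

adapter = MotionAdapter.from_pretrained(
    "guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16
)
pipe = AnimateDiffPipeline.from_pretrained(
    "SG161222/Realistic_Vision_V6.0_B1_noVAE",
    motion_adapter=adapter,
    torch_dtype=torch.float16,
)
pipe.scheduler = EulerAncestralDiscreteScheduler(
    beta_schedule="linear", beta_start=0.00085, beta_end=0.012
)

# FreeNoise re-noises overlapping frame windows so the 16-frame motion
# module can produce longer clips (64 frames here) without retraining.
pipe.enable_free_noise()
pipe.vae.enable_slicing()        # decode the latent video in slices to cut peak VRAM
pipe.enable_model_cpu_offload()  # keeps only the active submodule on the GPU

frames = pipe(
    "An astronaut riding a horse on Mars.",
    num_frames=64,
    num_inference_steps=20,
    guidance_scale=7.0,
    decode_chunk_size=2,  # decode two frames per VAE pass
).frames[0]

export_to_gif(frames, "simple_output.gif")

If visible seams appear between frame windows, recent releases also accept tuning arguments such as `enable_free_noise(context_length=16, context_stride=4)`; treat those names as an assumption to verify against the installed diffusers version.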
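On the Gradio side, both interfaces pass `default=` to `gr.Textbox` and `gr.Slider`. That is the Gradio 2.x spelling; Gradio 3 and later take `value=`, and depending on the version the unknown `default` kwarg is either ignored with a warning or rejected with a TypeError. A minimal sketch of the tabbed wiring with the current spelling, using a stub in place of the pipelines so the UI can be tested on its own:

import gradio as gr

def generate_simple_video(prompt):
    # Stub standing in for the real AnimateDiff call; returns a pre-rendered GIF path.
    return "simple_output.gif"

demo2 = gr.Interface(
    fn=generate_simple_video,
    inputs=gr.Textbox(label="Prompt", value="An astronaut riding a horse on Mars."),  # value=, not default=
    outputs=gr.Image(label="Generated Simple Video"),
    title="Generate Simple Video with AnimateDiff",
    description="Generate a simple video using the AnimateDiffPipeline.",
)

demo = gr.TabbedInterface([demo2], ["Simple Video Generation"])
demo.launch()

gr.Image serves the returned GIF path as-is; gr.Video would be the alternative output component if the app later exports MP4 instead of GIF.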