seawolf2357 committed
Commit 35d6846 · verified · 1 Parent(s): cf0e959

Create app.py

Files changed (1):
  app.py +62 -0
app.py ADDED
@@ -0,0 +1,62 @@
+ import ast
+
+ import torch
+ import gradio as gr
+ from diffusers import AnimateDiffSparseControlNetPipeline, AutoencoderKL, MotionAdapter, SparseControlNetModel
+ from diffusers.schedulers import DPMSolverMultistepScheduler
+ from diffusers.utils import export_to_gif, load_image
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ def generate_video(prompt, negative_prompt, num_inference_steps, conditioning_frame_indices, controlnet_conditioning_scale):
+     # Note: all models are (re)loaded on every call; caching them outside this function would avoid repeated loading.
+     motion_adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-3", torch_dtype=torch.float16).to(device)
+     controlnet = SparseControlNetModel.from_pretrained("guoyww/animatediff-sparsectrl-scribble", torch_dtype=torch.float16).to(device)
+     vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16).to(device)
+     scheduler = DPMSolverMultistepScheduler.from_pretrained("SG161222/Realistic_Vision_V5.1_noVAE", subfolder="scheduler", beta_schedule="linear", algorithm_type="dpmsolver++", use_karras_sigmas=True)
+
+     pipe = AnimateDiffSparseControlNetPipeline.from_pretrained(
+         "SG161222/Realistic_Vision_V5.1_noVAE",
+         motion_adapter=motion_adapter,
+         controlnet=controlnet,
+         vae=vae,
+         scheduler=scheduler,
+         torch_dtype=torch.float16,
+     ).to(device)
+
+     pipe.load_lora_weights("guoyww/animatediff-motion-lora-v1-5-3", adapter_name="motion_lora")
+     pipe.fuse_lora(lora_scale=1.0)
+
+     image_files = [
+         "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-scribble-1.png",
+         "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-scribble-2.png",
+         "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-scribble-3.png"
+     ]
+     conditioning_frames = [load_image(img_file) for img_file in image_files]
+
+     # The frame indices arrive from the textbox as a string such as "[0, 8, 15]".
+     frame_indices = ast.literal_eval(conditioning_frame_indices)
+
+     video = pipe(
+         prompt=prompt,
+         negative_prompt=negative_prompt,
+         num_inference_steps=int(num_inference_steps),
+         conditioning_frames=conditioning_frames,
+         controlnet_conditioning_scale=controlnet_conditioning_scale,
+         controlnet_frame_indices=frame_indices,
+         generator=torch.Generator().manual_seed(1337),
+     ).frames[0]
+
+     export_to_gif(video, "output.gif")
+     return "output.gif"
+
+ demo = gr.Interface(
+     fn=generate_video,
+     inputs=[
+         gr.Textbox(label="Prompt", value="an aerial view of a cyberpunk city, night time, neon lights, masterpiece, high quality"),
+         gr.Textbox(label="Negative Prompt", value="low quality, worst quality, letterboxed"),
+         gr.Slider(label="Number of Inference Steps", minimum=1, maximum=100, step=1, value=25),
+         gr.Textbox(label="Conditioning Frame Indices", value="[0, 8, 15]"),
+         gr.Slider(label="ControlNet Conditioning Scale", minimum=0.1, maximum=2.0, step=0.1, value=1.0)
+     ],
+     outputs=gr.Image(label="Generated Video"),
+     title="Generate Video with AnimateDiff",
+     description="Generate a video using the AnimateDiffSparseControlNetPipeline."
+ )
+
+ demo.launch(server_name="0.0.0.0", server_port=7910)