seawolf2357 committed on
Commit 1a17233
1 Parent(s): 484aa2d

Update app.py

Files changed (1)
  1. app.py +17 -18
app.py CHANGED
@@ -10,14 +10,11 @@ def generate_video(prompt, negative_prompt, num_inference_steps, conditioning_fr
     motion_adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-3", torch_dtype=torch.float16).to(device)
     controlnet = SparseControlNetModel.from_pretrained("guoyww/animatediff-sparsectrl-scribble", torch_dtype=torch.float16).to(device)
     vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16).to(device)
-    scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config, beta_schedule="linear", algorithm_type="dpmsolver++", use_karras_sigmas=True)
-
     pipe = AnimateDiffSparseControlNetPipeline.from_pretrained(
         "SG161222/Realistic_Vision_V5.1_noVAE",
         motion_adapter=motion_adapter,
         controlnet=controlnet,
         vae=vae,
-        scheduler=scheduler,
         torch_dtype=torch.float16,
     ).to(device)
 
@@ -38,12 +35,13 @@ def generate_video(prompt, negative_prompt, num_inference_steps, conditioning_fr
         num_inference_steps=num_inference_steps,
         conditioning_frames=conditioning_frames,
         controlnet_conditioning_scale=controlnet_conditioning_scale,
-        controlnet_frame_indices=conditioning_frame_indices,
+        controlnet_frame_indices=[int(x) for x in conditioning_frame_indices.split(",")],
         generator=torch.Generator().manual_seed(1337),
-    ).frames[0]
+    ).frames
 
-    export_to_gif(video, "output.gif")
-    return "output.gif"
+    output_file = "output.gif"
+    export_to_gif(video, output_file)
+    return output_file
 
 def generate_simple_video(prompt):
     adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16).to(device)
@@ -64,29 +62,30 @@ def generate_simple_video(prompt):
         num_inference_steps=20,
         guidance_scale=7.0,
         decode_chunk_size=2,
-    ).frames[0]
+    ).frames
 
-    export_to_gif(frames, "simple_output.gif")
-    return "simple_output.gif"
+    output_file = "simple_output.gif"
+    export_to_gif(frames, output_file)
+    return output_file
 
 demo1 = gr.Interface(
     fn=generate_video,
     inputs=[
-        gr.Textbox(label="Prompt", default="an aerial view of a cyberpunk city, night time, neon lights, masterpiece, high quality"),
-        gr.Textbox(label="Negative Prompt", default="low quality, worst quality, letterboxed"),
-        gr.Slider(label="Number of Inference Steps", minimum=1, maximum=100, step=1, default=25),
-        gr.Textbox(label="Conditioning Frame Indices", default="[0, 8, 15]"),
-        gr.Slider(label="ControlNet Conditioning Scale", minimum=0.1, maximum=2.0, step=0.1, default=1.0)
+        gr.Textbox(label="Prompt", value="an aerial view of a cyberpunk city, night time, neon lights, masterpiece, high quality"),
+        gr.Textbox(label="Negative Prompt", value="low quality, worst quality, letterboxed"),
+        gr.Slider(label="Number of Inference Steps", minimum=1, maximum=100, step=1, value=25),
+        gr.Textbox(label="Conditioning Frame Indices", value="0, 8, 15"),
+        gr.Slider(label="ControlNet Conditioning Scale", minimum=0.1, maximum=2.0, step=0.1, value=1.0)
     ],
-    outputs=gr.Image(label="Generated Video"),
+    outputs=gr.Video(label="Generated Video"),
     title="Generate Video with AnimateDiffSparseControlNetPipeline",
     description="Generate a video using the AnimateDiffSparseControlNetPipeline."
 )
 
 demo2 = gr.Interface(
     fn=generate_simple_video,
-    inputs=gr.Textbox(label="Prompt", default="An astronaut riding a horse on Mars."),
-    outputs=gr.Image(label="Generated Simple Video"),
+    inputs=gr.Textbox(label="Prompt", value="An astronaut riding a horse on Mars."),
+    outputs=gr.Video(label="Generated Simple Video"),
     title="Generate Simple Video with AnimateDiff",
     description="Generate a simple video using the AnimateDiffPipeline."
 )
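Note on the frame-index change above: the "Conditioning Frame Indices" textbox now carries a plain comma-separated string (default "0, 8, 15"), and the updated call turns it into a list of ints with a list comprehension. A minimal standalone check of that parsing, using the same expression as the diff (nothing here is new API):

    # The Gradio textbox delivers a string such as the default "0, 8, 15".
    conditioning_frame_indices = "0, 8, 15"
    # Same expression as in the diff; int() tolerates the surrounding spaces.
    frame_indices = [int(x) for x in conditioning_frame_indices.split(",")]
    print(frame_indices)  # [0, 8, 15]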
 
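The same call also passes conditioning_frames, which the visible hunks do not define; they are prepared elsewhere in app.py. Purely as an illustration of the expected type (a list of PIL images, one per conditioned frame), a hypothetical placeholder:

    from PIL import Image

    # Hypothetical stand-in only: the real app builds the scribble conditioning
    # images outside the hunks shown above.
    conditioning_frames = [Image.new("RGB", (512, 512), "white") for _ in range(3)]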
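The diff defines demo1 and demo2 but does not show how they are served. One plausible way the rest of app.py could combine and launch them, sketched here as an assumption (gr.TabbedInterface and the tab names are not part of this commit):

    import gradio as gr

    # Assumed wiring, not shown in the diff: expose both demos as tabs.
    demo = gr.TabbedInterface(
        [demo1, demo2],
        tab_names=["SparseControlNet video", "Simple AnimateDiff video"],
    )

    if __name__ == "__main__":
        demo.launch()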