seawolf2357 committed
Commit 97d09e9 · verified · 1 Parent(s): a9f93ea

Update app.py

Files changed (1)
  1. app.py +11 -19
app.py CHANGED

@@ -3,7 +3,6 @@ import gradio as gr
 from diffusers import AnimateDiffSparseControlNetPipeline, AutoencoderKL, MotionAdapter, SparseControlNetModel, AnimateDiffPipeline, EulerAncestralDiscreteScheduler
 from diffusers.schedulers import DPMSolverMultistepScheduler
 from diffusers.utils import export_to_gif, load_image
-from peft import LoraConfig, get_peft_model
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
@@ -21,12 +20,6 @@ def generate_video(prompt, negative_prompt, num_inference_steps, conditioning_fr
     ).to(device)
 
     pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config, beta_schedule="linear", algorithm_type="dpmsolver++", use_karras_sigmas=True)
-
-    # Load the LoRA weights with PEFT
-    lora_config = LoraConfig(r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"])
-    pipe = get_peft_model(pipe, lora_config)
-    pipe.load_lora_weights("guoyww/animatediff-motion-lora-v1-5-3", adapter_name="motion_lora")
-    pipe.fuse_lora(lora_scale=1.0)
 
     image_files = [
         "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-scribble-1.png",
@@ -41,13 +34,12 @@ def generate_video(prompt, negative_prompt, num_inference_steps, conditioning_fr
         num_inference_steps=num_inference_steps,
         conditioning_frames=conditioning_frames,
         controlnet_conditioning_scale=controlnet_conditioning_scale,
-        controlnet_frame_indices=[int(x) for x in conditioning_frame_indices.split(",")],
+        controlnet_frame_indices=conditioning_frame_indices,
         generator=torch.Generator().manual_seed(1337),
-    ).frames
+    ).frames[0]
 
-    output_file = "output.gif"
-    export_to_gif(video, output_file)
-    return output_file
+    export_to_gif(video, "output.gif")
+    return "output.gif"
 
 def generate_simple_video(prompt):
     adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16).to(device)
@@ -68,11 +60,10 @@ def generate_simple_video(prompt):
         num_inference_steps=20,
         guidance_scale=7.0,
         decode_chunk_size=2,
-    ).frames
+    ).frames[0]
 
-    output_file = "simple_output.gif"
-    export_to_gif(frames, output_file)
-    return output_file
+    export_to_gif(frames, "simple_output.gif")
+    return "simple_output.gif"
 
 demo1 = gr.Interface(
     fn=generate_video,
@@ -80,10 +71,10 @@ demo1 = gr.Interface(
         gr.Textbox(label="Prompt", value="an aerial view of a cyberpunk city, night time, neon lights, masterpiece, high quality"),
         gr.Textbox(label="Negative Prompt", value="low quality, worst quality, letterboxed"),
         gr.Slider(label="Number of Inference Steps", minimum=1, maximum=100, step=1, value=25),
-        gr.Textbox(label="Conditioning Frame Indices", value="0, 8, 15"),
+        gr.Textbox(label="Conditioning Frame Indices", value="[0, 8, 15]"),
         gr.Slider(label="ControlNet Conditioning Scale", minimum=0.1, maximum=2.0, step=0.1, value=1.0)
     ],
-    outputs=gr.Video(label="Generated Video"),
+    outputs=gr.Image(label="Generated Video"),
     title="Generate Video with AnimateDiffSparseControlNetPipeline",
     description="Generate a video using the AnimateDiffSparseControlNetPipeline."
 )
@@ -91,12 +82,13 @@ demo1 = gr.Interface(
 demo2 = gr.Interface(
     fn=generate_simple_video,
     inputs=gr.Textbox(label="Prompt", value="An astronaut riding a horse on Mars."),
-    outputs=gr.Video(label="Generated Simple Video"),
+    outputs=gr.Image(label="Generated Simple Video"),
     title="Generate Simple Video with AnimateDiff",
     description="Generate a simple video using the AnimateDiffPipeline."
 )
 
 demo = gr.TabbedInterface([demo1, demo2], ["Advanced Video Generation", "Simple Video Generation"])
 
+
 demo.launch()
 #demo.launch(server_name="0.0.0.0", server_port=7910)
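
For context on the `.frames` → `.frames[0]` change: AnimateDiff pipelines return a batch of videos, so `output.frames` is (under the usual output type) a list of frame lists, while `export_to_gif` expects a single list of PIL images. A minimal, self-contained sketch of that shape, built with dummy frames rather than a real pipeline run, so the output layout is an assumption and not code from this commit:

from PIL import Image
from diffusers.utils import export_to_gif

# frames_batch mimics pipeline_output.frames: one entry per generated video,
# each entry being the list of PIL frames for that video.
frames_batch = [[Image.new("RGB", (64, 64), (i * 16, 0, 0)) for i in range(8)]]
export_to_gif(frames_batch[0], "demo.gif")  # export the first (and only) video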
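The updated call passes the Textbox value (now defaulting to "[0, 8, 15]") straight through as controlnet_frame_indices, whereas the old code split the string and converted each entry to int. Assuming the pipeline still expects a list of ints here, as the removed parsing suggests, a small hypothetical helper could do that conversion; parse_frame_indices below is an illustration only, not part of the commit:

import ast

def parse_frame_indices(text: str) -> list[int]:
    # Accepts either "[0, 8, 15]" or "0, 8, 15" and returns [0, 8, 15].
    raw = text.strip()
    values = ast.literal_eval(raw if raw.startswith("[") else f"[{raw}]")
    return [int(v) for v in values]

print(parse_frame_indices("[0, 8, 15]"))  # [0, 8, 15]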