Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -3,6 +3,7 @@ import gradio as gr
|
|
3 |
from diffusers import AnimateDiffSparseControlNetPipeline, AutoencoderKL, MotionAdapter, SparseControlNetModel, AnimateDiffPipeline, EulerAncestralDiscreteScheduler
|
4 |
from diffusers.schedulers import DPMSolverMultistepScheduler
|
5 |
from diffusers.utils import export_to_gif, load_image
|
|
|
6 |
|
7 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
8 |
|
@@ -10,6 +11,7 @@ def generate_video(prompt, negative_prompt, num_inference_steps, conditioning_fr
|
|
10 |
motion_adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-3", torch_dtype=torch.float16).to(device)
|
11 |
controlnet = SparseControlNetModel.from_pretrained("guoyww/animatediff-sparsectrl-scribble", torch_dtype=torch.float16).to(device)
|
12 |
vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16).to(device)
|
|
|
13 |
pipe = AnimateDiffSparseControlNetPipeline.from_pretrained(
|
14 |
"SG161222/Realistic_Vision_V5.1_noVAE",
|
15 |
motion_adapter=motion_adapter,
|
@@ -19,6 +21,10 @@ def generate_video(prompt, negative_prompt, num_inference_steps, conditioning_fr
|
|
19 |
).to(device)
|
20 |
|
21 |
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config, beta_schedule="linear", algorithm_type="dpmsolver++", use_karras_sigmas=True)
|
|
|
|
|
|
|
|
|
22 |
pipe.load_lora_weights("guoyww/animatediff-motion-lora-v1-5-3", adapter_name="motion_lora")
|
23 |
pipe.fuse_lora(lora_scale=1.0)
|
24 |
|
|
|
3 |
from diffusers import AnimateDiffSparseControlNetPipeline, AutoencoderKL, MotionAdapter, SparseControlNetModel, AnimateDiffPipeline, EulerAncestralDiscreteScheduler
|
4 |
from diffusers.schedulers import DPMSolverMultistepScheduler
|
5 |
from diffusers.utils import export_to_gif, load_image
|
6 |
+
from peft import LoraConfig, get_peft_model
|
7 |
|
8 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
9 |
|
|
|
11 |
motion_adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-3", torch_dtype=torch.float16).to(device)
|
12 |
controlnet = SparseControlNetModel.from_pretrained("guoyww/animatediff-sparsectrl-scribble", torch_dtype=torch.float16).to(device)
|
13 |
vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16).to(device)
|
14 |
+
|
15 |
pipe = AnimateDiffSparseControlNetPipeline.from_pretrained(
|
16 |
"SG161222/Realistic_Vision_V5.1_noVAE",
|
17 |
motion_adapter=motion_adapter,
|
|
|
21 |
).to(device)
|
22 |
|
23 |
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config, beta_schedule="linear", algorithm_type="dpmsolver++", use_karras_sigmas=True)
|
24 |
+
|
25 |
+
# Load the LoRA using PEFT
|
26 |
+
lora_config = LoraConfig(r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"])
|
27 |
+
pipe = get_peft_model(pipe, lora_config)
|
28 |
pipe.load_lora_weights("guoyww/animatediff-motion-lora-v1-5-3", adapter_name="motion_lora")
|
29 |
pipe.fuse_lora(lora_scale=1.0)
|
30 |
|