Update app.py: Gradio demo for AnimateDiff + IP-Adapter GIF generation
app.py
CHANGED
@@ -1,11 +1,69 @@
(The previous 11-line version, an import stub beginning with import torch and partial diffusers imports, is replaced in full by the file below.)
import gradio as gr
import torch
from diffusers import AnimateDiffPipeline, MotionAdapter, DDIMScheduler
from diffusers.utils import export_to_gif
import random
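# NOTE: besides gradio and torch, this Space likely needs diffusers with
# AnimateDiff support, accelerate (for enable_model_cpu_offload), and peft
# (for load_lora_weights with adapter_name).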
def generate_gif(image, animation_type):
    # Load the motion adapter
    adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16)

    # Load an SD 1.5-based finetuned model
    model_id = "SG161222/Realistic_Vision_V6.0_B1_noVAE"
    pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter, torch_dtype=torch.float16)
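    # NOTE: everything above is rebuilt from scratch on every request; see the
    # sketch after this file for a variant that loads the pipeline only once.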
    # Scheduler setup: DDIM with linear betas and trailing timestep spacing
    scheduler = DDIMScheduler(
        clip_sample=False,
        beta_start=0.00085,
        beta_end=0.012,
        beta_schedule="linear",
        timestep_spacing="trailing",
        steps_offset=1,
    )
    pipe.scheduler = scheduler

    # Enable memory savings
    pipe.enable_vae_slicing()
    pipe.enable_model_cpu_offload()
    # Load the IP-Adapter so the uploaded image conditions the generation
    pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")

    # Load the motion LoRA for the selected camera move
    # (e.g. guoyww/animatediff-motion-lora-zoom-out)
    pipe.load_lora_weights(f"guoyww/animatediff-motion-lora-{animation_type}", adapter_name=animation_type)
    # Pick a random seed so each run gives a different animation
    seed = random.randint(0, 2**32 - 1)
    prompt = "best quality, high quality, trending on artstation"

    # Activate the selected motion LoRA at 0.75 strength
    adapter_weight = [0.75]
    pipe.set_adapters([animation_type], adapter_weights=adapter_weight)
    # Run the pipeline and export the frames as a GIF
    output = pipe(
        prompt=prompt,
        num_frames=16,
        guidance_scale=7.5,
        num_inference_steps=30,
        ip_adapter_image=image,
        generator=torch.Generator("cpu").manual_seed(seed),
    )
    frames = output.frames[0]

    gif_path = "output_animation.gif"
    export_to_gif(frames, gif_path)
    return gif_path
# Gradio interface
iface = gr.Interface(
    fn=generate_gif,
    inputs=[gr.Image(type="pil"), gr.Radio(["zoom-out", "tilt-up", "pan-left"], label="Motion type")],
    outputs=gr.Image(type="filepath", label="Generated GIF"),
    title="AnimateDiff + IP Adapter Demo",
    description="Upload an image and select a motion module type to generate a GIF!"
)

iface.launch(debug=True, share=True)
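A possible follow-up, not part of this commit: generate_gif rebuilds the motion adapter, the base pipeline, the IP-Adapter, and the motion LoRA on every request, so each click pays the full model-loading cost. Below is a minimal sketch of loading everything once at import time and only switching motion LoRAs per request. It reuses the exact model names and parameters from app.py; the ANIMATION_TYPES list and the up-front LoRA loop are new, and it assumes all three LoRAs can stay registered on the pipeline at once.

import random

import torch
from diffusers import AnimateDiffPipeline, MotionAdapter, DDIMScheduler
from diffusers.utils import export_to_gif

ANIMATION_TYPES = ["zoom-out", "tilt-up", "pan-left"]

# One-time setup at import: motion adapter, pipeline, scheduler, IP-Adapter.
adapter = MotionAdapter.from_pretrained(
    "guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16
)
pipe = AnimateDiffPipeline.from_pretrained(
    "SG161222/Realistic_Vision_V6.0_B1_noVAE",
    motion_adapter=adapter,
    torch_dtype=torch.float16,
)
pipe.scheduler = DDIMScheduler(
    clip_sample=False,
    beta_start=0.00085,
    beta_end=0.012,
    beta_schedule="linear",
    timestep_spacing="trailing",
    steps_offset=1,
)
pipe.enable_vae_slicing()
pipe.enable_model_cpu_offload()
pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")

# Register every motion LoRA up front; requests only toggle between them.
for name in ANIMATION_TYPES:
    pipe.load_lora_weights(f"guoyww/animatediff-motion-lora-{name}", adapter_name=name)

def generate_gif(image, animation_type):
    # Activate only the requested motion LoRA for this run.
    pipe.set_adapters([animation_type], adapter_weights=[0.75])
    seed = random.randint(0, 2**32 - 1)
    output = pipe(
        prompt="best quality, high quality, trending on artstation",
        num_frames=16,
        guidance_scale=7.5,
        num_inference_steps=30,
        ip_adapter_image=image,
        generator=torch.Generator("cpu").manual_seed(seed),
    )
    gif_path = "output_animation.gif"
    export_to_gif(output.frames[0], gif_path)
    return gif_path

The gr.Interface wiring stays exactly as above; only the handler body shrinks. One caveat: set_adapters mutates shared pipeline state, so concurrent requests would need serializing, e.g. via Gradio's request queue or an explicit lock.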