</div>

</div>

### Run in Command line
- animatediff_controlnet_sdxl_run_script.py

```python
import sys
sys.path.insert(0, "diffusers-sdxl-controlnet/examples/community/")
from animatediff_controlnet_sdxl import *

import argparse
from moviepy.editor import VideoFileClip, ImageSequenceClip
import os
import torch
from diffusers.models import MotionAdapter
from diffusers import DDIMScheduler, AutoPipelineForText2Image, ControlNetModel
from diffusers.utils import export_to_gif
from PIL import Image
from controlnet_aux.processor import Processor

# Initialize the MotionAdapter; the ControlNetModel is created inside initialize_pipeline()
adapter = MotionAdapter.from_pretrained("a-r-r-o-w/animatediff-motion-adapter-sdxl-beta", torch_dtype=torch.float16)

def initialize_pipeline(model_id):
    scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler", clip_sample=False, timestep_spacing="linspace", beta_schedule="linear", steps_offset=1)
    controlnet = ControlNetModel.from_pretrained("thibaud/controlnet-openpose-sdxl-1.0", torch_dtype=torch.float16).to("cuda")

    # Initialize the AnimateDiffSDXLControlnetPipeline
    pipe = AnimateDiffSDXLControlnetPipeline.from_pretrained(
        model_id,
        controlnet=controlnet,
        motion_adapter=adapter,
        scheduler=scheduler,
        torch_dtype=torch.float16,
    ).to("cuda")
    pipe.enable_vae_slicing()
    pipe.enable_vae_tiling()
    return pipe

def split_video_into_frames(input_video_path, num_frames, temp_folder='temp_frames'):
    """
    Resample the video to the requested number of frames while keeping the original frame rate.

    :param input_video_path: path to the input video file
    :param num_frames: target number of frames
    :param temp_folder: path to the temporary frame folder
    """
    clip = VideoFileClip(input_video_path)
    original_duration = clip.duration
    segment_duration = original_duration / num_frames

    if not os.path.exists(temp_folder):
        os.makedirs(temp_folder)

    # Save one frame per segment as a PNG
    for i in range(num_frames):
        frame_time = i * segment_duration
        frame_path = os.path.join(temp_folder, f'frame_{i:04d}.png')
        clip.save_frame(frame_path, t=frame_time)

    frame_paths = [os.path.join(temp_folder, f'frame_{i:04d}.png') for i in range(num_frames)]
    final_clip = ImageSequenceClip(frame_paths, fps=clip.fps)
    final_clip.write_videofile("resampled_video.mp4", codec='libx264')

    print(f"The resampled video has been saved to resampled_video.mp4 with {num_frames} frames at the original frame rate.")

def generate_video_with_prompt(input_video_path, prompt, model_id, gif_output_path, num_frames=16, keep_imgs=False, temp_folder='temp_frames'):
    """
    Generate an animation guided by a text prompt.

    :param input_video_path: path to the input video file
    :param prompt: text prompt
    :param model_id: model ID
    :param gif_output_path: output path for the GIF
    :param num_frames: target number of frames
    :param keep_imgs: whether to keep the temporary frame images
    :param temp_folder: path to the temporary frame folder
    """
    split_video_into_frames(input_video_path, num_frames, temp_folder)

    folder_path = temp_folder
    frames = os.listdir(folder_path)
    frames = list(filter(lambda x: x.endswith(".png"), frames))
    frames.sort()
    conditioning_frames = list(map(lambda x: Image.open(os.path.join(folder_path, x)).resize((1024, 1024)), frames))[:num_frames]

    # Extract OpenPose conditioning images from the frames
    p2 = Processor("openpose")
    cn2 = [p2(frame) for frame in conditioning_frames]

    negative_prompt = "bad quality, worst quality, jpeg artifacts, ugly"
    generator = torch.Generator(device="cpu").manual_seed(0)

    pipe = initialize_pipeline(model_id)

    output = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=50,
        guidance_scale=20,
        controlnet_conditioning_scale=1.0,
        width=512,
        height=768,
        num_frames=num_frames,
        conditioning_frames=cn2,
        generator=generator,
    )

    frames = output.frames[0]
    export_to_gif(frames, gif_output_path)

    print(f"The generated GIF has been saved to {gif_output_path}")

    if not keep_imgs:
        # Remove the temporary frame folder
        import shutil
        shutil.rmtree(temp_folder)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Generate an animation guided by a text prompt")
    parser.add_argument("input_video", help="path to the input video file")
    parser.add_argument("prompt", help="text prompt")
    parser.add_argument("model_id", help="model ID")
    parser.add_argument("gif_output_path", help="output path for the GIF")
    parser.add_argument("--num_frames", type=int, default=16, help="target number of frames")
    parser.add_argument("--keep_imgs", action="store_true", help="keep the temporary frame images")
    parser.add_argument("--temp_folder", default='temp_frames', help="path to the temporary frame folder")

    args = parser.parse_args()

    generate_video_with_prompt(args.input_video, args.prompt, args.model_id, args.gif_output_path, args.num_frames, args.keep_imgs, args.temp_folder)
```

```bash
python animatediff_controlnet_sdxl_run_script.py girl_beach.mp4 \
  "solo,Xiangling\(genshin impact\),1girl,full body professional photograph of a stunning detailed, drink tea use chinese cup" \
  "svjack/GenshinImpact_XL_Base" \
  xiangling_tea_animation.gif --num_frames 16 --temp_folder temp_frames
```
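
If you prefer to drive the script from Python rather than the shell, the entry point defined above can be imported and called directly. The sketch below assumes the script is saved as `animatediff_controlnet_sdxl_run_script.py` in the current working directory and reuses the file names and model ID from the command-line example.

```python
# Minimal sketch: call generate_video_with_prompt() directly instead of going
# through argparse. Importing the script also loads the MotionAdapter defined
# at module level, so this needs the same environment as the CLI run.
from animatediff_controlnet_sdxl_run_script import generate_video_with_prompt

generate_video_with_prompt(
    input_video_path="girl_beach.mp4",
    prompt=r"solo,Xiangling\(genshin impact\),1girl,full body professional photograph of a stunning detailed, drink tea use chinese cup",
    model_id="svjack/GenshinImpact_XL_Base",
    gif_output_path="xiangling_tea_animation.gif",
    num_frames=16,
    keep_imgs=False,
    temp_folder="temp_frames",
)
```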

- Pose: girl_beach.mp4

<video controls autoplay src="https://cdn-uploads.huggingface.co/production/uploads/634dffc49b777beec3bc6448/pYx23VyLNkLk3YxAAqu5i.mp4"></video>

- Output: xiangling_tea_animation.gif

![image/gif](https://cdn-uploads.huggingface.co/production/uploads/634dffc49b777beec3bc6448/qUZOvGs5rzxN8zaZ4Xp3s.gif)

- Upscaled:

<video controls autoplay src="https://cdn-uploads.huggingface.co/production/uploads/634dffc49b777beec3bc6448/uwUDYOPiZbHuq5v6jWADr.mp4"></video>
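
The upscaling pass that produced the video above is not part of the script. As a rough illustration only, the sketch below reads the generated GIF back with moviepy (1.x, as used in the script), applies a naive 2x resize, and writes an MP4; the output file name is hypothetical, and a dedicated video super-resolution model would give far better results than plain resizing.

```python
# Hypothetical post-processing sketch (not the upscaler used for the sample above):
# convert the generated GIF to an MP4 and naively resize it with moviepy 1.x.
from moviepy.editor import VideoFileClip

clip = VideoFileClip("xiangling_tea_animation.gif")
upscaled = clip.resize(2.0)  # simple 2x resize, not true super-resolution
upscaled.write_videofile("xiangling_tea_animation_2x.mp4", codec="libx264", fps=clip.fps)
```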
## Conclusion
This script demonstrates how to use the `diffusers-sdxl-controlnet` library to generate animated images with ControlNet and SDXL models. By following the steps outlined above, you can create and visualize your own animated sequences.