'''
!huggingface-cli download \
    --repo-type dataset svjack/video-dataset-Lily-Bikini-rm-background-organized \
    --local-dir video-dataset-Lily-Bikini-rm-background-organized

import re

def insert_content_in_string(insert_content, character_name, gender=None):
    """
    Insert content at a specific position in the base prompt string.

    :param insert_content: the content to insert
    :param character_name: the character name
    :param gender: optional gender tag, either "1boy" or "1girl"
    :return: the original string and the modified string
    """
    # Build the base prompt from character_name and gender.
    original_string = rf"solo,{character_name}\(genshin impact\),{gender if gender else '1boy'},highres,"
    # Match the character name and append insert_content right after it.
    target_pattern = re.escape(character_name)
    modified_string = re.sub(target_pattern, r'\g<0>' + insert_content, original_string)
    return original_string, modified_string

from datasets import load_dataset

character_name = "Xiangling"
gender = "1girl"  # optional
prompt_list = load_dataset("svjack/daily-actions-en-zh")["train"].to_pandas()["en"].map(
    lambda x: ", {}".format(x)
).map(
    lambda insert_content: insert_content_in_string(insert_content, character_name, gender)[-1]
).dropna().drop_duplicates().values.tolist()
print(len(prompt_list))

import pandas as pd
import pathlib

reference_video_list = pd.Series(
    list(pathlib.Path("video-dataset-Lily-Bikini-rm-background-organized").rglob("*.mp4"))
).map(str).values.tolist()
print(len(reference_video_list))

from itertools import product

pd.DataFrame(list(product(*[reference_video_list, prompt_list])))[[1, 0]].rename(
    columns={
        1: "prompt",
        0: "input_video"
    }
).to_csv("xiangling_video_seed.csv", index=False)

!python produce_gif_script.py xiangling_video_seed.csv "svjack/GenshinImpact_XL_Base" xiangling_gif_dir \
    --num_frames 16 --temp_folder temp_frames --seed 0 --controlnet_conditioning_scale 0.3
'''

import sys
sys.path.insert(0, "diffusers-sdxl-controlnet/examples/community/")
# Provides AnimateDiffSDXLControlnetPipeline from the diffusers community examples.
from animatediff_controlnet_sdxl import *

import argparse
import os
import random
import shutil

import pandas as pd
import torch
from controlnet_aux.processor import Processor
from diffusers import ControlNetModel, DDIMScheduler
from diffusers.models import MotionAdapter
from diffusers.utils import export_to_gif
from moviepy.editor import ImageSequenceClip, VideoFileClip
from PIL import Image
from tqdm import tqdm

# Initialize the MotionAdapter once at import time; the ControlNet and the
# full pipeline are created lazily in initialize_pipeline().
adapter = MotionAdapter.from_pretrained("a-r-r-o-w/animatediff-motion-adapter-sdxl-beta", torch_dtype=torch.float16)

def initialize_pipeline(model_id):
    scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler", clip_sample=False,
                                              timestep_spacing="linspace", beta_schedule="linear", steps_offset=1)
    controlnet = ControlNetModel.from_pretrained("thibaud/controlnet-openpose-sdxl-1.0",
                                                 torch_dtype=torch.float16).to("cuda")
    # Build the AnimateDiffSDXLControlnetPipeline.
    pipe = AnimateDiffSDXLControlnetPipeline.from_pretrained(
        model_id,
        controlnet=controlnet,
        motion_adapter=adapter,
        scheduler=scheduler,
        torch_dtype=torch.float16,
    ).to("cuda")
    pipe.enable_vae_slicing()
    pipe.enable_vae_tiling()
    return pipe

# Global pipeline, initialized on first use.
pipe = None

def split_video_into_frames(input_video_path, num_frames, temp_folder='temp_frames'):
    """
    Resample a video down to a fixed number of frames while keeping the
    original frame rate.

    :param input_video_path: path to the input video file
    :param num_frames: target number of frames
    :param temp_folder: path to the temporary frame folder
    """
    clip = VideoFileClip(input_video_path)
    original_duration = clip.duration
    segment_duration = original_duration / num_frames
    if not os.path.exists(temp_folder):
        os.makedirs(temp_folder)
    for i in range(num_frames):
        frame_time = i * segment_duration
        frame_path = os.path.join(temp_folder, f'frame_{i:04d}.png')
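        # Sample one frame at the start of each of the num_frames equal time
        # segments; save_frame() grabs the frame nearest to t seconds.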
        clip.save_frame(frame_path, t=frame_time)
    frame_paths = [os.path.join(temp_folder, f'frame_{i:04d}.png') for i in range(num_frames)]
    # Side effect: also write a resampled preview video at the original frame rate.
    final_clip = ImageSequenceClip(frame_paths, fps=clip.fps)
    final_clip.write_videofile("resampled_video.mp4", codec='libx264')
    print(f"The resampled video has been saved to resampled_video.mp4 with {num_frames} frames at the original frame rate.")

def generate_video_with_prompt(input_video_path, prompt, model_id, gif_output_path, seed=0, num_frames=16,
                               keep_imgs=False, temp_folder='temp_frames', num_inference_steps=50,
                               guidance_scale=20, controlnet_conditioning_scale=0.5, width=512, height=768):
    """
    Generate a video guided by a text prompt.

    :param input_video_path: path to the input video file
    :param prompt: text prompt
    :param model_id: model ID
    :param gif_output_path: output GIF path
    :param seed: random seed
    :param num_frames: target number of frames
    :param keep_imgs: whether to keep the temporary frame images
    :param temp_folder: path to the temporary frame folder
    :param num_inference_steps: number of inference steps
    :param guidance_scale: guidance scale
    :param controlnet_conditioning_scale: ControlNet conditioning scale
    :param width: output width
    :param height: output height
    """
    split_video_into_frames(input_video_path, num_frames, temp_folder)
    folder_path = temp_folder
    frames = os.listdir(folder_path)
    frames = list(filter(lambda x: x.endswith(".png"), frames))
    frames.sort()
    # The OpenPose processor is fed square 1024x1024 inputs; the pipeline
    # itself renders at (width, height).
    conditioning_frames = list(map(lambda x: Image.open(os.path.join(folder_path, x)).resize((1024, 1024)), frames))[:num_frames]
    p2 = Processor("openpose")
    cn2 = [p2(frame) for frame in conditioning_frames]
    negative_prompt = "bad quality, worst quality, jpeg artifacts, ugly"
    generator = torch.Generator(device="cuda").manual_seed(seed)
    global pipe
    if pipe is None:
        pipe = initialize_pipeline(model_id)
    output = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        controlnet_conditioning_scale=controlnet_conditioning_scale,
        width=width,
        height=height,
        num_frames=num_frames,
        conditioning_frames=cn2,
        generator=generator
    )
    frames = output.frames[0]
    export_to_gif(frames, gif_output_path)
    print(f"The generated GIF has been saved to {gif_output_path}")
    if not keep_imgs:
        # Remove the temporary frame folder.
        shutil.rmtree(temp_folder)

def sanitize_prompt(prompt):
    """
    Replace any character that is not alphanumeric or a comma (including
    spaces) with an underscore, so the prompt is safe to use as a filename.
    """
    return "".join([c if c.isalnum() or c in [",", ","] else '_' for c in prompt])

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Generate videos guided by text prompts")
    parser.add_argument("csv_file", help="path to the CSV file")
    parser.add_argument("model_id", help="model ID")
    parser.add_argument("output_dir", help="GIF output directory")
    parser.add_argument("--seed", type=int, default=0, help="random seed")
    parser.add_argument("--num_frames", type=int, default=16, help="target number of frames")
    parser.add_argument("--keep_imgs", action="store_true", help="keep the temporary frame images")
    parser.add_argument("--temp_folder", default='temp_frames', help="path to the temporary frame folder")
    parser.add_argument("--num_inference_steps", type=int, default=50, help="number of inference steps")
    parser.add_argument("--guidance_scale", type=float, default=20.0, help="guidance scale")
    parser.add_argument("--controlnet_conditioning_scale", type=float, default=0.5, help="ControlNet conditioning scale")
    parser.add_argument("--width", type=int, default=512, help="output width")
    parser.add_argument("--height", type=int, default=768, help="output height")
    args = parser.parse_args()

    # Seed the RNG so the per-row random seeds below are reproducible across runs.
    random.seed(args.seed)

    # Read the CSV of (prompt, input_video) pairs.
    df = pd.read_csv(args.csv_file)
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    for index, row in tqdm(df.iterrows(), total=df.shape[0]):
        input_video = row['input_video']
        prompt = row['prompt']
        # Draw a fresh random seed for each row.
        seed = random.randint(0, 2**32 - 1)
        # Sanitize the prompt for use in the output filename.
        sanitized_prompt = sanitize_prompt(prompt)
        # Build the GIF output path, embedding the seed.
        gif_output_path = os.path.join(args.output_dir, f"{sanitized_prompt}_seed_{seed}.gif")
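        # Run the full pipeline for this row: resample the reference video,
        # extract OpenPose conditioning frames, then render the animated GIF.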
        generate_video_with_prompt(
            input_video, prompt, args.model_id, gif_output_path,
            seed=seed,
            num_frames=args.num_frames,
            keep_imgs=args.keep_imgs,
            temp_folder=args.temp_folder,
            num_inference_steps=args.num_inference_steps,
            guidance_scale=args.guidance_scale,
            controlnet_conditioning_scale=args.controlnet_conditioning_scale,
            width=args.width,
            height=args.height,
        )
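# Library-style usage (a minimal sketch; the reference clip, prompt, and output
# path below are illustrative assumptions, not values shipped with this script):
#
#   from produce_gif_script import generate_video_with_prompt
#
#   generate_video_with_prompt(
#       input_video_path="reference_clip.mp4",  # any short pose-reference video
#       prompt=r"solo,Xiangling\(genshin impact\),1girl,highres, stirring a pot",
#       model_id="svjack/GenshinImpact_XL_Base",
#       gif_output_path="xiangling_stirring_a_pot.gif",
#       seed=0,
#       num_frames=16,
#   )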