diff --git a/.DS_Store b/.DS_Store deleted file mode 100644 index 3328393f003be07db01ea0f13d8e82692572ccbf..0000000000000000000000000000000000000000 Binary files a/.DS_Store and /dev/null differ diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..82f927558a3dff0ea8c20858856e70779fd02c93 --- /dev/null +++ b/.gitignore @@ -0,0 +1,162 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/latest/usage/project/#working-with-version-control +.pdm.toml +.pdm-python +.pdm-build/ + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. 
For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ diff --git a/__asset__/.DS_Store b/__asset__/.DS_Store deleted file mode 100644 index 9bc6de3f8adf5670db95a496f5aab69517a8777f..0000000000000000000000000000000000000000 Binary files a/__asset__/.DS_Store and /dev/null differ diff --git a/__asset__/images/.DS_Store b/__asset__/images/.DS_Store deleted file mode 100644 index fce37364e609a626cfc1b26330ea784ace67552f..0000000000000000000000000000000000000000 Binary files a/__asset__/images/.DS_Store and /dev/null differ diff --git a/__asset__/images/camera/.DS_Store b/__asset__/images/camera/.DS_Store deleted file mode 100644 index 5008ddfcf53c02e82d7eee2e57c38e5672ef89f6..0000000000000000000000000000000000000000 Binary files a/__asset__/images/camera/.DS_Store and /dev/null differ diff --git a/__asset__/images/object/.DS_Store b/__asset__/images/object/.DS_Store deleted file mode 100644 index 5008ddfcf53c02e82d7eee2e57c38e5672ef89f6..0000000000000000000000000000000000000000 Binary files a/__asset__/images/object/.DS_Store and /dev/null differ diff --git a/__asset__/trajs/.DS_Store b/__asset__/trajs/.DS_Store deleted file mode 100644 index fce37364e609a626cfc1b26330ea784ace67552f..0000000000000000000000000000000000000000 Binary files a/__asset__/trajs/.DS_Store and /dev/null differ diff --git a/__asset__/trajs/camera/.DS_Store b/__asset__/trajs/camera/.DS_Store deleted file mode 100644 index 5008ddfcf53c02e82d7eee2e57c38e5672ef89f6..0000000000000000000000000000000000000000 Binary files a/__asset__/trajs/camera/.DS_Store and /dev/null differ diff --git a/__asset__/trajs/object/.DS_Store b/__asset__/trajs/object/.DS_Store deleted file mode 100644 index 5008ddfcf53c02e82d7eee2e57c38e5672ef89f6..0000000000000000000000000000000000000000 Binary files a/__asset__/trajs/object/.DS_Store and /dev/null differ diff --git a/app.py b/app.py index f4a08c459d7bf09aefbd50972773604798652ac6..c8f41b9c5072786e20a61f326aaae8b860532254 100644 --- a/app.py +++ b/app.py @@ -1,35 +1,35 @@ +import json import os -import sys - - -print("Installing correct gradio version...") -os.system("pip uninstall -y gradio") -os.system("pip install gradio==4.38.1") -print("Installing Finished!") - +import uuid +import cv2 import gradio as gr import numpy as np -import cv2 -import uuid +import spaces import torch import torchvision -import json -import spaces - -from PIL import Image +from diffusers import AutoencoderKL, DDIMScheduler +from einops import rearrange +from huggingface_hub import hf_hub_download from omegaconf import OmegaConf -from einops import rearrange, repeat -from torchvision import transforms,utils +from PIL import Image +from torchvision import transforms from transformers import CLIPTextModel, CLIPTokenizer -from diffusers import AutoencoderKL, DDIMScheduler -from pipelines.pipeline_imagecoductor import ImageConductorPipeline from modules.unet import UNet3DConditionFlowModel -from utils.gradio_utils import ensure_dirname, split_filename, visualize_drag, image2pil -from utils.utils import create_image_controlnet, create_flow_controlnet, interpolate_trajectory, load_weights, load_model, bivariate_Gaussian, save_videos_grid +from pipelines.pipeline_imagecoductor import ImageConductorPipeline +from utils.gradio_utils import ensure_dirname, image2pil, split_filename, visualize_drag from utils.lora_utils import add_LoRA_to_controlnet -from utils.visualizer import Visualizer, vis_flow_to_video +from utils.utils import ( + bivariate_Gaussian, + 
create_flow_controlnet, + create_image_controlnet, + interpolate_trajectory, + load_model, + load_weights, +) +from utils.visualizer import vis_flow_to_video + #### Description #### title = r"""<h1 align="center">CustomNet: Object Customization with Variable-Viewpoints in Text-to-Image Diffusion Models</h1>""" @@ -41,7 +41,7 @@ head = r""" <a href='https://liyaowei-stu.github.io/project/ImageConductor/'><img src='https://img.shields.io/badge/Project_Page-ImgaeConductor-green' alt='Project Page'></a> <a href='https://arxiv.org/pdf/2406.15339'><img src='https://img.shields.io/badge/Paper-Arxiv-blue'></a> <a href='https://github.com/liyaowei-stu/ImageConductor'><img src='https://img.shields.io/badge/Code-Github-orange'></a> - + </div> </br> @@ -49,7 +49,6 @@ head = r""" """ - descriptions = r""" Official Gradio Demo for <a href='https://github.com/liyaowei-stu/ImageConductor'><b>Image Conductor: Precision Control for Interactive Video Synthesis</b></a>.<br> 🧙Image Conductor enables precise, fine-grained control for generating motion-controllable videos from images, advancing the practical application of interactive video synthesis.<br> @@ -66,7 +65,7 @@ instructions = r""" """ citation = r""" -If Image Conductor is helpful, please help to ⭐ the <a href='https://github.com/liyaowei-stu/ImageConductor' target='_blank'>Github Repo</a>. Thanks! +If Image Conductor is helpful, please help to ⭐ the <a href='https://github.com/liyaowei-stu/ImageConductor' target='_blank'>Github Repo</a>. Thanks! [](https://github.com/liyaowei-stu/ImageConductor) --- @@ -75,7 +74,7 @@ If Image Conductor is helpful, please help to ⭐ the <a href='https://github.co If our work is useful for your research, please consider citing: ```bibtex @misc{li2024imageconductor, - title={Image Conductor: Precision Control for Interactive Video Synthesis}, + title={Image Conductor: Precision Control for Interactive Video Synthesis}, author={Li, Yaowei and Wang, Xintao and Zhang, Zhaoyang and Wang, Zhouxia and Yuan, Ziyang and Xie, Liangbin and Zou, Yuexian and Shan, Ying}, year={2024}, eprint={2406.15339}, @@ -90,46 +89,19 @@ If you have any questions, please feel free to reach me out at <b>ywl@stu.pku.ed # """ -os.makedirs("models/personalized") -os.makedirs("models/sd1-5") - -if not os.path.exists("models/flow_controlnet.ckpt"): - os.system(f'wget -q https://huggingface.co/TencentARC/ImageConductor/resolve/main/flow_controlnet.ckpt?download=true -P models/') - os.system(f'mv models/flow_controlnet.ckpt?download=true models/flow_controlnet.ckpt') - print("flow_controlnet Download!", ) - -if not os.path.exists("models/image_controlnet.ckpt"): - os.system(f'wget -q https://huggingface.co/TencentARC/ImageConductor/resolve/main/image_controlnet.ckpt?download=true -P models/') - os.system(f'mv models/image_controlnet.ckpt?download=true models/image_controlnet.ckpt') - print("image_controlnet Download!", ) +flow_controlnet_path = hf_hub_download("TencentARC/ImageConductor", "flow_controlnet.ckpt") +image_controlnet_path = hf_hub_download("TencentARC/ImageConductor", "image_controlnet.ckpt") +unet_path = hf_hub_download("TencentARC/ImageConductor", "unet.ckpt") -if not os.path.exists("models/unet.ckpt"): - os.system(f'wget -q https://huggingface.co/TencentARC/ImageConductor/resolve/main/unet.ckpt?download=true -P models/') - os.system(f'mv models/unet.ckpt?download=true models/unet.ckpt') - print("unet Download!", ) +helloobjects_path = hf_hub_download("TencentARC/ImageConductor", "helloobjects_V12c.safetensors") +tusun_path = 
hf_hub_download("TencentARC/ImageConductor", "TUSUN.safetensors") - +os.makedirs("models/sd1-5", exist_ok=True) +sd15_config_path = hf_hub_download("runwayml/stable-diffusion-v1-5", "config.json", subfolder="unet") if not os.path.exists("models/sd1-5/config.json"): - os.system(f'wget -q https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/unet/config.json?download=true -P models/sd1-5/') - os.system(f'mv models/sd1-5/config.json?download=true models/sd1-5/config.json') - print("config Download!", ) - - + os.symlink(sd15_config_path, "models/sd1-5/config.json") if not os.path.exists("models/sd1-5/unet.ckpt"): - os.system(f'cp -r models/unet.ckpt models/sd1-5/unet.ckpt') - -# os.system(f'wget https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/unet/diffusion_pytorch_model.bin?download=true -P models/sd1-5/') - -if not os.path.exists("models/personalized/helloobjects_V12c.safetensors"): - os.system(f'wget -q https://huggingface.co/TencentARC/ImageConductor/resolve/main/helloobjects_V12c.safetensors?download=true -P models/personalized') - os.system(f'mv models/personalized/helloobjects_V12c.safetensors?download=true models/personalized/helloobjects_V12c.safetensors') - print("helloobjects_V12c Download!", ) - - -if not os.path.exists("models/personalized/TUSUN.safetensors"): - os.system(f'wget -q https://huggingface.co/TencentARC/ImageConductor/resolve/main/TUSUN.safetensors?download=true -P models/personalized') - os.system(f'mv models/personalized/TUSUN.safetensors?download=true models/personalized/TUSUN.safetensors') - print("TUSUN Download!", ) + os.symlink(unet_path, "models/sd1-5/unet.ckpt") # mv1 = os.system(f'mv /usr/local/lib/python3.10/site-packages/gradio/helpers.py /usr/local/lib/python3.10/site-packages/gradio/helpers_bkp.py') # mv2 = os.system(f'mv helpers.py /usr/local/lib/python3.10/site-packages/gradio/helpers.py') @@ -145,128 +117,135 @@ if not os.path.exists("models/personalized/TUSUN.safetensors"): # - - - - - examples - - - - - # image_examples = [ - ["__asset__/images/object/turtle-1.jpg", - "a sea turtle gracefully swimming over a coral reef in the clear blue ocean.", - "object", - 11318446767408804497, - "", - "turtle", - "__asset__/turtle.mp4" - ], - - ["__asset__/images/object/rose-1.jpg", - "a red rose engulfed in flames.", - "object", - 6854275249656120509, - "", - "rose", - "__asset__/rose.mp4" - ], - - ["__asset__/images/object/jellyfish-1.jpg", - "intricate detailing,photorealism,hyperrealistic, glowing jellyfish mushroom, flying, starry sky, bokeh, golden ratio composition.", - "object", - 17966188172968903484, - "HelloObject", - "jellyfish", - "__asset__/jellyfish.mp4" - ], - - - ["__asset__/images/camera/lush-1.jpg", - "detailed craftsmanship, photorealism, hyperrealistic, roaring waterfall, misty spray, lush greenery, vibrant rainbow, golden ratio composition.", - "camera", - 7970487946960948963, - "HelloObject", - "lush", - "__asset__/lush.mp4", - ], - - ["__asset__/images/camera/tusun-1.jpg", - "tusuncub with its mouth open, blurry, open mouth, fangs, photo background, looking at viewer, tongue, full body, solo, cute and lovely, Beautiful and realistic eye details, perfect anatomy, Nonsense, pure background, Centered-Shot, realistic photo, photograph, 4k, hyper detailed, DSLR, 24 Megapixels, 8mm Lens, Full Frame, film grain, Global Illumination, studio Lighting, Award Winning Photography, diffuse reflection, ray tracing.", - "camera", - 996953226890228361, - "TUSUN", - "tusun", - "__asset__/tusun.mp4" - ], - - 
["__asset__/images/camera/painting-1.jpg", - "A oil painting.", - "camera", - 16867854766769816385, - "", - "painting", - "__asset__/painting.mp4" - ], + [ + "__asset__/images/object/turtle-1.jpg", + "a sea turtle gracefully swimming over a coral reef in the clear blue ocean.", + "object", + 11318446767408804497, + "", + "turtle", + "__asset__/turtle.mp4", + ], + [ + "__asset__/images/object/rose-1.jpg", + "a red rose engulfed in flames.", + "object", + 6854275249656120509, + "", + "rose", + "__asset__/rose.mp4", + ], + [ + "__asset__/images/object/jellyfish-1.jpg", + "intricate detailing,photorealism,hyperrealistic, glowing jellyfish mushroom, flying, starry sky, bokeh, golden ratio composition.", + "object", + 17966188172968903484, + "HelloObject", + "jellyfish", + "__asset__/jellyfish.mp4", + ], + [ + "__asset__/images/camera/lush-1.jpg", + "detailed craftsmanship, photorealism, hyperrealistic, roaring waterfall, misty spray, lush greenery, vibrant rainbow, golden ratio composition.", + "camera", + 7970487946960948963, + "HelloObject", + "lush", + "__asset__/lush.mp4", + ], + [ + "__asset__/images/camera/tusun-1.jpg", + "tusuncub with its mouth open, blurry, open mouth, fangs, photo background, looking at viewer, tongue, full body, solo, cute and lovely, Beautiful and realistic eye details, perfect anatomy, Nonsense, pure background, Centered-Shot, realistic photo, photograph, 4k, hyper detailed, DSLR, 24 Megapixels, 8mm Lens, Full Frame, film grain, Global Illumination, studio Lighting, Award Winning Photography, diffuse reflection, ray tracing.", + "camera", + 996953226890228361, + "TUSUN", + "tusun", + "__asset__/tusun.mp4", + ], + [ + "__asset__/images/camera/painting-1.jpg", + "A oil painting.", + "camera", + 16867854766769816385, + "", + "painting", + "__asset__/painting.mp4", + ], ] POINTS = { - 'turtle': "__asset__/trajs/object/turtle-1.json", - 'rose': "__asset__/trajs/object/rose-1.json", - 'jellyfish': "__asset__/trajs/object/jellyfish-1.json", - 'lush': "__asset__/trajs/camera/lush-1.json", - 'tusun': "__asset__/trajs/camera/tusun-1.json", - 'painting': "__asset__/trajs/camera/painting-1.json", + "turtle": "__asset__/trajs/object/turtle-1.json", + "rose": "__asset__/trajs/object/rose-1.json", + "jellyfish": "__asset__/trajs/object/jellyfish-1.json", + "lush": "__asset__/trajs/camera/lush-1.json", + "tusun": "__asset__/trajs/camera/tusun-1.json", + "painting": "__asset__/trajs/camera/painting-1.json", } IMAGE_PATH = { - 'turtle': "__asset__/images/object/turtle-1.jpg", - 'rose': "__asset__/images/object/rose-1.jpg", - 'jellyfish': "__asset__/images/object/jellyfish-1.jpg", - 'lush': "__asset__/images/camera/lush-1.jpg", - 'tusun': "__asset__/images/camera/tusun-1.jpg", - 'painting': "__asset__/images/camera/painting-1.jpg", + "turtle": "__asset__/images/object/turtle-1.jpg", + "rose": "__asset__/images/object/rose-1.jpg", + "jellyfish": "__asset__/images/object/jellyfish-1.jpg", + "lush": "__asset__/images/camera/lush-1.jpg", + "tusun": "__asset__/images/camera/tusun-1.jpg", + "painting": "__asset__/images/camera/painting-1.jpg", } - DREAM_BOOTH = { - 'HelloObject': 'models/personalized/helloobjects_V12c.safetensors', + "HelloObject": helloobjects_path, } LORA = { - 'TUSUN': 'models/personalized/TUSUN.safetensors', + "TUSUN": tusun_path, } LORA_ALPHA = { - 'TUSUN': 0.6, + "TUSUN": 0.6, } NPROMPT = { - "HelloObject": 'FastNegativeV2,(bad-artist:1),(worst quality, low quality:1.4),(bad_prompt_version2:0.8),bad-hands-5,lowres,bad anatomy,bad 
hands,((text)),(watermark),error,missing fingers,extra digit,fewer digits,cropped,worst quality,low quality,normal quality,((username)),blurry,(extra limbs),bad-artist-anime,badhandv4,EasyNegative,ng_deepnegative_v1_75t,verybadimagenegative_v1.3,BadDream,(three hands:1.6),(three legs:1.2),(more than two hands:1.4),(more than two legs,:1.2)' + "HelloObject": "FastNegativeV2,(bad-artist:1),(worst quality, low quality:1.4),(bad_prompt_version2:0.8),bad-hands-5,lowres,bad anatomy,bad hands,((text)),(watermark),error,missing fingers,extra digit,fewer digits,cropped,worst quality,low quality,normal quality,((username)),blurry,(extra limbs),bad-artist-anime,badhandv4,EasyNegative,ng_deepnegative_v1_75t,verybadimagenegative_v1.3,BadDream,(three hands:1.6),(three legs:1.2),(more than two hands:1.4),(more than two legs,:1.2)" } output_dir = "outputs" ensure_dirname(output_dir) + def points_to_flows(track_points, model_length, height, width): input_drag = np.zeros((model_length - 1, height, width, 2)) for splited_track in track_points: - if len(splited_track) == 1: # stationary point + if len(splited_track) == 1: # stationary point displacement_point = tuple([splited_track[0][0] + 1, splited_track[0][1] + 1]) splited_track = tuple([splited_track[0], displacement_point]) # interpolate the track splited_track = interpolate_trajectory(splited_track, model_length) splited_track = splited_track[:model_length] if len(splited_track) < model_length: - splited_track = splited_track + [splited_track[-1]] * (model_length -len(splited_track)) + splited_track = splited_track + [splited_track[-1]] * (model_length - len(splited_track)) for i in range(model_length - 1): start_point = splited_track[i] - end_point = splited_track[i+1] + end_point = splited_track[i + 1] input_drag[i][int(start_point[1])][int(start_point[0])][0] = end_point[0] - start_point[0] input_drag[i][int(start_point[1])][int(start_point[0])][1] = end_point[1] - start_point[1] return input_drag + class ImageConductor: - def __init__(self, device, unet_path, image_controlnet_path, flow_controlnet_path, height, width, model_length, lora_rank=64): + def __init__( + self, device, unet_path, image_controlnet_path, flow_controlnet_path, height, width, model_length, lora_rank=64 + ): self.device = device - tokenizer = CLIPTokenizer.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="tokenizer") - text_encoder = CLIPTextModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="text_encoder").to(device) - vae = AutoencoderKL.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="vae").to(device) + tokenizer = CLIPTokenizer.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="tokenizer") + text_encoder = CLIPTextModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="text_encoder").to( + device + ) + vae = AutoencoderKL.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="vae").to(device) inference_config = OmegaConf.load("configs/inference/inference.yaml") - unet = UNet3DConditionFlowModel.from_pretrained_2d("models/sd1-5/", unet_additional_kwargs=OmegaConf.to_container(inference_config.unet_additional_kwargs)) + unet = UNet3DConditionFlowModel.from_pretrained_2d( + "models/sd1-5/", unet_additional_kwargs=OmegaConf.to_container(inference_config.unet_additional_kwargs) + ) self.vae = vae @@ -287,15 +266,14 @@ class ImageConductor: self.pipeline = ImageConductorPipeline( unet=unet, - vae=vae, - tokenizer=tokenizer, - text_encoder=text_encoder, + vae=vae, + tokenizer=tokenizer, + text_encoder=text_encoder, 
scheduler=DDIMScheduler(**OmegaConf.to_container(inference_config.noise_scheduler_kwargs)), image_controlnet=image_controlnet, flow_controlnet=flow_controlnet, ).to(device) - self.height = height self.width = width # _, model_step, _ = split_filename(model_path) @@ -307,40 +285,51 @@ class ImageConductor: self.blur_kernel = blur_kernel @spaces.GPU(duration=180) - def run(self, first_frame_path, tracking_points, prompt, drag_mode, negative_prompt, seed, randomize_seed, guidance_scale, num_inference_steps, personalized, examples_type): + def run( + self, + first_frame_path, + tracking_points, + prompt, + drag_mode, + negative_prompt, + seed, + randomize_seed, + guidance_scale, + num_inference_steps, + personalized, + ): print("Run!") - if examples_type != "": - ### for adapting high version gradio - tracking_points = gr.State([]) - first_frame_path = IMAGE_PATH[examples_type] - points = json.load(open(POINTS[examples_type])) - tracking_points.value.extend(points) - print("example first_frame_path", first_frame_path) - print("example tracking_points", tracking_points.value) - - original_width, original_height=384, 256 - if isinstance(tracking_points, list): - input_all_points = tracking_points - else: - input_all_points = tracking_points.value - + + original_width, original_height = 384, 256 + input_all_points = tracking_points + print("input_all_points", input_all_points) - resized_all_points = [tuple([tuple([float(e1[0]*self.width/original_width), float(e1[1]*self.height/original_height)]) for e1 in e]) for e in input_all_points] + resized_all_points = [ + tuple( + [ + tuple([float(e1[0] * self.width / original_width), float(e1[1] * self.height / original_height)]) + for e1 in e + ] + ) + for e in input_all_points + ] dir, base, ext = split_filename(first_frame_path) - id = base.split('_')[-1] - - - visualized_drag, _ = visualize_drag(first_frame_path, resized_all_points, drag_mode, self.width, self.height, self.model_length) + id = base.split("_")[-1] + + visualized_drag, _ = visualize_drag( + first_frame_path, resized_all_points, drag_mode, self.width, self.height, self.model_length + ) - ## image condition - image_transforms = transforms.Compose([ + ## image condition + image_transforms = transforms.Compose( + [ transforms.RandomResizedCrop( - (self.height, self.width), (1.0, 1.0), - ratio=(self.width/self.height, self.width/self.height) + (self.height, self.width), (1.0, 1.0), ratio=(self.width / self.height, self.width / self.height) ), transforms.ToTensor(), - ]) + ] + ) image_paths = [first_frame_path] controlnet_images = [(image_transforms(Image.open(path).convert("RGB"))) for path in image_paths] @@ -349,205 +338,296 @@ class ImageConductor: num_controlnet_images = controlnet_images.shape[2] controlnet_images = rearrange(controlnet_images, "b c f h w -> (b f) c h w") self.vae.to(device) - controlnet_images = self.vae.encode(controlnet_images * 2. 
- 1.).latent_dist.sample() * 0.18215 + controlnet_images = self.vae.encode(controlnet_images * 2.0 - 1.0).latent_dist.sample() * 0.18215 controlnet_images = rearrange(controlnet_images, "(b f) c h w -> b c f h w", f=num_controlnet_images) # flow condition controlnet_flows = points_to_flows(resized_all_points, self.model_length, self.height, self.width) - for i in range(0, self.model_length-1): + for i in range(0, self.model_length - 1): controlnet_flows[i] = cv2.filter2D(controlnet_flows[i], -1, self.blur_kernel) - controlnet_flows = np.concatenate([np.zeros_like(controlnet_flows[0])[np.newaxis, ...], controlnet_flows], axis=0) # pad the first frame with zero flow + controlnet_flows = np.concatenate( + [np.zeros_like(controlnet_flows[0])[np.newaxis, ...], controlnet_flows], axis=0 + ) # pad the first frame with zero flow os.makedirs(os.path.join(output_dir, "control_flows"), exist_ok=True) - trajs_video = vis_flow_to_video(controlnet_flows, num_frames=self.model_length) # T-1 x H x W x 3 - torchvision.io.write_video(f'{output_dir}/control_flows/sample-{id}-train_flow.mp4', trajs_video, fps=8, video_codec='h264', options={'crf': '10'}) - controlnet_flows = torch.from_numpy(controlnet_flows)[None][:, :self.model_length, ...] - controlnet_flows = rearrange(controlnet_flows, "b f h w c-> b c f h w").float().to(device) + trajs_video = vis_flow_to_video(controlnet_flows, num_frames=self.model_length) # T-1 x H x W x 3 + torchvision.io.write_video( + f"{output_dir}/control_flows/sample-{id}-train_flow.mp4", + trajs_video, + fps=8, + video_codec="h264", + options={"crf": "10"}, + ) + controlnet_flows = torch.from_numpy(controlnet_flows)[None][:, : self.model_length, ...] + controlnet_flows = rearrange(controlnet_flows, "b f h w c-> b c f h w").float().to(device) - dreambooth_model_path = DREAM_BOOTH.get(personalized, '') - lora_model_path = LORA.get(personalized, '') + dreambooth_model_path = DREAM_BOOTH.get(personalized, "") + lora_model_path = LORA.get(personalized, "") lora_alpha = LORA_ALPHA.get(personalized, 0.6) self.pipeline = load_weights( self.pipeline, - dreambooth_model_path = dreambooth_model_path, - lora_model_path = lora_model_path, - lora_alpha = lora_alpha, + dreambooth_model_path=dreambooth_model_path, + lora_model_path=lora_model_path, + lora_alpha=lora_alpha, ).to(device) - - if NPROMPT.get(personalized, '') != '': - negative_prompt = NPROMPT.get(personalized) - + + if NPROMPT.get(personalized, "") != "": + negative_prompt = NPROMPT.get(personalized) + if randomize_seed: random_seed = torch.seed() else: seed = int(seed) random_seed = seed torch.manual_seed(random_seed) - torch.cuda.manual_seed_all(random_seed) + torch.cuda.manual_seed_all(random_seed) print(f"current seed: {torch.initial_seed()}") sample = self.pipeline( - prompt, - negative_prompt = negative_prompt, - num_inference_steps = num_inference_steps, - guidance_scale = guidance_scale, - width = self.width, - height = self.height, - video_length = self.model_length, - controlnet_images = controlnet_images, # 1 4 1 32 48 - controlnet_image_index = [0], - controlnet_flows = controlnet_flows,# [1, 2, 16, 256, 384] - control_mode = drag_mode, - eval_mode = True, - ).videos - - outputs_path = os.path.join(output_dir, f'output_{i}_{id}.mp4') - vis_video = (rearrange(sample[0], 'c t h w -> t h w c') * 255.).clip(0, 255) - torchvision.io.write_video(outputs_path, vis_video, fps=8, video_codec='h264', options={'crf': '10'}) - + prompt, + negative_prompt=negative_prompt, + num_inference_steps=num_inference_steps, + 
guidance_scale=guidance_scale, + width=self.width, + height=self.height, + video_length=self.model_length, + controlnet_images=controlnet_images, # 1 4 1 32 48 + controlnet_image_index=[0], + controlnet_flows=controlnet_flows, # [1, 2, 16, 256, 384] + control_mode=drag_mode, + eval_mode=True, + ).videos + + outputs_path = os.path.join(output_dir, f"output_{i}_{id}.mp4") + vis_video = (rearrange(sample[0], "c t h w -> t h w c") * 255.0).clip(0, 255) + torchvision.io.write_video(outputs_path, vis_video, fps=8, video_codec="h264", options={"crf": "10"}) + # outputs_path = os.path.join(output_dir, f'output_{i}_{id}.gif') # save_videos_grid(sample[0][None], outputs_path) print("Done!") - return {output_image: visualized_drag, output_video: outputs_path} + return visualized_drag, outputs_path def reset_states(first_frame_path, tracking_points): - first_frame_path = gr.State() - tracking_points = gr.State([]) - return {input_image:None, first_frame_path_var: first_frame_path, tracking_points_var: tracking_points} + first_frame_path = None + tracking_points = [] + return {input_image: None, first_frame_path_var: first_frame_path, tracking_points_var: tracking_points} def preprocess_image(image, tracking_points): image_pil = image2pil(image.name) raw_w, raw_h = image_pil.size - resize_ratio = max(384/raw_w, 256/raw_h) + resize_ratio = max(384 / raw_w, 256 / raw_h) image_pil = image_pil.resize((int(raw_w * resize_ratio), int(raw_h * resize_ratio)), Image.BILINEAR) - image_pil = transforms.CenterCrop((256, 384))(image_pil.convert('RGB')) + image_pil = transforms.CenterCrop((256, 384))(image_pil.convert("RGB")) id = str(uuid.uuid4())[:4] first_frame_path = os.path.join(output_dir, f"first_frame_{id}.jpg") image_pil.save(first_frame_path, quality=95) - tracking_points = gr.State([]) - return {input_image: first_frame_path, first_frame_path_var: first_frame_path, tracking_points_var: tracking_points, personalized:""} + tracking_points = [] + return { + input_image: first_frame_path, + first_frame_path_var: first_frame_path, + tracking_points_var: tracking_points, + personalized: "", + } + + +def add_tracking_points( + tracking_points, first_frame_path, drag_mode, evt: gr.SelectData +): # SelectData is a subclass of EventData + if drag_mode == "object": + color = (255, 0, 0, 255) + elif drag_mode == "camera": + color = (0, 0, 255, 255) + + print(f"You selected {evt.value} at {evt.index} from {evt.target}") + if not tracking_points: + tracking_points = [[]] + tracking_points[-1].append(evt.index) + + transparent_background = Image.open(first_frame_path).convert("RGBA") + w, h = transparent_background.size + transparent_layer = np.zeros((h, w, 4)) + + for track in tracking_points: + if len(track) > 1: + for i in range(len(track) - 1): + start_point = track[i] + end_point = track[i + 1] + vx = end_point[0] - start_point[0] + vy = end_point[1] - start_point[1] + arrow_length = np.sqrt(vx**2 + vy**2) + if i == len(track) - 2: + cv2.arrowedLine( + transparent_layer, tuple(start_point), tuple(end_point), color, 2, tipLength=8 / arrow_length + ) + else: + cv2.line( + transparent_layer, + tuple(start_point), + tuple(end_point), + color, + 2, + ) + else: + cv2.circle(transparent_layer, tuple(track[0]), 5, color, -1) + + transparent_layer = Image.fromarray(transparent_layer.astype(np.uint8)) + trajectory_map = Image.alpha_composite(transparent_background, transparent_layer) + return {tracking_points_var: tracking_points, input_image: trajectory_map} + +def preprocess_example_image(image_path, tracking_points, 
drag_mode): + image_pil = image2pil(image_path) + raw_w, raw_h = image_pil.size + resize_ratio = max(384 / raw_w, 256 / raw_h) + image_pil = image_pil.resize((int(raw_w * resize_ratio), int(raw_h * resize_ratio)), Image.BILINEAR) + image_pil = transforms.CenterCrop((256, 384))(image_pil.convert("RGB")) + id = str(uuid.uuid4())[:4] + first_frame_path = os.path.join(output_dir, f"first_frame_{id}.jpg") + image_pil.save(first_frame_path, quality=95) -def add_tracking_points(tracking_points, first_frame_path, drag_mode, evt: gr.SelectData): # SelectData is a subclass of EventData - if drag_mode=='object': + if drag_mode == "object": color = (255, 0, 0, 255) - elif drag_mode=='camera': + elif drag_mode == "camera": color = (0, 0, 255, 255) - if not isinstance(tracking_points ,list): - print(f"You selected {evt.value} at {evt.index} from {evt.target}") - tracking_points.value[-1].append(evt.index) - print(tracking_points.value) - tracking_points_values = tracking_points.value - else: - try: - tracking_points[-1].append(evt.index) - except Exception as e: - tracking_points.append([]) - tracking_points[-1].append(evt.index) - print(f"Solved Error: {e}") - - tracking_points_values = tracking_points - - - transparent_background = Image.open(first_frame_path).convert('RGBA') + transparent_background = Image.open(first_frame_path).convert("RGBA") w, h = transparent_background.size transparent_layer = np.zeros((h, w, 4)) - - for track in tracking_points_values: + + for track in tracking_points: if len(track) > 1: - for i in range(len(track)-1): + for i in range(len(track) - 1): start_point = track[i] - end_point = track[i+1] + end_point = track[i + 1] vx = end_point[0] - start_point[0] vy = end_point[1] - start_point[1] arrow_length = np.sqrt(vx**2 + vy**2) - if i == len(track)-2: - cv2.arrowedLine(transparent_layer, tuple(start_point), tuple(end_point), color, 2, tipLength=8 / arrow_length) + if i == len(track) - 2: + cv2.arrowedLine( + transparent_layer, tuple(start_point), tuple(end_point), color, 2, tipLength=8 / arrow_length + ) else: - cv2.line(transparent_layer, tuple(start_point), tuple(end_point), color, 2,) + cv2.line( + transparent_layer, + tuple(start_point), + tuple(end_point), + color, + 2, + ) else: cv2.circle(transparent_layer, tuple(track[0]), 5, color, -1) transparent_layer = Image.fromarray(transparent_layer.astype(np.uint8)) trajectory_map = Image.alpha_composite(transparent_background, transparent_layer) - return {tracking_points_var: tracking_points, input_image: trajectory_map} + + return trajectory_map, first_frame_path def add_drag(tracking_points): - if not isinstance(tracking_points ,list): - # print("before", tracking_points.value) - tracking_points.value.append([]) - # print(tracking_points.value) - else: + if not tracking_points or tracking_points[-1]: tracking_points.append([]) return {tracking_points_var: tracking_points} - + def delete_last_drag(tracking_points, first_frame_path, drag_mode): - if drag_mode=='object': + if drag_mode == "object": color = (255, 0, 0, 255) - elif drag_mode=='camera': + elif drag_mode == "camera": color = (0, 0, 255, 255) - tracking_points.value.pop() - transparent_background = Image.open(first_frame_path).convert('RGBA') + if tracking_points: + tracking_points.pop() + transparent_background = Image.open(first_frame_path).convert("RGBA") w, h = transparent_background.size transparent_layer = np.zeros((h, w, 4)) - for track in tracking_points.value: + for track in tracking_points: if len(track) > 1: - for i in range(len(track)-1): + for i in 
range(len(track) - 1): start_point = track[i] - end_point = track[i+1] + end_point = track[i + 1] vx = end_point[0] - start_point[0] vy = end_point[1] - start_point[1] arrow_length = np.sqrt(vx**2 + vy**2) - if i == len(track)-2: - cv2.arrowedLine(transparent_layer, tuple(start_point), tuple(end_point), color, 2, tipLength=8 / arrow_length) + if i == len(track) - 2: + cv2.arrowedLine( + transparent_layer, tuple(start_point), tuple(end_point), color, 2, tipLength=8 / arrow_length + ) else: - cv2.line(transparent_layer, tuple(start_point), tuple(end_point), color, 2,) + cv2.line( + transparent_layer, + tuple(start_point), + tuple(end_point), + color, + 2, + ) else: cv2.circle(transparent_layer, tuple(track[0]), 5, color, -1) transparent_layer = Image.fromarray(transparent_layer.astype(np.uint8)) trajectory_map = Image.alpha_composite(transparent_background, transparent_layer) return {tracking_points_var: tracking_points, input_image: trajectory_map} - + def delete_last_step(tracking_points, first_frame_path, drag_mode): - if drag_mode=='object': + if drag_mode == "object": color = (255, 0, 0, 255) - elif drag_mode=='camera': + elif drag_mode == "camera": color = (0, 0, 255, 255) - tracking_points.value[-1].pop() - transparent_background = Image.open(first_frame_path).convert('RGBA') + if tracking_points and tracking_points[-1]: + tracking_points[-1].pop() + transparent_background = Image.open(first_frame_path).convert("RGBA") w, h = transparent_background.size transparent_layer = np.zeros((h, w, 4)) - for track in tracking_points.value: + for track in tracking_points: + if not track: + continue if len(track) > 1: - for i in range(len(track)-1): + for i in range(len(track) - 1): start_point = track[i] - end_point = track[i+1] + end_point = track[i + 1] vx = end_point[0] - start_point[0] vy = end_point[1] - start_point[1] arrow_length = np.sqrt(vx**2 + vy**2) - if i == len(track)-2: - cv2.arrowedLine(transparent_layer, tuple(start_point), tuple(end_point), color, 2, tipLength=8 / arrow_length) + if i == len(track) - 2: + cv2.arrowedLine( + transparent_layer, tuple(start_point), tuple(end_point), color, 2, tipLength=8 / arrow_length + ) else: - cv2.line(transparent_layer, tuple(start_point), tuple(end_point), color, 2,) + cv2.line( + transparent_layer, + tuple(start_point), + tuple(end_point), + color, + 2, + ) else: - cv2.circle(transparent_layer, tuple(track[0]), 5,color, -1) + cv2.circle(transparent_layer, tuple(track[0]), 5, color, -1) transparent_layer = Image.fromarray(transparent_layer.astype(np.uint8)) trajectory_map = Image.alpha_composite(transparent_background, transparent_layer) return {tracking_points_var: tracking_points, input_image: trajectory_map} -block = gr.Blocks( - theme=gr.themes.Soft( - radius_size=gr.themes.sizes.radius_none, - text_size=gr.themes.sizes.text_md - ) - ) +def load_example(drag_mode, examples_type): + example_image_path = IMAGE_PATH[examples_type] + with open(POINTS[examples_type]) as f: + tracking_points = json.load(f) + tracking_points = np.round(tracking_points).astype(int).tolist() + trajectory_map, first_frame_path = preprocess_example_image(example_image_path, tracking_points, drag_mode) + return {input_image: trajectory_map, first_frame_path_var: first_frame_path, tracking_points_var: tracking_points} + + +device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") +ImageConductor_net = ImageConductor( + device=device, + unet_path=unet_path, + image_controlnet_path=image_controlnet_path, + 
flow_controlnet_path=flow_controlnet_path, + height=256, + width=384, + model_length=16, +) + +block = gr.Blocks(theme=gr.themes.Soft(radius_size=gr.themes.sizes.radius_none, text_size=gr.themes.sizes.text_md)) with block: with gr.Row(): with gr.Column(): @@ -557,66 +637,58 @@ with block: with gr.Accordion(label="🛠️ Instructions:", open=True, elem_id="accordion"): with gr.Row(equal_height=True): - gr.Markdown(instructions) - - - # device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") - device = torch.device("cuda") - unet_path = 'models/unet.ckpt' - image_controlnet_path = 'models/image_controlnet.ckpt' - flow_controlnet_path = 'models/flow_controlnet.ckpt' - ImageConductor_net = ImageConductor(device=device, - unet_path=unet_path, - image_controlnet_path=image_controlnet_path, - flow_controlnet_path=flow_controlnet_path, - height=256, - width=384, - model_length=16 - ) - first_frame_path_var = gr.State(value=None) + gr.Markdown(instructions) + + first_frame_path_var = gr.State() tracking_points_var = gr.State([]) with gr.Row(): with gr.Column(scale=1): - image_upload_button = gr.UploadButton(label="Upload Image",file_types=["image"]) + image_upload_button = gr.UploadButton(label="Upload Image", file_types=["image"]) add_drag_button = gr.Button(value="Add Drag") reset_button = gr.Button(value="Reset") delete_last_drag_button = gr.Button(value="Delete last drag") delete_last_step_button = gr.Button(value="Delete last step") - - with gr.Column(scale=7): with gr.Row(): with gr.Column(scale=6): - input_image = gr.Image(label="Input Image", - interactive=True, - height=300, - width=384,) + input_image = gr.Image( + label="Input Image", + interactive=True, + height=300, + width=384, + ) with gr.Column(scale=6): - output_image = gr.Image(label="Motion Path", - interactive=False, - height=256, - width=384,) + output_image = gr.Image( + label="Motion Path", + interactive=False, + height=256, + width=384, + ) with gr.Row(): with gr.Column(scale=1): - prompt = gr.Textbox(value="a wonderful elf.", label="Prompt (highly-recommended)", interactive=True, visible=True) + prompt = gr.Textbox( + value="a wonderful elf.", + label="Prompt (highly-recommended)", + interactive=True, + visible=True, + ) negative_prompt = gr.Text( - label="Negative Prompt", - max_lines=5, - placeholder="Please input your negative prompt", - value='worst quality, low quality, letterboxed',lines=1 - ) - drag_mode = gr.Radio(['camera', 'object'], label='Drag mode: ', value='object', scale=2) + label="Negative Prompt", + max_lines=5, + placeholder="Please input your negative prompt", + value="worst quality, low quality, letterboxed", + lines=1, + ) + drag_mode = gr.Radio(["camera", "object"], label="Drag mode: ", value="object", scale=2) run_button = gr.Button(value="Run") with gr.Accordion("More input params", open=False, elem_id="accordion1"): with gr.Group(): - seed = gr.Textbox( - label="Seed: ", value=561793204, - ) + seed = gr.Textbox(label="Seed: ", value=561793204) randomize_seed = gr.Checkbox(label="Randomize seed", value=False) - + with gr.Group(): with gr.Row(): guidance_scale = gr.Slider( @@ -633,24 +705,15 @@ with block: step=1, value=25, ) - + with gr.Group(): - personalized = gr.Dropdown(label="Personalized", choices=["", 'HelloObject', 'TUSUN'], value="") - examples_type = gr.Textbox(label="Examples Type (Ignore) ", value="", visible=False) + personalized = gr.Dropdown(label="Personalized", choices=["", "HelloObject", "TUSUN"], value="") + examples_type = gr.Textbox(label="Examples Type 
(Ignore) ", value="", visible=False) with gr.Column(scale=7): - output_video = gr.Video( - label="Output Video", - width=384, - height=256) - # output_video = gr.Image(label="Output Video", - # height=256, - # width=384,) - - - with gr.Row(): - + output_video = gr.Video(label="Output Video", width=384, height=256) + with gr.Row(): example = gr.Examples( label="Input Example", examples=image_examples, @@ -658,26 +721,65 @@ with block: examples_per_page=10, cache_examples=False, ) - - + with gr.Row(): gr.Markdown(citation) - - image_upload_button.upload(preprocess_image, [image_upload_button, tracking_points_var], [input_image, first_frame_path_var, tracking_points_var, personalized]) + image_upload_button.upload( + preprocess_image, + [image_upload_button, tracking_points_var], + [input_image, first_frame_path_var, tracking_points_var, personalized], + ) add_drag_button.click(add_drag, tracking_points_var, tracking_points_var) - delete_last_drag_button.click(delete_last_drag, [tracking_points_var, first_frame_path_var, drag_mode], [tracking_points_var, input_image]) - - delete_last_step_button.click(delete_last_step, [tracking_points_var, first_frame_path_var, drag_mode], [tracking_points_var, input_image]) - - reset_button.click(reset_states, [first_frame_path_var, tracking_points_var], [input_image, first_frame_path_var, tracking_points_var]) - - input_image.select(add_tracking_points, [tracking_points_var, first_frame_path_var, drag_mode], [tracking_points_var, input_image]) - - run_button.click(ImageConductor_net.run, [first_frame_path_var, tracking_points_var, prompt, drag_mode, - negative_prompt, seed, randomize_seed, guidance_scale, num_inference_steps, personalized, examples_type], - [output_image, output_video]) + delete_last_drag_button.click( + delete_last_drag, + [tracking_points_var, first_frame_path_var, drag_mode], + [tracking_points_var, input_image], + ) + + delete_last_step_button.click( + delete_last_step, + [tracking_points_var, first_frame_path_var, drag_mode], + [tracking_points_var, input_image], + ) + + reset_button.click( + reset_states, + [first_frame_path_var, tracking_points_var], + [input_image, first_frame_path_var, tracking_points_var], + ) + + input_image.select( + add_tracking_points, + [tracking_points_var, first_frame_path_var, drag_mode], + [tracking_points_var, input_image], + ) + + run_button.click( + ImageConductor_net.run, + [ + first_frame_path_var, + tracking_points_var, + prompt, + drag_mode, + negative_prompt, + seed, + randomize_seed, + guidance_scale, + num_inference_steps, + personalized, + ], + [output_image, output_video], + ) + + examples_type.change( + fn=load_example, + inputs=[drag_mode, examples_type], + outputs=[input_image, first_frame_path_var, tracking_points_var], + api_name=False, + queue=False, + ) block.queue().launch() diff --git a/configs/.DS_Store b/configs/.DS_Store deleted file mode 100644 index 5008ddfcf53c02e82d7eee2e57c38e5672ef89f6..0000000000000000000000000000000000000000 Binary files a/configs/.DS_Store and /dev/null differ diff --git a/models/.DS_Store b/models/.DS_Store deleted file mode 100644 index 71a30893823876826ad4987d58e9ce4b41291cea..0000000000000000000000000000000000000000 Binary files a/models/.DS_Store and /dev/null differ diff --git a/modules/__pycache__/attention.cpython-310.pyc b/modules/__pycache__/attention.cpython-310.pyc deleted file mode 100644 index 95b780a82fdc5cdeafc7e567c3a86d4a43efe2ab..0000000000000000000000000000000000000000 Binary files a/modules/__pycache__/attention.cpython-310.pyc and 
/dev/null differ diff --git a/modules/__pycache__/flow_controlnet.cpython-310.pyc b/modules/__pycache__/flow_controlnet.cpython-310.pyc deleted file mode 100644 index c35d216cd0d7baef8196beb2c2446afa2eb20e2c..0000000000000000000000000000000000000000 Binary files a/modules/__pycache__/flow_controlnet.cpython-310.pyc and /dev/null differ diff --git a/modules/__pycache__/image_controlnet.cpython-310.pyc b/modules/__pycache__/image_controlnet.cpython-310.pyc deleted file mode 100644 index 6dd72a42170af585e9828058717f46e2abd0c5ce..0000000000000000000000000000000000000000 Binary files a/modules/__pycache__/image_controlnet.cpython-310.pyc and /dev/null differ diff --git a/modules/__pycache__/motion_module.cpython-310.pyc b/modules/__pycache__/motion_module.cpython-310.pyc deleted file mode 100644 index 02d724ed29e82bf76b28920ea94f9327127dd4e2..0000000000000000000000000000000000000000 Binary files a/modules/__pycache__/motion_module.cpython-310.pyc and /dev/null differ diff --git a/modules/__pycache__/resnet.cpython-310.pyc b/modules/__pycache__/resnet.cpython-310.pyc deleted file mode 100644 index 1b875ec156c21364e2018dd36c2bf05af2909104..0000000000000000000000000000000000000000 Binary files a/modules/__pycache__/resnet.cpython-310.pyc and /dev/null differ diff --git a/modules/__pycache__/unet.cpython-310.pyc b/modules/__pycache__/unet.cpython-310.pyc deleted file mode 100644 index d777c33c68db0a708e791a9f7024e9994148cc59..0000000000000000000000000000000000000000 Binary files a/modules/__pycache__/unet.cpython-310.pyc and /dev/null differ diff --git a/modules/__pycache__/unet_blocks.cpython-310.pyc b/modules/__pycache__/unet_blocks.cpython-310.pyc deleted file mode 100644 index c2abde7953e12c1b9ffbed93ba9d69a90f256bd3..0000000000000000000000000000000000000000 Binary files a/modules/__pycache__/unet_blocks.cpython-310.pyc and /dev/null differ diff --git a/peft/__pycache__/__init__.cpython-310.pyc b/peft/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index 7519f40da950b0df55e6d8ab2df9a18221f646d1..0000000000000000000000000000000000000000 Binary files a/peft/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/peft/__pycache__/auto.cpython-310.pyc b/peft/__pycache__/auto.cpython-310.pyc deleted file mode 100644 index a48eb8ac284e9b03f0ae25bafb43fc79f89bb62c..0000000000000000000000000000000000000000 Binary files a/peft/__pycache__/auto.cpython-310.pyc and /dev/null differ diff --git a/peft/__pycache__/config.cpython-310.pyc b/peft/__pycache__/config.cpython-310.pyc deleted file mode 100644 index e99f10e8e6c7b24206a42261d9ec24fd2491118d..0000000000000000000000000000000000000000 Binary files a/peft/__pycache__/config.cpython-310.pyc and /dev/null differ diff --git a/peft/__pycache__/import_utils.cpython-310.pyc b/peft/__pycache__/import_utils.cpython-310.pyc deleted file mode 100644 index 97e87072c428137738955a011d3a9837b637ac69..0000000000000000000000000000000000000000 Binary files a/peft/__pycache__/import_utils.cpython-310.pyc and /dev/null differ diff --git a/peft/__pycache__/mapping.cpython-310.pyc b/peft/__pycache__/mapping.cpython-310.pyc deleted file mode 100644 index 9cb5ce2a074f407bccad18aab353b0455dab5c09..0000000000000000000000000000000000000000 Binary files a/peft/__pycache__/mapping.cpython-310.pyc and /dev/null differ diff --git a/peft/__pycache__/mixed_model.cpython-310.pyc b/peft/__pycache__/mixed_model.cpython-310.pyc deleted file mode 100644 index 70be307653d0608f4d50ed009e1421c006257ec5..0000000000000000000000000000000000000000 Binary files 
a/peft/__pycache__/mixed_model.cpython-310.pyc and /dev/null differ diff --git a/peft/__pycache__/peft_model.cpython-310.pyc b/peft/__pycache__/peft_model.cpython-310.pyc deleted file mode 100644 index 973f687480ed9ffb5e31ba346b787e6ff25b27ac..0000000000000000000000000000000000000000 Binary files a/peft/__pycache__/peft_model.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/__pycache__/__init__.cpython-310.pyc b/peft/tuners/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index 5098f1277fe52e8efd665799040a2005cf2a006f..0000000000000000000000000000000000000000 Binary files a/peft/tuners/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/__pycache__/lycoris_utils.cpython-310.pyc b/peft/tuners/__pycache__/lycoris_utils.cpython-310.pyc deleted file mode 100644 index e41938019e5dd37b2f3254d5a8ebff0ea8aaa9e0..0000000000000000000000000000000000000000 Binary files a/peft/tuners/__pycache__/lycoris_utils.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/__pycache__/tuners_utils.cpython-310.pyc b/peft/tuners/__pycache__/tuners_utils.cpython-310.pyc deleted file mode 100644 index 2a6a0b9a02d7ac8b8938dbf5b3ed05bc3914b58b..0000000000000000000000000000000000000000 Binary files a/peft/tuners/__pycache__/tuners_utils.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/adalora/__pycache__/__init__.cpython-310.pyc b/peft/tuners/adalora/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index 2c7d3299fbcbd7e7585de0a16fda2527ce97c073..0000000000000000000000000000000000000000 Binary files a/peft/tuners/adalora/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/adalora/__pycache__/bnb.cpython-310.pyc b/peft/tuners/adalora/__pycache__/bnb.cpython-310.pyc deleted file mode 100644 index 21ab08756f2efaf08bf91569fa6c84f7da6931ad..0000000000000000000000000000000000000000 Binary files a/peft/tuners/adalora/__pycache__/bnb.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/adalora/__pycache__/config.cpython-310.pyc b/peft/tuners/adalora/__pycache__/config.cpython-310.pyc deleted file mode 100644 index b3754e90ab061cd911b5736f22b667832701ec75..0000000000000000000000000000000000000000 Binary files a/peft/tuners/adalora/__pycache__/config.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/adalora/__pycache__/gptq.cpython-310.pyc b/peft/tuners/adalora/__pycache__/gptq.cpython-310.pyc deleted file mode 100644 index f335b6eebf80c203177a76c433e07fa90830f355..0000000000000000000000000000000000000000 Binary files a/peft/tuners/adalora/__pycache__/gptq.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/adalora/__pycache__/layer.cpython-310.pyc b/peft/tuners/adalora/__pycache__/layer.cpython-310.pyc deleted file mode 100644 index f6e29b4d5825e2f2d415b3b3c7102b77eb02d8be..0000000000000000000000000000000000000000 Binary files a/peft/tuners/adalora/__pycache__/layer.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/adalora/__pycache__/model.cpython-310.pyc b/peft/tuners/adalora/__pycache__/model.cpython-310.pyc deleted file mode 100644 index 7f2021bb5753a376680ecf6e525756e213ab2008..0000000000000000000000000000000000000000 Binary files a/peft/tuners/adalora/__pycache__/model.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/adaption_prompt/__pycache__/__init__.cpython-310.pyc b/peft/tuners/adaption_prompt/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index 184e90af87162a9f665e28c4ce774bcf3879c776..0000000000000000000000000000000000000000 
Binary files a/peft/tuners/adaption_prompt/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/adaption_prompt/__pycache__/config.cpython-310.pyc b/peft/tuners/adaption_prompt/__pycache__/config.cpython-310.pyc deleted file mode 100644 index 5620960991c740728e640fbaba0b3ee37f715927..0000000000000000000000000000000000000000 Binary files a/peft/tuners/adaption_prompt/__pycache__/config.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/adaption_prompt/__pycache__/layer.cpython-310.pyc b/peft/tuners/adaption_prompt/__pycache__/layer.cpython-310.pyc deleted file mode 100644 index fe8e680a78703597eb2451a156f935c0d6c95ba9..0000000000000000000000000000000000000000 Binary files a/peft/tuners/adaption_prompt/__pycache__/layer.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/adaption_prompt/__pycache__/model.cpython-310.pyc b/peft/tuners/adaption_prompt/__pycache__/model.cpython-310.pyc deleted file mode 100644 index eff23c789c1c4ec8306c200348ee2597665cc557..0000000000000000000000000000000000000000 Binary files a/peft/tuners/adaption_prompt/__pycache__/model.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/adaption_prompt/__pycache__/utils.cpython-310.pyc b/peft/tuners/adaption_prompt/__pycache__/utils.cpython-310.pyc deleted file mode 100644 index a22d07f40ff74cd01df24cb784a37c2ee8c6fa0b..0000000000000000000000000000000000000000 Binary files a/peft/tuners/adaption_prompt/__pycache__/utils.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/boft/__pycache__/__init__.cpython-310.pyc b/peft/tuners/boft/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index bed15547e805fd2ab33a4f69fe4532967c755e03..0000000000000000000000000000000000000000 Binary files a/peft/tuners/boft/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/boft/__pycache__/config.cpython-310.pyc b/peft/tuners/boft/__pycache__/config.cpython-310.pyc deleted file mode 100644 index f6ec82042dfbdd8e67f0523e2bc9ea38c413d095..0000000000000000000000000000000000000000 Binary files a/peft/tuners/boft/__pycache__/config.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/boft/__pycache__/layer.cpython-310.pyc b/peft/tuners/boft/__pycache__/layer.cpython-310.pyc deleted file mode 100644 index f9d713c6337c46706eb8bd26b8a98d7be3c27aeb..0000000000000000000000000000000000000000 Binary files a/peft/tuners/boft/__pycache__/layer.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/boft/__pycache__/model.cpython-310.pyc b/peft/tuners/boft/__pycache__/model.cpython-310.pyc deleted file mode 100644 index 6996580a894f7debd0972e5751a797b61c43ea78..0000000000000000000000000000000000000000 Binary files a/peft/tuners/boft/__pycache__/model.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/boft/fbd/__pycache__/__init__.cpython-310.pyc b/peft/tuners/boft/fbd/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index 4700c2c398aeb4ff3698a416b027cbf92c93f50c..0000000000000000000000000000000000000000 Binary files a/peft/tuners/boft/fbd/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/ia3/__pycache__/__init__.cpython-310.pyc b/peft/tuners/ia3/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index 7cabe4d57af91d6b3d2a94c96772dacc56328cdc..0000000000000000000000000000000000000000 Binary files a/peft/tuners/ia3/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/ia3/__pycache__/bnb.cpython-310.pyc 
b/peft/tuners/ia3/__pycache__/bnb.cpython-310.pyc deleted file mode 100644 index ae41aba8b97dab2fcefa5fbb42ef6829f66f950d..0000000000000000000000000000000000000000 Binary files a/peft/tuners/ia3/__pycache__/bnb.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/ia3/__pycache__/config.cpython-310.pyc b/peft/tuners/ia3/__pycache__/config.cpython-310.pyc deleted file mode 100644 index f08a4bf2e09958a999b53be0d1e5a26ee6575d3e..0000000000000000000000000000000000000000 Binary files a/peft/tuners/ia3/__pycache__/config.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/ia3/__pycache__/layer.cpython-310.pyc b/peft/tuners/ia3/__pycache__/layer.cpython-310.pyc deleted file mode 100644 index e5ea0fd2db957b8e920124aa1d7c26f355e0c79c..0000000000000000000000000000000000000000 Binary files a/peft/tuners/ia3/__pycache__/layer.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/ia3/__pycache__/model.cpython-310.pyc b/peft/tuners/ia3/__pycache__/model.cpython-310.pyc deleted file mode 100644 index db19e5da9ebe37b266d23f3ab7066ddba7882ba2..0000000000000000000000000000000000000000 Binary files a/peft/tuners/ia3/__pycache__/model.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/ln_tuning/__pycache__/__init__.cpython-310.pyc b/peft/tuners/ln_tuning/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index d0fcffe94d3a9219a294589746659365cb8e295c..0000000000000000000000000000000000000000 Binary files a/peft/tuners/ln_tuning/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/ln_tuning/__pycache__/config.cpython-310.pyc b/peft/tuners/ln_tuning/__pycache__/config.cpython-310.pyc deleted file mode 100644 index 0b823aa03d6464630e53452985c14909b529f480..0000000000000000000000000000000000000000 Binary files a/peft/tuners/ln_tuning/__pycache__/config.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/ln_tuning/__pycache__/layer.cpython-310.pyc b/peft/tuners/ln_tuning/__pycache__/layer.cpython-310.pyc deleted file mode 100644 index 875ac9422c2dc26b69c13d09200829f56b42d092..0000000000000000000000000000000000000000 Binary files a/peft/tuners/ln_tuning/__pycache__/layer.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/ln_tuning/__pycache__/model.cpython-310.pyc b/peft/tuners/ln_tuning/__pycache__/model.cpython-310.pyc deleted file mode 100644 index db5bd3931933c11436191e35203fbd8aff28c035..0000000000000000000000000000000000000000 Binary files a/peft/tuners/ln_tuning/__pycache__/model.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/loha/__pycache__/__init__.cpython-310.pyc b/peft/tuners/loha/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index 38f9e7b827d56cdeaeddd2063d4aad14c17dd330..0000000000000000000000000000000000000000 Binary files a/peft/tuners/loha/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/loha/__pycache__/config.cpython-310.pyc b/peft/tuners/loha/__pycache__/config.cpython-310.pyc deleted file mode 100644 index 8a0e96c2544adb484e03dfb5261aff7d09196135..0000000000000000000000000000000000000000 Binary files a/peft/tuners/loha/__pycache__/config.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/loha/__pycache__/layer.cpython-310.pyc b/peft/tuners/loha/__pycache__/layer.cpython-310.pyc deleted file mode 100644 index 38d21cd45886c8a6b64bb74180f7dd283bbcfc11..0000000000000000000000000000000000000000 Binary files a/peft/tuners/loha/__pycache__/layer.cpython-310.pyc and /dev/null differ diff --git 
a/peft/tuners/loha/__pycache__/model.cpython-310.pyc b/peft/tuners/loha/__pycache__/model.cpython-310.pyc deleted file mode 100644 index 1a6946f6fb2c3971c0f5a6b3782b5d92380726cb..0000000000000000000000000000000000000000 Binary files a/peft/tuners/loha/__pycache__/model.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/lokr/__pycache__/__init__.cpython-310.pyc b/peft/tuners/lokr/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index 7b10a867a213b29ff420823e1b1c62d2ab2bc429..0000000000000000000000000000000000000000 Binary files a/peft/tuners/lokr/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/lokr/__pycache__/config.cpython-310.pyc b/peft/tuners/lokr/__pycache__/config.cpython-310.pyc deleted file mode 100644 index 0c9391e426b7d6130279aa192a4472074f06b02e..0000000000000000000000000000000000000000 Binary files a/peft/tuners/lokr/__pycache__/config.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/lokr/__pycache__/layer.cpython-310.pyc b/peft/tuners/lokr/__pycache__/layer.cpython-310.pyc deleted file mode 100644 index f8243263de1845d3e276214e42027fbf341222ee..0000000000000000000000000000000000000000 Binary files a/peft/tuners/lokr/__pycache__/layer.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/lokr/__pycache__/model.cpython-310.pyc b/peft/tuners/lokr/__pycache__/model.cpython-310.pyc deleted file mode 100644 index 589c95fb409d59443f4eaf0e65824824527c8b6e..0000000000000000000000000000000000000000 Binary files a/peft/tuners/lokr/__pycache__/model.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/lora/__pycache__/__init__.cpython-310.pyc b/peft/tuners/lora/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index 43066fb91b9eadf1383ba47951994f20e2d39ebe..0000000000000000000000000000000000000000 Binary files a/peft/tuners/lora/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/lora/__pycache__/aqlm.cpython-310.pyc b/peft/tuners/lora/__pycache__/aqlm.cpython-310.pyc deleted file mode 100644 index 6b59d028a4488cbcae29e3a77b7d661436db436b..0000000000000000000000000000000000000000 Binary files a/peft/tuners/lora/__pycache__/aqlm.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/lora/__pycache__/awq.cpython-310.pyc b/peft/tuners/lora/__pycache__/awq.cpython-310.pyc deleted file mode 100644 index 053a6d09147a23d30304bb5fef5197befccd6bcc..0000000000000000000000000000000000000000 Binary files a/peft/tuners/lora/__pycache__/awq.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/lora/__pycache__/bnb.cpython-310.pyc b/peft/tuners/lora/__pycache__/bnb.cpython-310.pyc deleted file mode 100644 index b9fd73ad08603f81003f37e15d5ceead1bf4856c..0000000000000000000000000000000000000000 Binary files a/peft/tuners/lora/__pycache__/bnb.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/lora/__pycache__/config.cpython-310.pyc b/peft/tuners/lora/__pycache__/config.cpython-310.pyc deleted file mode 100644 index 57c08959f8d6993f9d72d225d1cdd628e1a3db71..0000000000000000000000000000000000000000 Binary files a/peft/tuners/lora/__pycache__/config.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/lora/__pycache__/eetq.cpython-310.pyc b/peft/tuners/lora/__pycache__/eetq.cpython-310.pyc deleted file mode 100644 index 3a06827872f2a6ed7c5a77caedf48bf8a33a8d42..0000000000000000000000000000000000000000 Binary files a/peft/tuners/lora/__pycache__/eetq.cpython-310.pyc and /dev/null differ diff --git 
a/peft/tuners/lora/__pycache__/gptq.cpython-310.pyc b/peft/tuners/lora/__pycache__/gptq.cpython-310.pyc deleted file mode 100644 index 9e5810cc1116a71af71f58eb379add607fa68605..0000000000000000000000000000000000000000 Binary files a/peft/tuners/lora/__pycache__/gptq.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/lora/__pycache__/hqq.cpython-310.pyc b/peft/tuners/lora/__pycache__/hqq.cpython-310.pyc deleted file mode 100644 index 8177da62bb7d826672a032fbb1a11729e5cfb976..0000000000000000000000000000000000000000 Binary files a/peft/tuners/lora/__pycache__/hqq.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/lora/__pycache__/layer.cpython-310.pyc b/peft/tuners/lora/__pycache__/layer.cpython-310.pyc deleted file mode 100644 index fceffa4daff88aa6fd925ae12722183e8e47388b..0000000000000000000000000000000000000000 Binary files a/peft/tuners/lora/__pycache__/layer.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/lora/__pycache__/model.cpython-310.pyc b/peft/tuners/lora/__pycache__/model.cpython-310.pyc deleted file mode 100644 index 8c1956d00d5eab0679f5bf25ddbb69720abc4297..0000000000000000000000000000000000000000 Binary files a/peft/tuners/lora/__pycache__/model.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/lora/__pycache__/tp_layer.cpython-310.pyc b/peft/tuners/lora/__pycache__/tp_layer.cpython-310.pyc deleted file mode 100644 index 881b5359c888dc05903ae134f1c7213b7b23d374..0000000000000000000000000000000000000000 Binary files a/peft/tuners/lora/__pycache__/tp_layer.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/mixed/__pycache__/__init__.cpython-310.pyc b/peft/tuners/mixed/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index 434f0a0afa3e450f9a81fae0c2646177ad31e28b..0000000000000000000000000000000000000000 Binary files a/peft/tuners/mixed/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/mixed/__pycache__/model.cpython-310.pyc b/peft/tuners/mixed/__pycache__/model.cpython-310.pyc deleted file mode 100644 index 6af9320b2768cd109102bfa54778cf2fca1147d3..0000000000000000000000000000000000000000 Binary files a/peft/tuners/mixed/__pycache__/model.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/multitask_prompt_tuning/__pycache__/__init__.cpython-310.pyc b/peft/tuners/multitask_prompt_tuning/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index 3c70362a684b92008a222000a7b82f3d250b5932..0000000000000000000000000000000000000000 Binary files a/peft/tuners/multitask_prompt_tuning/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/multitask_prompt_tuning/__pycache__/config.cpython-310.pyc b/peft/tuners/multitask_prompt_tuning/__pycache__/config.cpython-310.pyc deleted file mode 100644 index fe3dd0bc44bc0f6d8cd38cea2faec86a9965700b..0000000000000000000000000000000000000000 Binary files a/peft/tuners/multitask_prompt_tuning/__pycache__/config.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/multitask_prompt_tuning/__pycache__/model.cpython-310.pyc b/peft/tuners/multitask_prompt_tuning/__pycache__/model.cpython-310.pyc deleted file mode 100644 index 0031da8fb1055866a6be8e70886a8cb1df880e5f..0000000000000000000000000000000000000000 Binary files a/peft/tuners/multitask_prompt_tuning/__pycache__/model.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/oft/__pycache__/__init__.cpython-310.pyc b/peft/tuners/oft/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index 
dd40ffc99e05acbdce560604801f65856d3e8a55..0000000000000000000000000000000000000000 Binary files a/peft/tuners/oft/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/oft/__pycache__/config.cpython-310.pyc b/peft/tuners/oft/__pycache__/config.cpython-310.pyc deleted file mode 100644 index 067776feff3a353cdd0c9531c68b674918405fb2..0000000000000000000000000000000000000000 Binary files a/peft/tuners/oft/__pycache__/config.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/oft/__pycache__/layer.cpython-310.pyc b/peft/tuners/oft/__pycache__/layer.cpython-310.pyc deleted file mode 100644 index b6394092daea9cb9c1f27e8a5d3137d86ae1a70c..0000000000000000000000000000000000000000 Binary files a/peft/tuners/oft/__pycache__/layer.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/oft/__pycache__/model.cpython-310.pyc b/peft/tuners/oft/__pycache__/model.cpython-310.pyc deleted file mode 100644 index 4e0bccb0d4a10e525210462ee1be022816ab63f3..0000000000000000000000000000000000000000 Binary files a/peft/tuners/oft/__pycache__/model.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/p_tuning/__pycache__/__init__.cpython-310.pyc b/peft/tuners/p_tuning/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index c1c324a0bf1c8673da68e2a63a8509b0df16c476..0000000000000000000000000000000000000000 Binary files a/peft/tuners/p_tuning/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/p_tuning/__pycache__/config.cpython-310.pyc b/peft/tuners/p_tuning/__pycache__/config.cpython-310.pyc deleted file mode 100644 index a95dc0c3c6ea52c33a2eca8e1be1c28f9ad9bf3a..0000000000000000000000000000000000000000 Binary files a/peft/tuners/p_tuning/__pycache__/config.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/p_tuning/__pycache__/model.cpython-310.pyc b/peft/tuners/p_tuning/__pycache__/model.cpython-310.pyc deleted file mode 100644 index 334098e141cec92cf1e431fb9e4f2329ed766218..0000000000000000000000000000000000000000 Binary files a/peft/tuners/p_tuning/__pycache__/model.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/poly/__pycache__/__init__.cpython-310.pyc b/peft/tuners/poly/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index b5cae1fca5c0a71e744b407516795e939543abbd..0000000000000000000000000000000000000000 Binary files a/peft/tuners/poly/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/poly/__pycache__/config.cpython-310.pyc b/peft/tuners/poly/__pycache__/config.cpython-310.pyc deleted file mode 100644 index 6bbd45c0a12c26bebee972bd81825a019a63f517..0000000000000000000000000000000000000000 Binary files a/peft/tuners/poly/__pycache__/config.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/poly/__pycache__/layer.cpython-310.pyc b/peft/tuners/poly/__pycache__/layer.cpython-310.pyc deleted file mode 100644 index 540089adc2af803e75a0770eba8aadb507aa2298..0000000000000000000000000000000000000000 Binary files a/peft/tuners/poly/__pycache__/layer.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/poly/__pycache__/model.cpython-310.pyc b/peft/tuners/poly/__pycache__/model.cpython-310.pyc deleted file mode 100644 index c04f570b472a334507220c2124eb2ff705505f5a..0000000000000000000000000000000000000000 Binary files a/peft/tuners/poly/__pycache__/model.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/poly/__pycache__/router.cpython-310.pyc b/peft/tuners/poly/__pycache__/router.cpython-310.pyc deleted file mode 100644 
index 8c45e551f21b88093299f44e189b2a82e1cbaa71..0000000000000000000000000000000000000000 Binary files a/peft/tuners/poly/__pycache__/router.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/prefix_tuning/__pycache__/__init__.cpython-310.pyc b/peft/tuners/prefix_tuning/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index 3ce5ca54fd323aa906e2ef6843a3c0099b5efc94..0000000000000000000000000000000000000000 Binary files a/peft/tuners/prefix_tuning/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/prefix_tuning/__pycache__/config.cpython-310.pyc b/peft/tuners/prefix_tuning/__pycache__/config.cpython-310.pyc deleted file mode 100644 index 7065dab677330f2f55a47c7915c1b0960c824d90..0000000000000000000000000000000000000000 Binary files a/peft/tuners/prefix_tuning/__pycache__/config.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/prefix_tuning/__pycache__/model.cpython-310.pyc b/peft/tuners/prefix_tuning/__pycache__/model.cpython-310.pyc deleted file mode 100644 index 76eb57d16b6ba10c7f816708adee9308832baea4..0000000000000000000000000000000000000000 Binary files a/peft/tuners/prefix_tuning/__pycache__/model.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/prompt_tuning/__pycache__/__init__.cpython-310.pyc b/peft/tuners/prompt_tuning/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index fdd772ea0a8b961a34d6b2f7af1d187b537a5739..0000000000000000000000000000000000000000 Binary files a/peft/tuners/prompt_tuning/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/prompt_tuning/__pycache__/config.cpython-310.pyc b/peft/tuners/prompt_tuning/__pycache__/config.cpython-310.pyc deleted file mode 100644 index cc31497e99ce70f7d18b2e18ec17dd62d009e95e..0000000000000000000000000000000000000000 Binary files a/peft/tuners/prompt_tuning/__pycache__/config.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/prompt_tuning/__pycache__/model.cpython-310.pyc b/peft/tuners/prompt_tuning/__pycache__/model.cpython-310.pyc deleted file mode 100644 index 0096fe7a5c97927a1b124419bdf920fca8aa9840..0000000000000000000000000000000000000000 Binary files a/peft/tuners/prompt_tuning/__pycache__/model.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/vera/__pycache__/__init__.cpython-310.pyc b/peft/tuners/vera/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index e98bb1fc671c6aee8f8399772dc0a778d63c8d03..0000000000000000000000000000000000000000 Binary files a/peft/tuners/vera/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/vera/__pycache__/buffer_dict.cpython-310.pyc b/peft/tuners/vera/__pycache__/buffer_dict.cpython-310.pyc deleted file mode 100644 index 30ed9f2b7aad142f0f80a3dcc7cd0500ed83f4c8..0000000000000000000000000000000000000000 Binary files a/peft/tuners/vera/__pycache__/buffer_dict.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/vera/__pycache__/config.cpython-310.pyc b/peft/tuners/vera/__pycache__/config.cpython-310.pyc deleted file mode 100644 index 46a6cb46cab34fa5ba742595ce48f4921348b2bb..0000000000000000000000000000000000000000 Binary files a/peft/tuners/vera/__pycache__/config.cpython-310.pyc and /dev/null differ diff --git a/peft/tuners/vera/__pycache__/layer.cpython-310.pyc b/peft/tuners/vera/__pycache__/layer.cpython-310.pyc deleted file mode 100644 index 0e80108e0d094dc75bdf37ba20b272c931d6272f..0000000000000000000000000000000000000000 Binary files a/peft/tuners/vera/__pycache__/layer.cpython-310.pyc 
and /dev/null differ
diff --git a/peft/tuners/vera/__pycache__/model.cpython-310.pyc b/peft/tuners/vera/__pycache__/model.cpython-310.pyc deleted file mode 100644 index eb93e9e45be0353a8291755bfe4e5ee395f26eac..0000000000000000000000000000000000000000 Binary files a/peft/tuners/vera/__pycache__/model.cpython-310.pyc and /dev/null differ
diff --git a/peft/utils/__pycache__/__init__.cpython-310.pyc b/peft/utils/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index 9d6be355c8b5f487c5b51e1b52907443c9d8ed3f..0000000000000000000000000000000000000000 Binary files a/peft/utils/__pycache__/__init__.cpython-310.pyc and /dev/null differ
diff --git a/peft/utils/__pycache__/constants.cpython-310.pyc b/peft/utils/__pycache__/constants.cpython-310.pyc deleted file mode 100644 index 2c88b9505331e5fdc88bd0f94f3301c5ce07e4d8..0000000000000000000000000000000000000000 Binary files a/peft/utils/__pycache__/constants.cpython-310.pyc and /dev/null differ
diff --git a/peft/utils/__pycache__/integrations.cpython-310.pyc b/peft/utils/__pycache__/integrations.cpython-310.pyc deleted file mode 100644 index 22f34689aa7dc1dc47b17de541b76a2c08c42cc3..0000000000000000000000000000000000000000 Binary files a/peft/utils/__pycache__/integrations.cpython-310.pyc and /dev/null differ
diff --git a/peft/utils/__pycache__/loftq_utils.cpython-310.pyc b/peft/utils/__pycache__/loftq_utils.cpython-310.pyc deleted file mode 100644 index 40add20f2681b0668f5bbcc8415e8d2046f8820a..0000000000000000000000000000000000000000 Binary files a/peft/utils/__pycache__/loftq_utils.cpython-310.pyc and /dev/null differ
diff --git a/peft/utils/__pycache__/merge_utils.cpython-310.pyc b/peft/utils/__pycache__/merge_utils.cpython-310.pyc deleted file mode 100644 index c8be9fd55f3de55880a37922719d27d8f50fe02f..0000000000000000000000000000000000000000 Binary files a/peft/utils/__pycache__/merge_utils.cpython-310.pyc and /dev/null differ
diff --git a/peft/utils/__pycache__/other.cpython-310.pyc b/peft/utils/__pycache__/other.cpython-310.pyc deleted file mode 100644 index 7665a0977730ebff9edcb1d3bf5ed2f82aaf243c..0000000000000000000000000000000000000000 Binary files a/peft/utils/__pycache__/other.cpython-310.pyc and /dev/null differ
diff --git a/peft/utils/__pycache__/peft_types.cpython-310.pyc b/peft/utils/__pycache__/peft_types.cpython-310.pyc deleted file mode 100644 index 466ea91521aa922489526a91729886436cee5818..0000000000000000000000000000000000000000 Binary files a/peft/utils/__pycache__/peft_types.cpython-310.pyc and /dev/null differ
diff --git a/peft/utils/__pycache__/save_and_load.cpython-310.pyc b/peft/utils/__pycache__/save_and_load.cpython-310.pyc deleted file mode 100644 index e7baa51dc5f10e4629c2bce8ecbd49eaf7b6b140..0000000000000000000000000000000000000000 Binary files a/peft/utils/__pycache__/save_and_load.cpython-310.pyc and /dev/null differ
diff --git a/pipelines/__pycache__/pipeline_imagecoductor.cpython-310.pyc b/pipelines/__pycache__/pipeline_imagecoductor.cpython-310.pyc deleted file mode 100644 index bfa7a0da04273f420859de00b9956270fa57e1e6..0000000000000000000000000000000000000000 Binary files a/pipelines/__pycache__/pipeline_imagecoductor.cpython-310.pyc and /dev/null differ
diff --git a/requirements.txt b/requirements.txt
index 9d7deab7dfee802a3d835c766de41f8381a8243a..eb8d8719c4ce3d7864d6580c215262483dc76538 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,29 +1,28 @@
-torch
-torchvision
-torchaudio
-transformers==4.32.1
-gradio==4.38.1
-ftfy
-tensorboard
-datasets
-Pillow==9.5.0
-opencv-python
-imgaug
 accelerate==0.23.0
-image-reward
-hpsv2
-torchmetrics
-open-clip-torch
-clip
 av2
-peft
-imageio-ffmpeg
-scipy
-tqdm
-einops
+clip
+datasets
 diffusers==0.28.0
+einops
+ftfy
+gradio==4.38.1
+hpsv2
+image-reward
+imageio-ffmpeg
+imgaug
+numpy==1.26.2
 omegaconf
+open-clip-torch
+opencv-python
+peft
+Pillow==9.5.0
 scikit-image
 scikit-learn
-numpy==1.26.2
-
+scipy
+tensorboard
+torch
+torchaudio
+torchmetrics
+torchvision
+tqdm
+transformers==4.32.1
diff --git a/utils/__pycache__/convert_from_ckpt.cpython-310.pyc b/utils/__pycache__/convert_from_ckpt.cpython-310.pyc deleted file mode 100644 index 35988a572bebdcec2da2830e79889488975df453..0000000000000000000000000000000000000000 Binary files a/utils/__pycache__/convert_from_ckpt.cpython-310.pyc and /dev/null differ
diff --git a/utils/__pycache__/convert_lora_safetensor_to_diffusers.cpython-310.pyc b/utils/__pycache__/convert_lora_safetensor_to_diffusers.cpython-310.pyc deleted file mode 100644 index 2536faa654441e074aa6a9204301c62d838c1a62..0000000000000000000000000000000000000000 Binary files a/utils/__pycache__/convert_lora_safetensor_to_diffusers.cpython-310.pyc and /dev/null differ
diff --git a/utils/__pycache__/gradio_utils.cpython-310.pyc b/utils/__pycache__/gradio_utils.cpython-310.pyc deleted file mode 100644 index 5be6755314c372cc0c0bb37bb65ad18b55d27bab..0000000000000000000000000000000000000000 Binary files a/utils/__pycache__/gradio_utils.cpython-310.pyc and /dev/null differ
diff --git a/utils/__pycache__/lora_utils.cpython-310.pyc b/utils/__pycache__/lora_utils.cpython-310.pyc deleted file mode 100644 index 8e4a21a4c4815e943ad916d86d9aef9522422373..0000000000000000000000000000000000000000 Binary files a/utils/__pycache__/lora_utils.cpython-310.pyc and /dev/null differ
diff --git a/utils/__pycache__/utils.cpython-310.pyc b/utils/__pycache__/utils.cpython-310.pyc deleted file mode 100644 index 3097d1c3d5ecd5a0c7091c30feea10450d844c55..0000000000000000000000000000000000000000 Binary files a/utils/__pycache__/utils.cpython-310.pyc and /dev/null differ
diff --git a/utils/__pycache__/visualizer.cpython-310.pyc b/utils/__pycache__/visualizer.cpython-310.pyc deleted file mode 100644 index d25e81cafb1edbab61053bbf2f483cdfe26f3398..0000000000000000000000000000000000000000 Binary files a/utils/__pycache__/visualizer.cpython-310.pyc and /dev/null differ