patrickvonplaten committed
Commit 2acb9f2 • 1 Parent(s): 269cbe7
up
- all_branches.txt +0 -0
- log.txt +0 -0
- mass_branches_retrieval.sh +0 -1
- mass_open_controlnet_pr.sh → mass_fp16_branch_pr.sh +2 -2
- model_ids.txt +0 -0
- open_pr_version.py +32 -44
- run_local_img2img_xl.py +46 -0
- run_local_xl.py +46 -0
- run_video.py +12 -0
all_branches.txt CHANGED
The diff for this file is too large to render. See raw diff.
log.txt CHANGED
The diff for this file is too large to render. See raw diff.
mass_branches_retrieval.sh CHANGED
@@ -2,6 +2,5 @@
 touch all_branches.txt
 
 while read p; do
-    echo "-------------------------------" >> all_branches.txt
     python check_for_branches.py --model_id ${p} >> all_branches.txt
 done <model_ids.txt
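check_for_branches.py is not part of this commit, so only its CLI surface is known from the loop above: it takes --model_id, handles one repo per call, and its output is appended to all_branches.txt. A minimal sketch of what it plausibly does, assuming it lists branches through the real HfApi.list_repo_refs API and prints the model id whenever a non-main (e.g. fp16) branch exists — the body below is a guess, not the actual script:

#!/usr/bin/env python3
# Hypothetical reconstruction of check_for_branches.py.
import argparse

from huggingface_hub import HfApi

parser = argparse.ArgumentParser()
parser.add_argument("--model_id", type=str, required=True)
args = parser.parse_args()

api = HfApi()
refs = api.list_repo_refs(args.model_id, repo_type="model")
if any(branch.name != "main" for branch in refs.branches):
    # One model id per line, so all_branches.txt can drive the next script.
    print(args.model_id)

The commit also drops the echo "---..." separator from the loop, which fits this reading: all_branches.txt becomes a clean, machine-readable list of ids for the follow-up script.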
mass_open_controlnet_pr.sh → mass_fp16_branch_pr.sh RENAMED
@@ -2,5 +2,5 @@
 while read p; do
     echo "-------------------------------"
     echo "Open PR for $p"
-    python
-done <
+    python open_pr_version.py $p
+done <all_branches.txt
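The two loops chain into a pipeline: mass_branches_retrieval.sh fills all_branches.txt, then the renamed script opens one PR per line by invoking open_pr_version.py. An equivalent single-process driver in Python might look like this (a sketch: it assumes all_branches.txt holds one model id per line and that open_pr_version.py guards its CLI code so convert is importable):

# Hypothetical driver replacing both shell loops; convert(api, model_id)
# is the actual entry point defined in open_pr_version.py below.
from huggingface_hub import HfApi

from open_pr_version import convert

api = HfApi()
with open("all_branches.txt") as f:
    for line in f:
        model_id = line.strip()
        if not model_id:
            continue
        print("-------------------------------")
        print(f"Open PR for {model_id}")
        convert(api, model_id)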
model_ids.txt CHANGED
The diff for this file is too large to render. See raw diff.
open_pr_version.py CHANGED
@@ -5,6 +5,7 @@ import torch
 import shutil
 from tempfile import TemporaryDirectory
 from typing import List, Optional
+from diffusers import DiffusionPipeline
 
 from huggingface_hub import CommitInfo, CommitOperationAdd, Discussion, HfApi, hf_hub_download
 from huggingface_hub.file_download import repo_folder_name
@@ -44,24 +45,27 @@ def is_index_stable_diffusion_like(config_dict):
 
 
 def convert_single(model_id: str, folder: str) -> List["CommitOperationAdd"]:
-    pipe = DiffusionPipeline.from_pretrained(model_id)
+    pipe = DiffusionPipeline.from_pretrained(model_id, cache_dir="/home/patrick/cache_to_delete")
 
     try:
         pipe.to(torch_dtype=torch.float16)
-        pipe.save_pretrained(
-        pipe.save_pretrained(
-
-
-
-
-
-
-
-
-
-
-
-
+        pipe.save_pretrained(folder, variant="fp16")
+        pipe.save_pretrained(folder, variant="fp16", safe_serialization=True)
+
+        all_files = []
+        def find_files_in_dir(directory):
+            for root, dirs, files in os.walk(directory):
+                for file in files:
+                    all_files.append(os.path.join(root, file))
+
+        find_files_in_dir(folder)
+        files = [f for f in all_files if ".fp16." in f]
+
+        operations = [CommitOperationAdd(path_in_repo='/'.join(f.split("/")[-2:]), path_or_fileobj=f) for f in files]
+        return operations
+    except Exception as e:
+        print(e)
+        return False
 
 def convert_file(
     old_config: str,
@@ -96,18 +100,7 @@ def previous_pr(api: "HfApi", model_id: str, pr_title: str) -> Optional["Discussion"]:
 
 
 def convert(api: "HfApi", model_id: str, force: bool = False) -> Optional["CommitInfo"]:
-
-    pr_title = "Fix deprecation warning by changing `CLIPFeatureExtractor` to `CLIPImageProcessor`."
-    info = api.model_info(model_id)
-    filenames = set(s.rfilename for s in info.siblings)
-
-    if "model_index.json" not in filenames:
-        print(f"Model: {model_id} has no model_index.json file to change")
-        return
-
-    # if "vae/config.json" not in filenames:
-    #     print(f"Model: {model_id} has no 'vae/config.json' file to change")
-    #     return
+    pr_title = "Fix deprecated float16/fp16 variant loading through new `version` API."
 
     with TemporaryDirectory() as d:
         folder = os.path.join(d, repo_folder_name(repo_id=model_id, repo_type="models"))
@@ -121,27 +114,22 @@ def convert(api: "HfApi", model_id: str, force: bool = False) -> Optional["CommitInfo"]:
         new_pr = pr
         raise AlreadyExists(f"Model {model_id} already has an open PR check out {url}")
     else:
-        operations
+        operations = convert_single(model_id, folder)
 
     if operations:
-        pr_title = pr_title.format(model_type)
-        # if model_type == "Stable Diffusion 1":
-        #     sample_size = 64
-        #     image_size = 512
-        # elif model_type == "Stable Diffusion 2":
-        #     sample_size = 96
-        #     image_size = 768
-
-        # pr_description = (
-        #     f"Since `diffusers==0.9.0` the width and height is automatically inferred from the `sample_size` attribute of your unet's config. It seems like your diffusion model has the same architecture as {model_type} which means that when using this model, by default an image size of {image_size}x{image_size} should be generated. This in turn means the unet's sample size should be **{sample_size}**. \n\n In order to suppress to update your configuration on the fly and to suppress the deprecation warning added in this PR: https://github.com/huggingface/diffusers/pull/1406/files#r1035703505 it is strongly recommended to merge this PR."
-        # )
         contributor = model_id.split("/")[0]
         pr_description = (
-            f"Hey {contributor} 👋, \n\n Your model repository seems to contain
-            "
-
-            "This PR makes sure that the
-            "
+            f"Hey {contributor} 👋, \n\n Your model repository seems to contain a [`fp16` branch](https://huggingface.co/{model_id}/tree/fp16) to load the model in float16 precision. "
+            "Loading `fp16` versions from a branch instead of the main branch is deprecated and will eventually be forbidden. "
+            "Instead, we strongly recommend to save `fp16` versions of the model under `.fp16.` version files directly on the 'main' branch as enabled through this PR."
+            f"This PR makes sure that your model repository allows the user to correctly download float16 precision model weights by adding `fp16` model weights in both safetensors and PyTorch bin format:"
+            "\n\n"
+            "```py\n"
+            f"pipe = DiffusionPipeline.from_pretrained({model_id}, torch_dtype=torch.float16, variant='fp16')"
+            "\n```"
+            "\n\n"
+            "For more information please have a look at: https://huggingface.co/docs/diffusers/using-diffusers/loading#checkpoint-variants."
+            "\nWe made sure you that you can safely merge this pull request. \n\n Best, the 🧨 Diffusers team."
        )
        new_pr = api.create_commit(
            repo_id=model_id,
run_local_img2img_xl.py ADDED
@@ -0,0 +1,46 @@
+#!/usr/bin/env python3
+from diffusers import DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionPipeline, KDPM2DiscreteScheduler, StableDiffusionImg2ImgPipeline, HeunDiscreteScheduler, KDPM2AncestralDiscreteScheduler, DDIMScheduler
+import time
+import os
+from huggingface_hub import HfApi
+# from compel import Compel
+import torch
+import sys
+from pathlib import Path
+import requests
+from PIL import Image
+from io import BytesIO
+
+path = sys.argv[1]
+
+api = HfApi()
+start_time = time.time()
+pipe = DiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16)
+pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
+# pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
+# pipe = StableDiffusionImg2ImgPipeline.from_pretrained(path, torch_dtype=torch.float16, safety_checker=None
+
+# compel = Compel(tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder)
+
+
+pipe = pipe.to("cuda")
+
+prompt = "Elon Musk riding a green horse on Mars"
+
+# pipe.unet.to(memory_format=torch.channels_last)
+pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+pipe(prompt=prompt, num_inference_steps=2).images[0]
+
+image = pipe(prompt=prompt).images[0]
+
+file_name = f"aaa"
+path = os.path.join(Path.home(), "images", f"{file_name}.png")
+image.save(path)
+
+api.upload_file(
+    path_or_fileobj=path,
+    path_in_repo=path.split("/")[-1],
+    repo_id="patrickvonplaten/images",
+    repo_type="dataset",
+)
+print(f"https://huggingface.co/datasets/patrickvonplaten/images/blob/main/{file_name}.png")
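Despite its name, run_local_img2img_xl.py only ever runs text-to-image: requests, PIL.Image, and BytesIO are imported but unused, and the StableDiffusionImg2ImgPipeline line is commented out (and missing its closing parenthesis). If the img2img path were wired up, it would presumably look like the sketch below; the checkpoint, init-image URL, and strength value are illustrative assumptions:

# Hypothetical img2img counterpart to the script above.
import torch
import requests
from io import BytesIO
from PIL import Image
from diffusers import StableDiffusionImg2ImgPipeline

pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, safety_checker=None
).to("cuda")

# Fetch and size an init image, then denoise partway back toward the prompt.
response = requests.get("https://example.com/init.png")  # placeholder URL
init_image = Image.open(BytesIO(response.content)).convert("RGB").resize((768, 512))
image = pipe(prompt="Elon Musk riding a green horse on Mars", image=init_image, strength=0.75).images[0]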
run_local_xl.py ADDED
@@ -0,0 +1,46 @@
(Identical in content to run_local_img2img_xl.py above, line for line:)
+#!/usr/bin/env python3
+from diffusers import DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionPipeline, KDPM2DiscreteScheduler, StableDiffusionImg2ImgPipeline, HeunDiscreteScheduler, KDPM2AncestralDiscreteScheduler, DDIMScheduler
+import time
+import os
+from huggingface_hub import HfApi
+# from compel import Compel
+import torch
+import sys
+from pathlib import Path
+import requests
+from PIL import Image
+from io import BytesIO
+
+path = sys.argv[1]
+
+api = HfApi()
+start_time = time.time()
+pipe = DiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16)
+pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
+# pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
+# pipe = StableDiffusionImg2ImgPipeline.from_pretrained(path, torch_dtype=torch.float16, safety_checker=None
+
+# compel = Compel(tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder)
+
+
+pipe = pipe.to("cuda")
+
+prompt = "Elon Musk riding a green horse on Mars"
+
+# pipe.unet.to(memory_format=torch.channels_last)
+pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+pipe(prompt=prompt, num_inference_steps=2).images[0]
+
+image = pipe(prompt=prompt).images[0]
+
+file_name = f"aaa"
+path = os.path.join(Path.home(), "images", f"{file_name}.png")
+image.save(path)
+
+api.upload_file(
+    path_or_fileobj=path,
+    path_in_repo=path.split("/")[-1],
+    repo_id="patrickvonplaten/images",
+    repo_type="dataset",
+)
+print(f"https://huggingface.co/datasets/patrickvonplaten/images/blob/main/{file_name}.png")
run_video.py ADDED
@@ -0,0 +1,12 @@
+#!/usr/bin/env python3
+import torch
+from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
+from diffusers.utils import export_to_video
+
+pipe = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16)
+pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
+pipe.enable_model_cpu_offload()
+
+prompt = "Darth Vader is surfing on waves"
+video_frames = pipe(prompt, num_inference_steps=40, height=320, width=576, num_frames=24).frames
+video_path = export_to_video(video_frames, output_video_path="/home/patrick/videos/video_10.mp4")