patrickvonplaten committed
Commit a032108
Parent(s): c51632b
further improve

Files changed:
- run_kandinsky.py +57 -0
- run_local.py +25 -42
run_kandinsky.py ADDED
@@ -0,0 +1,57 @@
+#!/usr/bin/env python3
+from diffusers import DiffusionPipeline
+import torch
+from diffusers.models.attention_processor import AttnAddedKVProcessor2_0, XFormersAttnAddedKVProcessor, AttnAddedKVProcessor
+
+import time
+import os
+from huggingface_hub import HfApi
+from pathlib import Path
+
+api = HfApi()
+prev_time = time.time()
+
+
+prompt = "a picture of elon musk next to a rocket"
+negative_prompt = "low quality, ugly"
+
+pipe_prior = DiffusionPipeline.from_pretrained(
+    "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
+)
+pipe_prior.to("cuda")
+t2i_pipe = DiffusionPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16)
+t2i_pipe.to("cuda")
+
+t2i_pipe.unet.set_attn_processor(AttnAddedKVProcessor2_0())
+t2i_pipe.unet.to(memory_format=torch.channels_last)
+t2i_pipe.unet = torch.compile(t2i_pipe.unet, mode="reduce-overhead", fullgraph=True)
+
+next_time = time.time()
+print("Loading", next_time - prev_time)
+prev_time = next_time
+
+generator = torch.Generator(device="cuda").manual_seed(12)
+image_embeds, negative_image_embeds = pipe_prior(prompt, negative_prompt=negative_prompt, generator=generator).to_tuple()
+
+next_time = time.time()
+print("Prior", next_time - prev_time)
+prev_time = next_time
+
+for _ in range(3):
+    images = t2i_pipe(prompt, image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, negative_prompt=negative_prompt, num_inference_steps=50, generator=generator).images
+
+    next_time = time.time()
+    print("Text-to-image", next_time - prev_time)
+    prev_time = next_time
+
+for i, image in enumerate(images):
+    path = os.path.join(Path.home(), "images", f"aa_{i}.png")
+    image.save(path)
+
+    api.upload_file(
+        path_or_fileobj=path,
+        path_in_repo=path.split("/")[-1],
+        repo_id="patrickvonplaten/images",
+        repo_type="dataset",
+    )
+    print(f"https://huggingface.co/datasets/patrickvonplaten/images/blob/main/aa_{i}.png")
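Note: the added script imports all three added-KV attention processors but only installs AttnAddedKVProcessor2_0. As a hedged aside (not part of the commit), a minimal sketch of how the same timing loop could compare the three variants, assuming t2i_pipe, prompt, image_embeds, negative_image_embeds, and negative_prompt are loaded as above (before the torch.compile step) and that xformers is installed for the last processor:

# Sketch only: compare the three attention processors imported in run_kandinsky.py.
# Assumes the pipeline and embeddings from the script above are already in scope.
import time
import torch
from diffusers.models.attention_processor import (
    AttnAddedKVProcessor,          # plain PyTorch attention
    AttnAddedKVProcessor2_0,       # torch 2.0 scaled_dot_product_attention
    XFormersAttnAddedKVProcessor,  # xformers memory-efficient attention
)

processors = {
    "vanilla": AttnAddedKVProcessor(),
    "sdpa": AttnAddedKVProcessor2_0(),
    "xformers": XFormersAttnAddedKVProcessor(),
}

for name, processor in processors.items():
    t2i_pipe.unet.set_attn_processor(processor)
    torch.cuda.synchronize()
    start = time.time()
    t2i_pipe(
        prompt,
        image_embeds=image_embeds,
        negative_image_embeds=negative_image_embeds,
        negative_prompt=negative_prompt,
        num_inference_steps=50,
    )
    torch.cuda.synchronize()
    print(name, time.time() - start)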
run_local.py CHANGED
@@ -1,9 +1,8 @@
 #!/usr/bin/env python3
-from diffusers import
+from diffusers import StableDiffusionPipeline
 import time
 import os
 from huggingface_hub import HfApi
-# from compel import Compel
 import torch
 import sys
 from pathlib import Path
@@ -11,47 +10,31 @@ import requests
 from PIL import Image
 from io import BytesIO

-
+begin = ["a picture of <rickmann>", "a photo of <rickmann>", "<rickmann>", "an image of <rickmann>"]
+end = ["", " smiling", " with sunglasses", " at the beach", " in front of a mountain", " in beautiful sunshine", " in avatar style", " as a picasso painting", " as an oil painting", " as oil art", " while it snows", " in a forest", " with a nice landscape"]

 api = HfApi()
 start_time = time.time()
-
-
-pipe = StableDiffusionControlNetPipeline.from_pretrained(path, torch_dtype=torch.float16, safety_checker=None)
-pipe.scheduler = KDPM2AncestralDiscreteScheduler.from_config(pipe.scheduler.config)
-
-# compel = Compel(tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder)
-
-
+path = "patrickvonplaten/papa_out_5"
+pipe = StableDiffusionPipeline.from_pretrained(path, safety_checker=None, torch_dtype=torch.float16)
 pipe = pipe.to("cuda")
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-print("
-
-for i, image in enumerate(images):
-    path = os.path.join(Path.home(), "images", f"aa_{i}.png")
-    image.save(path)
-
-    api.upload_file(
-        path_or_fileobj=path,
-        path_in_repo=path.split("/")[-1],
-        repo_id="patrickvonplaten/images",
-        repo_type="dataset",
-    )
-    print(f"https://huggingface.co/datasets/patrickvonplaten/images/blob/main/aa_{i}.png")
+counter = 0
+
+for b in begin:
+    for e in end:
+        prompt = b + e + ", highly realistic, super resolution, high quality photography, beautiful"
+
+        images = pipe(prompt=prompt, num_images_per_prompt=4, negative_prompt="ugly, bad quality, deformed", num_inference_steps=50).images
+
+        for i, image in enumerate(images):
+            path = os.path.join(Path.home(), "papa", f"{counter}.png")
+            image.save(path)
+
+            api.upload_file(
+                path_or_fileobj=path,
+                path_in_repo=path.split("/")[-1],
+                repo_id="patrickvonplaten/papa",
+                repo_type="dataset",
+            )
+            print(f"https://huggingface.co/datasets/patrickvonplaten/papa/blob/main/{counter}.png")
+            counter += 1
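Note: the updated run_local.py sweeps every begin/end combination with two nested loops and calls upload_file once per generated image. As a hedged aside (not part of the commit), a minimal sketch of the same sweep written with itertools.product and a single bulk upload at the end; out_dir and the upload_folder call are illustrative assumptions, not code from the diff:

# Sketch only: same prompt grid as run_local.py, using itertools.product.
# Assumes pipe, api, begin, and end are defined as in the script above.
import itertools
import os
from pathlib import Path

out_dir = os.path.join(Path.home(), "papa")  # assumed output directory

counter = 0
for b, e in itertools.product(begin, end):
    prompt = b + e + ", highly realistic, super resolution, high quality photography, beautiful"
    images = pipe(
        prompt=prompt,
        num_images_per_prompt=4,
        negative_prompt="ugly, bad quality, deformed",
        num_inference_steps=50,
    ).images
    for image in images:
        image.save(os.path.join(out_dir, f"{counter}.png"))
        counter += 1

# One bulk upload of the whole folder instead of one upload_file call per image.
api.upload_folder(folder_path=out_dir, repo_id="patrickvonplaten/papa", repo_type="dataset")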