patrickvonplaten committed b88c59f (parent: e58dd86), commit message: "up"

Files changed:
- control_net_canny.py +11 -3
- control_net_inpaint.py +55 -0
- control_net_lineart.py +50 -0
- control_net_lineart_anime.py +52 -0
- control_net_normalbae.py +49 -0
- control_net_open_pose.py +4 -4
- control_net_pix2pix.py +45 -0
- control_net_shuffle.py +49 -0
- control_net_soft_edge.py +50 -0
- run_local.py +22 -12
control_net_canny.py
CHANGED
@@ -4,7 +4,9 @@ import os
 from huggingface_hub import HfApi
 from pathlib import Path
 from diffusers.utils import load_image
-
+import cv2
+from PIL import Image
+import numpy as np
 
 from diffusers import (
     ControlNetModel,
@@ -18,9 +20,15 @@ checkpoint = sys.argv[1]
 image = load_image(
     "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
 )
+image = np.array(image)
 
-
-
+low_threshold = 100
+high_threshold = 200
+
+image = cv2.Canny(image, low_threshold, high_threshold)
+image = image[:, :, None]
+image = np.concatenate([image, image, image], axis=2)
+canny_image = Image.fromarray(image)
 
 controlnet = ControlNetModel.from_pretrained(checkpoint, torch_dtype=torch.float16)
 pipe = StableDiffusionControlNetPipeline.from_pretrained(
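Note on the Canny preprocessing added above: cv2.Canny returns a single-channel uint8 edge map, while the ControlNet pipeline expects a 3-channel conditioning image, hence the [:, :, None] and np.concatenate steps. A minimal standalone sketch of the same step (the helper name is ours, not part of the commit):

import cv2
import numpy as np
from PIL import Image

def canny_condition(img, low_threshold=100, high_threshold=200):
    # Single-channel edge map computed from the RGB array.
    edges = cv2.Canny(np.array(img), low_threshold, high_threshold)
    # Replicate to 3 channels so the pipeline gets an RGB conditioning image.
    return Image.fromarray(np.stack([edges] * 3, axis=2))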
control_net_inpaint.py
ADDED
@@ -0,0 +1,55 @@
+#!/usr/bin/env python3
+import torch
+import os
+from huggingface_hub import HfApi
+from pathlib import Path
+from diffusers.utils import load_image
+from PIL import Image
+import numpy as np
+
+from diffusers import (
+    ControlNetModel,
+    StableDiffusionControlNetPipeline,
+    DDIMScheduler,
+)
+import sys
+
+checkpoint = sys.argv[1]
+
+
+# pre-process image and mask
+image = load_image("https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png").convert('RGB')
+mask_image = load_image("https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png").convert("L")
+
+# convert to float32
+image = np.asarray(image, dtype=np.float32)
+mask_image = np.asarray(mask_image, dtype=np.float32)
+
+image[mask_image > 127] = -255.0
+image = torch.from_numpy(image)[None].permute(0, 3, 1, 2) / 255.0
+
+prompt = "A blue cat sitting on a park bench"
+
+controlnet = ControlNetModel.from_pretrained(checkpoint, torch_dtype=torch.float16)
+pipe = StableDiffusionControlNetPipeline.from_pretrained(
+    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
+)
+
+pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
+pipe.enable_model_cpu_offload()
+
+generator = torch.manual_seed(0)
+out_image = pipe(prompt, num_inference_steps=20, generator=generator, image=image, guidance_scale=9.0).images[0]
+
+path = os.path.join(Path.home(), "images", "aa.png")
+out_image.save(path)
+
+api = HfApi()
+
+api.upload_file(
+    path_or_fileobj=path,
+    path_in_repo=path.split("/")[-1],
+    repo_id="patrickvonplaten/images",
+    repo_type="dataset",
+)
+print("https://huggingface.co/datasets/patrickvonplaten/images/blob/main/aa.png")
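Note on the mask handling in control_net_inpaint.py: masked pixels are set to -255.0 before the division by 255, so they land at exactly -1.0 in the conditioning tensor, outside the [0, 1] range of valid pixels; this appears to be how the inpaint ControlNet marks the region to repaint. The same convention as a standalone helper (the function name is ours):

import numpy as np
import torch

def make_inpaint_condition(image, mask):
    img = np.asarray(image.convert("RGB"), dtype=np.float32)
    m = np.asarray(mask.convert("L"), dtype=np.float32)
    img[m > 127] = -255.0  # masked pixels become -1.0 after normalization
    # HWC float array -> NCHW tensor; valid pixels in [0, 1], masked pixels at -1.0
    return torch.from_numpy(img)[None].permute(0, 3, 1, 2) / 255.0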
control_net_lineart.py
ADDED
@@ -0,0 +1,50 @@
+#!/usr/bin/env python3
+import torch
+import os
+from huggingface_hub import HfApi
+from pathlib import Path
+from diffusers.utils import load_image
+from controlnet_aux import LineartDetector
+
+from diffusers import (
+    ControlNetModel,
+    StableDiffusionControlNetPipeline,
+    UniPCMultistepScheduler,
+)
+import sys
+
+checkpoint = sys.argv[1]
+
+url = "https://github.com/lllyasviel/ControlNet-v1-1-nightly/raw/main/test_imgs/bag.png"
+url = "https://github.com/lllyasviel/ControlNet-v1-1-nightly/raw/main/test_imgs/person_1.jpeg"
+image = load_image(url)
+
+prompt = "michael jackson concert"
+
+processor = LineartDetector.from_pretrained("lllyasviel/Annotators")
+image = processor(image)
+image.save("/home/patrick/images/check.png")
+
+controlnet = ControlNetModel.from_pretrained(checkpoint, torch_dtype=torch.float16)
+pipe = StableDiffusionControlNetPipeline.from_pretrained(
+    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
+)
+
+pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
+pipe.enable_model_cpu_offload()
+
+generator = torch.manual_seed(0)
+out_image = pipe(prompt, num_inference_steps=30, generator=generator, image=image).images[0]
+
+path = os.path.join(Path.home(), "images", "aa.png")
+out_image.save(path)
+
+api = HfApi()
+
+api.upload_file(
+    path_or_fileobj=path,
+    path_in_repo=path.split("/")[-1],
+    repo_id="patrickvonplaten/images",
+    repo_type="dataset",
+)
+print("https://huggingface.co/datasets/patrickvonplaten/images/blob/main/aa.png")
control_net_lineart_anime.py
ADDED
@@ -0,0 +1,52 @@
+#!/usr/bin/env python3
+import torch
+import os
+from huggingface_hub import HfApi
+from pathlib import Path
+from diffusers.utils import load_image
+from controlnet_aux import LineartAnimeDetector
+from transformers import CLIPTextModel
+
+from diffusers import (
+    ControlNetModel,
+    StableDiffusionControlNetPipeline,
+    UniPCMultistepScheduler,
+)
+import sys
+
+checkpoint = sys.argv[1]
+
+url = "https://static.wikia.nocookie.net/unanything/images/a/a0/Unnamed.png/revision/latest/scale-to-width-down/350?cb=20230326002111"
+image = load_image(url)
+
+prompt = "warrior girl"
+
+processor = LineartAnimeDetector.from_pretrained("lllyasviel/Annotators")
+image = processor(image)
+image.save("/home/patrick/images/check.png")
+
+text_encoder = CLIPTextModel.from_pretrained("Linaqruf/anything-v3.0", subfolder="text_encoder", num_hidden_layers=11, torch_dtype=torch.float16)
+
+controlnet = ControlNetModel.from_pretrained(checkpoint, torch_dtype=torch.float16)
+pipe = StableDiffusionControlNetPipeline.from_pretrained(
+    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, text_encoder=text_encoder, torch_dtype=torch.float16
+)
+
+pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
+pipe.enable_model_cpu_offload()
+
+generator = torch.manual_seed(33)
+out_image = pipe(prompt, num_inference_steps=25, generator=generator, image=image).images[0]
+
+path = os.path.join(Path.home(), "images", "aa.png")
+out_image.save(path)
+
+api = HfApi()
+
+api.upload_file(
+    path_or_fileobj=path,
+    path_in_repo=path.split("/")[-1],
+    repo_id="patrickvonplaten/images",
+    repo_type="dataset",
+)
+print("https://huggingface.co/datasets/patrickvonplaten/images/blob/main/aa.png")
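Note on control_net_lineart_anime.py: loading the text encoder with num_hidden_layers=11 keeps only 11 of CLIP's 12 layers, so conditioning comes from the penultimate layer; this is the "clip skip" trick commonly paired with anime checkpoints like Anything v3. The relevant step in isolation:

import torch
from transformers import CLIPTextModel

# Truncate CLIP to 11 hidden layers ("clip skip"), as many anime models expect.
text_encoder = CLIPTextModel.from_pretrained(
    "Linaqruf/anything-v3.0", subfolder="text_encoder",
    num_hidden_layers=11, torch_dtype=torch.float16,
)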
control_net_normalbae.py
ADDED
@@ -0,0 +1,49 @@
+#!/usr/bin/env python3
+import torch
+import os
+from huggingface_hub import HfApi
+from pathlib import Path
+from diffusers.utils import load_image
+from controlnet_aux import NormalBaeDetector
+
+from diffusers import (
+    ControlNetModel,
+    StableDiffusionControlNetPipeline,
+    UniPCMultistepScheduler,
+)
+import sys
+
+checkpoint = sys.argv[1]
+
+url = "https://github.com/lllyasviel/ControlNet-v1-1-nightly/raw/main/test_imgs/person-leaves.png"
+image = load_image(url)
+
+prompt = "A head full of roses"
+
+
+processor = NormalBaeDetector.from_pretrained("lllyasviel/Annotators")
+image = processor(image)
+
+controlnet = ControlNetModel.from_pretrained(checkpoint, torch_dtype=torch.float16)
+pipe = StableDiffusionControlNetPipeline.from_pretrained(
+    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
+)
+
+pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
+pipe.enable_model_cpu_offload()
+
+generator = torch.manual_seed(33)
+out_image = pipe(prompt, num_inference_steps=20, generator=generator, image=image).images[0]
+
+path = os.path.join(Path.home(), "images", "aa.png")
+out_image.save(path)
+
+api = HfApi()
+
+api.upload_file(
+    path_or_fileobj=path,
+    path_in_repo=path.split("/")[-1],
+    repo_id="patrickvonplaten/images",
+    repo_type="dataset",
+)
+print("https://huggingface.co/datasets/patrickvonplaten/images/blob/main/aa.png")
control_net_open_pose.py
CHANGED
@@ -15,12 +15,12 @@ import sys
 
 checkpoint = sys.argv[1]
 
-image = load_image("https://
-prompt = "
+image = load_image("https://github.com/lllyasviel/ControlNet-v1-1-nightly/raw/main/test_imgs/demo.jpg").resize((512, 512))
+prompt = "The pope with sunglasses rapping with a mic"
 
 
 openpose = OpenposeDetector.from_pretrained('lllyasviel/ControlNet')
-image = openpose(image)
+image = openpose(image, hand_and_face=True)
 
 controlnet = ControlNetModel.from_pretrained(checkpoint, torch_dtype=torch.float16)
 pipe = StableDiffusionControlNetPipeline.from_pretrained(
@@ -31,7 +31,7 @@ pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
 pipe.enable_model_cpu_offload()
 
 generator = torch.manual_seed(33)
-out_image = pipe(prompt, num_inference_steps=
+out_image = pipe(prompt, num_inference_steps=35, generator=generator, image=image).images[0]
 
 path = os.path.join(Path.home(), "images", "aa.png")
 out_image.save(path)
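Note on the openpose change: hand_and_face=True asks the annotator for hand and face keypoints in addition to body pose, presumably to exercise the full conditioning the v1.1 openpose checkpoint supports. The preprocessing step in isolation:

from controlnet_aux import OpenposeDetector
from diffusers.utils import load_image

openpose = OpenposeDetector.from_pretrained('lllyasviel/ControlNet')
image = load_image("https://github.com/lllyasviel/ControlNet-v1-1-nightly/raw/main/test_imgs/demo.jpg").resize((512, 512))
pose_image = openpose(image, hand_and_face=True)  # body + hand + face keypoint map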
control_net_pix2pix.py
ADDED
@@ -0,0 +1,45 @@
+#!/usr/bin/env python3
+import torch
+import os
+from huggingface_hub import HfApi
+from pathlib import Path
+from diffusers.utils import load_image
+from PIL import Image
+import numpy as np
+
+from diffusers import (
+    ControlNetModel,
+    StableDiffusionControlNetPipeline,
+    UniPCMultistepScheduler,
+)
+import sys
+
+checkpoint = sys.argv[1]
+
+image = load_image("https://huggingface.co/lllyasviel/sd-controlnet-seg/resolve/main/images/house.png").convert('RGB')
+
+prompt = "make it on fire"
+
+controlnet = ControlNetModel.from_pretrained(checkpoint, torch_dtype=torch.float16)
+pipe = StableDiffusionControlNetPipeline.from_pretrained(
+    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
+)
+
+pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
+pipe.enable_model_cpu_offload()
+
+generator = torch.manual_seed(0)
+out_image = pipe(prompt, num_inference_steps=30, generator=generator, image=image).images[0]
+
+path = os.path.join(Path.home(), "images", "aa.png")
+out_image.save(path)
+
+api = HfApi()
+
+api.upload_file(
+    path_or_fileobj=path,
+    path_in_repo=path.split("/")[-1],
+    repo_id="patrickvonplaten/images",
+    repo_type="dataset",
+)
+print("https://huggingface.co/datasets/patrickvonplaten/images/blob/main/aa.png")
control_net_shuffle.py
ADDED
@@ -0,0 +1,49 @@
+#!/usr/bin/env python3
+import torch
+import os
+from huggingface_hub import HfApi
+from pathlib import Path
+from diffusers.utils import load_image
+from controlnet_aux import ContentShuffleDetector
+
+from diffusers import (
+    ControlNetModel,
+    StableDiffusionControlNetPipeline,
+    UniPCMultistepScheduler,
+)
+import sys
+
+checkpoint = sys.argv[1]
+
+url = "https://github.com/lllyasviel/ControlNet-v1-1-nightly/raw/main/test_imgs/city.jpg"
+image = load_image(url)
+
+prompt = "New York"
+
+
+processor = ContentShuffleDetector()
+image = processor(image)
+
+controlnet = ControlNetModel.from_pretrained(checkpoint, torch_dtype=torch.float16)
+pipe = StableDiffusionControlNetPipeline.from_pretrained(
+    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
+)
+
+pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
+pipe.enable_model_cpu_offload()
+
+generator = torch.manual_seed(33)
+out_image = pipe(prompt, num_inference_steps=20, generator=generator, image=image).images[0]
+
+path = os.path.join(Path.home(), "images", "aa.png")
+out_image.save(path)
+
+api = HfApi()
+
+api.upload_file(
+    path_or_fileobj=path,
+    path_in_repo=path.split("/")[-1],
+    repo_id="patrickvonplaten/images",
+    repo_type="dataset",
+)
+print("https://huggingface.co/datasets/patrickvonplaten/images/blob/main/aa.png")
control_net_soft_edge.py
ADDED
@@ -0,0 +1,50 @@
+#!/usr/bin/env python3
+import torch
+import os
+from huggingface_hub import HfApi
+from pathlib import Path
+from diffusers.utils import load_image
+from PIL import Image
+import numpy as np
+from controlnet_aux import PidiNetDetector, HEDdetector
+
+from diffusers import (
+    ControlNetModel,
+    StableDiffusionControlNetPipeline,
+    UniPCMultistepScheduler,
+)
+import sys
+
+checkpoint = sys.argv[1]
+
+image = load_image("https://huggingface.co/lllyasviel/sd-controlnet-mlsd/resolve/main/images/room.png")
+
+prompt = "royal chamber with fancy bed"
+
+processor = HEDdetector.from_pretrained('lllyasviel/Annotators')
+processor = PidiNetDetector.from_pretrained('lllyasviel/Annotators')
+image = processor(image, safe=True)
+
+controlnet = ControlNetModel.from_pretrained(checkpoint, torch_dtype=torch.float16)
+pipe = StableDiffusionControlNetPipeline.from_pretrained(
+    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
+)
+
+pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
+pipe.enable_model_cpu_offload()
+
+generator = torch.manual_seed(0)
+out_image = pipe(prompt, num_inference_steps=30, generator=generator, image=image).images[0]
+
+path = os.path.join(Path.home(), "images", "aa.png")
+out_image.save(path)
+
+api = HfApi()
+
+api.upload_file(
+    path_or_fileobj=path,
+    path_in_repo=path.split("/")[-1],
+    repo_id="patrickvonplaten/images",
+    repo_type="dataset",
+)
+print("https://huggingface.co/datasets/patrickvonplaten/images/blob/main/aa.png")
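Note on control_net_soft_edge.py: the second processor assignment shadows the first, so only PidiNetDetector is actually used and the HEDdetector line is dead code; safe=True selects the annotator's "safe" soft-edge variant. The effective preprocessing:

from controlnet_aux import PidiNetDetector
from diffusers.utils import load_image

image = load_image("https://huggingface.co/lllyasviel/sd-controlnet-mlsd/resolve/main/images/room.png")
processor = PidiNetDetector.from_pretrained('lllyasviel/Annotators')
control_image = processor(image, safe=True)  # the HEDdetector instance is never used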
run_local.py
CHANGED
@@ -1,13 +1,15 @@
 #!/usr/bin/env python3
-from diffusers import StableDiffusionPipeline,
-from diffusers.schedulers.scheduling_unipc_multistep import UniPCMultistepScheduler
+from diffusers import StableDiffusionPipeline, KDPM2DiscreteScheduler, StableDiffusionImg2ImgPipeline, HeunDiscreteScheduler, KDPM2AncestralDiscreteScheduler
 import time
 import os
 from huggingface_hub import HfApi
-from compel import Compel
+# from compel import Compel
 import torch
 import sys
 from pathlib import Path
+import requests
+from PIL import Image
+from io import BytesIO
 
 path = sys.argv[1]
 
@@ -15,26 +17,34 @@ api = HfApi()
 start_time = time.time()
 #pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16, device_map="auto")
 #pipe.scheduler = HeunDiscreteScheduler.from_config(pipe.scheduler.config)
-pipe =
-pipe.scheduler =
+pipe = StableDiffusionImg2ImgPipeline.from_pretrained(path, torch_dtype=torch.float16, safety_checker=None)
+pipe.scheduler = KDPM2AncestralDiscreteScheduler.from_config(pipe.scheduler.config)
 
-compel = Compel(tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder)
+# compel = Compel(tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder)
 
 
 pipe = pipe.to("cuda")
 
-prompt = "
+prompt = "ghibli style, a fantasy landscape with castles"
 
-
+# prompts = ["a cat playing with a ball++ in the forest", "a cat playing with a ball++ in the forest", "a cat playing with a ball-- in the forest"]
 
-prompt_embeds = torch.cat([compel.build_conditioning_tensor(prompt) for prompt in prompts])
+# prompt_embeds = torch.cat([compel.build_conditioning_tensor(prompt) for prompt in prompts])
 
-generator = [torch.Generator(device="cuda").manual_seed(0) for _ in range(prompt_embeds.shape[0])]
-
+# generator = [torch.Generator(device="cuda").manual_seed(0) for _ in range(prompt_embeds.shape[0])]
+#
+url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
+
+response = requests.get(url)
+image = Image.open(BytesIO(response.content)).convert("RGB")
+image.thumbnail((768, 768))
+
+generator = torch.Generator(device="cpu").manual_seed(0)
+images = pipe(prompt=prompt, image=image, generator=generator, strength=0.75, num_inference_steps=30).images
 
 print("Time", time.time() - start_time)
 
-for i, image in enumerate(
+for i, image in enumerate(images):
     path = os.path.join(Path.home(), "images", f"aa_{i}.png")
     image.save(path)
 
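Note on the img2img settings in run_local.py: strength controls how far into the noise schedule the input image is pushed before denoising, so only roughly strength * num_inference_steps denoising steps actually run (here about 22 of 30); lower strength stays closer to the input sketch, higher strength gives the prompt more freedom. A minimal strength sweep under the same setup (assumes the checkpoint at sys.argv[1] is an SD 1.5-style model):

import sys
from io import BytesIO

import requests
import torch
from PIL import Image
from diffusers import StableDiffusionImg2ImgPipeline

pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    sys.argv[1], torch_dtype=torch.float16, safety_checker=None
).to("cuda")

url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
image = Image.open(BytesIO(requests.get(url).content)).convert("RGB")
image.thumbnail((768, 768))

for strength in (0.3, 0.5, 0.75):
    generator = torch.Generator(device="cpu").manual_seed(0)
    # roughly int(strength * num_inference_steps) denoising steps actually run
    out = pipe(prompt="ghibli style, a fantasy landscape with castles", image=image,
               generator=generator, strength=strength, num_inference_steps=30).images[0]
    out.save(f"img2img_strength_{strength}.png")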