Commit
·
d8d872a
1
Parent(s):
6c27fdd
[Speculative Decoding] Add speculative decoding
Browse files- run_bug.py +32 -0
- run_lora_on_off.py +35 -0
- run_sd_lora.py +20 -0
- run_sd_sde.py +22 -0
- run_sdxl_cpu.py +25 -0
run_bug.py
ADDED
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python3
"""Reproduce a callback issue: SDXL base with a KDPM2 ancestral (Karras) scheduler."""
import torch

from diffusers import AutoencoderKL, StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline, KDPM2AncestralDiscreteScheduler

# fp16-safe VAE replacement for the stock SDXL VAE.
vae = AutoencoderKL.from_pretrained(
    "madebyollin/sdxl-vae-fp16-fix",
    torch_dtype=torch.float16,
)

base = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    vae=vae,
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
)

# Swap in the ancestral KDPM2 scheduler with Karras sigmas.
base.scheduler = KDPM2AncestralDiscreteScheduler.from_config(
    base.scheduler.config, use_karras_sigmas=True
)

base.to("cuda")


def print_step(step_index, timestep, latents):
    # Per-step callback: just log which denoising step we are on.
    print(step_index)


generator = torch.manual_seed(1111)

images = base(
    prompt="LOVE",
    num_inference_steps=10,
    generator=generator,
    callback=print_step,
).images
|
run_lora_on_off.py
ADDED
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python3
# @title Fuse/unfuse LoRAs sequentially leading to trouble
"""Load, fuse, run, unfuse, and unload several LoRAs back to back.

The original script repeated the same five-line stanza four times, once per
LoRA id; the sequence of pipeline calls is identical here, driven by a list.
"""
import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")

# LoRAs exercised in order — same order as the original repeated stanzas.
LORA_IDS = [
    "Pclanglais/TintinIA",
    "ProomptEngineer/pe-balloon-diffusion-style",
    "ostris/crayon_style_lora_sdxl",
    "joachimsallstrom/aether-cloud-lora-for-sdxl",
]

for lora_id in LORA_IDS:
    # Fuse, run a short generation, then fully undo so the next LoRA
    # starts from (supposedly) clean base weights.
    pipe.load_lora_weights(lora_id)
    pipe.fuse_lora()
    images = pipe("a mecha robot", num_inference_steps=2)
    pipe.unfuse_lora()
    pipe.unload_lora_weights()
|
run_sd_lora.py
ADDED
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python3
"""Generate one SDXL image with a LoRA applied and upload the result."""
import torch

import hf_image_uploader as hiu
from diffusers import DiffusionPipeline, AutoencoderKL

# fp16-safe VAE replacement for the stock SDXL VAE.
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    variant="fp16",
    vae=vae,
    torch_dtype=torch.float16,
)
pipe.to("cuda")

pipe.load_lora_weights("rvorias/m_test")
# pipe.enable_xformers_memory_efficient_attention()

result = pipe("a photo of a pikachu pixel art", generator=torch.manual_seed(66))
image = result.images[0]
hiu.upload(image, "patrickvonplaten/images")
|
run_sd_sde.py
ADDED
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python3
"""Run sde-dpmsolver++ through two scheduler classes at two tiny resolutions."""
from diffusers import DPMSolverMultistepScheduler, StableDiffusionXLPipeline, DPMSolverSDEScheduler

path = "hf-internal-testing/tiny-stable-diffusion-xl-pipe"

pipe = StableDiffusionXLPipeline.from_pretrained(path)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config, algorithm_type="sde-dpmsolver++")

prompt = "An astronaut riding a green horse on Mars"
steps = 20


def _sweep():
    """Generate at 32x32 and 64x64 using the pipeline's current scheduler."""
    for idx in range(2):
        side = 32 * (idx + 1)
        out = pipe(prompt=prompt, width=side, height=side, num_inference_steps=steps).images[0]
    return out


image = _sweep()

# Same sweep again with the dedicated SDE scheduler class.
pipe.scheduler = DPMSolverSDEScheduler.from_config(pipe.scheduler.config, algorithm_type="sde-dpmsolver++")
image = _sweep()
|
run_sdxl_cpu.py
ADDED
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python3
"""Run tiny SDXL with sde-dpmsolver++ under two scheduler classes, uploading images.

Generates at 32x32 and 64x64 with DPMSolverMultistepScheduler, then repeats the
identical sweep with DPMSolverSDEScheduler. The original duplicated the sweep
loop verbatim; it is factored into one helper here.
"""
from diffusers import DPMSolverMultistepScheduler, StableDiffusionXLPipeline, DPMSolverSDEScheduler
import hf_image_uploader as hiu

path = "hf-internal-testing/tiny-stable-diffusion-xl-pipe"

pipe = StableDiffusionXLPipeline.from_pretrained(path)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config, algorithm_type="sde-dpmsolver++")

prompt = "An astronaut riding a green horse on Mars"
steps = 20


def _generate_and_upload():
    """Generate at 32x32 and 64x64 with the current scheduler, uploading each image."""
    for i in range(2):
        width = 32 * (i + 1)
        height = 32 * (i + 1)
        image = pipe(prompt=prompt, width=width, height=height, num_inference_steps=steps).images[0]
        # NOTE(review): source indentation was lost in extraction; the upload is
        # assumed to be inside the loop (one upload per generated image) — confirm.
        hiu.upload(image, "patrickvonplaten/images")


_generate_and_upload()

pipe.scheduler = DPMSolverSDEScheduler.from_config(pipe.scheduler.config, algorithm_type="sde-dpmsolver++")
_generate_and_upload()
|