Spaces: Running on Zero

adamelliotfields committed • Commit 4d5d84d
1 Parent(s): 14665b0

Remove Clip Skip and FreeU
Browse files:
- DOCS.md +0 -8
- README.md +1 -1
- app.py +0 -12
- lib/inference.py +1 -8
- lib/loader.py +1 -41
DOCS.md CHANGED

@@ -87,11 +87,3 @@ Enable `Use negative TI` to append [`fast_negative`](https://civitai.com/models/
 * `2`: more quality
 * `3`: balanced
 * `4`: more speed
-
-#### FreeU
-
-[FreeU](https://github.com/ChenyangSi/FreeU) re-weights the contributions sourced from the UNet’s skip connections and backbone feature maps. Can sometimes improve image quality.
-
-#### Clip Skip
-
-When enabled, the last CLIP layer is skipped. Can sometimes improve image quality.
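The FreeU support removed here used the stock diffusers toggles rather than custom UNet patching. A minimal sketch of the same calls on a bare pipeline, reusing the parameter values from the deleted _load_freeu (the checkpoint ID is only an example):

from diffusers import StableDiffusionPipeline

# Example checkpoint; any SD 1.5 pipeline exposes the same FreeU methods.
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")

# Re-weight the UNet backbone (b1, b2) and skip-connection (s1, s2)
# contributions; values copied from the deleted _load_freeu.
pipe.enable_freeu(b1=1.5, b2=1.6, s1=0.9, s2=0.2)
image = pipe("a watercolor fox").images[0]

# Restore the original weighting, as _unload_freeu did.
pipe.disable_freeu()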
README.md CHANGED

@@ -65,7 +65,7 @@ Gradio app for Stable Diffusion 1.5 featuring:
 * Compel prompt weighting
 * Hand-written style templates
 * Multiple samplers with Karras scheduling
-* DeepCache
+* DeepCache available
 * Real-ESRGAN upscaling
 
 ## Usage
app.py CHANGED

@@ -302,16 +302,6 @@ with gr.Blocks(
                         label="Use negative TI",
                         value=False,
                     )
-                    use_freeu = gr.Checkbox(
-                        elem_classes=["checkbox"],
-                        label="FreeU",
-                        value=False,
-                    )
-                    use_clip_skip = gr.Checkbox(
-                        elem_classes=["checkbox"],
-                        label="Clip skip",
-                        value=False,
-                    )
 
                 # Image-to-Image settings
                 gr.HTML("<h3>Image-to-Image</h3>")
@@ -451,8 +441,6 @@
             scale,
             num_images,
             use_karras,
-            use_freeu,
-            use_clip_skip,
             use_ip_face,
             use_negative_embedding,
             DISABLE_IMAGE_PROMPT,
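Note why both hunks are needed: Gradio passes the `inputs` list to the event handler positionally, so a checkbox must be deleted both where it is constructed and where it sits in the handler's argument list. A minimal sketch of that pattern with a hypothetical stub handler (not the app's real generate):

import gradio as gr

def generate(prompt, use_karras):
    # Hypothetical stub; the real handler is lib/inference.py's generate.
    return f"{prompt} (karras={use_karras})"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    use_karras = gr.Checkbox(label="Karras scheduling", value=False)
    out = gr.Textbox(label="Result")
    btn = gr.Button("Generate")
    # inputs[i] maps to the i-th parameter of generate, which is why
    # use_freeu and use_clip_skip had to be removed in both places.
    btn.click(generate, inputs=[prompt, use_karras], outputs=out)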
lib/inference.py CHANGED

@@ -80,8 +80,6 @@ def generate(
     scale=1,
     num_images=1,
     karras=False,
-    freeu=False,
-    clip_skip=False,
     ip_face=False,
     negative_embedding=False,
     Error=Exception,
@@ -108,11 +106,7 @@ def generate(
     KIND = "img2img" if image_prompt is not None else "txt2img"
     KIND = f"controlnet_{KIND}" if control_image_prompt is not None else KIND
 
-    EMBEDDINGS_TYPE = (
-        ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NORMALIZED
-        if clip_skip
-        else ReturnedEmbeddingsType.LAST_HIDDEN_STATES_NORMALIZED
-    )
+    EMBEDDINGS_TYPE = ReturnedEmbeddingsType.LAST_HIDDEN_STATES_NORMALIZED
 
     if ip_image_prompt:
         IP_ADAPTER = "full-face" if ip_face else "plus"
@@ -143,7 +137,6 @@
         deepcache,
         scale,
         karras,
-        freeu,
         progress,
     )
 
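For context, the deleted conditional chose between compel's two embedding modes: PENULTIMATE_HIDDEN_STATES_NORMALIZED stops one CLIP layer early (the "Clip Skip" behavior), while LAST_HIDDEN_STATES_NORMALIZED, the value this commit keeps, uses the final layer. A minimal sketch of how that flag reaches compel (the checkpoint ID is only an example):

from compel import Compel, ReturnedEmbeddingsType
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")

compel = Compel(
    tokenizer=pipe.tokenizer,
    text_encoder=pipe.text_encoder,
    # Swap in PENULTIMATE_HIDDEN_STATES_NORMALIZED to get the old Clip Skip path.
    returned_embeddings_type=ReturnedEmbeddingsType.LAST_HIDDEN_STATES_NORMALIZED,
)
embeds = compel.build_conditioning_tensor("a watercolor fox")
image = pipe(prompt_embeds=embeds).images[0]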
lib/loader.py CHANGED

@@ -29,14 +29,6 @@ class Loader:
         cls._instance.log = Logger("Loader")
         return cls._instance
 
-    @property
-    def _has_freeu(self):
-        if self.pipe is not None:
-            attrs = ["b1", "b2", "s1", "s2"]
-            block = self.pipe.unet.up_blocks[0]
-            return all(getattr(block, attr, None) is not None for attr in attrs)
-        return False
-
     def _should_unload_upscaler(self, scale=1):
         if self.upscaler is not None and self.upscaler.scale != scale:
             return True
@@ -50,11 +42,6 @@ class Loader:
             return True
         return False
 
-    def _should_unload_freeu(self, freeu=False):
-        if self._has_freeu and not freeu:
-            return True
-        return False
-
     def _should_unload_ip_adapter(self, model="", ip_adapter=""):
         # unload if model changed
         if self.model and self.model.lower() != model.lower():
@@ -106,11 +93,6 @@ class Loader:
             self.pipe.deepcache.disable()
             delattr(self.pipe, "deepcache")
 
-    def _unload_freeu(self, freeu=False):
-        if self._has_freeu and not freeu:
-            self.log.info("Disabling FreeU")
-            self.pipe.disable_freeu()
-
     # Copied from https://github.com/huggingface/diffusers/blob/v0.28.0/src/diffusers/loaders/ip_adapter.py#L300
     def _unload_ip_adapter(self):
         if self.ip_adapter is not None:
@@ -145,15 +127,11 @@ class Loader:
         ip_adapter="",
         deepcache=1,
         scale=1,
-        freeu=False,
     ):
         to_unload = []
         if self._should_unload_deepcache(deepcache):  # remove deepcache first
             self._unload_deepcache()
 
-        if self._should_unload_freeu(freeu):
-            self._unload_freeu()
-
         if self._should_unload_upscaler(scale):
             self._unload_upscaler()
             to_unload.append("upscaler")
@@ -181,11 +159,6 @@ class Loader:
             return True
         return False
 
-    def _should_load_freeu(self, freeu=False):
-        if not self._has_freeu and freeu:
-            return True
-        return False
-
     def _should_load_deepcache(self, interval=1):
         has_deepcache = hasattr(self.pipe, "deepcache")
         if not has_deepcache and interval != 1:
@@ -222,12 +195,6 @@ class Loader:
             self.pipe.deepcache.set_params(cache_interval=interval)
             self.pipe.deepcache.enable()
 
-    # https://github.com/ChenyangSi/FreeU
-    def _load_freeu(self, freeu=False):
-        if self._should_load_freeu(freeu):
-            self.log.info("Enabling FreeU")
-            self.pipe.enable_freeu(b1=1.5, b2=1.6, s1=0.9, s2=0.2)
-
     def _load_ip_adapter(self, ip_adapter=""):
         if self._should_load_ip_adapter(ip_adapter):
             msg = "Loading IP-Adapter"
@@ -298,7 +265,6 @@ class Loader:
         deepcache,
         scale,
         karras,
-        freeu,
         progress,
     ):
         device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -351,7 +317,7 @@ class Loader:
         )
         self.controlnet = annotator
 
-        self._unload(kind, model, annotator, ip_adapter, deepcache, scale, freeu)
+        self._unload(kind, model, annotator, ip_adapter, deepcache, scale)
         self._load_pipeline(kind, model, progress, **pipe_kwargs)
 
         # error loading model
@@ -379,7 +345,6 @@ class Loader:
         CURRENT_STEP = 1
         TOTAL_STEPS = sum(
             [
-                self._should_load_freeu(freeu),
                 self._should_load_deepcache(deepcache),
                 self._should_load_ip_adapter(ip_adapter),
                 self._should_load_upscaler(scale),
@@ -387,11 +352,6 @@ class Loader:
         )
 
         desc = "Configuring pipeline"
-        if not self._has_freeu and freeu:
-            self._load_freeu(freeu)
-            safe_progress(progress, CURRENT_STEP, TOTAL_STEPS, desc)
-            CURRENT_STEP += 1
-
         if self._should_load_deepcache(deepcache):
             self._load_deepcache(deepcache)
             safe_progress(progress, CURRENT_STEP, TOTAL_STEPS, desc)
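One detail worth noting in the final two hunks: TOTAL_STEPS is a sum() over the boolean _should_load_* predicates, so dropping the FreeU entry automatically shrinks the progress denominator without touching the counting logic. The pattern in isolation, with a hypothetical safe_progress stub for illustration:

def safe_progress(progress, step, total, desc):
    # Stub for illustration; the real helper guards a Gradio progress callback.
    if progress is not None and total > 0:
        progress((step, total), desc=desc)

def configure(loader, deepcache, ip_adapter, scale, progress=None):
    # Python booleans are ints, so sum() yields the number of pending steps.
    total = sum(
        [
            loader._should_load_deepcache(deepcache),
            loader._should_load_ip_adapter(ip_adapter),
            loader._should_load_upscaler(scale),
        ]
    )
    step = 1
    desc = "Configuring pipeline"
    if loader._should_load_deepcache(deepcache):
        loader._load_deepcache(deepcache)
        safe_progress(progress, step, total, desc)
        step += 1
    # ...IP-Adapter and the upscaler follow the same check/load/report shape.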