lllyasviel committed
Commit 87246b6 · 1 Parent(s): 904cbf6

update dep, sampler (#217)

Files changed (5)
  1. fooocus_version.py +1 -1
  2. launch.py +1 -1
  3. modules/core.py +9 -8
  4. readme.md +1 -1
  5. update_log.md +4 -0
fooocus_version.py CHANGED
@@ -1 +1 @@
-version = '1.0.37'
+version = '1.0.38'
launch.py CHANGED
@@ -20,7 +20,7 @@ def prepare_environment():
     xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.20')
 
     comfy_repo = os.environ.get('COMFY_REPO', "https://github.com/comfyanonymous/ComfyUI")
-    comfy_commit_hash = os.environ.get('COMFY_COMMIT_HASH', "2bc12d3d22efb5c63ae3a7fc342bb2dd16b31735")
+    comfy_commit_hash = os.environ.get('COMFY_COMMIT_HASH', "c9b562aed153cb35d4ce4126caf86995b0c63b12")
 
     print(f"Python {sys.version}")
     print(f"Fooocus version: {fooocus_version.version}")
modules/core.py CHANGED
@@ -9,7 +9,7 @@ import comfy.utils
 
 from comfy.sd import load_checkpoint_guess_config
 from nodes import VAEDecode, EmptyLatentImage, CLIPTextEncode
-from comfy.sample import prepare_mask, broadcast_cond, load_additional_models, cleanup_additional_models
+from comfy.sample import prepare_mask, broadcast_cond, get_additional_models, cleanup_additional_models
 from modules.samplers_advanced import KSampler, KSamplerWithRefiner
 from modules.patch import patch_all
 
@@ -92,7 +92,7 @@ def get_previewer(device, latent_format):
 
 
 @torch.no_grad()
-def ksampler(model, positive, negative, latent, seed=None, steps=30, cfg=7.0, sampler_name='dpmpp_2m_sde_gpu',
+def ksampler(model, positive, negative, latent, seed=None, steps=30, cfg=7.0, sampler_name='uni_pc',
              scheduler='karras', denoise=1.0, disable_noise=False, start_step=None, last_step=None,
             force_full_denoise=False, callback_function=None):
     # SCHEDULERS = ["normal", "karras", "exponential", "simple", "ddim_uniform"]
@@ -133,7 +133,6 @@ def ksampler(model, positive, negative, latent, seed=None, steps=30, cfg=7.0, sa
     if noise_mask is not None:
         noise_mask = prepare_mask(noise_mask, noise.shape, device)
 
-    comfy.model_management.load_model_gpu(model)
     real_model = model.model
 
     noise = noise.to(device)
@@ -142,7 +141,9 @@ def ksampler(model, positive, negative, latent, seed=None, steps=30, cfg=7.0, sa
     positive_copy = broadcast_cond(positive, noise.shape[0], device)
     negative_copy = broadcast_cond(negative, noise.shape[0], device)
 
-    models = load_additional_models(positive, negative, model.model_dtype())
+    models = get_additional_models(positive, negative)
+    comfy.model_management.load_models_gpu([model] + models, comfy.model_management.batch_area_memory(
+        noise.shape[0] * noise.shape[2] * noise.shape[3]))
 
     sampler = KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler,
                        denoise=denoise, model_options=model.model_options)
@@ -164,7 +165,7 @@ def ksampler(model, positive, negative, latent, seed=None, steps=30, cfg=7.0, sa
 
 @torch.no_grad()
 def ksampler_with_refiner(model, positive, negative, refiner, refiner_positive, refiner_negative, latent,
-                          seed=None, steps=30, refiner_switch_step=20, cfg=7.0, sampler_name='dpmpp_2m_sde_gpu',
+                          seed=None, steps=30, refiner_switch_step=20, cfg=7.0, sampler_name='uni_pc',
                           scheduler='karras', denoise=1.0, disable_noise=False, start_step=None, last_step=None,
                           force_full_denoise=False, callback_function=None):
     # SCHEDULERS = ["normal", "karras", "exponential", "simple", "ddim_uniform"]
@@ -205,8 +206,6 @@ def ksampler_with_refiner(model, positive, negative, refiner, refiner_positive,
     if noise_mask is not None:
         noise_mask = prepare_mask(noise_mask, noise.shape, device)
 
-    comfy.model_management.load_model_gpu(model)
-
     noise = noise.to(device)
     latent_image = latent_image.to(device)
 
@@ -216,7 +215,9 @@ def ksampler_with_refiner(model, positive, negative, refiner, refiner_positive,
     refiner_positive_copy = broadcast_cond(refiner_positive, noise.shape[0], device)
     refiner_negative_copy = broadcast_cond(refiner_negative, noise.shape[0], device)
 
-    models = load_additional_models(positive, negative, model.model_dtype())
+    models = get_additional_models(positive, negative)
+    comfy.model_management.load_models_gpu([model] + models, comfy.model_management.batch_area_memory(
+        noise.shape[0] * noise.shape[2] * noise.shape[3]))
 
     sampler = KSamplerWithRefiner(model=model, refiner_model=refiner, steps=steps, device=device,
                                   sampler=sampler_name, scheduler=scheduler,
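
With this change, both samplers switch their default sampler_name to 'uni_pc', collect the extra models attached to the conditioning via get_additional_models, and load them together with the base model through comfy.model_management.load_models_gpu, sized by batch_area_memory on the noise tensor, instead of calling load_model_gpu on the base model alone. A minimal sketch of the area value that feeds batch_area_memory (the example shapes are illustrative, assuming an SDXL 1024x1024 batch of two, whose latents are 128x128):

import torch

# Latent-space noise tensor: (batch, channels, height, width).
noise = torch.zeros(2, 4, 128, 128)

# The commit passes batch * height * width of the latent to batch_area_memory(),
# which turns it into the memory estimate handed to load_models_gpu().
batch_area = noise.shape[0] * noise.shape[2] * noise.shape[3]
print(batch_area)  # 2 * 128 * 128 = 32768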
readme.md CHANGED
@@ -103,7 +103,7 @@ Note that some of these tricks are currently (2023 Aug 11) impossible to reprodu
 6. The parameters of samplers are carefully tuned.
 7. Because XL uses positional encoding for generation resolution, images generated by several fixed resolutions look a bit better than that from arbitrary resolutions (because the positional encoding is not very good at handling int numbers that are unseen during training). This suggests that the resolutions in UI may be hard coded for best results.
 8. Separated prompts for two different text encoders seem unnecessary. Separated prompts for base model and refiner may work but the effects are random, and we refrain from implement this.
-9. DPM family seems well-suited for XL, since XL sometimes generates overly smooth texture but DPM family sometimes generate overly dense detail in texture. Their joint effect looks neutral and appealing to human perception.
+9. DPM family (or UniPC) seems well-suited for XL, since XL sometimes generates overly smooth texture but DPM family sometimes generate overly dense detail in texture. Their joint effect looks neutral and appealing to human perception. (Update 2023 Aug 19, changed to UniPC.)
 
 ## Advanced Features
 
update_log.md CHANGED
@@ -1,3 +1,7 @@
+### 1.0.38
+
+* Update dependency, update to sampler.
+
 ### 1.0.37
 
 * Cinematic-default v2.