lllyasviel committed
Commit 7d697f7 · 1 Parent(s): ca685d6
Files changed (2) hide show
  1. modules/core.py +42 -20
  2. modules/default_pipeline.py +12 -12
modules/core.py CHANGED
@@ -2,13 +2,17 @@ import random
 import torch
 import numpy as np

+import comfy.model_management
+import comfy.sample
+import comfy.utils
+import latent_preview
+
 from comfy.sd import load_checkpoint_guess_config
-from nodes import VAEDecode, KSamplerAdvanced, EmptyLatentImage, CLIPTextEncode
+from nodes import VAEDecode, EmptyLatentImage, CLIPTextEncode, common_ksampler


 opCLIPTextEncode = CLIPTextEncode()
 opEmptyLatentImage = EmptyLatentImage()
-opKSamplerAdvanced = KSamplerAdvanced()
 opVAEDecode = VAEDecode()


@@ -42,24 +46,42 @@ def decode_vae(vae, latent_image):


 @torch.no_grad()
-def ksample(unet, positive_condition, negative_condition, latent_image, add_noise=True, noise_seed=None, steps=25, cfg=9,
-            sampler_name='euler_ancestral', scheduler='normal', start_at_step=None, end_at_step=None,
-            return_with_leftover_noise=False):
-    return opKSamplerAdvanced.sample(
-        add_noise='enable' if add_noise else 'disable',
-        noise_seed=noise_seed if isinstance(noise_seed, int) else random.randint(1, 2 ** 64),
-        steps=steps,
-        cfg=cfg,
-        sampler_name=sampler_name,
-        scheduler=scheduler,
-        start_at_step=0 if start_at_step is None else start_at_step,
-        end_at_step=steps if end_at_step is None else end_at_step,
-        return_with_leftover_noise='enable' if return_with_leftover_noise else 'disable',
-        model=unet,
-        positive=positive_condition,
-        negative=negative_condition,
-        latent_image=latent_image,
-    )[0]
+def ksampler(model, positive, negative, latent, seed=None, steps=30, cfg=9.0, sampler_name='euler_ancestral', scheduler='normal', denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
+    seed = seed if isinstance(seed, int) else random.randint(1, 2 ** 64)
+
+    device = comfy.model_management.get_torch_device()
+    latent_image = latent["samples"]
+
+    if disable_noise:
+        noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
+    else:
+        batch_inds = latent["batch_index"] if "batch_index" in latent else None
+        noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds)
+
+    noise_mask = None
+    if "noise_mask" in latent:
+        noise_mask = latent["noise_mask"]
+
+    preview_format = "JPEG"
+    if preview_format not in ["JPEG", "PNG"]:
+        preview_format = "JPEG"
+
+    previewer = latent_preview.get_previewer(device, model.model.latent_format)
+
+    pbar = comfy.utils.ProgressBar(steps)
+
+    def callback(step, x0, x, total_steps):
+        preview_bytes = None
+        if previewer:
+            preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0)
+        pbar.update_absolute(step + 1, total_steps, preview_bytes)
+
+    samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
+                                  denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step,
+                                  force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, seed=seed)
+    out = latent.copy()
+    out["samples"] = samples
+    return out


 @torch.no_grad()
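For comparison, the base-model pass can be written against both interfaces as below. This is an illustrative sketch, not code from the commit; positive_conditions, negative_conditions and empty_latent are the values prepared earlier in modules/default_pipeline.py, and the parameter mapping (add_noise=True maps to disable_noise=False, start_at_step/end_at_step to start_step/last_step, and return_with_leftover_noise=True to force_full_denoise=False) follows from the two function bodies above.

    # Old wrapper (removed in this commit): routed through the KSamplerAdvanced node.
    sampled_latent = core.ksample(
        unet=xl_base.unet,
        positive_condition=positive_conditions,
        negative_condition=negative_conditions,
        latent_image=empty_latent,
        steps=30, start_at_step=0, end_at_step=20,
        add_noise=True, return_with_leftover_noise=True,
    )

    # New wrapper (added in this commit): calls comfy.sample.sample directly and
    # reports progress through comfy.utils.ProgressBar with latent previews.
    sampled_latent = core.ksampler(
        model=xl_base.unet,
        positive=positive_conditions,
        negative=negative_conditions,
        latent=empty_latent,
        steps=30, start_step=0, last_step=20,
        disable_noise=False, force_full_denoise=False,
    )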
modules/default_pipeline.py CHANGED
@@ -23,20 +23,20 @@ def process(positive_prompt, negative_prompt, width=1024, height=1024, batch_siz

     empty_latent = core.generate_empty_latent(width=width, height=height, batch_size=batch_size)

-    sampled_latent = core.ksample(
-        unet=xl_base.unet,
-        positive_condition=positive_conditions,
-        negative_condition=negative_conditions,
-        latent_image=empty_latent,
-        steps=30, start_at_step=0, end_at_step=20, return_with_leftover_noise=True, add_noise=True
+    sampled_latent = core.ksampler(
+        model=xl_base.unet,
+        positive=positive_conditions,
+        negative=negative_conditions,
+        latent=empty_latent,
+        steps=30, start_step=0, last_step=20, disable_noise=False, force_full_denoise=False
     )

-    sampled_latent = core.ksample(
-        unet=xl_refiner.unet,
-        positive_condition=positive_conditions_refiner,
-        negative_condition=negative_conditions_refiner,
-        latent_image=sampled_latent,
-        steps=30, start_at_step=20, end_at_step=30, return_with_leftover_noise=False, add_noise=False
+    sampled_latent = core.ksampler(
+        model=xl_refiner.unet,
+        positive=positive_conditions_refiner,
+        negative=negative_conditions_refiner,
+        latent=sampled_latent,
+        steps=30, start_step=20, last_step=30, disable_noise=True, force_full_denoise=True
     )

     decoded_latent = core.decode_vae(vae=xl_refiner.vae, latent_image=sampled_latent)
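The refiner call continues from the half-denoised base latent: disable_noise=True makes core.ksampler start from zero noise instead of drawing fresh noise, and force_full_denoise=True tells comfy.sample.sample to finish denoising by last_step=30. Note that each core.ksampler call picks its own random seed when none is passed; a caller wanting reproducible output could pin the seed on the base pass, roughly as sketched here (not part of this commit, and the seed value is arbitrary):

    sampled_latent = core.ksampler(
        model=xl_base.unet,
        positive=positive_conditions,
        negative=negative_conditions,
        latent=empty_latent,
        seed=1234,  # hypothetical fixed seed; omitting it falls back to random.randint(1, 2 ** 64)
        steps=30, start_step=0, last_step=20, disable_noise=False, force_full_denoise=False
    )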