lllyasviel committed
Commit 8ee75ad
1 Parent(s): bf0bc7d
Files changed (1)
  1. webui.py +28 -35
webui.py CHANGED
@@ -1,19 +1,12 @@
 import os
 import random
+import torch
 
 from comfy.sd import load_checkpoint_guess_config
-from comfy.model_management import unload_model
-
-from nodes import (
-    VAEDecode,
-    KSamplerAdvanced,
-    EmptyLatentImage,
-    SaveImage,
-    CLIPTextEncode,
-)
-
+from nodes import VAEDecode, KSamplerAdvanced, EmptyLatentImage, SaveImage, CLIPTextEncode
 from modules.path import modelfile_path
 
+
 xl_base_filename = os.path.join(modelfile_path, 'sd_xl_base_1.0.safetensors')
 xl_refiner_filename = os.path.join(modelfile_path, 'sd_xl_refiner_1.0.safetensors')
 
@@ -25,28 +18,28 @@ opEmptyLatentImage = EmptyLatentImage()
 opKSamplerAdvanced = KSamplerAdvanced()
 opVAEDecode = VAEDecode()
 
-positive_conditions = opCLIPTextEncode.encode(clip=xl_base_clip, text='a handsome man in forest')[0]
-negative_conditions = opCLIPTextEncode.encode(clip=xl_base_clip, text='bad, ugly')[0]
-
-initial_latent_image = opEmptyLatentImage.generate(width=1024, height=1024, batch_size=1)[0]
-
-samples = opKSamplerAdvanced.sample(
-    add_noise="enable",
-    noise_seed=random.randint(1, 2 ** 64),
-    steps=25,
-    cfg=9,
-    sampler_name="euler",
-    scheduler="normal",
-    start_at_step=0,
-    end_at_step=25,
-    return_with_leftover_noise="enable",
-    model=xl_base,
-    positive=positive_conditions,
-    negative=negative_conditions,
-    latent_image=initial_latent_image,
-)[0]
-unload_model()
-
-vae_decoded = opVAEDecode.decode(samples=samples, vae=xl_base_vae)[0]
-
-a = 0
+with torch.no_grad():
+    positive_conditions = opCLIPTextEncode.encode(clip=xl_base_clip, text='a handsome man in forest')[0]
+    negative_conditions = opCLIPTextEncode.encode(clip=xl_base_clip, text='bad, ugly')[0]
+
+    initial_latent_image = opEmptyLatentImage.generate(width=1024, height=1024, batch_size=1)[0]
+
+    samples = opKSamplerAdvanced.sample(
+        add_noise="enable",
+        noise_seed=random.randint(1, 2 ** 64),
+        steps=25,
+        cfg=9,
+        sampler_name="euler",
+        scheduler="normal",
+        start_at_step=0,
+        end_at_step=25,
+        return_with_leftover_noise="enable",
+        model=xl_base,
+        positive=positive_conditions,
+        negative=negative_conditions,
+        latent_image=initial_latent_image,
+    )[0]
+
+    vae_decoded = opVAEDecode.decode(samples=samples, vae=xl_base_vae)[0]
+
+    a = 0
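In short, the commit imports torch, flattens the `nodes` import onto one line, drops the explicit `unload_model()` call, and wraps the whole encode/sample/decode pipeline in `torch.no_grad()` so no autograd graph is built during inference. Below is a minimal, self-contained sketch of that pattern only; `run_pipeline` and the toy `Conv2d` model are hypothetical stand-ins for the CLIP encode, KSampler, and VAE decode calls in webui.py, not code from the repository.

```python
import torch

# Hypothetical stand-in for the encode -> sample -> decode calls in webui.py.
def run_pipeline(model: torch.nn.Module) -> torch.Tensor:
    latent = torch.randn(1, 4, 128, 128)  # placeholder "latent image"
    return model(latent)

model = torch.nn.Conv2d(4, 4, kernel_size=3, padding=1)

# Without no_grad(), every op would record autograd history and keep
# intermediate activations alive, wasting memory during pure inference.
with torch.no_grad():
    output = run_pipeline(model)

print(output.requires_grad)  # False: no gradient graph was built
```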