lllyasviel committed on
Commit 44d1f2e · 1 Parent(s): deba410
Files changed (2)
  1. launch.py +1 -1
  2. modules/sd.py +50 -0
launch.py CHANGED
@@ -18,7 +18,7 @@ def prepare_environment():
 
     xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.20')
 
-    comfy_repo = os.environ.get('COMFY_REPO', "https://github.com/comfyanonymous/ComfyUI.git")
+    comfy_repo = os.environ.get('COMFY_REPO', "https://github.com/lllyasviel/ComfyUI-Embedded.git")
     comfy_commit_hash = os.environ.get('COMFY_COMMIT_HASH', "5ac96897e9782805cd5e8fe85bd98ad03eae2b6f")
 
     commit = commit_hash()
modules/sd.py ADDED
@@ -0,0 +1,50 @@
+import os
+import random
+import torch
+import numpy as np
+
+from comfy.sd import load_checkpoint_guess_config
+from nodes import VAEDecode, KSamplerAdvanced, EmptyLatentImage, CLIPTextEncode
+from modules.path import modelfile_path
+
+
+xl_base_filename = os.path.join(modelfile_path, 'sd_xl_base_1.0.safetensors')
+xl_refiner_filename = os.path.join(modelfile_path, 'sd_xl_refiner_1.0.safetensors')
+
+xl_base, xl_base_clip, xl_base_vae, xl_base_clipvision = load_checkpoint_guess_config(xl_base_filename)
+del xl_base_clipvision
+
+opCLIPTextEncode = CLIPTextEncode()
+opEmptyLatentImage = EmptyLatentImage()
+opKSamplerAdvanced = KSamplerAdvanced()
+opVAEDecode = VAEDecode()
+
+with torch.no_grad():
+    positive_conditions = opCLIPTextEncode.encode(clip=xl_base_clip, text='a handsome man in forest')[0]
+    negative_conditions = opCLIPTextEncode.encode(clip=xl_base_clip, text='bad, ugly')[0]
+
+    initial_latent_image = opEmptyLatentImage.generate(width=1024, height=1024, batch_size=1)[0]
+
+    samples = opKSamplerAdvanced.sample(
+        add_noise="enable",
+        noise_seed=random.randint(1, 2 ** 64),
+        steps=25,
+        cfg=9,
+        sampler_name="euler",
+        scheduler="normal",
+        start_at_step=0,
+        end_at_step=25,
+        return_with_leftover_noise="enable",
+        model=xl_base,
+        positive=positive_conditions,
+        negative=negative_conditions,
+        latent_image=initial_latent_image,
+    )[0]
+
+    vae_decoded = opVAEDecode.decode(samples=samples, vae=xl_base_vae)[0]
+
+    for image in vae_decoded:
+        i = 255. * image.cpu().numpy()
+        img = np.clip(i, 0, 255).astype(np.uint8)
+        import cv2
+        cv2.imwrite('a.png', img[:, :, ::-1])
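The added modules/sd.py runs one full SDXL base pass at import time with a hard-coded prompt and writes the result to a.png. Purely as a hedged sketch that is not part of this commit, the same ComfyUI node calls could be wrapped in a parameterized function; the name generate and its arguments are hypothetical, and the sketch reuses the xl_base, xl_base_clip, xl_base_vae and op* objects created at module level above.

# Hypothetical sketch only, not in this commit. Reuses the checkpoint and
# node objects defined at module level in modules/sd.py above.
import random
import numpy as np
import torch

def generate(prompt, negative_prompt='bad, ugly', width=1024, height=1024, steps=25, cfg=9.0):
    with torch.no_grad():
        # Encode prompts with the SDXL base CLIP, same calls as the module body.
        positive = opCLIPTextEncode.encode(clip=xl_base_clip, text=prompt)[0]
        negative = opCLIPTextEncode.encode(clip=xl_base_clip, text=negative_prompt)[0]
        latent = opEmptyLatentImage.generate(width=width, height=height, batch_size=1)[0]
        samples = opKSamplerAdvanced.sample(
            add_noise="enable",
            noise_seed=random.randint(1, 2 ** 64),
            steps=steps,
            cfg=cfg,
            sampler_name="euler",
            scheduler="normal",
            start_at_step=0,
            end_at_step=steps,
            return_with_leftover_noise="enable",
            model=xl_base,
            positive=positive,
            negative=negative,
            latent_image=latent,
        )[0]
        decoded = opVAEDecode.decode(samples=samples, vae=xl_base_vae)[0]
    # decoded is a batch of H x W x C float tensors in [0, 1]; return the first as uint8 RGB.
    return np.clip(255.0 * decoded[0].cpu().numpy(), 0, 255).astype(np.uint8)

Calling generate('a handsome man in forest') would then reproduce the image the module currently produces as a side effect of being imported.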