from ..utils import common_annotator_call, define_preprocessor_inputs, INPUT, run_script
import comfy.model_management as model_management
import sys

def install_deps():
    # scikit-learn is an optional dependency of the DiffusionEdge detector;
    # install it on first use if it is not already available.
    try:
        import sklearn
    except ImportError:
        run_script([sys.executable, '-s', '-m', 'pip', 'install', 'scikit-learn'])

class DiffusionEdge_Preprocessor:
    @classmethod
    def INPUT_TYPES(s):
        return define_preprocessor_inputs(
            # "natrual" is kept as-is: the string is interpolated into the
            # pretrained checkpoint filename (diffusion_edge_natrual.pt) in execute().
            environment=INPUT.COMBO(["indoor", "urban", "natrual"]),
            patch_batch_size=INPUT.INT(default=4, min=1, max=16),
            resolution=INPUT.RESOLUTION()
        )

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "execute"
    CATEGORY = "ControlNet Preprocessors/Line Extractors"

    def execute(self, image, environment="indoor", patch_batch_size=4, resolution=512, **kwargs):
        install_deps()
        from custom_controlnet_aux.diffusion_edge import DiffusionEdgeDetector
        model = DiffusionEdgeDetector \
            .from_pretrained(filename=f"diffusion_edge_{environment}.pt") \
            .to(model_management.get_torch_device())
        out = common_annotator_call(model, image, resolution=resolution, patch_batch_size=patch_batch_size)
        del model  # release the model so its VRAM can be reclaimed
        return (out,)

NODE_CLASS_MAPPINGS = {
    "DiffusionEdge_Preprocessor": DiffusionEdge_Preprocessor,
}
NODE_DISPLAY_NAME_MAPPINGS = {
    "DiffusionEdge_Preprocessor": "Diffusion Edge (batch size ↑ => speed ↑, VRAM ↑)",
}
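
# Minimal usage sketch, for illustration only (an assumption, not part of the
# node's API): it bypasses ComfyUI's graph executor and presumes `img` is an
# IMAGE tensor already loaded in the format the node expects.
#
#   node = DiffusionEdge_Preprocessor()
#   (edges,) = node.execute(img, environment="indoor", patch_batch_size=4, resolution=512)
#
# `execute` returns a one-element tuple, matching RETURN_TYPES = ("IMAGE",).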