ItchyFingaz committed
Commit 37d34c5 · 1 Parent(s): 6679dad
Upload 56 files

This view is limited to 50 files because it contains too many changes. See raw diff.
- LatentUpscaleMultiply_Omar92.py +72 -0
- comfyuiDerfuuMathAnd_derfuuNodes02/.DS_Store +0 -0
- comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/__pycache__/debugNodes.cpython-310.pyc +0 -0
- comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/__pycache__/fields.cpython-310.pyc +0 -0
- comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/__pycache__/functionsNodes.cpython-310.pyc +0 -0
- comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/__pycache__/mathNodes.cpython-310.pyc +0 -0
- comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/__pycache__/moddedNodes.cpython-310.pyc +0 -0
- comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/__pycache__/tree.cpython-310.pyc +0 -0
- comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/__pycache__/trigonNodes.cpython-310.pyc +0 -0
- comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/__pycache__/typeNodes.cpython-310.pyc +0 -0
- comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/__pycache__/types.cpython-310.pyc +0 -0
- comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/debugNodes.py +51 -0
- comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/fields.py +7 -0
- comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/functionsNodes.py +166 -0
- comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/mathNodes.py +137 -0
- comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/moddedNodes.py +194 -0
- comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/tree.py +13 -0
- comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/trigonNodes.py +87 -0
- comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/typeNodes.py +130 -0
- comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/types.py +7 -0
- comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes_02.py +64 -0
- wasNodeSuitesComfyui_externalSeedKsampler/Samplers_WAS.py +64 -0
- wasNodeSuitesComfyui_externalSeedKsampler/WAS_License.txt +7 -0
- wasNodeSuitesComfyui_filtersSuiteV1/Canny_Filter_WAS.py +147 -0
- wasNodeSuitesComfyui_filtersSuiteV1/Image_Filters_WAS.py +78 -0
- wasNodeSuitesComfyui_filtersSuiteV1/WAS_License.txt +7 -0
- wasNodeSuitesComfyui_filtersSuiteV2/Canny_Filter_WAS.py +147 -0
- wasNodeSuitesComfyui_filtersSuiteV2/Image_Blend_WAS.py +51 -0
- wasNodeSuitesComfyui_filtersSuiteV2/Image_Combine_WAS.py +103 -0
- wasNodeSuitesComfyui_filtersSuiteV2/Image_Filters_WAS.py +93 -0
- wasNodeSuitesComfyui_filtersSuiteV2/WAS_License.txt +7 -0
- wasNodeSuitesComfyui_filtersSuiteV3/Canny_Filter_WAS.py +147 -0
- wasNodeSuitesComfyui_filtersSuiteV3/Image_Blend_WAS.py +51 -0
- wasNodeSuitesComfyui_filtersSuiteV3/Image_Combine_WAS.py +103 -0
- wasNodeSuitesComfyui_filtersSuiteV3/Image_Edge_Detection_WAS.py +48 -0
- wasNodeSuitesComfyui_filtersSuiteV3/Image_Filters_WAS.py +93 -0
- wasNodeSuitesComfyui_filtersSuiteV3/Image_Style_Filter_WAS.py +137 -0
- wasNodeSuitesComfyui_filtersSuiteV3/WAS_License.txt +7 -0
- wasNodeSuitesComfyui_filtersSuiteV42/Image_Blank_WAS.py +45 -0
- wasNodeSuitesComfyui_filtersSuiteV42/Image_Blend_WAS.py +52 -0
- wasNodeSuitesComfyui_filtersSuiteV42/Image_Canny_Filter_WAS.py +147 -0
- wasNodeSuitesComfyui_filtersSuiteV42/Image_Combine_WAS.py +103 -0
- wasNodeSuitesComfyui_filtersSuiteV42/Image_Edge_Detection_WAS.py +48 -0
- wasNodeSuitesComfyui_filtersSuiteV42/Image_Film_Grain_WAS.py +89 -0
- wasNodeSuitesComfyui_filtersSuiteV42/Image_Filters_WAS.py +103 -0
- wasNodeSuitesComfyui_filtersSuiteV42/Image_Flip_WAS.py +50 -0
- wasNodeSuitesComfyui_filtersSuiteV42/Image_Nova_Filter_WAS.py +73 -0
- wasNodeSuitesComfyui_filtersSuiteV42/Image_Rotate_WAS.py +69 -0
- wasNodeSuitesComfyui_filtersSuiteV42/Image_Style_Filter_WAS.py +137 -0
- wasNodeSuitesComfyui_filtersSuiteV42/WAS_License.txt +7 -0
LatentUpscaleMultiply_Omar92.py
ADDED
@@ -0,0 +1,72 @@
# Developed by Omar - https://github.com/omar92
# https://civitai.com/user/omar92
# discord: Omar92#3374

# This node provides an alternative scaling node by multiplying the previous width and height by a factor
import os
import sys

sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy"))
import comfy.samplers
import comfy.sd
import comfy.utils
import model_management


def before_node_execution():
    model_management.throw_exception_if_processing_interrupted()


def interrupt_processing(value=True):
    model_management.interrupt_current_processing(value)


class LatentUpscaleMultiply:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "samples": ("LATENT",),
                "upscale_method": (s.upscale_methods,),
                "WidthMul": (
                    "FLOAT",
                    {"default": 1.25, "min": 0.0, "max": 10.0, "step": 0.1},
                ),
                "HeightMul": (
                    "FLOAT",
                    {"default": 1.25, "min": 0.0, "max": 10.0, "step": 0.1},
                ),
                "crop": (s.crop_methods,),
            }
        }

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "O/latent"

    def upscale(self, samples, upscale_method, WidthMul, HeightMul, crop):
        s = samples.copy()
        x = samples["samples"].shape[3]
        y = samples["samples"].shape[2]

        new_x = int(x * WidthMul)
        new_y = int(y * HeightMul)
        print("upscale from (" + str(x * 8) + "," + str(y * 8) + ") to (" + str(new_x * 8) + "," + str(new_y * 8) + ")")

        def enforce_mul_of_64(d):
            leftover = d % 8
            if leftover != 0:
                d += 8 - leftover
            return d

        s["samples"] = comfy.utils.common_upscale(
            samples["samples"], enforce_mul_of_64(new_x), enforce_mul_of_64(new_y), upscale_method, crop
        )
        return (s,)


NODE_CLASS_MAPPINGS = {"LatentUpscaleMultiply": LatentUpscaleMultiply}
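Note (illustration only, not part of the uploaded file): the node multiplies the latent tensor's width and height, which are 1/8 of the pixel dimensions, and rounds each up to the next multiple of 8 latent units (64 pixels) before calling comfy.utils.common_upscale. A minimal sketch of that size arithmetic:

# Hypothetical stand-alone illustration of LatentUpscaleMultiply's size math.
def target_latent_size(latent_w, latent_h, width_mul=1.25, height_mul=1.25):
    def round_up_to_8(d):
        return d + (8 - d % 8) % 8
    return round_up_to_8(int(latent_w * width_mul)), round_up_to_8(int(latent_h * height_mul))

# A 512x512 image has a 64x64 latent; the default 1.25 multipliers give an
# 80x80 latent, i.e. a 640x640 image after decoding.
print(target_latent_size(64, 64))  # (80, 80)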
comfyuiDerfuuMathAnd_derfuuNodes02/.DS_Store
ADDED
Binary file (6.15 kB)

comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/__pycache__/debugNodes.cpython-310.pyc
ADDED
Binary file (1.66 kB)

comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/__pycache__/fields.cpython-310.pyc
ADDED
Binary file (382 Bytes)

comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/__pycache__/functionsNodes.cpython-310.pyc
ADDED
Binary file (4.87 kB)

comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/__pycache__/mathNodes.cpython-310.pyc
ADDED
Binary file (3.54 kB)

comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/__pycache__/moddedNodes.cpython-310.pyc
ADDED
Binary file (5.27 kB)

comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/__pycache__/tree.cpython-310.pyc
ADDED
Binary file (474 Bytes)

comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/__pycache__/trigonNodes.cpython-310.pyc
ADDED
Binary file (2.09 kB)

comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/__pycache__/typeNodes.cpython-310.pyc
ADDED
Binary file (3.25 kB)

comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/__pycache__/types.cpython-310.pyc
ADDED
Binary file (263 Bytes)
comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/debugNodes.py
ADDED
@@ -0,0 +1,51 @@
import custom_nodes.Derfuu_Nodes.types as type
import custom_nodes.Derfuu_Nodes.fields as field
from custom_nodes.Derfuu_Nodes.tree import TREE_DEBUG

class baseDebugNode:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "FLOAT": (type.FLOAT,),
                "INTEGER": (type.INT,),
                "TUPLE": (type.TUPLE,),
            }
        }

    RETURN_TYPES = ()
    CATEGORY = TREE_DEBUG
    FUNCTION = "print_values"
    OUTPUT_NODE = True

    def print_values(self, FLOAT=0, INTEGER=0, TUPLE=(0, 0)):
        print(FLOAT, INTEGER, TUPLE, sep="\n")
        return (None,)


class extendedDebugNode:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "FLOAT": (type.FLOAT,),
                "INTEGER": (type.INT,),
                "TUPLE": (type.TUPLE,),
                # "ABS": (type.ABS,),
            }
        }

    RETURN_TYPES = ()
    CATEGORY = TREE_DEBUG
    FUNCTION = "print_values"
    OUTPUT_NODE = True

    def print_values(self, FLOAT=0, INTEGER=0, TUPLE=(0, 0)):
        print(FLOAT, INTEGER, TUPLE, sep="\n")
        return (None,)
comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/fields.py
ADDED
@@ -0,0 +1,7 @@
import sys

FLOAT = ("FLOAT", {"default": 1.00, "min": sys.float_info.min, "max": sys.float_info.max, "step": 0.01})
# BOOL = ("BOOL", {"default": False})
INT = ("INT", {"default": 1, "min": -sys.maxsize, "max": sys.maxsize, "step": 1})
STRING = ("STRING", {"default": ""})
STRING_ML = ("STRING", {"multiline": True})
comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/functionsNodes.py
ADDED
@@ -0,0 +1,166 @@
import comfy.utils
import custom_nodes.Derfuu_Nodes.types as type
import custom_nodes.Derfuu_Nodes.fields as field

import math

from custom_nodes.Derfuu_Nodes.tree import TREE_FUNCTIONS


class Int2Float:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "INT": (type.INT,),
            }
        }

    RETURN_TYPES = (type.FLOAT,)
    FUNCTION = "get_value"
    CATEGORY = TREE_FUNCTIONS

    def get_value(self, INT):
        return (float(INT),)


class CeilNode:
    def __init__(self) -> None:
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "FLOAT": (type.FLOAT,),
            }
        }

    RETURN_TYPES = (type.INT,)
    FUNCTION = "get_value"
    CATEGORY = TREE_FUNCTIONS

    def get_value(self, FLOAT):
        total = int(math.ceil(FLOAT))
        return (total,)


class FloorNode:
    def __init__(self) -> None:
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "FLOAT": (type.FLOAT,),
            }
        }

    RETURN_TYPES = (type.INT,)
    FUNCTION = "get_value"
    CATEGORY = TREE_FUNCTIONS

    def get_value(self, FLOAT):
        total = int(math.floor(FLOAT))
        return (total,)


class Float2Tuple:
    def __init__(self) -> None:
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "FLOAT_A": (type.FLOAT,),
                "FLOAT_B": (type.FLOAT,),
                "Ceil2Int": ([False, True],),
            }
        }

    RETURN_TYPES = (type.TUPLE,)
    CATEGORY = TREE_FUNCTIONS

    FUNCTION = 'get_tuple'

    def get_tuple(self, FLOAT_A=0, FLOAT_B=0, Ceil2Int=False):
        # Ceil2Int arrives as a boolean from the combo widget
        if Ceil2Int:
            FLOAT_A = math.ceil(FLOAT_A)
            FLOAT_B = math.ceil(FLOAT_B)
        return ((FLOAT_A, FLOAT_B),)


class Tuple2Float:
    def __init__(self) -> None:
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "TUPLE": (type.TUPLE,),
            }
        }

    RETURN_TYPES = (type.FLOAT, type.FLOAT,)
    CATEGORY = TREE_FUNCTIONS

    FUNCTION = 'get_tuple'

    def get_tuple(self, TUPLE):
        return (TUPLE[0], TUPLE[1],)


class GetLatentSize:
    def __init__(self) -> None:
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "LATENT": (type.LATENT,),
                "ORIGINAL_VALUES": ([False, True],),
            }
        }

    RETURN_TYPES = (type.TUPLE,)
    CATEGORY = TREE_FUNCTIONS

    FUNCTION = 'get_size'

    def get_size(self, LATENT, ORIGINAL_VALUES=False):
        lc = LATENT.copy()
        size = lc["samples"].shape[3], lc["samples"].shape[2]
        if ORIGINAL_VALUES == False:
            size = size[0] * 8, size[1] * 8
        return (size,)


class GetImageSize:
    def __init__(self) -> None:
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "IMAGE": (type.IMAGE,),
            }
        }

    RETURN_TYPES = (type.TUPLE,)
    CATEGORY = TREE_FUNCTIONS

    FUNCTION = 'get_size'

    def get_size(self, IMAGE):
        samples = IMAGE.movedim(-1, 1)
        size = samples.shape[3], samples.shape[2]
        # size = size.movedim(1, -1)
        return (size,)
comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/mathNodes.py
ADDED
@@ -0,0 +1,137 @@
import math

import numpy

import custom_nodes.Derfuu_Nodes.types as type
from custom_nodes.Derfuu_Nodes.tree import TREE_MATH


class MultiplyNode:
    def __init__(self) -> None:
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "FLOAT_A": (type.FLOAT,),
                "FLOAT_B": (type.FLOAT,),
            },
        }

    RETURN_TYPES = (type.FLOAT,)
    FUNCTION = "multiply"
    CATEGORY = TREE_MATH

    def multiply(self, FLOAT_A, FLOAT_B):
        total = float(FLOAT_A * FLOAT_B)
        return (total,)


class DivideNode:
    def __init__(self) -> None:
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "FLOAT_A": (type.FLOAT,),
                "FLOAT_B": (type.FLOAT,),
            },
        }

    RETURN_TYPES = (type.FLOAT,)
    FUNCTION = "divide"
    CATEGORY = TREE_MATH

    def divide(self, FLOAT_A, FLOAT_B):
        total = float(FLOAT_A / FLOAT_B)
        return (total,)


class SumNode:
    def __init__(self) -> None:
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "FLOAT_A": (type.FLOAT,),
                "FLOAT_B": (type.FLOAT,),
            },
        }

    RETURN_TYPES = (type.FLOAT,)
    FUNCTION = "sum"
    CATEGORY = TREE_MATH

    def sum(self, FLOAT_A, FLOAT_B):
        total = float(FLOAT_A + FLOAT_B)
        return (total,)


class SubtractNode:
    def __init__(self) -> None:
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "FLOAT_A": (type.FLOAT,),
                "FLOAT_B": (type.FLOAT,),
            },
        }

    RETURN_TYPES = (type.FLOAT,)
    FUNCTION = "sub"
    CATEGORY = TREE_MATH

    def sub(self, FLOAT_A, FLOAT_B):
        total = float(FLOAT_A - FLOAT_B)
        return (total,)


class PowNode:
    def __init__(self) -> None:
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "FLOAT_A": (type.FLOAT,),
                "FLOAT_B": (type.FLOAT,),
            },
        }

    RETURN_TYPES = (type.FLOAT,)
    FUNCTION = "pow_value"
    CATEGORY = TREE_MATH

    def pow_value(self, FLOAT_A, FLOAT_B=2):
        total = math.pow(FLOAT_A, FLOAT_B)
        return (total,)


class SquareRootNode:
    def __init__(self) -> None:
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "FLOAT": (type.FLOAT,),
            },
        }

    RETURN_TYPES = (type.FLOAT,)
    FUNCTION = "sqrt_value"
    CATEGORY = TREE_MATH

    def sqrt_value(self, FLOAT):
        total = math.sqrt(FLOAT)
        return (total,)
comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/moddedNodes.py
ADDED
@@ -0,0 +1,194 @@
import custom_nodes.Derfuu_Nodes.types as type
import custom_nodes.Derfuu_Nodes.fields as field
from custom_nodes.Derfuu_Nodes.tree import TREE_LATENTS, TREE_IMAGES

import math
import torch

import comfy.utils


class EmptyLatentImage:
    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "TUPLE": (type.TUPLE,),
                "batch_size": field.INT,
            }
        }

    RETURN_TYPES = (type.LATENT,)
    FUNCTION = "generate"
    CATEGORY = TREE_LATENTS

    def generate(self, TUPLE, batch_size=1):
        width = int(TUPLE[0])
        height = int(TUPLE[1])

        latent = torch.zeros([batch_size, 4, height // 8, width // 8])
        return ({"samples": latent},)


class ImageScale_Ratio:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "IMAGE": (type.IMAGE,),
                "TUPLE": (type.TUPLE,),
                "modifier": field.FLOAT,
                "upscale_method": (cls.upscale_methods,),
                "crop": (cls.crop_methods,)}}

    RETURN_TYPES = (type.IMAGE, type.TUPLE,)
    FUNCTION = "upscale"

    CATEGORY = TREE_IMAGES

    def upscale(self, IMAGE, upscale_method, TUPLE, modifier, crop):
        samples = IMAGE.movedim(-1, 1)

        width_B = int(TUPLE[0])
        height_B = int(TUPLE[1])

        height = math.ceil(height_B * modifier)
        width = math.ceil(width_B * modifier)
        cls = comfy.utils.common_upscale(samples, width, height, upscale_method, crop)
        cls = cls.movedim(1, -1)
        return (cls, (width, height),)


class ImageScale_Side:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]

    def __init__(self) -> None:
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "IMAGE": (type.IMAGE,),
                "TUPLE": (type.TUPLE,),
                "side_length": field.INT,
                "side": (["Width", "Height"],),
                "upscale_method": (cls.upscale_methods,),
                "crop": (cls.crop_methods,)}}

    RETURN_TYPES = (type.IMAGE, type.TUPLE,)
    FUNCTION = "upscale"

    CATEGORY = TREE_IMAGES

    def upscale(self, IMAGE, upscale_method, TUPLE, side_length, side, crop):
        samples = IMAGE.movedim(-1, 1)

        width_B = int(TUPLE[0])
        height_B = int(TUPLE[1])

        width = width_B
        height = height_B

        if side == "Width":
            height_ratio = height_B / width_B
            width = side_length
            height = height_ratio * width
        elif side == "Height":
            width_ratio = width_B / height_B
            height = side_length
            width = width_ratio * height

        width = math.ceil(width)
        height = math.ceil(height)

        cls = comfy.utils.common_upscale(samples, width, height, upscale_method, crop)
        cls = cls.movedim(1, -1)
        return (cls, (width, height), )


class LatentScale_Ratio:
    scale_methods = (["nearest-exact", "bilinear", "area"],)
    crop_methods = (["disabled", "center"],)

    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "LATENT": (type.LATENT,),
                "TUPLE": (type.TUPLE,),
                "modifier": field.FLOAT,
                "scale_method": cls.scale_methods,
                "crop": cls.crop_methods,
            }
        }

    RETURN_TYPES = (type.LATENT, type.TUPLE,)
    FUNCTION = "scale"
    CATEGORY = TREE_LATENTS

    def scale(self, LATENT, scale_method, crop, modifier, TUPLE):

        width = int(TUPLE[0] * modifier)
        height = int(TUPLE[1] * modifier)

        cls = LATENT.copy()
        cls["samples"] = comfy.utils.common_upscale(LATENT["samples"], width // 8, height // 8, scale_method, crop)
        return (cls, (width, height),)


class LatentScale_Side:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]

    def __init__(self) -> None:
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "LATENT": (type.LATENT,),
                "TUPLE": (type.TUPLE,),
                "side_length": field.INT,
                "side": (["Width", "Height"],),
                "scale_method": (cls.upscale_methods,),
                "crop": (cls.crop_methods,)}}

    RETURN_TYPES = (type.LATENT, type.TUPLE,)
    FUNCTION = "upscale"

    CATEGORY = TREE_LATENTS

    def upscale(self, LATENT, scale_method, TUPLE, side_length, side, crop):
        width_B = int(TUPLE[0])
        height_B = int(TUPLE[1])

        width = width_B
        height = height_B

        if side == "Width":
            height_ratio = height_B / width_B
            width = side_length
            height = height_ratio * width
        elif side == "Height":
            width_ratio = width_B / height_B
            height = side_length
            width = width_ratio * height

        width = math.ceil(width)
        height = math.ceil(height)

        cls = LATENT.copy()
        cls["samples"] = comfy.utils.common_upscale(LATENT["samples"], width // 8, height // 8, scale_method, crop)
        return (cls, (width, height),)
comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/tree.py
ADDED
@@ -0,0 +1,13 @@
TREE_MAIN = "Derfuu_Nodes"

TREE_VARIABLE = TREE_MAIN + "/Variables"
TREE_TUPLES = TREE_MAIN + "/Tuples"
TREE_MATH = TREE_MAIN + "/Math"
TREE_FUNCTIONS = TREE_MAIN + "/Functions"
TREE_TRIGONOMETRY = TREE_MATH + "/Trigonometry"
TREE_MODDED = TREE_MAIN + "/Modded nodes"

TREE_IMAGES = TREE_MODDED + "/Image"
TREE_LATENTS = TREE_MODDED + "/Latent"

TREE_DEBUG = TREE_MAIN + "/Debug"
comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/trigonNodes.py
ADDED
@@ -0,0 +1,87 @@
import custom_nodes.Derfuu_Nodes.types as type
from custom_nodes.Derfuu_Nodes.tree import TREE_TRIGONOMETRY

import math

class SinNode:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(self):
        return {
            "required": {
                "FLOAT": (type.FLOAT,),
                "INPUT_TYPE": (["RAD", "DEG"],),
                "arcSin": ([False, True],)
            }
        }

    RETURN_TYPES = (type.FLOAT,)
    FUNCTION = "get_value"
    CATEGORY = TREE_TRIGONOMETRY

    def get_value(self, FLOAT, INPUT_TYPE="RAD", arcSin=False):
        if INPUT_TYPE == "DEG":
            FLOAT = math.radians(FLOAT)
        if arcSin == True:
            FLOAT = math.asin(FLOAT)
        else:
            FLOAT = math.sin(FLOAT)
        return (FLOAT,)


class CosNode:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(self):
        return {
            "required": {
                "FLOAT": (type.FLOAT,),
                "INPUT_TYPE": (["RAD", "DEG"],),
                "arcCos": ([False, True],)
            }
        }

    RETURN_TYPES = (type.FLOAT,)
    FUNCTION = "get_value"
    CATEGORY = TREE_TRIGONOMETRY

    def get_value(self, FLOAT, INPUT_TYPE="RAD", arcCos=False):
        if INPUT_TYPE == "DEG":
            FLOAT = math.radians(FLOAT)
        if arcCos == True:
            FLOAT = math.acos(FLOAT)
        else:
            FLOAT = math.cos(FLOAT)
        return (FLOAT,)


class tgNode:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(self):
        return {
            "required": {
                "FLOAT": (type.FLOAT,),
                "INPUT_TYPE": (["RAD", "DEG"],),
                "arcTan": ([False, True],)
            }
        }

    RETURN_TYPES = (type.FLOAT,)
    FUNCTION = "get_value"
    CATEGORY = TREE_TRIGONOMETRY

    def get_value(self, FLOAT, INPUT_TYPE="RAD", arcTan=False):
        if INPUT_TYPE == "DEG":
            FLOAT = math.radians(FLOAT)
        if arcTan == True:
            FLOAT = math.atan(FLOAT)
        else:
            FLOAT = math.tan(FLOAT)
        return (FLOAT,)
comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/typeNodes.py
ADDED
@@ -0,0 +1,130 @@
import math
import custom_nodes.Derfuu_Nodes.types as type
import custom_nodes.Derfuu_Nodes.fields as field
from custom_nodes.Derfuu_Nodes.tree import TREE_VARIABLE

class FloatNode:
    def __init__(self) -> None:
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "VALUE": field.FLOAT,
            },
        }

    RETURN_TYPES = (type.FLOAT,)
    CATEGORY = TREE_VARIABLE
    FUNCTION = "get_value"

    def get_value(self, VALUE):
        return (VALUE,)


class IntegerNode:
    def __init__(self) -> None:
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "VALUE": field.INT,
            },
        }

    RETURN_TYPES = (type.INT,)
    CATEGORY = TREE_VARIABLE
    FUNCTION = "get_value"

    def get_value(self, VALUE):
        return (VALUE,)


class TupleNode:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "FLOAT_A": field.FLOAT,
                "FLOAT_B": field.FLOAT,
                "Ceil2Int": ([False, True],),
            }
        }

    RETURN_TYPES = (type.TUPLE,)
    CATEGORY = TREE_VARIABLE

    FUNCTION = 'get_value'

    def get_value(self, FLOAT_A, FLOAT_B, Ceil2Int=False):
        # Ceil2Int arrives as a boolean from the combo widget
        if Ceil2Int:
            FLOAT_A = math.ceil(FLOAT_A)
            FLOAT_B = math.ceil(FLOAT_B)
        return ((FLOAT_A, FLOAT_B),)


class StringNode:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "VALUE": field.STRING,
            }
        }

    RETURN_TYPES = (type.STRING,)
    FUNCTION = "get_value"
    CATEGORY = TREE_VARIABLE

    def get_value(self, VALUE):
        return (VALUE,)


class MultilineStringNode:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "VALUE": field.STRING_ML,
            }
        }

    RETURN_TYPES = (type.STRING,)
    FUNCTION = "get_value"
    CATEGORY = TREE_VARIABLE

    def get_value(self, VALUE):
        return (VALUE,)


# class Any:
#     def __init__(self):
#         pass
#
#     @classmethod
#     def INPUT_TYPES(cls):
#         return {
#             "required": {
#                 "VALUE": (type.STRING,),
#             }
#         }
#
#     RETURN_TYPES = (type.ABS,)
#     CATEGORY = TREE_VARIABLE
#
#     FUNCTION = 'get_value'
#
#     def get_value(self, VALUE):
#         return (VALUE,)
comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes/types.py
ADDED
@@ -0,0 +1,7 @@
FLOAT = "FlOAT"
INT = "INTEGER"
TUPLE = "TUPLE"
STRING = "STRING"
LATENT = "LATENT"
IMAGE = "IMAGE"
ABS = "ANY"
comfyuiDerfuuMathAnd_derfuuNodes02/Derfuu_Nodes_02.py
ADDED
@@ -0,0 +1,64 @@
import custom_nodes.Derfuu_Nodes.typeNodes as TypeNodes
import custom_nodes.Derfuu_Nodes.mathNodes as MathNodes
import custom_nodes.Derfuu_Nodes.functionsNodes as FunctionNodes
import custom_nodes.Derfuu_Nodes.moddedNodes as ModdedNodes
import custom_nodes.Derfuu_Nodes.debugNodes as Debug
import custom_nodes.Derfuu_Nodes.trigonNodes as TrigYNodes


class emptyNode:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(self):
        return {
            "required": {

            }
        }

    RETURN_TYPES = 0
    FUNCTION = "get_value"
    CATEGORY = "ABS"

    def get_value(self, ):
        return (None,)


NODE_CLASS_MAPPINGS = {
    "FloatNode_DF": TypeNodes.FloatNode,
    "IntegerNode_DF": TypeNodes.IntegerNode,
    "StringNode_DF": TypeNodes.StringNode,
    "TupleNode_DF": TypeNodes.TupleNode,
    "MultilineStringNode_DF": TypeNodes.MultilineStringNode,
    # "ABS_DF": TypeNodes.Any,

    "Float2Tuple_DF": FunctionNodes.Float2Tuple,
    "Tuple2Float_DF": FunctionNodes.Tuple2Float,
    "Int2Float_DF": FunctionNodes.Int2Float,
    "CeilNode_DF": FunctionNodes.CeilNode,
    "FloorNode_DF": FunctionNodes.FloorNode,
    "GetLatentSize_DF": FunctionNodes.GetLatentSize,
    "GetImageSize_DF": FunctionNodes.GetImageSize,

    "SumNode_DF": MathNodes.SumNode,
    "SubtractNode_DF": MathNodes.SubtractNode,
    "MultiplyNode_DF": MathNodes.MultiplyNode,
    "DivideNode_DF": MathNodes.DivideNode,
    "PowNode_DF": MathNodes.PowNode,
    "SquareRootNode_DF": MathNodes.SquareRootNode,

    "sin_DF": TrigYNodes.SinNode,
    "cos_DF": TrigYNodes.CosNode,
    "tg_DF": TrigYNodes.tgNode,

    "EmptyLatentImage_DF": ModdedNodes.EmptyLatentImage,
    "LatentScale_Ratio_DF": ModdedNodes.LatentScale_Ratio,
    "LatentScale_Side_DF": ModdedNodes.LatentScale_Side,
    "ImageScale_Ratio_DF": ModdedNodes.ImageScale_Ratio,
    "ImageScale_Side_DF": ModdedNodes.ImageScale_Side,

    # "DebugPrint_DF": Debug.baseDebugNode,
    # "ExtDebugPrint_DF": Debug.extendedDebugNode,
}
wasNodeSuitesComfyui_externalSeedKsampler/Samplers_WAS.py
ADDED
@@ -0,0 +1,64 @@
# By WASasquatch (Discord: WAS#0263)

import sys, os
import numpy as np

sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy"))
sys.path.append('../ComfyUI')

import comfy.samplers
import comfy.sd
import comfy.utils

import comfy_extras.clip_vision

import model_management
import importlib

import nodes

class WAS_KSampler:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                     "seed": ("SEED",),
                     "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                     "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                     "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                     "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                     "positive": ("CONDITIONING", ),
                     "negative": ("CONDITIONING", ),
                     "latent_image": ("LATENT", ),
                     "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                     }
                }

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"

    CATEGORY = "sampling"

    def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0):
        return nodes.common_ksampler(model, seed['seed'], steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)

class WAS_Seed:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff})}
                }

    RETURN_TYPES = ("SEED",)
    FUNCTION = "seed"

    CATEGORY = "constant"

    def seed(self, seed):
        return ( {"seed": seed,}, )

NODE_CLASS_MAPPINGS = {
    "KSampler (WAS)": WAS_KSampler,
    "Seed": WAS_Seed
}
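Note (illustration only, not part of the uploaded file): the Seed node wraps the integer in a dictionary and exposes it under the custom SEED link type, so a single seed value can be wired to several WAS KSamplers, which simply unpack it. A minimal sketch of that hand-off, assuming the classes above are importable:

# Hypothetical illustration of the SEED hand-off between the two nodes above.
seed_out = WAS_Seed().seed(12345)    # -> ({"seed": 12345},)
seed_dict = seed_out[0]
assert seed_dict["seed"] == 12345
# WAS_KSampler.sample() then reads seed['seed'] before calling nodes.common_ksampler().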
wasNodeSuitesComfyui_externalSeedKsampler/WAS_License.txt
ADDED
@@ -0,0 +1,7 @@
Copyright 2023 Jordan Thompson (WASasquatch)

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
wasNodeSuitesComfyui_filtersSuiteV1/Canny_Filter_WAS.py
ADDED
@@ -0,0 +1,147 @@
# By WASasquatch (Discord: WAS#0263)

import torch, os, sys, subprocess
import numpy as np

class WAS_Canny_Filter:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image": ("IMAGE",),
                "enable_threshold": (['false', 'true'],),
                "threshold_low": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                "threshold_high": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "canny_filter"

    CATEGORY = "WAS"

    def canny_filter(self, image, threshold_low, threshold_high, enable_threshold):

        self.install_opencv()

        if enable_threshold == 'false':
            threshold_low = None
            threshold_high = None

        image_canny = self.Canny_detector(255. * image.cpu().numpy().squeeze(), threshold_low, threshold_high)

        return ( torch.from_numpy( image_canny )[None,], )

    # Defining the Canny Detector function
    # From: https://www.geeksforgeeks.org/implement-canny-edge-detector-in-python-using-opencv/

    # here weak_th and strong_th are thresholds for
    # double thresholding step
    def Canny_detector(self, img, weak_th=None, strong_th=None):

        import cv2

        # conversion of image to grayscale
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Noise reduction step
        img = cv2.GaussianBlur(img, (5, 5), 1.4)

        # Calculating the gradients
        gx = cv2.Sobel(np.float32(img), cv2.CV_64F, 1, 0, 3)
        gy = cv2.Sobel(np.float32(img), cv2.CV_64F, 0, 1, 3)

        # Conversion of Cartesian coordinates to polar
        mag, ang = cv2.cartToPolar(gx, gy, angleInDegrees=True)

        # setting the minimum and maximum thresholds
        # for double thresholding
        mag_max = np.max(mag)
        if not weak_th: weak_th = mag_max * 0.1
        if not strong_th: strong_th = mag_max * 0.5

        # getting the dimensions of the input image
        height, width = img.shape

        # Looping through every pixel of the grayscale
        # image
        for i_x in range(width):
            for i_y in range(height):

                grad_ang = ang[i_y, i_x]
                grad_ang = abs(grad_ang - 180) if abs(grad_ang) > 180 else abs(grad_ang)

                # selecting the neighbours of the target pixel
                # according to the gradient direction
                # In the x axis direction
                if grad_ang <= 22.5:
                    neighb_1_x, neighb_1_y = i_x - 1, i_y
                    neighb_2_x, neighb_2_y = i_x + 1, i_y

                # top right (diagonal-1) direction
                elif grad_ang > 22.5 and grad_ang <= (22.5 + 45):
                    neighb_1_x, neighb_1_y = i_x - 1, i_y - 1
                    neighb_2_x, neighb_2_y = i_x + 1, i_y + 1

                # In y-axis direction
                elif grad_ang > (22.5 + 45) and grad_ang <= (22.5 + 90):
                    neighb_1_x, neighb_1_y = i_x, i_y - 1
                    neighb_2_x, neighb_2_y = i_x, i_y + 1

                # top left (diagonal-2) direction
                elif grad_ang > (22.5 + 90) and grad_ang <= (22.5 + 135):
                    neighb_1_x, neighb_1_y = i_x - 1, i_y + 1
                    neighb_2_x, neighb_2_y = i_x + 1, i_y - 1

                # Now it restarts the cycle
                elif grad_ang > (22.5 + 135) and grad_ang <= (22.5 + 180):
                    neighb_1_x, neighb_1_y = i_x - 1, i_y
                    neighb_2_x, neighb_2_y = i_x + 1, i_y

                # Non-maximum suppression step
                if width > neighb_1_x >= 0 and height > neighb_1_y >= 0:
                    if mag[i_y, i_x] < mag[neighb_1_y, neighb_1_x]:
                        mag[i_y, i_x] = 0
                        continue

                if width > neighb_2_x >= 0 and height > neighb_2_y >= 0:
                    if mag[i_y, i_x] < mag[neighb_2_y, neighb_2_x]:
                        mag[i_y, i_x] = 0

        weak_ids = np.zeros_like(img)
        strong_ids = np.zeros_like(img)
        ids = np.zeros_like(img)

        # double thresholding step
        for i_x in range(width):
            for i_y in range(height):

                grad_mag = mag[i_y, i_x]

                if grad_mag < weak_th:
                    mag[i_y, i_x] = 0
                elif strong_th > grad_mag >= weak_th:
                    ids[i_y, i_x] = 1
                else:
                    ids[i_y, i_x] = 2

        # finally returning the magnitude of
        # gradients of edges
        return mag

    def install_opencv(self):
        if 'opencv-python' not in self.packages():
            print("Installing CV2...")
            subprocess.check_call([sys.executable, '-m', 'pip', '-q', 'install', 'opencv-python'])

    def packages(self):
        import sys, subprocess
        return [r.decode().split('==')[0] for r in subprocess.check_output([sys.executable, '-m', 'pip', 'freeze']).split()]

NODE_CLASS_MAPPINGS = {
    "Canny Filter": WAS_Canny_Filter
}
wasNodeSuitesComfyui_filtersSuiteV1/Image_Filters_WAS.py
ADDED
@@ -0,0 +1,78 @@
# By WASasquatch (Discord: WAS#0263)

import torch
import numpy as np
from PIL import Image, ImageFilter, ImageEnhance

class WAS_Image_Filters:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image": ("IMAGE",),
                "brightness": ("FLOAT", {"default": 0.0, "min": -1.0, "max": 1.0, "step": 0.01}),
                "contrast": ("FLOAT", {"default": 1.0, "min": -1.0, "max": 2.0, "step": 0.01}),
                "saturation": ("FLOAT", {"default": 1.0, "min": -1.0, "max": 5.0, "step": 0.01}),
                "sharpness": ("FLOAT", {"default": 1.0, "min": -1.0, "max": 5.0, "step": 0.01}),
                "edge_enhance": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "image_filters"

    CATEGORY = "WAS"

    def image_filters(self, image, brightness, contrast, saturation, sharpness, edge_enhance):

        pil_image = None

        # Apply tensor adjustments (kept as torch ops so the image stays a tensor for tensor2pil)
        if brightness > 0.0 or brightness < 0.0:
            # Apply brightness
            image = torch.clamp(image + brightness, 0.0, 1.0)

        if contrast > 1.0 or contrast < 1.0:
            # Apply contrast
            image = torch.clamp(image * contrast, 0.0, 1.0)

        # Apply PIL Adjustments
        if saturation > 1.0 or saturation < 1.0:
            # PIL Image
            pil_image = self.tensor2pil(image)
            # Apply saturation
            pil_image = ImageEnhance.Color(pil_image).enhance(saturation)

        if sharpness > 1.0 or sharpness < 1.0:
            # Assign or create PIL Image
            pil_image = pil_image if pil_image else self.tensor2pil(image)
            # Apply sharpness
            pil_image = ImageEnhance.Sharpness(pil_image).enhance(sharpness)

        if edge_enhance > 0.0:
            # Assign or create PIL Image
            pil_image = pil_image if pil_image else self.tensor2pil(image)
            # Edge Enhancement
            edge_enhanced_img = pil_image.filter(ImageFilter.EDGE_ENHANCE_MORE)
            # Blend Mask
            blend_mask = Image.new(mode="L", size=pil_image.size, color=(round(edge_enhance * 255)))
            # Composite Original and Enhanced Version
            pil_image = Image.composite(pil_image, edge_enhanced_img, blend_mask)
            # Clean-up
            del blend_mask, edge_enhanced_img

        # Output image
        out_image = ( torch.from_numpy(np.array(pil_image).astype(np.float32) / 255.0).unsqueeze(0)
                      if pil_image else image )

        return ( out_image, )

    def tensor2pil(self, image):
        return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))

NODE_CLASS_MAPPINGS = {
    "Image Filters": WAS_Image_Filters
}
wasNodeSuitesComfyui_filtersSuiteV1/WAS_License.txt
ADDED
@@ -0,0 +1,7 @@
Copyright 2023 Jordan Thompson (WASasquatch)

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
wasNodeSuitesComfyui_filtersSuiteV2/Canny_Filter_WAS.py
ADDED
@@ -0,0 +1,147 @@
# By WASasquatch (Discord: WAS#0263)

import torch, os, sys, subprocess
import numpy as np

class WAS_Canny_Filter:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image": ("IMAGE",),
                "enable_threshold": (['false', 'true'],),
                "threshold_low": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                "threshold_high": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "canny_filter"

    CATEGORY = "WAS"

    def canny_filter(self, image, threshold_low, threshold_high, enable_threshold):

        self.install_opencv()

        if enable_threshold == 'false':
            threshold_low = None
            threshold_high = None

        image_canny = self.Canny_detector(255. * image.cpu().numpy().squeeze(), threshold_low, threshold_high)

        return ( torch.from_numpy( image_canny )[None,], )

    # Defining the Canny Detector function
    # From: https://www.geeksforgeeks.org/implement-canny-edge-detector-in-python-using-opencv/

    # here weak_th and strong_th are thresholds for
    # double thresholding step
    def Canny_detector(self, img, weak_th=None, strong_th=None):

        import cv2

        # conversion of image to grayscale
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Noise reduction step
        img = cv2.GaussianBlur(img, (5, 5), 1.4)

        # Calculating the gradients
        gx = cv2.Sobel(np.float32(img), cv2.CV_64F, 1, 0, 3)
        gy = cv2.Sobel(np.float32(img), cv2.CV_64F, 0, 1, 3)

        # Conversion of Cartesian coordinates to polar
        mag, ang = cv2.cartToPolar(gx, gy, angleInDegrees=True)

        # setting the minimum and maximum thresholds
        # for double thresholding
        mag_max = np.max(mag)
        if not weak_th: weak_th = mag_max * 0.1
        if not strong_th: strong_th = mag_max * 0.5

        # getting the dimensions of the input image
        height, width = img.shape

        # Looping through every pixel of the grayscale
        # image
        for i_x in range(width):
            for i_y in range(height):

                grad_ang = ang[i_y, i_x]
                grad_ang = abs(grad_ang - 180) if abs(grad_ang) > 180 else abs(grad_ang)

                # selecting the neighbours of the target pixel
                # according to the gradient direction
                # In the x axis direction
                if grad_ang <= 22.5:
                    neighb_1_x, neighb_1_y = i_x - 1, i_y
                    neighb_2_x, neighb_2_y = i_x + 1, i_y

                # top right (diagonal-1) direction
                elif grad_ang > 22.5 and grad_ang <= (22.5 + 45):
                    neighb_1_x, neighb_1_y = i_x - 1, i_y - 1
                    neighb_2_x, neighb_2_y = i_x + 1, i_y + 1

                # In y-axis direction
                elif grad_ang > (22.5 + 45) and grad_ang <= (22.5 + 90):
                    neighb_1_x, neighb_1_y = i_x, i_y - 1
                    neighb_2_x, neighb_2_y = i_x, i_y + 1

                # top left (diagonal-2) direction
                elif grad_ang > (22.5 + 90) and grad_ang <= (22.5 + 135):
                    neighb_1_x, neighb_1_y = i_x - 1, i_y + 1
                    neighb_2_x, neighb_2_y = i_x + 1, i_y - 1

                # Now it restarts the cycle
                elif grad_ang > (22.5 + 135) and grad_ang <= (22.5 + 180):
                    neighb_1_x, neighb_1_y = i_x - 1, i_y
                    neighb_2_x, neighb_2_y = i_x + 1, i_y

                # Non-maximum suppression step
                if width > neighb_1_x >= 0 and height > neighb_1_y >= 0:
                    if mag[i_y, i_x] < mag[neighb_1_y, neighb_1_x]:
                        mag[i_y, i_x] = 0
                        continue

                if width > neighb_2_x >= 0 and height > neighb_2_y >= 0:
                    if mag[i_y, i_x] < mag[neighb_2_y, neighb_2_x]:
                        mag[i_y, i_x] = 0

        weak_ids = np.zeros_like(img)
        strong_ids = np.zeros_like(img)
        ids = np.zeros_like(img)

        # double thresholding step
        for i_x in range(width):
            for i_y in range(height):

                grad_mag = mag[i_y, i_x]

                if grad_mag < weak_th:
                    mag[i_y, i_x] = 0
                elif strong_th > grad_mag >= weak_th:
                    ids[i_y, i_x] = 1
                else:
                    ids[i_y, i_x] = 2

        # finally returning the magnitude of
        # gradients of edges
        return mag

    def install_opencv(self):
        if 'opencv-python' not in self.packages():
            print("Installing CV2...")
            subprocess.check_call([sys.executable, '-m', 'pip', '-q', 'install', 'opencv-python'])

    def packages(self):
        import sys, subprocess
        return [r.decode().split('==')[0] for r in subprocess.check_output([sys.executable, '-m', 'pip', 'freeze']).split()]

NODE_CLASS_MAPPINGS = {
    "Canny Filter": WAS_Canny_Filter
}
wasNodeSuitesComfyui_filtersSuiteV2/Image_Blend_WAS.py
ADDED
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# By WASasquatch (Discord: WAS#0263)
|
2 |
+
|
3 |
+
import torch, sys, subprocess
|
4 |
+
import numpy as np
|
5 |
+
from PIL import Image
|
6 |
+
|
7 |
+
class WAS_Image_Blend:
|
8 |
+
def __init__(self):
|
9 |
+
pass
|
10 |
+
|
11 |
+
@classmethod
|
12 |
+
def INPUT_TYPES(cls):
|
13 |
+
return {
|
14 |
+
"required": {
|
15 |
+
"image_a": ("IMAGE",),
|
16 |
+
"image_b": ("IMAGE",),
|
17 |
+
"blend_percentage": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
|
18 |
+
},
|
19 |
+
}
|
20 |
+
|
21 |
+
RETURN_TYPES = ("IMAGE",)
|
22 |
+
FUNCTION = "image_blend"
|
23 |
+
|
24 |
+
CATEGORY = "WAS"
|
25 |
+
|
26 |
+
def image_blend(self, image_a, image_b, blend_percentage):
|
27 |
+
|
28 |
+
# Convert images to PIL
|
29 |
+
img_a = self.tensor2pil(image_a)
|
30 |
+
img_b = self.tensor2pil(image_b)
|
31 |
+
|
32 |
+
# Blend image
|
33 |
+
blend_mask = Image.new(mode = "L", size = img_a.size, color = (round(blend_percentage * 255)))
|
34 |
+
img_result = Image.composite(img_a, img_b, blend_mask)
|
35 |
+
|
36 |
+
del img_a, img_b, blend_mask
|
37 |
+
|
38 |
+
return ( torch.from_numpy(np.array(img_result).astype(np.float32) / 255.0).unsqueeze(0), )
|
39 |
+
|
40 |
+
# Convert tensor to PIL image
|
41 |
+
def tensor2pil(self, image):
|
42 |
+
return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))
|
43 |
+
|
44 |
+
# Freeze packages
|
45 |
+
def packages(self):
|
46 |
+
import sys, subprocess
|
47 |
+
return [r.decode().split('==')[0] for r in subprocess.check_output([sys.executable, '-m', 'pip', 'freeze']).split()]
|
48 |
+
|
49 |
+
NODE_CLASS_MAPPINGS = {
|
50 |
+
"Image Blend": WAS_Image_Blend
|
51 |
+
}
|
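Since every node in this suite round-trips between ComfyUI image tensors and PIL images, a small self-contained sketch of that conversion may help; the tensor2pil/pil2tensor helpers below simply mirror the methods defined in these files, and the random 64x64 tensor is only an illustrative input:

import torch
import numpy as np
from PIL import Image

def tensor2pil(image):
    return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))

def pil2tensor(image):
    return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)

t = torch.rand(1, 64, 64, 3)   # ComfyUI-style image tensor: [batch, height, width, channel], values 0..1
img = tensor2pil(t)            # 64x64 PIL RGB image
t2 = pil2tensor(img)           # back to a [1, 64, 64, 3] float tensor
print(img.size, t2.shape, float((t - t2).abs().max()))  # round-trip error stays below 1/255 (8-bit quantization)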
wasNodeSuitesComfyui_filtersSuiteV2/Image_Combine_WAS.py
ADDED
@@ -0,0 +1,103 @@
1 |
+
# By WASasquatch (Discord: WAS#0263)
|
2 |
+
|
3 |
+
import torch, sys, subprocess
|
4 |
+
import numpy as np
|
5 |
+
from PIL import Image
|
6 |
+
|
7 |
+
class WAS_Image_Combine:
|
8 |
+
def __init__(self):
|
9 |
+
pass
|
10 |
+
|
11 |
+
@classmethod
|
12 |
+
def INPUT_TYPES(cls):
|
13 |
+
return {
|
14 |
+
"required": {
|
15 |
+
"image_a": ("IMAGE",),
|
16 |
+
"image_b": ("IMAGE",),
|
17 |
+
"mode": ([
|
18 |
+
"add",
|
19 |
+
"color",
|
20 |
+
"color_burn",
|
21 |
+
"color_dodge",
|
22 |
+
"darken",
|
23 |
+
#"difference",
|
24 |
+
#"exclusion",
|
25 |
+
"hard_light",
|
26 |
+
"hue",
|
27 |
+
"lighten",
|
28 |
+
"multiply",
|
29 |
+
"overlay",
|
30 |
+
"screen",
|
31 |
+
"soft_light"
|
32 |
+
],),
|
33 |
+
},
|
34 |
+
}
|
35 |
+
|
36 |
+
RETURN_TYPES = ("IMAGE",)
|
37 |
+
FUNCTION = "image_combine"
|
38 |
+
|
39 |
+
CATEGORY = "WAS"
|
40 |
+
|
41 |
+
def image_combine(self, image_a, image_b, mode):
|
42 |
+
|
43 |
+
# Install Pilgram
|
44 |
+
if 'pilgram' not in self.packages():
|
45 |
+
print("Installing Pilgram...")
|
46 |
+
subprocess.check_call([sys.executable, '-m', 'pip', '-q', 'install', 'pilgram'])
|
47 |
+
|
48 |
+
# Import Pilgram module
|
49 |
+
import pilgram
|
50 |
+
|
51 |
+
# Convert images to PIL
|
52 |
+
img_a = self.tensor2pil(image_a)
|
53 |
+
img_b = self.tensor2pil(image_b)
|
54 |
+
|
55 |
+
# Apply blending
|
56 |
+
match mode:
|
57 |
+
case "color":
|
58 |
+
out_image = pilgram.css.blending.color(img_a, img_b)
|
59 |
+
case "color_burn":
|
60 |
+
out_image = pilgram.css.blending.color_burn(img_a, img_b)
|
61 |
+
case "color_dodge":
|
62 |
+
out_image = pilgram.css.blending.color_dodge(img_a, img_b)
|
63 |
+
case "darken":
|
64 |
+
out_image = pilgram.css.blending.darken(img_a, img_b)
|
65 |
+
case "difference":
|
66 |
+
out_image = pilgram.css.blending.difference(img_a, img_b)
|
67 |
+
case "exclusion":
|
68 |
+
out_image = pilgram.css.blending.exclusion(img_a, img_b)
|
69 |
+
case "hard_light":
|
70 |
+
out_image = pilgram.css.blending.hard_light(img_a, img_b)
|
71 |
+
case "hue":
|
72 |
+
out_image = pilgram.css.blending.hue(img_a, img_b)
|
73 |
+
case "lighten":
|
74 |
+
out_image = pilgram.css.blending.lighten(img_a, img_b)
|
75 |
+
case "multiply":
|
76 |
+
out_image = pilgram.css.blending.multiply(img_a, img_b)
|
77 |
+
case "add":
|
78 |
+
out_image = pilgram.css.blending.normal(img_a, img_b)
|
79 |
+
case "overlay":
|
80 |
+
out_image = pilgram.css.blending.overlay(img_a, img_b)
|
81 |
+
case "screen":
|
82 |
+
out_image = pilgram.css.blending.screen(img_a, img_b)
|
83 |
+
case "soft_light":
|
84 |
+
out_image = pilgram.css.blending.soft_light(img_a, img_b)
|
85 |
+
case _:
|
86 |
+
out_image = img_a
|
87 |
+
|
88 |
+
out_image = out_image.convert("RGB")
|
89 |
+
|
90 |
+
return ( torch.from_numpy(np.array(out_image).astype(np.float32) / 255.0).unsqueeze(0), )
|
91 |
+
|
92 |
+
# Convert tensor to PIL image
|
93 |
+
def tensor2pil(self, image):
|
94 |
+
return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))
|
95 |
+
|
96 |
+
# Freeze packages
|
97 |
+
def packages(self):
|
98 |
+
import sys, subprocess
|
99 |
+
return [r.decode().split('==')[0] for r in subprocess.check_output([sys.executable, '-m', 'pip', 'freeze']).split()]
|
100 |
+
|
101 |
+
NODE_CLASS_MAPPINGS = {
|
102 |
+
"Image Combine": WAS_Image_Combine
|
103 |
+
}
|
wasNodeSuitesComfyui_filtersSuiteV2/Image_Filters_WAS.py
ADDED
@@ -0,0 +1,93 @@
1 |
+
# By WASasquatch (Discord: WAS#0263)
|
2 |
+
|
3 |
+
import torch
|
4 |
+
import numpy as np
|
5 |
+
from PIL import Image, ImageFilter, ImageEnhance
|
6 |
+
|
7 |
+
class WAS_Image_Filters:
|
8 |
+
def __init__(self):
|
9 |
+
pass
|
10 |
+
|
11 |
+
@classmethod
|
12 |
+
def INPUT_TYPES(cls):
|
13 |
+
return {
|
14 |
+
"required": {
|
15 |
+
"image": ("IMAGE",),
|
16 |
+
"brightness": ("FLOAT", {"default": 0.0, "min": -1.0, "max": 1.0, "step": 0.01}),
|
17 |
+
"contrast": ("FLOAT", {"default": 1.0, "min": -1.0, "max": 2.0, "step": 0.01}),
|
18 |
+
"saturation": ("FLOAT", {"default": 1.0, "min": -1.0, "max": 5.0, "step": 0.01}),
|
19 |
+
"sharpness": ("FLOAT", {"default": 1.0, "min": -5.0, "max": 5.0, "step": 0.01}),
|
20 |
+
"blur": ("INT", {"default": 0, "min": 0, "max": 16, "step": 1}),
|
21 |
+
"gaussian_blur": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 255.0, "step": 0.1}),
|
22 |
+
"edge_enhance": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
|
23 |
+
},
|
24 |
+
}
|
25 |
+
|
26 |
+
RETURN_TYPES = ("IMAGE",)
|
27 |
+
FUNCTION = "image_filters"
|
28 |
+
|
29 |
+
CATEGORY = "WAS"
|
30 |
+
|
31 |
+
def image_filters(self, image, brightness, contrast, saturation, sharpness, blur, gaussian_blur, edge_enhance):
|
32 |
+
|
33 |
+
pil_image = None
|
34 |
+
|
35 |
+
# Apply NP Adjustments
|
36 |
+
if brightness > 0.0 or brightness < 0.0:
|
37 |
+
# Apply brightness
|
38 |
+
image = np.clip(image + brightness, 0.0, 1.0)
|
39 |
+
|
40 |
+
if contrast > 1.0 or contrast < 1.0:
|
41 |
+
# Apply contrast
|
42 |
+
image = np.clip(image * contrast, 0.0, 1.0)
|
43 |
+
|
44 |
+
# Apply PIL Adjustments
|
45 |
+
if saturation > 1.0 or saturation < 1.0:
|
46 |
+
#PIL Image
|
47 |
+
pil_image = self.tensor2pil(image)
|
48 |
+
# Apply saturation
|
49 |
+
pil_image = ImageEnhance.Color(pil_image).enhance(saturation)
|
50 |
+
|
51 |
+
if sharpness > 1.0 or sharpness < 1.0:
|
52 |
+
# Assign or create PIL Image
|
53 |
+
pil_image = pil_image if pil_image else self.tensor2pil(image)
|
54 |
+
# Apply sharpness
|
55 |
+
pil_image = ImageEnhance.Sharpness(pil_image).enhance(sharpness)
|
56 |
+
|
57 |
+
if blur > 0:
|
58 |
+
# Assign or create PIL Image
|
59 |
+
pil_image = pil_image if pil_image else self.tensor2pil(image)
|
60 |
+
# Apply blur
|
61 |
+
for _ in range(blur):
|
62 |
+
pil_image = pil_image.filter(ImageFilter.BLUR)
|
63 |
+
|
64 |
+
if gaussian_blur > 0.0:
|
65 |
+
# Assign or create PIL Image
|
66 |
+
pil_image = pil_image if pil_image else self.tensor2pil(image)
|
67 |
+
# Apply Gaussian blur
|
68 |
+
pil_image = pil_image.filter(ImageFilter.GaussianBlur(radius = gaussian_blur))
|
69 |
+
|
70 |
+
if edge_enhance > 0.0:
|
71 |
+
# Assign or create PIL Image
|
72 |
+
pil_image = pil_image if pil_image else self.tensor2pil(image)
|
73 |
+
# Edge Enhancement
|
74 |
+
edge_enhanced_img = pil_image.filter(ImageFilter.EDGE_ENHANCE_MORE)
|
75 |
+
# Blend Mask
|
76 |
+
blend_mask = Image.new(mode = "L", size = pil_image.size, color = (round(edge_enhance * 255)))
|
77 |
+
# Composite Original and Enhanced Version
|
78 |
+
pil_image = Image.composite(pil_image, edge_enhanced_img, blend_mask)
|
79 |
+
# Clean-up
|
80 |
+
del blend_mask, edge_enhanced_img
|
81 |
+
|
82 |
+
# Output image
|
83 |
+
out_image = ( torch.from_numpy(np.array(pil_image).astype(np.float32) / 255.0).unsqueeze(0)
|
84 |
+
if pil_image else image )
|
85 |
+
|
86 |
+
return ( out_image, )
|
87 |
+
|
88 |
+
def tensor2pil(self, image):
|
89 |
+
return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))
|
90 |
+
|
91 |
+
NODE_CLASS_MAPPINGS = {
|
92 |
+
"Image Filters": WAS_Image_Filters
|
93 |
+
}
|
wasNodeSuitesComfyui_filtersSuiteV2/WAS_License.txt
ADDED
@@ -0,0 +1,7 @@
Copyright 2023 Jordan Thompson (WASasquatch)

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
wasNodeSuitesComfyui_filtersSuiteV3/Canny_Filter_WAS.py
ADDED
@@ -0,0 +1,147 @@
1 |
+
# By WASasquatch (Discord: WAS#0263)
|
2 |
+
|
3 |
+
import torch, os
|
4 |
+
import numpy as np
|
5 |
+
|
6 |
+
class WAS_Canny_Filter:
|
7 |
+
def __init__(self):
|
8 |
+
pass
|
9 |
+
|
10 |
+
@classmethod
|
11 |
+
def INPUT_TYPES(cls):
|
12 |
+
return {
|
13 |
+
"required": {
|
14 |
+
"image": ("IMAGE",),
|
15 |
+
"enable_threshold": (['false', 'true'],),
|
16 |
+
"threshold_low": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
|
17 |
+
"threshold_high": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
|
18 |
+
},
|
19 |
+
}
|
20 |
+
|
21 |
+
RETURN_TYPES = ("IMAGE",)
|
22 |
+
FUNCTION = "canny_filter"
|
23 |
+
|
24 |
+
CATEGORY = "WAS"
|
25 |
+
|
26 |
+
def canny_filter(self, image, threshold_low, threshold_high, enable_threshold):
|
27 |
+
|
28 |
+
self.install_opencv()
|
29 |
+
|
30 |
+
if enable_threshold == 'false':
|
31 |
+
threshold_low = None
|
32 |
+
threshold_high = None
|
33 |
+
|
34 |
+
image_canny = self.Canny_detector(255. * image.cpu().numpy().squeeze(), threshold_low, threshold_high)
|
35 |
+
|
36 |
+
return ( torch.from_numpy( image_canny )[None,], )
|
37 |
+
|
38 |
+
# Defining the Canny Detector function
|
39 |
+
# From: https://www.geeksforgeeks.org/implement-canny-edge-detector-in-python-using-opencv/
|
40 |
+
|
41 |
+
# here weak_th and strong_th are thresholds for
|
42 |
+
# double thresholding step
|
43 |
+
def Canny_detector(self, img, weak_th = None, strong_th = None):
|
44 |
+
|
45 |
+
import cv2
|
46 |
+
|
47 |
+
# conversion of image to grayscale
|
48 |
+
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
|
49 |
+
|
50 |
+
# Noise reduction step
|
51 |
+
img = cv2.GaussianBlur(img, (5, 5), 1.4)
|
52 |
+
|
53 |
+
# Calculating the gradients
|
54 |
+
gx = cv2.Sobel(np.float32(img), cv2.CV_64F, 1, 0, 3)
|
55 |
+
gy = cv2.Sobel(np.float32(img), cv2.CV_64F, 0, 1, 3)
|
56 |
+
|
57 |
+
# Conversion of Cartesian coordinates to polar
|
58 |
+
mag, ang = cv2.cartToPolar(gx, gy, angleInDegrees = True)
|
59 |
+
|
60 |
+
# setting the minimum and maximum thresholds
|
61 |
+
# for double thresholding
|
62 |
+
mag_max = np.max(mag)
|
63 |
+
if not weak_th:weak_th = mag_max * 0.1
|
64 |
+
if not strong_th:strong_th = mag_max * 0.5
|
65 |
+
|
66 |
+
# getting the dimensions of the input image
|
67 |
+
height, width = img.shape
|
68 |
+
|
69 |
+
# Looping through every pixel of the grayscale
|
70 |
+
# image
|
71 |
+
for i_x in range(width):
|
72 |
+
for i_y in range(height):
|
73 |
+
|
74 |
+
grad_ang = ang[i_y, i_x]
|
75 |
+
grad_ang = abs(grad_ang-180) if abs(grad_ang)>180 else abs(grad_ang)
|
76 |
+
|
77 |
+
# selecting the neighbours of the target pixel
|
78 |
+
# according to the gradient direction
|
79 |
+
# In the x axis direction
|
80 |
+
if grad_ang<= 22.5:
|
81 |
+
neighb_1_x, neighb_1_y = i_x-1, i_y
|
82 |
+
neighb_2_x, neighb_2_y = i_x + 1, i_y
|
83 |
+
|
84 |
+
# top right (diagonal-1) direction
|
85 |
+
elif grad_ang>22.5 and grad_ang<=(22.5 + 45):
|
86 |
+
neighb_1_x, neighb_1_y = i_x-1, i_y-1
|
87 |
+
neighb_2_x, neighb_2_y = i_x + 1, i_y + 1
|
88 |
+
|
89 |
+
# In y-axis direction
|
90 |
+
elif grad_ang>(22.5 + 45) and grad_ang<=(22.5 + 90):
|
91 |
+
neighb_1_x, neighb_1_y = i_x, i_y-1
|
92 |
+
neighb_2_x, neighb_2_y = i_x, i_y + 1
|
93 |
+
|
94 |
+
# top left (diagonal-2) direction
|
95 |
+
elif grad_ang>(22.5 + 90) and grad_ang<=(22.5 + 135):
|
96 |
+
neighb_1_x, neighb_1_y = i_x-1, i_y + 1
|
97 |
+
neighb_2_x, neighb_2_y = i_x + 1, i_y-1
|
98 |
+
|
99 |
+
# Now it restarts the cycle
|
100 |
+
elif grad_ang>(22.5 + 135) and grad_ang<=(22.5 + 180):
|
101 |
+
neighb_1_x, neighb_1_y = i_x-1, i_y
|
102 |
+
neighb_2_x, neighb_2_y = i_x + 1, i_y
|
103 |
+
|
104 |
+
# Non-maximum suppression step
|
105 |
+
if width>neighb_1_x>= 0 and height>neighb_1_y>= 0:
|
106 |
+
if mag[i_y, i_x]<mag[neighb_1_y, neighb_1_x]:
|
107 |
+
mag[i_y, i_x]= 0
|
108 |
+
continue
|
109 |
+
|
110 |
+
if width>neighb_2_x>= 0 and height>neighb_2_y>= 0:
|
111 |
+
if mag[i_y, i_x]<mag[neighb_2_y, neighb_2_x]:
|
112 |
+
mag[i_y, i_x]= 0
|
113 |
+
|
114 |
+
weak_ids = np.zeros_like(img)
|
115 |
+
strong_ids = np.zeros_like(img)
|
116 |
+
ids = np.zeros_like(img)
|
117 |
+
|
118 |
+
# double thresholding step
|
119 |
+
for i_x in range(width):
|
120 |
+
for i_y in range(height):
|
121 |
+
|
122 |
+
grad_mag = mag[i_y, i_x]
|
123 |
+
|
124 |
+
if grad_mag<weak_th:
|
125 |
+
mag[i_y, i_x]= 0
|
126 |
+
elif strong_th>grad_mag>= weak_th:
|
127 |
+
ids[i_y, i_x]= 1
|
128 |
+
else:
|
129 |
+
ids[i_y, i_x]= 2
|
130 |
+
|
131 |
+
|
132 |
+
# finally returning the magnitude of
|
133 |
+
# gradients of edges
|
134 |
+
return mag
|
135 |
+
|
136 |
+
def install_opencv(self):
|
137 |
+
if 'opencv-python' not in self.packages():
|
138 |
+
print("Installing CV2...")
|
139 |
+
import sys, subprocess
subprocess.check_call([sys.executable, '-m', 'pip', '-q', 'install', 'opencv-python'])
|
140 |
+
|
141 |
+
def packages(self):
|
142 |
+
import sys, subprocess
|
143 |
+
return [r.decode().split('==')[0] for r in subprocess.check_output([sys.executable, '-m', 'pip', 'freeze']).split()]
|
144 |
+
|
145 |
+
NODE_CLASS_MAPPINGS = {
|
146 |
+
"Canny Filter": WAS_Canny_Filter
|
147 |
+
}
|
wasNodeSuitesComfyui_filtersSuiteV3/Image_Blend_WAS.py
ADDED
@@ -0,0 +1,51 @@
1 |
+
# By WASasquatch (Discord: WAS#0263)
|
2 |
+
|
3 |
+
import torch, sys, subprocess
|
4 |
+
import numpy as np
|
5 |
+
from PIL import Image
|
6 |
+
|
7 |
+
class WAS_Image_Blend:
|
8 |
+
def __init__(self):
|
9 |
+
pass
|
10 |
+
|
11 |
+
@classmethod
|
12 |
+
def INPUT_TYPES(cls):
|
13 |
+
return {
|
14 |
+
"required": {
|
15 |
+
"image_a": ("IMAGE",),
|
16 |
+
"image_b": ("IMAGE",),
|
17 |
+
"blend_percentage": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
|
18 |
+
},
|
19 |
+
}
|
20 |
+
|
21 |
+
RETURN_TYPES = ("IMAGE",)
|
22 |
+
FUNCTION = "image_blend"
|
23 |
+
|
24 |
+
CATEGORY = "WAS"
|
25 |
+
|
26 |
+
def image_blend(self, image_a, image_b, blend_percentage):
|
27 |
+
|
28 |
+
# Convert images to PIL
|
29 |
+
img_a = self.tensor2pil(image_a)
|
30 |
+
img_b = self.tensor2pil(image_b)
|
31 |
+
|
32 |
+
# Blend image
|
33 |
+
blend_mask = Image.new(mode = "L", size = img_a.size, color = (round(blend_percentage * 255)))
|
34 |
+
img_result = Image.composite(img_a, img_b, blend_mask)
|
35 |
+
|
36 |
+
del img_a, img_b, blend_mask
|
37 |
+
|
38 |
+
return ( torch.from_numpy(np.array(img_result).astype(np.float32) / 255.0).unsqueeze(0), )
|
39 |
+
|
40 |
+
# Convert tensor to PIL image
|
41 |
+
def tensor2pil(self, image):
|
42 |
+
return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))
|
43 |
+
|
44 |
+
# Freeze packages
|
45 |
+
def packages(self):
|
46 |
+
import sys, subprocess
|
47 |
+
return [r.decode().split('==')[0] for r in subprocess.check_output([sys.executable, '-m', 'pip', 'freeze']).split()]
|
48 |
+
|
49 |
+
NODE_CLASS_MAPPINGS = {
|
50 |
+
"Image Blend": WAS_Image_Blend
|
51 |
+
}
|
wasNodeSuitesComfyui_filtersSuiteV3/Image_Combine_WAS.py
ADDED
@@ -0,0 +1,103 @@
1 |
+
# By WASasquatch (Discord: WAS#0263)
|
2 |
+
|
3 |
+
import torch, sys, subprocess
|
4 |
+
import numpy as np
|
5 |
+
from PIL import Image
|
6 |
+
|
7 |
+
class WAS_Image_Combine:
|
8 |
+
def __init__(self):
|
9 |
+
pass
|
10 |
+
|
11 |
+
@classmethod
|
12 |
+
def INPUT_TYPES(cls):
|
13 |
+
return {
|
14 |
+
"required": {
|
15 |
+
"image_a": ("IMAGE",),
|
16 |
+
"image_b": ("IMAGE",),
|
17 |
+
"mode": ([
|
18 |
+
"add",
|
19 |
+
"color",
|
20 |
+
"color_burn",
|
21 |
+
"color_dodge",
|
22 |
+
"darken",
|
23 |
+
#"difference",
|
24 |
+
#"exclusion",
|
25 |
+
"hard_light",
|
26 |
+
"hue",
|
27 |
+
"lighten",
|
28 |
+
"multiply",
|
29 |
+
"overlay",
|
30 |
+
"screen",
|
31 |
+
"soft_light"
|
32 |
+
],),
|
33 |
+
},
|
34 |
+
}
|
35 |
+
|
36 |
+
RETURN_TYPES = ("IMAGE",)
|
37 |
+
FUNCTION = "image_combine"
|
38 |
+
|
39 |
+
CATEGORY = "WAS"
|
40 |
+
|
41 |
+
def image_combine(self, image_a, image_b, mode):
|
42 |
+
|
43 |
+
# Install Pilgram
|
44 |
+
if 'pilgram' not in self.packages():
|
45 |
+
print("Installing Pilgram...")
|
46 |
+
subprocess.check_call([sys.executable, '-m', 'pip', '-q', 'install', 'pilgram'])
|
47 |
+
|
48 |
+
# Import Pilgram module
|
49 |
+
import pilgram
|
50 |
+
|
51 |
+
# Convert images to PIL
|
52 |
+
img_a = self.tensor2pil(image_a)
|
53 |
+
img_b = self.tensor2pil(image_b)
|
54 |
+
|
55 |
+
# Apply blending
|
56 |
+
match mode:
|
57 |
+
case "color":
|
58 |
+
out_image = pilgram.css.blending.color(img_a, img_b)
|
59 |
+
case "color_burn":
|
60 |
+
out_image = pilgram.css.blending.color_burn(img_a, img_b)
|
61 |
+
case "color_dodge":
|
62 |
+
out_image = pilgram.css.blending.color_dodge(img_a, img_b)
|
63 |
+
case "darken":
|
64 |
+
out_image = pilgram.css.blending.darken(img_a, img_b)
|
65 |
+
case "difference":
|
66 |
+
out_image = pilgram.css.blending.difference(img_a, img_b)
|
67 |
+
case "exclusion":
|
68 |
+
out_image = pilgram.css.blending.exclusion(img_a, img_b)
|
69 |
+
case "hard_light":
|
70 |
+
out_image = pilgram.css.blending.hard_light(img_a, img_b)
|
71 |
+
case "hue":
|
72 |
+
out_image = pilgram.css.blending.hue(img_a, img_b)
|
73 |
+
case "lighten":
|
74 |
+
out_image = pilgram.css.blending.lighten(img_a, img_b)
|
75 |
+
case "multiply":
|
76 |
+
out_image = pilgram.css.blending.multiply(img_a, img_b)
|
77 |
+
case "add":
|
78 |
+
out_image = pilgram.css.blending.normal(img_a, img_b)
|
79 |
+
case "overlay":
|
80 |
+
out_image = pilgram.css.blending.overlay(img_a, img_b)
|
81 |
+
case "screen":
|
82 |
+
out_image = pilgram.css.blending.screen(img_a, img_b)
|
83 |
+
case "soft_light":
|
84 |
+
out_image = pilgram.css.blending.soft_light(img_a, img_b)
|
85 |
+
case _:
|
86 |
+
out_image = img_a
|
87 |
+
|
88 |
+
out_image = out_image.convert("RGB")
|
89 |
+
|
90 |
+
return ( torch.from_numpy(np.array(out_image).astype(np.float32) / 255.0).unsqueeze(0), )
|
91 |
+
|
92 |
+
# Convert tensor to PIL image
|
93 |
+
def tensor2pil(self, image):
|
94 |
+
return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))
|
95 |
+
|
96 |
+
# Freeze packages
|
97 |
+
def packages(self):
|
98 |
+
import sys, subprocess
|
99 |
+
return [r.decode().split('==')[0] for r in subprocess.check_output([sys.executable, '-m', 'pip', 'freeze']).split()]
|
100 |
+
|
101 |
+
NODE_CLASS_MAPPINGS = {
|
102 |
+
"Image Combine": WAS_Image_Combine
|
103 |
+
}
|
wasNodeSuitesComfyui_filtersSuiteV3/Image_Edge_Detection_WAS.py
ADDED
@@ -0,0 +1,48 @@
1 |
+
# By WASasquatch (Discord: WAS#0263)
|
2 |
+
|
3 |
+
import torch
|
4 |
+
import numpy as np
|
5 |
+
from PIL import Image, ImageFilter
|
6 |
+
|
7 |
+
class WAS_Image_Edge:
|
8 |
+
def __init__(self):
|
9 |
+
pass
|
10 |
+
|
11 |
+
@classmethod
|
12 |
+
def INPUT_TYPES(cls):
|
13 |
+
return {
|
14 |
+
"required": {
|
15 |
+
"image": ("IMAGE",),
|
16 |
+
"mode": (["normal", "laplacian"],),
|
17 |
+
},
|
18 |
+
}
|
19 |
+
|
20 |
+
RETURN_TYPES = ("IMAGE",)
|
21 |
+
FUNCTION = "image_edges"
|
22 |
+
|
23 |
+
CATEGORY = "WAS"
|
24 |
+
|
25 |
+
def image_edges(self, image, mode):
|
26 |
+
|
27 |
+
# Convert image to PIL
|
28 |
+
image = self.tensor2pil(image)
|
29 |
+
|
30 |
+
# Detect edges
|
31 |
+
match mode:
|
32 |
+
case "normal":
|
33 |
+
image = image.filter(ImageFilter.FIND_EDGES)
|
34 |
+
case "laplacian":
|
35 |
+
image = image.filter(ImageFilter.Kernel((3, 3), (-1, -1, -1, -1, 8,
|
36 |
+
-1, -1, -1, -1), 1, 0))
|
37 |
+
case _:
|
38 |
+
image = image
|
39 |
+
|
40 |
+
return ( torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0), )
|
41 |
+
|
42 |
+
# Convert tensor to PIL image
|
43 |
+
def tensor2pil(self, image):
|
44 |
+
return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))
|
45 |
+
|
46 |
+
NODE_CLASS_MAPPINGS = {
|
47 |
+
"Image Detect Edges": WAS_Image_Edge
|
48 |
+
}
|
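A minimal sketch of the "laplacian" mode above, assuming only Pillow; the grey test image and the sampled pixel positions are made up for illustration, while the 3x3 kernel is the one the node passes to ImageFilter.Kernel:

from PIL import Image, ImageFilter

# A flat grey image with a brighter square in the middle, so the kernel has an edge to find
img = Image.new("RGB", (64, 64), (64, 64, 64))
img.paste((200, 200, 200), (16, 16, 48, 48))

# Same 3x3 Laplacian kernel the node uses for mode == "laplacian"
laplacian = ImageFilter.Kernel((3, 3), (-1, -1, -1, -1, 8, -1, -1, -1, -1), 1, 0)
edges = img.filter(laplacian)

print(edges.getpixel((2, 2)), edges.getpixel((16, 16)))  # flat region stays near 0, the square's corner lights up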
wasNodeSuitesComfyui_filtersSuiteV3/Image_Filters_WAS.py
ADDED
@@ -0,0 +1,93 @@
1 |
+
# By WASasquatch (Discord: WAS#0263)
|
2 |
+
|
3 |
+
import torch
|
4 |
+
import numpy as np
|
5 |
+
from PIL import Image, ImageFilter, ImageEnhance
|
6 |
+
|
7 |
+
class WAS_Image_Filters:
|
8 |
+
def __init__(self):
|
9 |
+
pass
|
10 |
+
|
11 |
+
@classmethod
|
12 |
+
def INPUT_TYPES(cls):
|
13 |
+
return {
|
14 |
+
"required": {
|
15 |
+
"image": ("IMAGE",),
|
16 |
+
"brightness": ("FLOAT", {"default": 0.0, "min": -1.0, "max": 1.0, "step": 0.01}),
|
17 |
+
"contrast": ("FLOAT", {"default": 1.0, "min": -1.0, "max": 2.0, "step": 0.01}),
|
18 |
+
"saturation": ("FLOAT", {"default": 1.0, "min": -1.0, "max": 5.0, "step": 0.01}),
|
19 |
+
"sharpness": ("FLOAT", {"default": 1.0, "min": -5.0, "max": 5.0, "step": 0.01}),
|
20 |
+
"blur": ("INT", {"default": 0, "min": 0, "max": 16, "step": 1}),
|
21 |
+
"gaussian_blur": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 255.0, "step": 0.1}),
|
22 |
+
"edge_enhance": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
|
23 |
+
},
|
24 |
+
}
|
25 |
+
|
26 |
+
RETURN_TYPES = ("IMAGE",)
|
27 |
+
FUNCTION = "image_filters"
|
28 |
+
|
29 |
+
CATEGORY = "WAS"
|
30 |
+
|
31 |
+
def image_filters(self, image, brightness, contrast, saturation, sharpness, blur, gaussian_blur, edge_enhance):
|
32 |
+
|
33 |
+
pil_image = None
|
34 |
+
|
35 |
+
# Apply NP Adjustments
|
36 |
+
if brightness > 0.0 or brightness < 0.0:
|
37 |
+
# Apply brightness
|
38 |
+
image = np.clip(image + brightness, 0.0, 1.0)
|
39 |
+
|
40 |
+
if contrast > 1.0 or contrast < 1.0:
|
41 |
+
# Apply contrast
|
42 |
+
image = np.clip(image * contrast, 0.0, 1.0)
|
43 |
+
|
44 |
+
# Apply PIL Adjustments
|
45 |
+
if saturation > 1.0 or saturation < 1.0:
|
46 |
+
#PIL Image
|
47 |
+
pil_image = self.tensor2pil(image)
|
48 |
+
# Apply saturation
|
49 |
+
pil_image = ImageEnhance.Color(pil_image).enhance(saturation)
|
50 |
+
|
51 |
+
if sharpness > 1.0 or sharpness < 1.0:
|
52 |
+
# Assign or create PIL Image
|
53 |
+
pil_image = pil_image if pil_image else self.tensor2pil(image)
|
54 |
+
# Apply sharpness
|
55 |
+
pil_image = ImageEnhance.Sharpness(pil_image).enhance(sharpness)
|
56 |
+
|
57 |
+
if blur > 0:
|
58 |
+
# Assign or create PIL Image
|
59 |
+
pil_image = pil_image if pil_image else self.tensor2pil(image)
|
60 |
+
# Apply blur
|
61 |
+
for _ in range(blur):
|
62 |
+
pil_image = pil_image.filter(ImageFilter.BLUR)
|
63 |
+
|
64 |
+
if gaussian_blur > 0.0:
|
65 |
+
# Assign or create PIL Image
|
66 |
+
pil_image = pil_image if pil_image else self.tensor2pil(image)
|
67 |
+
# Apply Gaussian blur
|
68 |
+
pil_image = pil_image.filter(ImageFilter.GaussianBlur(radius = gaussian_blur))
|
69 |
+
|
70 |
+
if edge_enhance > 0.0:
|
71 |
+
# Assign or create PIL Image
|
72 |
+
pil_image = pil_image if pil_image else self.tensor2pil(image)
|
73 |
+
# Edge Enhancement
|
74 |
+
edge_enhanced_img = pil_image.filter(ImageFilter.EDGE_ENHANCE_MORE)
|
75 |
+
# Blend Mask
|
76 |
+
blend_mask = Image.new(mode = "L", size = pil_image.size, color = (round(edge_enhance * 255)))
|
77 |
+
# Composite Original and Enhanced Version
|
78 |
+
pil_image = Image.composite(pil_image, edge_enhanced_img, blend_mask)
|
79 |
+
# Clean-up
|
80 |
+
del blend_mask, edge_enhanced_img
|
81 |
+
|
82 |
+
# Output image
|
83 |
+
out_image = ( torch.from_numpy(np.array(pil_image).astype(np.float32) / 255.0).unsqueeze(0)
|
84 |
+
if pil_image else image )
|
85 |
+
|
86 |
+
return ( out_image, )
|
87 |
+
|
88 |
+
def tensor2pil(self, image):
|
89 |
+
return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))
|
90 |
+
|
91 |
+
NODE_CLASS_MAPPINGS = {
|
92 |
+
"Image Filters": WAS_Image_Filters
|
93 |
+
}
|
wasNodeSuitesComfyui_filtersSuiteV3/Image_Style_Filter_WAS.py
ADDED
@@ -0,0 +1,137 @@
1 |
+
# By WASasquatch (Discord: WAS#0263)
|
2 |
+
|
3 |
+
import torch, sys, subprocess
|
4 |
+
import numpy as np
|
5 |
+
from PIL import Image
|
6 |
+
|
7 |
+
class WAS_Image_Style_Filter:
|
8 |
+
def __init__(self):
|
9 |
+
pass
|
10 |
+
|
11 |
+
@classmethod
|
12 |
+
def INPUT_TYPES(cls):
|
13 |
+
return {
|
14 |
+
"required": {
|
15 |
+
"image": ("IMAGE",),
|
16 |
+
"style": ([
|
17 |
+
"1977",
|
18 |
+
"aden",
|
19 |
+
"brannan",
|
20 |
+
"brooklyn",
|
21 |
+
"clarendon",
|
22 |
+
"earlybird",
|
23 |
+
"gingham",
|
24 |
+
"hudson",
|
25 |
+
"inkwell",
|
26 |
+
"kelvin",
|
27 |
+
"lark",
|
28 |
+
"lofi",
|
29 |
+
"maven",
|
30 |
+
"mayfair",
|
31 |
+
"moon",
|
32 |
+
"nashville",
|
33 |
+
"perpetua",
|
34 |
+
"reyes",
|
35 |
+
"rise",
|
36 |
+
"slumber",
|
37 |
+
"stinson",
|
38 |
+
"toaster",
|
39 |
+
"valencia",
|
40 |
+
"walden",
|
41 |
+
"willow",
|
42 |
+
"xpro2"
|
43 |
+
],),
|
44 |
+
},
|
45 |
+
}
|
46 |
+
|
47 |
+
RETURN_TYPES = ("IMAGE",)
|
48 |
+
FUNCTION = "image_style_filter"
|
49 |
+
|
50 |
+
CATEGORY = "WAS"
|
51 |
+
|
52 |
+
def image_style_filter(self, image, style):
|
53 |
+
|
54 |
+
# Install Pilgram
|
55 |
+
if 'pilgram' not in self.packages():
|
56 |
+
print("Installing Pilgram...")
|
57 |
+
subprocess.check_call([sys.executable, '-m', 'pip', '-q', 'install', 'pilgram'])
|
58 |
+
|
59 |
+
# Import Pilgram module
|
60 |
+
import pilgram
|
61 |
+
|
62 |
+
# Convert image to PIL
|
63 |
+
image = self.tensor2pil(image)
|
64 |
+
|
65 |
+
# Apply blending
|
66 |
+
match style:
|
67 |
+
case "1977":
|
68 |
+
out_image = pilgram._1977(image)
|
69 |
+
case "aden":
|
70 |
+
out_image = pilgram.aden(image)
|
71 |
+
case "brannan":
|
72 |
+
out_image = pilgram.brannan(image)
|
73 |
+
case "brooklyn":
|
74 |
+
out_image = pilgram.brooklyn(image)
|
75 |
+
case "clarendon":
|
76 |
+
out_image = pilgram.clarendon(image)
|
77 |
+
case "earlybird":
|
78 |
+
out_image = pilgram.earlybird(image)
|
79 |
+
case "gingham":
|
80 |
+
out_image = pilgram.gingham(image)
|
81 |
+
case "hudson":
|
82 |
+
out_image = pilgram.hudson(image)
|
83 |
+
case "inkwell":
|
84 |
+
out_image = pilgram.inkwell(image)
|
85 |
+
case "kelvin":
|
86 |
+
out_image = pilgram.kelvin(image)
|
87 |
+
case "lark":
|
88 |
+
out_image = pilgram.lark(image)
|
89 |
+
case "lofi":
|
90 |
+
out_image = pilgram.lofi(image)
|
91 |
+
case "maven":
|
92 |
+
out_image = pilgram.maven(image)
|
93 |
+
case "mayfair":
|
94 |
+
out_image = pilgram.mayfair(image)
|
95 |
+
case "moon":
|
96 |
+
out_image = pilgram.moon(image)
|
97 |
+
case "nashville":
|
98 |
+
out_image = pilgram.nashville(image)
|
99 |
+
case "perpetua":
|
100 |
+
out_image = pilgram.perpetua(image)
|
101 |
+
case "reyes":
|
102 |
+
out_image = pilgram.reyes(image)
|
103 |
+
case "rise":
|
104 |
+
out_image = pilgram.rise(image)
|
105 |
+
case "slumber":
|
106 |
+
out_image = pilgram.slumber(image)
|
107 |
+
case "stinson":
|
108 |
+
out_image = pilgram.stinson(image)
|
109 |
+
case "toaster":
|
110 |
+
out_image = pilgram.toaster(image)
|
111 |
+
case "valencia":
|
112 |
+
out_image = pilgram.valencia(image)
|
113 |
+
case "walden":
|
114 |
+
out_image = pilgram.walden(image)
|
115 |
+
case "willow":
|
116 |
+
out_image = pilgram.willow(image)
|
117 |
+
case "xpro2":
|
118 |
+
out_image = pilgram.xpro2(image)
|
119 |
+
case _:
|
120 |
+
out_image = image
|
121 |
+
|
122 |
+
out_image = out_image.convert("RGB")
|
123 |
+
|
124 |
+
return ( torch.from_numpy(np.array(out_image).astype(np.float32) / 255.0).unsqueeze(0), )
|
125 |
+
|
126 |
+
# Convert tensor to PIL image
|
127 |
+
def tensor2pil(self, image):
|
128 |
+
return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))
|
129 |
+
|
130 |
+
# Freeze packages
|
131 |
+
def packages(self):
|
132 |
+
import sys, subprocess
|
133 |
+
return [r.decode().split('==')[0] for r in subprocess.check_output([sys.executable, '-m', 'pip', 'freeze']).split()]
|
134 |
+
|
135 |
+
NODE_CLASS_MAPPINGS = {
|
136 |
+
"Image Style Filter": WAS_Image_Style_Filter
|
137 |
+
}
|
wasNodeSuitesComfyui_filtersSuiteV3/WAS_License.txt
ADDED
@@ -0,0 +1,7 @@
Copyright 2023 Jordan Thompson (WASasquatch)

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
wasNodeSuitesComfyui_filtersSuiteV42/Image_Blank_WAS.py
ADDED
@@ -0,0 +1,45 @@
# By WASasquatch (Discord: WAS#0263)

import torch
import numpy as np
from PIL import Image, ImageOps
import random

class WAS_Image_Blank:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "width": ("INT", {"default": 512, "min": 8, "max": 4096, "step": 1}),
                "height": ("INT", {"default": 512, "min": 8, "max": 4096, "step": 1}),
                "red": ("INT", {"default": 255, "min": 0, "max": 255, "step": 1}),
                "green": ("INT", {"default": 255, "min": 0, "max": 255, "step": 1}),
                "blue": ("INT", {"default": 255, "min": 0, "max": 255, "step": 1}),
            }
        }
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "blank_image"

    CATEGORY = "WAS"

    def blank_image(self, width, height, red, green, blue):

        # Ensure dimensions are multiples of 8
        width = ( width // 8 ) * 8
        height = ( height // 8 ) * 8

        # Create the blank image
        blank = Image.new(mode = "RGB", size = (width, height), color = (red, green, blue))

        return ( self.pil2tensor(blank), )

    # Convert PIL to Tensor
    def pil2tensor(self, image):
        return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)

NODE_CLASS_MAPPINGS = {
    "Image Blank": WAS_Image_Blank
}
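A small sketch of the dimension rounding the node applies before creating the blank image; the 513x769 request is just an illustrative input:

from PIL import Image

width, height = 513, 769
# Same rounding the Image Blank node applies so dimensions stay multiples of 8
width = ( width // 8 ) * 8
height = ( height // 8 ) * 8
blank = Image.new(mode = "RGB", size = (width, height), color = (255, 0, 0))
print(blank.size)  # (512, 768)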
wasNodeSuitesComfyui_filtersSuiteV42/Image_Blend_WAS.py
ADDED
@@ -0,0 +1,52 @@
# By WASasquatch (Discord: WAS#0263)

import torch, sys, subprocess
import numpy as np
from PIL import Image, ImageOps

class WAS_Image_Blend:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image_a": ("IMAGE",),
                "image_b": ("IMAGE",),
                "blend_percentage": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "image_blend"

    CATEGORY = "WAS"

    def image_blend(self, image_a, image_b, blend_percentage):

        # Convert images to PIL
        img_a = self.tensor2pil(image_a)
        img_b = self.tensor2pil(image_b)

        # Blend image
        blend_mask = Image.new(mode = "L", size = img_a.size, color = (round(blend_percentage * 255)))
        blend_mask = ImageOps.invert(blend_mask)
        img_result = Image.composite(img_a, img_b, blend_mask)

        del img_a, img_b, blend_mask

        return ( torch.from_numpy(np.array(img_result).astype(np.float32) / 255.0).unsqueeze(0), )

    # Convert tensor to PIL image
    def tensor2pil(self, image):
        return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))

    # List installed packages via pip freeze
    def packages(self):
        import sys, subprocess
        return [r.decode().split('==')[0] for r in subprocess.check_output([sys.executable, '-m', 'pip', 'freeze']).split()]

NODE_CLASS_MAPPINGS = {
    "Image Blend": WAS_Image_Blend
}
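A minimal sketch, using only Pillow, of why this revision inverts the constant mask: with solid red/blue test images (illustrative values), blend_percentage = 1.0 now resolves fully to image_b:

from PIL import Image, ImageOps

img_a = Image.new("RGB", (8, 8), (255, 0, 0))   # red stand-in for image_a
img_b = Image.new("RGB", (8, 8), (0, 0, 255))   # blue stand-in for image_b
blend_percentage = 1.0

# Same constant-mask blend as the node: after ImageOps.invert, 1.0 selects image_b entirely
blend_mask = Image.new(mode = "L", size = img_a.size, color = (round(blend_percentage * 255)))
blend_mask = ImageOps.invert(blend_mask)
result = Image.composite(img_a, img_b, blend_mask)

print(result.getpixel((0, 0)))  # (0, 0, 255): fully image_b at blend_percentage = 1.0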
wasNodeSuitesComfyui_filtersSuiteV42/Image_Canny_Filter_WAS.py
ADDED
@@ -0,0 +1,147 @@
1 |
+
# By WASasquatch (Discord: WAS#0263)
|
2 |
+
|
3 |
+
import torch, os
|
4 |
+
import numpy as np
|
5 |
+
|
6 |
+
class WAS_Canny_Filter:
|
7 |
+
def __init__(self):
|
8 |
+
pass
|
9 |
+
|
10 |
+
@classmethod
|
11 |
+
def INPUT_TYPES(cls):
|
12 |
+
return {
|
13 |
+
"required": {
|
14 |
+
"image": ("IMAGE",),
|
15 |
+
"enable_threshold": (['false', 'true'],),
|
16 |
+
"threshold_low": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
|
17 |
+
"threshold_high": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
|
18 |
+
},
|
19 |
+
}
|
20 |
+
|
21 |
+
RETURN_TYPES = ("IMAGE",)
|
22 |
+
FUNCTION = "canny_filter"
|
23 |
+
|
24 |
+
CATEGORY = "WAS"
|
25 |
+
|
26 |
+
def canny_filter(self, image, threshold_low, threshold_high, enable_threshold):
|
27 |
+
|
28 |
+
self.install_opencv()
|
29 |
+
|
30 |
+
if enable_threshold == 'false':
|
31 |
+
threshold_low = None
|
32 |
+
threshold_high = None
|
33 |
+
|
34 |
+
image_canny = self.Canny_detector(255. * image.cpu().numpy().squeeze(), threshold_low, threshold_high)
|
35 |
+
|
36 |
+
return ( torch.from_numpy( image_canny )[None,], )
|
37 |
+
|
38 |
+
# Defining the Canny Detector function
|
39 |
+
# From: https://www.geeksforgeeks.org/implement-canny-edge-detector-in-python-using-opencv/
|
40 |
+
|
41 |
+
# here weak_th and strong_th are thresholds for
|
42 |
+
# double thresholding step
|
43 |
+
def Canny_detector(self, img, weak_th = None, strong_th = None):
|
44 |
+
|
45 |
+
import cv2
|
46 |
+
|
47 |
+
# conversion of image to grayscale
|
48 |
+
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
|
49 |
+
|
50 |
+
# Noise reduction step
|
51 |
+
img = cv2.GaussianBlur(img, (5, 5), 1.4)
|
52 |
+
|
53 |
+
# Calculating the gradients
|
54 |
+
gx = cv2.Sobel(np.float32(img), cv2.CV_64F, 1, 0, 3)
|
55 |
+
gy = cv2.Sobel(np.float32(img), cv2.CV_64F, 0, 1, 3)
|
56 |
+
|
57 |
+
# Conversion of Cartesian coordinates to polar
|
58 |
+
mag, ang = cv2.cartToPolar(gx, gy, angleInDegrees = True)
|
59 |
+
|
60 |
+
# setting the minimum and maximum thresholds
|
61 |
+
# for double thresholding
|
62 |
+
mag_max = np.max(mag)
|
63 |
+
if not weak_th:weak_th = mag_max * 0.1
|
64 |
+
if not strong_th:strong_th = mag_max * 0.5
|
65 |
+
|
66 |
+
# getting the dimensions of the input image
|
67 |
+
height, width = img.shape
|
68 |
+
|
69 |
+
# Looping through every pixel of the grayscale
|
70 |
+
# image
|
71 |
+
for i_x in range(width):
|
72 |
+
for i_y in range(height):
|
73 |
+
|
74 |
+
grad_ang = ang[i_y, i_x]
|
75 |
+
grad_ang = abs(grad_ang-180) if abs(grad_ang)>180 else abs(grad_ang)
|
76 |
+
|
77 |
+
# selecting the neighbours of the target pixel
|
78 |
+
# according to the gradient direction
|
79 |
+
# In the x axis direction
|
80 |
+
if grad_ang<= 22.5:
|
81 |
+
neighb_1_x, neighb_1_y = i_x-1, i_y
|
82 |
+
neighb_2_x, neighb_2_y = i_x + 1, i_y
|
83 |
+
|
84 |
+
# top right (diagonal-1) direction
|
85 |
+
elif grad_ang>22.5 and grad_ang<=(22.5 + 45):
|
86 |
+
neighb_1_x, neighb_1_y = i_x-1, i_y-1
|
87 |
+
neighb_2_x, neighb_2_y = i_x + 1, i_y + 1
|
88 |
+
|
89 |
+
# In y-axis direction
|
90 |
+
elif grad_ang>(22.5 + 45) and grad_ang<=(22.5 + 90):
|
91 |
+
neighb_1_x, neighb_1_y = i_x, i_y-1
|
92 |
+
neighb_2_x, neighb_2_y = i_x, i_y + 1
|
93 |
+
|
94 |
+
# top left (diagonal-2) direction
|
95 |
+
elif grad_ang>(22.5 + 90) and grad_ang<=(22.5 + 135):
|
96 |
+
neighb_1_x, neighb_1_y = i_x-1, i_y + 1
|
97 |
+
neighb_2_x, neighb_2_y = i_x + 1, i_y-1
|
98 |
+
|
99 |
+
# Now it restarts the cycle
|
100 |
+
elif grad_ang>(22.5 + 135) and grad_ang<=(22.5 + 180):
|
101 |
+
neighb_1_x, neighb_1_y = i_x-1, i_y
|
102 |
+
neighb_2_x, neighb_2_y = i_x + 1, i_y
|
103 |
+
|
104 |
+
# Non-maximum suppression step
|
105 |
+
if width>neighb_1_x>= 0 and height>neighb_1_y>= 0:
|
106 |
+
if mag[i_y, i_x]<mag[neighb_1_y, neighb_1_x]:
|
107 |
+
mag[i_y, i_x]= 0
|
108 |
+
continue
|
109 |
+
|
110 |
+
if width>neighb_2_x>= 0 and height>neighb_2_y>= 0:
|
111 |
+
if mag[i_y, i_x]<mag[neighb_2_y, neighb_2_x]:
|
112 |
+
mag[i_y, i_x]= 0
|
113 |
+
|
114 |
+
weak_ids = np.zeros_like(img)
|
115 |
+
strong_ids = np.zeros_like(img)
|
116 |
+
ids = np.zeros_like(img)
|
117 |
+
|
118 |
+
# double thresholding step
|
119 |
+
for i_x in range(width):
|
120 |
+
for i_y in range(height):
|
121 |
+
|
122 |
+
grad_mag = mag[i_y, i_x]
|
123 |
+
|
124 |
+
if grad_mag<weak_th:
|
125 |
+
mag[i_y, i_x]= 0
|
126 |
+
elif strong_th>grad_mag>= weak_th:
|
127 |
+
ids[i_y, i_x]= 1
|
128 |
+
else:
|
129 |
+
ids[i_y, i_x]= 2
|
130 |
+
|
131 |
+
|
132 |
+
# finally returning the magnitude of
|
133 |
+
# gradients of edges
|
134 |
+
return mag
|
135 |
+
|
136 |
+
def install_opencv(self):
|
137 |
+
if 'opencv-python' not in self.packages():
|
138 |
+
print("Installing CV2...")
|
139 |
+
import sys, subprocess
subprocess.check_call([sys.executable, '-m', 'pip', '-q', 'install', 'opencv-python'])
|
140 |
+
|
141 |
+
def packages(self):
|
142 |
+
import sys, subprocess
|
143 |
+
return [r.decode().split('==')[0] for r in subprocess.check_output([sys.executable, '-m', 'pip', 'freeze']).split()]
|
144 |
+
|
145 |
+
NODE_CLASS_MAPPINGS = {
|
146 |
+
"Canny Filter": WAS_Canny_Filter
|
147 |
+
}
|
wasNodeSuitesComfyui_filtersSuiteV42/Image_Combine_WAS.py
ADDED
@@ -0,0 +1,103 @@
1 |
+
# By WASasquatch (Discord: WAS#0263)
|
2 |
+
|
3 |
+
import torch, sys, subprocess
|
4 |
+
import numpy as np
|
5 |
+
from PIL import Image
|
6 |
+
|
7 |
+
class WAS_Image_Combine:
|
8 |
+
def __init__(self):
|
9 |
+
pass
|
10 |
+
|
11 |
+
@classmethod
|
12 |
+
def INPUT_TYPES(cls):
|
13 |
+
return {
|
14 |
+
"required": {
|
15 |
+
"image_a": ("IMAGE",),
|
16 |
+
"image_b": ("IMAGE",),
|
17 |
+
"mode": ([
|
18 |
+
"add",
|
19 |
+
"color",
|
20 |
+
"color_burn",
|
21 |
+
"color_dodge",
|
22 |
+
"darken",
|
23 |
+
#"difference",
|
24 |
+
#"exclusion",
|
25 |
+
"hard_light",
|
26 |
+
"hue",
|
27 |
+
"lighten",
|
28 |
+
"multiply",
|
29 |
+
"overlay",
|
30 |
+
"screen",
|
31 |
+
"soft_light"
|
32 |
+
],),
|
33 |
+
},
|
34 |
+
}
|
35 |
+
|
36 |
+
RETURN_TYPES = ("IMAGE",)
|
37 |
+
FUNCTION = "image_combine"
|
38 |
+
|
39 |
+
CATEGORY = "WAS"
|
40 |
+
|
41 |
+
def image_combine(self, image_a, image_b, mode):
|
42 |
+
|
43 |
+
# Install Pilgram
|
44 |
+
if 'pilgram' not in self.packages():
|
45 |
+
print("Installing Pilgram...")
|
46 |
+
subprocess.check_call([sys.executable, '-m', 'pip', '-q', 'install', 'pilgram'])
|
47 |
+
|
48 |
+
# Import Pilgram module
|
49 |
+
import pilgram
|
50 |
+
|
51 |
+
# Convert images to PIL
|
52 |
+
img_a = self.tensor2pil(image_a)
|
53 |
+
img_b = self.tensor2pil(image_b)
|
54 |
+
|
55 |
+
# Apply blending
|
56 |
+
match mode:
|
57 |
+
case "color":
|
58 |
+
out_image = pilgram.css.blending.color(img_a, img_b)
|
59 |
+
case "color_burn":
|
60 |
+
out_image = pilgram.css.blending.color_burn(img_a, img_b)
|
61 |
+
case "color_dodge":
|
62 |
+
out_image = pilgram.css.blending.color_dodge(img_a, img_b)
|
63 |
+
case "darken":
|
64 |
+
out_image = pilgram.css.blending.darken(img_a, img_b)
|
65 |
+
case "difference":
|
66 |
+
out_image = pilgram.css.blending.difference(img_a, img_b)
|
67 |
+
case "exclusion":
|
68 |
+
out_image = pilgram.css.blending.exclusion(img_a, img_b)
|
69 |
+
case "hard_light":
|
70 |
+
out_image = pilgram.css.blending.hard_light(img_a, img_b)
|
71 |
+
case "hue":
|
72 |
+
out_image = pilgram.css.blending.hue(img_a, img_b)
|
73 |
+
case "lighten":
|
74 |
+
out_image = pilgram.css.blending.lighten(img_a, img_b)
|
75 |
+
case "multiply":
|
76 |
+
out_image = pilgram.css.blending.multiply(img_a, img_b)
|
77 |
+
case "add":
|
78 |
+
out_image = pilgram.css.blending.normal(img_a, img_b)
|
79 |
+
case "overlay":
|
80 |
+
out_image = pilgram.css.blending.overlay(img_a, img_b)
|
81 |
+
case "screen":
|
82 |
+
out_image = pilgram.css.blending.screen(img_a, img_b)
|
83 |
+
case "soft_light":
|
84 |
+
out_image = pilgram.css.blending.soft_light(img_a, img_b)
|
85 |
+
case _:
|
86 |
+
out_image = img_a
|
87 |
+
|
88 |
+
out_image = out_image.convert("RGB")
|
89 |
+
|
90 |
+
return ( torch.from_numpy(np.array(out_image).astype(np.float32) / 255.0).unsqueeze(0), )
|
91 |
+
|
92 |
+
# Convert tensor to PIL image
|
93 |
+
def tensor2pil(self, image):
|
94 |
+
return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))
|
95 |
+
|
96 |
+
# Freeze packages
|
97 |
+
def packages(self):
|
98 |
+
import sys, subprocess
|
99 |
+
return [r.decode().split('==')[0] for r in subprocess.check_output([sys.executable, '-m', 'pip', 'freeze']).split()]
|
100 |
+
|
101 |
+
NODE_CLASS_MAPPINGS = {
|
102 |
+
"Image Combine": WAS_Image_Combine
|
103 |
+
}
|
wasNodeSuitesComfyui_filtersSuiteV42/Image_Edge_Detection_WAS.py
ADDED
@@ -0,0 +1,48 @@
1 |
+
# By WASasquatch (Discord: WAS#0263)
|
2 |
+
|
3 |
+
import torch
|
4 |
+
import numpy as np
|
5 |
+
from PIL import Image, ImageFilter
|
6 |
+
|
7 |
+
class WAS_Image_Edge:
|
8 |
+
def __init__(self):
|
9 |
+
pass
|
10 |
+
|
11 |
+
@classmethod
|
12 |
+
def INPUT_TYPES(cls):
|
13 |
+
return {
|
14 |
+
"required": {
|
15 |
+
"image": ("IMAGE",),
|
16 |
+
"mode": (["normal", "laplacian"],),
|
17 |
+
},
|
18 |
+
}
|
19 |
+
|
20 |
+
RETURN_TYPES = ("IMAGE",)
|
21 |
+
FUNCTION = "image_edges"
|
22 |
+
|
23 |
+
CATEGORY = "WAS"
|
24 |
+
|
25 |
+
def image_edges(self, image, mode):
|
26 |
+
|
27 |
+
# Convert image to PIL
|
28 |
+
image = self.tensor2pil(image)
|
29 |
+
|
30 |
+
# Detect edges
|
31 |
+
match mode:
|
32 |
+
case "normal":
|
33 |
+
image = image.filter(ImageFilter.FIND_EDGES)
|
34 |
+
case "laplacian":
|
35 |
+
image = image.filter(ImageFilter.Kernel((3, 3), (-1, -1, -1, -1, 8,
|
36 |
+
-1, -1, -1, -1), 1, 0))
|
37 |
+
case _:
|
38 |
+
image = image
|
39 |
+
|
40 |
+
return ( torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0), )
|
41 |
+
|
42 |
+
# Convert tensor to PIL image
|
43 |
+
def tensor2pil(self, image):
|
44 |
+
return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))
|
45 |
+
|
46 |
+
NODE_CLASS_MAPPINGS = {
|
47 |
+
"Image Detect Edges": WAS_Image_Edge
|
48 |
+
}
|
wasNodeSuitesComfyui_filtersSuiteV42/Image_Film_Grain_WAS.py
ADDED
@@ -0,0 +1,89 @@
# By WASasquatch (Discord: WAS#0263)

import torch
import numpy as np
from PIL import Image, ImageFilter, ImageEnhance
import random

class WAS_Film_Grain:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "density": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 1.0, "step": 0.01}),
                "intensity": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 1.0, "step": 0.01}),
                "highlights": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 255.0, "step": 0.01}),
                "supersample_factor": ("INT", {"default": 4, "min": 1, "max": 8, "step": 1})
            }
        }
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "film_grain"

    CATEGORY = "WAS"

    def film_grain(self, image, density, intensity, highlights, supersample_factor):
        return ( self.pil2tensor(self.apply_film_grain(self.tensor2pil(image), density, intensity, highlights, supersample_factor)), )

    def apply_film_grain(self, img, density=0.1, intensity=1.0, highlights=1.0, supersample_factor = 4):
        """
        Apply grayscale noise with specified density, intensity, and highlights to a PIL image.
        """
        # Convert the image to grayscale
        img_gray = img.convert('L')

        # Super Resolution noise image
        original_size = img.size
        img_gray = img_gray.resize(((img.size[0] * supersample_factor), (img.size[1] * supersample_factor)), Image.Resampling(2))

        # Calculate the number of noise pixels to add
        num_pixels = int(density * img_gray.size[0] * img_gray.size[1])

        # Create a list of noise pixel positions
        noise_pixels = []
        for i in range(num_pixels):
            x = random.randint(0, img_gray.size[0]-1)
            y = random.randint(0, img_gray.size[1]-1)
            noise_pixels.append((x, y))

        # Apply the noise to the grayscale image
        for x, y in noise_pixels:
            value = random.randint(0, 255)
            img_gray.putpixel((x, y), value)

        # Convert the grayscale image back to RGB
        img_noise = img_gray.convert('RGB')

        # Blur noise image
        img_noise = img_noise.filter(ImageFilter.GaussianBlur(radius = 0.125))

        # Downsize noise image
        img_noise = img_noise.resize(original_size, Image.Resampling(1))

        # Sharpen super resolution result
        img_noise = img_noise.filter(ImageFilter.EDGE_ENHANCE_MORE)

        # Blend the noisy color image with the original color image
        img_final = Image.blend(img, img_noise, intensity)

        # Adjust the highlights
        enhancer = ImageEnhance.Brightness(img_final)
        img_highlights = enhancer.enhance(highlights)

        # Return the final image
        return img_highlights

    # Convert PIL to Tensor
    def pil2tensor(self, image):
        return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)

    # Convert Tensor to PIL image
    def tensor2pil(self, image):
        return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))

NODE_CLASS_MAPPINGS = {
    "Image Film Grain": WAS_Film_Grain
}
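A rough, illustrative estimate of how the noise loop scales with supersample_factor, since noise pixels are placed on the upscaled grayscale copy; the 512x512 size, 0.5 density and factor 4 below are made-up example settings, not defaults:

# Noise pixels are drawn on the supersampled copy, so the count scales with the squared factor
width, height = 512, 512
density = 0.5
supersample_factor = 4

num_pixels = int(density * (width * supersample_factor) * (height * supersample_factor))
print(num_pixels)  # 2097152 putpixel calls for a 512x512 input at these settings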
wasNodeSuitesComfyui_filtersSuiteV42/Image_Filters_WAS.py
ADDED
@@ -0,0 +1,103 @@
1 |
+
# By WASasquatch (Discord: WAS#0263)
|
2 |
+
|
3 |
+
import torch
|
4 |
+
import numpy as np
|
5 |
+
from PIL import Image, ImageFilter, ImageEnhance, ImageOps
|
6 |
+
|
7 |
+
class WAS_Image_Filters:
|
8 |
+
def __init__(self):
|
9 |
+
pass
|
10 |
+
|
11 |
+
@classmethod
|
12 |
+
def INPUT_TYPES(cls):
|
13 |
+
return {
|
14 |
+
"required": {
|
15 |
+
"image": ("IMAGE",),
|
16 |
+
"brightness": ("FLOAT", {"default": 0.0, "min": -1.0, "max": 1.0, "step": 0.01}),
|
17 |
+
"contrast": ("FLOAT", {"default": 1.0, "min": -1.0, "max": 2.0, "step": 0.01}),
|
18 |
+
"saturation": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 5.0, "step": 0.01}),
|
19 |
+
"sharpness": ("FLOAT", {"default": 1.0, "min": -5.0, "max": 5.0, "step": 0.01}),
|
20 |
+
"blur": ("INT", {"default": 0, "min": 0, "max": 16, "step": 1}),
|
21 |
+
"gaussian_blur": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 255.0, "step": 0.1}),
|
22 |
+
"edge_enhance": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
|
23 |
+
},
|
24 |
+
}
|
25 |
+
|
26 |
+
RETURN_TYPES = ("IMAGE",)
|
27 |
+
FUNCTION = "image_filters"
|
28 |
+
|
29 |
+
CATEGORY = "WAS"
|
30 |
+
|
31 |
+
def image_filters(self, image, brightness, contrast, saturation, sharpness, blur, gaussian_blur, edge_enhance):
|
32 |
+
|
33 |
+
pil_image = None
|
34 |
+
|
35 |
+
# Apply NP Adjustments
|
36 |
+
if brightness > 0.0 or brightness < 0.0:
|
37 |
+
# Apply brightness
|
38 |
+
image = np.clip(image + brightness, 0.0, 1.0)
|
39 |
+
|
40 |
+
if contrast > 1.0 or contrast < 1.0:
|
41 |
+
# Apply contrast
|
42 |
+
image = np.clip(image * contrast, 0.0, 1.0)
|
43 |
+
|
44 |
+
# Apply PIL Adjustments
|
45 |
+
if saturation > 1.0 or saturation < 1.0:
|
46 |
+
#PIL Image
|
47 |
+
pil_image = self.tensor2pil(image)
|
48 |
+
# Apply saturation
|
49 |
+
pil_image = ImageEnhance.Color(pil_image).enhance(saturation)
|
50 |
+
|
51 |
+
if sharpness > 1.0 or sharpness < 1.0:
|
52 |
+
# Assign or create PIL Image
|
53 |
+
pil_image = pil_image if pil_image else self.tensor2pil(image)
|
54 |
+
# Apply sharpness
|
55 |
+
pil_image = ImageEnhance.Sharpness(pil_image).enhance(sharpness)
|
56 |
+
|
57 |
+
if blur > 0:
|
58 |
+
# Assign or create PIL Image
|
59 |
+
pil_image = pil_image if pil_image else self.tensor2pil(image)
|
60 |
+
# Apply blur
|
61 |
+
for _ in range(blur):
|
62 |
+
pil_image = pil_image.filter(ImageFilter.BLUR)
|
63 |
+
|
64 |
+
if gaussian_blur > 0.0:
|
65 |
+
# Assign or create PIL Image
|
66 |
+
pil_image = pil_image if pil_image else self.tensor2pil(image)
|
67 |
+
# Apply Gaussian blur
|
68 |
+
pil_image = pil_image.filter(ImageFilter.GaussianBlur(radius = gaussian_blur))
|
69 |
+
|
70 |
+
if edge_enhance > 0.0:
|
71 |
+
# Assign or create PIL Image
|
72 |
+
pil_image = pil_image if pil_image else self.tensor2pil(image)
|
73 |
+
# Edge Enhancement
|
74 |
+
edge_enhanced_img = pil_image.filter(ImageFilter.EDGE_ENHANCE_MORE)
|
75 |
+
# Blend Mask
|
76 |
+
blend_mask = Image.new(mode = "L", size = pil_image.size, color = (round(edge_enhance * 255)))
|
77 |
+
blend_mask = ImageOps.invert(blend_maskk)
|
78 |
+
# Composite Original and Enhanced Version
|
79 |
+
pil_image = Image.composite(edge_enhanced_img, pil_image, blend_mask)
|
80 |
+
# Clean-up
|
81 |
+
del blend_mask, edge_enhanced_img
|
82 |
+
|
83 |
+
# Output image
|
84 |
+
out_image = ( self.pil2tensor(pil_image) if pil_image else image )
|
85 |
+
|
86 |
+
return ( out_image, )
|
87 |
+
|
88 |
+
# Tensor to PIL
|
89 |
+
def tensor2pil(self, image):
|
90 |
+
return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))
|
91 |
+
|
92 |
+
# Convert PIL to Tensor
|
93 |
+
def pil2tensor(self, image):
|
94 |
+
return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)
|
95 |
+
|
96 |
+
# Freeze packages
|
97 |
+
def packages(self):
|
98 |
+
import sys, subprocess
|
99 |
+
return [r.decode().split('==')[0] for r in subprocess.check_output([sys.executable, '-m', 'pip', 'freeze']).split()]
|
100 |
+
|
101 |
+
NODE_CLASS_MAPPINGS = {
|
102 |
+
"Image Filters": WAS_Image_Filters
|
103 |
+
}
|
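For reference, the tensor-to-PIL round-trip that every node in this suite leans on looks roughly like the sketch below. It assumes ComfyUI's IMAGE convention of a float32 tensor shaped [batch, height, width, channels] with values in 0-1; the standalone function names here are illustrative only and mirror the tensor2pil/pil2tensor methods above.

# Illustrative sketch of the tensor <-> PIL conversion used throughout this suite.
# Assumes ComfyUI IMAGE tensors are float32, shape [B, H, W, C], values in [0, 1].
import numpy as np
import torch
from PIL import Image

def tensor_to_pil(image: torch.Tensor) -> Image.Image:
    # Drop the batch dimension, scale to 0-255 and clamp before casting to uint8
    array = np.clip(255.0 * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)
    return Image.fromarray(array)

def pil_to_tensor(image: Image.Image) -> torch.Tensor:
    # Scale back to 0-1 and restore the batch dimension expected by downstream nodes
    return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)

# Round-trip example (hypothetical): values survive within 1/255 quantization error
# t = torch.rand(1, 64, 64, 3)
# assert torch.allclose(pil_to_tensor(tensor_to_pil(t)), t, atol=1 / 255)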
wasNodeSuitesComfyui_filtersSuiteV42/Image_Flip_WAS.py
ADDED
@@ -0,0 +1,50 @@
# By WASasquatch (Discord: WAS#0263)

import torch
import numpy as np
import PIL
from PIL import Image

class WAS_Image_Flip:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image": ("IMAGE",),
                "mode": (["horizontal", "vertical",],),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "image_flip"

    CATEGORY = "WAS"

    def image_flip(self, image, mode):

        # PIL Image
        image = self.tensor2pil(image)

        # Flip Image
        if mode == 'horizontal':
            image = image.transpose(0)
        if mode == 'vertical':
            image = image.transpose(1)

        return ( torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0), )

    # Tensor to PIL
    def tensor2pil(self, image):
        return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))

    # Freeze packages
    def packages(self):
        import sys, subprocess
        return [r.decode().split('==')[0] for r in subprocess.check_output([sys.executable, '-m', 'pip', 'freeze']).split()]

NODE_CLASS_MAPPINGS = {
    "Image Flip": WAS_Image_Flip
}
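A note on the bare integers passed to transpose() in this node (and in the Image Rotate node further down): they are the numeric values of PIL's transpose constants. The sketch below spells out the mapping, assuming the Pillow constants keep their long-standing integer values; the named forms are equivalent and arguably more readable.

# Mapping between the bare integers used above and PIL's named transpose constants
from PIL import Image

assert Image.FLIP_LEFT_RIGHT == 0   # horizontal flip
assert Image.FLIP_TOP_BOTTOM == 1   # vertical flip
assert Image.ROTATE_90 == 2         # 90-degree rotation (used by the Image Rotate node below)

# So image.transpose(0) behaves the same as image.transpose(Image.FLIP_LEFT_RIGHT)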
wasNodeSuitesComfyui_filtersSuiteV42/Image_Nova_Filter_WAS.py
ADDED
@@ -0,0 +1,73 @@
# By WASasquatch (Discord: WAS#0263)

import torch
import numpy as np
from PIL import Image

class WAS_Image_Nova_Filter:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image": ("IMAGE",),
                "amplitude": ("FLOAT", {"default": 0.1, "min": 0.0, "max": 1.0, "step": 0.001}),
                "frequency": ("FLOAT", {"default": 3.14, "min": 0.0, "max": 100.0, "step": 0.001}),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "nova_sine"

    CATEGORY = "WAS"

    def nova_sine(self, image, amplitude, frequency):

        # Convert the tensor to a PIL image
        img = self.tensor2pil(image)

        # Convert the image to a numpy array
        img_array = np.array(img)

        # Define a sine wave function
        def sine(x, freq, amp):
            return amp * np.sin(2 * np.pi * freq * x)

        # Calculate the sampling frequency of the image
        resolution = img.info.get('dpi')  # PPI
        physical_size = img.size  # pixels

        if resolution is not None:
            # Millimetres per pixel (25.4 mm per inch divided by the PPI)
            ppm = 25.4 / resolution
            physical_size = tuple(int(pix * ppm) for pix in physical_size)

        # Set the maximum frequency for the sine wave
        max_freq = img.width / 2

        # Ensure frequency isn't outside the visually representable range
        if frequency > max_freq:
            frequency = max_freq

        # Apply levels to the image using the sine function
        for i in range(img_array.shape[0]):
            for j in range(img_array.shape[1]):
                for k in range(img_array.shape[2]):
                    img_array[i, j, k] = int(sine(img_array[i, j, k] / 255, frequency, amplitude) * 255)

        return ( torch.from_numpy(img_array.astype(np.float32) / 255.0).unsqueeze(0), )

    # Convert tensor to PIL image
    def tensor2pil(self, image):
        return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))

    # Freeze installed pip modules
    def packages(self):
        import sys, subprocess
        return [r.decode().split('==')[0] for r in subprocess.check_output([sys.executable, '-m', 'pip', 'freeze']).split()]

NODE_CLASS_MAPPINGS = {
    "Image Nova Filter": WAS_Image_Nova_Filter
}
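The triple nested loop above applies the sine transfer function one channel value at a time, which is slow for large images. A vectorized NumPy equivalent is sketched below; it is an illustrative alternative rather than part of the uploaded node, and it clamps negative sine values to zero instead of relying on uint8 wrap-around, so its output can differ slightly from the loop above.

# Illustrative vectorized equivalent of the per-pixel loop (not part of the node itself)
import numpy as np

def nova_sine_vectorized(img_array: np.ndarray, frequency: float, amplitude: float) -> np.ndarray:
    # Normalize to 0-1 and apply the sine transfer curve to every channel at once
    normalized = img_array.astype(np.float32) / 255.0
    transformed = amplitude * np.sin(2 * np.pi * frequency * normalized)
    # Clamp to the displayable range before casting back to uint8
    return np.clip(transformed * 255, 0, 255).astype(np.uint8)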
wasNodeSuitesComfyui_filtersSuiteV42/Image_Rotate_WAS.py
ADDED
@@ -0,0 +1,69 @@
# By WASasquatch (Discord: WAS#0263)

import torch
import numpy as np
import PIL
from PIL import Image

class WAS_Image_Rotate:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image": ("IMAGE",),
                "mode": (["transpose", "internal",],),
                "rotation": ("INT", {"default": 0, "min": 0, "max": 360, "step": 90}),
                "sampler": (["nearest", "bilinear", "bicubic"],),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "image_rotate"

    CATEGORY = "WAS"

    def image_rotate(self, image, mode, rotation, sampler):

        # PIL Image
        image = self.tensor2pil(image)

        # Check rotation
        if rotation > 360:
            rotation = int(360)
        if (rotation % 90 != 0):
            rotation = int((rotation // 90) * 90)

        # Set Sampler
        match sampler:
            case 'nearest':
                sampler = PIL.Image.NEAREST
            case 'bicubic':
                sampler = PIL.Image.BICUBIC
            case 'bilinear':
                sampler = PIL.Image.BILINEAR

        # Rotate Image
        if mode == 'internal':
            image = image.rotate(rotation, sampler)
        else:
            rot = int(rotation / 90)
            for _ in range(rot):
                image = image.transpose(2)

        return ( torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0), )

    # Tensor to PIL
    def tensor2pil(self, image):
        return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))

    # Freeze packages
    def packages(self):
        import sys, subprocess
        return [r.decode().split('==')[0] for r in subprocess.check_output([sys.executable, '-m', 'pip', 'freeze']).split()]

NODE_CLASS_MAPPINGS = {
    "Image Rotate": WAS_Image_Rotate
}
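Two small points about the rotate node above: the match/case dispatch requires Python 3.10 or newer, and the rotation value is first clamped to 360 and snapped down to the nearest multiple of 90. The sketch below is an illustrative, version-agnostic alternative using a plain dict lookup; the resampling constants are the standard Pillow ones, and the helper names are hypothetical.

# Illustrative dict-based sampler lookup that avoids match/case (works on Python < 3.10)
import PIL.Image

SAMPLERS = {
    'nearest': PIL.Image.NEAREST,
    'bilinear': PIL.Image.BILINEAR,
    'bicubic': PIL.Image.BICUBIC,
}

def resolve_sampler(name: str):
    # Fall back to nearest-neighbour for unknown names
    return SAMPLERS.get(name, PIL.Image.NEAREST)

def snap_rotation(rotation: int) -> int:
    # Clamp to 360 and snap down to a multiple of 90, mirroring the node's checks
    return (min(rotation, 360) // 90) * 90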
wasNodeSuitesComfyui_filtersSuiteV42/Image_Style_Filter_WAS.py
ADDED
@@ -0,0 +1,137 @@
# By WASasquatch (Discord: WAS#0263)

import torch, sys, subprocess
import numpy as np
from PIL import Image

class WAS_Image_Style_Filter:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image": ("IMAGE",),
                "style": ([
                    "1977",
                    "aden",
                    "brannan",
                    "brooklyn",
                    "clarendon",
                    "earlybird",
                    "gingham",
                    "hudson",
                    "inkwell",
                    "kelvin",
                    "lark",
                    "lofi",
                    "maven",
                    "mayfair",
                    "moon",
                    "nashville",
                    "perpetua",
                    "reyes",
                    "rise",
                    "slumber",
                    "stinson",
                    "toaster",
                    "valencia",
                    "walden",
                    "willow",
                    "xpro2"
                ],),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "image_style_filter"

    CATEGORY = "WAS"

    def image_style_filter(self, image, style):

        # Install Pilgram if it isn't already available
        if 'pilgram' not in self.packages():
            print("Installing Pilgram...")
            subprocess.check_call([sys.executable, '-m', 'pip', '-q', 'install', 'pilgram'])

        # Import Pilgram module
        import pilgram

        # Convert image to PIL
        image = self.tensor2pil(image)

        # Apply the selected style filter
        match style:
            case "1977":
                out_image = pilgram._1977(image)
            case "aden":
                out_image = pilgram.aden(image)
            case "brannan":
                out_image = pilgram.brannan(image)
            case "brooklyn":
                out_image = pilgram.brooklyn(image)
            case "clarendon":
                out_image = pilgram.clarendon(image)
            case "earlybird":
                out_image = pilgram.earlybird(image)
            case "gingham":
                out_image = pilgram.gingham(image)
            case "hudson":
                out_image = pilgram.hudson(image)
            case "inkwell":
                out_image = pilgram.inkwell(image)
            case "kelvin":
                out_image = pilgram.kelvin(image)
            case "lark":
                out_image = pilgram.lark(image)
            case "lofi":
                out_image = pilgram.lofi(image)
            case "maven":
                out_image = pilgram.maven(image)
            case "mayfair":
                out_image = pilgram.mayfair(image)
            case "moon":
                out_image = pilgram.moon(image)
            case "nashville":
                out_image = pilgram.nashville(image)
            case "perpetua":
                out_image = pilgram.perpetua(image)
            case "reyes":
                out_image = pilgram.reyes(image)
            case "rise":
                out_image = pilgram.rise(image)
            case "slumber":
                out_image = pilgram.slumber(image)
            case "stinson":
                out_image = pilgram.stinson(image)
            case "toaster":
                out_image = pilgram.toaster(image)
            case "valencia":
                out_image = pilgram.valencia(image)
            case "walden":
                out_image = pilgram.walden(image)
            case "willow":
                out_image = pilgram.willow(image)
            case "xpro2":
                out_image = pilgram.xpro2(image)
            case _:
                out_image = image

        out_image = out_image.convert("RGB")

        return ( torch.from_numpy(np.array(out_image).astype(np.float32) / 255.0).unsqueeze(0), )

    # Convert tensor to PIL image
    def tensor2pil(self, image):
        return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))

    # Freeze packages
    def packages(self):
        import sys, subprocess
        return [r.decode().split('==')[0] for r in subprocess.check_output([sys.executable, '-m', 'pip', 'freeze']).split()]

NODE_CLASS_MAPPINGS = {
    "Image Style Filter": WAS_Image_Style_Filter
}
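Since every branch of the match above simply calls the pilgram function of the same name ("1977" being the one exception, because it maps to pilgram._1977), a more compact dispatch is possible. The sketch below is an illustrative alternative rather than the shipped node, and it assumes pilgram is already installed.

# Illustrative getattr-based dispatch equivalent to the long match/case above
import pilgram

def apply_style(image, style: str):
    # "1977" starts with a digit, so the corresponding pilgram function is named _1977
    func_name = '_1977' if style == '1977' else style
    style_func = getattr(pilgram, func_name, None)
    # Fall back to the untouched image when the style name is unknown
    return style_func(image) if callable(style_func) else image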
wasNodeSuitesComfyui_filtersSuiteV42/WAS_License.txt
ADDED
@@ -0,0 +1,7 @@
Copyright 2023 Jordan Thompson (WASasquatch)

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.