diff --git a/ComfyUI-InstantID/.github/FUNDING.yml b/ComfyUI-InstantID/.github/FUNDING.yml
new file mode 100644
index 0000000000000000000000000000000000000000..98c529eddd2a30ebcbaddccc3a369140d850ec95
--- /dev/null
+++ b/ComfyUI-InstantID/.github/FUNDING.yml
@@ -0,0 +1,13 @@
+# These are supported funding model platforms
+
+github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
+patreon: # Replace with a single Patreon username
+open_collective: # Replace with a single Open Collective username
+ko_fi: # Replace with a single Ko-fi username
+tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
+community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
+liberapay: # Replace with a single Liberapay username
+issuehunt: # Replace with a single IssueHunt username
+otechie: # Replace with a single Otechie username
+lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
+custom: https://afdian.net/a/ZHOZHO # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
diff --git "a/ComfyUI-InstantID/INSTANTID WORKFLOWS/V1.0 InstantID + ArtGallery\343\200\220Zho\343\200\221.json" "b/ComfyUI-InstantID/INSTANTID WORKFLOWS/V1.0 InstantID + ArtGallery\343\200\220Zho\343\200\221.json"
new file mode 100644
index 0000000000000000000000000000000000000000..2b904fa7ef325c61e2a164e4243bbef963a31387
--- /dev/null
+++ "b/ComfyUI-InstantID/INSTANTID WORKFLOWS/V1.0 InstantID + ArtGallery\343\200\220Zho\343\200\221.json"
@@ -0,0 +1,615 @@
+{
+ "last_node_id": 21,
+ "last_link_id": 41,
+ "nodes": [
+ {
+ "id": 6,
+ "type": "LoadImage",
+ "pos": [
+ 100,
+ 1770
+ ],
+ "size": {
+ "0": 380,
+ "1": 500
+ },
+ "flags": {},
+ "order": 0,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "IMAGE",
+ "type": "IMAGE",
+ "links": [
+ 31
+ ],
+ "shape": 3
+ },
+ {
+ "name": "MASK",
+ "type": "MASK",
+ "links": null,
+ "shape": 3
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "LoadImage"
+ },
+ "widgets_values": [
+ "download.jpg",
+ "image"
+ ]
+ },
+ {
+ "id": 16,
+ "type": "IDBaseModelLoader_fromhub",
+ "pos": [
+ 100,
+ 1420
+ ],
+ "size": {
+ "0": 290,
+ "1": 60
+ },
+ "flags": {},
+ "order": 5,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "controlnet",
+ "type": "MODEL",
+ "link": 33
+ }
+ ],
+ "outputs": [
+ {
+ "name": "pipe",
+ "type": "MODEL",
+ "links": [
+ 34
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "IDBaseModelLoader_fromhub"
+ },
+ "widgets_values": [
+ "wangqixun/YamerMIX_v8"
+ ]
+ },
+ {
+ "id": 11,
+ "type": "InsightFaceLoader",
+ "pos": [
+ 100,
+ 1660
+ ],
+ "size": {
+ "0": 290,
+ "1": 60
+ },
+ "flags": {},
+ "order": 1,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "INSIGHTFACE",
+ "type": "INSIGHTFACE",
+ "links": [
+ 29
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "InsightFaceLoader"
+ },
+ "widgets_values": [
+ "CUDA"
+ ]
+ },
+ {
+ "id": 4,
+ "type": "Ipadapter_instantidLoader",
+ "pos": [
+ 98,
+ 1526
+ ],
+ "size": {
+ "0": 290,
+ "1": 90
+ },
+ "flags": {},
+ "order": 7,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "pipe",
+ "type": "MODEL",
+ "link": 34
+ }
+ ],
+ "outputs": [
+ {
+ "name": "MODEL",
+ "type": "MODEL",
+ "links": [
+ 30
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "Ipadapter_instantidLoader"
+ },
+ "widgets_values": [
+ "/content/ComfyUI/models/checkpoints",
+ "ip-adapter.bin"
+ ]
+ },
+ {
+ "id": 2,
+ "type": "IDControlNetLoader",
+ "pos": [
+ 100,
+ 1320
+ ],
+ "size": {
+ "0": 290,
+ "1": 60
+ },
+ "flags": {},
+ "order": 2,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "controlnet",
+ "type": "MODEL",
+ "links": [
+ 33
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "IDControlNetLoader"
+ },
+ "widgets_values": [
+ "/content/ComfyUI/models/controlnet"
+ ]
+ },
+ {
+ "id": 12,
+ "type": "PreviewImage",
+ "pos": [
+ 500,
+ 1770
+ ],
+ "size": {
+ "0": 470,
+ "1": 500
+ },
+ "flags": {},
+ "order": 11,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "images",
+ "type": "IMAGE",
+ "link": 32
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "PreviewImage"
+ }
+ },
+ {
+ "id": 18,
+ "type": "ArtistsImage_Zho",
+ "pos": [
+ 410,
+ 1320
+ ],
+ "size": [
+ 300,
+ 400
+ ],
+ "flags": {},
+ "order": 3,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "name",
+ "type": "STRING",
+ "links": [
+ 40
+ ],
+ "shape": 3,
+ "slot_index": 0
+ },
+ {
+ "name": "image",
+ "type": "IMAGE",
+ "links": null,
+ "shape": 3,
+ "slot_index": 1
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "ArtistsImage_Zho"
+ },
+ "widgets_values": [
+ "Alphonse Osbert .png",
+ 1.2
+ ]
+ },
+ {
+ "id": 20,
+ "type": "MovementsImage_Zho",
+ "pos": [
+ 730,
+ 1320
+ ],
+ "size": [
+ 300,
+ 400
+ ],
+ "flags": {},
+ "order": 4,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "name",
+ "type": "STRING",
+ "links": [
+ 39
+ ],
+ "shape": 3,
+ "slot_index": 0
+ },
+ {
+ "name": "image",
+ "type": "IMAGE",
+ "links": null,
+ "shape": 3
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "MovementsImage_Zho"
+ },
+ "widgets_values": [
+ "Gutai Group.png",
+ 0.6
+ ]
+ },
+ {
+ "id": 9,
+ "type": "ID_Prompt_Styler",
+ "pos": [
+ 1050,
+ 1570
+ ],
+ "size": [
+ 230,
+ 150
+ ],
+ "flags": {},
+ "order": 9,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "prompt",
+ "type": "STRING",
+ "link": 36,
+ "widget": {
+ "name": "prompt"
+ }
+ }
+ ],
+ "outputs": [
+ {
+ "name": "positive_prompt",
+ "type": "STRING",
+ "links": [
+ 28
+ ],
+ "shape": 3,
+ "slot_index": 0
+ },
+ {
+ "name": "negative_prompt",
+ "type": "STRING",
+ "links": [
+ 27
+ ],
+ "shape": 3,
+ "slot_index": 1
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "ID_Prompt_Styler"
+ },
+ "widgets_values": [
+ " a woman, retro futurism, retro game",
+ "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly, disfigured (lowres, low quality, worst quality:1.2), (text:1.2), watermark, painting, drawing, illustration, glitch,deformed, mutated, cross-eyed, ugly, disfigured",
+ "Neon"
+ ]
+ },
+ {
+ "id": 19,
+ "type": "ConcatText_Zho",
+ "pos": [
+ 1050,
+ 1420
+ ],
+ "size": [
+ 230,
+ 100
+ ],
+ "flags": {},
+ "order": 8,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "text_1",
+ "type": "STRING",
+ "link": 41,
+ "widget": {
+ "name": "text_1"
+ }
+ }
+ ],
+ "outputs": [
+ {
+ "name": "text",
+ "type": "STRING",
+ "links": [
+ 36
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "ConcatText_Zho"
+ },
+ "widgets_values": [
+ "",
+ " a woman, retro futurism, retro game"
+ ]
+ },
+ {
+ "id": 21,
+ "type": "ConcatText_Zho",
+ "pos": [
+ 1050,
+ 1320
+ ],
+ "size": [
+ 230,
+ 50
+ ],
+ "flags": {},
+ "order": 6,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "text_1",
+ "type": "STRING",
+ "link": 40,
+ "widget": {
+ "name": "text_1"
+ }
+ },
+ {
+ "name": "text_2",
+ "type": "STRING",
+ "link": 39,
+ "widget": {
+ "name": "text_2"
+ }
+ }
+ ],
+ "outputs": [
+ {
+ "name": "text",
+ "type": "STRING",
+ "links": [
+ 41
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "ConcatText_Zho"
+ },
+ "widgets_values": [
+ "",
+ " a woman, retro futurism, retro game"
+ ]
+ },
+ {
+ "id": 15,
+ "type": "IDGenerationNode",
+ "pos": [
+ 990,
+ 1770
+ ],
+ "size": [
+ 290,
+ 500
+ ],
+ "flags": {},
+ "order": 10,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "face_image",
+ "type": "IMAGE",
+ "link": 31,
+ "slot_index": 0
+ },
+ {
+ "name": "pipe",
+ "type": "MODEL",
+ "link": 30
+ },
+ {
+ "name": "insightface",
+ "type": "INSIGHTFACE",
+ "link": 29
+ },
+ {
+ "name": "positive",
+ "type": "STRING",
+ "link": 28,
+ "widget": {
+ "name": "positive"
+ }
+ },
+ {
+ "name": "negative",
+ "type": "STRING",
+ "link": 27,
+ "widget": {
+ "name": "negative"
+ }
+ }
+ ],
+ "outputs": [
+ {
+ "name": "IMAGE",
+ "type": "IMAGE",
+ "links": [
+ 32
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "IDGenerationNode"
+ },
+ "widgets_values": [
+ "",
+ "",
+ 0.8,
+ 0.8,
+ 50,
+ 5,
+ 1024,
+ 1024,
+ 780847710635035,
+ "randomize"
+ ]
+ }
+ ],
+ "links": [
+ [
+ 27,
+ 9,
+ 1,
+ 15,
+ 4,
+ "STRING"
+ ],
+ [
+ 28,
+ 9,
+ 0,
+ 15,
+ 3,
+ "STRING"
+ ],
+ [
+ 29,
+ 11,
+ 0,
+ 15,
+ 2,
+ "INSIGHTFACE"
+ ],
+ [
+ 30,
+ 4,
+ 0,
+ 15,
+ 1,
+ "MODEL"
+ ],
+ [
+ 31,
+ 6,
+ 0,
+ 15,
+ 0,
+ "IMAGE"
+ ],
+ [
+ 32,
+ 15,
+ 0,
+ 12,
+ 0,
+ "IMAGE"
+ ],
+ [
+ 33,
+ 2,
+ 0,
+ 16,
+ 0,
+ "MODEL"
+ ],
+ [
+ 34,
+ 16,
+ 0,
+ 4,
+ 0,
+ "MODEL"
+ ],
+ [
+ 36,
+ 19,
+ 0,
+ 9,
+ 0,
+ "STRING"
+ ],
+ [
+ 39,
+ 20,
+ 0,
+ 21,
+ 1,
+ "STRING"
+ ],
+ [
+ 40,
+ 18,
+ 0,
+ 21,
+ 0,
+ "STRING"
+ ],
+ [
+ 41,
+ 21,
+ 0,
+ 19,
+ 0,
+ "STRING"
+ ]
+ ],
+ "groups": [],
+ "config": {},
+ "extra": {},
+ "version": 0.4
+}
\ No newline at end of file
diff --git "a/ComfyUI-InstantID/INSTANTID WORKFLOWS/V1.0 InstantID_fromhub\343\200\220Zho\343\200\221.json" "b/ComfyUI-InstantID/INSTANTID WORKFLOWS/V1.0 InstantID_fromhub\343\200\220Zho\343\200\221.json"
new file mode 100644
index 0000000000000000000000000000000000000000..c39a7a52aa7981f2699e0c6e5eabd8efb5a82de3
--- /dev/null
+++ "b/ComfyUI-InstantID/INSTANTID WORKFLOWS/V1.0 InstantID_fromhub\343\200\220Zho\343\200\221.json"
@@ -0,0 +1,400 @@
+{
+ "last_node_id": 16,
+ "last_link_id": 34,
+ "nodes": [
+ {
+ "id": 6,
+ "type": "LoadImage",
+ "pos": [
+ 100,
+ 1770
+ ],
+ "size": {
+ "0": 380,
+ "1": 500
+ },
+ "flags": {},
+ "order": 0,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "IMAGE",
+ "type": "IMAGE",
+ "links": [
+ 31
+ ],
+ "shape": 3
+ },
+ {
+ "name": "MASK",
+ "type": "MASK",
+ "links": null,
+ "shape": 3
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "LoadImage"
+ },
+ "widgets_values": [
+ "download.jpg",
+ "image"
+ ]
+ },
+ {
+ "id": 15,
+ "type": "IDGenerationNode",
+ "pos": [
+ 690,
+ 1320
+ ],
+ "size": [
+ 280,
+ 400
+ ],
+ "flags": {},
+ "order": 6,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "face_image",
+ "type": "IMAGE",
+ "link": 31,
+ "slot_index": 0
+ },
+ {
+ "name": "pipe",
+ "type": "MODEL",
+ "link": 30
+ },
+ {
+ "name": "insightface",
+ "type": "INSIGHTFACE",
+ "link": 29
+ },
+ {
+ "name": "positive",
+ "type": "STRING",
+ "link": 28,
+ "widget": {
+ "name": "positive"
+ }
+ },
+ {
+ "name": "negative",
+ "type": "STRING",
+ "link": 27,
+ "widget": {
+ "name": "negative"
+ }
+ }
+ ],
+ "outputs": [
+ {
+ "name": "IMAGE",
+ "type": "IMAGE",
+ "links": [
+ 32
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "IDGenerationNode"
+ },
+ "widgets_values": [
+ "",
+ "",
+ 0.8,
+ 0.8,
+ 50,
+ 5,
+ 1024,
+ 1024,
+ 1010461052994244,
+ "randomize"
+ ]
+ },
+ {
+ "id": 16,
+ "type": "IDBaseModelLoader_fromhub",
+ "pos": [
+ 100,
+ 1420
+ ],
+ "size": [
+ 290,
+ 60
+ ],
+ "flags": {},
+ "order": 4,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "controlnet",
+ "type": "MODEL",
+ "link": 33
+ }
+ ],
+ "outputs": [
+ {
+ "name": "pipe",
+ "type": "MODEL",
+ "links": [
+ 34
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "IDBaseModelLoader_fromhub"
+ },
+ "widgets_values": [
+ "wangqixun/YamerMIX_v8"
+ ]
+ },
+ {
+ "id": 11,
+ "type": "InsightFaceLoader",
+ "pos": [
+ 100,
+ 1660
+ ],
+ "size": [
+ 290,
+ 60
+ ],
+ "flags": {},
+ "order": 1,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "INSIGHTFACE",
+ "type": "INSIGHTFACE",
+ "links": [
+ 29
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "InsightFaceLoader"
+ },
+ "widgets_values": [
+ "CUDA"
+ ]
+ },
+ {
+ "id": 2,
+ "type": "IDControlNetLoader",
+ "pos": [
+ 100,
+ 1320
+ ],
+ "size": [
+ 290,
+ 60
+ ],
+ "flags": {},
+ "order": 2,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "controlnet",
+ "type": "MODEL",
+ "links": [
+ 33
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "IDControlNetLoader"
+ },
+ "widgets_values": [
+ "/content/ComfyUI/models/controlnet"
+ ]
+ },
+ {
+ "id": 12,
+ "type": "PreviewImage",
+ "pos": [
+ 500,
+ 1770
+ ],
+ "size": {
+ "0": 470,
+ "1": 500
+ },
+ "flags": {},
+ "order": 7,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "images",
+ "type": "IMAGE",
+ "link": 32
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "PreviewImage"
+ }
+ },
+ {
+ "id": 9,
+ "type": "ID_Prompt_Styler",
+ "pos": [
+ 410,
+ 1320
+ ],
+ "size": [
+ 260,
+ 400
+ ],
+ "flags": {},
+ "order": 3,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "positive_prompt",
+ "type": "STRING",
+ "links": [
+ 28
+ ],
+ "shape": 3,
+ "slot_index": 0
+ },
+ {
+ "name": "negative_prompt",
+ "type": "STRING",
+ "links": [
+ 27
+ ],
+ "shape": 3,
+ "slot_index": 1
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "ID_Prompt_Styler"
+ },
+ "widgets_values": [
+ " a woman, retro futurism, retro game",
+ "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly, disfigured (lowres, low quality, worst quality:1.2), (text:1.2), watermark, painting, drawing, illustration, glitch,deformed, mutated, cross-eyed, ugly, disfigured",
+ "Snow"
+ ]
+ },
+ {
+ "id": 4,
+ "type": "Ipadapter_instantidLoader",
+ "pos": [
+ 98,
+ 1526
+ ],
+ "size": [
+ 290,
+ 90
+ ],
+ "flags": {},
+ "order": 5,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "pipe",
+ "type": "MODEL",
+ "link": 34
+ }
+ ],
+ "outputs": [
+ {
+ "name": "MODEL",
+ "type": "MODEL",
+ "links": [
+ 30
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "Ipadapter_instantidLoader"
+ },
+ "widgets_values": [
+ "/content/ComfyUI/models/checkpoints",
+ "ip-adapter.bin"
+ ]
+ }
+ ],
+ "links": [
+ [
+ 27,
+ 9,
+ 1,
+ 15,
+ 4,
+ "STRING"
+ ],
+ [
+ 28,
+ 9,
+ 0,
+ 15,
+ 3,
+ "STRING"
+ ],
+ [
+ 29,
+ 11,
+ 0,
+ 15,
+ 2,
+ "INSIGHTFACE"
+ ],
+ [
+ 30,
+ 4,
+ 0,
+ 15,
+ 1,
+ "MODEL"
+ ],
+ [
+ 31,
+ 6,
+ 0,
+ 15,
+ 0,
+ "IMAGE"
+ ],
+ [
+ 32,
+ 15,
+ 0,
+ 12,
+ 0,
+ "IMAGE"
+ ],
+ [
+ 33,
+ 2,
+ 0,
+ 16,
+ 0,
+ "MODEL"
+ ],
+ [
+ 34,
+ 16,
+ 0,
+ 4,
+ 0,
+ "MODEL"
+ ]
+ ],
+ "groups": [],
+ "config": {},
+ "extra": {},
+ "version": 0.4
+}
\ No newline at end of file
diff --git "a/ComfyUI-InstantID/INSTANTID WORKFLOWS/V1.0 InstantID_locally\343\200\220Zho\343\200\221.json" "b/ComfyUI-InstantID/INSTANTID WORKFLOWS/V1.0 InstantID_locally\343\200\220Zho\343\200\221.json"
new file mode 100644
index 0000000000000000000000000000000000000000..a19872fd2c1d46f44410330f48ef37e107aa42e5
--- /dev/null
+++ "b/ComfyUI-InstantID/INSTANTID WORKFLOWS/V1.0 InstantID_locally\343\200\220Zho\343\200\221.json"
@@ -0,0 +1,400 @@
+{
+ "last_node_id": 17,
+ "last_link_id": 36,
+ "nodes": [
+ {
+ "id": 6,
+ "type": "LoadImage",
+ "pos": [
+ 100,
+ 1770
+ ],
+ "size": {
+ "0": 380,
+ "1": 500
+ },
+ "flags": {},
+ "order": 0,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "IMAGE",
+ "type": "IMAGE",
+ "links": [
+ 31
+ ],
+ "shape": 3
+ },
+ {
+ "name": "MASK",
+ "type": "MASK",
+ "links": null,
+ "shape": 3
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "LoadImage"
+ },
+ "widgets_values": [
+ "download.jpg",
+ "image"
+ ]
+ },
+ {
+ "id": 15,
+ "type": "IDGenerationNode",
+ "pos": [
+ 690,
+ 1320
+ ],
+ "size": [
+ 280,
+ 400
+ ],
+ "flags": {},
+ "order": 6,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "face_image",
+ "type": "IMAGE",
+ "link": 31,
+ "slot_index": 0
+ },
+ {
+ "name": "pipe",
+ "type": "MODEL",
+ "link": 30
+ },
+ {
+ "name": "insightface",
+ "type": "INSIGHTFACE",
+ "link": 29
+ },
+ {
+ "name": "positive",
+ "type": "STRING",
+ "link": 28,
+ "widget": {
+ "name": "positive"
+ }
+ },
+ {
+ "name": "negative",
+ "type": "STRING",
+ "link": 27,
+ "widget": {
+ "name": "negative"
+ }
+ }
+ ],
+ "outputs": [
+ {
+ "name": "IMAGE",
+ "type": "IMAGE",
+ "links": [
+ 32
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "IDGenerationNode"
+ },
+ "widgets_values": [
+ "",
+ "",
+ 0.8,
+ 0.8,
+ 50,
+ 5,
+ 1024,
+ 1024,
+ 836332103938257,
+ "randomize"
+ ]
+ },
+ {
+ "id": 11,
+ "type": "InsightFaceLoader",
+ "pos": [
+ 100,
+ 1660
+ ],
+ "size": [
+ 290,
+ 60
+ ],
+ "flags": {},
+ "order": 1,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "INSIGHTFACE",
+ "type": "INSIGHTFACE",
+ "links": [
+ 29
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "InsightFaceLoader"
+ },
+ "widgets_values": [
+ "CUDA"
+ ]
+ },
+ {
+ "id": 12,
+ "type": "PreviewImage",
+ "pos": [
+ 500,
+ 1770
+ ],
+ "size": {
+ "0": 470,
+ "1": 500
+ },
+ "flags": {},
+ "order": 7,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "images",
+ "type": "IMAGE",
+ "link": 32
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "PreviewImage"
+ }
+ },
+ {
+ "id": 4,
+ "type": "Ipadapter_instantidLoader",
+ "pos": [
+ 98,
+ 1526
+ ],
+ "size": [
+ 290,
+ 90
+ ],
+ "flags": {},
+ "order": 5,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "pipe",
+ "type": "MODEL",
+ "link": 36
+ }
+ ],
+ "outputs": [
+ {
+ "name": "MODEL",
+ "type": "MODEL",
+ "links": [
+ 30
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "Ipadapter_instantidLoader"
+ },
+ "widgets_values": [
+ "/content/ComfyUI/models/checkpoints",
+ "ip-adapter.bin"
+ ]
+ },
+ {
+ "id": 9,
+ "type": "ID_Prompt_Styler",
+ "pos": [
+ 410,
+ 1320
+ ],
+ "size": [
+ 260,
+ 400
+ ],
+ "flags": {},
+ "order": 2,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "positive_prompt",
+ "type": "STRING",
+ "links": [
+ 28
+ ],
+ "shape": 3,
+ "slot_index": 0
+ },
+ {
+ "name": "negative_prompt",
+ "type": "STRING",
+ "links": [
+ 27
+ ],
+ "shape": 3,
+ "slot_index": 1
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "ID_Prompt_Styler"
+ },
+ "widgets_values": [
+ " a woman, retro futurism, retro game",
+ "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly, disfigured (lowres, low quality, worst quality:1.2), (text:1.2), watermark, painting, drawing, illustration, glitch,deformed, mutated, cross-eyed, ugly, disfigured",
+ "Vibrant Color"
+ ]
+ },
+ {
+ "id": 2,
+ "type": "IDControlNetLoader",
+ "pos": [
+ 100,
+ 1320
+ ],
+ "size": [
+ 290,
+ 60
+ ],
+ "flags": {},
+ "order": 3,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "controlnet",
+ "type": "MODEL",
+ "links": [
+ 35
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "IDControlNetLoader"
+ },
+ "widgets_values": [
+ "/content/ComfyUI/models/controlnet"
+ ]
+ },
+ {
+ "id": 17,
+ "type": "IDBaseModelLoader_local",
+ "pos": [
+ 100,
+ 1420
+ ],
+ "size": [
+ 290,
+ 60
+ ],
+ "flags": {},
+ "order": 4,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "controlnet",
+ "type": "MODEL",
+ "link": 35
+ }
+ ],
+ "outputs": [
+ {
+ "name": "pipe",
+ "type": "MODEL",
+ "links": [
+ 36
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "IDBaseModelLoader_local"
+ },
+ "widgets_values": [
+ "sd_xl_base_1.0.safetensors"
+ ]
+ }
+ ],
+ "links": [
+ [
+ 27,
+ 9,
+ 1,
+ 15,
+ 4,
+ "STRING"
+ ],
+ [
+ 28,
+ 9,
+ 0,
+ 15,
+ 3,
+ "STRING"
+ ],
+ [
+ 29,
+ 11,
+ 0,
+ 15,
+ 2,
+ "INSIGHTFACE"
+ ],
+ [
+ 30,
+ 4,
+ 0,
+ 15,
+ 1,
+ "MODEL"
+ ],
+ [
+ 31,
+ 6,
+ 0,
+ 15,
+ 0,
+ "IMAGE"
+ ],
+ [
+ 32,
+ 15,
+ 0,
+ 12,
+ 0,
+ "IMAGE"
+ ],
+ [
+ 35,
+ 2,
+ 0,
+ 17,
+ 0,
+ "MODEL"
+ ],
+ [
+ 36,
+ 17,
+ 0,
+ 4,
+ 0,
+ "MODEL"
+ ]
+ ],
+ "groups": [],
+ "config": {},
+ "extra": {},
+ "version": 0.4
+}
\ No newline at end of file
diff --git "a/ComfyUI-InstantID/INSTANTID WORKFLOWS/V2.0 InstantID_fromhub_pose_ref\343\200\220Zho\343\200\221.json" "b/ComfyUI-InstantID/INSTANTID WORKFLOWS/V2.0 InstantID_fromhub_pose_ref\343\200\220Zho\343\200\221.json"
new file mode 100644
index 0000000000000000000000000000000000000000..6936b3e2576b26d36b739df82fe7b80dee18ec29
--- /dev/null
+++ "b/ComfyUI-InstantID/INSTANTID WORKFLOWS/V2.0 InstantID_fromhub_pose_ref\343\200\220Zho\343\200\221.json"
@@ -0,0 +1,451 @@
+{
+ "last_node_id": 9,
+ "last_link_id": 9,
+ "nodes": [
+ {
+ "id": 3,
+ "type": "IDBaseModelLoader_fromhub",
+ "pos": [
+ 350,
+ 640
+ ],
+ "size": [
+ 310,
+ 60
+ ],
+ "flags": {},
+ "order": 5,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "controlnet",
+ "type": "MODEL",
+ "link": 1
+ }
+ ],
+ "outputs": [
+ {
+ "name": "pipe",
+ "type": "MODEL",
+ "links": [
+ 2
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "IDBaseModelLoader_fromhub"
+ },
+ "widgets_values": [
+ "wangqixun/YamerMIX_v8"
+ ]
+ },
+ {
+ "id": 2,
+ "type": "IDControlNetLoader",
+ "pos": [
+ 350,
+ 530
+ ],
+ "size": [
+ 310,
+ 60
+ ],
+ "flags": {},
+ "order": 0,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "controlnet",
+ "type": "MODEL",
+ "links": [
+ 1
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "IDControlNetLoader"
+ },
+ "widgets_values": [
+ "/content/ComfyUI/models/controlnet"
+ ]
+ },
+ {
+ "id": 1,
+ "type": "InsightFaceLoader_Zho",
+ "pos": [
+ 680,
+ 530
+ ],
+ "size": [
+ 370,
+ 60
+ ],
+ "flags": {},
+ "order": 1,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "INSIGHTFACEMODEL",
+ "type": "INSIGHTFACEMODEL",
+ "links": [
+ 7
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "InsightFaceLoader_Zho"
+ },
+ "widgets_values": [
+ "CUDA"
+ ]
+ },
+ {
+ "id": 4,
+ "type": "Ipadapter_instantidLoader",
+ "pos": [
+ 350,
+ 750
+ ],
+ "size": [
+ 310,
+ 80
+ ],
+ "flags": {},
+ "order": 6,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "pipe",
+ "type": "MODEL",
+ "link": 2
+ }
+ ],
+ "outputs": [
+ {
+ "name": "MODEL",
+ "type": "MODEL",
+ "links": [
+ 8
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "Ipadapter_instantidLoader"
+ },
+ "widgets_values": [
+ "/content/ComfyUI/models/checkpoints",
+ "ip-adapter.bin"
+ ]
+ },
+ {
+ "id": 7,
+ "type": "LoadImage",
+ "pos": [
+ 350,
+ 880
+ ],
+ "size": [
+ 310,
+ 430
+ ],
+ "flags": {},
+ "order": 2,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "IMAGE",
+ "type": "IMAGE",
+ "links": [
+ 5
+ ],
+ "shape": 3
+ },
+ {
+ "name": "MASK",
+ "type": "MASK",
+ "links": null,
+ "shape": 3
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "LoadImage"
+ },
+ "widgets_values": [
+ "download.jpg",
+ "image"
+ ]
+ },
+ {
+ "id": 5,
+ "type": "ID_Prompt_Styler",
+ "pos": [
+ 680,
+ 640
+ ],
+ "size": [
+ 370,
+ 190
+ ],
+ "flags": {},
+ "order": 3,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "positive_prompt",
+ "type": "STRING",
+ "links": [
+ 3
+ ],
+ "shape": 3,
+ "slot_index": 0
+ },
+ {
+ "name": "negative_prompt",
+ "type": "STRING",
+ "links": [
+ 4
+ ],
+ "shape": 3,
+ "slot_index": 1
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "ID_Prompt_Styler"
+ },
+ "widgets_values": [
+ "a woman, retro futurism, retro game",
+ "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly",
+ "Vibrant Color"
+ ]
+ },
+ {
+ "id": 9,
+ "type": "PreviewImage",
+ "pos": [
+ 680,
+ 880
+ ],
+ "size": [
+ 720,
+ 980
+ ],
+ "flags": {},
+ "order": 8,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "images",
+ "type": "IMAGE",
+ "link": 9
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "PreviewImage"
+ }
+ },
+ {
+ "id": 6,
+ "type": "IDGenerationNode",
+ "pos": [
+ 1070,
+ 530
+ ],
+ "size": [
+ 330,
+ 300
+ ],
+ "flags": {},
+ "order": 7,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "face_image",
+ "type": "IMAGE",
+ "link": 5,
+ "slot_index": 0
+ },
+ {
+ "name": "pipe",
+ "type": "MODEL",
+ "link": 8
+ },
+ {
+ "name": "insightface",
+ "type": "INSIGHTFACEMODEL",
+ "link": 7
+ },
+ {
+ "name": "pose_image_optional",
+ "type": "IMAGE",
+ "link": 6,
+ "slot_index": 3
+ },
+ {
+ "name": "positive",
+ "type": "STRING",
+ "link": 3,
+ "widget": {
+ "name": "positive"
+ }
+ },
+ {
+ "name": "negative",
+ "type": "STRING",
+ "link": 4,
+ "widget": {
+ "name": "negative"
+ }
+ }
+ ],
+ "outputs": [
+ {
+ "name": "IMAGE",
+ "type": "IMAGE",
+ "links": [
+ 9
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "IDGenerationNode"
+ },
+ "widgets_values": [
+ "",
+ "",
+ 0.8,
+ 0.8,
+ 50,
+ 5,
+ true,
+ 1041987604602403,
+ "fixed"
+ ]
+ },
+ {
+ "id": 8,
+ "type": "LoadImage",
+ "pos": [
+ 350,
+ 1360
+ ],
+ "size": [
+ 310,
+ 500
+ ],
+ "flags": {},
+ "order": 4,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "IMAGE",
+ "type": "IMAGE",
+ "links": [
+ 6
+ ],
+ "shape": 3
+ },
+ {
+ "name": "MASK",
+ "type": "MASK",
+ "links": null,
+ "shape": 3
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "LoadImage"
+ },
+ "widgets_values": [
+ "u=425589426,3799949985&fm=253&fmt=auto&app=138&f=JPEG.webp",
+ "image"
+ ]
+ }
+ ],
+ "links": [
+ [
+ 1,
+ 2,
+ 0,
+ 3,
+ 0,
+ "MODEL"
+ ],
+ [
+ 2,
+ 3,
+ 0,
+ 4,
+ 0,
+ "MODEL"
+ ],
+ [
+ 3,
+ 5,
+ 0,
+ 6,
+ 4,
+ "STRING"
+ ],
+ [
+ 4,
+ 5,
+ 1,
+ 6,
+ 5,
+ "STRING"
+ ],
+ [
+ 5,
+ 7,
+ 0,
+ 6,
+ 0,
+ "IMAGE"
+ ],
+ [
+ 6,
+ 8,
+ 0,
+ 6,
+ 3,
+ "IMAGE"
+ ],
+ [
+ 7,
+ 1,
+ 0,
+ 6,
+ 2,
+ "INSIGHTFACEMODEL"
+ ],
+ [
+ 8,
+ 4,
+ 0,
+ 6,
+ 1,
+ "MODEL"
+ ],
+ [
+ 9,
+ 6,
+ 0,
+ 9,
+ 0,
+ "IMAGE"
+ ]
+ ],
+ "groups": [],
+ "config": {},
+ "extra": {},
+ "version": 0.4
+}
diff --git "a/ComfyUI-InstantID/INSTANTID WORKFLOWS/V2.0 InstantID_locally_pose_ref\343\200\220Zho\343\200\221.json" "b/ComfyUI-InstantID/INSTANTID WORKFLOWS/V2.0 InstantID_locally_pose_ref\343\200\220Zho\343\200\221.json"
new file mode 100644
index 0000000000000000000000000000000000000000..74affc38af6fd71166d5959b00878af0c5e3e02f
--- /dev/null
+++ "b/ComfyUI-InstantID/INSTANTID WORKFLOWS/V2.0 InstantID_locally_pose_ref\343\200\220Zho\343\200\221.json"
@@ -0,0 +1,452 @@
+{
+ "last_node_id": 10,
+ "last_link_id": 11,
+ "nodes": [
+ {
+ "id": 2,
+ "type": "IDControlNetLoader",
+ "pos": [
+ 350,
+ 530
+ ],
+ "size": [
+ 310,
+ 60
+ ],
+ "flags": {},
+ "order": 0,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "controlnet",
+ "type": "MODEL",
+ "links": [
+ 11
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "IDControlNetLoader"
+ },
+ "widgets_values": [
+ "/content/ComfyUI/models/controlnet"
+ ]
+ },
+ {
+ "id": 1,
+ "type": "InsightFaceLoader_Zho",
+ "pos": [
+ 680,
+ 530
+ ],
+ "size": [
+ 370,
+ 60
+ ],
+ "flags": {},
+ "order": 1,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "INSIGHTFACEMODEL",
+ "type": "INSIGHTFACEMODEL",
+ "links": [
+ 7
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "InsightFaceLoader_Zho"
+ },
+ "widgets_values": [
+ "CUDA"
+ ]
+ },
+ {
+ "id": 4,
+ "type": "Ipadapter_instantidLoader",
+ "pos": [
+ 350,
+ 750
+ ],
+ "size": [
+ 310,
+ 80
+ ],
+ "flags": {},
+ "order": 6,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "pipe",
+ "type": "MODEL",
+ "link": 10
+ }
+ ],
+ "outputs": [
+ {
+ "name": "MODEL",
+ "type": "MODEL",
+ "links": [
+ 8
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "Ipadapter_instantidLoader"
+ },
+ "widgets_values": [
+ "/content/ComfyUI/models/checkpoints",
+ "ip-adapter.bin"
+ ]
+ },
+ {
+ "id": 7,
+ "type": "LoadImage",
+ "pos": [
+ 350,
+ 880
+ ],
+ "size": [
+ 310,
+ 430
+ ],
+ "flags": {},
+ "order": 2,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "IMAGE",
+ "type": "IMAGE",
+ "links": [
+ 5
+ ],
+ "shape": 3
+ },
+ {
+ "name": "MASK",
+ "type": "MASK",
+ "links": null,
+ "shape": 3
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "LoadImage"
+ },
+ "widgets_values": [
+ "download.jpg",
+ "image"
+ ]
+ },
+ {
+ "id": 9,
+ "type": "PreviewImage",
+ "pos": [
+ 680,
+ 880
+ ],
+ "size": [
+ 720,
+ 980
+ ],
+ "flags": {},
+ "order": 8,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "images",
+ "type": "IMAGE",
+ "link": 9
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "PreviewImage"
+ }
+ },
+ {
+ "id": 8,
+ "type": "LoadImage",
+ "pos": [
+ 350,
+ 1360
+ ],
+ "size": [
+ 310,
+ 500
+ ],
+ "flags": {},
+ "order": 3,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "IMAGE",
+ "type": "IMAGE",
+ "links": [
+ 6
+ ],
+ "shape": 3
+ },
+ {
+ "name": "MASK",
+ "type": "MASK",
+ "links": null,
+ "shape": 3
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "LoadImage"
+ },
+ "widgets_values": [
+ "u=425589426,3799949985&fm=253&fmt=auto&app=138&f=JPEG.webp",
+ "image"
+ ]
+ },
+ {
+ "id": 10,
+ "type": "IDBaseModelLoader_local",
+ "pos": [
+ 350,
+ 640
+ ],
+ "size": [
+ 310,
+ 60
+ ],
+ "flags": {},
+ "order": 5,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "controlnet",
+ "type": "MODEL",
+ "link": 11,
+ "slot_index": 0
+ }
+ ],
+ "outputs": [
+ {
+ "name": "pipe",
+ "type": "MODEL",
+ "links": [
+ 10
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "IDBaseModelLoader_local"
+ },
+ "widgets_values": [
+ "sd_xl_base_1.0.safetensors"
+ ]
+ },
+ {
+ "id": 5,
+ "type": "ID_Prompt_Styler",
+ "pos": [
+ 680,
+ 640
+ ],
+ "size": [
+ 370,
+ 190
+ ],
+ "flags": {},
+ "order": 4,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "positive_prompt",
+ "type": "STRING",
+ "links": [
+ 3
+ ],
+ "shape": 3,
+ "slot_index": 0
+ },
+ {
+ "name": "negative_prompt",
+ "type": "STRING",
+ "links": [
+ 4
+ ],
+ "shape": 3,
+ "slot_index": 1
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "ID_Prompt_Styler"
+ },
+ "widgets_values": [
+ "a woman, retro futurism, retro game",
+ "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly",
+ "Film Noir"
+ ]
+ },
+ {
+ "id": 6,
+ "type": "IDGenerationNode",
+ "pos": [
+ 1070,
+ 530
+ ],
+ "size": [
+ 330,
+ 300
+ ],
+ "flags": {},
+ "order": 7,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "face_image",
+ "type": "IMAGE",
+ "link": 5,
+ "slot_index": 0
+ },
+ {
+ "name": "pipe",
+ "type": "MODEL",
+ "link": 8
+ },
+ {
+ "name": "insightface",
+ "type": "INSIGHTFACEMODEL",
+ "link": 7
+ },
+ {
+ "name": "pose_image_optional",
+ "type": "IMAGE",
+ "link": 6,
+ "slot_index": 3
+ },
+ {
+ "name": "positive",
+ "type": "STRING",
+ "link": 3,
+ "widget": {
+ "name": "positive"
+ }
+ },
+ {
+ "name": "negative",
+ "type": "STRING",
+ "link": 4,
+ "widget": {
+ "name": "negative"
+ }
+ }
+ ],
+ "outputs": [
+ {
+ "name": "IMAGE",
+ "type": "IMAGE",
+ "links": [
+ 9
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "IDGenerationNode"
+ },
+ "widgets_values": [
+ "",
+ "",
+ 0.8,
+ 0.8,
+ 50,
+ 5,
+ true,
+ 210876416756428,
+ "randomize"
+ ]
+ }
+ ],
+ "links": [
+ [
+ 3,
+ 5,
+ 0,
+ 6,
+ 4,
+ "STRING"
+ ],
+ [
+ 4,
+ 5,
+ 1,
+ 6,
+ 5,
+ "STRING"
+ ],
+ [
+ 5,
+ 7,
+ 0,
+ 6,
+ 0,
+ "IMAGE"
+ ],
+ [
+ 6,
+ 8,
+ 0,
+ 6,
+ 3,
+ "IMAGE"
+ ],
+ [
+ 7,
+ 1,
+ 0,
+ 6,
+ 2,
+ "INSIGHTFACEMODEL"
+ ],
+ [
+ 8,
+ 4,
+ 0,
+ 6,
+ 1,
+ "MODEL"
+ ],
+ [
+ 9,
+ 6,
+ 0,
+ 9,
+ 0,
+ "IMAGE"
+ ],
+ [
+ 10,
+ 10,
+ 0,
+ 4,
+ 0,
+ "MODEL"
+ ],
+ [
+ 11,
+ 2,
+ 0,
+ 10,
+ 0,
+ "MODEL"
+ ]
+ ],
+ "groups": [],
+ "config": {},
+ "extra": {},
+ "version": 0.4
+}
diff --git "a/ComfyUI-InstantID/INSTANTID WORKFLOWS/V2.0 InstantID_pose_ref + ArtGallery \343\200\220Zho\343\200\221.json" "b/ComfyUI-InstantID/INSTANTID WORKFLOWS/V2.0 InstantID_pose_ref + ArtGallery \343\200\220Zho\343\200\221.json"
new file mode 100644
index 0000000000000000000000000000000000000000..d62fd9b3537b012ad81eac4642adc97e828d7aba
--- /dev/null
+++ "b/ComfyUI-InstantID/INSTANTID WORKFLOWS/V2.0 InstantID_pose_ref + ArtGallery \343\200\220Zho\343\200\221.json"
@@ -0,0 +1,665 @@
+{
+ "last_node_id": 13,
+ "last_link_id": 13,
+ "nodes": [
+ {
+ "id": 3,
+ "type": "IDBaseModelLoader_fromhub",
+ "pos": [
+ 350,
+ 530
+ ],
+ "size": {
+ "0": 310,
+ "1": 60
+ },
+ "flags": {},
+ "order": 6,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "controlnet",
+ "type": "MODEL",
+ "link": 1
+ }
+ ],
+ "outputs": [
+ {
+ "name": "pipe",
+ "type": "MODEL",
+ "links": [
+ 2
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "IDBaseModelLoader_fromhub"
+ },
+ "widgets_values": [
+ "wangqixun/YamerMIX_v8"
+ ]
+ },
+ {
+ "id": 4,
+ "type": "Ipadapter_instantidLoader",
+ "pos": [
+ 350,
+ 640
+ ],
+ "size": {
+ "0": 310,
+ "1": 82
+ },
+ "flags": {},
+ "order": 8,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "pipe",
+ "type": "MODEL",
+ "link": 2
+ }
+ ],
+ "outputs": [
+ {
+ "name": "MODEL",
+ "type": "MODEL",
+ "links": [
+ 8
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "Ipadapter_instantidLoader"
+ },
+ "widgets_values": [
+ "/content/ComfyUI/models/checkpoints",
+ "ip-adapter.bin"
+ ]
+ },
+ {
+ "id": 1,
+ "type": "InsightFaceLoader_Zho",
+ "pos": [
+ 350,
+ 770
+ ],
+ "size": [
+ 310,
+ 60
+ ],
+ "flags": {},
+ "order": 0,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "INSIGHTFACEMODEL",
+ "type": "INSIGHTFACEMODEL",
+ "links": [
+ 7
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "InsightFaceLoader_Zho"
+ },
+ "widgets_values": [
+ "CUDA"
+ ]
+ },
+ {
+ "id": 2,
+ "type": "IDControlNetLoader",
+ "pos": [
+ 350,
+ 420
+ ],
+ "size": {
+ "0": 310,
+ "1": 60
+ },
+ "flags": {},
+ "order": 1,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "controlnet",
+ "type": "MODEL",
+ "links": [
+ 1
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "IDControlNetLoader"
+ },
+ "widgets_values": [
+ "/content/ComfyUI/models/controlnet"
+ ]
+ },
+ {
+ "id": 13,
+ "type": "ConcatText_Zho",
+ "pos": [
+ 1340,
+ 530
+ ],
+ "size": [
+ 300,
+ 96
+ ],
+ "flags": {},
+ "order": 9,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "text_1",
+ "type": "STRING",
+ "link": 12,
+ "widget": {
+ "name": "text_1"
+ }
+ }
+ ],
+ "outputs": [
+ {
+ "name": "text",
+ "type": "STRING",
+ "links": [
+ 13
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "ConcatText_Zho"
+ },
+ "widgets_values": [
+ "",
+ "a woman, retro futurism, retro game"
+ ]
+ },
+ {
+ "id": 11,
+ "type": "MovementsImage_Zho",
+ "pos": [
+ 1010,
+ 420
+ ],
+ "size": [
+ 310,
+ 410
+ ],
+ "flags": {},
+ "order": 2,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "name",
+ "type": "STRING",
+ "links": [
+ 11
+ ],
+ "shape": 3,
+ "slot_index": 0
+ },
+ {
+ "name": "image",
+ "type": "IMAGE",
+ "links": null,
+ "shape": 3
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "MovementsImage_Zho"
+ },
+ "widgets_values": [
+ "Abstract Expressionism.png",
+ 1.2
+ ]
+ },
+ {
+ "id": 12,
+ "type": "ConcatText_Zho",
+ "pos": [
+ 1340,
+ 420
+ ],
+ "size": [
+ 300,
+ 66.00003337860107
+ ],
+ "flags": {},
+ "order": 7,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "text_1",
+ "type": "STRING",
+ "link": 10,
+ "widget": {
+ "name": "text_1"
+ }
+ },
+ {
+ "name": "text_2",
+ "type": "STRING",
+ "link": 11,
+ "widget": {
+ "name": "text_2"
+ }
+ }
+ ],
+ "outputs": [
+ {
+ "name": "text",
+ "type": "STRING",
+ "links": [
+ 12
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "ConcatText_Zho"
+ },
+ "widgets_values": [
+ "",
+ ""
+ ]
+ },
+ {
+ "id": 7,
+ "type": "LoadImage",
+ "pos": [
+ 350,
+ 880
+ ],
+ "size": {
+ "0": 310,
+ "1": 430
+ },
+ "flags": {},
+ "order": 3,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "IMAGE",
+ "type": "IMAGE",
+ "links": [
+ 5
+ ],
+ "shape": 3
+ },
+ {
+ "name": "MASK",
+ "type": "MASK",
+ "links": null,
+ "shape": 3
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "LoadImage"
+ },
+ "widgets_values": [
+ "download.jpg",
+ "image"
+ ]
+ },
+ {
+ "id": 5,
+ "type": "ID_Prompt_Styler",
+ "pos": [
+ 1340,
+ 670
+ ],
+ "size": [
+ 300,
+ 160
+ ],
+ "flags": {},
+ "order": 10,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "prompt",
+ "type": "STRING",
+ "link": 13,
+ "widget": {
+ "name": "prompt"
+ }
+ }
+ ],
+ "outputs": [
+ {
+ "name": "positive_prompt",
+ "type": "STRING",
+ "links": [
+ 3
+ ],
+ "shape": 3,
+ "slot_index": 0
+ },
+ {
+ "name": "negative_prompt",
+ "type": "STRING",
+ "links": [
+ 4
+ ],
+ "shape": 3,
+ "slot_index": 1
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "ID_Prompt_Styler"
+ },
+ "widgets_values": [
+ "",
+ "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly",
+ "Neon"
+ ]
+ },
+ {
+ "id": 8,
+ "type": "LoadImage",
+ "pos": [
+ 350,
+ 1360
+ ],
+ "size": [
+ 310,
+ 270
+ ],
+ "flags": {},
+ "order": 4,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "IMAGE",
+ "type": "IMAGE",
+ "links": [
+ 6
+ ],
+ "shape": 3
+ },
+ {
+ "name": "MASK",
+ "type": "MASK",
+ "links": null,
+ "shape": 3
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "LoadImage"
+ },
+ "widgets_values": [
+ "u=2585260240,4217738218&fm=253&fmt=auto&app=120&f=JPEG.webp",
+ "image"
+ ]
+ },
+ {
+ "id": 9,
+ "type": "PreviewImage",
+ "pos": [
+ 680,
+ 880
+ ],
+ "size": [
+ 1310,
+ 750
+ ],
+ "flags": {},
+ "order": 12,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "images",
+ "type": "IMAGE",
+ "link": 9
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "PreviewImage"
+ }
+ },
+ {
+ "id": 6,
+ "type": "IDGenerationNode",
+ "pos": [
+ 1660,
+ 420
+ ],
+ "size": [
+ 330,
+ 410
+ ],
+ "flags": {},
+ "order": 11,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "face_image",
+ "type": "IMAGE",
+ "link": 5,
+ "slot_index": 0
+ },
+ {
+ "name": "pipe",
+ "type": "MODEL",
+ "link": 8
+ },
+ {
+ "name": "insightface",
+ "type": "INSIGHTFACEMODEL",
+ "link": 7
+ },
+ {
+ "name": "pose_image_optional",
+ "type": "IMAGE",
+ "link": 6,
+ "slot_index": 3
+ },
+ {
+ "name": "positive",
+ "type": "STRING",
+ "link": 3,
+ "widget": {
+ "name": "positive"
+ }
+ },
+ {
+ "name": "negative",
+ "type": "STRING",
+ "link": 4,
+ "widget": {
+ "name": "negative"
+ }
+ }
+ ],
+ "outputs": [
+ {
+ "name": "IMAGE",
+ "type": "IMAGE",
+ "links": [
+ 9
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "IDGenerationNode"
+ },
+ "widgets_values": [
+ "",
+ "",
+ 0.8,
+ 0.8,
+ 50,
+ 5,
+ true,
+ 1041987604602403,
+ "fixed"
+ ]
+ },
+ {
+ "id": 10,
+ "type": "ArtistsImage_Zho",
+ "pos": [
+ 680,
+ 420
+ ],
+ "size": [
+ 310,
+ 410
+ ],
+ "flags": {},
+ "order": 5,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "name",
+ "type": "STRING",
+ "links": [
+ 10
+ ],
+ "shape": 3,
+ "slot_index": 0
+ },
+ {
+ "name": "image",
+ "type": "IMAGE",
+ "links": null,
+ "shape": 3
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "ArtistsImage_Zho"
+ },
+ "widgets_values": [
+ "Atey Ghailan .png",
+ 1.2
+ ]
+ }
+ ],
+ "links": [
+ [
+ 1,
+ 2,
+ 0,
+ 3,
+ 0,
+ "MODEL"
+ ],
+ [
+ 2,
+ 3,
+ 0,
+ 4,
+ 0,
+ "MODEL"
+ ],
+ [
+ 3,
+ 5,
+ 0,
+ 6,
+ 4,
+ "STRING"
+ ],
+ [
+ 4,
+ 5,
+ 1,
+ 6,
+ 5,
+ "STRING"
+ ],
+ [
+ 5,
+ 7,
+ 0,
+ 6,
+ 0,
+ "IMAGE"
+ ],
+ [
+ 6,
+ 8,
+ 0,
+ 6,
+ 3,
+ "IMAGE"
+ ],
+ [
+ 7,
+ 1,
+ 0,
+ 6,
+ 2,
+ "INSIGHTFACEMODEL"
+ ],
+ [
+ 8,
+ 4,
+ 0,
+ 6,
+ 1,
+ "MODEL"
+ ],
+ [
+ 9,
+ 6,
+ 0,
+ 9,
+ 0,
+ "IMAGE"
+ ],
+ [
+ 10,
+ 10,
+ 0,
+ 12,
+ 0,
+ "STRING"
+ ],
+ [
+ 11,
+ 11,
+ 0,
+ 12,
+ 1,
+ "STRING"
+ ],
+ [
+ 12,
+ 12,
+ 0,
+ 13,
+ 0,
+ "STRING"
+ ],
+ [
+ 13,
+ 13,
+ 0,
+ 5,
+ 0,
+ "STRING"
+ ]
+ ],
+ "groups": [],
+ "config": {},
+ "extra": {},
+ "version": 0.4
+}
diff --git a/ComfyUI-InstantID/InstantIDNode.py b/ComfyUI-InstantID/InstantIDNode.py
new file mode 100644
index 0000000000000000000000000000000000000000..1f8c17a8489910392cdbabe462793388fd6f3a8f
--- /dev/null
+++ b/ComfyUI-InstantID/InstantIDNode.py
@@ -0,0 +1,352 @@
+import diffusers
+from diffusers.utils import load_image
+from diffusers.models import ControlNetModel
+from .style_template import styles
+
+import os
+import cv2
+import torch
+import numpy as np
+from PIL import Image
+import folder_paths
+
+from huggingface_hub import hf_hub_download
+from insightface.app import FaceAnalysis
+from .pipeline_stable_diffusion_xl_instantid import StableDiffusionXLInstantIDPipeline, draw_kps
+
+
+current_directory = os.path.dirname(os.path.abspath(__file__))
+device = "cuda" if torch.cuda.is_available() else "cpu"
+STYLE_NAMES = list(styles.keys())
+DEFAULT_STYLE_NAME = "Neon"
+
+
+def apply_style(style_name: str, positive: str, negative: str = "") -> tuple[str, str]:
+ p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
+ return p.replace("{prompt}", positive), n + ' ' + negative
+
+
+def resize_img(input_image, max_side=1280, min_side=1024, size=None,
+ pad_to_max_side=False, mode=Image.BILINEAR, base_pixel_number=64):
+
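+    # Note: input_image arrives as a ComfyUI IMAGE tensor (batch, H, W, C, float values in [0, 1]);
+    # convert it to a uint8 PIL image before resizing.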
+ image_np = (255. * input_image.cpu().numpy().squeeze()).clip(0, 255).astype(np.uint8)
+ input_image = Image.fromarray(image_np)
+
+ w, h = input_image.size
+ if size is not None:
+ w_resize_new, h_resize_new = size
+ else:
+ ratio = min_side / min(h, w)
+ w, h = round(ratio*w), round(ratio*h)
+ ratio = max_side / max(h, w)
+ input_image = input_image.resize([round(ratio*w), round(ratio*h)], mode)
+ w_resize_new = (round(ratio * w) // base_pixel_number) * base_pixel_number
+ h_resize_new = (round(ratio * h) // base_pixel_number) * base_pixel_number
+ input_image = input_image.resize([w_resize_new, h_resize_new], mode)
+
+ if pad_to_max_side:
+ res = np.ones([max_side, max_side, 3], dtype=np.uint8) * 255
+ offset_x = (max_side - w_resize_new) // 2
+ offset_y = (max_side - h_resize_new) // 2
+ res[offset_y:offset_y+h_resize_new, offset_x:offset_x+w_resize_new] = np.array(input_image)
+ input_image = Image.fromarray(res)
+ return input_image
+
+
+class InsightFaceLoader_Node_Zho:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "provider": (["CUDA", "CPU"], ),
+ },
+ }
+
+ RETURN_TYPES = ("INSIGHTFACEMODEL",)
+ FUNCTION = "load_insight_face_antelopev2"
+ CATEGORY = "📷InstantID"
+
+ def load_insight_face_antelopev2(self, provider):
+
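+        # FaceAnalysis expects onnxruntime provider names, so "CUDA"/"CPU" become
+        # "CUDAExecutionProvider"/"CPUExecutionProvider"; the antelopev2 models are
+        # looked up under <this node's directory>/models/antelopev2.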
+ model = FaceAnalysis(name="antelopev2", root=current_directory, providers=[provider + 'ExecutionProvider',])
+ model.prepare(ctx_id=0, det_size=(640, 640))
+
+ return (model,)
+
+
+class IDControlNetLoaderNode_Zho:
+ def __init__(self):
+ pass
+
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "controlnet_path": ("STRING", {"default": "enter your path"}),
+ }
+ }
+
+ RETURN_TYPES = ("MODEL",)
+ RETURN_NAMES = ("controlnet",)
+ FUNCTION = "load_idcontrolnet"
+ CATEGORY = "📷InstantID"
+
+ def load_idcontrolnet(self, controlnet_path):
+
+ controlnet = ControlNetModel.from_pretrained(controlnet_path, torch_dtype=torch.float16)
+
+ return [controlnet]
+
+
+class IDBaseModelLoader_fromhub_Node_Zho:
+ def __init__(self):
+ pass
+
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "base_model_path": ("STRING", {"default": "wangqixun/YamerMIX_v8"}),
+ "controlnet": ("MODEL",)
+ }
+ }
+
+ RETURN_TYPES = ("MODEL",)
+ RETURN_NAMES = ("pipe",)
+ FUNCTION = "load_model"
+ CATEGORY = "📷InstantID"
+
+ def load_model(self, base_model_path, controlnet):
+ # Code to load the base model
+ pipe = StableDiffusionXLInstantIDPipeline.from_pretrained(
+ base_model_path,
+ controlnet=controlnet,
+ torch_dtype=torch.float16,
+ local_dir="./checkpoints"
+ ).to(device)
+ return [pipe]
+
+
+class IDBaseModelLoader_local_Node_Zho:
+ def __init__(self):
+ pass
+
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
+ "controlnet": ("MODEL",)
+ }
+ }
+
+ RETURN_TYPES = ("MODEL",)
+ RETURN_NAMES = ("pipe",)
+ FUNCTION = "load_model"
+ CATEGORY = "📷InstantID"
+
+ def load_model(self, ckpt_name, controlnet):
+ # Code to load the base model
+ if not ckpt_name:
+ raise ValueError("Please provide the ckpt_name parameter with the name of the checkpoint file.")
+
+ ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
+
+ if not os.path.exists(ckpt_path):
+ raise FileNotFoundError(f"Checkpoint file {ckpt_path} not found.")
+
+ pipe = StableDiffusionXLInstantIDPipeline.from_single_file(
+ pretrained_model_link_or_path=ckpt_path,
+ controlnet=controlnet,
+ torch_dtype=torch.float16,
+ use_safetensors=True,
+ variant="fp16"
+ ).to(device)
+ return [pipe]
+
+
+class Ipadapter_instantidLoader_Node_Zho:
+ def __init__(self):
+ pass
+
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "Ipadapter_instantid_path": ("STRING", {"default": "enter your path"}),
+ "filename": ("STRING", {"default": "ip-adapter.bin"}),
+ "pipe": ("MODEL",),
+ }
+ }
+
+ RETURN_TYPES = ("MODEL",)
+ FUNCTION = "load_ip_adapter_instantid"
+ CATEGORY = "📷InstantID"
+
+ def load_ip_adapter_instantid(self, pipe, Ipadapter_instantid_path, filename):
+        # Build the local path to the InstantID ip-adapter weights
+ face_adapter = os.path.join(Ipadapter_instantid_path, filename)
+
+ # load adapter
+ pipe.load_ip_adapter_instantid(face_adapter)
+
+ return [pipe]
+
+
+class ID_Prompt_Style_Zho:
+ def __init__(self):
+ pass
+
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "prompt": ("STRING", {"default": "a woman, retro futurism, retro game", "multiline": True}),
+ "negative_prompt": ("STRING", {"default": "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly", "multiline": True}),
+ "style_name": (STYLE_NAMES, {"default": DEFAULT_STYLE_NAME})
+ }
+ }
+
+ RETURN_TYPES = ('STRING','STRING',)
+ RETURN_NAMES = ('positive_prompt','negative_prompt',)
+ FUNCTION = "id_prompt_style"
+ CATEGORY = "📷InstantID"
+
+ def id_prompt_style(self, style_name, prompt, negative_prompt):
+ prompt, negative_prompt = apply_style(style_name, prompt, negative_prompt)
+
+ return prompt, negative_prompt
+
+
+class IDGenerationNode_Zho:
+ def __init__(self):
+ pass
+
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "face_image": ("IMAGE",),
+ "pipe": ("MODEL",),
+ "insightface": ("INSIGHTFACEMODEL",),
+ "positive": ("STRING", {"multiline": True, "forceInput": True}),
+ "negative": ("STRING", {"multiline": True, "forceInput": True}),
+ "ip_adapter_scale": ("FLOAT", {"default": 0.8, "min": 0, "max": 1.0, "display": "slider"}),
+ "controlnet_conditioning_scale": ("FLOAT", {"default": 0.8, "min": 0, "max": 1.0, "display": "slider"}),
+ "steps": ("INT", {"default": 50, "min": 1, "max": 100, "step": 1, "display": "slider"}),
+ "guidance_scale": ("FLOAT", {"default": 5, "min": 0, "max": 10, "display": "slider"}),
+ "enhance_face_region": ("BOOLEAN", {"default": True}),
+ "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
+ },
+ "optional": {
+ "pose_image_optional": ("IMAGE",),
+ }
+ }
+
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "id_generate_image"
+ CATEGORY = "📷InstantID"
+
+ def id_generate_image(self, insightface, positive, negative, face_image, pipe, ip_adapter_scale, controlnet_conditioning_scale, steps, guidance_scale, seed, enhance_face_region, pose_image_optional=None):
+
+ face_image = resize_img(face_image)
+
+ # prepare face emb
+ face_info = insightface.get(cv2.cvtColor(np.array(face_image), cv2.COLOR_RGB2BGR))
+        if not face_info:
+            raise ValueError("No face detected in the face_image input.")
+
+ face_info = sorted(face_info, key=lambda x: (x['bbox'][2] - x['bbox'][0]) * (x['bbox'][3] - x['bbox'][1]))[-1]
+ face_emb = face_info['embedding']
+ face_kps = draw_kps(face_image, face_info['kps'])
+ width, height = face_kps.size
+
+ if pose_image_optional is not None:
+ pose_image = resize_img(pose_image_optional)
+ face_info = insightface.get(cv2.cvtColor(np.array(pose_image), cv2.COLOR_RGB2BGR))
+            if len(face_info) == 0:
+                raise ValueError("Cannot find any face in the pose reference image! Please upload another image.")
+
+ face_info = face_info[-1]
+ face_kps = draw_kps(pose_image, face_info['kps'])
+
+ width, height = face_kps.size
+
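+        # enhance_face_region: restrict the ControlNet conditioning mask to the detected face bounding box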
+ if enhance_face_region:
+ control_mask = np.zeros([height, width, 3])
+ x1, y1, x2, y2 = face_info['bbox']
+ x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
+ control_mask[y1:y2, x1:x2] = 255
+ control_mask = Image.fromarray(control_mask.astype(np.uint8))
+ else:
+ control_mask = None
+
+ generator = torch.Generator(device=device).manual_seed(seed)
+
+ pipe.set_ip_adapter_scale(ip_adapter_scale)
+
+ output = pipe(
+ prompt=positive,
+ negative_prompt=negative,
+ image_embeds=face_emb,
+ image=face_kps,
+ control_mask=control_mask,
+ controlnet_conditioning_scale=controlnet_conditioning_scale,
+ num_inference_steps=steps,
+ generator=generator,
+ guidance_scale=guidance_scale,
+ width=width,
+ height=height,
+ return_dict=False
+ )
+
+        # Check the output type and handle it accordingly
+        if isinstance(output, tuple):
+            # When a tuple is returned, the first element is the list of images
+            images_list = output[0]
+        else:
+            # A StableDiffusionXLPipelineOutput was returned; extract the images from it
+            images_list = output.images
+
+        # Convert the images to torch.Tensor and rearrange the dimensions to NHWC
+        images_tensors = []
+        for img in images_list:
+            # Convert the PIL.Image to a numpy.ndarray
+            img_array = np.array(img)
+            # Convert the numpy.ndarray to a torch.Tensor scaled to [0, 1]
+            img_tensor = torch.from_numpy(img_array).float() / 255.
+            # Rearrange HWC to CHW (if needed)
+            if img_tensor.ndim == 3 and img_tensor.shape[-1] == 3:
+                img_tensor = img_tensor.permute(2, 0, 1)
+            # Add a batch dimension and convert back to NHWC for ComfyUI
+            img_tensor = img_tensor.unsqueeze(0).permute(0, 2, 3, 1)
+ images_tensors.append(img_tensor)
+
+ if len(images_tensors) > 1:
+ output_image = torch.cat(images_tensors, dim=0)
+ else:
+ output_image = images_tensors[0]
+
+ return (output_image,)
+
+
+
+NODE_CLASS_MAPPINGS = {
+ "InsightFaceLoader_Zho": InsightFaceLoader_Node_Zho,
+ "IDControlNetLoader": IDControlNetLoaderNode_Zho,
+ "IDBaseModelLoader_fromhub": IDBaseModelLoader_fromhub_Node_Zho,
+ "IDBaseModelLoader_local": IDBaseModelLoader_local_Node_Zho,
+ "Ipadapter_instantidLoader": Ipadapter_instantidLoader_Node_Zho,
+ "ID_Prompt_Styler": ID_Prompt_Style_Zho,
+ "IDGenerationNode": IDGenerationNode_Zho
+}
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+ "InsightFaceLoader_Zho": "📷InsightFace Loader",
+ "IDControlNetLoader": "📷ID ControlNet Loader",
+ "IDBaseModelLoader_fromhub": "📷ID Base Model Loader from hub 🤗",
+ "IDBaseModelLoader_local": "📷ID Base Model Loader locally",
+ "Ipadapter_instantidLoader": "📷Ipadapter_instantid Loader",
+ "ID_Prompt_Styler": "📷ID Prompt_Styler",
+ "IDGenerationNode": "📷InstantID Generation"
+}
diff --git a/ComfyUI-InstantID/README.md b/ComfyUI-InstantID/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e590a4ee0182e6d62dbce225502417f778299ca3
--- /dev/null
+++ b/ComfyUI-InstantID/README.md
@@ -0,0 +1,213 @@
+
+![ISID_](https://github.com/ZHO-ZHO-ZHO/ComfyUI-InstantID/assets/140084057/01393483-3145-4691-9daa-7ce9035c9bd0)
+
+
+# ComfyUI InstantID
+
+Unofficial implementation of [InstantID](https://github.com/InstantID/InstantID) for ComfyUI
+
+![Dingtalk_20240123182131](https://github.com/ZHO-ZHO-ZHO/ComfyUI-InstantID/assets/140084057/7a99b32c-b4a2-4c46-acb0-f796fc46f9ee)
+
++ pose_ref
+
+![Dingtalk_20240124232946](https://github.com/ZHO-ZHO-ZHO/ComfyUI-InstantID/assets/140084057/caa60456-f2d8-4315-864b-659a9e7cea89)
+
+
+## Info
+
+- An unofficial ComfyUI implementation of [InstantID](https://github.com/InstantID/InstantID)
+
+- Version: V2.0, adds support for pose reference images
+
+
+
+## Video Demo
+
+V2.0
+
+
+https://github.com/ZHO-ZHO-ZHO/ComfyUI-InstantID/assets/140084057/083c9e5e-06a0-4623-b5ac-05f7e85a74f2
+
+
+V1.0
+
+https://github.com/ZHO-ZHO-ZHO/ComfyUI-InstantID/assets/140084057/7295c0d7-1d1b-4044-aea3-8efa67047362
+
+
+
+## Features
+
+- Base model loaders
+  - 📷ID Base Model Loader from hub 🤗: downloads the model automatically from the Hugging Face hub; just enter the model name (e.g. wangqixun/YamerMIX_v8)
+  - 📷ID Base Model Loader locally: loads a local checkpoint (an SDXL-family model is required)
+
+- InsightFace model loader | 📷InsightFace Loader
+  - provider: CUDA or CPU
+
+- ID ControlNet loader | 📷ID ControlNet Loader
+  - controlnet_path: path to the ID ControlNet model
+
+- Ipadapter_instantid loader | 📷Ipadapter_instantid Loader
+  - Ipadapter_instantid_path: model path
+  - filename: model file name
+
+- Prompt + style | 📷ID Prompt_Styler (see the styling sketch after this list)
+  - compatible with any prompt (text) input, e.g. Portrait Master, stylers, or the Photomaker Prompt_Styler
+  - prompt, negative: positive and negative prompts
+  - style_name: the 8 official styles are supported
+    - (No style)
+    - Watercolor
+    - Film Noir
+    - Neon
+    - Jungle
+    - Mars
+    - Vibrant Color
+    - Snow
+    - Line art
+
+- InstantID generation | 📷InstantID Generation 🆕
+  - face_image: face reference image
+  - pipe: the loaded pipeline
+  - insightface: the loaded InsightFace model 🆕
+  - pose_image_optional (optional): pose reference image (note: it only affects the pose around the face, unlike a regular openpose control)
+  - positive, negative: positive and negative prompts
+  - ip_adapter_scale: IP-Adapter strength
+  - controlnet_conditioning_scale: ID ControlNet strength
+  - steps: number of sampling steps; the official default is 30
+  - guidance_scale: prompt adherence; 5 is a reasonable default
+  - enhance_face_region: face-region enhancement option 🆕
+  - seed: random seed
+
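+Under the hood the styler simply fills the `{prompt}` placeholder of the selected style template and appends the template's negative text to your own negative prompt (see `apply_style` in `InstantIDNode.py`). A minimal sketch of that behavior (the "Neon" template string below is a made-up placeholder; the real templates live in `style_template.py`):
+
+```python
+# Sketch of 📷ID Prompt_Styler's behavior; the template text is illustrative only.
+styles = {
+    "Neon": ("neon lighting, cyberpunk city, {prompt}", "dull colors, low contrast"),
+}
+
+def apply_style(style_name, positive, negative=""):
+    # Fall back to the default style when the name is unknown, as the node does
+    p, n = styles.get(style_name, styles["Neon"])
+    return p.replace("{prompt}", positive), n + " " + negative
+
+pos, neg = apply_style("Neon", "a woman, retro futurism", "lowres, watermark")
+print(pos)  # neon lighting, cyberpunk city, a woman, retro futurism
+print(neg)  # dull colors, low contrast lowres, watermark
+```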
+
+## Styles
+
+![ISID_STYLE](https://github.com/ZHO-ZHO-ZHO/ComfyUI-InstantID/assets/140084057/142bda7a-798b-46b3-aa69-1b88701c8311)
+
+
+
+## Install
+
+
+- Recommended: install via ComfyUI Manager (on the way)
+
+
+- Manual install:
+  1. `cd custom_nodes`
+  2. `git clone https://github.com/ZHO-ZHO-ZHO/ComfyUI-InstantID.git`
+  3. `cd ComfyUI-InstantID`
+  4. `pip install -r requirements.txt`
+  5. Restart ComfyUI
+
+
+## How to Use
+
+- Download config.json and diffusion_pytorch_model.safetensors from [InstantID/ControlNetModel](https://huggingface.co/InstantX/InstantID/tree/main/ControlNetModel) and enter the model directory in the 📷ID ControlNet Loader node (e.g. ComfyUI/custom_nodes/ComfyUI-InstantID/checkpoints/controlnet)
+
+- Download ip-adapter.bin from [InstantID/ip-adapter](https://huggingface.co/InstantX/InstantID/tree/main) and enter its directory in the 📷Ipadapter_instantid Loader node (e.g. ComfyUI/custom_nodes/ComfyUI-InstantID/checkpoints)
+
+- Download all models from [DIAMONIK7777/antelopev2](https://huggingface.co/DIAMONIK7777/antelopev2/tree/main) and place them in ComfyUI/custom_nodes/ComfyUI-InstantID/models/antelopev2 (a download sketch is shown after this list)
+
+- Compatibility: CUDA 11 works with the onnxruntime-gpu (1.16.0) installed by default; for CUDA 12 you need to install onnxruntime-gpu==1.17.0 manually [link](https://dev.azure.com/onnxruntime/onnxruntime/_artifacts/feed/onnxruntime-cuda-12/PyPI/onnxruntime-gpu/overview/1.17.0)
+
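+If you prefer to script the downloads instead of fetching the files by hand, a sketch using `huggingface_hub` (already used by this node pack) is shown below. The target directories are only examples; whatever you choose, enter the same paths in the loader nodes.
+
+```python
+from huggingface_hub import hf_hub_download, snapshot_download
+
+NODE_DIR = "ComfyUI/custom_nodes/ComfyUI-InstantID"  # adjust to your ComfyUI location
+
+# ID ControlNet: config.json + diffusion_pytorch_model.safetensors
+for name in ["config.json", "diffusion_pytorch_model.safetensors"]:
+    hf_hub_download(repo_id="InstantX/InstantID",
+                    filename=f"ControlNetModel/{name}",
+                    local_dir=f"{NODE_DIR}/checkpoints")
+
+# InstantID ip-adapter weights
+hf_hub_download(repo_id="InstantX/InstantID",
+                filename="ip-adapter.bin",
+                local_dir=f"{NODE_DIR}/checkpoints")
+
+# antelopev2 face-analysis models for InsightFace
+snapshot_download(repo_id="DIAMONIK7777/antelopev2",
+                  local_dir=f"{NODE_DIR}/models/antelopev2")
+```
+
+Note that the ControlNet files land in `checkpoints/ControlNetModel/`; point the 📷ID ControlNet Loader at whichever folder actually contains config.json.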
+
+## Workflows
+
+V2.0
+
+- [V2.0 InstantID_pose_ref + ArtGallery](https://github.com/ZHO-ZHO-ZHO/ComfyUI-InstantID/blob/main/INSTANTID%20WORKFLOWS/V2.0%20InstantID_pose_ref%20%2B%20ArtGallery%20%E3%80%90Zho%E3%80%91.json)
+
+ ![Dingtalk_20240124232833](https://github.com/ZHO-ZHO-ZHO/ComfyUI-InstantID/assets/140084057/99be9592-775d-4c33-bafc-5bd5c95a7222)
+
+
+- [V2.0 auto-download from the Hugging Face hub](https://github.com/ZHO-ZHO-ZHO/ComfyUI-InstantID/blob/main/INSTANTID%20WORKFLOWS/V2.0%20InstantID_fromhub_pose_ref%E3%80%90Zho%E3%80%91.json)
+
+ ![Dingtalk_20240124230145](https://github.com/ZHO-ZHO-ZHO/ComfyUI-InstantID/assets/140084057/95c4a1dd-864d-4a46-8c45-a48866aef29f)
+
+
+- [V2.0 InstantID_locally_pose_ref](https://github.com/ZHO-ZHO-ZHO/ComfyUI-InstantID/blob/main/INSTANTID%20WORKFLOWS/V2.0%20InstantID_locally_pose_ref%E3%80%90Zho%E3%80%91.json)
+
+ ![Dingtalk_20240124230609](https://github.com/ZHO-ZHO-ZHO/ComfyUI-InstantID/assets/140084057/d4c22389-f853-44bd-9ea2-568b2ac7ed06)
+
+
+V1.0 workflows only work with the V1.0 version
+
+- [V1.0 InstantID + ArtGallery](https://github.com/ZHO-ZHO-ZHO/ComfyUI-InstantID/blob/main/INSTANTID%20WORKFLOWS/V1.0%20InstantID%20%2B%20ArtGallery%E3%80%90Zho%E3%80%91.json)
+
+
+ ![Dingtalk_20240123182440](https://github.com/ZHO-ZHO-ZHO/ComfyUI-InstantID/assets/140084057/c6ee25bf-a528-4d78-9b35-f5b0d0303601)
+
+
+- [V1.0 local model (locally)](https://github.com/ZHO-ZHO-ZHO/ComfyUI-InstantID/blob/main/INSTANTID%20WORKFLOWS/V1.0%20InstantID_locally%E3%80%90Zho%E3%80%91.json)
+
+ ![Dingtalk_20240123175624](https://github.com/ZHO-ZHO-ZHO/ComfyUI-InstantID/assets/140084057/459bfede-59e8-4d8d-941c-a950c4827c49)
+
+
+- [V1.0 auto-download from the Hugging Face hub](https://github.com/ZHO-ZHO-ZHO/ComfyUI-InstantID/blob/main/INSTANTID%20WORKFLOWS/V1.0%20InstantID_fromhub%E3%80%90Zho%E3%80%91.json)
+
+ ![Dingtalk_20240123174950](https://github.com/ZHO-ZHO-ZHO/ComfyUI-InstantID/assets/140084057/50133961-1752-4ec8-ac0b-068d998b8534)
+
+
+
+
+## Changelog
+
+- 20240124
+
+  Updated to V2.0: added pose reference images and cleaned up the code
+
+  Fixed the insightfaceloader conflict issue
+
+  Fixed the onnxruntime-gpu version compatibility issue
+
+- 20240123
+
+  V1.0 released: supports both local models and models hosted on the Hugging Face hub, with 8 styles
+
+- 20240122
+
+  Project created
+
+
+## Speed
+
+- V1.0
+
+  - A100, 50 steps: 14 s
+
+ ![image](https://github.com/ZHO-ZHO-ZHO/ComfyUI-InstantID/assets/140084057/dc535e67-3f56-4faf-be81-621b84bb6ee2)
+
+
+
+## Stars
+
+[![Star History Chart](https://api.star-history.com/svg?repos=ZHO-ZHO-ZHO/ComfyUI-InstantID&type=Date)](https://star-history.com/#ZHO-ZHO-ZHO/ComfyUI-InstantID&Date)
+
+
+## About me
+
+📬 **Contact**:
+- Email: zhozho3965@gmail.com
+- QQ group: 839821928
+
+🔗 **Social media**:
+- Personal page: [-Zho-](https://jike.city/zho)
+- Bilibili: [My Bilibili homepage](https://space.bilibili.com/484366804)
+- X (Twitter): [My Twitter](https://twitter.com/ZHOZHO672070)
+- Xiaohongshu: [My Xiaohongshu profile](https://www.xiaohongshu.com/user/profile/63f11530000000001001e0c8?xhsshare=CopyLink&appuid=63f11530000000001001e0c8&apptime=1690528872)
+
+💡 **Support me**:
+- Bilibili: [Charge me on Bilibili](https://space.bilibili.com/484366804)
+- Afdian: [Support me on Afdian](https://afdian.net/a/ZHOZHO)
+
+
+## Credits
+
+[InstantID](https://github.com/InstantID/InstantID)
+
+The 📷InsightFace Loader code is adapted from [ComfyUI_IPAdapter_plus](https://github.com/cubiq/ComfyUI_IPAdapter_plus); thanks to [@cubiq](https://github.com/cubiq)!
+
+Thanks to [@hidecloud](https://twitter.com/hidecloud) for testing and feedback on onnxruntime version compatibility!
+
+Thanks to the [esheep](https://www.esheep.com/) engineers for reporting the node conflict issue!
diff --git a/ComfyUI-InstantID/__init__.py b/ComfyUI-InstantID/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..31ec00fe479f4ee4e7bbf1c7e66e4ac8725be5f1
--- /dev/null
+++ b/ComfyUI-InstantID/__init__.py
@@ -0,0 +1,3 @@
+from .InstantIDNode import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS
+
+__all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS']
diff --git a/ComfyUI-InstantID/__pycache__/InstantIDNode.cpython-312.pyc b/ComfyUI-InstantID/__pycache__/InstantIDNode.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bfce3f232ccfb1b134ba3c5ec961e958f73b8da2
Binary files /dev/null and b/ComfyUI-InstantID/__pycache__/InstantIDNode.cpython-312.pyc differ
diff --git a/ComfyUI-InstantID/__pycache__/__init__.cpython-312.pyc b/ComfyUI-InstantID/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b32fbaac6c629d11de00bc4be224a7dbcccf933d
Binary files /dev/null and b/ComfyUI-InstantID/__pycache__/__init__.cpython-312.pyc differ
diff --git a/ComfyUI-InstantID/__pycache__/pipeline_stable_diffusion_xl_instantid.cpython-312.pyc b/ComfyUI-InstantID/__pycache__/pipeline_stable_diffusion_xl_instantid.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..62cf666a036c5197f72fff8735fce933c89cd9d5
Binary files /dev/null and b/ComfyUI-InstantID/__pycache__/pipeline_stable_diffusion_xl_instantid.cpython-312.pyc differ
diff --git a/ComfyUI-InstantID/__pycache__/style_template.cpython-312.pyc b/ComfyUI-InstantID/__pycache__/style_template.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c21efeef0af0acec1ac422f9ef674fce7f69593b
Binary files /dev/null and b/ComfyUI-InstantID/__pycache__/style_template.cpython-312.pyc differ
diff --git a/ComfyUI-InstantID/checkpoints/put_models_here.txt b/ComfyUI-InstantID/checkpoints/put_models_here.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d3f5a12faa99758192ecc4ed3fc22c9249232e86
--- /dev/null
+++ b/ComfyUI-InstantID/checkpoints/put_models_here.txt
@@ -0,0 +1 @@
+
diff --git a/ComfyUI-InstantID/ip_adapter/__pycache__/attention_processor.cpython-312.pyc b/ComfyUI-InstantID/ip_adapter/__pycache__/attention_processor.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e09b4ea4b8cb336ef434133451169b8589994c28
Binary files /dev/null and b/ComfyUI-InstantID/ip_adapter/__pycache__/attention_processor.cpython-312.pyc differ
diff --git a/ComfyUI-InstantID/ip_adapter/__pycache__/resampler.cpython-312.pyc b/ComfyUI-InstantID/ip_adapter/__pycache__/resampler.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6e198653c9ddc1765bf0122b8fd2bb88548c41a1
Binary files /dev/null and b/ComfyUI-InstantID/ip_adapter/__pycache__/resampler.cpython-312.pyc differ
diff --git a/ComfyUI-InstantID/ip_adapter/__pycache__/utils.cpython-312.pyc b/ComfyUI-InstantID/ip_adapter/__pycache__/utils.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..25384f164663cabf5f1f7595fbaa1c82cbc3dce7
Binary files /dev/null and b/ComfyUI-InstantID/ip_adapter/__pycache__/utils.cpython-312.pyc differ
diff --git a/ComfyUI-InstantID/ip_adapter/attention_processor.py b/ComfyUI-InstantID/ip_adapter/attention_processor.py
new file mode 100644
index 0000000000000000000000000000000000000000..d21c4942b4fd505d71e795f632e0e2a9d071195f
--- /dev/null
+++ b/ComfyUI-InstantID/ip_adapter/attention_processor.py
@@ -0,0 +1,308 @@
+# modified from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+try:
+ import xformers
+ import xformers.ops
+ xformers_available = True
+except Exception as e:
+ xformers_available = False
+
+
+
+class RegionControler(object):
+ def __init__(self) -> None:
+ self.prompt_image_conditioning = []
+region_control = RegionControler()
+
+
+class AttnProcessor(nn.Module):
+ r"""
+ Default processor for performing attention-related computations.
+ """
+ def __init__(
+ self,
+ hidden_size=None,
+ cross_attention_dim=None,
+ ):
+ super().__init__()
+
+ def __call__(
+ self,
+ attn,
+ hidden_states,
+ encoder_hidden_states=None,
+ attention_mask=None,
+ temb=None,
+ ):
+ residual = hidden_states
+
+ if attn.spatial_norm is not None:
+ hidden_states = attn.spatial_norm(hidden_states, temb)
+
+ input_ndim = hidden_states.ndim
+
+ if input_ndim == 4:
+ batch_size, channel, height, width = hidden_states.shape
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
+
+ batch_size, sequence_length, _ = (
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
+ )
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
+
+ if attn.group_norm is not None:
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
+
+ query = attn.to_q(hidden_states)
+
+ if encoder_hidden_states is None:
+ encoder_hidden_states = hidden_states
+ elif attn.norm_cross:
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
+
+ key = attn.to_k(encoder_hidden_states)
+ value = attn.to_v(encoder_hidden_states)
+
+ query = attn.head_to_batch_dim(query)
+ key = attn.head_to_batch_dim(key)
+ value = attn.head_to_batch_dim(value)
+
+ attention_probs = attn.get_attention_scores(query, key, attention_mask)
+ hidden_states = torch.bmm(attention_probs, value)
+ hidden_states = attn.batch_to_head_dim(hidden_states)
+
+ # linear proj
+ hidden_states = attn.to_out[0](hidden_states)
+ # dropout
+ hidden_states = attn.to_out[1](hidden_states)
+
+ if input_ndim == 4:
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
+
+ if attn.residual_connection:
+ hidden_states = hidden_states + residual
+
+ hidden_states = hidden_states / attn.rescale_output_factor
+
+ return hidden_states
+
+
+class IPAttnProcessor(nn.Module):
+ r"""
+    Attention processor for IP-Adapter.
+ Args:
+ hidden_size (`int`):
+ The hidden size of the attention layer.
+ cross_attention_dim (`int`):
+ The number of channels in the `encoder_hidden_states`.
+ scale (`float`, defaults to 1.0):
+ the weight scale of image prompt.
+        num_tokens (`int`, defaults to 4; when using ip_adapter_plus it should be 16):
+ The context length of the image features.
+ """
+
+ def __init__(self, hidden_size, cross_attention_dim=None, scale=1.0, num_tokens=4):
+ super().__init__()
+
+ self.hidden_size = hidden_size
+ self.cross_attention_dim = cross_attention_dim
+ self.scale = scale
+ self.num_tokens = num_tokens
+
+ self.to_k_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
+ self.to_v_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
+
+ def __call__(
+ self,
+ attn,
+ hidden_states,
+ encoder_hidden_states=None,
+ attention_mask=None,
+ temb=None,
+ ):
+ residual = hidden_states
+
+ if attn.spatial_norm is not None:
+ hidden_states = attn.spatial_norm(hidden_states, temb)
+
+ input_ndim = hidden_states.ndim
+
+ if input_ndim == 4:
+ batch_size, channel, height, width = hidden_states.shape
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
+
+ batch_size, sequence_length, _ = (
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
+ )
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
+
+ if attn.group_norm is not None:
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
+
+ query = attn.to_q(hidden_states)
+
+ if encoder_hidden_states is None:
+ encoder_hidden_states = hidden_states
+ else:
+ # get encoder_hidden_states, ip_hidden_states
+ end_pos = encoder_hidden_states.shape[1] - self.num_tokens
+ encoder_hidden_states, ip_hidden_states = encoder_hidden_states[:, :end_pos, :], encoder_hidden_states[:, end_pos:, :]
+ if attn.norm_cross:
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
+
+ key = attn.to_k(encoder_hidden_states)
+ value = attn.to_v(encoder_hidden_states)
+
+ query = attn.head_to_batch_dim(query)
+ key = attn.head_to_batch_dim(key)
+ value = attn.head_to_batch_dim(value)
+
+ if xformers_available:
+ hidden_states = self._memory_efficient_attention_xformers(query, key, value, attention_mask)
+ else:
+ attention_probs = attn.get_attention_scores(query, key, attention_mask)
+ hidden_states = torch.bmm(attention_probs, value)
+ hidden_states = attn.batch_to_head_dim(hidden_states)
+
+ # for ip-adapter
+ ip_key = self.to_k_ip(ip_hidden_states)
+ ip_value = self.to_v_ip(ip_hidden_states)
+
+ ip_key = attn.head_to_batch_dim(ip_key)
+ ip_value = attn.head_to_batch_dim(ip_value)
+
+ if xformers_available:
+ ip_hidden_states = self._memory_efficient_attention_xformers(query, ip_key, ip_value, None)
+ else:
+ ip_attention_probs = attn.get_attention_scores(query, ip_key, None)
+ ip_hidden_states = torch.bmm(ip_attention_probs, ip_value)
+ ip_hidden_states = attn.batch_to_head_dim(ip_hidden_states)
+
+ # region control
+ if len(region_control.prompt_image_conditioning) == 1:
+ region_mask = region_control.prompt_image_conditioning[0].get('region_mask', None)
+ if region_mask is not None:
+ h, w = region_mask.shape[:2]
+ ratio = (h * w / query.shape[1]) ** 0.5
+ mask = F.interpolate(region_mask[None, None], scale_factor=1/ratio, mode='nearest').reshape([1, -1, 1])
+ else:
+ mask = torch.ones_like(ip_hidden_states)
+ ip_hidden_states = ip_hidden_states * mask
+
+ hidden_states = hidden_states + self.scale * ip_hidden_states
+
+ # linear proj
+ hidden_states = attn.to_out[0](hidden_states)
+ # dropout
+ hidden_states = attn.to_out[1](hidden_states)
+
+ if input_ndim == 4:
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
+
+ if attn.residual_connection:
+ hidden_states = hidden_states + residual
+
+ hidden_states = hidden_states / attn.rescale_output_factor
+
+ return hidden_states
+
+
+ def _memory_efficient_attention_xformers(self, query, key, value, attention_mask):
+ # TODO attention_mask
+ query = query.contiguous()
+ key = key.contiguous()
+ value = value.contiguous()
+ hidden_states = xformers.ops.memory_efficient_attention(query, key, value, attn_bias=attention_mask)
+ # hidden_states = self.reshape_batch_dim_to_heads(hidden_states)
+ return hidden_states
+
+
+class AttnProcessor2_0(torch.nn.Module):
+ r"""
+ Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0).
+ """
+ def __init__(
+ self,
+ hidden_size=None,
+ cross_attention_dim=None,
+ ):
+ super().__init__()
+ if not hasattr(F, "scaled_dot_product_attention"):
+            raise ImportError("AttnProcessor2_0 requires PyTorch 2.0. To use it, please upgrade PyTorch to 2.0.")
+
+ def __call__(
+ self,
+ attn,
+ hidden_states,
+ encoder_hidden_states=None,
+ attention_mask=None,
+ temb=None,
+ ):
+ residual = hidden_states
+
+ if attn.spatial_norm is not None:
+ hidden_states = attn.spatial_norm(hidden_states, temb)
+
+ input_ndim = hidden_states.ndim
+
+ if input_ndim == 4:
+ batch_size, channel, height, width = hidden_states.shape
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
+
+ batch_size, sequence_length, _ = (
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
+ )
+
+ if attention_mask is not None:
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
+ # scaled_dot_product_attention expects attention_mask shape to be
+ # (batch, heads, source_length, target_length)
+ attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
+
+ if attn.group_norm is not None:
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
+
+ query = attn.to_q(hidden_states)
+
+ if encoder_hidden_states is None:
+ encoder_hidden_states = hidden_states
+ elif attn.norm_cross:
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
+
+ key = attn.to_k(encoder_hidden_states)
+ value = attn.to_v(encoder_hidden_states)
+
+ inner_dim = key.shape[-1]
+ head_dim = inner_dim // attn.heads
+
+ query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+
+ key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+ value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
+ # TODO: add support for attn.scale when we move to Torch 2.1
+ hidden_states = F.scaled_dot_product_attention(
+ query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
+ )
+
+ hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
+ hidden_states = hidden_states.to(query.dtype)
+
+ # linear proj
+ hidden_states = attn.to_out[0](hidden_states)
+ # dropout
+ hidden_states = attn.to_out[1](hidden_states)
+
+ if input_ndim == 4:
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
+
+ if attn.residual_connection:
+ hidden_states = hidden_states + residual
+
+ hidden_states = hidden_states / attn.rescale_output_factor
+
+ return hidden_states
\ No newline at end of file
diff --git a/ComfyUI-InstantID/ip_adapter/resampler.py b/ComfyUI-InstantID/ip_adapter/resampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b1e84d81b9a52d0180a987585ae6c39ec38c0ba
--- /dev/null
+++ b/ComfyUI-InstantID/ip_adapter/resampler.py
@@ -0,0 +1,121 @@
+# modified from https://github.com/mlfoundations/open_flamingo/blob/main/open_flamingo/src/helpers.py
+import math
+
+import torch
+import torch.nn as nn
+
+
+# FFN
+def FeedForward(dim, mult=4):
+ inner_dim = int(dim * mult)
+ return nn.Sequential(
+ nn.LayerNorm(dim),
+ nn.Linear(dim, inner_dim, bias=False),
+ nn.GELU(),
+ nn.Linear(inner_dim, dim, bias=False),
+ )
+
+
+def reshape_tensor(x, heads):
+ bs, length, width = x.shape
+ #(bs, length, width) --> (bs, length, n_heads, dim_per_head)
+ x = x.view(bs, length, heads, -1)
+ # (bs, length, n_heads, dim_per_head) --> (bs, n_heads, length, dim_per_head)
+ x = x.transpose(1, 2)
+    # stays (bs, n_heads, length, dim_per_head); the reshape only makes the layout explicit
+ x = x.reshape(bs, heads, length, -1)
+ return x
+
+
+class PerceiverAttention(nn.Module):
+ def __init__(self, *, dim, dim_head=64, heads=8):
+ super().__init__()
+ self.scale = dim_head**-0.5
+ self.dim_head = dim_head
+ self.heads = heads
+ inner_dim = dim_head * heads
+
+ self.norm1 = nn.LayerNorm(dim)
+ self.norm2 = nn.LayerNorm(dim)
+
+ self.to_q = nn.Linear(dim, inner_dim, bias=False)
+ self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
+ self.to_out = nn.Linear(inner_dim, dim, bias=False)
+
+
+ def forward(self, x, latents):
+ """
+ Args:
+ x (torch.Tensor): image features
+ shape (b, n1, D)
+ latent (torch.Tensor): latent features
+ shape (b, n2, D)
+ """
+ x = self.norm1(x)
+ latents = self.norm2(latents)
+
+ b, l, _ = latents.shape
+
+ q = self.to_q(latents)
+ kv_input = torch.cat((x, latents), dim=-2)
+ k, v = self.to_kv(kv_input).chunk(2, dim=-1)
+
+ q = reshape_tensor(q, self.heads)
+ k = reshape_tensor(k, self.heads)
+ v = reshape_tensor(v, self.heads)
+
+ # attention
+ scale = 1 / math.sqrt(math.sqrt(self.dim_head))
+ weight = (q * scale) @ (k * scale).transpose(-2, -1) # More stable with f16 than dividing afterwards
+ weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
+ out = weight @ v
+
+ out = out.permute(0, 2, 1, 3).reshape(b, l, -1)
+
+ return self.to_out(out)
+
+
+class Resampler(nn.Module):
+ def __init__(
+ self,
+ dim=1024,
+ depth=8,
+ dim_head=64,
+ heads=16,
+ num_queries=8,
+ embedding_dim=768,
+ output_dim=1024,
+ ff_mult=4,
+ ):
+ super().__init__()
+
+ self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim**0.5)
+
+ self.proj_in = nn.Linear(embedding_dim, dim)
+
+ self.proj_out = nn.Linear(dim, output_dim)
+ self.norm_out = nn.LayerNorm(output_dim)
+
+ self.layers = nn.ModuleList([])
+ for _ in range(depth):
+ self.layers.append(
+ nn.ModuleList(
+ [
+ PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads),
+ FeedForward(dim=dim, mult=ff_mult),
+ ]
+ )
+ )
+
+ def forward(self, x):
+
+ latents = self.latents.repeat(x.size(0), 1, 1)
+
+ x = self.proj_in(x)
+
+ for attn, ff in self.layers:
+ latents = attn(x, latents) + latents
+ latents = ff(latents) + latents
+
+ latents = self.proj_out(latents)
+ return self.norm_out(latents)
\ No newline at end of file
diff --git a/ComfyUI-InstantID/ip_adapter/utils.py b/ComfyUI-InstantID/ip_adapter/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..a76ffdeeaf5f758caaf3067c005b360be3155d97
--- /dev/null
+++ b/ComfyUI-InstantID/ip_adapter/utils.py
@@ -0,0 +1,5 @@
+import torch.nn.functional as F
+
+
+def is_torch2_available():
+ return hasattr(F, "scaled_dot_product_attention")
diff --git a/ComfyUI-InstantID/models/antelopev2/1k3d68.onnx b/ComfyUI-InstantID/models/antelopev2/1k3d68.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..bab1a9f1eabb0284b66e7386926c656de69acecb
--- /dev/null
+++ b/ComfyUI-InstantID/models/antelopev2/1k3d68.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:42c8b6575bc827380f1b27e4c80b326962531da96186fd1597fa35693fdc9515
+size 137
diff --git a/ComfyUI-InstantID/models/antelopev2/2d106det.onnx b/ComfyUI-InstantID/models/antelopev2/2d106det.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..57fe3832d5294b08efa1af7e8c99abcf83a3bd1e
--- /dev/null
+++ b/ComfyUI-InstantID/models/antelopev2/2d106det.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f75c331a32849d158fda14b6f8dbe84cf31ccb5db41e031bab768680b96f873f
+size 135
diff --git a/ComfyUI-InstantID/models/antelopev2/genderage.onnx b/ComfyUI-InstantID/models/antelopev2/genderage.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..0028244577281135afc55f99ddaf62f524b4c8ea
--- /dev/null
+++ b/ComfyUI-InstantID/models/antelopev2/genderage.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f53e9ee9d98434da0782d1b205576c3a370dbc2331cd70188f8426a3aef121b9
+size 135
diff --git a/ComfyUI-InstantID/models/antelopev2/glintr100.onnx b/ComfyUI-InstantID/models/antelopev2/glintr100.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..e21fe4d67867a26066b1a4007d5eeb2f8822577f
--- /dev/null
+++ b/ComfyUI-InstantID/models/antelopev2/glintr100.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af374ec7db9ea896ee633dee1e1721e9f7957e19c43ce3fc9869950f82290c1e
+size 137
diff --git a/ComfyUI-InstantID/models/antelopev2/scrfd_10g_bnkps.onnx b/ComfyUI-InstantID/models/antelopev2/scrfd_10g_bnkps.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..454b95cff8010394dcfeaec2043be8d01bcf86c1
--- /dev/null
+++ b/ComfyUI-InstantID/models/antelopev2/scrfd_10g_bnkps.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2e683c9e3c78b5b43aa3b5aa9566d06ab1befeae995ebc8259a7eb30f7b2421b
+size 136
diff --git a/ComfyUI-InstantID/pipeline_stable_diffusion_xl_instantid.py b/ComfyUI-InstantID/pipeline_stable_diffusion_xl_instantid.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1ae737443658635545cf1351cdd5a84285c957d
--- /dev/null
+++ b/ComfyUI-InstantID/pipeline_stable_diffusion_xl_instantid.py
@@ -0,0 +1,753 @@
+# Copyright 2024 The InstantX Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+import cv2
+import math
+
+import numpy as np
+import PIL.Image
+import torch
+import torch.nn.functional as F
+
+from diffusers.image_processor import PipelineImageInput
+
+from diffusers.models import ControlNetModel
+
+from diffusers.utils import (
+ deprecate,
+ logging,
+ replace_example_docstring,
+)
+from diffusers.utils.torch_utils import is_compiled_module, is_torch_version
+from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput
+
+from diffusers import StableDiffusionXLControlNetPipeline
+from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
+from diffusers.utils.import_utils import is_xformers_available
+
+from .ip_adapter.resampler import Resampler
+from .ip_adapter.utils import is_torch2_available
+
+from .ip_adapter.attention_processor import AttnProcessor, IPAttnProcessor
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+
+EXAMPLE_DOC_STRING = """
+ Examples:
+ ```py
+ >>> # !pip install opencv-python transformers accelerate insightface
+ >>> import diffusers
+ >>> from diffusers.utils import load_image
+ >>> from diffusers.models import ControlNetModel
+
+ >>> import cv2
+ >>> import torch
+ >>> import numpy as np
+ >>> from PIL import Image
+
+ >>> from insightface.app import FaceAnalysis
+ >>> from pipeline_stable_diffusion_xl_instantid import StableDiffusionXLInstantIDPipeline, draw_kps
+
+ >>> # download 'antelopev2' under ./models
+ >>> app = FaceAnalysis(name='antelopev2', root='./', providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
+ >>> app.prepare(ctx_id=0, det_size=(640, 640))
+
+ >>> # download models under ./checkpoints
+ >>> face_adapter = f'./checkpoints/ip-adapter.bin'
+ >>> controlnet_path = f'./checkpoints/ControlNetModel'
+
+ >>> # load IdentityNet
+ >>> controlnet = ControlNetModel.from_pretrained(controlnet_path, torch_dtype=torch.float16)
+
+ >>> pipe = StableDiffusionXLInstantIDPipeline.from_pretrained(
+ ... "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, torch_dtype=torch.float16
+ ... )
+ >>> pipe.cuda()
+
+ >>> # load adapter
+ >>> pipe.load_ip_adapter_instantid(face_adapter)
+
+ >>> prompt = "analog film photo of a man. faded film, desaturated, 35mm photo, grainy, vignette, vintage, Kodachrome, Lomography, stained, highly detailed, found footage, masterpiece, best quality"
+ >>> negative_prompt = "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly, disfigured (lowres, low quality, worst quality:1.2), (text:1.2), watermark, painting, drawing, illustration, glitch,deformed, mutated, cross-eyed, ugly, disfigured"
+
+        >>> # load the face image
+        >>> face_image = load_image("your-example.jpg")
+
+ >>> face_info = app.get(cv2.cvtColor(np.array(face_image), cv2.COLOR_RGB2BGR))[-1]
+ >>> face_emb = face_info['embedding']
+ >>> face_kps = draw_kps(face_image, face_info['kps'])
+
+ >>> pipe.set_ip_adapter_scale(0.8)
+
+ >>> # generate image
+ >>> image = pipe(
+ ... prompt, image_embeds=face_emb, image=face_kps, controlnet_conditioning_scale=0.8
+ ... ).images[0]
+ ```
+"""
+
+def draw_kps(image_pil, kps, color_list=[(255,0,0), (0,255,0), (0,0,255), (255,255,0), (255,0,255)]):
+
+ stickwidth = 4
+ limbSeq = np.array([[0, 2], [1, 2], [3, 2], [4, 2]])
+ kps = np.array(kps)
+
+ w, h = image_pil.size
+ out_img = np.zeros([h, w, 3])
+
+ for i in range(len(limbSeq)):
+ index = limbSeq[i]
+ color = color_list[index[0]]
+
+ x = kps[index][:, 0]
+ y = kps[index][:, 1]
+ length = ((x[0] - x[1]) ** 2 + (y[0] - y[1]) ** 2) ** 0.5
+ angle = math.degrees(math.atan2(y[0] - y[1], x[0] - x[1]))
+ polygon = cv2.ellipse2Poly((int(np.mean(x)), int(np.mean(y))), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
+ out_img = cv2.fillConvexPoly(out_img.copy(), polygon, color)
+ out_img = (out_img * 0.6).astype(np.uint8)
+
+ for idx_kp, kp in enumerate(kps):
+ color = color_list[idx_kp]
+ x, y = kp
+ out_img = cv2.circle(out_img.copy(), (int(x), int(y)), 10, color, -1)
+
+ out_img_pil = PIL.Image.fromarray(out_img.astype(np.uint8))
+ return out_img_pil
+
+class StableDiffusionXLInstantIDPipeline(StableDiffusionXLControlNetPipeline):
+
+ def cuda(self, dtype=torch.float16, use_xformers=False):
+ self.to('cuda', dtype)
+
+ if hasattr(self, 'image_proj_model'):
+ self.image_proj_model.to(self.unet.device).to(self.unet.dtype)
+
+ if use_xformers:
+ if is_xformers_available():
+ import xformers
+ from packaging import version
+
+ xformers_version = version.parse(xformers.__version__)
+ if xformers_version == version.parse("0.0.16"):
+ logger.warn(
+ "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
+ )
+ self.enable_xformers_memory_efficient_attention()
+ else:
+ raise ValueError("xformers is not available. Make sure it is installed correctly")
+
+ def load_ip_adapter_instantid(self, model_ckpt, image_emb_dim=512, num_tokens=16, scale=0.5):
+ self.set_image_proj_model(model_ckpt, image_emb_dim, num_tokens)
+ self.set_ip_adapter(model_ckpt, num_tokens, scale)
+
+ def set_image_proj_model(self, model_ckpt, image_emb_dim=512, num_tokens=16):
+
+ image_proj_model = Resampler(
+ dim=1280,
+ depth=4,
+ dim_head=64,
+ heads=20,
+ num_queries=num_tokens,
+ embedding_dim=image_emb_dim,
+ output_dim=self.unet.config.cross_attention_dim,
+ ff_mult=4,
+ )
+
+ image_proj_model.eval()
+
+ self.image_proj_model = image_proj_model.to(self.device, dtype=self.dtype)
+ state_dict = torch.load(model_ckpt, map_location="cpu")
+ if 'image_proj' in state_dict:
+ state_dict = state_dict["image_proj"]
+ self.image_proj_model.load_state_dict(state_dict)
+
+ self.image_proj_model_in_features = image_emb_dim
+
+ def set_ip_adapter(self, model_ckpt, num_tokens, scale):
+
+ unet = self.unet
+ attn_procs = {}
+ for name in unet.attn_processors.keys():
+ cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
+ if name.startswith("mid_block"):
+ hidden_size = unet.config.block_out_channels[-1]
+ elif name.startswith("up_blocks"):
+ block_id = int(name[len("up_blocks.")])
+ hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
+ elif name.startswith("down_blocks"):
+ block_id = int(name[len("down_blocks.")])
+ hidden_size = unet.config.block_out_channels[block_id]
+ if cross_attention_dim is None:
+ attn_procs[name] = AttnProcessor().to(unet.device, dtype=unet.dtype)
+ else:
+ attn_procs[name] = IPAttnProcessor(hidden_size=hidden_size,
+ cross_attention_dim=cross_attention_dim,
+ scale=scale,
+ num_tokens=num_tokens).to(unet.device, dtype=unet.dtype)
+ unet.set_attn_processor(attn_procs)
+
+ state_dict = torch.load(model_ckpt, map_location="cpu")
+ ip_layers = torch.nn.ModuleList(self.unet.attn_processors.values())
+ if 'ip_adapter' in state_dict:
+ state_dict = state_dict['ip_adapter']
+ ip_layers.load_state_dict(state_dict)
+
+ def set_ip_adapter_scale(self, scale):
+ unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
+ for attn_processor in unet.attn_processors.values():
+ if isinstance(attn_processor, IPAttnProcessor):
+ attn_processor.scale = scale
+
+ def _encode_prompt_image_emb(self, prompt_image_emb, device, dtype, do_classifier_free_guidance):
+
+ if isinstance(prompt_image_emb, torch.Tensor):
+ prompt_image_emb = prompt_image_emb.clone().detach()
+ else:
+ prompt_image_emb = torch.tensor(prompt_image_emb)
+
+ prompt_image_emb = prompt_image_emb.to(device=device, dtype=dtype)
+ prompt_image_emb = prompt_image_emb.reshape([1, -1, self.image_proj_model_in_features])
+
+ if do_classifier_free_guidance:
+ prompt_image_emb = torch.cat([torch.zeros_like(prompt_image_emb), prompt_image_emb], dim=0)
+ else:
+ prompt_image_emb = torch.cat([prompt_image_emb], dim=0)
+
+ prompt_image_emb = self.image_proj_model(prompt_image_emb)
+ return prompt_image_emb
+
+ @torch.no_grad()
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
+ def __call__(
+ self,
+ prompt: Union[str, List[str]] = None,
+ prompt_2: Optional[Union[str, List[str]]] = None,
+ image: PipelineImageInput = None,
+ height: Optional[int] = None,
+ width: Optional[int] = None,
+ num_inference_steps: int = 50,
+ guidance_scale: float = 5.0,
+ negative_prompt: Optional[Union[str, List[str]]] = None,
+ negative_prompt_2: Optional[Union[str, List[str]]] = None,
+ num_images_per_prompt: Optional[int] = 1,
+ eta: float = 0.0,
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+ latents: Optional[torch.FloatTensor] = None,
+ prompt_embeds: Optional[torch.FloatTensor] = None,
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+ image_embeds: Optional[torch.FloatTensor] = None,
+ output_type: Optional[str] = "pil",
+ return_dict: bool = True,
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
+ controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
+ guess_mode: bool = False,
+ control_guidance_start: Union[float, List[float]] = 0.0,
+ control_guidance_end: Union[float, List[float]] = 1.0,
+ original_size: Tuple[int, int] = None,
+ crops_coords_top_left: Tuple[int, int] = (0, 0),
+ target_size: Tuple[int, int] = None,
+ negative_original_size: Optional[Tuple[int, int]] = None,
+ negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
+ negative_target_size: Optional[Tuple[int, int]] = None,
+ clip_skip: Optional[int] = None,
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
+ **kwargs,
+ ):
+ r"""
+ The call function to the pipeline for generation.
+
+ Args:
+ prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
+ prompt_2 (`str` or `List[str]`, *optional*):
+ The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
+ used in both text-encoders.
+ image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
+ `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
+ The ControlNet input condition to provide guidance to the `unet` for generation. If the type is
+ specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be
+ accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height
+ and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in
+ `init`, images must be passed as a list such that each element of the list can be correctly batched for
+ input to a single ControlNet.
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
+ The height in pixels of the generated image. Anything below 512 pixels won't work well for
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
+ and checkpoints that are not specifically fine-tuned on low resolutions.
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
+ The width in pixels of the generated image. Anything below 512 pixels won't work well for
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
+ and checkpoints that are not specifically fine-tuned on low resolutions.
+ num_inference_steps (`int`, *optional*, defaults to 50):
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+ expense of slower inference.
+ guidance_scale (`float`, *optional*, defaults to 5.0):
+ A higher guidance scale value encourages the model to generate images closely linked to the text
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
+ negative_prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
+ The prompt or prompts to guide what to not include in image generation. This is sent to `tokenizer_2`
+ and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders.
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
+ The number of images to generate per prompt.
+ eta (`float`, *optional*, defaults to 0.0):
+ Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
+ to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
+ generation deterministic.
+ latents (`torch.FloatTensor`, *optional*):
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+ tensor is generated by sampling using the supplied random `generator`.
+ prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
+ provided, text embeddings are generated from the `prompt` input argument.
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
+ not provided, pooled text embeddings are generated from `prompt` input argument.
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs (prompt
+ weighting). If not provided, pooled `negative_prompt_embeds` are generated from `negative_prompt` input
+ argument.
+ image_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated image embeddings.
+ output_type (`str`, *optional*, defaults to `"pil"`):
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
+ return_dict (`bool`, *optional*, defaults to `True`):
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
+ plain tuple.
+ cross_attention_kwargs (`dict`, *optional*):
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
+ controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
+ The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
+ to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set
+ the corresponding scale as a list.
+ guess_mode (`bool`, *optional*, defaults to `False`):
+ The ControlNet encoder tries to recognize the content of the input image even if you remove all
+ prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended.
+ control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
+ The percentage of total steps at which the ControlNet starts applying.
+ control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
+ The percentage of total steps at which the ControlNet stops applying.
+ original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
+ If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
+ `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
+ explained in section 2.2 of
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
+ crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
+ `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
+ `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
+ `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
+ target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
+ For most cases, `target_size` should be set to the desired height and width of the generated image. If
+ not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
+ section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
+ negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
+ To negatively condition the generation process based on a specific image resolution. Part of SDXL's
+ micro-conditioning as explained in section 2.2 of
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
+ negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
+                To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
+ micro-conditioning as explained in section 2.2 of
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
+ negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
+                To negatively condition the generation process based on a target image resolution. It should be the same
+                as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
+ clip_skip (`int`, *optional*):
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
+ the output of the pre-final layer will be used for computing the prompt embeddings.
+ callback_on_step_end (`Callable`, *optional*):
+                A function that is called at the end of each denoising step during inference. The function is called
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
+ `callback_on_step_end_tensor_inputs`.
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
+                `._callback_tensor_inputs` attribute of your pipeline class.
+
+ Examples:
+
+ Returns:
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
+ otherwise a `tuple` is returned containing the output images.
+ """
+
+ callback = kwargs.pop("callback", None)
+ callback_steps = kwargs.pop("callback_steps", None)
+
+ if callback is not None:
+ deprecate(
+ "callback",
+ "1.0.0",
+ "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
+ )
+ if callback_steps is not None:
+ deprecate(
+ "callback_steps",
+ "1.0.0",
+ "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
+ )
+
+ controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
+
+ # align format for control guidance
+ if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
+ control_guidance_start = len(control_guidance_end) * [control_guidance_start]
+ elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
+ control_guidance_end = len(control_guidance_start) * [control_guidance_end]
+ elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
+ mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
+ control_guidance_start, control_guidance_end = (
+ mult * [control_guidance_start],
+ mult * [control_guidance_end],
+ )
+
+ # 1. Check inputs. Raise error if not correct
+ self.check_inputs(
+ prompt,
+ prompt_2,
+ image,
+ callback_steps,
+ negative_prompt,
+ negative_prompt_2,
+ prompt_embeds,
+ negative_prompt_embeds,
+ pooled_prompt_embeds,
+ negative_pooled_prompt_embeds,
+ controlnet_conditioning_scale,
+ control_guidance_start,
+ control_guidance_end,
+ callback_on_step_end_tensor_inputs,
+ )
+
+ self._guidance_scale = guidance_scale
+ self._clip_skip = clip_skip
+ self._cross_attention_kwargs = cross_attention_kwargs
+
+ # 2. Define call parameters
+ if prompt is not None and isinstance(prompt, str):
+ batch_size = 1
+ elif prompt is not None and isinstance(prompt, list):
+ batch_size = len(prompt)
+ else:
+ batch_size = prompt_embeds.shape[0]
+
+ device = self._execution_device
+
+ if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
+
+ global_pool_conditions = (
+ controlnet.config.global_pool_conditions
+ if isinstance(controlnet, ControlNetModel)
+ else controlnet.nets[0].config.global_pool_conditions
+ )
+ guess_mode = guess_mode or global_pool_conditions
+
+ # 3.1 Encode input prompt
+ text_encoder_lora_scale = (
+ self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
+ )
+ (
+ prompt_embeds,
+ negative_prompt_embeds,
+ pooled_prompt_embeds,
+ negative_pooled_prompt_embeds,
+ ) = self.encode_prompt(
+ prompt,
+ prompt_2,
+ device,
+ num_images_per_prompt,
+ self.do_classifier_free_guidance,
+ negative_prompt,
+ negative_prompt_2,
+ prompt_embeds=prompt_embeds,
+ negative_prompt_embeds=negative_prompt_embeds,
+ pooled_prompt_embeds=pooled_prompt_embeds,
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
+ lora_scale=text_encoder_lora_scale,
+ clip_skip=self.clip_skip,
+ )
+
+ # 3.2 Encode image prompt
+ prompt_image_emb = self._encode_prompt_image_emb(image_embeds,
+ device,
+ self.unet.dtype,
+ self.do_classifier_free_guidance)
+
+ # 4. Prepare image
+ if isinstance(controlnet, ControlNetModel):
+ image = self.prepare_image(
+ image=image,
+ width=width,
+ height=height,
+ batch_size=batch_size * num_images_per_prompt,
+ num_images_per_prompt=num_images_per_prompt,
+ device=device,
+ dtype=controlnet.dtype,
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
+ guess_mode=guess_mode,
+ )
+ height, width = image.shape[-2:]
+ elif isinstance(controlnet, MultiControlNetModel):
+ images = []
+
+ for image_ in image:
+ image_ = self.prepare_image(
+ image=image_,
+ width=width,
+ height=height,
+ batch_size=batch_size * num_images_per_prompt,
+ num_images_per_prompt=num_images_per_prompt,
+ device=device,
+ dtype=controlnet.dtype,
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
+ guess_mode=guess_mode,
+ )
+
+ images.append(image_)
+
+ image = images
+ height, width = image[0].shape[-2:]
+ else:
+ assert False
+
+ # 5. Prepare timesteps
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
+ timesteps = self.scheduler.timesteps
+ self._num_timesteps = len(timesteps)
+
+ # 6. Prepare latent variables
+ num_channels_latents = self.unet.config.in_channels
+ latents = self.prepare_latents(
+ batch_size * num_images_per_prompt,
+ num_channels_latents,
+ height,
+ width,
+ prompt_embeds.dtype,
+ device,
+ generator,
+ latents,
+ )
+
+ # 6.5 Optionally get Guidance Scale Embedding
+ timestep_cond = None
+ if self.unet.config.time_cond_proj_dim is not None:
+ guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
+ timestep_cond = self.get_guidance_scale_embedding(
+ guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
+ ).to(device=device, dtype=latents.dtype)
+
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
+
+ # 7.1 Create tensor stating which controlnets to keep
+ controlnet_keep = []
+ for i in range(len(timesteps)):
+ keeps = [
+ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
+ for s, e in zip(control_guidance_start, control_guidance_end)
+ ]
+ controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
+
+ # 7.2 Prepare added time ids & embeddings
+ if isinstance(image, list):
+ original_size = original_size or image[0].shape[-2:]
+ else:
+ original_size = original_size or image.shape[-2:]
+ target_size = target_size or (height, width)
+
+ add_text_embeds = pooled_prompt_embeds
+ if self.text_encoder_2 is None:
+ text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
+ else:
+ text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
+
+ add_time_ids = self._get_add_time_ids(
+ original_size,
+ crops_coords_top_left,
+ target_size,
+ dtype=prompt_embeds.dtype,
+ text_encoder_projection_dim=text_encoder_projection_dim,
+ )
+
+ if negative_original_size is not None and negative_target_size is not None:
+ negative_add_time_ids = self._get_add_time_ids(
+ negative_original_size,
+ negative_crops_coords_top_left,
+ negative_target_size,
+ dtype=prompt_embeds.dtype,
+ text_encoder_projection_dim=text_encoder_projection_dim,
+ )
+ else:
+ negative_add_time_ids = add_time_ids
+
+ if self.do_classifier_free_guidance:
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
+ add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
+ add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
+
+ prompt_embeds = prompt_embeds.to(device)
+ add_text_embeds = add_text_embeds.to(device)
+ add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
+ encoder_hidden_states = torch.cat([prompt_embeds, prompt_image_emb], dim=1)
+
+ # 8. Denoising loop
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
+ is_unet_compiled = is_compiled_module(self.unet)
+ is_controlnet_compiled = is_compiled_module(self.controlnet)
+ is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1")
+
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
+ for i, t in enumerate(timesteps):
+ # Relevant thread:
+ # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428
+ if (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1:
+ torch._inductor.cudagraph_mark_step_begin()
+ # expand the latents if we are doing classifier free guidance
+ latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+
+ added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
+
+ # controlnet(s) inference
+ if guess_mode and self.do_classifier_free_guidance:
+ # Infer ControlNet only for the conditional batch.
+ control_model_input = latents
+ control_model_input = self.scheduler.scale_model_input(control_model_input, t)
+ controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
+ controlnet_added_cond_kwargs = {
+ "text_embeds": add_text_embeds.chunk(2)[1],
+ "time_ids": add_time_ids.chunk(2)[1],
+ }
+ else:
+ control_model_input = latent_model_input
+ controlnet_prompt_embeds = prompt_embeds
+ controlnet_added_cond_kwargs = added_cond_kwargs
+
+ if isinstance(controlnet_keep[i], list):
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
+ else:
+ controlnet_cond_scale = controlnet_conditioning_scale
+ if isinstance(controlnet_cond_scale, list):
+ controlnet_cond_scale = controlnet_cond_scale[0]
+ cond_scale = controlnet_cond_scale * controlnet_keep[i]
+
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
+ control_model_input,
+ t,
+ encoder_hidden_states=prompt_image_emb,
+ controlnet_cond=image,
+ conditioning_scale=cond_scale,
+ guess_mode=guess_mode,
+ added_cond_kwargs=controlnet_added_cond_kwargs,
+ return_dict=False,
+ )
+
+ if guess_mode and self.do_classifier_free_guidance:
+                    # Inferred ControlNet only for the conditional batch.
+ # To apply the output of ControlNet to both the unconditional and conditional batches,
+ # add 0 to the unconditional batch to keep it unchanged.
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
+ mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
+
+ # predict the noise residual
+ noise_pred = self.unet(
+ latent_model_input,
+ t,
+ encoder_hidden_states=encoder_hidden_states,
+ timestep_cond=timestep_cond,
+ cross_attention_kwargs=self.cross_attention_kwargs,
+ down_block_additional_residuals=down_block_res_samples,
+ mid_block_additional_residual=mid_block_res_sample,
+ added_cond_kwargs=added_cond_kwargs,
+ return_dict=False,
+ )[0]
+
+ # perform guidance
+ if self.do_classifier_free_guidance:
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+ # compute the previous noisy sample x_t -> x_t-1
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
+
+ if callback_on_step_end is not None:
+ callback_kwargs = {}
+ for k in callback_on_step_end_tensor_inputs:
+ callback_kwargs[k] = locals()[k]
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
+
+ latents = callback_outputs.pop("latents", latents)
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
+
+ # call the callback, if provided
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+ progress_bar.update()
+ if callback is not None and i % callback_steps == 0:
+ step_idx = i // getattr(self.scheduler, "order", 1)
+ callback(step_idx, t, latents)
+
+ if not output_type == "latent":
+ # make sure the VAE is in float32 mode, as it overflows in float16
+ needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
+ if needs_upcasting:
+ self.upcast_vae()
+ latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
+
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
+
+ # cast back to fp16 if needed
+ if needs_upcasting:
+ self.vae.to(dtype=torch.float16)
+ else:
+ image = latents
+
+ if not output_type == "latent":
+ # apply watermark if available
+ if self.watermark is not None:
+ image = self.watermark.apply_watermark(image)
+
+ image = self.image_processor.postprocess(image, output_type=output_type)
+
+ # Offload all models
+ self.maybe_free_model_hooks()
+
+ if not return_dict:
+ return (image,)
+
+ return StableDiffusionXLPipelineOutput(images=image)
diff --git a/ComfyUI-InstantID/requirements.txt b/ComfyUI-InstantID/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b3855e35bc916ed1cf26c8e82f0a781cd61ad50f
--- /dev/null
+++ b/ComfyUI-InstantID/requirements.txt
@@ -0,0 +1,6 @@
+opencv-python
+transformers
+accelerate
+insightface
+diffusers
+onnxruntime-gpu
diff --git a/ComfyUI-InstantID/style_template.py b/ComfyUI-InstantID/style_template.py
new file mode 100644
index 0000000000000000000000000000000000000000..af8b7e873ecac07e653b35c749f09499f77d3cdb
--- /dev/null
+++ b/ComfyUI-InstantID/style_template.py
@@ -0,0 +1,49 @@
+style_list = [
+ {
+ "name": "(No style)",
+ "prompt": "{prompt}",
+ "negative_prompt": "",
+ },
+ {
+ "name": "Watercolor",
+ "prompt": "watercolor painting, {prompt}. vibrant, beautiful, painterly, detailed, textural, artistic",
+ "negative_prompt": "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, anime, photorealistic, 35mm film, deformed, glitch, low contrast, noisy",
+ },
+ {
+ "name": "Film Noir",
+ "prompt": "film noir style, ink sketch|vector, {prompt} highly detailed, sharp focus, ultra sharpness, monochrome, high contrast, dramatic shadows, 1940s style, mysterious, cinematic",
+ "negative_prompt": "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, (frame:1.2), deformed, ugly, deformed eyes, blur, out of focus, blurry, deformed cat, deformed, photo, anthropomorphic cat, monochrome, photo, pet collar, gun, weapon, blue, 3d, drones, drone, buildings in background, green",
+ },
+ {
+ "name": "Neon",
+ "prompt": "masterpiece painting, buildings in the backdrop, kaleidoscope, lilac orange blue cream fuchsia bright vivid gradient colors, the scene is cinematic, {prompt}, emotional realism, double exposure, watercolor ink pencil, graded wash, color layering, magic realism, figurative painting, intricate motifs, organic tracery, polished",
+ "negative_prompt": "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, (frame:1.2), deformed, ugly, deformed eyes, blur, out of focus, blurry, deformed cat, deformed, photo, anthropomorphic cat, monochrome, photo, pet collar, gun, weapon, blue, 3d, drones, drone, buildings in background, green",
+ },
+ {
+ "name": "Jungle",
+ "prompt": 'waist-up "{prompt} in a Jungle" by Syd Mead, tangerine cold color palette, muted colors, detailed, 8k,photo r3al,dripping paint,3d toon style,3d style,Movie Still',
+ "negative_prompt": "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, (frame:1.2), deformed, ugly, deformed eyes, blur, out of focus, blurry, deformed cat, deformed, photo, anthropomorphic cat, monochrome, photo, pet collar, gun, weapon, blue, 3d, drones, drone, buildings in background, green",
+ },
+ {
+ "name": "Mars",
+ "prompt": "{prompt}, Post-apocalyptic. Mars Colony, Scavengers roam the wastelands searching for valuable resources, rovers, bright morning sunlight shining, (detailed) (intricate) (8k) (HDR) (cinematic lighting) (sharp focus)",
+ "negative_prompt": "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, (frame:1.2), deformed, ugly, deformed eyes, blur, out of focus, blurry, deformed cat, deformed, photo, anthropomorphic cat, monochrome, photo, pet collar, gun, weapon, blue, 3d, drones, drone, buildings in background, green",
+ },
+ {
+ "name": "Vibrant Color",
+ "prompt": "vibrant colorful, ink sketch|vector|2d colors, at nightfall, sharp focus, {prompt}, highly detailed, sharp focus, the clouds,colorful,ultra sharpness",
+ "negative_prompt": "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, (frame:1.2), deformed, ugly, deformed eyes, blur, out of focus, blurry, deformed cat, deformed, photo, anthropomorphic cat, monochrome, photo, pet collar, gun, weapon, blue, 3d, drones, drone, buildings in background, green",
+ },
+ {
+ "name": "Snow",
+ "prompt": "cinema 4d render, {prompt}, high contrast, vibrant and saturated, sico style, surrounded by magical glow,floating ice shards, snow crystals, cold, windy background, frozen natural landscape in background cinematic atmosphere,highly detailed, sharp focus, intricate design, 3d, unreal engine, octane render, CG best quality, highres, photorealistic, dramatic lighting, artstation, concept art, cinematic, epic Steven Spielberg movie still, sharp focus, smoke, sparks, art by pascal blanche and greg rutkowski and repin, trending on artstation, hyperrealism painting, matte painting, 4k resolution",
+ "negative_prompt": "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, (frame:1.2), deformed, ugly, deformed eyes, blur, out of focus, blurry, deformed cat, deformed, photo, anthropomorphic cat, monochrome, photo, pet collar, gun, weapon, blue, 3d, drones, drone, buildings in background, green",
+ },
+ {
+ "name": "Line art",
+ "prompt": "line art drawing {prompt} . professional, sleek, modern, minimalist, graphic, line art, vector graphics",
+ "negative_prompt": "anime, photorealistic, 35mm film, deformed, glitch, blurry, noisy, off-center, deformed, cross-eyed, closed eyes, bad anatomy, ugly, disfigured, mutated, realism, realistic, impressionism, expressionism, oil, acrylic",
+ },
+]
+
+styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
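For clarity, here is a minimal sketch of how the `styles` dictionary built above is typically consumed: the `{prompt}` placeholder in a style's positive template is replaced with the user's prompt, and the style's negative prompt is combined with the user's. The `apply_style` helper below is illustrative only and is not part of this file.

```python
# Illustrative sketch (not part of the file above): applying a style template.
def apply_style(style_name: str, positive: str, negative: str = "") -> tuple[str, str]:
    # Unknown style names fall back to a pass-through template.
    p, n = styles.get(style_name, ("{prompt}", ""))
    # Substitute the user's prompt into the positive template and
    # append the user's negative prompt to the style's negative prompt.
    return p.replace("{prompt}", positive), f"{n}, {negative}" if negative else n

positive_prompt, negative_prompt = apply_style("Snow", "portrait of a woman, soft light")
```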
diff --git a/ComfyUI-KJNodes/.github/FUNDING.yml b/ComfyUI-KJNodes/.github/FUNDING.yml
new file mode 100644
index 0000000000000000000000000000000000000000..3f53dcd3b72ebc612b9f425af64f8158e48c3f02
--- /dev/null
+++ b/ComfyUI-KJNodes/.github/FUNDING.yml
@@ -0,0 +1,2 @@
+github: [kijai]
+custom: ["https://www.paypal.me/kijaidesign"]
diff --git a/ComfyUI-KJNodes/.github/workflows/publish.yml b/ComfyUI-KJNodes/.github/workflows/publish.yml
new file mode 100644
index 0000000000000000000000000000000000000000..6e7201833e56c009e347731016b54e1c6d2254ab
--- /dev/null
+++ b/ComfyUI-KJNodes/.github/workflows/publish.yml
@@ -0,0 +1,21 @@
+name: Publish to Comfy registry
+on:
+ workflow_dispatch:
+ push:
+ branches:
+ - main
+ paths:
+ - "pyproject.toml"
+
+jobs:
+ publish-node:
+ name: Publish Custom Node to registry
+ runs-on: ubuntu-latest
+ steps:
+ - name: Check out code
+ uses: actions/checkout@v4
+ - name: Publish Custom Node
+ uses: Comfy-Org/publish-node-action@main
+ with:
+ ## Add your own personal access token to your Github Repository secrets and reference it here.
+ personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }}
\ No newline at end of file
diff --git a/ComfyUI-KJNodes/.gitignore b/ComfyUI-KJNodes/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..8dce7660175c676eb52991af9dd28d4046aeab62
--- /dev/null
+++ b/ComfyUI-KJNodes/.gitignore
@@ -0,0 +1,11 @@
+__pycache__
+/venv
+*.code-workspace
+.history
+.vscode
+*.ckpt
+*.pth
+types
+models
+jsconfig.json
+custom_dimensions.json
diff --git a/ComfyUI-KJNodes/LICENSE b/ComfyUI-KJNodes/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..3877ae0a7ff6f94ac222fd704e112723db776114
--- /dev/null
+++ b/ComfyUI-KJNodes/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ComfyUI-KJNodes/README.md b/ComfyUI-KJNodes/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..d1d82ffe1f86caee7fcadf2dae908407462cbc50
--- /dev/null
+++ b/ComfyUI-KJNodes/README.md
@@ -0,0 +1,65 @@
+# KJNodes for ComfyUI
+
+Various quality-of-life and masking-related nodes and scripts for ComfyUI, made by combining the functionality of existing nodes.
+
+I know I'm bad at documentation, especially for this project, which has grown from random practice nodes into... too many lines in one file.
+I have, however, started adding descriptions to the nodes themselves; there's a small ? you can click for info on what each node does.
+This is still a work in progress, like everything else.
+
+# Installation
+1. Clone this repo into `custom_nodes` folder.
+2. Install dependencies: `pip install -r requirements.txt`
+   or, if you use the portable install, run this in the ComfyUI_windows_portable folder:
+
+ `python_embeded\python.exe -m pip install -r ComfyUI\custom_nodes\ComfyUI-KJNodes\requirements.txt`
+
+
+## Javascript
+
+### browserstatus.js
+Sets the favicon to a green circle when nothing is processing, and to red while processing; it also shows the progress percentage and the length of your queue.
+Off by default; it needs to be enabled from the options, and it overrides the Custom-Scripts favicon when enabled.
+
+## Nodes:
+
+### Set/Get
+
+Javascript nodes to set and get constants in order to reduce unnecessary lines. They take in and return anything and are purely visual nodes.
+The right-click menu of these nodes now has an option to visualize the paths, as well as an option to jump to the corresponding node on the other end.
+
+**Known limitations**:
+ - Will not work with any node that dynamically sets its outputs, such as reroute or another Set/Get node
+ - Will not work when directly connected to a bypassed node
+ - There may be other conflicts with javascript-based nodes.
+
+### ColorToMask
+
+RGB color value to mask; works with batches and AnimateDiff.
+
+### ConditioningMultiCombine
+
+Combine any number of conditions, saves space.
+
+### ConditioningSetMaskAndCombine
+
+Mask and combine two sets of conditions, saves space.
+
+### GrowMaskWithBlur
+
+Grows or shrinks (with negative values) a mask, with an option to invert the input; returns the mask and the inverted mask. Additionally blurs the mask; this is a slow operation, especially with big batches.
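Since the description above is brief, here is a rough sketch of the underlying technique (morphological dilation via max pooling, followed by a separable Gaussian blur). It assumes a ComfyUI-style mask tensor of shape (B, H, W) with values in [0, 1]; it illustrates the idea only and is not the node's actual implementation.

```python
# Rough sketch of the grow-then-blur idea (not the node's actual code).
import torch
import torch.nn.functional as F

def grow_mask_with_blur(mask: torch.Tensor, expand: int = 8, blur_sigma: float = 4.0) -> torch.Tensor:
    m = mask.unsqueeze(1)                      # (B, 1, H, W) for conv/pool ops
    if expand > 0:                             # grow = dilation via max pooling
        m = F.max_pool2d(m, kernel_size=2 * expand + 1, stride=1, padding=expand)
    elif expand < 0:                           # shrink = erosion (dilate the inverse)
        k = -expand
        m = 1.0 - F.max_pool2d(1.0 - m, kernel_size=2 * k + 1, stride=1, padding=k)
    if blur_sigma > 0:
        radius = int(3 * blur_sigma)           # simple separable Gaussian blur
        x = torch.arange(-radius, radius + 1, dtype=m.dtype, device=m.device)
        g = torch.exp(-(x ** 2) / (2 * blur_sigma ** 2))
        g = (g / g.sum()).view(1, 1, 1, -1)
        m = F.conv2d(m, g, padding=(0, radius))             # blur along width
        m = F.conv2d(m, g.transpose(2, 3), padding=(radius, 0))  # blur along height
    return m.squeeze(1).clamp(0.0, 1.0)
```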
+
+### RoundMask
+
+![image](https://github.com/kijai/ComfyUI-KJNodes/assets/40791699/52c85202-f74e-4b96-9dac-c8bda5ddcc40)
+
+### WidgetToString
+Outputs the value of a widget on any node as a string.
+![example of use](docs/images/2024-04-03_20_49_29-ComfyUI.png)
+
+Enable node ID display from the Manager menu to get the ID of the node you want to read a widget from:
+![enable node id display](docs/images/319121636-706b5081-9120-4a29-bd76-901691ada688.png)
+
+Use the node ID of the target node, and add the name of the widget to read from:
+![use node id and widget name](docs/images/319121566-05f66385-7568-4b1f-8bbc-11053660b02f.png)
+
+Recreating or reloading the target node will change its ID, and the WidgetToString node will no longer be able to find it until you update the node ID value with the new one.
diff --git a/ComfyUI-KJNodes/__init__.py b/ComfyUI-KJNodes/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..94bedf43f799b0954f9c39b24f0943e53fa91843
--- /dev/null
+++ b/ComfyUI-KJNodes/__init__.py
@@ -0,0 +1,195 @@
+from .nodes.nodes import *
+from .nodes.curve_nodes import *
+from .nodes.batchcrop_nodes import *
+from .nodes.audioscheduler_nodes import *
+from .nodes.image_nodes import *
+from .nodes.intrinsic_lora_nodes import *
+from .nodes.mask_nodes import *
+NODE_CONFIG = {
+ #constants
+ "BOOLConstant": {"class": BOOLConstant, "name": "BOOL Constant"},
+ "INTConstant": {"class": INTConstant, "name": "INT Constant"},
+ "FloatConstant": {"class": FloatConstant, "name": "Float Constant"},
+ "StringConstant": {"class": StringConstant, "name": "String Constant"},
+ "StringConstantMultiline": {"class": StringConstantMultiline, "name": "String Constant Multiline"},
+ #conditioning
+ "ConditioningMultiCombine": {"class": ConditioningMultiCombine, "name": "Conditioning Multi Combine"},
+ "ConditioningSetMaskAndCombine": {"class": ConditioningSetMaskAndCombine, "name": "ConditioningSetMaskAndCombine"},
+ "ConditioningSetMaskAndCombine3": {"class": ConditioningSetMaskAndCombine3, "name": "ConditioningSetMaskAndCombine3"},
+ "ConditioningSetMaskAndCombine4": {"class": ConditioningSetMaskAndCombine4, "name": "ConditioningSetMaskAndCombine4"},
+ "ConditioningSetMaskAndCombine5": {"class": ConditioningSetMaskAndCombine5, "name": "ConditioningSetMaskAndCombine5"},
+ "CondPassThrough": {"class": CondPassThrough},
+ #masking
+ "DownloadAndLoadCLIPSeg": {"class": DownloadAndLoadCLIPSeg, "name": "(Down)load CLIPSeg"},
+ "BatchCLIPSeg": {"class": BatchCLIPSeg, "name": "Batch CLIPSeg"},
+ "ColorToMask": {"class": ColorToMask, "name": "Color To Mask"},
+ "CreateGradientMask": {"class": CreateGradientMask, "name": "Create Gradient Mask"},
+ "CreateTextMask": {"class": CreateTextMask, "name": "Create Text Mask"},
+ "CreateAudioMask": {"class": CreateAudioMask, "name": "Create Audio Mask"},
+ "CreateFadeMask": {"class": CreateFadeMask, "name": "Create Fade Mask"},
+ "CreateFadeMaskAdvanced": {"class": CreateFadeMaskAdvanced, "name": "Create Fade Mask Advanced"},
+ "CreateFluidMask": {"class": CreateFluidMask, "name": "Create Fluid Mask"},
+ "CreateShapeMask": {"class": CreateShapeMask, "name": "Create Shape Mask"},
+ "CreateVoronoiMask": {"class": CreateVoronoiMask, "name": "Create Voronoi Mask"},
+ "CreateMagicMask": {"class": CreateMagicMask, "name": "Create Magic Mask"},
+ "GetMaskSizeAndCount": {"class": GetMaskSizeAndCount, "name": "Get Mask Size & Count"},
+ "GrowMaskWithBlur": {"class": GrowMaskWithBlur, "name": "Grow Mask With Blur"},
+ "MaskBatchMulti": {"class": MaskBatchMulti, "name": "Mask Batch Multi"},
+ "OffsetMask": {"class": OffsetMask, "name": "Offset Mask"},
+ "RemapMaskRange": {"class": RemapMaskRange, "name": "Remap Mask Range"},
+ "ResizeMask": {"class": ResizeMask, "name": "Resize Mask"},
+ "RoundMask": {"class": RoundMask, "name": "Round Mask"},
+ #images
+ "AddLabel": {"class": AddLabel, "name": "Add Label"},
+ "ColorMatch": {"class": ColorMatch, "name": "Color Match"},
+ "CrossFadeImages": {"class": CrossFadeImages, "name": "Cross Fade Images"},
+ "CrossFadeImagesMulti": {"class": CrossFadeImagesMulti, "name": "Cross Fade Images Multi"},
+ "GetImagesFromBatchIndexed": {"class": GetImagesFromBatchIndexed, "name": "Get Images From Batch Indexed"},
+ "GetImageRangeFromBatch": {"class": GetImageRangeFromBatch, "name": "Get Image or Mask Range From Batch"},
+ "GetImageSizeAndCount": {"class": GetImageSizeAndCount, "name": "Get Image Size & Count"},
+ "FastPreview": {"class": FastPreview, "name": "Fast Preview"},
+ "ImageAndMaskPreview": {"class": ImageAndMaskPreview},
+ "ImageAddMulti": {"class": ImageAddMulti, "name": "Image Add Multi"},
+ "ImageBatchMulti": {"class": ImageBatchMulti, "name": "Image Batch Multi"},
+ "ImageBatchRepeatInterleaving": {"class": ImageBatchRepeatInterleaving},
+ "ImageBatchTestPattern": {"class": ImageBatchTestPattern, "name": "Image Batch Test Pattern"},
+ "ImageConcanate": {"class": ImageConcanate, "name": "Image Concatenate"},
+ "ImageConcatFromBatch": {"class": ImageConcatFromBatch, "name": "Image Concatenate From Batch"},
+ "ImageConcatMulti": {"class": ImageConcatMulti, "name": "Image Concatenate Multi"},
+ "ImageCropByMaskAndResize": {"class": ImageCropByMaskAndResize, "name": "Image Crop By Mask And Resize"},
+ "ImageUncropByMask": {"class": ImageUncropByMask, "name": "Image Uncrop By Mask"},
+ "ImageGrabPIL": {"class": ImageGrabPIL, "name": "Image Grab PIL"},
+ "ImageGridComposite2x2": {"class": ImageGridComposite2x2, "name": "Image Grid Composite 2x2"},
+ "ImageGridComposite3x3": {"class": ImageGridComposite3x3, "name": "Image Grid Composite 3x3"},
+ "ImageGridtoBatch": {"class": ImageGridtoBatch, "name": "Image Grid To Batch"},
+ "ImageNormalize_Neg1_To_1": {"class": ImageNormalize_Neg1_To_1, "name": "Image Normalize -1 to 1"},
+ "ImagePass": {"class": ImagePass},
+ "ImagePadForOutpaintMasked": {"class": ImagePadForOutpaintMasked, "name": "Image Pad For Outpaint Masked"},
+ "ImagePadForOutpaintTargetSize": {"class": ImagePadForOutpaintTargetSize, "name": "Image Pad For Outpaint Target Size"},
+ "ImageResizeKJ": {"class": ImageResizeKJ, "name": "Resize Image"},
+ "ImageUpscaleWithModelBatched": {"class": ImageUpscaleWithModelBatched, "name": "Image Upscale With Model Batched"},
+ "InsertImagesToBatchIndexed": {"class": InsertImagesToBatchIndexed, "name": "Insert Images To Batch Indexed"},
+ "LoadAndResizeImage": {"class": LoadAndResizeImage, "name": "Load & Resize Image"},
+ "LoadImagesFromFolderKJ": {"class": LoadImagesFromFolderKJ, "name": "Load Images From Folder (KJ)"},
+ "MergeImageChannels": {"class": MergeImageChannels, "name": "Merge Image Channels"},
+ "PreviewAnimation": {"class": PreviewAnimation, "name": "Preview Animation"},
+ "RemapImageRange": {"class": RemapImageRange, "name": "Remap Image Range"},
+ "ReverseImageBatch": {"class": ReverseImageBatch, "name": "Reverse Image Batch"},
+ "ReplaceImagesInBatch": {"class": ReplaceImagesInBatch, "name": "Replace Images In Batch"},
+ "SaveImageWithAlpha": {"class": SaveImageWithAlpha, "name": "Save Image With Alpha"},
+ "SaveImageKJ": {"class": SaveImageKJ, "name": "Save Image KJ"},
+ "ShuffleImageBatch": {"class": ShuffleImageBatch, "name": "Shuffle Image Batch"},
+ "SplitImageChannels": {"class": SplitImageChannels, "name": "Split Image Channels"},
+ "TransitionImagesMulti": {"class": TransitionImagesMulti, "name": "Transition Images Multi"},
+ "TransitionImagesInBatch": {"class": TransitionImagesInBatch, "name": "Transition Images In Batch"},
+ #batch cropping
+ "BatchCropFromMask": {"class": BatchCropFromMask, "name": "Batch Crop From Mask"},
+ "BatchCropFromMaskAdvanced": {"class": BatchCropFromMaskAdvanced, "name": "Batch Crop From Mask Advanced"},
+ "FilterZeroMasksAndCorrespondingImages": {"class": FilterZeroMasksAndCorrespondingImages},
+ "InsertImageBatchByIndexes": {"class": InsertImageBatchByIndexes, "name": "Insert Image Batch By Indexes"},
+ "BatchUncrop": {"class": BatchUncrop, "name": "Batch Uncrop"},
+ "BatchUncropAdvanced": {"class": BatchUncropAdvanced, "name": "Batch Uncrop Advanced"},
+ "SplitBboxes": {"class": SplitBboxes, "name": "Split Bboxes"},
+ "BboxToInt": {"class": BboxToInt, "name": "Bbox To Int"},
+ "BboxVisualize": {"class": BboxVisualize, "name": "Bbox Visualize"},
+ #noise
+ "GenerateNoise": {"class": GenerateNoise, "name": "Generate Noise"},
+ "FlipSigmasAdjusted": {"class": FlipSigmasAdjusted, "name": "Flip Sigmas Adjusted"},
+ "InjectNoiseToLatent": {"class": InjectNoiseToLatent, "name": "Inject Noise To Latent"},
+ "CustomSigmas": {"class": CustomSigmas, "name": "Custom Sigmas"},
+ #utility
+ "WidgetToString": {"class": WidgetToString, "name": "Widget To String"},
+ "DummyOut": {"class": DummyOut, "name": "Dummy Out"},
+ "GetLatentsFromBatchIndexed": {"class": GetLatentsFromBatchIndexed, "name": "Get Latents From Batch Indexed"},
+ "ScaleBatchPromptSchedule": {"class": ScaleBatchPromptSchedule, "name": "Scale Batch Prompt Schedule"},
+ "CameraPoseVisualizer": {"class": CameraPoseVisualizer, "name": "Camera Pose Visualizer"},
+ "AppendStringsToList": {"class": AppendStringsToList, "name": "Append Strings To List"},
+ "JoinStrings": {"class": JoinStrings, "name": "Join Strings"},
+ "JoinStringMulti": {"class": JoinStringMulti, "name": "Join String Multi"},
+ "SomethingToString": {"class": SomethingToString, "name": "Something To String"},
+ "Sleep": {"class": Sleep, "name": "Sleep"},
+ "VRAM_Debug": {"class": VRAM_Debug, "name": "VRAM Debug"},
+ "EmptyLatentImagePresets": {"class": EmptyLatentImagePresets, "name": "Empty Latent Image Presets"},
+ "EmptyLatentImageCustomPresets": {"class": EmptyLatentImageCustomPresets, "name": "Empty Latent Image Custom Presets"},
+ "ModelPassThrough": {"class": ModelPassThrough, "name": "ModelPass"},
+ "ModelSaveKJ": {"class": ModelSaveKJ, "name": "Model Save KJ"},
+ "SetShakkerLabsUnionControlNetType": {"class": SetShakkerLabsUnionControlNetType, "name": "Set Shakker Labs Union ControlNet Type"},
+ #audioscheduler stuff
+ "NormalizedAmplitudeToMask": {"class": NormalizedAmplitudeToMask},
+ "NormalizedAmplitudeToFloatList": {"class": NormalizedAmplitudeToFloatList},
+ "OffsetMaskByNormalizedAmplitude": {"class": OffsetMaskByNormalizedAmplitude},
+ "ImageTransformByNormalizedAmplitude": {"class": ImageTransformByNormalizedAmplitude},
+ #curve nodes
+ "SplineEditor": {"class": SplineEditor, "name": "Spline Editor"},
+ "CreateShapeImageOnPath": {"class": CreateShapeImageOnPath, "name": "Create Shape Image On Path"},
+ "CreateShapeMaskOnPath": {"class": CreateShapeMaskOnPath, "name": "Create Shape Mask On Path"},
+ "CreateTextOnPath": {"class": CreateTextOnPath, "name": "Create Text On Path"},
+ "CreateGradientFromCoords": {"class": CreateGradientFromCoords, "name": "Create Gradient From Coords"},
+ "GradientToFloat": {"class": GradientToFloat, "name": "Gradient To Float"},
+ "WeightScheduleExtend": {"class": WeightScheduleExtend, "name": "Weight Schedule Extend"},
+ "MaskOrImageToWeight": {"class": MaskOrImageToWeight, "name": "Mask Or Image To Weight"},
+ "WeightScheduleConvert": {"class": WeightScheduleConvert, "name": "Weight Schedule Convert"},
+ "FloatToMask": {"class": FloatToMask, "name": "Float To Mask"},
+ "FloatToSigmas": {"class": FloatToSigmas, "name": "Float To Sigmas"},
+ "SigmasToFloat": {"class": SigmasToFloat, "name": "Sigmas To Float"},
+ "PlotCoordinates": {"class": PlotCoordinates, "name": "Plot Coordinates"},
+ "InterpolateCoords": {"class": InterpolateCoords, "name": "Interpolate Coords"},
+ "PointsEditor": {"class": PointsEditor, "name": "Points Editor"},
+ #experimental
+ "StabilityAPI_SD3": {"class": StabilityAPI_SD3, "name": "Stability API SD3"},
+ "SoundReactive": {"class": SoundReactive, "name": "Sound Reactive"},
+ "StableZero123_BatchSchedule": {"class": StableZero123_BatchSchedule, "name": "Stable Zero123 Batch Schedule"},
+ "SV3D_BatchSchedule": {"class": SV3D_BatchSchedule, "name": "SV3D Batch Schedule"},
+ "LoadResAdapterNormalization": {"class": LoadResAdapterNormalization},
+ "Superprompt": {"class": Superprompt, "name": "Superprompt"},
+ "GLIGENTextBoxApplyBatchCoords": {"class": GLIGENTextBoxApplyBatchCoords},
+ "Intrinsic_lora_sampling": {"class": Intrinsic_lora_sampling, "name": "Intrinsic Lora Sampling"},
+ "CheckpointPerturbWeights": {"class": CheckpointPerturbWeights, "name": "CheckpointPerturbWeights"},
+ "Screencap_mss": {"class": Screencap_mss, "name": "Screencap mss"},
+ "WebcamCaptureCV2": {"class": WebcamCaptureCV2, "name": "Webcam Capture CV2"},
+ "DifferentialDiffusionAdvanced": {"class": DifferentialDiffusionAdvanced, "name": "Differential Diffusion Advanced"},
+ "FluxBlockLoraLoader": {"class": FluxBlockLoraLoader, "name": "Flux Block Lora Loader"},
+ "FluxBlockLoraSelect": {"class": FluxBlockLoraSelect, "name": "Flux Block Lora Select"},
+ "CustomControlNetWeightsFluxFromList": {"class": CustomControlNetWeightsFluxFromList, "name": "Custom ControlNet Weights Flux From List"},
+ "CheckpointLoaderKJ": {"class": CheckpointLoaderKJ, "name": "CheckpointLoaderKJ"},
+ "TorchCompileModelFluxAdvanced": {"class": TorchCompileModelFluxAdvanced, "name": "TorchCompileModelFluxAdvanced"},
+ "TorchCompileVAE": {"class": TorchCompileVAE, "name": "TorchCompileVAE"},
+ "TorchCompileControlNet": {"class": TorchCompileControlNet, "name": "TorchCompileControlNet"},
+ "PatchModelPatcherOrder": {"class": PatchModelPatcherOrder, "name": "Patch Model Patcher Order"},
+
+ #instance diffusion
+ "CreateInstanceDiffusionTracking": {"class": CreateInstanceDiffusionTracking},
+ "AppendInstanceDiffusionTracking": {"class": AppendInstanceDiffusionTracking},
+ "DrawInstanceDiffusionTracking": {"class": DrawInstanceDiffusionTracking},
+}
+
+def generate_node_mappings(node_config):
+ node_class_mappings = {}
+ node_display_name_mappings = {}
+
+ for node_name, node_info in node_config.items():
+ node_class_mappings[node_name] = node_info["class"]
+ node_display_name_mappings[node_name] = node_info.get("name", node_info["class"].__name__)
+
+ return node_class_mappings, node_display_name_mappings
+
+NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS = generate_node_mappings(NODE_CONFIG)
+
+__all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS", "WEB_DIRECTORY"]
+
+WEB_DIRECTORY = "./web"
+
+from aiohttp import web
+from server import PromptServer
+from pathlib import Path
+
+if hasattr(PromptServer, "instance"):
+ try:
+ # NOTE: we add an extra static path to avoid comfy mechanism
+ # that loads every script in web.
+ PromptServer.instance.app.add_routes(
+ [web.static("/kjweb_async", (Path(__file__).parent.absolute() / "kjweb_async").as_posix())]
+ )
+ except:
+ pass
\ No newline at end of file
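For context, each entry in NODE_CONFIG above points at a class following the standard ComfyUI custom-node convention (INPUT_TYPES / RETURN_TYPES / FUNCTION / CATEGORY), and generate_node_mappings simply flattens that table into the NODE_CLASS_MAPPINGS and NODE_DISPLAY_NAME_MAPPINGS dictionaries that ComfyUI reads on import. A minimal, hypothetical node class (not one of the actual KJNodes classes) would look roughly like this:

```python
# Hypothetical example of the node-class shape that NODE_CONFIG entries refer to.
class ExampleConstant:
    @classmethod
    def INPUT_TYPES(cls):
        # ComfyUI builds the node's widgets from this declaration.
        return {"required": {"value": ("INT", {"default": 0, "min": 0, "max": 4096})}}

    RETURN_TYPES = ("INT",)
    FUNCTION = "get_value"          # name of the method ComfyUI calls
    CATEGORY = "KJNodes/constants"  # where the node appears in the add-node menu

    def get_value(self, value):
        return (value,)

# Registering it would only require one more NODE_CONFIG line, e.g.:
# "ExampleConstant": {"class": ExampleConstant, "name": "Example Constant"},
```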
diff --git a/ComfyUI-KJNodes/__pycache__/__init__.cpython-312.pyc b/ComfyUI-KJNodes/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5b33ffc8e79a4e70c04b9ec7c06a15ae919d4030
Binary files /dev/null and b/ComfyUI-KJNodes/__pycache__/__init__.cpython-312.pyc differ
diff --git a/ComfyUI-KJNodes/config.json b/ComfyUI-KJNodes/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..8ed6d1c5728782545f7511fa137e2aab8152dae7
--- /dev/null
+++ b/ComfyUI-KJNodes/config.json
@@ -0,0 +1,3 @@
+{
+ "sai_api_key": "your_api_key_here"
+}
\ No newline at end of file
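config.json above ships with a placeholder for the Stability API key (used by the StabilityAPI_SD3 node listed in __init__.py). A minimal, hypothetical way to load and validate it could look like the following; the helper name and error message are illustrative, not the repo's actual code.

```python
# Illustrative loader for the sai_api_key in config.json (not the repo's actual code).
import json
from pathlib import Path

def load_sai_api_key(config_path: Path) -> str:
    with open(config_path, "r", encoding="utf-8") as f:
        key = json.load(f).get("sai_api_key", "")
    if not key or key == "your_api_key_here":
        raise ValueError("Set a real Stability API key in config.json before using the API node.")
    return key

# Example: load_sai_api_key(Path("ComfyUI-KJNodes/config.json"))
```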
diff --git a/ComfyUI-KJNodes/custom_dimensions.json b/ComfyUI-KJNodes/custom_dimensions.json
new file mode 100644
index 0000000000000000000000000000000000000000..17de247b2901a9a0d7112b1929074ce17d6ba45a
--- /dev/null
+++ b/ComfyUI-KJNodes/custom_dimensions.json
@@ -0,0 +1,22 @@
+[
+ {
+ "label": "SD",
+ "value": "512x512"
+ },
+ {
+ "label": "HD",
+ "value": "768x768"
+ },
+ {
+ "label": "Full HD",
+ "value": "1024x1024"
+ },
+ {
+ "label": "4k",
+ "value": "2048x2048"
+ },
+ {
+ "label": "SVD",
+ "value": "1024x576"
+ }
+]
\ No newline at end of file
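custom_dimensions.json stores presets as "WIDTHxHEIGHT" strings under a display label. A small, hypothetical parser for that format (for example, for a node offering these presets) might look like this:

```python
# Illustrative parser for the custom_dimensions.json format shown above.
import json

def load_dimension_presets(path: str = "custom_dimensions.json") -> dict:
    with open(path, "r", encoding="utf-8") as f:
        entries = json.load(f)
    presets = {}
    for entry in entries:
        width, height = (int(v) for v in entry["value"].split("x"))
        presets[entry["label"]] = (width, height)
    return presets

# load_dimension_presets() -> {"SD": (512, 512), ..., "SVD": (1024, 576)}
```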
diff --git a/ComfyUI-KJNodes/docs/images/2024-04-03_20_49_29-ComfyUI.png b/ComfyUI-KJNodes/docs/images/2024-04-03_20_49_29-ComfyUI.png
new file mode 100644
index 0000000000000000000000000000000000000000..43304322e9bad65a4d0bf5d216e3493d3bf67fc3
Binary files /dev/null and b/ComfyUI-KJNodes/docs/images/2024-04-03_20_49_29-ComfyUI.png differ
diff --git a/ComfyUI-KJNodes/docs/images/319121566-05f66385-7568-4b1f-8bbc-11053660b02f.png b/ComfyUI-KJNodes/docs/images/319121566-05f66385-7568-4b1f-8bbc-11053660b02f.png
new file mode 100644
index 0000000000000000000000000000000000000000..e749239c1c4ffd5ab29b51695dd8d8b51ed3597f
Binary files /dev/null and b/ComfyUI-KJNodes/docs/images/319121566-05f66385-7568-4b1f-8bbc-11053660b02f.png differ
diff --git a/ComfyUI-KJNodes/docs/images/319121636-706b5081-9120-4a29-bd76-901691ada688.png b/ComfyUI-KJNodes/docs/images/319121636-706b5081-9120-4a29-bd76-901691ada688.png
new file mode 100644
index 0000000000000000000000000000000000000000..b53ad666ff060d87971f3962e74101f0cb2a5c3f
Binary files /dev/null and b/ComfyUI-KJNodes/docs/images/319121636-706b5081-9120-4a29-bd76-901691ada688.png differ
diff --git a/ComfyUI-KJNodes/fonts/FreeMono.ttf b/ComfyUI-KJNodes/fonts/FreeMono.ttf
new file mode 100644
index 0000000000000000000000000000000000000000..7485f9e4c84d5a372c81e11df2cd9f5e2eb2064a
Binary files /dev/null and b/ComfyUI-KJNodes/fonts/FreeMono.ttf differ
diff --git a/ComfyUI-KJNodes/fonts/FreeMonoBoldOblique.otf b/ComfyUI-KJNodes/fonts/FreeMonoBoldOblique.otf
new file mode 100644
index 0000000000000000000000000000000000000000..dac575acbd08b6fa36d9aac4acd0b2d7ea140eec
Binary files /dev/null and b/ComfyUI-KJNodes/fonts/FreeMonoBoldOblique.otf differ
diff --git a/ComfyUI-KJNodes/fonts/TTNorms-Black.otf b/ComfyUI-KJNodes/fonts/TTNorms-Black.otf
new file mode 100644
index 0000000000000000000000000000000000000000..39f96c80af61c25ca8c35b25b3cb21673cb0d037
Binary files /dev/null and b/ComfyUI-KJNodes/fonts/TTNorms-Black.otf differ
diff --git a/ComfyUI-KJNodes/intrinsic_loras/intrinsic_lora_sd15_albedo.safetensors b/ComfyUI-KJNodes/intrinsic_loras/intrinsic_lora_sd15_albedo.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..3d84a8b75363549dff202eb2f2353e63d5245a04
--- /dev/null
+++ b/ComfyUI-KJNodes/intrinsic_loras/intrinsic_lora_sd15_albedo.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d897f04ff2bb452e29a8f2a3c5c3cd5c55e95f314242cd645fbbe24a5ac59961
+size 6416109
diff --git a/ComfyUI-KJNodes/intrinsic_loras/intrinsic_lora_sd15_depth.safetensors b/ComfyUI-KJNodes/intrinsic_loras/intrinsic_lora_sd15_depth.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..6048b84f9e5348240d84d1c0d24e96c9655032e2
--- /dev/null
+++ b/ComfyUI-KJNodes/intrinsic_loras/intrinsic_lora_sd15_depth.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f199d6bf3180fe7271073c3769dcb764b40f35f41b30fcb183ae5bf4b6a9997f
+size 6416109
diff --git a/ComfyUI-KJNodes/intrinsic_loras/intrinsic_lora_sd15_normal.safetensors b/ComfyUI-KJNodes/intrinsic_loras/intrinsic_lora_sd15_normal.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..506b1dd0a3b9a07c423f6cda497fa6a196014c18
--- /dev/null
+++ b/ComfyUI-KJNodes/intrinsic_loras/intrinsic_lora_sd15_normal.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:02934db0a0b92a9cdda402e42548560beda7d31b268e561dbc6815551e876268
+size 6416109
diff --git a/ComfyUI-KJNodes/intrinsic_loras/intrinsic_lora_sd15_shading.safetensors b/ComfyUI-KJNodes/intrinsic_loras/intrinsic_lora_sd15_shading.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..5b8bbfcf7926ac3ecefe84229ca6de2fc1b523eb
--- /dev/null
+++ b/ComfyUI-KJNodes/intrinsic_loras/intrinsic_lora_sd15_shading.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:635e998063a10211633edd3e4b1676201822cd67f790ec71dba5f32d8b625c8b
+size 6416109
diff --git a/ComfyUI-KJNodes/intrinsic_loras/intrinsic_loras.txt b/ComfyUI-KJNodes/intrinsic_loras/intrinsic_loras.txt
new file mode 100644
index 0000000000000000000000000000000000000000..11572bb08c5c0701f20a2a8eadfcbce17d045488
--- /dev/null
+++ b/ComfyUI-KJNodes/intrinsic_loras/intrinsic_loras.txt
@@ -0,0 +1,4 @@
+source for the loras:
+https://github.com/duxiaodan/intrinsic-lora
+
+Renamed and converted to .safetensors
\ No newline at end of file
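The note above says the intrinsic-lora weights were renamed and converted to .safetensors. One plausible way to perform such a conversion, assuming the upstream files are ordinary PyTorch checkpoints and the safetensors package is installed (file names below are illustrative, not the actual upstream names):

    # Sketch only: not the exact commands used for the files in this repo.
    import torch
    from safetensors.torch import save_file

    state_dict = torch.load("intrinsic_lora_albedo.pt", map_location="cpu")
    if isinstance(state_dict, dict) and "state_dict" in state_dict:
        state_dict = state_dict["state_dict"]  # some checkpoints nest the weights
    tensors = {k: v.contiguous() for k, v in state_dict.items() if isinstance(v, torch.Tensor)}
    save_file(tensors, "intrinsic_lora_sd15_albedo.safetensors")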
diff --git a/ComfyUI-KJNodes/kjweb_async/marked.min.js b/ComfyUI-KJNodes/kjweb_async/marked.min.js
new file mode 100644
index 0000000000000000000000000000000000000000..ba7976efddaa97edaf2a7600e851308b747cfd83
--- /dev/null
+++ b/ComfyUI-KJNodes/kjweb_async/marked.min.js
@@ -0,0 +1,6 @@
+/**
+ * marked v12.0.1 - a markdown parser
+ * Copyright (c) 2011-2024, Christopher Jeffrey. (MIT Licensed)
+ * https://github.com/markedjs/marked
+ */
+!function(e,t){"object"==typeof exports&&"undefined"!=typeof module?t(exports):"function"==typeof define&&define.amd?define(["exports"],t):t((e="undefined"!=typeof globalThis?globalThis:e||self).marked={})}(this,(function(e){"use strict";function t(){return{async:!1,breaks:!1,extensions:null,gfm:!0,hooks:null,pedantic:!1,renderer:null,silent:!1,tokenizer:null,walkTokens:null}}function n(t){e.defaults=t}e.defaults={async:!1,breaks:!1,extensions:null,gfm:!0,hooks:null,pedantic:!1,renderer:null,silent:!1,tokenizer:null,walkTokens:null};const s=/[&<>"']/,r=new RegExp(s.source,"g"),i=/[<>"']|&(?!(#\d{1,7}|#[Xx][a-fA-F0-9]{1,6}|\w+);)/,l=new RegExp(i.source,"g"),o={"&":"&","<":"<",">":">",'"':""","'":"'"},a=e=>o[e];function c(e,t){if(t){if(s.test(e))return e.replace(r,a)}else if(i.test(e))return e.replace(l,a);return e}const h=/&(#(?:\d+)|(?:#x[0-9A-Fa-f]+)|(?:\w+));?/gi;function p(e){return e.replace(h,((e,t)=>"colon"===(t=t.toLowerCase())?":":"#"===t.charAt(0)?"x"===t.charAt(1)?String.fromCharCode(parseInt(t.substring(2),16)):String.fromCharCode(+t.substring(1)):""))}const u=/(^|[^\[])\^/g;function k(e,t){let n="string"==typeof e?e:e.source;t=t||"";const s={replace:(e,t)=>{let r="string"==typeof t?t:t.source;return r=r.replace(u,"$1"),n=n.replace(e,r),s},getRegex:()=>new RegExp(n,t)};return s}function g(e){try{e=encodeURI(e).replace(/%25/g,"%")}catch(e){return null}return e}const f={exec:()=>null};function d(e,t){const n=e.replace(/\|/g,((e,t,n)=>{let s=!1,r=t;for(;--r>=0&&"\\"===n[r];)s=!s;return s?"|":" |"})).split(/ \|/);let s=0;if(n[0].trim()||n.shift(),n.length>0&&!n[n.length-1].trim()&&n.pop(),t)if(n.length>t)n.splice(t);else for(;n.length0)return{type:"space",raw:t[0]}}code(e){const t=this.rules.block.code.exec(e);if(t){const e=t[0].replace(/^ {1,4}/gm,"");return{type:"code",raw:t[0],codeBlockStyle:"indented",text:this.options.pedantic?e:x(e,"\n")}}}fences(e){const t=this.rules.block.fences.exec(e);if(t){const e=t[0],n=function(e,t){const n=e.match(/^(\s+)(?:```)/);if(null===n)return t;const s=n[1];return t.split("\n").map((e=>{const t=e.match(/^\s+/);if(null===t)return e;const[n]=t;return n.length>=s.length?e.slice(s.length):e})).join("\n")}(e,t[3]||"");return{type:"code",raw:e,lang:t[2]?t[2].trim().replace(this.rules.inline.anyPunctuation,"$1"):t[2],text:n}}}heading(e){const t=this.rules.block.heading.exec(e);if(t){let e=t[2].trim();if(/#$/.test(e)){const t=x(e,"#");this.options.pedantic?e=t.trim():t&&!/ $/.test(t)||(e=t.trim())}return{type:"heading",raw:t[0],depth:t[1].length,text:e,tokens:this.lexer.inline(e)}}}hr(e){const t=this.rules.block.hr.exec(e);if(t)return{type:"hr",raw:t[0]}}blockquote(e){const t=this.rules.block.blockquote.exec(e);if(t){const e=x(t[0].replace(/^ *>[ \t]?/gm,""),"\n"),n=this.lexer.state.top;this.lexer.state.top=!0;const s=this.lexer.blockTokens(e);return this.lexer.state.top=n,{type:"blockquote",raw:t[0],tokens:s,text:e}}}list(e){let t=this.rules.block.list.exec(e);if(t){let n=t[1].trim();const s=n.length>1,r={type:"list",raw:"",ordered:s,start:s?+n.slice(0,-1):"",loose:!1,items:[]};n=s?`\\d{1,9}\\${n.slice(-1)}`:`\\${n}`,this.options.pedantic&&(n=s?n:"[*+-]");const i=new RegExp(`^( {0,3}${n})((?:[\t ][^\\n]*)?(?:\\n|$))`);let l="",o="",a=!1;for(;e;){let n=!1;if(!(t=i.exec(e)))break;if(this.rules.block.hr.test(e))break;l=t[0],e=e.substring(l.length);let s=t[2].split("\n",1)[0].replace(/^\t+/,(e=>" ".repeat(3*e.length))),c=e.split("\n",1)[0],h=0;this.options.pedantic?(h=2,o=s.trimStart()):(h=t[2].search(/[^ ]/),h=h>4?1:h,o=s.slice(h),h+=t[1].length);let 
p=!1;if(!s&&/^ *$/.test(c)&&(l+=c+"\n",e=e.substring(c.length+1),n=!0),!n){const t=new RegExp(`^ {0,${Math.min(3,h-1)}}(?:[*+-]|\\d{1,9}[.)])((?:[ \t][^\\n]*)?(?:\\n|$))`),n=new RegExp(`^ {0,${Math.min(3,h-1)}}((?:- *){3,}|(?:_ *){3,}|(?:\\* *){3,})(?:\\n+|$)`),r=new RegExp(`^ {0,${Math.min(3,h-1)}}(?:\`\`\`|~~~)`),i=new RegExp(`^ {0,${Math.min(3,h-1)}}#`);for(;e;){const a=e.split("\n",1)[0];if(c=a,this.options.pedantic&&(c=c.replace(/^ {1,4}(?=( {4})*[^ ])/g," ")),r.test(c))break;if(i.test(c))break;if(t.test(c))break;if(n.test(e))break;if(c.search(/[^ ]/)>=h||!c.trim())o+="\n"+c.slice(h);else{if(p)break;if(s.search(/[^ ]/)>=4)break;if(r.test(s))break;if(i.test(s))break;if(n.test(s))break;o+="\n"+c}p||c.trim()||(p=!0),l+=a+"\n",e=e.substring(a.length+1),s=c.slice(h)}}r.loose||(a?r.loose=!0:/\n *\n *$/.test(l)&&(a=!0));let u,k=null;this.options.gfm&&(k=/^\[[ xX]\] /.exec(o),k&&(u="[ ] "!==k[0],o=o.replace(/^\[[ xX]\] +/,""))),r.items.push({type:"list_item",raw:l,task:!!k,checked:u,loose:!1,text:o,tokens:[]}),r.raw+=l}r.items[r.items.length-1].raw=l.trimEnd(),r.items[r.items.length-1].text=o.trimEnd(),r.raw=r.raw.trimEnd();for(let e=0;e"space"===e.type)),n=t.length>0&&t.some((e=>/\n.*\n/.test(e.raw)));r.loose=n}if(r.loose)for(let e=0;e$/,"$1").replace(this.rules.inline.anyPunctuation,"$1"):"",s=t[3]?t[3].substring(1,t[3].length-1).replace(this.rules.inline.anyPunctuation,"$1"):t[3];return{type:"def",tag:e,raw:t[0],href:n,title:s}}}table(e){const t=this.rules.block.table.exec(e);if(!t)return;if(!/[:|]/.test(t[2]))return;const n=d(t[1]),s=t[2].replace(/^\||\| *$/g,"").split("|"),r=t[3]&&t[3].trim()?t[3].replace(/\n[ \t]*$/,"").split("\n"):[],i={type:"table",raw:t[0],header:[],align:[],rows:[]};if(n.length===s.length){for(const e of s)/^ *-+: *$/.test(e)?i.align.push("right"):/^ *:-+: *$/.test(e)?i.align.push("center"):/^ *:-+ *$/.test(e)?i.align.push("left"):i.align.push(null);for(const e of n)i.header.push({text:e,tokens:this.lexer.inline(e)});for(const e of r)i.rows.push(d(e,i.header.length).map((e=>({text:e,tokens:this.lexer.inline(e)}))));return i}}lheading(e){const t=this.rules.block.lheading.exec(e);if(t)return{type:"heading",raw:t[0],depth:"="===t[2].charAt(0)?1:2,text:t[1],tokens:this.lexer.inline(t[1])}}paragraph(e){const t=this.rules.block.paragraph.exec(e);if(t){const e="\n"===t[1].charAt(t[1].length-1)?t[1].slice(0,-1):t[1];return{type:"paragraph",raw:t[0],text:e,tokens:this.lexer.inline(e)}}}text(e){const t=this.rules.block.text.exec(e);if(t)return{type:"text",raw:t[0],text:t[0],tokens:this.lexer.inline(t[0])}}escape(e){const t=this.rules.inline.escape.exec(e);if(t)return{type:"escape",raw:t[0],text:c(t[1])}}tag(e){const t=this.rules.inline.tag.exec(e);if(t)return!this.lexer.state.inLink&&/^/i.test(t[0])&&(this.lexer.state.inLink=!1),!this.lexer.state.inRawBlock&&/^<(pre|code|kbd|script)(\s|>)/i.test(t[0])?this.lexer.state.inRawBlock=!0:this.lexer.state.inRawBlock&&/^<\/(pre|code|kbd|script)(\s|>)/i.test(t[0])&&(this.lexer.state.inRawBlock=!1),{type:"html",raw:t[0],inLink:this.lexer.state.inLink,inRawBlock:this.lexer.state.inRawBlock,block:!1,text:t[0]}}link(e){const t=this.rules.inline.link.exec(e);if(t){const e=t[2].trim();if(!this.options.pedantic&&/^$/.test(e))return;const t=x(e.slice(0,-1),"\\");if((e.length-t.length)%2==0)return}else{const e=function(e,t){if(-1===e.indexOf(t[1]))return-1;let n=0;for(let s=0;s-1){const n=(0===t[0].indexOf("!")?5:4)+t[1].length+e;t[2]=t[2].substring(0,e),t[0]=t[0].substring(0,n).trim(),t[3]=""}}let n=t[2],s="";if(this.options.pedantic){const 
e=/^([^'"]*[^\s])\s+(['"])(.*)\2/.exec(n);e&&(n=e[1],s=e[3])}else s=t[3]?t[3].slice(1,-1):"";return n=n.trim(),/^$/.test(e)?n.slice(1):n.slice(1,-1)),b(t,{href:n?n.replace(this.rules.inline.anyPunctuation,"$1"):n,title:s?s.replace(this.rules.inline.anyPunctuation,"$1"):s},t[0],this.lexer)}}reflink(e,t){let n;if((n=this.rules.inline.reflink.exec(e))||(n=this.rules.inline.nolink.exec(e))){const e=t[(n[2]||n[1]).replace(/\s+/g," ").toLowerCase()];if(!e){const e=n[0].charAt(0);return{type:"text",raw:e,text:e}}return b(n,e,n[0],this.lexer)}}emStrong(e,t,n=""){let s=this.rules.inline.emStrongLDelim.exec(e);if(!s)return;if(s[3]&&n.match(/[\p{L}\p{N}]/u))return;if(!(s[1]||s[2]||"")||!n||this.rules.inline.punctuation.exec(n)){const n=[...s[0]].length-1;let r,i,l=n,o=0;const a="*"===s[0][0]?this.rules.inline.emStrongRDelimAst:this.rules.inline.emStrongRDelimUnd;for(a.lastIndex=0,t=t.slice(-1*e.length+n);null!=(s=a.exec(t));){if(r=s[1]||s[2]||s[3]||s[4]||s[5]||s[6],!r)continue;if(i=[...r].length,s[3]||s[4]){l+=i;continue}if((s[5]||s[6])&&n%3&&!((n+i)%3)){o+=i;continue}if(l-=i,l>0)continue;i=Math.min(i,i+l+o);const t=[...s[0]][0].length,a=e.slice(0,n+s.index+t+i);if(Math.min(n,i)%2){const e=a.slice(1,-1);return{type:"em",raw:a,text:e,tokens:this.lexer.inlineTokens(e)}}const c=a.slice(2,-2);return{type:"strong",raw:a,text:c,tokens:this.lexer.inlineTokens(c)}}}}codespan(e){const t=this.rules.inline.code.exec(e);if(t){let e=t[2].replace(/\n/g," ");const n=/[^ ]/.test(e),s=/^ /.test(e)&&/ $/.test(e);return n&&s&&(e=e.substring(1,e.length-1)),e=c(e,!0),{type:"codespan",raw:t[0],text:e}}}br(e){const t=this.rules.inline.br.exec(e);if(t)return{type:"br",raw:t[0]}}del(e){const t=this.rules.inline.del.exec(e);if(t)return{type:"del",raw:t[0],text:t[2],tokens:this.lexer.inlineTokens(t[2])}}autolink(e){const t=this.rules.inline.autolink.exec(e);if(t){let e,n;return"@"===t[2]?(e=c(t[1]),n="mailto:"+e):(e=c(t[1]),n=e),{type:"link",raw:t[0],text:e,href:n,tokens:[{type:"text",raw:e,text:e}]}}}url(e){let t;if(t=this.rules.inline.url.exec(e)){let e,n;if("@"===t[2])e=c(t[0]),n="mailto:"+e;else{let s;do{s=t[0],t[0]=this.rules.inline._backpedal.exec(t[0])?.[0]??""}while(s!==t[0]);e=c(t[0]),n="www."===t[1]?"http://"+t[0]:t[0]}return{type:"link",raw:t[0],text:e,href:n,tokens:[{type:"text",raw:e,text:e}]}}}inlineText(e){const t=this.rules.inline.text.exec(e);if(t){let e;return e=this.lexer.state.inRawBlock?t[0]:c(t[0]),{type:"text",raw:t[0],text:e}}}}const m=/^ {0,3}((?:-[\t ]*){3,}|(?:_[ \t]*){3,}|(?:\*[ \t]*){3,})(?:\n+|$)/,y=/(?:[*+-]|\d{1,9}[.)])/,$=k(/^(?!bull |blockCode|fences|blockquote|heading|html)((?:.|\n(?!\s*?\n|bull |blockCode|fences|blockquote|heading|html))+?)\n {0,3}(=+|-+) *(?:\n+|$)/).replace(/bull/g,y).replace(/blockCode/g,/ {4}/).replace(/fences/g,/ {0,3}(?:`{3,}|~{3,})/).replace(/blockquote/g,/ {0,3}>/).replace(/heading/g,/ {0,3}#{1,6}/).replace(/html/g,/ {0,3}<[^\n>]+>\n/).getRegex(),z=/^([^\n]+(?:\n(?!hr|heading|lheading|blockquote|fences|list|html|table| +\n)[^\n]+)*)/,T=/(?!\s*\])(?:\\.|[^\[\]\\])+/,R=k(/^ {0,3}\[(label)\]: *(?:\n *)?([^<\s][^\s]*|<.*?>)(?:(?: +(?:\n *)?| *\n *)(title))? 
*(?:\n+|$)/).replace("label",T).replace("title",/(?:"(?:\\"?|[^"\\])*"|'[^'\n]*(?:\n[^'\n]+)*\n?'|\([^()]*\))/).getRegex(),_=k(/^( {0,3}bull)([ \t][^\n]+?)?(?:\n|$)/).replace(/bull/g,y).getRegex(),A="address|article|aside|base|basefont|blockquote|body|caption|center|col|colgroup|dd|details|dialog|dir|div|dl|dt|fieldset|figcaption|figure|footer|form|frame|frameset|h[1-6]|head|header|hr|html|iframe|legend|li|link|main|menu|menuitem|meta|nav|noframes|ol|optgroup|option|p|param|search|section|summary|table|tbody|td|tfoot|th|thead|title|tr|track|ul",S=/|$))/,I=k("^ {0,3}(?:<(script|pre|style|textarea)[\\s>][\\s\\S]*?(?:\\1>[^\\n]*\\n+|$)|comment[^\\n]*(\\n+|$)|<\\?[\\s\\S]*?(?:\\?>\\n*|$)|\\n*|$)|\\n*|$)|?(tag)(?: +|\\n|/?>)[\\s\\S]*?(?:(?:\\n *)+\\n|$)|<(?!script|pre|style|textarea)([a-z][\\w-]*)(?:attribute)*? */?>(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:(?:\\n *)+\\n|$)|(?!script|pre|style|textarea)[a-z][\\w-]*\\s*>(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:(?:\\n *)+\\n|$))","i").replace("comment",S).replace("tag",A).replace("attribute",/ +[a-zA-Z:_][\w.:-]*(?: *= *"[^"\n]*"| *= *'[^'\n]*'| *= *[^\s"'=<>`]+)?/).getRegex(),E=k(z).replace("hr",m).replace("heading"," {0,3}#{1,6}(?:\\s|$)").replace("|lheading","").replace("|table","").replace("blockquote"," {0,3}>").replace("fences"," {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n").replace("list"," {0,3}(?:[*+-]|1[.)]) ").replace("html","?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)").replace("tag",A).getRegex(),q={blockquote:k(/^( {0,3}> ?(paragraph|[^\n]*)(?:\n|$))+/).replace("paragraph",E).getRegex(),code:/^( {4}[^\n]+(?:\n(?: *(?:\n|$))*)?)+/,def:R,fences:/^ {0,3}(`{3,}(?=[^`\n]*(?:\n|$))|~{3,})([^\n]*)(?:\n|$)(?:|([\s\S]*?)(?:\n|$))(?: {0,3}\1[~`]* *(?=\n|$)|$)/,heading:/^ {0,3}(#{1,6})(?=\s|$)(.*)(?:\n+|$)/,hr:m,html:I,lheading:$,list:_,newline:/^(?: *(?:\n|$))+/,paragraph:E,table:f,text:/^[^\n]+/},Z=k("^ *([^\\n ].*)\\n {0,3}((?:\\| *)?:?-+:? *(?:\\| *:?-+:? *)*(?:\\| *)?)(?:\\n((?:(?! *\\n|hr|heading|blockquote|code|fences|list|html).*(?:\\n|$))*)\\n*|$)").replace("hr",m).replace("heading"," {0,3}#{1,6}(?:\\s|$)").replace("blockquote"," {0,3}>").replace("code"," {4}[^\\n]").replace("fences"," {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n").replace("list"," {0,3}(?:[*+-]|1[.)]) ").replace("html","?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)").replace("tag",A).getRegex(),L={...q,table:Z,paragraph:k(z).replace("hr",m).replace("heading"," {0,3}#{1,6}(?:\\s|$)").replace("|lheading","").replace("table",Z).replace("blockquote"," {0,3}>").replace("fences"," {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n").replace("list"," {0,3}(?:[*+-]|1[.)]) ").replace("html","?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)").replace("tag",A).getRegex()},P={...q,html:k("^ *(?:comment *(?:\\n|\\s*$)|<(tag)[\\s\\S]+?\\1> *(?:\\n{2,}|\\s*$)| \\s]*)*?/?> *(?:\\n{2,}|\\s*$))").replace("comment",S).replace(/tag/g,"(?!(?:a|em|strong|small|s|cite|q|dfn|abbr|data|time|code|var|samp|kbd|sub|sup|i|b|u|mark|ruby|rt|rp|bdi|bdo|span|br|wbr|ins|del|img)\\b)\\w+(?!:|[^\\w\\s@]*@)\\b").getRegex(),def:/^ *\[([^\]]+)\]: *([^\s>]+)>?(?: +(["(][^\n]+[")]))? 
*(?:\n+|$)/,heading:/^(#{1,6})(.*)(?:\n+|$)/,fences:f,lheading:/^(.+?)\n {0,3}(=+|-+) *(?:\n+|$)/,paragraph:k(z).replace("hr",m).replace("heading"," *#{1,6} *[^\n]").replace("lheading",$).replace("|table","").replace("blockquote"," {0,3}>").replace("|fences","").replace("|list","").replace("|html","").replace("|tag","").getRegex()},Q=/^\\([!"#$%&'()*+,\-./:;<=>?@\[\]\\^_`{|}~])/,v=/^( {2,}|\\)\n(?!\s*$)/,B="\\p{P}\\p{S}",C=k(/^((?![*_])[\spunctuation])/,"u").replace(/punctuation/g,B).getRegex(),M=k(/^(?:\*+(?:((?!\*)[punct])|[^\s*]))|^_+(?:((?!_)[punct])|([^\s_]))/,"u").replace(/punct/g,B).getRegex(),O=k("^[^_*]*?__[^_*]*?\\*[^_*]*?(?=__)|[^*]+(?=[^*])|(?!\\*)[punct](\\*+)(?=[\\s]|$)|[^punct\\s](\\*+)(?!\\*)(?=[punct\\s]|$)|(?!\\*)[punct\\s](\\*+)(?=[^punct\\s])|[\\s](\\*+)(?!\\*)(?=[punct])|(?!\\*)[punct](\\*+)(?!\\*)(?=[punct])|[^punct\\s](\\*+)(?=[^punct\\s])","gu").replace(/punct/g,B).getRegex(),D=k("^[^_*]*?\\*\\*[^_*]*?_[^_*]*?(?=\\*\\*)|[^_]+(?=[^_])|(?!_)[punct](_+)(?=[\\s]|$)|[^punct\\s](_+)(?!_)(?=[punct\\s]|$)|(?!_)[punct\\s](_+)(?=[^punct\\s])|[\\s](_+)(?!_)(?=[punct])|(?!_)[punct](_+)(?!_)(?=[punct])","gu").replace(/punct/g,B).getRegex(),j=k(/\\([punct])/,"gu").replace(/punct/g,B).getRegex(),H=k(/^<(scheme:[^\s\x00-\x1f<>]*|email)>/).replace("scheme",/[a-zA-Z][a-zA-Z0-9+.-]{1,31}/).replace("email",/[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+(@)[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)+(?![-_])/).getRegex(),U=k(S).replace("(?:--\x3e|$)","--\x3e").getRegex(),X=k("^comment|^[a-zA-Z][\\w:-]*\\s*>|^<[a-zA-Z][\\w-]*(?:attribute)*?\\s*/?>|^<\\?[\\s\\S]*?\\?>|^|^").replace("comment",U).replace("attribute",/\s+[a-zA-Z:_][\w.:-]*(?:\s*=\s*"[^"]*"|\s*=\s*'[^']*'|\s*=\s*[^\s"'=<>`]+)?/).getRegex(),F=/(?:\[(?:\\.|[^\[\]\\])*\]|\\.|`[^`]*`|[^\[\]\\`])*?/,N=k(/^!?\[(label)\]\(\s*(href)(?:\s+(title))?\s*\)/).replace("label",F).replace("href",/<(?:\\.|[^\n<>\\])+>|[^\s\x00-\x1f]*/).replace("title",/"(?:\\"?|[^"\\])*"|'(?:\\'?|[^'\\])*'|\((?:\\\)?|[^)\\])*\)/).getRegex(),G=k(/^!?\[(label)\]\[(ref)\]/).replace("label",F).replace("ref",T).getRegex(),J=k(/^!?\[(ref)\](?:\[\])?/).replace("ref",T).getRegex(),K={_backpedal:f,anyPunctuation:j,autolink:H,blockSkip:/\[[^[\]]*?\]\([^\(\)]*?\)|`[^`]*?`|<[^<>]*?>/g,br:v,code:/^(`+)([^`]|[^`][\s\S]*?[^`])\1(?!`)/,del:f,emStrongLDelim:M,emStrongRDelimAst:O,emStrongRDelimUnd:D,escape:Q,link:N,nolink:J,punctuation:C,reflink:G,reflinkSearch:k("reflink|nolink(?!\\()","g").replace("reflink",G).replace("nolink",J).getRegex(),tag:X,text:/^(`+|[^`])(?:(?= {2,}\n)|[\s\S]*?(?:(?=[\\t+" ".repeat(n.length)));e;)if(!(this.options.extensions&&this.options.extensions.block&&this.options.extensions.block.some((s=>!!(n=s.call({lexer:this},e,t))&&(e=e.substring(n.raw.length),t.push(n),!0)))))if(n=this.tokenizer.space(e))e=e.substring(n.raw.length),1===n.raw.length&&t.length>0?t[t.length-1].raw+="\n":t.push(n);else if(n=this.tokenizer.code(e))e=e.substring(n.raw.length),s=t[t.length-1],!s||"paragraph"!==s.type&&"text"!==s.type?t.push(n):(s.raw+="\n"+n.raw,s.text+="\n"+n.text,this.inlineQueue[this.inlineQueue.length-1].src=s.text);else if(n=this.tokenizer.fences(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.heading(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.hr(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.blockquote(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.list(e))e=e.substring(n.raw.length),t.push(n);else 
if(n=this.tokenizer.html(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.def(e))e=e.substring(n.raw.length),s=t[t.length-1],!s||"paragraph"!==s.type&&"text"!==s.type?this.tokens.links[n.tag]||(this.tokens.links[n.tag]={href:n.href,title:n.title}):(s.raw+="\n"+n.raw,s.text+="\n"+n.raw,this.inlineQueue[this.inlineQueue.length-1].src=s.text);else if(n=this.tokenizer.table(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.lheading(e))e=e.substring(n.raw.length),t.push(n);else{if(r=e,this.options.extensions&&this.options.extensions.startBlock){let t=1/0;const n=e.slice(1);let s;this.options.extensions.startBlock.forEach((e=>{s=e.call({lexer:this},n),"number"==typeof s&&s>=0&&(t=Math.min(t,s))})),t<1/0&&t>=0&&(r=e.substring(0,t+1))}if(this.state.top&&(n=this.tokenizer.paragraph(r)))s=t[t.length-1],i&&"paragraph"===s.type?(s.raw+="\n"+n.raw,s.text+="\n"+n.text,this.inlineQueue.pop(),this.inlineQueue[this.inlineQueue.length-1].src=s.text):t.push(n),i=r.length!==e.length,e=e.substring(n.raw.length);else if(n=this.tokenizer.text(e))e=e.substring(n.raw.length),s=t[t.length-1],s&&"text"===s.type?(s.raw+="\n"+n.raw,s.text+="\n"+n.text,this.inlineQueue.pop(),this.inlineQueue[this.inlineQueue.length-1].src=s.text):t.push(n);else if(e){const t="Infinite loop on byte: "+e.charCodeAt(0);if(this.options.silent){console.error(t);break}throw new Error(t)}}return this.state.top=!0,t}inline(e,t=[]){return this.inlineQueue.push({src:e,tokens:t}),t}inlineTokens(e,t=[]){let n,s,r,i,l,o,a=e;if(this.tokens.links){const e=Object.keys(this.tokens.links);if(e.length>0)for(;null!=(i=this.tokenizer.rules.inline.reflinkSearch.exec(a));)e.includes(i[0].slice(i[0].lastIndexOf("[")+1,-1))&&(a=a.slice(0,i.index)+"["+"a".repeat(i[0].length-2)+"]"+a.slice(this.tokenizer.rules.inline.reflinkSearch.lastIndex))}for(;null!=(i=this.tokenizer.rules.inline.blockSkip.exec(a));)a=a.slice(0,i.index)+"["+"a".repeat(i[0].length-2)+"]"+a.slice(this.tokenizer.rules.inline.blockSkip.lastIndex);for(;null!=(i=this.tokenizer.rules.inline.anyPunctuation.exec(a));)a=a.slice(0,i.index)+"++"+a.slice(this.tokenizer.rules.inline.anyPunctuation.lastIndex);for(;e;)if(l||(o=""),l=!1,!(this.options.extensions&&this.options.extensions.inline&&this.options.extensions.inline.some((s=>!!(n=s.call({lexer:this},e,t))&&(e=e.substring(n.raw.length),t.push(n),!0)))))if(n=this.tokenizer.escape(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.tag(e))e=e.substring(n.raw.length),s=t[t.length-1],s&&"text"===n.type&&"text"===s.type?(s.raw+=n.raw,s.text+=n.text):t.push(n);else if(n=this.tokenizer.link(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.reflink(e,this.tokens.links))e=e.substring(n.raw.length),s=t[t.length-1],s&&"text"===n.type&&"text"===s.type?(s.raw+=n.raw,s.text+=n.text):t.push(n);else if(n=this.tokenizer.emStrong(e,a,o))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.codespan(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.br(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.del(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.autolink(e))e=e.substring(n.raw.length),t.push(n);else if(this.state.inLink||!(n=this.tokenizer.url(e))){if(r=e,this.options.extensions&&this.options.extensions.startInline){let t=1/0;const n=e.slice(1);let s;this.options.extensions.startInline.forEach((e=>{s=e.call({lexer:this},n),"number"==typeof 
s&&s>=0&&(t=Math.min(t,s))})),t<1/0&&t>=0&&(r=e.substring(0,t+1))}if(n=this.tokenizer.inlineText(r))e=e.substring(n.raw.length),"_"!==n.raw.slice(-1)&&(o=n.raw.slice(-1)),l=!0,s=t[t.length-1],s&&"text"===s.type?(s.raw+=n.raw,s.text+=n.text):t.push(n);else if(e){const t="Infinite loop on byte: "+e.charCodeAt(0);if(this.options.silent){console.error(t);break}throw new Error(t)}}else e=e.substring(n.raw.length),t.push(n);return t}}class se{options;constructor(t){this.options=t||e.defaults}code(e,t,n){const s=(t||"").match(/^\S*/)?.[0];return e=e.replace(/\n$/,"")+"\n",s?''+(n?e:c(e,!0))+"
\n":""+(n?e:c(e,!0))+"
\n"}blockquote(e){return`\n${e} \n`}html(e,t){return e}heading(e,t,n){return`${e} \n`}hr(){return" \n"}list(e,t,n){const s=t?"ol":"ul";return"<"+s+(t&&1!==n?' start="'+n+'"':"")+">\n"+e+""+s+">\n"}listitem(e,t,n){return`${e} \n`}checkbox(e){return" '}paragraph(e){return`${e}
\n`}table(e,t){return t&&(t=`${t} `),"\n"}tablerow(e){return`\n${e} \n`}tablecell(e,t){const n=t.header?"th":"td";return(t.align?`<${n} align="${t.align}">`:`<${n}>`)+e+`${n}>\n`}strong(e){return`${e} `}em(e){return`${e} `}codespan(e){return`${e}
`}br(){return" "}del(e){return`${e}`}link(e,t,n){const s=g(e);if(null===s)return n;let r='"+n+" ",r}image(e,t,n){const s=g(e);if(null===s)return n;let r=` ",r}text(e){return e}}class re{strong(e){return e}em(e){return e}codespan(e){return e}del(e){return e}html(e){return e}text(e){return e}link(e,t,n){return""+n}image(e,t,n){return""+n}br(){return""}}class ie{options;renderer;textRenderer;constructor(t){this.options=t||e.defaults,this.options.renderer=this.options.renderer||new se,this.renderer=this.options.renderer,this.renderer.options=this.options,this.textRenderer=new re}static parse(e,t){return new ie(t).parse(e)}static parseInline(e,t){return new ie(t).parseInline(e)}parse(e,t=!0){let n="";for(let s=0;s0&&"paragraph"===n.tokens[0].type?(n.tokens[0].text=e+" "+n.tokens[0].text,n.tokens[0].tokens&&n.tokens[0].tokens.length>0&&"text"===n.tokens[0].tokens[0].type&&(n.tokens[0].tokens[0].text=e+" "+n.tokens[0].tokens[0].text)):n.tokens.unshift({type:"text",text:e+" "}):o+=e+" "}o+=this.parse(n.tokens,i),l+=this.renderer.listitem(o,r,!!s)}n+=this.renderer.list(l,t,s);continue}case"html":{const e=r;n+=this.renderer.html(e.text,e.block);continue}case"paragraph":{const e=r;n+=this.renderer.paragraph(this.parseInline(e.tokens));continue}case"text":{let i=r,l=i.tokens?this.parseInline(i.tokens):i.text;for(;s+1{const r=e[s].flat(1/0);n=n.concat(this.walkTokens(r,t))})):e.tokens&&(n=n.concat(this.walkTokens(e.tokens,t)))}}return n}use(...e){const t=this.defaults.extensions||{renderers:{},childTokens:{}};return e.forEach((e=>{const n={...e};if(n.async=this.defaults.async||n.async||!1,e.extensions&&(e.extensions.forEach((e=>{if(!e.name)throw new Error("extension name required");if("renderer"in e){const n=t.renderers[e.name];t.renderers[e.name]=n?function(...t){let s=e.renderer.apply(this,t);return!1===s&&(s=n.apply(this,t)),s}:e.renderer}if("tokenizer"in e){if(!e.level||"block"!==e.level&&"inline"!==e.level)throw new Error("extension level must be 'block' or 'inline'");const n=t[e.level];n?n.unshift(e.tokenizer):t[e.level]=[e.tokenizer],e.start&&("block"===e.level?t.startBlock?t.startBlock.push(e.start):t.startBlock=[e.start]:"inline"===e.level&&(t.startInline?t.startInline.push(e.start):t.startInline=[e.start]))}"childTokens"in e&&e.childTokens&&(t.childTokens[e.name]=e.childTokens)})),n.extensions=t),e.renderer){const t=this.defaults.renderer||new se(this.defaults);for(const n in e.renderer){if(!(n in t))throw new Error(`renderer '${n}' does not exist`);if("options"===n)continue;const s=n,r=e.renderer[s],i=t[s];t[s]=(...e)=>{let n=r.apply(t,e);return!1===n&&(n=i.apply(t,e)),n||""}}n.renderer=t}if(e.tokenizer){const t=this.defaults.tokenizer||new w(this.defaults);for(const n in e.tokenizer){if(!(n in t))throw new Error(`tokenizer '${n}' does not exist`);if(["options","rules","lexer"].includes(n))continue;const s=n,r=e.tokenizer[s],i=t[s];t[s]=(...e)=>{let n=r.apply(t,e);return!1===n&&(n=i.apply(t,e)),n}}n.tokenizer=t}if(e.hooks){const t=this.defaults.hooks||new le;for(const n in e.hooks){if(!(n in t))throw new Error(`hook '${n}' does not exist`);if("options"===n)continue;const s=n,r=e.hooks[s],i=t[s];le.passThroughHooks.has(n)?t[s]=e=>{if(this.defaults.async)return Promise.resolve(r.call(t,e)).then((e=>i.call(t,e)));const n=r.call(t,e);return i.call(t,n)}:t[s]=(...e)=>{let n=r.apply(t,e);return!1===n&&(n=i.apply(t,e)),n}}n.hooks=t}if(e.walkTokens){const t=this.defaults.walkTokens,s=e.walkTokens;n.walkTokens=function(e){let n=[];return 
n.push(s.call(this,e)),t&&(n=n.concat(t.call(this,e))),n}}this.defaults={...this.defaults,...n}})),this}setOptions(e){return this.defaults={...this.defaults,...e},this}lexer(e,t){return ne.lex(e,t??this.defaults)}parser(e,t){return ie.parse(e,t??this.defaults)}#e(e,t){return(n,s)=>{const r={...s},i={...this.defaults,...r};!0===this.defaults.async&&!1===r.async&&(i.silent||console.warn("marked(): The async option was set to true by an extension. The async: false option sent to parse will be ignored."),i.async=!0);const l=this.#t(!!i.silent,!!i.async);if(null==n)return l(new Error("marked(): input parameter is undefined or null"));if("string"!=typeof n)return l(new Error("marked(): input parameter is of type "+Object.prototype.toString.call(n)+", string expected"));if(i.hooks&&(i.hooks.options=i),i.async)return Promise.resolve(i.hooks?i.hooks.preprocess(n):n).then((t=>e(t,i))).then((e=>i.hooks?i.hooks.processAllTokens(e):e)).then((e=>i.walkTokens?Promise.all(this.walkTokens(e,i.walkTokens)).then((()=>e)):e)).then((e=>t(e,i))).then((e=>i.hooks?i.hooks.postprocess(e):e)).catch(l);try{i.hooks&&(n=i.hooks.preprocess(n));let s=e(n,i);i.hooks&&(s=i.hooks.processAllTokens(s)),i.walkTokens&&this.walkTokens(s,i.walkTokens);let r=t(s,i);return i.hooks&&(r=i.hooks.postprocess(r)),r}catch(e){return l(e)}}}#t(e,t){return n=>{if(n.message+="\nPlease report this to https://github.com/markedjs/marked.",e){const e="An error occurred:
"+c(n.message+"",!0)+" ";return t?Promise.resolve(e):e}if(t)return Promise.reject(n);throw n}}}const ae=new oe;function ce(e,t){return ae.parse(e,t)}ce.options=ce.setOptions=function(e){return ae.setOptions(e),ce.defaults=ae.defaults,n(ce.defaults),ce},ce.getDefaults=t,ce.defaults=e.defaults,ce.use=function(...e){return ae.use(...e),ce.defaults=ae.defaults,n(ce.defaults),ce},ce.walkTokens=function(e,t){return ae.walkTokens(e,t)},ce.parseInline=ae.parseInline,ce.Parser=ie,ce.parser=ie.parse,ce.Renderer=se,ce.TextRenderer=re,ce.Lexer=ne,ce.lexer=ne.lex,ce.Tokenizer=w,ce.Hooks=le,ce.parse=ce;const he=ce.options,pe=ce.setOptions,ue=ce.use,ke=ce.walkTokens,ge=ce.parseInline,fe=ce,de=ie.parse,xe=ne.lex;e.Hooks=le,e.Lexer=ne,e.Marked=oe,e.Parser=ie,e.Renderer=se,e.TextRenderer=re,e.Tokenizer=w,e.getDefaults=t,e.lexer=xe,e.marked=ce,e.options=he,e.parse=fe,e.parseInline=ge,e.parser=de,e.setOptions=pe,e.use=ue,e.walkTokens=ke}));
diff --git a/ComfyUI-KJNodes/kjweb_async/protovis.min.js b/ComfyUI-KJNodes/kjweb_async/protovis.min.js
new file mode 100644
index 0000000000000000000000000000000000000000..4c7f784c0f3b8021cf44934853f5c29a5633bc40
--- /dev/null
+++ b/ComfyUI-KJNodes/kjweb_async/protovis.min.js
@@ -0,0 +1,277 @@
+var a;if(!Array.prototype.map)Array.prototype.map=function(b,c){for(var d=this.length,f=new Array(d),g=0;g>>0,f=0;f=d)throw new Error("reduce: no values, no initial value");}for(;f=0&&d=69&&m<100?1900:0)});return"([0-9]+)";case "%Y":q.push(function(m){g=m});return"([0-9]+)";case "%%":q.push(function(){});
+return"%"}return p});(f=f.match(n))&&f.forEach(function(p,m){q[m](p)});return new Date(g,h,i,j,k,l)};return c};
+pv.Format.time=function(b){function c(f){f=Number(f);switch(b){case "short":if(f>=31536E6)return(f/31536E6).toFixed(1)+" years";else if(f>=6048E5)return(f/6048E5).toFixed(1)+" weeks";else if(f>=864E5)return(f/864E5).toFixed(1)+" days";else if(f>=36E5)return(f/36E5).toFixed(1)+" hours";else if(f>=6E4)return(f/6E4).toFixed(1)+" minutes";return(f/1E3).toFixed(1)+" seconds";case "long":var g=[],h=f%36E5/6E4>>0;g.push(d("0",2,f%6E4/1E3>>0));if(f>=36E5){var i=f%864E5/36E5>>0;g.push(d("0",2,h));if(f>=864E5){g.push(d("0",
+2,i));g.push(Math.floor(f/864E5).toFixed())}else g.push(i.toFixed())}else g.push(h.toFixed());return g.reverse().join(":")}}var d=pv.Format.pad;c.format=c;c.parse=function(f){switch(b){case "short":for(var g=/([0-9,.]+)\s*([a-z]+)/g,h,i=0;h=g.exec(f);){var j=parseFloat(h[0].replace(",","")),k=0;switch(h[2].toLowerCase()){case "year":case "years":k=31536E6;break;case "week":case "weeks":k=6048E5;break;case "day":case "days":k=864E5;break;case "hour":case "hours":k=36E5;break;case "minute":case "minutes":k=
+6E4;break;case "second":case "seconds":k=1E3;break}i+=j*k}return i;case "long":h=f.replace(",","").split(":").reverse();i=0;if(h.length)i+=parseFloat(h[0])*1E3;if(h.length>1)i+=parseFloat(h[1])*6E4;if(h.length>2)i+=parseFloat(h[2])*36E5;if(h.length>3)i+=parseFloat(h[3])*864E5;return i}};return c};
+pv.Format.number=function(){function b(r){if(Infinity>h)r=Math.round(r*i)/i;var s=String(Math.abs(r)).split("."),t=s[0];if(t.length>d)t=t.substring(t.length-d);if(l&&t.length3)t=t.replace(/\B(?=(?:\d{3})+(?!\d))/g,n);if(!l&&t.lengthd)s=s.substring(s.length-d);r=r[1]?Number("0."+r[1]):0;if(Infinity>h)r=Math.round(r*i)/i;return Math.round(s)+r};b.integerDigits=function(r,s){if(arguments.length){c=Number(r);d=arguments.length>1?Number(s):c;f=c+Math.floor(c/3)*n.length;return this}return[c,d]};b.fractionDigits=function(r,s){if(arguments.length){g=
+Number(r);h=arguments.length>1?Number(s):g;i=Math.pow(10,h);return this}return[g,h]};b.integerPad=function(r){if(arguments.length){j=String(r);l=/\d/.test(j);return this}return j};b.fractionPad=function(r){if(arguments.length){k=String(r);return this}return k};b.decimal=function(r){if(arguments.length){q=String(r);return this}return q};b.group=function(r){if(arguments.length){n=r?String(r):"";f=c+Math.floor(c/3)*n.length;return this}return n};b.negativeAffix=function(r,s){if(arguments.length){p=String(r||
+"");m=String(s||"");return this}return[p,m]};return b};pv.map=function(b,c){var d={};return c?b.map(function(f,g){d.index=g;return c.call(d,f)}):b.slice()};pv.repeat=function(b,c){if(arguments.length==1)c=2;return pv.blend(pv.range(c).map(function(){return b}))};pv.cross=function(b,c){for(var d=[],f=0,g=b.length,h=c.length;fc){b.length=d;for(var f=c;fc?1:0};
+pv.reverseOrder=function(b,c){return cb?1:0};pv.search=function(b,c,d){if(!d)d=pv.identity;for(var f=0,g=b.length-1;f<=g;){var h=f+g>>1,i=d(b[h]);if(ic)g=h-1;else return h}return-f-1};pv.search.index=function(b,c,d){b=pv.search(b,c,d);return b<0?-b-1:b};
+pv.range=function(b,c,d){if(arguments.length==1){c=b;b=0}if(d==undefined)d=1;if((c-b)/d==Infinity)throw new Error("range must be finite");var f=[],g=0,h;c-=(c-b)*1.0E-10;if(d<0)for(;(h=b+d*g++)>c;)f.push(h);else for(;(h=b+d*g++)f){f=i;d=h}}return d};
+pv.min=function(b,c){if(c==pv.index)return 0;return Math.min.apply(null,c?pv.map(b,c):b)};pv.min.index=function(b,c){if(!b.length)return-1;if(c==pv.index)return 0;if(!c)c=pv.identity;for(var d=0,f=Infinity,g={},h=0;h0?Math.pow(c,Math.floor(pv.log(b,c))):-Math.pow(c,-Math.floor(-pv.log(-b,c)))};pv.logCeil=function(b,c){return b>0?Math.pow(c,Math.ceil(pv.log(b,c))):-Math.pow(c,-Math.ceil(-pv.log(-b,c)))};
+(function(){var b=Math.PI/180,c=180/Math.PI;pv.radians=function(d){return b*d};pv.degrees=function(d){return c*d}})();pv.keys=function(b){var c=[];for(var d in b)c.push(d);return c};pv.entries=function(b){var c=[];for(var d in b)c.push({key:d,value:b[d]});return c};pv.values=function(b){var c=[];for(var d in b)c.push(b[d]);return c};pv.dict=function(b,c){for(var d={},f={},g=0;g=94608E6){p=31536E6;u="%Y";o=function(w){w.setFullYear(w.getFullYear()+v)}}else if(t>=7776E6){p=2592E6;u="%m/%Y";o=function(w){w.setMonth(w.getMonth()+v)}}else if(t>=18144E5){p=6048E5;u="%m/%d";o=function(w){w.setDate(w.getDate()+7*v)}}else if(t>=2592E5){p=864E5;u="%m/%d";o=function(w){w.setDate(w.getDate()+v)}}else if(t>=108E5){p=36E5;u="%I:%M %p";o=function(w){w.setHours(w.getHours()+
+v)}}else if(t>=18E4){p=6E4;u="%I:%M %p";o=function(w){w.setMinutes(w.getMinutes()+v)}}else if(t>=3E3){p=1E3;u="%I:%M:%S";o=function(w){w.setSeconds(w.getSeconds()+v)}}else{p=1;u="%S.%Qs";o=function(w){w.setTime(w.getTime()+v)}}q=pv.Format.date(u);s=new Date(s);u=[];x(s,p);t=t/p;if(t>10)switch(p){case 36E5:v=t>20?6:3;s.setHours(Math.floor(s.getHours()/v)*v);break;case 2592E6:v=3;s.setMonth(Math.floor(s.getMonth()/v)*v);break;case 6E4:v=t>30?15:t>15?10:5;s.setMinutes(Math.floor(s.getMinutes()/v)*v);
+break;case 1E3:v=t>90?15:t>60?10:5;s.setSeconds(Math.floor(s.getSeconds()/v)*v);break;case 1:v=t>1E3?250:t>200?100:t>100?50:t>50?25:5;s.setMilliseconds(Math.floor(s.getMilliseconds()/v)*v);break;default:v=pv.logCeil(t/15,10);if(t/v<2)v/=5;else if(t/v<5)v/=2;s.setFullYear(Math.floor(s.getFullYear()/v)*v);break}for(;;){o(s);if(s>m)break;u.push(new Date(s))}return r?u.reverse():u}arguments.length||(n=10);v=pv.logFloor(t/n,10);p=n/(t/v);if(p<=0.15)v*=10;else if(p<=0.35)v*=5;else if(p<=0.75)v*=2;p=Math.ceil(s/
+v)*v;m=Math.floor(m/v)*v;q=pv.Format.number().fractionDigits(Math.max(0,-Math.floor(pv.log(v,10)+0.01)));m=pv.range(p,m+v,v);return r?m.reverse():m};c.tickFormat=function(n){return q(n)};c.nice=function(){if(d.length!=2)return this;var n=d[0],p=d[d.length-1],m=p0;i--)l.push(-g(-j)*i);else{for(;jh[1];k--);return l.slice(j,k)};b.tickFormat=function(h){return h.toPrecision(1)};
+b.nice=function(){var h=b.domain();return b.domain(pv.logFloor(h[0],c),pv.logCeil(h[1],c))};b.base=function(h){if(arguments.length){c=Number(h);d=Math.log(c);b.transform(f,g);return this}return c};b.domain.apply(b,arguments);return b.base(10)};pv.Scale.root=function(){var b=pv.Scale.quantitative();b.power=function(c){if(arguments.length){var d=Number(c),f=1/d;b.transform(function(g){return Math.pow(g,f)},function(g){return Math.pow(g,d)});return this}return d};b.domain.apply(b,arguments);return b.power(2)};
+pv.Scale.ordinal=function(){function b(g){g in d||(d[g]=c.push(g)-1);return f[d[g]%f.length]}var c=[],d={},f=[];b.domain=function(g,h){if(arguments.length){g=g instanceof Array?arguments.length>1?pv.map(g,h):g:Array.prototype.slice.call(arguments);c=[];for(var i={},j=0;j1?pv.map(g,h):g:Array.prototype.slice.call(arguments);
+if(typeof f[0]=="string")f=f.map(pv.color);return this}return f};b.split=function(g,h){var i=(h-g)/this.domain().length;f=pv.range(g+i/2,h,i);return this};b.splitFlush=function(g,h){var i=this.domain().length,j=(h-g)/(i-1);f=i==1?[(g+h)/2]:pv.range(g,h+j/2,j);return this};b.splitBanded=function(g,h,i){if(arguments.length<3)i=1;if(i<0){var j=this.domain().length;j=(h-g- -i*j)/(j+1);f=pv.range(g+j,h,j-i);f.band=-i}else{j=(h-g)/(this.domain().length+(1-i));f=pv.range(g+j*(1-i),h,j);f.band=j*i}return this};
+b.by=function(g){function h(){return b(g.apply(this,arguments))}for(var i in b)h[i]=b[i];return h};b.domain.apply(b,arguments);return b};
+pv.Scale.quantile=function(){function b(i){return h(Math.max(0,Math.min(d,pv.search.index(f,i)-1))/d)}var c=-1,d=-1,f=[],g=[],h=pv.Scale.linear();b.quantiles=function(i){if(arguments.length){c=Number(i);if(c<0){f=[g[0]].concat(g);d=g.length-1}else{f=[];f[0]=g[0];for(var j=1;j<=c;j++)f[j]=g[~~(j*(g.length-1)/c)];d=c-1}return this}return f};b.domain=function(i,j){if(arguments.length){g=i instanceof Array?pv.map(i,j):Array.prototype.slice.call(arguments);g.sort(pv.naturalOrder);b.quantiles(c);return this}return g};
+b.range=function(){if(arguments.length){h.range.apply(h,arguments);return this}return h.range()};b.by=function(i){function j(){return b(i.apply(this,arguments))}for(var k in b)j[k]=b[k];return j};b.domain.apply(b,arguments);return b};
+pv.histogram=function(b,c){var d=true;return{bins:function(f){var g=pv.map(b,c),h=[];arguments.length||(f=pv.Scale.linear(g).ticks());for(var i=0;i360)j-=360;else if(j<0)j+=360;if(j<60)return i+(h-i)*j/60;if(j<180)return h;if(j<240)return i+(h-i)*(240-j)/60;return i}function c(j){return Math.round(b(j)*255)}var d=this.h,f=this.s,g=this.l;d%=360;if(d<0)d+=360;f=Math.max(0,Math.min(f,1));g=Math.max(0,Math.min(g,1));var h=g<=0.5?g*(1+f):g+f-g*f,i=2*g-h;return pv.rgb(c(d+120),c(d),c(d-120),this.a)};
+pv.Color.names={aliceblue:"#f0f8ff",antiquewhite:"#faebd7",aqua:"#00ffff",aquamarine:"#7fffd4",azure:"#f0ffff",beige:"#f5f5dc",bisque:"#ffe4c4",black:"#000000",blanchedalmond:"#ffebcd",blue:"#0000ff",blueviolet:"#8a2be2",brown:"#a52a2a",burlywood:"#deb887",cadetblue:"#5f9ea0",chartreuse:"#7fff00",chocolate:"#d2691e",coral:"#ff7f50",cornflowerblue:"#6495ed",cornsilk:"#fff8dc",crimson:"#dc143c",cyan:"#00ffff",darkblue:"#00008b",darkcyan:"#008b8b",darkgoldenrod:"#b8860b",darkgray:"#a9a9a9",darkgreen:"#006400",
+darkgrey:"#a9a9a9",darkkhaki:"#bdb76b",darkmagenta:"#8b008b",darkolivegreen:"#556b2f",darkorange:"#ff8c00",darkorchid:"#9932cc",darkred:"#8b0000",darksalmon:"#e9967a",darkseagreen:"#8fbc8f",darkslateblue:"#483d8b",darkslategray:"#2f4f4f",darkslategrey:"#2f4f4f",darkturquoise:"#00ced1",darkviolet:"#9400d3",deeppink:"#ff1493",deepskyblue:"#00bfff",dimgray:"#696969",dimgrey:"#696969",dodgerblue:"#1e90ff",firebrick:"#b22222",floralwhite:"#fffaf0",forestgreen:"#228b22",fuchsia:"#ff00ff",gainsboro:"#dcdcdc",
+ghostwhite:"#f8f8ff",gold:"#ffd700",goldenrod:"#daa520",gray:"#808080",green:"#008000",greenyellow:"#adff2f",grey:"#808080",honeydew:"#f0fff0",hotpink:"#ff69b4",indianred:"#cd5c5c",indigo:"#4b0082",ivory:"#fffff0",khaki:"#f0e68c",lavender:"#e6e6fa",lavenderblush:"#fff0f5",lawngreen:"#7cfc00",lemonchiffon:"#fffacd",lightblue:"#add8e6",lightcoral:"#f08080",lightcyan:"#e0ffff",lightgoldenrodyellow:"#fafad2",lightgray:"#d3d3d3",lightgreen:"#90ee90",lightgrey:"#d3d3d3",lightpink:"#ffb6c1",lightsalmon:"#ffa07a",
+lightseagreen:"#20b2aa",lightskyblue:"#87cefa",lightslategray:"#778899",lightslategrey:"#778899",lightsteelblue:"#b0c4de",lightyellow:"#ffffe0",lime:"#00ff00",limegreen:"#32cd32",linen:"#faf0e6",magenta:"#ff00ff",maroon:"#800000",mediumaquamarine:"#66cdaa",mediumblue:"#0000cd",mediumorchid:"#ba55d3",mediumpurple:"#9370db",mediumseagreen:"#3cb371",mediumslateblue:"#7b68ee",mediumspringgreen:"#00fa9a",mediumturquoise:"#48d1cc",mediumvioletred:"#c71585",midnightblue:"#191970",mintcream:"#f5fffa",mistyrose:"#ffe4e1",
+moccasin:"#ffe4b5",navajowhite:"#ffdead",navy:"#000080",oldlace:"#fdf5e6",olive:"#808000",olivedrab:"#6b8e23",orange:"#ffa500",orangered:"#ff4500",orchid:"#da70d6",palegoldenrod:"#eee8aa",palegreen:"#98fb98",paleturquoise:"#afeeee",palevioletred:"#db7093",papayawhip:"#ffefd5",peachpuff:"#ffdab9",peru:"#cd853f",pink:"#ffc0cb",plum:"#dda0dd",powderblue:"#b0e0e6",purple:"#800080",red:"#ff0000",rosybrown:"#bc8f8f",royalblue:"#4169e1",saddlebrown:"#8b4513",salmon:"#fa8072",sandybrown:"#f4a460",seagreen:"#2e8b57",
+seashell:"#fff5ee",sienna:"#a0522d",silver:"#c0c0c0",skyblue:"#87ceeb",slateblue:"#6a5acd",slategray:"#708090",slategrey:"#708090",snow:"#fffafa",springgreen:"#00ff7f",steelblue:"#4682b4",tan:"#d2b48c",teal:"#008080",thistle:"#d8bfd8",tomato:"#ff6347",turquoise:"#40e0d0",violet:"#ee82ee",wheat:"#f5deb3",white:"#ffffff",whitesmoke:"#f5f5f5",yellow:"#ffff00",yellowgreen:"#9acd32",transparent:pv.Color.transparent=pv.rgb(0,0,0,0)};(function(){var b=pv.Color.names;for(var c in b)b[c]=pv.color(b[c])})();
+pv.colors=function(){var b=pv.Scale.ordinal();b.range.apply(b,arguments);return b};pv.Colors={};pv.Colors.category10=function(){var b=pv.colors("#1f77b4","#ff7f0e","#2ca02c","#d62728","#9467bd","#8c564b","#e377c2","#7f7f7f","#bcbd22","#17becf");b.domain.apply(b,arguments);return b};
+pv.Colors.category20=function(){var b=pv.colors("#1f77b4","#aec7e8","#ff7f0e","#ffbb78","#2ca02c","#98df8a","#d62728","#ff9896","#9467bd","#c5b0d5","#8c564b","#c49c94","#e377c2","#f7b6d2","#7f7f7f","#c7c7c7","#bcbd22","#dbdb8d","#17becf","#9edae5");b.domain.apply(b,arguments);return b};
+pv.Colors.category19=function(){var b=pv.colors("#9c9ede","#7375b5","#4a5584","#cedb9c","#b5cf6b","#8ca252","#637939","#e7cb94","#e7ba52","#bd9e39","#8c6d31","#e7969c","#d6616b","#ad494a","#843c39","#de9ed6","#ce6dbd","#a55194","#7b4173");b.domain.apply(b,arguments);return b};pv.ramp=function(){var b=pv.Scale.linear();b.range.apply(b,arguments);return b};
+pv.Scene=pv.SvgScene={svg:"http://www.w3.org/2000/svg",xmlns:"http://www.w3.org/2000/xmlns",xlink:"http://www.w3.org/1999/xlink",xhtml:"http://www.w3.org/1999/xhtml",scale:1,events:["DOMMouseScroll","mousewheel","mousedown","mouseup","mouseover","mouseout","mousemove","click","dblclick"],implicit:{svg:{"shape-rendering":"auto","pointer-events":"painted",x:0,y:0,dy:0,"text-anchor":"start",transform:"translate(0,0)",fill:"none","fill-opacity":1,stroke:"none","stroke-opacity":1,"stroke-width":1.5,"stroke-linejoin":"miter"},
+css:{font:"10px sans-serif"}}};pv.SvgScene.updateAll=function(b){if(b.length&&b[0].reverse&&b.type!="line"&&b.type!="area"){for(var c=pv.extend(b),d=0,f=b.length-1;f>=0;d++,f--)c[d]=b[f];b=c}this.removeSiblings(this[b.type](b))};pv.SvgScene.create=function(b){return document.createElementNS(this.svg,b)};
+pv.SvgScene.expect=function(b,c,d,f){if(b){if(b.tagName=="a")b=b.firstChild;if(b.tagName!=c){c=this.create(c);b.parentNode.replaceChild(c,b);b=c}}else b=this.create(c);for(var g in d){c=d[g];if(c==this.implicit.svg[g])c=null;c==null?b.removeAttribute(g):b.setAttribute(g,c)}for(g in f){c=f[g];if(c==this.implicit.css[g])c=null;if(c==null)b.style.removeProperty(g);else b.style[g]=c}return b};
+pv.SvgScene.append=function(b,c,d){b.$scene={scenes:c,index:d};b=this.title(b,c[d]);b.parentNode||c.$g.appendChild(b);return b.nextSibling};pv.SvgScene.title=function(b,c){var d=b.parentNode;if(d&&d.tagName!="a")d=null;if(c.title){if(!d){d=this.create("a");b.parentNode&&b.parentNode.replaceChild(d,b);d.appendChild(b)}d.setAttributeNS(this.xlink,"title",c.title);return d}d&&d.parentNode.replaceChild(b,d);return b};
+pv.SvgScene.dispatch=pv.listener(function(b){var c=b.target.$scene;if(c){var d=b.type;switch(d){case "DOMMouseScroll":d="mousewheel";b.wheel=-480*b.detail;break;case "mousewheel":b.wheel=(window.opera?12:1)*b.wheelDelta;break}pv.Mark.dispatch(d,c.scenes,c.index)&&b.preventDefault()}});pv.SvgScene.removeSiblings=function(b){for(;b;){var c=b.nextSibling;b.parentNode.removeChild(b);b=c}};pv.SvgScene.undefined=function(){};
+pv.SvgScene.pathBasis=function(){function b(f,g,h,i,j){return{x:f[0]*g.left+f[1]*h.left+f[2]*i.left+f[3]*j.left,y:f[0]*g.top+f[1]*h.top+f[2]*i.top+f[3]*j.top}}var c=[[1/6,2/3,1/6,0],[0,2/3,1/3,0],[0,1/3,2/3,0],[0,1/6,2/3,1/6]],d=function(f,g,h,i){var j=b(c[1],f,g,h,i),k=b(c[2],f,g,h,i);f=b(c[3],f,g,h,i);return"C"+j.x+","+j.y+","+k.x+","+k.y+","+f.x+","+f.y};d.segment=function(f,g,h,i){var j=b(c[0],f,g,h,i),k=b(c[1],f,g,h,i),l=b(c[2],f,g,h,i);f=b(c[3],f,g,h,i);return"M"+j.x+","+j.y+"C"+k.x+","+k.y+
+","+l.x+","+l.y+","+f.x+","+f.y};return d}();pv.SvgScene.curveBasis=function(b){if(b.length<=2)return"";var c="",d=b[0],f=d,g=d,h=b[1];c+=this.pathBasis(d,f,g,h);for(var i=2;i1){j=c[1];h=b[k];k++;f+="C"+(g.left+i.x)+","+(g.top+i.y)+","+(h.left-j.x)+","+(h.top-j.y)+","+h.left+","+h.top;for(g=2;g9){k=3/Math.sqrt(k);f[h]=
+k*i*d[h];f[h+1]=k*j*d[h]}}for(h=0;h2&&(g.interpolate=="basis"||g.interpolate=="cardinal"||g.interpolate=="monotone")?d:c)(l,q-1));l=q-1}}if(!j.length)return f;f=this.expect(f,"path",{"shape-rendering":g.antialias?null:"crispEdges","pointer-events":g.events,cursor:g.cursor,d:"M"+j.join("ZM")+"Z",fill:h.color,"fill-opacity":h.opacity||
+null,stroke:i.color,"stroke-opacity":i.opacity||null,"stroke-width":i.opacity?g.lineWidth/this.scale:null});return this.append(f,b,0)};
+pv.SvgScene.areaSegment=function(b){var c=b.$g.firstChild,d=b[0],f,g;if(d.interpolate=="basis"||d.interpolate=="cardinal"||d.interpolate=="monotone"){f=[];g=[];for(var h=0,i=b.length;h2&&(d.interpolate=="basis"||d.interpolate=="cardinal"||d.interpolate=="monotone"))switch(d.interpolate){case "basis":h+=this.curveBasis(b);break;case "cardinal":h+=this.curveCardinal(b,d.tension);break;case "monotone":h+=this.curveMonotone(b);
+break}else for(var i=1;i1)break;return"A"+f+","+f+" 0 0,"+d+" "+c.left+","+c.top;case "step-before":return"V"+c.top+"H"+c.left;case "step-after":return"H"+c.left+"V"+c.top}return"L"+c.left+","+c.top};pv.SvgScene.lineIntersect=function(b,c,d,f){return b.plus(c.times(d.minus(b).dot(f.perp())/c.dot(f.perp())))};
+pv.SvgScene.pathJoin=function(b,c,d,f){var g=pv.vector(c.left,c.top);d=pv.vector(d.left,d.top);var h=d.minus(g),i=h.perp().norm(),j=i.times(c.lineWidth/(2*this.scale));c=g.plus(j);var k=d.plus(j),l=d.minus(j);j=g.minus(j);if(b&&b.visible){b=g.minus(b.left,b.top).perp().norm().plus(i);j=this.lineIntersect(g,b,j,h);c=this.lineIntersect(g,b,c,h)}if(f&&f.visible){f=pv.vector(f.left,f.top).minus(d).perp().norm().plus(i);l=this.lineIntersect(d,f,l,h);k=this.lineIntersect(d,f,k,h)}return"M"+c.x+","+c.y+
+"L"+k.x+","+k.y+" "+l.x+","+l.y+" "+j.x+","+j.y};
+pv.SvgScene.panel=function(b){for(var c=b.$g,d=c&&c.firstChild,f=0;f=2*Math.PI)i=i?"M0,"+j+"A"+j+","+j+" 0 1,1 0,"+-j+"A"+j+","+j+" 0 1,1 0,"+j+"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"Z":"M0,"+j+"A"+j+","+j+" 0 1,1 0,"+-j+"A"+j+","+j+" 0 1,1 0,"+j+"Z";else{var l=Math.min(f.startAngle,f.endAngle),q=Math.max(f.startAngle,f.endAngle),
+n=Math.cos(l),p=Math.cos(q);l=Math.sin(l);q=Math.sin(q);i=i?"M"+j*n+","+j*l+"A"+j+","+j+" 0 "+(k1?c:null)};
+a.anchor=function(b){b||(b="center");return(new pv.Anchor(this)).name(b).data(function(){return this.scene.target.map(function(c){return c.data})}).visible(function(){return this.scene.target[this.index].visible}).left(function(){var c=this.scene.target[this.index],d=c.width||0;switch(this.name()){case "bottom":case "top":case "center":return c.left+d/2;case "left":return null}return c.left+d}).top(function(){var c=this.scene.target[this.index],d=c.height||0;switch(this.name()){case "left":case "right":case "center":return c.top+
+d/2;case "top":return null}return c.top+d}).right(function(){var c=this.scene.target[this.index];return this.name()=="left"?c.right+(c.width||0):null}).bottom(function(){var c=this.scene.target[this.index];return this.name()=="top"?c.bottom+(c.height||0):null}).textAlign(function(){switch(this.name()){case "bottom":case "top":case "center":return"center";case "right":return"right"}return"left"}).textBaseline(function(){switch(this.name()){case "right":case "left":case "center":return"middle";case "top":return"top"}return"bottom"})};
+a.anchorTarget=function(){return this.target};a.margin=function(b){return this.left(b).right(b).top(b).bottom(b)};a.instance=function(b){var c=this.scene||this.parent.instance(-1).children[this.childIndex],d=!arguments.length||this.hasOwnProperty("index")?this.index:b;return c[d<0?c.length-1:d]};
+a.instances=function(b){for(var c=this,d=[],f;!(f=c.scene);){b=b.parent;d.push({index:b.index,childIndex:c.childIndex});c=c.parent}for(;d.length;){b=d.pop();f=f[b.index].children[b.childIndex]}if(this.hasOwnProperty("index")){d=pv.extend(f[this.index]);d.right=d.top=d.left=d.bottom=0;return[d]}return f};a.first=function(){return this.scene[0]};a.last=function(){return this.scene[this.scene.length-1]};a.sibling=function(){return this.index==0?null:this.scene[this.index-1]};
+a.cousin=function(){var b=this.parent;return(b=b&&b.sibling())&&b.children?b.children[this.childIndex][this.index]:null};
+a.render=function(){function b(i,j,k){i.scale=k;if(j=0;l--){var q=k[l];if(!(q.name in c)){c[q.name]=q;switch(q.name){case "data":f=q;break;case "visible":g=q;break;default:d[q.type].push(q);break}}}while(j=j.proto)}var c={},d=[[],[],[],[]],f,g;b(this);b(this.defaults);d[1].reverse();d[3].reverse();var h=this;do for(var i in h.properties)i in c||d[2].push(c[i]={name:i,type:2,value:null});while(h=h.proto);h=d[0].concat(d[1]);for(i=0;ih.id)d[g.name]={id:0,value:g.type&1?g.value.apply(this,c):g.value}}}d=this.binds.data;d=d.type&
+1?d.value.apply(this,c):d.value;c.unshift(null);b.length=d.length;for(f=0;f0;l--){p=m[l];p.scale=q;q*=p.scene[p.index].transform.k}if(n.children){l=0;for(m=n.children.length;l=3*Math.PI/2};pv.Wedge.prototype.buildImplied=function(b){if(b.angle==null)b.angle=b.endAngle-b.startAngle;else if(b.endAngle==null)b.endAngle=b.startAngle+b.angle;pv.Mark.prototype.buildImplied.call(this,b)};pv.simulation=function(b){return new pv.Simulation(b)};pv.Simulation=function(b){for(var c=0;c=s,u=q.y>=t;l.leaf=false;switch((u<<1)+x){case 0:l=l.c1||(l.c1=new pv.Quadtree.Node);break;case 1:l=l.c2||(l.c2=new pv.Quadtree.Node);break;case 2:l=l.c3||(l.c3=new pv.Quadtree.Node);break;case 3:l=l.c4||(l.c4=new pv.Quadtree.Node);
+break}if(x)n=s;else m=s;if(u)p=t;else r=t;c(l,q,n,p,m,r)}var f,g=Number.POSITIVE_INFINITY,h=g,i=Number.NEGATIVE_INFINITY,j=i;for(f=b;f;f=f.next){if(f.xi)i=f.x;if(f.y>j)j=f.y}f=i-g;var k=j-h;if(f>k)j=h+f;else i=g+k;this.xMin=g;this.yMin=h;this.xMax=i;this.yMax=j;this.root=new pv.Quadtree.Node;for(f=b;f;f=f.next)c(this.root,f,g,h,i,j)};pv.Quadtree.Node=function(){this.leaf=true;this.p=this.c4=this.c3=this.c2=this.c1=null};pv.Force={};
+pv.Force.charge=function(b){function c(l){function q(m){c(m);l.cn+=m.cn;n+=m.cn*m.cx;p+=m.cn*m.cy}var n=0,p=0;l.cn=0;if(!l.leaf){l.c1&&q(l.c1);l.c2&&q(l.c2);l.c3&&q(l.c3);l.c4&&q(l.c4)}if(l.p){l.cn+=b;n+=b*l.p.x;p+=b*l.p.y}l.cx=n/l.cn;l.cy=p/l.cn}function d(l,q,n,p,m,r){var s=l.cx-q.x,t=l.cy-q.y,x=1/Math.sqrt(s*s+t*t);if(l.leaf&&l.p!=q||(m-n)*xg)x=g;l=l.cn*x*x*x;s=s*l;t=t*l;q.fx+=s;q.fy+=t}}else if(!l.leaf){var u=(n+m)*0.5,o=(p+r)*0.5;l.c1&&d(l.c1,q,n,p,u,o);l.c2&&d(l.c2,q,u,p,
+m,o);l.c3&&d(l.c3,q,n,o,u,r);l.c4&&d(l.c4,q,u,o,m,r);if(!(xg)x=g;if(l.p&&l.p!=q){l=b*x*x*x;s=s*l;t=t*l;q.fx+=s;q.fy+=t}}}}var f=2,g=1/f,h=500,i=1/h,j=0.9,k={};arguments.length||(b=-40);k.constant=function(l){if(arguments.length){b=Number(l);return k}return b};k.domain=function(l,q){if(arguments.length){f=Number(l);g=1/f;h=Number(q);i=1/h;return k}return[f,h]};k.theta=function(l){if(arguments.length){j=Number(l);return k}return j};k.apply=function(l,q){c(q.root);for(l=l;l;l=l.next)d(q.root,
+l,q.xMin,q.yMin,q.xMax,q.yMax)};return k};pv.Force.drag=function(b){var c={};arguments.length||(b=0.1);c.constant=function(d){if(arguments.length){b=d;return c}return b};c.apply=function(d){if(b)for(d=d;d;d=d.next){d.fx-=b*d.vx;d.fy-=b*d.vy}};return c};
+pv.Force.spring=function(b){var c=0.1,d=20,f,g,h={};arguments.length||(b=0.1);h.links=function(i){if(arguments.length){f=i;g=i.map(function(j){return 1/Math.sqrt(Math.max(j.sourceNode.linkDegree,j.targetNode.linkDegree))});return h}return f};h.constant=function(i){if(arguments.length){b=Number(i);return h}return b};h.damping=function(i){if(arguments.length){c=Number(i);return h}return c};h.length=function(i){if(arguments.length){d=Number(i);return h}return d};h.apply=function(){for(var i=0;ig,o=sh){l.c1&&u&&c(l.c1,q,n,p,s,t);l.c2&&o&&c(l.c2,q,s,p,m,t)}if(x){l.c3&&u&&c(l.c3,q,n,t,s,r);l.c4&&o&&c(l.c4,q,s,t,m,r)}}if(l.p&&l.p!=q){n=q.x-l.p.x;p=q.y-l.p.y;m=Math.sqrt(n*n+p*p);r=f+b(l.p);if(mm)m=p}for(var r=0;rc.max?c.max:g.x;if(d)for(g=f;g;g=g.next)g.y=g.yd.max?d.max:g.y};return b};pv.Layout=function(){pv.Panel.call(this)};pv.Layout.prototype=pv.extend(pv.Panel);
+pv.Layout.prototype.property=function(b,c){if(!this.hasOwnProperty("properties"))this.properties=pv.extend(this.properties);this.properties[b]=true;this.propertyMethod(b,false,pv.Mark.cast[b]=c);return this};
+pv.Layout.Network=function(){pv.Layout.call(this);var b=this;this.$id=pv.id();(this.node=(new pv.Mark).data(function(){return b.nodes()}).strokeStyle("#1f77b4").fillStyle("#fff").left(function(c){return c.x}).top(function(c){return c.y})).parent=this;this.link=(new pv.Mark).extend(this.node).data(function(c){return[c.sourceNode,c.targetNode]}).fillStyle(null).lineWidth(function(c,d){return d.linkValue*1.5}).strokeStyle("rgba(0,0,0,.2)");this.link.add=function(c){return b.add(pv.Panel).data(function(){return b.links()}).add(c).extend(this)};
+(this.label=(new pv.Mark).extend(this.node).textMargin(7).textBaseline("middle").text(function(c){return c.nodeName||c.nodeValue}).textAngle(function(c){c=c.midAngle;return pv.Wedge.upright(c)?c:c+Math.PI}).textAlign(function(c){return pv.Wedge.upright(c.midAngle)?"left":"right"})).parent=this};
+pv.Layout.Network.prototype=pv.extend(pv.Layout).property("nodes",function(b){return b.map(function(c,d){if(typeof c!="object")c={nodeValue:c};c.index=d;return c})}).property("links",function(b){return b.map(function(c){if(isNaN(c.linkValue))c.linkValue=isNaN(c.value)?1:c.value;return c})});pv.Layout.Network.prototype.reset=function(){this.$id=pv.id();return this};pv.Layout.Network.prototype.buildProperties=function(b,c){if((b.$id||0)=this.$id)return true;b.$id=this.$id;b.nodes.forEach(function(c){c.linkDegree=0});b.links.forEach(function(c){var d=c.linkValue;(c.sourceNode||(c.sourceNode=b.nodes[c.source])).linkDegree+=d;(c.targetNode||(c.targetNode=b.nodes[c.target])).linkDegree+=d})};pv.Layout.Hierarchy=function(){pv.Layout.Network.call(this);this.link.strokeStyle("#ccc")};pv.Layout.Hierarchy.prototype=pv.extend(pv.Layout.Network);
+pv.Layout.Hierarchy.prototype.buildImplied=function(b){if(!b.links)b.links=pv.Layout.Hierarchy.links.call(this);pv.Layout.Network.prototype.buildImplied.call(this,b)};pv.Layout.Hierarchy.links=function(){return this.nodes().filter(function(b){return b.parentNode}).map(function(b){return{sourceNode:b,targetNode:b.parentNode,linkValue:1}})};
+pv.Layout.Hierarchy.NodeLink={buildImplied:function(b){function c(m){return m.parentNode?m.depth*(n-q)+q:0}function d(m){return m.parentNode?(m.breadth-0.25)*2*Math.PI:0}function f(m){switch(i){case "left":return m.depth*k;case "right":return k-m.depth*k;case "top":return m.breadth*k;case "bottom":return k-m.breadth*k;case "radial":return k/2+c(m)*Math.cos(m.midAngle)}}function g(m){switch(i){case "left":return m.breadth*l;case "right":return l-m.breadth*l;case "top":return m.depth*l;case "bottom":return l-
+m.depth*l;case "radial":return l/2+c(m)*Math.sin(m.midAngle)}}var h=b.nodes,i=b.orient,j=/^(top|bottom)$/.test(i),k=b.width,l=b.height;if(i=="radial"){var q=b.innerRadius,n=b.outerRadius;if(q==null)q=0;if(n==null)n=Math.min(k,l)/2}for(b=0;bb.dy?0:-Math.PI/2});(this.leaf=(new pv.Mark).extend(this.node).fillStyle(null).strokeStyle(null).visible(function(b){return!b.firstChild})).parent=
+this;delete this.link};pv.Layout.Treemap.prototype=pv.extend(pv.Layout.Hierarchy).property("round",Boolean).property("paddingLeft",Number).property("paddingRight",Number).property("paddingTop",Number).property("paddingBottom",Number).property("mode",String).property("order",String);a=pv.Layout.Treemap.prototype;a.defaults=(new pv.Layout.Treemap).extend(pv.Layout.Hierarchy.prototype.defaults).mode("squarify").order("ascending");a.padding=function(b){return this.paddingLeft(b).paddingRight(b).paddingTop(b).paddingBottom(b)};
+a.$size=function(b){return Number(b.nodeValue)};a.size=function(b){this.$size=pv.functor(b);return this};
+a.buildImplied=function(b){function c(r,s,t,x,u,o,v){for(var w=0,y=0;wt)t=v;u+=v}u*=u;s*=s;return Math.max(s*t/u,u/(s*x))}function f(r,s){function t(A){var D=o==y,G=pv.sum(A,n),E=y?p(G/y):0;c(A,G,D,x,u,D?o:E,D?E:v);if(D){u+=E;v-=E}else{x+=
+E;o-=E}y=Math.min(o,v);return D}var x=r.x+j,u=r.y+l,o=r.dx-j-k,v=r.dy-l-q;if(m!="squarify")c(r.childNodes,r.size,m=="slice"?true:m=="dice"?false:s&1,x,u,o,v);else{var w=[];s=Infinity;var y=Math.min(o,v),z=o*v/r.size;if(!(r.size<=0)){r.visitBefore(function(A){A.size*=z});for(r=r.childNodes.slice();r.length;){var C=r[r.length-1];if(C.size){w.push(C);z=d(w,y);if(z<=s){r.pop();s=z}else{w.pop();t(w);w.length=0;s=Infinity}}else r.pop()}if(t(w))for(s=0;s0){i(k(C,o,v),o,B);A+=B;D+=B}G+=C.mod;A+=y.mod;E+=w.mod;D+=z.mod;C=h(C);y=g(y)}if(C&&!h(z)){z.thread=C;z.mod+=G-D}if(y&&!g(w)){w.thread=y;w.mod+=A-E;v=o}}return v}function g(o){return o.firstChild||o.thread}function h(o){return o.lastChild||o.thread}function i(o,v,w){var y=v.number-o.number;v.change-=w/y;v.shift+=w;o.change+=
+w/y;v.prelim+=w;v.mod+=w}function j(o){var v=0,w=0;for(o=o.lastChild;o;o=o.previousSibling){o.prelim+=v;o.mod+=v;w+=o.change;v+=o.shift+w}}function k(o,v,w){return o.ancestor.parentNode==v.parentNode?o.ancestor:w}function l(o,v){return(v?1:t+1)/(m=="radial"?o:1)}function q(o){return m=="radial"?o.breadth/r:0}function n(o){switch(m){case "left":return o.depth;case "right":return x-o.depth;case "top":case "bottom":return o.breadth+x/2;case "radial":return x/2+o.depth*Math.cos(q(o))}}function p(o){switch(m){case "left":case "right":return o.breadth+
+u/2;case "top":return o.depth;case "bottom":return u-o.depth;case "radial":return u/2+o.depth*Math.sin(q(o))}}if(!pv.Layout.Hierarchy.prototype.buildImplied.call(this,b)){var m=b.orient,r=b.depth,s=b.breadth,t=b.group,x=b.width,u=b.height;b=b.nodes[0];b.visitAfter(function(o,v){o.ancestor=o;o.prelim=0;o.mod=0;o.change=0;o.shift=0;o.number=o.previousSibling?o.previousSibling.number+1:0;o.depth=v});c(b);d(b,-b.prelim,0);b.visitAfter(function(o){o.breadth*=s;o.depth*=r;o.midAngle=q(o);o.x=n(o);o.y=p(o);
+if(o.firstChild)o.midAngle+=Math.PI;delete o.breadth;delete o.depth;delete o.ancestor;delete o.prelim;delete o.mod;delete o.change;delete o.shift;delete o.number;delete o.thread})}};pv.Layout.Indent=function(){pv.Layout.Hierarchy.call(this);this.link.interpolate("step-after")};pv.Layout.Indent.prototype=pv.extend(pv.Layout.Hierarchy).property("depth",Number).property("breadth",Number);pv.Layout.Indent.prototype.defaults=(new pv.Layout.Indent).extend(pv.Layout.Hierarchy.prototype.defaults).depth(15).breadth(15);
+pv.Layout.Indent.prototype.buildImplied=function(b){function c(i,j,k){i.x=g+k++*f;i.y=h+j++*d;i.midAngle=0;for(i=i.firstChild;i;i=i.nextSibling)j=c(i,j,k);return j}if(!pv.Layout.Hierarchy.prototype.buildImplied.call(this,b)){var d=b.breadth,f=b.depth,g=0,h=0;c(b.nodes[0],1,1)}};pv.Layout.Pack=function(){pv.Layout.Hierarchy.call(this);this.node.radius(function(b){return b.radius}).strokeStyle("rgb(31, 119, 180)").fillStyle("rgba(31, 119, 180, .25)");this.label.textAlign("center");delete this.link};
+pv.Layout.Pack.prototype=pv.extend(pv.Layout.Hierarchy).property("spacing",Number).property("order",String);pv.Layout.Pack.prototype.defaults=(new pv.Layout.Pack).extend(pv.Layout.Hierarchy.prototype.defaults).spacing(1).order("ascending");pv.Layout.Pack.prototype.$radius=function(){return 1};pv.Layout.Pack.prototype.size=function(b){this.$radius=typeof b=="function"?function(){return Math.sqrt(b.apply(this,arguments))}:(b=Math.sqrt(b),function(){return b});return this};
+pv.Layout.Pack.prototype.buildImplied=function(b){function c(n){var p=pv.Mark.stack;p.unshift(null);for(var m=0,r=n.length;m0.0010}var t=Infinity,x=-Infinity,u=Infinity,o=-Infinity,v,w,y,z,C;v=n[0];v.x=-v.radius;v.y=0;p(v);if(n.length>1){w=n[1];w.x=w.radius;w.y=0;p(w);if(n.length>2){y=n[2];g(v,w,y);p(y);m(v,y);v.p=
+y;m(y,w);w=v.n;for(var A=3;A0){r(v,z);w=z;A--}else if(D<0){r(z,w);v=z;A--}}}}v=(t+x)/2;w=(u+o)/2;for(A=y=0;An.min){n.sim.step();
+q=true}q&&d.render()},42)}else for(k=0;kg)g=j;i.size=i.firstChild?pv.sum(i.childNodes,function(k){return k.size}):c.$size.apply(c,(f[0]=i,f))});f.shift();switch(b.order){case "ascending":d.sort(function(i,j){return i.size-j.size});break;case "descending":d.sort(function(i,j){return j.size-i.size});break}var h=1/g;d.minBreadth=0;d.breadth=
+0.5;d.maxBreadth=1;d.visitBefore(function(i){for(var j=i.minBreadth,k=i.maxBreadth-j,l=i.firstChild;l;l=l.nextSibling){l.minBreadth=j;l.maxBreadth=j+=l.size/i.size*k;l.breadth=(j+l.minBreadth)/2}});d.visitAfter(function(i,j){i.minDepth=(j-1)*h;i.maxDepth=i.depth=j*h});pv.Layout.Hierarchy.NodeLink.buildImplied.call(this,b)}};pv.Layout.Partition.Fill=function(){pv.Layout.Partition.call(this);pv.Layout.Hierarchy.Fill.constructor.call(this)};pv.Layout.Partition.Fill.prototype=pv.extend(pv.Layout.Partition);
+pv.Layout.Partition.Fill.prototype.buildImplied=function(b){pv.Layout.Partition.prototype.buildImplied.call(this,b)||pv.Layout.Hierarchy.Fill.buildImplied.call(this,b)};pv.Layout.Arc=function(){pv.Layout.Network.call(this);var b,c,d,f=this.buildImplied;this.buildImplied=function(g){f.call(this,g);c=g.directed;b=g.orient=="radial"?"linear":"polar";d=g.orient=="right"||g.orient=="top"};this.link.data(function(g){var h=g.sourceNode;g=g.targetNode;return d!=(c||h.breadth>1)*f:null}).bottom(function(k,l){return d=="mirror"?l&1?null:(l+1>>1)*-f:(l&1||-1)*(l+1>>1)*f}).fillStyle(function(k,l){return(l&1?h:i)((l>>1)+1)});this.band.add=function(k){return b.add(pv.Panel).extend(c).add(k).extend(this)}};pv.Layout.Horizon.prototype=pv.extend(pv.Layout).property("bands",Number).property("mode",String).property("backgroundStyle",pv.color).property("positiveStyle",pv.color).property("negativeStyle",pv.color);
+pv.Layout.Horizon.prototype.defaults=(new pv.Layout.Horizon).extend(pv.Layout.prototype.defaults).bands(2).mode("offset").backgroundStyle("white").positiveStyle("#1f77b4").negativeStyle("#d62728");
+pv.Layout.Rollup=function(){pv.Layout.Network.call(this);var b=this,c,d,f=b.buildImplied;this.buildImplied=function(g){f.call(this,g);c=g.$rollup.nodes;d=g.$rollup.links};this.node.data(function(){return c}).size(function(g){return g.nodes.length*20});this.link.interpolate("polar").eccentricity(0.8);this.link.add=function(g){return b.add(pv.Panel).data(function(){return d}).add(g).extend(this)}};pv.Layout.Rollup.prototype=pv.extend(pv.Layout.Network).property("directed",Boolean);
+pv.Layout.Rollup.prototype.x=function(b){this.$x=pv.functor(b);return this};pv.Layout.Rollup.prototype.y=function(b){this.$y=pv.functor(b);return this};
+pv.Layout.Rollup.prototype.buildImplied=function(b){function c(r){return i[r]+","+j[r]}if(!pv.Layout.Network.prototype.buildImplied.call(this,b)){var d=b.nodes,f=b.links,g=b.directed,h=d.length,i=[],j=[],k=0,l={},q={},n=pv.Mark.stack,p={parent:this};n.unshift(null);for(var m=0;mk.index?k.index+","+d.index:d.index+","+k.index;(n=q[h])||(n=q[h]={sourceNode:d,targetNode:k,linkValue:0,links:[]});n.links.push(f[m]);n.linkValue+=f[m].linkValue}b.$rollup={nodes:pv.values(l),links:pv.values(q)}}};
+pv.Layout.Matrix=function(){pv.Layout.Network.call(this);var b,c,d,f,g,h=this.buildImplied;this.buildImplied=function(i){h.call(this,i);b=i.nodes.length;c=i.width/b;d=i.height/b;f=i.$matrix.labels;g=i.$matrix.pairs};this.link.data(function(){return g}).left(function(){return c*(this.index%b)}).top(function(){return d*Math.floor(this.index/b)}).width(function(){return c}).height(function(){return d}).lineWidth(1.5).strokeStyle("#fff").fillStyle(function(i){return i.linkValue?"#555":"#eee"}).parent=
+this;delete this.link.add;this.label.data(function(){return f}).left(function(){return this.index&1?c*((this.index>>1)+0.5):0}).top(function(){return this.index&1?0:d*((this.index>>1)+0.5)}).textMargin(4).textAlign(function(){return this.index&1?"left":"right"}).textAngle(function(){return this.index&1?-Math.PI/2:0});delete this.node};pv.Layout.Matrix.prototype=pv.extend(pv.Layout.Network).property("directed",Boolean);pv.Layout.Matrix.prototype.sort=function(b){this.$sort=b;return this};
+pv.Layout.Matrix.prototype.buildImplied=function(b){if(!pv.Layout.Network.prototype.buildImplied.call(this,b)){var c=b.nodes,d=b.links,f=this.$sort,g=c.length,h=pv.range(g),i=[],j=[],k={};b.$matrix={labels:i,pairs:j};f&&h.sort(function(m,r){return f(c[m],c[r])});for(var l=0;lk)l=null;if(g){if(l&&g.scene==l.scene&&g.index==l.index)return;pv.Mark.dispatch("unpoint",g.scene,g.index)}if(g=l){pv.Mark.dispatch("point",l.scene,l.index);pv.listen(this.root.canvas(),"mouseout",f)}}function f(l){if(g&&!pv.ancestor(this,l.relatedTarget)){pv.Mark.dispatch("unpoint",g.scene,g.index);g=null}}var g,h=null,i=1,j=1,k=arguments.length?b*b:900;d.collapse=function(l){if(arguments.length){h=String(l);switch(h){case "y":i=
+1;j=0;break;case "x":i=0;j=1;break;default:j=i=1;break}return d}return h};return d};
+pv.Behavior.select=function(){function b(j){g=this.index;f=this.scene;i=this.mouse();h=j;h.x=i.x;h.y=i.y;h.dx=h.dy=0;pv.Mark.dispatch("selectstart",f,g)}function c(){if(f){f.mark.context(f,g,function(){var j=this.mouse();h.x=Math.max(0,Math.min(i.x,j.x));h.y=Math.max(0,Math.min(i.y,j.y));h.dx=Math.min(this.width(),Math.max(j.x,i.x))-h.x;h.dy=Math.min(this.height(),Math.max(j.y,i.y))-h.y;this.render()});pv.Mark.dispatch("select",f,g)}}function d(){if(f){pv.Mark.dispatch("selectend",f,g);f=null}}var f,
+g,h,i;pv.listen(window,"mousemove",c);pv.listen(window,"mouseup",d);return b};
+pv.Behavior.resize=function(b){function c(k){h=this.index;g=this.scene;j=this.mouse();i=k;switch(b){case "left":j.x=i.x+i.dx;break;case "right":j.x=i.x;break;case "top":j.y=i.y+i.dy;break;case "bottom":j.y=i.y;break}pv.Mark.dispatch("resizestart",g,h)}function d(){if(g){g.mark.context(g,h,function(){var k=this.mouse();i.x=Math.max(0,Math.min(j.x,k.x));i.y=Math.max(0,Math.min(j.y,k.y));i.dx=Math.min(this.parent.width(),Math.max(k.x,j.x))-i.x;i.dy=Math.min(this.parent.height(),Math.max(k.y,j.y))-i.y;
+this.render()});pv.Mark.dispatch("resize",g,h)}}function f(){if(g){pv.Mark.dispatch("resizeend",g,h);g=null}}var g,h,i,j;pv.listen(window,"mousemove",d);pv.listen(window,"mouseup",f);return c};
+pv.Behavior.pan=function(){function b(){g=this.index;f=this.scene;i=pv.vector(pv.event.pageX,pv.event.pageY);h=this.transform();j=1/(h.k*this.scale);if(k)k={x:(1-h.k)*this.width(),y:(1-h.k)*this.height()}}function c(){if(f){f.mark.context(f,g,function(){var l=h.translate((pv.event.pageX-i.x)*j,(pv.event.pageY-i.y)*j);if(k){l.x=Math.max(k.x,Math.min(0,l.x));l.y=Math.max(k.y,Math.min(0,l.y))}this.transform(l).render()});pv.Mark.dispatch("pan",f,g)}}function d(){f=null}var f,g,h,i,j,k;b.bound=function(l){if(arguments.length){k=
+Boolean(l);return this}return Boolean(k)};pv.listen(window,"mousemove",c);pv.listen(window,"mouseup",d);return b};
+pv.Behavior.zoom=function(b){function c(){var f=this.mouse(),g=pv.event.wheel*b;f=this.transform().translate(f.x,f.y).scale(g<0?1E3/(1E3-g):(1E3+g)/1E3).translate(-f.x,-f.y);if(d){f.k=Math.max(1,f.k);f.x=Math.max((1-f.k)*this.width(),Math.min(0,f.x));f.y=Math.max((1-f.k)*this.height(),Math.min(0,f.y))}this.transform(f).render();pv.Mark.dispatch("zoom",this.scene,this.index)}var d;arguments.length||(b=1/48);c.bound=function(f){if(arguments.length){d=Boolean(f);return this}return Boolean(d)};return c};
+pv.Geo=function(){};
+pv.Geo.projections={mercator:{project:function(b){return{x:b.lng/180,y:b.lat>85?1:b.lat<-85?-1:Math.log(Math.tan(Math.PI/4+pv.radians(b.lat)/2))/Math.PI}},invert:function(b){return{lng:b.x*180,lat:pv.degrees(2*Math.atan(Math.exp(b.y*Math.PI))-Math.PI/2)}}},"gall-peters":{project:function(b){return{x:b.lng/180,y:Math.sin(pv.radians(b.lat))}},invert:function(b){return{lng:b.x*180,lat:pv.degrees(Math.asin(b.y))}}},sinusoidal:{project:function(b){return{x:pv.radians(b.lng)*Math.cos(pv.radians(b.lat))/Math.PI,
+y:b.lat/90}},invert:function(b){return{lng:pv.degrees(b.x*Math.PI/Math.cos(b.y*Math.PI/2)),lat:b.y*90}}},aitoff:{project:function(b){var c=pv.radians(b.lng);b=pv.radians(b.lat);var d=Math.acos(Math.cos(b)*Math.cos(c/2));return{x:2*(d?Math.cos(b)*Math.sin(c/2)*d/Math.sin(d):0)/Math.PI,y:2*(d?Math.sin(b)*d/Math.sin(d):0)/Math.PI}},invert:function(b){var c=b.y*Math.PI/2;return{lng:pv.degrees(b.x*Math.PI/2/Math.cos(c)),lat:pv.degrees(c)}}},hammer:{project:function(b){var c=pv.radians(b.lng);b=pv.radians(b.lat);
+var d=Math.sqrt(1+Math.cos(b)*Math.cos(c/2));return{x:2*Math.SQRT2*Math.cos(b)*Math.sin(c/2)/d/3,y:Math.SQRT2*Math.sin(b)/d/1.5}},invert:function(b){var c=b.x*3;b=b.y*1.5;var d=Math.sqrt(1-c*c/16-b*b/4);return{lng:pv.degrees(2*Math.atan2(d*c,2*(2*d*d-1))),lat:pv.degrees(Math.asin(d*b))}}},identity:{project:function(b){return{x:b.lng/180,y:b.lat/90}},invert:function(b){return{lng:b.x*180,lat:b.y*90}}}};
+pv.Geo.scale=function(b){function c(m){if(!n||m.lng!=n.lng||m.lat!=n.lat){n=m;m=d(m);p={x:k(m.x),y:l(m.y)}}return p}function d(m){return j.project({lng:m.lng-q.lng,lat:m.lat})}function f(m){m=j.invert(m);m.lng+=q.lng;return m}var g={x:0,y:0},h={x:1,y:1},i=[],j=pv.Geo.projections.identity,k=pv.Scale.linear(-1,1).range(0,1),l=pv.Scale.linear(-1,1).range(1,0),q={lng:0,lat:0},n,p;c.x=function(m){return c(m).x};c.y=function(m){return c(m).y};c.ticks={lng:function(m){var r;if(i.length>1){var s=pv.Scale.linear();
+if(m==undefined)m=10;r=s.domain(i,function(t){return t.lat}).ticks(m);m=s.domain(i,function(t){return t.lng}).ticks(m)}else{r=pv.range(-80,81,10);m=pv.range(-180,181,10)}return m.map(function(t){return r.map(function(x){return{lat:x,lng:t}})})},lat:function(m){return pv.transpose(c.ticks.lng(m))}};c.invert=function(m){return f({x:k.invert(m.x),y:l.invert(m.y)})};c.domain=function(m,r){if(arguments.length){i=m instanceof Array?arguments.length>1?pv.map(m,r):m:Array.prototype.slice.call(arguments);
+if(i.length>1){var s=i.map(function(x){return x.lng}),t=i.map(function(x){return x.lat});q={lng:(pv.max(s)+pv.min(s))/2,lat:(pv.max(t)+pv.min(t))/2};s=i.map(d);k.domain(s,function(x){return x.x});l.domain(s,function(x){return x.y})}else{q={lng:0,lat:0};k.domain(-1,1);l.domain(-1,1)}n=null;return this}return i};c.range=function(m,r){if(arguments.length){if(typeof m=="object"){g={x:Number(m.x),y:Number(m.y)};h={x:Number(r.x),y:Number(r.y)}}else{g={x:0,y:0};h={x:Number(m),y:Number(r)}}k.range(g.x,h.x);
+l.range(h.y,g.y);n=null;return this}return[g,h]};c.projection=function(m){if(arguments.length){j=typeof m=="string"?pv.Geo.projections[m]||pv.Geo.projections.identity:m;return this.domain(i)}return m};c.by=function(m){function r(){return c(m.apply(this,arguments))}for(var s in c)r[s]=c[s];return r};arguments.length&&c.projection(b);return c};
diff --git a/ComfyUI-KJNodes/kjweb_async/purify.min.js b/ComfyUI-KJNodes/kjweb_async/purify.min.js
new file mode 100644
index 0000000000000000000000000000000000000000..42e7f3bdb08798be14106228c1316b2ee837d4ea
--- /dev/null
+++ b/ComfyUI-KJNodes/kjweb_async/purify.min.js
@@ -0,0 +1,3 @@
+/*! @license DOMPurify 3.0.11 | (c) Cure53 and other contributors | Released under the Apache license 2.0 and Mozilla Public License 2.0 | github.com/cure53/DOMPurify/blob/3.0.11/LICENSE */
+!function(e,t){"object"==typeof exports&&"undefined"!=typeof module?module.exports=t():"function"==typeof define&&define.amd?define(t):(e="undefined"!=typeof globalThis?globalThis:e||self).DOMPurify=t()}(this,(function(){"use strict";const{entries:e,setPrototypeOf:t,isFrozen:n,getPrototypeOf:o,getOwnPropertyDescriptor:r}=Object;let{freeze:i,seal:a,create:l}=Object,{apply:c,construct:s}="undefined"!=typeof Reflect&&Reflect;i||(i=function(e){return e}),a||(a=function(e){return e}),c||(c=function(e,t,n){return e.apply(t,n)}),s||(s=function(e,t){return new e(...t)});const u=b(Array.prototype.forEach),m=b(Array.prototype.pop),p=b(Array.prototype.push),f=b(String.prototype.toLowerCase),d=b(String.prototype.toString),h=b(String.prototype.match),g=b(String.prototype.replace),T=b(String.prototype.indexOf),y=b(String.prototype.trim),E=b(Object.prototype.hasOwnProperty),A=b(RegExp.prototype.test),_=(N=TypeError,function(){for(var e=arguments.length,t=new Array(e),n=0;n1?n-1:0),r=1;r2&&void 0!==arguments[2]?arguments[2]:f;t&&t(e,null);let i=o.length;for(;i--;){let t=o[i];if("string"==typeof t){const e=r(t);e!==t&&(n(o)||(o[i]=e),t=e)}e[t]=!0}return e}function R(e){for(let t=0;t/gm),B=a(/\${[\w\W]*}/gm),W=a(/^data-[\-\w.\u00B7-\uFFFF]/),G=a(/^aria-[\-\w]+$/),Y=a(/^(?:(?:(?:f|ht)tps?|mailto|tel|callto|sms|cid|xmpp):|[^a-z]|[a-z+.\-]+(?:[^a-z+.\-:]|$))/i),j=a(/^(?:\w+script|data):/i),X=a(/[\u0000-\u0020\u00A0\u1680\u180E\u2000-\u2029\u205F\u3000]/g),q=a(/^html$/i),$=a(/^[a-z][.\w]*(-[.\w]+)+$/i);var K=Object.freeze({__proto__:null,MUSTACHE_EXPR:H,ERB_EXPR:z,TMPLIT_EXPR:B,DATA_ATTR:W,ARIA_ATTR:G,IS_ALLOWED_URI:Y,IS_SCRIPT_OR_DATA:j,ATTR_WHITESPACE:X,DOCTYPE_NAME:q,CUSTOM_ELEMENT:$});const V=function(){return"undefined"==typeof window?null:window},Z=function(e,t){if("object"!=typeof e||"function"!=typeof e.createPolicy)return null;let n=null;const o="data-tt-policy-suffix";t&&t.hasAttribute(o)&&(n=t.getAttribute(o));const r="dompurify"+(n?"#"+n:"");try{return e.createPolicy(r,{createHTML:e=>e,createScriptURL:e=>e})}catch(e){return console.warn("TrustedTypes policy "+r+" could not be created."),null}};var J=function t(){let n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:V();const o=e=>t(e);if(o.version="3.0.11",o.removed=[],!n||!n.document||9!==n.document.nodeType)return o.isSupported=!1,o;let{document:r}=n;const a=r,c=a.currentScript,{DocumentFragment:s,HTMLTemplateElement:N,Node:b,Element:R,NodeFilter:H,NamedNodeMap:z=n.NamedNodeMap||n.MozNamedAttrMap,HTMLFormElement:B,DOMParser:W,trustedTypes:G}=n,j=R.prototype,X=L(j,"cloneNode"),$=L(j,"nextSibling"),J=L(j,"childNodes"),Q=L(j,"parentNode");if("function"==typeof N){const e=r.createElement("template");e.content&&e.content.ownerDocument&&(r=e.content.ownerDocument)}let ee,te="";const{implementation:ne,createNodeIterator:oe,createDocumentFragment:re,getElementsByTagName:ie}=r,{importNode:ae}=a;let le={};o.isSupported="function"==typeof e&&"function"==typeof Q&&ne&&void 0!==ne.createHTMLDocument;const{MUSTACHE_EXPR:ce,ERB_EXPR:se,TMPLIT_EXPR:ue,DATA_ATTR:me,ARIA_ATTR:pe,IS_SCRIPT_OR_DATA:fe,ATTR_WHITESPACE:de,CUSTOM_ELEMENT:he}=K;let{IS_ALLOWED_URI:ge}=K,Te=null;const ye=S({},[...D,...C,...O,...v,...M]);let Ee=null;const Ae=S({},[...I,...U,...P,...F]);let 
_e=Object.seal(l(null,{tagNameCheck:{writable:!0,configurable:!1,enumerable:!0,value:null},attributeNameCheck:{writable:!0,configurable:!1,enumerable:!0,value:null},allowCustomizedBuiltInElements:{writable:!0,configurable:!1,enumerable:!0,value:!1}})),Ne=null,be=null,Se=!0,Re=!0,we=!1,Le=!0,De=!1,Ce=!0,Oe=!1,xe=!1,ve=!1,ke=!1,Me=!1,Ie=!1,Ue=!0,Pe=!1;const Fe="user-content-";let He=!0,ze=!1,Be={},We=null;const Ge=S({},["annotation-xml","audio","colgroup","desc","foreignobject","head","iframe","math","mi","mn","mo","ms","mtext","noembed","noframes","noscript","plaintext","script","style","svg","template","thead","title","video","xmp"]);let Ye=null;const je=S({},["audio","video","img","source","image","track"]);let Xe=null;const qe=S({},["alt","class","for","id","label","name","pattern","placeholder","role","summary","title","value","style","xmlns"]),$e="http://www.w3.org/1998/Math/MathML",Ke="http://www.w3.org/2000/svg",Ve="http://www.w3.org/1999/xhtml";let Ze=Ve,Je=!1,Qe=null;const et=S({},[$e,Ke,Ve],d);let tt=null;const nt=["application/xhtml+xml","text/html"],ot="text/html";let rt=null,it=null;const at=r.createElement("form"),lt=function(e){return e instanceof RegExp||e instanceof Function},ct=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};if(!it||it!==e){if(e&&"object"==typeof e||(e={}),e=w(e),tt=-1===nt.indexOf(e.PARSER_MEDIA_TYPE)?ot:e.PARSER_MEDIA_TYPE,rt="application/xhtml+xml"===tt?d:f,Te=E(e,"ALLOWED_TAGS")?S({},e.ALLOWED_TAGS,rt):ye,Ee=E(e,"ALLOWED_ATTR")?S({},e.ALLOWED_ATTR,rt):Ae,Qe=E(e,"ALLOWED_NAMESPACES")?S({},e.ALLOWED_NAMESPACES,d):et,Xe=E(e,"ADD_URI_SAFE_ATTR")?S(w(qe),e.ADD_URI_SAFE_ATTR,rt):qe,Ye=E(e,"ADD_DATA_URI_TAGS")?S(w(je),e.ADD_DATA_URI_TAGS,rt):je,We=E(e,"FORBID_CONTENTS")?S({},e.FORBID_CONTENTS,rt):Ge,Ne=E(e,"FORBID_TAGS")?S({},e.FORBID_TAGS,rt):{},be=E(e,"FORBID_ATTR")?S({},e.FORBID_ATTR,rt):{},Be=!!E(e,"USE_PROFILES")&&e.USE_PROFILES,Se=!1!==e.ALLOW_ARIA_ATTR,Re=!1!==e.ALLOW_DATA_ATTR,we=e.ALLOW_UNKNOWN_PROTOCOLS||!1,Le=!1!==e.ALLOW_SELF_CLOSE_IN_ATTR,De=e.SAFE_FOR_TEMPLATES||!1,Ce=!1!==e.SAFE_FOR_XML,Oe=e.WHOLE_DOCUMENT||!1,ke=e.RETURN_DOM||!1,Me=e.RETURN_DOM_FRAGMENT||!1,Ie=e.RETURN_TRUSTED_TYPE||!1,ve=e.FORCE_BODY||!1,Ue=!1!==e.SANITIZE_DOM,Pe=e.SANITIZE_NAMED_PROPS||!1,He=!1!==e.KEEP_CONTENT,ze=e.IN_PLACE||!1,ge=e.ALLOWED_URI_REGEXP||Y,Ze=e.NAMESPACE||Ve,_e=e.CUSTOM_ELEMENT_HANDLING||{},e.CUSTOM_ELEMENT_HANDLING&<(e.CUSTOM_ELEMENT_HANDLING.tagNameCheck)&&(_e.tagNameCheck=e.CUSTOM_ELEMENT_HANDLING.tagNameCheck),e.CUSTOM_ELEMENT_HANDLING&<(e.CUSTOM_ELEMENT_HANDLING.attributeNameCheck)&&(_e.attributeNameCheck=e.CUSTOM_ELEMENT_HANDLING.attributeNameCheck),e.CUSTOM_ELEMENT_HANDLING&&"boolean"==typeof e.CUSTOM_ELEMENT_HANDLING.allowCustomizedBuiltInElements&&(_e.allowCustomizedBuiltInElements=e.CUSTOM_ELEMENT_HANDLING.allowCustomizedBuiltInElements),De&&(Re=!1),Me&&(ke=!0),Be&&(Te=S({},M),Ee=[],!0===Be.html&&(S(Te,D),S(Ee,I)),!0===Be.svg&&(S(Te,C),S(Ee,U),S(Ee,F)),!0===Be.svgFilters&&(S(Te,O),S(Ee,U),S(Ee,F)),!0===Be.mathMl&&(S(Te,v),S(Ee,P),S(Ee,F))),e.ADD_TAGS&&(Te===ye&&(Te=w(Te)),S(Te,e.ADD_TAGS,rt)),e.ADD_ATTR&&(Ee===Ae&&(Ee=w(Ee)),S(Ee,e.ADD_ATTR,rt)),e.ADD_URI_SAFE_ATTR&&S(Xe,e.ADD_URI_SAFE_ATTR,rt),e.FORBID_CONTENTS&&(We===Ge&&(We=w(We)),S(We,e.FORBID_CONTENTS,rt)),He&&(Te["#text"]=!0),Oe&&S(Te,["html","head","body"]),Te.table&&(S(Te,["tbody"]),delete Ne.tbody),e.TRUSTED_TYPES_POLICY){if("function"!=typeof e.TRUSTED_TYPES_POLICY.createHTML)throw _('TRUSTED_TYPES_POLICY configuration option must provide a "createHTML" 
hook.');if("function"!=typeof e.TRUSTED_TYPES_POLICY.createScriptURL)throw _('TRUSTED_TYPES_POLICY configuration option must provide a "createScriptURL" hook.');ee=e.TRUSTED_TYPES_POLICY,te=ee.createHTML("")}else void 0===ee&&(ee=Z(G,c)),null!==ee&&"string"==typeof te&&(te=ee.createHTML(""));i&&i(e),it=e}},st=S({},["mi","mo","mn","ms","mtext"]),ut=S({},["foreignobject","desc","title","annotation-xml"]),mt=S({},["title","style","font","a","script"]),pt=S({},[...C,...O,...x]),ft=S({},[...v,...k]),dt=function(e){let t=Q(e);t&&t.tagName||(t={namespaceURI:Ze,tagName:"template"});const n=f(e.tagName),o=f(t.tagName);return!!Qe[e.namespaceURI]&&(e.namespaceURI===Ke?t.namespaceURI===Ve?"svg"===n:t.namespaceURI===$e?"svg"===n&&("annotation-xml"===o||st[o]):Boolean(pt[n]):e.namespaceURI===$e?t.namespaceURI===Ve?"math"===n:t.namespaceURI===Ke?"math"===n&&ut[o]:Boolean(ft[n]):e.namespaceURI===Ve?!(t.namespaceURI===Ke&&!ut[o])&&(!(t.namespaceURI===$e&&!st[o])&&(!ft[n]&&(mt[n]||!pt[n]))):!("application/xhtml+xml"!==tt||!Qe[e.namespaceURI]))},ht=function(e){p(o.removed,{element:e});try{e.parentNode.removeChild(e)}catch(t){e.remove()}},gt=function(e,t){try{p(o.removed,{attribute:t.getAttributeNode(e),from:t})}catch(e){p(o.removed,{attribute:null,from:t})}if(t.removeAttribute(e),"is"===e&&!Ee[e])if(ke||Me)try{ht(t)}catch(e){}else try{t.setAttribute(e,"")}catch(e){}},Tt=function(e){let t=null,n=null;if(ve)e=" "+e;else{const t=h(e,/^[\r\n\t ]+/);n=t&&t[0]}"application/xhtml+xml"===tt&&Ze===Ve&&(e=''+e+"");const o=ee?ee.createHTML(e):e;if(Ze===Ve)try{t=(new W).parseFromString(o,tt)}catch(e){}if(!t||!t.documentElement){t=ne.createDocument(Ze,"template",null);try{t.documentElement.innerHTML=Je?te:o}catch(e){}}const i=t.body||t.documentElement;return e&&n&&i.insertBefore(r.createTextNode(n),i.childNodes[0]||null),Ze===Ve?ie.call(t,Oe?"html":"body")[0]:Oe?t.documentElement:i},yt=function(e){return oe.call(e.ownerDocument||e,e,H.SHOW_ELEMENT|H.SHOW_COMMENT|H.SHOW_TEXT|H.SHOW_PROCESSING_INSTRUCTION|H.SHOW_CDATA_SECTION,null)},Et=function(e){return e instanceof B&&("string"!=typeof e.nodeName||"string"!=typeof e.textContent||"function"!=typeof e.removeChild||!(e.attributes instanceof z)||"function"!=typeof e.removeAttribute||"function"!=typeof e.setAttribute||"string"!=typeof e.namespaceURI||"function"!=typeof e.insertBefore||"function"!=typeof e.hasChildNodes)},At=function(e){return"function"==typeof b&&e instanceof b},_t=function(e,t,n){le[e]&&u(le[e],(e=>{e.call(o,t,n,it)}))},Nt=function(e){let t=null;if(_t("beforeSanitizeElements",e,null),Et(e))return ht(e),!0;const n=rt(e.nodeName);if(_t("uponSanitizeElement",e,{tagName:n,allowedTags:Te}),e.hasChildNodes()&&!At(e.firstElementChild)&&A(/<[/\w]/g,e.innerHTML)&&A(/<[/\w]/g,e.textContent))return ht(e),!0;if(7===e.nodeType)return ht(e),!0;if(Ce&&8===e.nodeType&&A(/<[/\w]/g,e.data))return ht(e),!0;if(!Te[n]||Ne[n]){if(!Ne[n]&&St(n)){if(_e.tagNameCheck instanceof RegExp&&A(_e.tagNameCheck,n))return!1;if(_e.tagNameCheck instanceof Function&&_e.tagNameCheck(n))return!1}if(He&&!We[n]){const t=Q(e)||e.parentNode,n=J(e)||e.childNodes;if(n&&t){for(let o=n.length-1;o>=0;--o)t.insertBefore(X(n[o],!0),$(e))}}return ht(e),!0}return e instanceof R&&!dt(e)?(ht(e),!0):"noscript"!==n&&"noembed"!==n&&"noframes"!==n||!A(/<\/no(script|embed|frames)/i,e.innerHTML)?(De&&3===e.nodeType&&(t=e.textContent,u([ce,se,ue],(e=>{t=g(t,e," 
")})),e.textContent!==t&&(p(o.removed,{element:e.cloneNode()}),e.textContent=t)),_t("afterSanitizeElements",e,null),!1):(ht(e),!0)},bt=function(e,t,n){if(Ue&&("id"===t||"name"===t)&&(n in r||n in at))return!1;if(Re&&!be[t]&&A(me,t));else if(Se&&A(pe,t));else if(!Ee[t]||be[t]){if(!(St(e)&&(_e.tagNameCheck instanceof RegExp&&A(_e.tagNameCheck,e)||_e.tagNameCheck instanceof Function&&_e.tagNameCheck(e))&&(_e.attributeNameCheck instanceof RegExp&&A(_e.attributeNameCheck,t)||_e.attributeNameCheck instanceof Function&&_e.attributeNameCheck(t))||"is"===t&&_e.allowCustomizedBuiltInElements&&(_e.tagNameCheck instanceof RegExp&&A(_e.tagNameCheck,n)||_e.tagNameCheck instanceof Function&&_e.tagNameCheck(n))))return!1}else if(Xe[t]);else if(A(ge,g(n,de,"")));else if("src"!==t&&"xlink:href"!==t&&"href"!==t||"script"===e||0!==T(n,"data:")||!Ye[e]){if(we&&!A(fe,g(n,de,"")));else if(n)return!1}else;return!0},St=function(e){return"annotation-xml"!==e&&h(e,he)},Rt=function(e){_t("beforeSanitizeAttributes",e,null);const{attributes:t}=e;if(!t)return;const n={attrName:"",attrValue:"",keepAttr:!0,allowedAttributes:Ee};let r=t.length;for(;r--;){const i=t[r],{name:a,namespaceURI:l,value:c}=i,s=rt(a);let p="value"===a?c:y(c);if(n.attrName=s,n.attrValue=p,n.keepAttr=!0,n.forceKeepAttr=void 0,_t("uponSanitizeAttribute",e,n),p=n.attrValue,n.forceKeepAttr)continue;if(gt(a,e),!n.keepAttr)continue;if(!Le&&A(/\/>/i,p)){gt(a,e);continue}De&&u([ce,se,ue],(e=>{p=g(p,e," ")}));const f=rt(e.nodeName);if(bt(f,s,p)){if(!Pe||"id"!==s&&"name"!==s||(gt(a,e),p=Fe+p),ee&&"object"==typeof G&&"function"==typeof G.getAttributeType)if(l);else switch(G.getAttributeType(f,s)){case"TrustedHTML":p=ee.createHTML(p);break;case"TrustedScriptURL":p=ee.createScriptURL(p)}try{l?e.setAttributeNS(l,a,p):e.setAttribute(a,p),m(o.removed)}catch(e){}}}_t("afterSanitizeAttributes",e,null)},wt=function e(t){let n=null;const o=yt(t);for(_t("beforeSanitizeShadowDOM",t,null);n=o.nextNode();)_t("uponSanitizeShadowNode",n,null),Nt(n)||(n.content instanceof s&&e(n.content),Rt(n));_t("afterSanitizeShadowDOM",t,null)};return o.sanitize=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=null,r=null,i=null,l=null;if(Je=!e,Je&&(e="\x3c!--\x3e"),"string"!=typeof e&&!At(e)){if("function"!=typeof e.toString)throw _("toString is not a function");if("string"!=typeof(e=e.toString()))throw _("dirty is not a string, aborting")}if(!o.isSupported)return e;if(xe||ct(t),o.removed=[],"string"==typeof e&&(ze=!1),ze){if(e.nodeName){const t=rt(e.nodeName);if(!Te[t]||Ne[t])throw _("root node is forbidden and cannot be sanitized in-place")}}else if(e instanceof b)n=Tt("\x3c!----\x3e"),r=n.ownerDocument.importNode(e,!0),1===r.nodeType&&"BODY"===r.nodeName||"HTML"===r.nodeName?n=r:n.appendChild(r);else{if(!ke&&!De&&!Oe&&-1===e.indexOf("<"))return ee&&Ie?ee.createHTML(e):e;if(n=Tt(e),!n)return ke?null:Ie?te:""}n&&ve&&ht(n.firstChild);const c=yt(ze?e:n);for(;i=c.nextNode();)Nt(i)||(i.content instanceof s&&wt(i.content),Rt(i));if(ze)return e;if(ke){if(Me)for(l=re.call(n.ownerDocument);n.firstChild;)l.appendChild(n.firstChild);else l=n;return(Ee.shadowroot||Ee.shadowrootmode)&&(l=ae.call(a,l,!0)),l}let m=Oe?n.outerHTML:n.innerHTML;return Oe&&Te["!doctype"]&&n.ownerDocument&&n.ownerDocument.doctype&&n.ownerDocument.doctype.name&&A(q,n.ownerDocument.doctype.name)&&(m="\n"+m),De&&u([ce,se,ue],(e=>{m=g(m,e," ")})),ee&&Ie?ee.createHTML(m):m},o.setConfig=function(){let e=arguments.length>0&&void 
0!==arguments[0]?arguments[0]:{};ct(e),xe=!0},o.clearConfig=function(){it=null,xe=!1},o.isValidAttribute=function(e,t,n){it||ct({});const o=rt(e),r=rt(t);return bt(o,r,n)},o.addHook=function(e,t){"function"==typeof t&&(le[e]=le[e]||[],p(le[e],t))},o.removeHook=function(e){if(le[e])return m(le[e])},o.removeHooks=function(e){le[e]&&(le[e]=[])},o.removeAllHooks=function(){le={}},o}();return J}));
+//# sourceMappingURL=purify.min.js.map
diff --git a/ComfyUI-KJNodes/kjweb_async/svg-path-properties.min.js b/ComfyUI-KJNodes/kjweb_async/svg-path-properties.min.js
new file mode 100644
index 0000000000000000000000000000000000000000..a77359e6d733d515b7039ac9b18bced780f433b4
--- /dev/null
+++ b/ComfyUI-KJNodes/kjweb_async/svg-path-properties.min.js
@@ -0,0 +1,2 @@
+// http://geoexamples.com/path-properties/ v1.2.0 Copyright 2023 Roger Veciana i Rovira
+!function(t,n){"object"==typeof exports&&"undefined"!=typeof module?n(exports):"function"==typeof define&&define.amd?define(["exports"],n):n((t="undefined"!=typeof globalThis?globalThis:t||self).svgPathProperties={})}(this,(function(t){"use strict";function n(t,n){for(var e=0;et.length)&&(n=t.length);for(var e=0,i=new Array(n);eu.length&&(t=u.length);var n=f({x:u.x0,y:u.y0},u.rx,u.ry,u.xAxisRotate,u.LargeArcFlag,u.SweepFlag,{x:u.x1,y:u.y1},t/u.length);return{x:n.x,y:n.y}})),i(this,"getTangentAtLength",(function(t){t<0?t=0:t>u.length&&(t=u.length);var n,e=.05,i=u.getPointAtLength(t);t<0?t=0:t>u.length&&(t=u.length);var r=(n=t1&&(n=Math.sqrt(c)*n,e=Math.sqrt(c)*e);var f=(Math.pow(n,2)*Math.pow(e,2)-Math.pow(n,2)*Math.pow(l.y,2)-Math.pow(e,2)*Math.pow(l.x,2))/(Math.pow(n,2)*Math.pow(l.y,2)+Math.pow(e,2)*Math.pow(l.x,2));f=f<0?0:f;var y=(r!==h?1:-1)*Math.sqrt(f),v=y*(n*l.y/e),M=y*(-e*l.x/n),L={x:Math.cos(o)*v-Math.sin(o)*M+(t.x+s.x)/2,y:Math.sin(o)*v+Math.cos(o)*M+(t.y+s.y)/2},d={x:(l.x-v)/n,y:(l.y-M)/e},A=w({x:1,y:0},d),b=w(d,{x:(-l.x-v)/n,y:(-l.y-M)/e});!h&&b>0?b-=2*Math.PI:h&&b<0&&(b+=2*Math.PI);var P=A+(b%=2*Math.PI)*a,m=n*Math.cos(P),T=e*Math.sin(P);return{x:Math.cos(o)*m-Math.sin(o)*T+L.x,y:Math.sin(o)*m+Math.cos(o)*T+L.y,ellipticalArcStartAngle:A,ellipticalArcEndAngle:A+b,ellipticalArcAngle:P,ellipticalArcCenter:L,resultantRx:n,resultantRy:e}},y=function(t,n){t=t||500;for(var e,i=0,r=[],h=[],s=n(0),a=0;a0?Math.sqrt(l*l+c):0,y=u*u+c>0?Math.sqrt(u*u+c):0,p=u+Math.sqrt(u*u+c)!==0&&(l+f)/(u+y)!=0?c*Math.log(Math.abs((l+f)/(u+y))):0;return Math.sqrt(a)/2*(l*f-u*y+p)},_=function(t,n,e){return{x:2*(1-e)*(t[1]-t[0])+2*e*(t[2]-t[1]),y:2*(1-e)*(n[1]-n[0])+2*e*(n[2]-n[1])}};function S(t,n,e){var i=N(1,e,t),r=N(1,e,n),h=i*i+r*r;return Math.sqrt(h)}var N=function t(n,e,i){var r,h,s=i.length-1;if(0===s)return 0;if(0===n){h=0;for(var a=0;a<=s;a++)h+=A[s][a]*Math.pow(1-e,s-a)*Math.pow(e,a)*i[a];return h}r=new Array(s);for(var o=0;o.001;){var a=e(r+h),o=Math.abs(t-a)/n;if(o500)break}return r},j=e((function(t,n,e,r,h,s,a,o){var g=this;i(this,"a",void 0),i(this,"b",void 0),i(this,"c",void 0),i(this,"d",void 0),i(this,"length",void 0),i(this,"getArcLength",void 0),i(this,"getPoint",void 0),i(this,"getDerivative",void 0),i(this,"getTotalLength",(function(){return g.length})),i(this,"getPointAtLength",(function(t){var n=[g.a.x,g.b.x,g.c.x,g.d.x],e=[g.a.y,g.b.y,g.c.y,g.d.y],i=C(t,g.length,(function(t){return g.getArcLength(n,e,t)}));return g.getPoint(n,e,i)})),i(this,"getTangentAtLength",(function(t){var n=[g.a.x,g.b.x,g.c.x,g.d.x],e=[g.a.y,g.b.y,g.c.y,g.d.y],i=C(t,g.length,(function(t){return g.getArcLength(n,e,t)})),r=g.getDerivative(n,e,i),h=Math.sqrt(r.x*r.x+r.y*r.y);return h>0?{x:r.x/h,y:r.y/h}:{x:0,y:0}})),i(this,"getPropertiesAtLength",(function(t){var n,e=[g.a.x,g.b.x,g.c.x,g.d.x],i=[g.a.y,g.b.y,g.c.y,g.d.y],r=C(t,g.length,(function(t){return g.getArcLength(e,i,t)})),h=g.getDerivative(e,i,r),s=Math.sqrt(h.x*h.x+h.y*h.y);n=s>0?{x:h.x/s,y:h.y/s}:{x:0,y:0};var a=g.getPoint(e,i,r);return{x:a.x,y:a.y,tangentX:n.x,tangentY:n.y}})),i(this,"getC",(function(){return g.c})),i(this,"getD",(function(){return g.d})),this.a={x:t,y:n},this.b={x:e,y:r},this.c={x:h,y:s},void 0!==a&&void 0!==o?(this.getArcLength=m,this.getPoint=b,this.getDerivative=P,this.d={x:a,y:o}):(this.getArcLength=q,this.getPoint=T,this.getDerivative=_,this.d={x:0,y:0}),this.length=this.getArcLength([this.a.x,this.b.x,this.c.x,this.d.x],[this.a.y,this.b.y,this.c.y,this.d.y],1)})),O=e((function(t){var 
n=this;i(this,"length",0),i(this,"partial_lengths",[]),i(this,"functions",[]),i(this,"initial_point",null),i(this,"getPartAtLength",(function(t){t<0?t=0:t>n.length&&(t=n.length);for(var e=n.partial_lengths.length-1;n.partial_lengths[e]>=t&&e>0;)e--;return e++,{fraction:t-n.partial_lengths[e-1],i:e}})),i(this,"getTotalLength",(function(){return n.length})),i(this,"getPointAtLength",(function(t){var e=n.getPartAtLength(t),i=n.functions[e.i];if(i)return i.getPointAtLength(e.fraction);if(n.initial_point)return n.initial_point;throw new Error("Wrong function at this part.")})),i(this,"getTangentAtLength",(function(t){var e=n.getPartAtLength(t),i=n.functions[e.i];if(i)return i.getTangentAtLength(e.fraction);if(n.initial_point)return{x:0,y:0};throw new Error("Wrong function at this part.")})),i(this,"getPropertiesAtLength",(function(t){var e=n.getPartAtLength(t),i=n.functions[e.i];if(i)return i.getPropertiesAtLength(e.fraction);if(n.initial_point)return{x:n.initial_point.x,y:n.initial_point.y,tangentX:0,tangentY:0};throw new Error("Wrong function at this part.")})),i(this,"getParts",(function(){for(var t=[],e=0;e0?t:"M0,0").match(o);if(!n)throw new Error("No path elements found in string ".concat(t));return n.reduce((function(t,n){var e=n.charAt(0),i=e.toLowerCase(),h=u(n.substring(1));if("m"===i&&h.length>2&&(t.push([e].concat(r(h.splice(0,2)))),i="l",e="m"===e?"l":"L"),"a"===i.toLowerCase()&&(5===h.length||6===h.length)){var s=n.substring(1).trim().split(" ");h=[Number(s[0]),Number(s[1]),Number(s[2]),Number(s[3].charAt(0)),Number(s[3].charAt(1)),Number(s[3].substring(2)),Number(s[4])]}for(;h.length>=0;){if(h.length===a[i]){t.push([e].concat(r(h.splice(0,a[i]))));break}if(h.length0?(this.length+=e.getTotalLength(),this.functions.push(e),s=[h[y][5]+s[0],h[y][6]+s[1]]):this.functions.push(new l(s[0],s[0],s[1],s[1]));else if("S"===h[y][0]){if(y>0&&["C","c","S","s"].indexOf(h[y-1][0])>-1){if(e){var p=e.getC();e=new j(s[0],s[1],2*s[0]-p.x,2*s[1]-p.y,h[y][1],h[y][2],h[y][3],h[y][4])}}else e=new j(s[0],s[1],s[0],s[1],h[y][1],h[y][2],h[y][3],h[y][4]);e&&(this.length+=e.getTotalLength(),s=[h[y][3],h[y][4]],this.functions.push(e))}else if("s"===h[y][0]){if(y>0&&["C","c","S","s"].indexOf(h[y-1][0])>-1){if(e){var x=e.getC(),v=e.getD();e=new j(s[0],s[1],s[0]+v.x-x.x,s[1]+v.y-x.y,s[0]+h[y][1],s[1]+h[y][2],s[0]+h[y][3],s[1]+h[y][4])}}else e=new j(s[0],s[1],s[0],s[1],s[0]+h[y][1],s[1]+h[y][2],s[0]+h[y][3],s[1]+h[y][4]);e&&(this.length+=e.getTotalLength(),s=[h[y][3]+s[0],h[y][4]+s[1]],this.functions.push(e))}else if("Q"===h[y][0]){if(s[0]==h[y][1]&&s[1]==h[y][2]){var M=new l(h[y][1],h[y][3],h[y][2],h[y][4]);this.length+=M.getTotalLength(),this.functions.push(M)}else e=new j(s[0],s[1],h[y][1],h[y][2],h[y][3],h[y][4],void 0,void 0),this.length+=e.getTotalLength(),this.functions.push(e);s=[h[y][3],h[y][4]],g=[h[y][1],h[y][2]]}else if("q"===h[y][0]){if(0!=h[y][1]||0!=h[y][2])e=new j(s[0],s[1],s[0]+h[y][1],s[1]+h[y][2],s[0]+h[y][3],s[1]+h[y][4],void 0,void 0),this.length+=e.getTotalLength(),this.functions.push(e);else{var w=new l(s[0]+h[y][1],s[0]+h[y][3],s[1]+h[y][2],s[1]+h[y][4]);this.length+=w.getTotalLength(),this.functions.push(w)}g=[s[0]+h[y][1],s[1]+h[y][2]],s=[h[y][3]+s[0],h[y][4]+s[1]]}else if("T"===h[y][0]){if(y>0&&["Q","q","T","t"].indexOf(h[y-1][0])>-1)e=new j(s[0],s[1],2*s[0]-g[0],2*s[1]-g[1],h[y][1],h[y][2],void 0,void 0),this.functions.push(e),this.length+=e.getTotalLength();else{var L=new 
l(s[0],h[y][1],s[1],h[y][2]);this.functions.push(L),this.length+=L.getTotalLength()}g=[2*s[0]-g[0],2*s[1]-g[1]],s=[h[y][1],h[y][2]]}else if("t"===h[y][0]){if(y>0&&["Q","q","T","t"].indexOf(h[y-1][0])>-1)e=new j(s[0],s[1],2*s[0]-g[0],2*s[1]-g[1],s[0]+h[y][1],s[1]+h[y][2],void 0,void 0),this.length+=e.getTotalLength(),this.functions.push(e);else{var d=new l(s[0],s[0]+h[y][1],s[1],s[1]+h[y][2]);this.length+=d.getTotalLength(),this.functions.push(d)}g=[2*s[0]-g[0],2*s[1]-g[1]],s=[h[y][1]+s[0],h[y][2]+s[1]]}else if("A"===h[y][0]){var A=new c(s[0],s[1],h[y][1],h[y][2],h[y][3],1===h[y][4],1===h[y][5],h[y][6],h[y][7]);this.length+=A.getTotalLength(),s=[h[y][6],h[y][7]],this.functions.push(A)}else if("a"===h[y][0]){var b=new c(s[0],s[1],h[y][1],h[y][2],h[y][3],1===h[y][4],1===h[y][5],s[0]+h[y][6],s[1]+h[y][7]);this.length+=b.getTotalLength(),s=[s[0]+h[y][6],s[1]+h[y][7]],this.functions.push(b)}this.partial_lengths.push(this.length)}})),E=e((function(t){var n=this;if(i(this,"inst",void 0),i(this,"getTotalLength",(function(){return n.inst.getTotalLength()})),i(this,"getPointAtLength",(function(t){return n.inst.getPointAtLength(t)})),i(this,"getTangentAtLength",(function(t){return n.inst.getTangentAtLength(t)})),i(this,"getPropertiesAtLength",(function(t){return n.inst.getPropertiesAtLength(t)})),i(this,"getParts",(function(){return n.inst.getParts()})),this.inst=new O(t),!(this instanceof E))return new E(t)}));t.svgPathProperties=E}));
diff --git a/ComfyUI-KJNodes/nodes/__pycache__/audioscheduler_nodes.cpython-312.pyc b/ComfyUI-KJNodes/nodes/__pycache__/audioscheduler_nodes.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..296f35f4a59a0b5d3c5352dfc9fe227c96e6dcbb
Binary files /dev/null and b/ComfyUI-KJNodes/nodes/__pycache__/audioscheduler_nodes.cpython-312.pyc differ
diff --git a/ComfyUI-KJNodes/nodes/__pycache__/batchcrop_nodes.cpython-312.pyc b/ComfyUI-KJNodes/nodes/__pycache__/batchcrop_nodes.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c81a5876284fbee11845dc983c03547e19ac43e7
Binary files /dev/null and b/ComfyUI-KJNodes/nodes/__pycache__/batchcrop_nodes.cpython-312.pyc differ
diff --git a/ComfyUI-KJNodes/nodes/__pycache__/curve_nodes.cpython-312.pyc b/ComfyUI-KJNodes/nodes/__pycache__/curve_nodes.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..56b84d24b7ec684b72e8d9c1932fb5696a7a96df
Binary files /dev/null and b/ComfyUI-KJNodes/nodes/__pycache__/curve_nodes.cpython-312.pyc differ
diff --git a/ComfyUI-KJNodes/nodes/__pycache__/image_nodes.cpython-312.pyc b/ComfyUI-KJNodes/nodes/__pycache__/image_nodes.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4b90eb1f88c1412ca3b7e0b0c1263b8cf98e30ba
Binary files /dev/null and b/ComfyUI-KJNodes/nodes/__pycache__/image_nodes.cpython-312.pyc differ
diff --git a/ComfyUI-KJNodes/nodes/__pycache__/intrinsic_lora_nodes.cpython-312.pyc b/ComfyUI-KJNodes/nodes/__pycache__/intrinsic_lora_nodes.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..42a3803e3d8de7c484fbe0c9c01eceeb9edbee34
Binary files /dev/null and b/ComfyUI-KJNodes/nodes/__pycache__/intrinsic_lora_nodes.cpython-312.pyc differ
diff --git a/ComfyUI-KJNodes/nodes/__pycache__/mask_nodes.cpython-312.pyc b/ComfyUI-KJNodes/nodes/__pycache__/mask_nodes.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..52276456c773877a7421f5c6fe78a73cad35fe7e
Binary files /dev/null and b/ComfyUI-KJNodes/nodes/__pycache__/mask_nodes.cpython-312.pyc differ
diff --git a/ComfyUI-KJNodes/nodes/__pycache__/nodes.cpython-312.pyc b/ComfyUI-KJNodes/nodes/__pycache__/nodes.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a7f156e2d04c48818276df5cb02fdfa7945d5e89
Binary files /dev/null and b/ComfyUI-KJNodes/nodes/__pycache__/nodes.cpython-312.pyc differ
diff --git a/ComfyUI-KJNodes/nodes/audioscheduler_nodes.py b/ComfyUI-KJNodes/nodes/audioscheduler_nodes.py
new file mode 100644
index 0000000000000000000000000000000000000000..31b65c094b24eb193ff7098e22b9d9f1ea90fc17
--- /dev/null
+++ b/ComfyUI-KJNodes/nodes/audioscheduler_nodes.py
@@ -0,0 +1,251 @@
+# to be used with https://github.com/a1lazydog/ComfyUI-AudioScheduler
+import torch
+from torchvision.transforms import functional as TF
+from PIL import Image, ImageDraw
+import numpy as np
+from ..utility.utility import pil2tensor
+from nodes import MAX_RESOLUTION
+
+class NormalizedAmplitudeToMask:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "normalized_amp": ("NORMALIZED_AMPLITUDE",),
+ "width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}),
+ "height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}),
+ "frame_offset": ("INT", {"default": 0,"min": -255, "max": 255, "step": 1}),
+ "location_x": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}),
+ "location_y": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}),
+ "size": ("INT", {"default": 128,"min": 8, "max": 4096, "step": 1}),
+ "shape": (
+ [
+ 'none',
+ 'circle',
+ 'square',
+ 'triangle',
+ ],
+ {
+ "default": 'none'
+ }),
+ "color": (
+ [
+ 'white',
+ 'amplitude',
+ ],
+ {
+ "default": 'amplitude'
+ }),
+ },}
+
+ CATEGORY = "KJNodes/audio"
+ RETURN_TYPES = ("MASK",)
+ FUNCTION = "convert"
+ DESCRIPTION = """
+Works as a bridge to the AudioScheduler nodes:
+https://github.com/a1lazydog/ComfyUI-AudioScheduler
+Creates masks based on the normalized amplitude.
+"""
+
+ def convert(self, normalized_amp, width, height, frame_offset, shape, location_x, location_y, size, color):
+ # Ensure normalized_amp is an array and within the range [0, 1]
+ normalized_amp = np.clip(normalized_amp, 0.0, 1.0)
+
+ # Offset the amplitude values by rolling the array
+ normalized_amp = np.roll(normalized_amp, frame_offset)
+
+ # Initialize an empty list to hold the image tensors
+ out = []
+ # Iterate over each amplitude value to create an image
+ for amp in normalized_amp:
+ # Scale the amplitude value to cover the full range of grayscale values
+ if color == 'amplitude':
+ grayscale_value = int(amp * 255)
+ elif color == 'white':
+ grayscale_value = 255
+ # Convert the grayscale value to an RGB format
+ gray_color = (grayscale_value, grayscale_value, grayscale_value)
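+            # the drawn shape scales with the current amplitude (amp is in [0, 1])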
+ finalsize = size * amp
+
+ if shape == 'none':
+ shapeimage = Image.new("RGB", (width, height), gray_color)
+ else:
+ shapeimage = Image.new("RGB", (width, height), "black")
+
+ draw = ImageDraw.Draw(shapeimage)
+ if shape == 'circle' or shape == 'square':
+ # Define the bounding box for the shape
+ left_up_point = (location_x - finalsize, location_y - finalsize)
+ right_down_point = (location_x + finalsize,location_y + finalsize)
+ two_points = [left_up_point, right_down_point]
+
+ if shape == 'circle':
+ draw.ellipse(two_points, fill=gray_color)
+ elif shape == 'square':
+ draw.rectangle(two_points, fill=gray_color)
+
+ elif shape == 'triangle':
+ # Define the points for the triangle
+ left_up_point = (location_x - finalsize, location_y + finalsize) # bottom left
+ right_down_point = (location_x + finalsize, location_y + finalsize) # bottom right
+ top_point = (location_x, location_y) # top point
+ draw.polygon([top_point, left_up_point, right_down_point], fill=gray_color)
+
+ shapeimage = pil2tensor(shapeimage)
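+            # pil2tensor yields an image tensor in B,H,W,C layout; the drawn image is grayscale, so channel 0 is used as the mask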
+ mask = shapeimage[:, :, :, 0]
+ out.append(mask)
+
+ return (torch.cat(out, dim=0),)
+
+class NormalizedAmplitudeToFloatList:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "normalized_amp": ("NORMALIZED_AMPLITUDE",),
+ },}
+
+ CATEGORY = "KJNodes/audio"
+ RETURN_TYPES = ("FLOAT",)
+ FUNCTION = "convert"
+ DESCRIPTION = """
+Works as a bridge to the AudioScheduler nodes:
+https://github.com/a1lazydog/ComfyUI-AudioScheduler
+Creates a list of floats from the normalized amplitude.
+"""
+
+ def convert(self, normalized_amp):
+ # Ensure normalized_amp is an array and within the range [0, 1]
+ normalized_amp = np.clip(normalized_amp, 0.0, 1.0)
+ return (normalized_amp.tolist(),)
+
+class OffsetMaskByNormalizedAmplitude:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "normalized_amp": ("NORMALIZED_AMPLITUDE",),
+ "mask": ("MASK",),
+ "x": ("INT", { "default": 0, "min": -4096, "max": MAX_RESOLUTION, "step": 1, "display": "number" }),
+ "y": ("INT", { "default": 0, "min": -4096, "max": MAX_RESOLUTION, "step": 1, "display": "number" }),
+ "rotate": ("BOOLEAN", { "default": False }),
+ "angle_multiplier": ("FLOAT", { "default": 0.0, "min": -1.0, "max": 1.0, "step": 0.001, "display": "number" }),
+ }
+ }
+
+ RETURN_TYPES = ("MASK",)
+ RETURN_NAMES = ("mask",)
+ FUNCTION = "offset"
+ CATEGORY = "KJNodes/audio"
+ DESCRIPTION = """
+Works as a bridge to the AudioScheduler nodes:
+https://github.com/a1lazydog/ComfyUI-AudioScheduler
+Offsets masks based on the normalized amplitude.
+"""
+
+ def offset(self, mask, x, y, angle_multiplier, rotate, normalized_amp):
+
+        offsetmask = mask.clone()
+        # Ensure normalized_amp is an array and within the range [0, 1]
+        normalized_amp = np.clip(normalized_amp, 0.0, 1.0)
+
+ batch_size, height, width = mask.shape
+
+ if rotate:
+ for i in range(batch_size):
+ rotation_amp = int(normalized_amp[i] * (360 * angle_multiplier))
+ rotation_angle = rotation_amp
+ offsetmask[i] = TF.rotate(offsetmask[i].unsqueeze(0), rotation_angle).squeeze(0)
+ if x != 0 or y != 0:
+ for i in range(batch_size):
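+                # scale the user-supplied x/y offsets by 10x the normalized amplitude of this frame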
+ offset_amp = normalized_amp[i] * 10
+ shift_x = min(x*offset_amp, width-1)
+ shift_y = min(y*offset_amp, height-1)
+ if shift_x != 0:
+ offsetmask[i] = torch.roll(offsetmask[i], shifts=int(shift_x), dims=1)
+ if shift_y != 0:
+ offsetmask[i] = torch.roll(offsetmask[i], shifts=int(shift_y), dims=0)
+
+ return offsetmask,
+
+class ImageTransformByNormalizedAmplitude:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "normalized_amp": ("NORMALIZED_AMPLITUDE",),
+ "zoom_scale": ("FLOAT", { "default": 0.0, "min": -1.0, "max": 1.0, "step": 0.001, "display": "number" }),
+ "x_offset": ("INT", { "default": 0, "min": (1 -MAX_RESOLUTION), "max": MAX_RESOLUTION, "step": 1, "display": "number" }),
+ "y_offset": ("INT", { "default": 0, "min": (1 -MAX_RESOLUTION), "max": MAX_RESOLUTION, "step": 1, "display": "number" }),
+ "cumulative": ("BOOLEAN", { "default": False }),
+ "image": ("IMAGE",),
+ }}
+
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "amptransform"
+ CATEGORY = "KJNodes/audio"
+ DESCRIPTION = """
+Works as a bridge to the AudioScheduler nodes:
+https://github.com/a1lazydog/ComfyUI-AudioScheduler
+Transforms the image based on the normalized amplitude.
+"""
+
+ def amptransform(self, image, normalized_amp, zoom_scale, cumulative, x_offset, y_offset):
+ # Ensure normalized_amp is an array and within the range [0, 1]
+ normalized_amp = np.clip(normalized_amp, 0.0, 1.0)
+ transformed_images = []
+
+ # Initialize the cumulative zoom factor
+ prev_amp = 0.0
+
+ for i in range(image.shape[0]):
+ img = image[i] # Get the i-th image in the batch
+ amp = normalized_amp[i] # Get the corresponding amplitude value
+
+ # Incrementally increase the cumulative zoom factor
+ if cumulative:
+ prev_amp += amp
+ amp += prev_amp
+
+ # Convert the image tensor from BxHxWxC to CxHxW format expected by torchvision
+ img = img.permute(2, 0, 1)
+
+ # Convert PyTorch tensor to PIL Image for processing
+ pil_img = TF.to_pil_image(img)
+
+ # Calculate the crop size based on the amplitude
+ width, height = pil_img.size
+ crop_size = int(min(width, height) * (1 - amp * zoom_scale))
+ crop_size = max(crop_size, 1)
+
+ # Calculate the crop box coordinates (centered crop)
+ left = (width - crop_size) // 2
+ top = (height - crop_size) // 2
+ right = (width + crop_size) // 2
+ bottom = (height + crop_size) // 2
+
+ # Crop and resize back to original size
+ cropped_img = TF.crop(pil_img, top, left, crop_size, crop_size)
+ resized_img = TF.resize(cropped_img, (height, width))
+
+ # Convert back to tensor in CxHxW format
+ tensor_img = TF.to_tensor(resized_img)
+
+ # Convert the tensor back to BxHxWxC format
+ tensor_img = tensor_img.permute(1, 2, 0)
+
+ # Offset the image based on the amplitude
+ offset_amp = amp * 10 # Calculate the offset magnitude based on the amplitude
+            shift_x = min(x_offset * offset_amp, tensor_img.shape[1] - 1)  # clamp the x shift to the image width (tensor_img is HxWxC here)
+            shift_y = min(y_offset * offset_amp, tensor_img.shape[0] - 1)  # clamp the y shift to the image height
+
+ # Apply the offset to the image tensor
+ if shift_x != 0:
+ tensor_img = torch.roll(tensor_img, shifts=int(shift_x), dims=1)
+ if shift_y != 0:
+ tensor_img = torch.roll(tensor_img, shifts=int(shift_y), dims=0)
+
+ # Add to the list
+ transformed_images.append(tensor_img)
+
+ # Stack all transformed images into a batch
+ transformed_batch = torch.stack(transformed_images)
+
+ return (transformed_batch,)
\ No newline at end of file
diff --git a/ComfyUI-KJNodes/nodes/batchcrop_nodes.py b/ComfyUI-KJNodes/nodes/batchcrop_nodes.py
new file mode 100644
index 0000000000000000000000000000000000000000..562e6db19055f53c926389bbb965bc007367ea28
--- /dev/null
+++ b/ComfyUI-KJNodes/nodes/batchcrop_nodes.py
@@ -0,0 +1,757 @@
+from ..utility.utility import tensor2pil, pil2tensor
+from PIL import Image, ImageDraw, ImageFilter
+import numpy as np
+import torch
+from torchvision.transforms import Resize, CenterCrop, InterpolationMode
+import math
+
+# based on nodes from mtb: https://github.com/melMass/comfy_mtb
+
+def bbox_to_region(bbox, target_size=None):
+ bbox = bbox_check(bbox, target_size)
+ return (bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3])
+
+def bbox_check(bbox, target_size=None):
+ if not target_size:
+ return bbox
+
+ new_bbox = (
+ bbox[0],
+ bbox[1],
+ min(target_size[0] - bbox[0], bbox[2]),
+ min(target_size[1] - bbox[1], bbox[3]),
+ )
+ return new_bbox
+
+class BatchCropFromMask:
+
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "original_images": ("IMAGE",),
+ "masks": ("MASK",),
+ "crop_size_mult": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}),
+ "bbox_smooth_alpha": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
+ },
+ }
+
+ RETURN_TYPES = (
+ "IMAGE",
+ "IMAGE",
+ "BBOX",
+ "INT",
+ "INT",
+ )
+ RETURN_NAMES = (
+ "original_images",
+ "cropped_images",
+ "bboxes",
+ "width",
+ "height",
+ )
+ FUNCTION = "crop"
+ CATEGORY = "KJNodes/masking"
+
+ def smooth_bbox_size(self, prev_bbox_size, curr_bbox_size, alpha):
+ if alpha == 0:
+ return prev_bbox_size
+ return round(alpha * curr_bbox_size + (1 - alpha) * prev_bbox_size)
+
+ def smooth_center(self, prev_center, curr_center, alpha=0.5):
+ if alpha == 0:
+ return prev_center
+ return (
+ round(alpha * curr_center[0] + (1 - alpha) * prev_center[0]),
+ round(alpha * curr_center[1] + (1 - alpha) * prev_center[1])
+ )
+
+ def crop(self, masks, original_images, crop_size_mult, bbox_smooth_alpha):
+
+ bounding_boxes = []
+ cropped_images = []
+
+ self.max_bbox_width = 0
+ self.max_bbox_height = 0
+
+ # First, calculate the maximum bounding box size across all masks
+ curr_max_bbox_width = 0
+ curr_max_bbox_height = 0
+ for mask in masks:
+ _mask = tensor2pil(mask)[0]
+ non_zero_indices = np.nonzero(np.array(_mask))
+ min_x, max_x = np.min(non_zero_indices[1]), np.max(non_zero_indices[1])
+ min_y, max_y = np.min(non_zero_indices[0]), np.max(non_zero_indices[0])
+ width = max_x - min_x
+ height = max_y - min_y
+ curr_max_bbox_width = max(curr_max_bbox_width, width)
+ curr_max_bbox_height = max(curr_max_bbox_height, height)
+
+ # Smooth the changes in the bounding box size
+ self.max_bbox_width = self.smooth_bbox_size(self.max_bbox_width, curr_max_bbox_width, bbox_smooth_alpha)
+ self.max_bbox_height = self.smooth_bbox_size(self.max_bbox_height, curr_max_bbox_height, bbox_smooth_alpha)
+
+ # Apply the crop size multiplier
+ self.max_bbox_width = round(self.max_bbox_width * crop_size_mult)
+ self.max_bbox_height = round(self.max_bbox_height * crop_size_mult)
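+        # aspect ratio of the smoothed, scaled maximum bounding box; shared by every crop in the batch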
+ bbox_aspect_ratio = self.max_bbox_width / self.max_bbox_height
+
+ # Then, for each mask and corresponding image...
+ for i, (mask, img) in enumerate(zip(masks, original_images)):
+ _mask = tensor2pil(mask)[0]
+ non_zero_indices = np.nonzero(np.array(_mask))
+ min_x, max_x = np.min(non_zero_indices[1]), np.max(non_zero_indices[1])
+ min_y, max_y = np.min(non_zero_indices[0]), np.max(non_zero_indices[0])
+
+ # Calculate center of bounding box
+ center_x = np.mean(non_zero_indices[1])
+ center_y = np.mean(non_zero_indices[0])
+ curr_center = (round(center_x), round(center_y))
+
+ # If this is the first frame, initialize prev_center with curr_center
+ if not hasattr(self, 'prev_center'):
+ self.prev_center = curr_center
+
+ # Smooth the changes in the center coordinates from the second frame onwards
+ if i > 0:
+ center = self.smooth_center(self.prev_center, curr_center, bbox_smooth_alpha)
+ else:
+ center = curr_center
+
+ # Update prev_center for the next frame
+ self.prev_center = center
+
+ # Create bounding box using max_bbox_width and max_bbox_height
+ half_box_width = round(self.max_bbox_width / 2)
+ half_box_height = round(self.max_bbox_height / 2)
+ min_x = max(0, center[0] - half_box_width)
+ max_x = min(img.shape[1], center[0] + half_box_width)
+ min_y = max(0, center[1] - half_box_height)
+ max_y = min(img.shape[0], center[1] + half_box_height)
+
+ # Append bounding box coordinates
+ bounding_boxes.append((min_x, min_y, max_x - min_x, max_y - min_y))
+
+ # Crop the image from the bounding box
+ cropped_img = img[min_y:max_y, min_x:max_x, :]
+
+ # Calculate the new dimensions while maintaining the aspect ratio
+ new_height = min(cropped_img.shape[0], self.max_bbox_height)
+ new_width = round(new_height * bbox_aspect_ratio)
+
+ # Resize the image
+ resize_transform = Resize((new_height, new_width))
+ resized_img = resize_transform(cropped_img.permute(2, 0, 1))
+
+ # Perform the center crop to the desired size
+ crop_transform = CenterCrop((self.max_bbox_height, self.max_bbox_width)) # swap the order here if necessary
+ cropped_resized_img = crop_transform(resized_img)
+
+ cropped_images.append(cropped_resized_img.permute(1, 2, 0))
+
+ cropped_out = torch.stack(cropped_images, dim=0)
+
+ return (original_images, cropped_out, bounding_boxes, self.max_bbox_width, self.max_bbox_height, )
+
+class BatchUncrop:
+
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "original_images": ("IMAGE",),
+ "cropped_images": ("IMAGE",),
+ "bboxes": ("BBOX",),
+ "border_blending": ("FLOAT", {"default": 0.25, "min": 0.0, "max": 1.0, "step": 0.01}, ),
+ "crop_rescale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
+ "border_top": ("BOOLEAN", {"default": True}),
+ "border_bottom": ("BOOLEAN", {"default": True}),
+ "border_left": ("BOOLEAN", {"default": True}),
+ "border_right": ("BOOLEAN", {"default": True}),
+ }
+ }
+
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "uncrop"
+
+ CATEGORY = "KJNodes/masking"
+
+ def uncrop(self, original_images, cropped_images, bboxes, border_blending, crop_rescale, border_top, border_bottom, border_left, border_right):
+ def inset_border(image, border_width, border_color, border_top, border_bottom, border_left, border_right):
+ draw = ImageDraw.Draw(image)
+ width, height = image.size
+ if border_top:
+ draw.rectangle((0, 0, width, border_width), fill=border_color)
+ if border_bottom:
+ draw.rectangle((0, height - border_width, width, height), fill=border_color)
+ if border_left:
+ draw.rectangle((0, 0, border_width, height), fill=border_color)
+ if border_right:
+ draw.rectangle((width - border_width, 0, width, height), fill=border_color)
+ return image
+
+ if len(original_images) != len(cropped_images):
+ raise ValueError(f"The number of original_images ({len(original_images)}) and cropped_images ({len(cropped_images)}) should be the same")
+
+ # Ensure there are enough bboxes, but drop the excess if there are more bboxes than images
+ if len(bboxes) > len(original_images):
+ print(f"Warning: Dropping excess bounding boxes. Expected {len(original_images)}, but got {len(bboxes)}")
+ bboxes = bboxes[:len(original_images)]
+ elif len(bboxes) < len(original_images):
+ raise ValueError("There should be at least as many bboxes as there are original and cropped images")
+
+ input_images = tensor2pil(original_images)
+ crop_imgs = tensor2pil(cropped_images)
+
+ out_images = []
+ for i in range(len(input_images)):
+ img = input_images[i]
+ crop = crop_imgs[i]
+ bbox = bboxes[i]
+
+ # uncrop the image based on the bounding box
+ bb_x, bb_y, bb_width, bb_height = bbox
+
+ paste_region = bbox_to_region((bb_x, bb_y, bb_width, bb_height), img.size)
+
+ # scale factors
+ scale_x = crop_rescale
+ scale_y = crop_rescale
+
+ # scaled paste_region
+ paste_region = (round(paste_region[0]*scale_x), round(paste_region[1]*scale_y), round(paste_region[2]*scale_x), round(paste_region[3]*scale_y))
+
+ # rescale the crop image to fit the paste_region
+ crop = crop.resize((round(paste_region[2]-paste_region[0]), round(paste_region[3]-paste_region[1])))
+ crop_img = crop.convert("RGB")
+
+ if border_blending > 1.0:
+ border_blending = 1.0
+ elif border_blending < 0.0:
+ border_blending = 0.0
+
+ blend_ratio = (max(crop_img.size) / 2) * float(border_blending)
+
+ blend = img.convert("RGBA")
+ mask = Image.new("L", img.size, 0)
+
+ mask_block = Image.new("L", (paste_region[2]-paste_region[0], paste_region[3]-paste_region[1]), 255)
+ mask_block = inset_border(mask_block, round(blend_ratio / 2), (0), border_top, border_bottom, border_left, border_right)
+
+ mask.paste(mask_block, paste_region)
+ blend.paste(crop_img, paste_region)
+
+ mask = mask.filter(ImageFilter.BoxBlur(radius=blend_ratio / 4))
+ mask = mask.filter(ImageFilter.GaussianBlur(radius=blend_ratio / 4))
+
+ blend.putalpha(mask)
+ img = Image.alpha_composite(img.convert("RGBA"), blend)
+ out_images.append(img.convert("RGB"))
+
+ return (pil2tensor(out_images),)
+
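+# A minimal sketch of the feathered paste used by the uncrop nodes in this file.
+# A solid white block marks the paste region, the chosen borders are inset with
+# black, and the mask is blurred so the pasted crop fades into the original
+# image when used as an alpha channel. w, h, border_px and radius stand in for
+# the per-image values computed above:
+#   block = Image.new("L", (w, h), 255)            # fully opaque paste region
+#   block = inset_border(block, border_px, 0)      # darken the selected edges
+#   mask = Image.new("L", img.size, 0)
+#   mask.paste(block, paste_region)
+#   mask = mask.filter(ImageFilter.GaussianBlur(radius))
+#   blend.putalpha(mask)                           # feathered composite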
+class BatchCropFromMaskAdvanced:
+
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "original_images": ("IMAGE",),
+ "masks": ("MASK",),
+ "crop_size_mult": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
+ "bbox_smooth_alpha": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
+ },
+ }
+
+ RETURN_TYPES = (
+ "IMAGE",
+ "IMAGE",
+ "MASK",
+ "IMAGE",
+ "MASK",
+ "BBOX",
+ "BBOX",
+ "INT",
+ "INT",
+ )
+ RETURN_NAMES = (
+ "original_images",
+ "cropped_images",
+ "cropped_masks",
+ "combined_crop_image",
+ "combined_crop_masks",
+ "bboxes",
+ "combined_bounding_box",
+ "bbox_width",
+ "bbox_height",
+ )
+ FUNCTION = "crop"
+ CATEGORY = "KJNodes/masking"
+
+ def smooth_bbox_size(self, prev_bbox_size, curr_bbox_size, alpha):
+ return round(alpha * curr_bbox_size + (1 - alpha) * prev_bbox_size)
+
+ def smooth_center(self, prev_center, curr_center, alpha=0.5):
+ return (round(alpha * curr_center[0] + (1 - alpha) * prev_center[0]),
+ round(alpha * curr_center[1] + (1 - alpha) * prev_center[1]))
+
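+ # A small worked example of the smoothing above, assuming alpha = 0.5 (the
+ # node default). The result is an exponential moving average that damps
+ # frame-to-frame jitter in the bounding box:
+ #   prev, curr = 100, 140
+ #   round(0.5 * 140 + (1 - 0.5) * 100)  # -> 120
+ # Higher alpha tracks the current value more closely; lower alpha reacts slower.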
+ def crop(self, masks, original_images, crop_size_mult, bbox_smooth_alpha):
+ bounding_boxes = []
+ combined_bounding_box = []
+ cropped_images = []
+ cropped_masks = []
+ cropped_masks_out = []
+ combined_crop_out = []
+ combined_cropped_images = []
+ combined_cropped_masks = []
+
+ def calculate_bbox(mask):
+ non_zero_indices = np.nonzero(np.array(mask))
+
+ # handle empty masks
+ min_x, max_x, min_y, max_y = 0, 0, 0, 0
+ if len(non_zero_indices[1]) > 0 and len(non_zero_indices[0]) > 0:
+ min_x, max_x = np.min(non_zero_indices[1]), np.max(non_zero_indices[1])
+ min_y, max_y = np.min(non_zero_indices[0]), np.max(non_zero_indices[0])
+
+ width = max_x - min_x
+ height = max_y - min_y
+ bbox_size = max(width, height)
+ return min_x, max_x, min_y, max_y, bbox_size
+
+ combined_mask = torch.max(masks, dim=0)[0]
+ _mask = tensor2pil(combined_mask)[0]
+ new_min_x, new_max_x, new_min_y, new_max_y, combined_bbox_size = calculate_bbox(_mask)
+ center_x = (new_min_x + new_max_x) / 2
+ center_y = (new_min_y + new_max_y) / 2
+ half_box_size = round(combined_bbox_size // 2)
+ new_min_x = max(0, round(center_x - half_box_size))
+ new_max_x = min(original_images[0].shape[1], round(center_x + half_box_size))
+ new_min_y = max(0, round(center_y - half_box_size))
+ new_max_y = min(original_images[0].shape[0], round(center_y + half_box_size))
+
+ combined_bounding_box.append((new_min_x, new_min_y, new_max_x - new_min_x, new_max_y - new_min_y))
+
+ self.max_bbox_size = 0
+
+ # First, calculate the maximum bounding box size across all masks
+ curr_max_bbox_size = max(calculate_bbox(tensor2pil(mask)[0])[-1] for mask in masks)
+ # Smooth the changes in the bounding box size
+ self.max_bbox_size = self.smooth_bbox_size(self.max_bbox_size, curr_max_bbox_size, bbox_smooth_alpha)
+ # Apply the crop size multiplier
+ self.max_bbox_size = round(self.max_bbox_size * crop_size_mult)
+ # Make sure max_bbox_size is divisible by 16, if not, round it upwards so it is
+ self.max_bbox_size = math.ceil(self.max_bbox_size / 16) * 16
+
+ if self.max_bbox_size > original_images[0].shape[0] or self.max_bbox_size > original_images[0].shape[1]:
+ # max_bbox_size can only be as big as our input's width or height, and it has to be even
+ self.max_bbox_size = math.floor(min(original_images[0].shape[0], original_images[0].shape[1]) / 2) * 2
+
+ # Then, for each mask and corresponding image...
+ for i, (mask, img) in enumerate(zip(masks, original_images)):
+ _mask = tensor2pil(mask)[0]
+ non_zero_indices = np.nonzero(np.array(_mask))
+
+ # check for empty masks
+ if len(non_zero_indices[0]) > 0 and len(non_zero_indices[1]) > 0:
+ min_x, max_x = np.min(non_zero_indices[1]), np.max(non_zero_indices[1])
+ min_y, max_y = np.min(non_zero_indices[0]), np.max(non_zero_indices[0])
+
+ # Calculate center of bounding box
+ center_x = np.mean(non_zero_indices[1])
+ center_y = np.mean(non_zero_indices[0])
+ curr_center = (round(center_x), round(center_y))
+
+ # If this is the first frame, initialize prev_center with curr_center
+ if not hasattr(self, 'prev_center'):
+ self.prev_center = curr_center
+
+ # Smooth the changes in the center coordinates from the second frame onwards
+ if i > 0:
+ center = self.smooth_center(self.prev_center, curr_center, bbox_smooth_alpha)
+ else:
+ center = curr_center
+
+ # Update prev_center for the next frame
+ self.prev_center = center
+
+ # Create bounding box using max_bbox_size
+ half_box_size = self.max_bbox_size // 2
+ min_x = max(0, center[0] - half_box_size)
+ max_x = min(img.shape[1], center[0] + half_box_size)
+ min_y = max(0, center[1] - half_box_size)
+ max_y = min(img.shape[0], center[1] + half_box_size)
+
+ # Append bounding box coordinates
+ bounding_boxes.append((min_x, min_y, max_x - min_x, max_y - min_y))
+
+ # Crop the image from the bounding box
+ cropped_img = img[min_y:max_y, min_x:max_x, :]
+ cropped_mask = mask[min_y:max_y, min_x:max_x]
+
+ # Resize the cropped image to a fixed size
+ new_size = max(cropped_img.shape[0], cropped_img.shape[1])
+ resize_transform = Resize(new_size, interpolation=InterpolationMode.NEAREST, max_size=max(img.shape[0], img.shape[1]))
+ resized_mask = resize_transform(cropped_mask.unsqueeze(0).unsqueeze(0)).squeeze(0).squeeze(0)
+ resized_img = resize_transform(cropped_img.permute(2, 0, 1))
+ # Perform the center crop to the desired size
+ # Constrain the crop to the smaller of our bbox or our image so we don't expand past the image dimensions.
+ crop_transform = CenterCrop((min(self.max_bbox_size, resized_img.shape[1]), min(self.max_bbox_size, resized_img.shape[2])))
+
+ cropped_resized_img = crop_transform(resized_img)
+ cropped_images.append(cropped_resized_img.permute(1, 2, 0))
+
+ cropped_resized_mask = crop_transform(resized_mask)
+ cropped_masks.append(cropped_resized_mask)
+
+ combined_cropped_img = original_images[i][new_min_y:new_max_y, new_min_x:new_max_x, :]
+ combined_cropped_images.append(combined_cropped_img)
+
+ combined_cropped_mask = masks[i][new_min_y:new_max_y, new_min_x:new_max_x]
+ combined_cropped_masks.append(combined_cropped_mask)
+ else:
+ bounding_boxes.append((0, 0, img.shape[1], img.shape[0]))
+ cropped_images.append(img)
+ cropped_masks.append(mask)
+ combined_cropped_images.append(img)
+ combined_cropped_masks.append(mask)
+
+ cropped_out = torch.stack(cropped_images, dim=0)
+ combined_crop_out = torch.stack(combined_cropped_images, dim=0)
+ cropped_masks_out = torch.stack(cropped_masks, dim=0)
+ combined_crop_mask_out = torch.stack(combined_cropped_masks, dim=0)
+
+ return (original_images, cropped_out, cropped_masks_out, combined_crop_out, combined_crop_mask_out, bounding_boxes, combined_bounding_box, self.max_bbox_size, self.max_bbox_size)
+
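+# A small worked example of the size rounding in the crop above. The crop size
+# is rounded up to the nearest multiple of 16, presumably to stay compatible
+# with the 8x latent downscaling used by the diffusion models:
+#   math.ceil(250 / 16) * 16   # -> 256
+#   math.ceil(256 / 16) * 16   # -> 256 (already a multiple, unchanged)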
+class FilterZeroMasksAndCorrespondingImages:
+
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "masks": ("MASK",),
+ },
+ "optional": {
+ "original_images": ("IMAGE",),
+ },
+ }
+
+ RETURN_TYPES = ("MASK", "IMAGE", "IMAGE", "INDEXES",)
+ RETURN_NAMES = ("non_zero_masks_out", "non_zero_mask_images_out", "zero_mask_images_out", "zero_mask_images_out_indexes",)
+ FUNCTION = "filter"
+ CATEGORY = "KJNodes/masking"
+ DESCRIPTION = """
+Filter out all the empty (i.e. all zero) mask in masks
+Also filter out all the corresponding images in original_images by indexes if provide
+
+original_images (optional): If provided, need have same length as masks.
+"""
+
+ def filter(self, masks, original_images=None):
+ non_zero_masks = []
+ non_zero_mask_images = []
+ zero_mask_images = []
+ zero_mask_images_indexes = []
+
+ masks_num = len(masks)
+ also_process_images = False
+ if original_images is not None:
+ imgs_num = len(original_images)
+ if len(original_images) == masks_num:
+ also_process_images = True
+ else:
+ print(f"[WARNING] ignore input: original_images, due to number of original_images ({imgs_num}) is not equal to number of masks ({masks_num})")
+
+ for i in range(masks_num):
+ non_zero_num = np.count_nonzero(np.array(masks[i]))
+ if non_zero_num > 0:
+ non_zero_masks.append(masks[i])
+ if also_process_images:
+ non_zero_mask_images.append(original_images[i])
+ else:
+ zero_mask_images.append(original_images[i])
+ zero_mask_images_indexes.append(i)
+
+ non_zero_masks_out = torch.stack(non_zero_masks, dim=0)
+ non_zero_mask_images_out = zero_mask_images_out = zero_mask_images_out_indexes = None
+
+ if also_process_images:
+ non_zero_mask_images_out = torch.stack(non_zero_mask_images, dim=0)
+ if len(zero_mask_images) > 0:
+ zero_mask_images_out = torch.stack(zero_mask_images, dim=0)
+ zero_mask_images_out_indexes = zero_mask_images_indexes
+
+ return (non_zero_masks_out, non_zero_mask_images_out, zero_mask_images_out, zero_mask_images_out_indexes)
+
+class InsertImageBatchByIndexes:
+
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "images": ("IMAGE",),
+ "images_to_insert": ("IMAGE",),
+ "insert_indexes": ("INDEXES",),
+ },
+ }
+
+ RETURN_TYPES = ("IMAGE", )
+ RETURN_NAMES = ("images_after_insert", )
+ FUNCTION = "insert"
+ CATEGORY = "KJNodes/image"
+ DESCRIPTION = """
+This node is designed to be use with node FilterZeroMasksAndCorrespondingImages
+It inserts the images_to_insert into images according to insert_indexes
+
+Returns:
+ images_after_insert: updated original images with origonal sequence order
+"""
+
+ def insert(self, images, images_to_insert, insert_indexes):
+ images_after_insert = images
+
+ if images_to_insert is not None and insert_indexes is not None:
+ images_to_insert_num = len(images_to_insert)
+ insert_indexes_num = len(insert_indexes)
+ if images_to_insert_num == insert_indexes_num:
+ images_after_insert = []
+
+ i_images = 0
+ for i in range(len(images) + images_to_insert_num):
+ if i in insert_indexes:
+ images_after_insert.append(images_to_insert[insert_indexes.index(i)])
+ else:
+ images_after_insert.append(images[i_images])
+ i_images += 1
+
+ images_after_insert = torch.stack(images_after_insert, dim=0)
+
+ else:
+ print(f"[WARNING] skip this node, due to number of images_to_insert ({images_to_insert_num}) is not equal to number of insert_indexes ({insert_indexes_num})")
+
+
+ return (images_after_insert, )
+
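+# A hedged sketch of how the two nodes above are intended to pair: empty masks
+# (and their frames) are filtered out before an expensive per-frame step, then
+# the untouched frames are put back at their original indexes. filter_node,
+# insert_node and some_processing are illustrative names only:
+#   masks_out, imgs_out, skipped_imgs, skipped_idx = filter_node.filter(masks, images)
+#   processed = some_processing(imgs_out)
+#   restored, = insert_node.insert(processed, skipped_imgs, skipped_idx)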
+class BatchUncropAdvanced:
+
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "original_images": ("IMAGE",),
+ "cropped_images": ("IMAGE",),
+ "cropped_masks": ("MASK",),
+ "combined_crop_mask": ("MASK",),
+ "bboxes": ("BBOX",),
+ "border_blending": ("FLOAT", {"default": 0.25, "min": 0.0, "max": 1.0, "step": 0.01}, ),
+ "crop_rescale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
+ "use_combined_mask": ("BOOLEAN", {"default": False}),
+ "use_square_mask": ("BOOLEAN", {"default": True}),
+ },
+ "optional": {
+ "combined_bounding_box": ("BBOX", {"default": None}),
+ },
+ }
+
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "uncrop"
+ CATEGORY = "KJNodes/masking"
+
+
+ def uncrop(self, original_images, cropped_images, cropped_masks, combined_crop_mask, bboxes, border_blending, crop_rescale, use_combined_mask, use_square_mask, combined_bounding_box = None):
+
+ def inset_border(image, border_width=20, border_color=(0)):
+ width, height = image.size
+ bordered_image = Image.new(image.mode, (width, height), border_color)
+ bordered_image.paste(image, (0, 0))
+ draw = ImageDraw.Draw(bordered_image)
+ draw.rectangle((0, 0, width - 1, height - 1), outline=border_color, width=border_width)
+ return bordered_image
+
+ if len(original_images) != len(cropped_images):
+ raise ValueError(f"The number of original_images ({len(original_images)}) and cropped_images ({len(cropped_images)}) should be the same")
+
+ # Ensure there are enough bboxes, but drop the excess if there are more bboxes than images
+ if len(bboxes) > len(original_images):
+ print(f"Warning: Dropping excess bounding boxes. Expected {len(original_images)}, but got {len(bboxes)}")
+ bboxes = bboxes[:len(original_images)]
+ elif len(bboxes) < len(original_images):
+ raise ValueError("There should be at least as many bboxes as there are original and cropped images")
+
+ crop_imgs = tensor2pil(cropped_images)
+ input_images = tensor2pil(original_images)
+ out_images = []
+
+ for i in range(len(input_images)):
+ img = input_images[i]
+ crop = crop_imgs[i]
+ bbox = bboxes[i]
+
+ if use_combined_mask:
+ bb_x, bb_y, bb_width, bb_height = combined_bounding_box[0]
+ paste_region = bbox_to_region((bb_x, bb_y, bb_width, bb_height), img.size)
+ mask = combined_crop_mask[i]
+ else:
+ bb_x, bb_y, bb_width, bb_height = bbox
+ paste_region = bbox_to_region((bb_x, bb_y, bb_width, bb_height), img.size)
+ mask = cropped_masks[i]
+
+ # scale paste_region
+ scale_x = scale_y = crop_rescale
+ paste_region = (round(paste_region[0]*scale_x), round(paste_region[1]*scale_y), round(paste_region[2]*scale_x), round(paste_region[3]*scale_y))
+
+ # rescale the crop image to fit the paste_region
+ crop = crop.resize((round(paste_region[2]-paste_region[0]), round(paste_region[3]-paste_region[1])))
+ crop_img = crop.convert("RGB")
+
+ #border blending
+ if border_blending > 1.0:
+ border_blending = 1.0
+ elif border_blending < 0.0:
+ border_blending = 0.0
+
+ blend_ratio = (max(crop_img.size) / 2) * float(border_blending)
+ blend = img.convert("RGBA")
+
+ if use_square_mask:
+ mask = Image.new("L", img.size, 0)
+ mask_block = Image.new("L", (paste_region[2]-paste_region[0], paste_region[3]-paste_region[1]), 255)
+ mask_block = inset_border(mask_block, round(blend_ratio / 2), (0))
+ mask.paste(mask_block, paste_region)
+ else:
+ original_mask = tensor2pil(mask)[0]
+ original_mask = original_mask.resize((paste_region[2]-paste_region[0], paste_region[3]-paste_region[1]))
+ mask = Image.new("L", img.size, 0)
+ mask.paste(original_mask, paste_region)
+
+ mask = mask.filter(ImageFilter.BoxBlur(radius=blend_ratio / 4))
+ mask = mask.filter(ImageFilter.GaussianBlur(radius=blend_ratio / 4))
+
+ blend.paste(crop_img, paste_region)
+ blend.putalpha(mask)
+
+ img = Image.alpha_composite(img.convert("RGBA"), blend)
+ out_images.append(img.convert("RGB"))
+
+ return (pil2tensor(out_images),)
+
+class SplitBboxes:
+
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "bboxes": ("BBOX",),
+ "index": ("INT", {"default": 0,"min": 0, "max": 99999999, "step": 1}),
+ },
+ }
+
+ RETURN_TYPES = ("BBOX","BBOX",)
+ RETURN_NAMES = ("bboxes_a","bboxes_b",)
+ FUNCTION = "splitbbox"
+ CATEGORY = "KJNodes/masking"
+ DESCRIPTION = """
+Splits the specified bbox list at the given index into two lists.
+"""
+
+ def splitbbox(self, bboxes, index):
+ bboxes_a = bboxes[:index] # Sub-list from the start of bboxes up to (but not including) the index
+ bboxes_b = bboxes[index:] # Sub-list from the index to the end of bboxes
+
+ return (bboxes_a, bboxes_b,)
+
+class BboxToInt:
+
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "bboxes": ("BBOX",),
+ "index": ("INT", {"default": 0,"min": 0, "max": 99999999, "step": 1}),
+ },
+ }
+
+ RETURN_TYPES = ("INT","INT","INT","INT","INT","INT",)
+ RETURN_NAMES = ("x_min","y_min","width","height", "center_x","center_y",)
+ FUNCTION = "bboxtoint"
+ CATEGORY = "KJNodes/masking"
+ DESCRIPTION = """
+Returns selected index from bounding box list as integers.
+"""
+ def bboxtoint(self, bboxes, index):
+ x_min, y_min, width, height = bboxes[index]
+ center_x = int(x_min + width / 2)
+ center_y = int(y_min + height / 2)
+
+ return (x_min, y_min, width, height, center_x, center_y,)
+
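+# Throughout these nodes a BBOX entry is the tuple (x_min, y_min, width, height)
+# in pixels. A quick worked example of the center computed above:
+#   x_min, y_min, width, height = 64, 32, 128, 96
+#   int(64 + 128 / 2)   # center_x -> 128
+#   int(32 + 96 / 2)    # center_y -> 80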
+class BboxVisualize:
+
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "images": ("IMAGE",),
+ "bboxes": ("BBOX",),
+ "line_width": ("INT", {"default": 1,"min": 1, "max": 10, "step": 1}),
+ },
+ }
+
+ RETURN_TYPES = ("IMAGE",)
+ RETURN_NAMES = ("images",)
+ FUNCTION = "visualizebbox"
+ DESCRIPTION = """
+Visualizes the specified bbox on the image.
+"""
+
+ CATEGORY = "KJNodes/masking"
+
+ def visualizebbox(self, bboxes, images, line_width):
+ image_list = []
+ for image, bbox in zip(images, bboxes):
+ x_min, y_min, width, height = bbox
+
+ # Ensure bbox coordinates are integers
+ x_min = int(x_min)
+ y_min = int(y_min)
+ width = int(width)
+ height = int(height)
+
+ # Permute the image dimensions
+ image = image.permute(2, 0, 1)
+
+ # Clone the image to draw bounding boxes
+ img_with_bbox = image.clone()
+
+ # Define the color for the bbox, e.g., red
+ color = torch.tensor([1, 0, 0], dtype=torch.float32)
+
+ # Ensure color tensor matches the image channels
+ if color.shape[0] != img_with_bbox.shape[0]:
+ color = color.unsqueeze(1).expand(-1, line_width)
+
+ # Draw lines for each side of the bbox with the specified line width
+ for lw in range(line_width):
+ # Top horizontal line
+ if y_min + lw < img_with_bbox.shape[1]:
+ img_with_bbox[:, y_min + lw, x_min:x_min + width] = color[:, None]
+
+ # Bottom horizontal line
+ if y_min + height - lw < img_with_bbox.shape[1]:
+ img_with_bbox[:, y_min + height - lw, x_min:x_min + width] = color[:, None]
+
+ # Left vertical line
+ if x_min + lw < img_with_bbox.shape[2]:
+ img_with_bbox[:, y_min:y_min + height, x_min + lw] = color[:, None]
+
+ # Right vertical line
+ if x_min + width - lw < img_with_bbox.shape[2]:
+ img_with_bbox[:, y_min:y_min + height, x_min + width - lw] = color[:, None]
+
+ # Permute the image dimensions back
+ img_with_bbox = img_with_bbox.permute(1, 2, 0).unsqueeze(0)
+ image_list.append(img_with_bbox)
+
+ return (torch.cat(image_list, dim=0),)
\ No newline at end of file
diff --git a/ComfyUI-KJNodes/nodes/curve_nodes.py b/ComfyUI-KJNodes/nodes/curve_nodes.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b59bdc62d0f67ce9fdbcf74de847b3eb4b3d232
--- /dev/null
+++ b/ComfyUI-KJNodes/nodes/curve_nodes.py
@@ -0,0 +1,1454 @@
+import torch
+from torchvision import transforms
+import json
+from PIL import Image, ImageDraw, ImageFont, ImageColor, ImageFilter, ImageChops
+import numpy as np
+from ..utility.utility import pil2tensor
+import folder_paths
+import io
+import base64
+
+from comfy.utils import common_upscale
+
+def plot_coordinates_to_tensor(coordinates, height, width, bbox_height, bbox_width, size_multiplier, prompt):
+ import matplotlib
+ matplotlib.use('Agg')
+ from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
+ text_color = '#999999'
+ bg_color = '#353535'
+ matplotlib.pyplot.rcParams['text.color'] = text_color
+ fig, ax = matplotlib.pyplot.subplots(figsize=(width/100, height/100), dpi=100)
+ fig.patch.set_facecolor(bg_color)
+ ax.set_facecolor(bg_color)
+ ax.grid(color=text_color, linestyle='-', linewidth=0.5)
+ ax.set_xlabel('x', color=text_color)
+ ax.set_ylabel('y', color=text_color)
+ for text in ax.get_xticklabels() + ax.get_yticklabels():
+ text.set_color(text_color)
+ ax.set_title('position for: ' + prompt)
+ ax.set_xlabel('X Coordinate')
+ ax.set_ylabel('Y Coordinate')
+ #ax.legend().remove()
+ ax.set_xlim(0, width) # Set the x-axis to match the input latent width
+ ax.set_ylim(height, 0) # Set the y-axis to match the input latent height, with (0,0) at top-left
+ # Adjust the margins of the subplot
+ matplotlib.pyplot.subplots_adjust(left=0.08, right=0.95, bottom=0.05, top=0.95, wspace=0.2, hspace=0.2)
+
+ cmap = matplotlib.pyplot.get_cmap('rainbow')
+ image_batch = []
+ canvas = FigureCanvas(fig)
+ width, height = fig.get_size_inches() * fig.get_dpi()
+ # Draw a box at each coordinate
+ for i, ((x, y), size) in enumerate(zip(coordinates, size_multiplier)):
+ color_index = i / (len(coordinates) - 1)
+ color = cmap(color_index)
+ draw_height = bbox_height * size
+ draw_width = bbox_width * size
+ rect = matplotlib.patches.Rectangle((x - draw_width/2, y - draw_height/2), draw_width, draw_height,
+ linewidth=1, edgecolor=color, facecolor='none', alpha=0.5)
+ ax.add_patch(rect)
+
+ # Check if there is a next coordinate to draw an arrow to
+ if i < len(coordinates) - 1:
+ x1, y1 = coordinates[i]
+ x2, y2 = coordinates[i + 1]
+ ax.annotate("", xy=(x2, y2), xytext=(x1, y1),
+ arrowprops=dict(arrowstyle="->",
+ linestyle="-",
+ lw=1,
+ color=color,
+ mutation_scale=20))
+ canvas.draw()
+ image_np = np.frombuffer(canvas.tostring_rgb(), dtype='uint8').reshape(int(height), int(width), 3).copy()
+ image_tensor = torch.from_numpy(image_np).float() / 255.0
+ image_tensor = image_tensor.unsqueeze(0)
+ image_batch.append(image_tensor)
+
+ matplotlib.pyplot.close(fig)
+ image_batch_tensor = torch.cat(image_batch, dim=0)
+
+ return image_batch_tensor
+
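+# The coordinate nodes below all accept the same input format: a JSON-like list
+# of points whose single quotes are normalised before json.loads. A minimal
+# sketch of what they expect:
+#   coords = "[{'x': 100, 'y': 100}, {'x': 256, 'y': 180}]"
+#   points = json.loads(coords.replace("'", '"'))
+#   points[0]['x']   # -> 100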
+class PlotCoordinates:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "coordinates": ("STRING", {"forceInput": True}),
+ "text": ("STRING", {"default": 'title', "multiline": False}),
+ "width": ("INT", {"default": 512, "min": 8, "max": 4096, "step": 8}),
+ "height": ("INT", {"default": 512, "min": 8, "max": 4096, "step": 8}),
+ "bbox_width": ("INT", {"default": 128, "min": 8, "max": 4096, "step": 8}),
+ "bbox_height": ("INT", {"default": 128, "min": 8, "max": 4096, "step": 8}),
+ },
+ "optional": {"size_multiplier": ("FLOAT", {"default": [1.0], "forceInput": True})},
+ }
+ RETURN_TYPES = ("IMAGE", "INT", "INT", "INT", "INT",)
+ RETURN_NAMES = ("images", "width", "height", "bbox_width", "bbox_height",)
+ FUNCTION = "append"
+ CATEGORY = "KJNodes/experimental"
+ DESCRIPTION = """
+Plots coordinates to a sequence of images using Matplotlib.
+
+"""
+
+ def append(self, coordinates, text, width, height, bbox_width, bbox_height, size_multiplier=[1.0]):
+ coordinates = json.loads(coordinates.replace("'", '"'))
+ coordinates = [(coord['x'], coord['y']) for coord in coordinates]
+ batch_size = len(coordinates)
+ if not size_multiplier or len(size_multiplier) != batch_size:
+ size_multiplier = [0] * batch_size
+ else:
+ size_multiplier = size_multiplier * (batch_size // len(size_multiplier)) + size_multiplier[:batch_size % len(size_multiplier)]
+
+ plot_image_tensor = plot_coordinates_to_tensor(coordinates, height, width, bbox_height, bbox_width, size_multiplier, text)
+
+ return (plot_image_tensor, width, height, bbox_width, bbox_height)
+
+class SplineEditor:
+
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "points_store": ("STRING", {"multiline": False}),
+ "coordinates": ("STRING", {"multiline": False}),
+ "mask_width": ("INT", {"default": 512, "min": 8, "max": 4096, "step": 8}),
+ "mask_height": ("INT", {"default": 512, "min": 8, "max": 4096, "step": 8}),
+ "points_to_sample": ("INT", {"default": 16, "min": 2, "max": 1000, "step": 1}),
+ "sampling_method": (
+ [
+ 'path',
+ 'time',
+ 'controlpoints'
+ ],
+ {
+ "default": 'time'
+ }),
+ "interpolation": (
+ [
+ 'cardinal',
+ 'monotone',
+ 'basis',
+ 'linear',
+ 'step-before',
+ 'step-after',
+ 'polar',
+ 'polar-reverse',
+ ],
+ {
+ "default": 'cardinal'
+ }),
+ "tension": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
+ "repeat_output": ("INT", {"default": 1, "min": 1, "max": 4096, "step": 1}),
+ "float_output_type": (
+ [
+ 'list',
+ 'pandas series',
+ 'tensor',
+ ],
+ {
+ "default": 'list'
+ }),
+ },
+ "optional": {
+ "min_value": ("FLOAT", {"default": 0.0, "min": -10000.0, "max": 10000.0, "step": 0.01}),
+ "max_value": ("FLOAT", {"default": 1.0, "min": -10000.0, "max": 10000.0, "step": 0.01}),
+ "bg_image": ("IMAGE", ),
+ }
+ }
+
+ RETURN_TYPES = ("MASK", "STRING", "FLOAT", "INT", "STRING",)
+ RETURN_NAMES = ("mask", "coord_str", "float", "count", "normalized_str",)
+ FUNCTION = "splinedata"
+ CATEGORY = "KJNodes/weights"
+ DESCRIPTION = """
+# WORK IN PROGRESS
+Do not count on this as part of your workflow yet;
+it probably contains lots of bugs and stability is not
+guaranteed!!
+
+## Graphical editor to create values for various
+## schedules and/or mask batches.
+
+**Shift + click** to add control point at end.
+**Ctrl + click** to add control point (subdivide) between two points.
+**Right click on a point** to delete it.
+Note that you can't delete from start/end.
+
+Right click on the canvas for a context menu.
+These are purely visual options and do not affect the output:
+ - Toggle handles visibility
+ - Display sample points: display the points to be returned.
+
+The **points_to_sample** value sets the number of samples
+returned from the **drawn spline itself**; this is independent of the
+actual control points, so the interpolation type matters.
+sampling_method:
+ - time: samples along the time axis, used for schedules
+ - path: samples along the path itself, useful for coordinates
+
+output types:
+ - mask batch
+ example compatible nodes: anything that takes masks
+ - list of floats
+ example compatible nodes: IPAdapter weights
+ - pandas series
+ example compatible nodes: anything that takes FizzNodes'
+ Batch Value Schedule
+ - torch tensor
+ example compatible nodes: unknown
+"""
+
+ def splinedata(self, mask_width, mask_height, coordinates, float_output_type, interpolation,
+ points_to_sample, sampling_method, points_store, tension, repeat_output,
+ min_value=0.0, max_value=1.0, bg_image=None):
+
+ coordinates = json.loads(coordinates)
+ normalized = []
+ normalized_y_values = []
+ for coord in coordinates:
+ coord['x'] = int(round(coord['x']))
+ coord['y'] = int(round(coord['y']))
+ norm_x = (1.0 - (coord['x'] / mask_width)) * (max_value - min_value) + min_value
+ norm_y = (1.0 - (coord['y'] / mask_height)) * (max_value - min_value) + min_value
+ normalized_y_values.append(norm_y)
+ normalized.append({'x':norm_x, 'y':norm_y})
+ if float_output_type == 'list':
+ out_floats = normalized_y_values * repeat_output
+ elif float_output_type == 'pandas series':
+ try:
+ import pandas as pd
+ except ImportError:
+ raise Exception("SplineEditor: pandas is not installed. Please install pandas to use this output_type")
+ out_floats = pd.Series(normalized_y_values * repeat_output)
+ elif float_output_type == 'tensor':
+ out_floats = torch.tensor(normalized_y_values * repeat_output, dtype=torch.float32)
+ # Create a color map for grayscale intensities
+ color_map = lambda y: torch.full((mask_height, mask_width, 3), y, dtype=torch.float32)
+
+ # Create image tensors for each normalized y value
+ mask_tensors = [color_map(y) for y in normalized_y_values]
+ masks_out = torch.stack(mask_tensors)
+ masks_out = masks_out.repeat(repeat_output, 1, 1, 1)
+ masks_out = masks_out.mean(dim=-1)
+ if bg_image is None:
+ return (masks_out, json.dumps(coordinates), out_floats, len(out_floats) , json.dumps(normalized))
+ else:
+ transform = transforms.ToPILImage()
+ image = transform(bg_image[0].permute(2, 0, 1))
+ buffered = io.BytesIO()
+ image.save(buffered, format="JPEG", quality=75)
+
+ # Step 3: Encode the image bytes to a Base64 string
+ img_bytes = buffered.getvalue()
+ img_base64 = base64.b64encode(img_bytes).decode('utf-8')
+ return {
+ "ui": {"bg_image": [img_base64]},
+ "result":(masks_out, json.dumps(coordinates), out_floats, len(out_floats) , json.dumps(normalized))
+ }
+
+
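+# A worked example of the value normalisation in splinedata above, assuming
+# mask_height = 512, min_value = 0.0 and max_value = 1.0. Canvas y grows
+# downwards, so the value is flipped before remapping:
+#   (1.0 - (128 / 512)) * (1.0 - 0.0) + 0.0   # -> 0.75
+# A point drawn near the top of the editor therefore yields a value near 1.0.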
+class CreateShapeMaskOnPath:
+
+ RETURN_TYPES = ("MASK", "MASK",)
+ RETURN_NAMES = ("mask", "mask_inverted",)
+ FUNCTION = "createshapemask"
+ CATEGORY = "KJNodes/masking/generate"
+ DESCRIPTION = """
+Creates a mask or batch of masks with the specified shape.
+Locations are center locations.
+"""
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "shape": (
+ [ 'circle',
+ 'square',
+ 'triangle',
+ ],
+ {
+ "default": 'circle'
+ }),
+ "coordinates": ("STRING", {"forceInput": True}),
+ "frame_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}),
+ "frame_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}),
+ "shape_width": ("INT", {"default": 128,"min": 8, "max": 4096, "step": 1}),
+ "shape_height": ("INT", {"default": 128,"min": 8, "max": 4096, "step": 1}),
+ },
+ "optional": {
+ "size_multiplier": ("FLOAT", {"default": [1.0], "forceInput": True}),
+ }
+ }
+
+ def createshapemask(self, coordinates, frame_width, frame_height, shape_width, shape_height, shape, size_multiplier=[1.0]):
+ # Define the number of images in the batch
+ coordinates = coordinates.replace("'", '"')
+ coordinates = json.loads(coordinates)
+
+ batch_size = len(coordinates)
+ out = []
+ color = "white"
+ if not size_multiplier or len(size_multiplier) != batch_size:
+ size_multiplier = [0] * batch_size
+ else:
+ size_multiplier = size_multiplier * (batch_size // len(size_multiplier)) + size_multiplier[:batch_size % len(size_multiplier)]
+ for i, coord in enumerate(coordinates):
+ image = Image.new("RGB", (frame_width, frame_height), "black")
+ draw = ImageDraw.Draw(image)
+
+ # Calculate the size for this frame and ensure it's not less than 0
+ current_width = max(0, shape_width + i * size_multiplier[i])
+ current_height = max(0, shape_height + i * size_multiplier[i])
+
+ location_x = coord['x']
+ location_y = coord['y']
+
+ if shape == 'circle' or shape == 'square':
+ # Define the bounding box for the shape
+ left_up_point = (location_x - current_width // 2, location_y - current_height // 2)
+ right_down_point = (location_x + current_width // 2, location_y + current_height // 2)
+ two_points = [left_up_point, right_down_point]
+
+ if shape == 'circle':
+ draw.ellipse(two_points, fill=color)
+ elif shape == 'square':
+ draw.rectangle(two_points, fill=color)
+
+ elif shape == 'triangle':
+ # Define the points for the triangle
+ left_up_point = (location_x - current_width // 2, location_y + current_height // 2) # bottom left
+ right_down_point = (location_x + current_width // 2, location_y + current_height // 2) # bottom right
+ top_point = (location_x, location_y - current_height // 2) # top point
+ draw.polygon([top_point, left_up_point, right_down_point], fill=color)
+
+ image = pil2tensor(image)
+ mask = image[:, :, :, 0]
+ out.append(mask)
+ outstack = torch.cat(out, dim=0)
+ return (outstack, 1.0 - outstack,)
+
+class CreateShapeImageOnPath:
+
+ RETURN_TYPES = ("IMAGE", "MASK",)
+ RETURN_NAMES = ("image","mask", )
+ FUNCTION = "createshapemask"
+ CATEGORY = "KJNodes/image"
+ DESCRIPTION = """
+Creates an image or batch of images with the specified shape.
+Locations are center locations.
+"""
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "shape": (
+ [ 'circle',
+ 'square',
+ 'triangle',
+ ],
+ {
+ "default": 'circle'
+ }),
+ "coordinates": ("STRING", {"forceInput": True}),
+ "frame_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}),
+ "frame_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}),
+ "shape_width": ("INT", {"default": 128,"min": 2, "max": 4096, "step": 1}),
+ "shape_height": ("INT", {"default": 128,"min": 2, "max": 4096, "step": 1}),
+ "shape_color": ("STRING", {"default": 'white'}),
+ "bg_color": ("STRING", {"default": 'black'}),
+ "blur_radius": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 100, "step": 0.1}),
+ "intensity": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 100.0, "step": 0.01}),
+ },
+ "optional": {
+ "size_multiplier": ("FLOAT", {"default": [1.0], "forceInput": True}),
+ "trailing": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
+ }
+ }
+
+ def createshapemask(self, coordinates, frame_width, frame_height, shape_width, shape_height, shape_color,
+ bg_color, blur_radius, shape, intensity, size_multiplier=[1.0], accumulate=False, trailing=1.0):
+ # coordinates is either a single JSON string of points or a list/tuple of such
+ # strings (one per path); a length below 10 is taken to mean a collection of
+ # coordinate strings rather than one long JSON string
+ if len(coordinates) < 10:
+ coords_list = []
+ for coords in coordinates:
+ coords = json.loads(coords.replace("'", '"'))
+ coords_list.append(coords)
+ else:
+ coords = json.loads(coordinates.replace("'", '"'))
+ coords_list = [coords]
+
+ batch_size = len(coords_list[0])
+ images_list = []
+ masks_list = []
+
+ if not size_multiplier or len(size_multiplier) != batch_size:
+ size_multiplier = [0] * batch_size
+ else:
+ size_multiplier = size_multiplier * (batch_size // len(size_multiplier)) + size_multiplier[:batch_size % len(size_multiplier)]
+
+ previous_output = None
+
+ for i in range(batch_size):
+ image = Image.new("RGB", (frame_width, frame_height), bg_color)
+ draw = ImageDraw.Draw(image)
+
+ # Calculate the size for this frame and ensure it's not less than 0
+ current_width = max(0, shape_width + i * size_multiplier[i])
+ current_height = max(0, shape_height + i * size_multiplier[i])
+
+ for coords in coords_list:
+ location_x = coords[i]['x']
+ location_y = coords[i]['y']
+
+ if shape == 'circle' or shape == 'square':
+ # Define the bounding box for the shape
+ left_up_point = (location_x - current_width // 2, location_y - current_height // 2)
+ right_down_point = (location_x + current_width // 2, location_y + current_height // 2)
+ two_points = [left_up_point, right_down_point]
+
+ if shape == 'circle':
+ draw.ellipse(two_points, fill=shape_color)
+ elif shape == 'square':
+ draw.rectangle(two_points, fill=shape_color)
+
+ elif shape == 'triangle':
+ # Define the points for the triangle
+ left_up_point = (location_x - current_width // 2, location_y + current_height // 2) # bottom left
+ right_down_point = (location_x + current_width // 2, location_y + current_height // 2) # bottom right
+ top_point = (location_x, location_y - current_height // 2) # top point
+ draw.polygon([top_point, left_up_point, right_down_point], fill=shape_color)
+
+ if blur_radius != 0:
+ image = image.filter(ImageFilter.GaussianBlur(blur_radius))
+ # Blend the current image with the accumulated image
+
+ image = pil2tensor(image)
+ if trailing != 1.0 and previous_output is not None:
+ # Add the decayed previous output to the current frame
+ image += trailing * previous_output
+ image = image / image.max()
+ previous_output = image
+ image = image * intensity
+ mask = image[:, :, :, 0]
+ masks_list.append(mask)
+ images_list.append(image)
+ out_images = torch.cat(images_list, dim=0).cpu().float()
+ out_masks = torch.cat(masks_list, dim=0)
+ return (out_images, out_masks)
+
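+# A hedged sketch of the "trailing" accumulation in the node above. Each frame
+# adds a decayed copy of the previous output and is re-normalised, leaving a
+# motion trail behind the moving shape. draw_frame is an illustrative name:
+#   frame = draw_frame(i)
+#   frame = frame + trailing * previous_output
+#   frame = frame / frame.max()     # keep values in [0, 1]
+#   previous_output = frame
+# With trailing = 1.0 the branch is skipped and no trail is accumulated.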
+class CreateTextOnPath:
+
+ RETURN_TYPES = ("IMAGE", "MASK", "MASK",)
+ RETURN_NAMES = ("image", "mask", "mask_inverted",)
+ FUNCTION = "createtextmask"
+ CATEGORY = "KJNodes/masking/generate"
+ DESCRIPTION = """
+Creates a mask or batch of masks with the specified text.
+Locations are center locations.
+"""
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "coordinates": ("STRING", {"forceInput": True}),
+ "text": ("STRING", {"default": 'text', "multiline": True}),
+ "frame_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}),
+ "frame_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}),
+ "font": (folder_paths.get_filename_list("kjnodes_fonts"), ),
+ "font_size": ("INT", {"default": 42}),
+ "alignment": (
+ [ 'left',
+ 'center',
+ 'right'
+ ],
+ {"default": 'center'}
+ ),
+ "text_color": ("STRING", {"default": 'white'}),
+ },
+ "optional": {
+ "size_multiplier": ("FLOAT", {"default": [1.0], "forceInput": True}),
+ }
+ }
+
+ def createtextmask(self, coordinates, frame_width, frame_height, font, font_size, text, text_color, alignment, size_multiplier=[1.0]):
+ coordinates = coordinates.replace("'", '"')
+ coordinates = json.loads(coordinates)
+
+ batch_size = len(coordinates)
+ mask_list = []
+ image_list = []
+ color = text_color
+ font_path = folder_paths.get_full_path("kjnodes_fonts", font)
+
+ if len(size_multiplier) != batch_size:
+ size_multiplier = size_multiplier * (batch_size // len(size_multiplier)) + size_multiplier[:batch_size % len(size_multiplier)]
+
+ for i, coord in enumerate(coordinates):
+ image = Image.new("RGB", (frame_width, frame_height), "black")
+ draw = ImageDraw.Draw(image)
+ lines = text.split('\n') # Split the text into lines
+ # Apply the size multiplier to the font size for this iteration
+ current_font_size = int(font_size * size_multiplier[i])
+ current_font = ImageFont.truetype(font_path, current_font_size)
+ line_heights = [current_font.getbbox(line)[3] for line in lines] # List of line heights
+ total_text_height = sum(line_heights) # Total height of text block
+
+ # Calculate the starting Y position to center the block of text
+ start_y = coord['y'] - total_text_height // 2
+ for j, line in enumerate(lines):
+ text_width, text_height = current_font.getbbox(line)[2], line_heights[j]
+ if alignment == 'left':
+ location_x = coord['x']
+ elif alignment == 'center':
+ location_x = int(coord['x'] - text_width // 2)
+ elif alignment == 'right':
+ location_x = int(coord['x'] - text_width)
+
+ location_y = int(start_y + sum(line_heights[:j]))
+ text_position = (location_x, location_y)
+ # Draw the text
+ try:
+ draw.text(text_position, line, fill=color, font=current_font, features=['-liga'])
+ except:
+ draw.text(text_position, line, fill=color, font=current_font)
+
+ image = pil2tensor(image)
+ non_black_pixels = (image > 0).any(dim=-1)
+ mask = non_black_pixels.to(image.dtype)
+ mask_list.append(mask)
+ image_list.append(image)
+
+ out_images = torch.cat(image_list, dim=0).cpu().float()
+ out_masks = torch.cat(mask_list, dim=0)
+ return (out_images, out_masks, 1.0 - out_masks,)
+
+class CreateGradientFromCoords:
+
+ RETURN_TYPES = ("IMAGE", )
+ RETURN_NAMES = ("image", )
+ FUNCTION = "generate"
+ CATEGORY = "KJNodes/image"
+ DESCRIPTION = """
+Creates a gradient image from coordinates.
+"""
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "coordinates": ("STRING", {"forceInput": True}),
+ "frame_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}),
+ "frame_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}),
+ "start_color": ("STRING", {"default": 'white'}),
+ "end_color": ("STRING", {"default": 'black'}),
+ "multiplier": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 100.0, "step": 0.01}),
+ },
+ }
+
+ def generate(self, coordinates, frame_width, frame_height, start_color, end_color, multiplier):
+ # Parse the coordinates
+ coordinates = json.loads(coordinates.replace("'", '"'))
+
+ # Create an image
+ image = Image.new("RGB", (frame_width, frame_height))
+ draw = ImageDraw.Draw(image)
+
+ # Extract start and end points for the gradient
+ start_coord = coordinates[0]
+ end_coord = coordinates[1]
+
+ start_color = ImageColor.getrgb(start_color)
+ end_color = ImageColor.getrgb(end_color)
+
+ # Calculate the gradient direction (vector)
+ gradient_direction = (end_coord['x'] - start_coord['x'], end_coord['y'] - start_coord['y'])
+ gradient_length = (gradient_direction[0] ** 2 + gradient_direction[1] ** 2) ** 0.5
+
+ # Iterate over each pixel in the image
+ for y in range(frame_height):
+ for x in range(frame_width):
+ # Calculate the projection of the point on the gradient line
+ point_vector = (x - start_coord['x'], y - start_coord['y'])
+ projection = (point_vector[0] * gradient_direction[0] + point_vector[1] * gradient_direction[1]) / gradient_length
+ projection = max(min(projection, gradient_length), 0) # Clamp the projection value
+
+ # Calculate the blend factor for the current pixel
+ blend = projection * multiplier / gradient_length
+
+ # Determine the color of the current pixel
+ color = (
+ int(start_color[0] + (end_color[0] - start_color[0]) * blend),
+ int(start_color[1] + (end_color[1] - start_color[1]) * blend),
+ int(start_color[2] + (end_color[2] - start_color[2]) * blend)
+ )
+
+ # Set the pixel color
+ draw.point((x, y), fill=color)
+
+ # Convert the PIL image to a tensor (assuming such a function exists in your context)
+ image_tensor = pil2tensor(image)
+
+ return (image_tensor,)
+
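+# A small worked example of the per-pixel projection used in the gradient above,
+# assuming start = (0, 0), end = (100, 0) and multiplier = 1.0:
+#   point_vector = (25, 40)
+#   (25 * 100 + 40 * 0) / 100.0   # projection -> 25.0
+#   25.0 * 1.0 / 100.0            # blend -> 0.25, i.e. 25% towards end_color
+# Pixels are shaded by how far they project onto the start -> end line.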
+class GradientToFloat:
+
+ RETURN_TYPES = ("FLOAT", "FLOAT",)
+ RETURN_NAMES = ("float_x", "float_y", )
+ FUNCTION = "sample"
+ CATEGORY = "KJNodes/image"
+ DESCRIPTION = """
+Samples the image along its width and height and returns the mean values as two float lists.
+"""
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "image": ("IMAGE", ),
+ "steps": ("INT", {"default": 10, "min": 2, "max": 10000, "step": 1}),
+ },
+ }
+
+ def sample(self, image, steps):
+ # Assuming image is a tensor with shape [B, H, W, C]
+ B, H, W, C = image.shape
+
+ # Sample along the width axis (W)
+ w_intervals = torch.linspace(0, W - 1, steps=steps, dtype=torch.int64)
+ # Assuming we're sampling from the first batch and the first channel
+ w_sampled = image[0, :, w_intervals, 0]
+
+ # Sample along the height axis (H)
+ h_intervals = torch.linspace(0, H - 1, steps=steps, dtype=torch.int64)
+ # Assuming we're sampling from the first batch and the first channel
+ h_sampled = image[0, h_intervals, :, 0]
+
+ # Taking the mean across the height for width sampling, and across the width for height sampling
+ w_values = w_sampled.mean(dim=0).tolist()
+ h_values = h_sampled.mean(dim=1).tolist()
+
+ return (w_values, h_values)
+
+class MaskOrImageToWeight:
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "output_type": (
+ [
+ 'list',
+ 'pandas series',
+ 'tensor',
+ 'string'
+ ],
+ {
+ "default": 'list'
+ }),
+ },
+ "optional": {
+ "images": ("IMAGE",),
+ "masks": ("MASK",),
+ },
+
+ }
+ RETURN_TYPES = ("FLOAT", "STRING",)
+ FUNCTION = "execute"
+ CATEGORY = "KJNodes/weights"
+ DESCRIPTION = """
+Gets the mean values from mask or image batch
+and returns that as the selected output type.
+"""
+
+ def execute(self, output_type, images=None, masks=None):
+ mean_values = []
+ if masks is not None and images is None:
+ for mask in masks:
+ mean_values.append(mask.mean().item())
+ elif masks is None and images is not None:
+ for image in images:
+ mean_values.append(image.mean().item())
+ elif masks is not None and images is not None:
+ raise Exception("MaskOrImageToWeight: Use either mask or image input only.")
+
+ # Convert mean_values to the specified output_type
+ if output_type == 'list':
+ out = mean_values
+ elif output_type == 'pandas series':
+ try:
+ import pandas as pd
+ except ImportError:
+ raise Exception("MaskOrImageToWeight: pandas is not installed. Please install pandas to use this output_type")
+ out = pd.Series(mean_values)
+ elif output_type == 'tensor':
+ out = torch.tensor(mean_values, dtype=torch.float32)
+ else:
+ # 'string' (or any other type) falls back to the plain list; the string
+ # representations are always returned as the second output below
+ out = mean_values
+ return (out, [str(value) for value in mean_values],)
+
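+# A minimal sketch of what MaskOrImageToWeight produces for a mask batch: one
+# mean value per mask, usable for example as IPAdapter weights:
+#   masks = torch.stack([torch.zeros(64, 64), torch.ones(64, 64) * 0.5])
+#   [m.mean().item() for m in masks]   # -> [0.0, 0.5]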
+class WeightScheduleConvert:
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "input_values": ("FLOAT", {"default": 0.0, "forceInput": True}),
+ "output_type": (
+ [
+ 'match_input',
+ 'list',
+ 'pandas series',
+ 'tensor',
+ ],
+ {
+ "default": 'list'
+ }),
+ "invert": ("BOOLEAN", {"default": False}),
+ "repeat": ("INT", {"default": 1,"min": 1, "max": 255, "step": 1}),
+ },
+ "optional": {
+ "remap_to_frames": ("INT", {"default": 0}),
+ "interpolation_curve": ("FLOAT", {"forceInput": True}),
+ "remap_values": ("BOOLEAN", {"default": False}),
+ "remap_min": ("FLOAT", {"default": 0.0, "min": -100000, "max": 100000.0, "step": 0.01}),
+ "remap_max": ("FLOAT", {"default": 1.0, "min": -100000, "max": 100000.0, "step": 0.01}),
+ },
+
+ }
+ RETURN_TYPES = ("FLOAT", "STRING", "INT",)
+ FUNCTION = "execute"
+ CATEGORY = "KJNodes/weights"
+ DESCRIPTION = """
+Converts different value lists/series to another type.
+"""
+
+ def detect_input_type(self, input_values):
+ import pandas as pd
+ if isinstance(input_values, list):
+ return 'list'
+ elif isinstance(input_values, pd.Series):
+ return 'pandas series'
+ elif isinstance(input_values, torch.Tensor):
+ return 'tensor'
+ else:
+ raise ValueError("Unsupported input type")
+
+ def execute(self, input_values, output_type, invert, repeat, remap_to_frames=0, interpolation_curve=None, remap_min=0.0, remap_max=1.0, remap_values=False):
+ import pandas as pd
+ input_type = self.detect_input_type(input_values)
+
+ if input_type == 'pandas series':
+ float_values = input_values.tolist()
+ elif input_type == 'tensor':
+ float_values = input_values
+ else:
+ float_values = input_values
+
+ if invert:
+ float_values = [1 - value for value in float_values]
+
+ if interpolation_curve is not None:
+ interpolated_pattern = []
+ orig_float_values = float_values
+ for value in interpolation_curve:
+ min_val = min(orig_float_values)
+ max_val = max(orig_float_values)
+ # Normalize the values to [0, 1]
+ normalized_values = [(value - min_val) / (max_val - min_val) for value in orig_float_values]
+ # Interpolate the normalized values to the new frame count
+ remapped_float_values = np.interp(np.linspace(0, 1, int(remap_to_frames * value)), np.linspace(0, 1, len(normalized_values)), normalized_values).tolist()
+ interpolated_pattern.extend(remapped_float_values)
+ float_values = interpolated_pattern
+ else:
+ # Remap float_values to match target_frame_amount
+ if remap_to_frames > 0 and remap_to_frames != len(float_values):
+ min_val = min(float_values)
+ max_val = max(float_values)
+ # Normalize the values to [0, 1]
+ normalized_values = [(value - min_val) / (max_val - min_val) for value in float_values]
+ # Interpolate the normalized values to the new frame count
+ float_values = np.interp(np.linspace(0, 1, remap_to_frames), np.linspace(0, 1, len(normalized_values)), normalized_values).tolist()
+
+ float_values = float_values * repeat
+ if remap_values:
+ float_values = self.remap_values(float_values, remap_min, remap_max)
+
+ if output_type == 'list':
+ out = float_values,
+ elif output_type == 'pandas series':
+ out = pd.Series(float_values),
+ elif output_type == 'tensor':
+ if input_type == 'pandas series':
+ out = torch.tensor(float_values.values, dtype=torch.float32),
+ else:
+ out = torch.tensor(float_values, dtype=torch.float32),
+ elif output_type == 'match_input':
+ out = float_values,
+ return (out, [str(value) for value in float_values], [int(value) for value in float_values])
+
+ def remap_values(self, values, target_min, target_max):
+ # Determine the current range
+ current_min = min(values)
+ current_max = max(values)
+ current_range = current_max - current_min
+
+ # Determine the target range
+ target_range = target_max - target_min
+
+ # Perform the linear interpolation for each value
+ remapped_values = [(value - current_min) / current_range * target_range + target_min for value in values]
+
+ return remapped_values
+
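+# A worked example of remap_values above, mapping the current value range onto
+# [target_min, target_max]:
+#   values = [0.2, 0.5, 0.8]                # current range 0.2..0.8
+#   (0.5 - 0.2) / 0.6 * 1.0 + 0.0           # -> 0.5 with target range 0.0..1.0
+# The endpoints map exactly to target_min and target_max.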
+
+class FloatToMask:
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "input_values": ("FLOAT", {"forceInput": True, "default": 0}),
+ "width": ("INT", {"default": 100, "min": 1}),
+ "height": ("INT", {"default": 100, "min": 1}),
+ },
+ }
+ RETURN_TYPES = ("MASK",)
+ FUNCTION = "execute"
+ CATEGORY = "KJNodes/masking/generate"
+ DESCRIPTION = """
+Generates a batch of masks based on the input float values.
+The batch size is determined by the length of the input float values.
+Each mask is generated with the specified width and height.
+"""
+
+ def execute(self, input_values, width, height):
+ import pandas as pd
+ # Ensure input_values is a list
+ if isinstance(input_values, (float, int)):
+ input_values = [input_values]
+ elif isinstance(input_values, pd.Series):
+ input_values = input_values.tolist()
+ elif isinstance(input_values, list) and all(isinstance(item, list) for item in input_values):
+ input_values = [item for sublist in input_values for item in sublist]
+
+ # Generate a batch of masks based on the input_values
+ masks = []
+ for value in input_values:
+ # Assuming value is a float between 0 and 1 representing the mask's intensity
+ mask = torch.ones((height, width), dtype=torch.float32) * value
+ masks.append(mask)
+ masks_out = torch.stack(masks, dim=0)
+
+ return(masks_out,)
+class WeightScheduleExtend:
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "input_values_1": ("FLOAT", {"default": 0.0, "forceInput": True}),
+ "input_values_2": ("FLOAT", {"default": 0.0, "forceInput": True}),
+ "output_type": (
+ [
+ 'match_input',
+ 'list',
+ 'pandas series',
+ 'tensor',
+ ],
+ {
+ "default": 'match_input'
+ }),
+ },
+
+ }
+ RETURN_TYPES = ("FLOAT",)
+ FUNCTION = "execute"
+ CATEGORY = "KJNodes/weights"
+ DESCRIPTION = """
+Extends, and converts if needed, different value lists/series
+"""
+
+ def detect_input_type(self, input_values):
+ import pandas as pd
+ if isinstance(input_values, list):
+ return 'list'
+ elif isinstance(input_values, pd.Series):
+ return 'pandas series'
+ elif isinstance(input_values, torch.Tensor):
+ return 'tensor'
+ else:
+ raise ValueError("Unsupported input type")
+
+ def execute(self, input_values_1, input_values_2, output_type):
+ import pandas as pd
+ input_type_1 = self.detect_input_type(input_values_1)
+ input_type_2 = self.detect_input_type(input_values_2)
+ # Convert input_values_2 to the same format as input_values_1 if they do not match
+ if not input_type_1 == input_type_2:
+ print("Converting input_values_2 to the same format as input_values_1")
+ if input_type_1 == 'pandas series':
+ # Convert input_values_2 to a pandas Series
+ float_values_2 = pd.Series(input_values_2)
+ elif input_type_1 == 'tensor':
+ # Convert input_values_2 to a tensor
+ float_values_2 = torch.tensor(input_values_2, dtype=torch.float32)
+ else:
+ print("Input types match, no conversion needed")
+ # If the types match, no conversion is needed
+ float_values_2 = input_values_2
+
+ float_values = input_values_1 + float_values_2
+
+ if output_type == 'list':
+ return float_values,
+ elif output_type == 'pandas series':
+ return pd.Series(float_values),
+ elif output_type == 'tensor':
+ if input_type_1 == 'pandas series':
+ return torch.tensor(float_values.values, dtype=torch.float32),
+ else:
+ return torch.tensor(float_values, dtype=torch.float32),
+ elif output_type == 'match_input':
+ return float_values,
+ else:
+ raise ValueError(f"Unsupported output_type: {output_type}")
+
+class FloatToSigmas:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required":
+ {
+ "float_list": ("FLOAT", {"default": 0.0, "forceInput": True}),
+ }
+ }
+ RETURN_TYPES = ("SIGMAS",)
+ RETURN_NAMES = ("SIGMAS",)
+ CATEGORY = "KJNodes/noise"
+ FUNCTION = "customsigmas"
+ DESCRIPTION = """
+Creates a sigmas tensor from list of float values.
+
+"""
+ def customsigmas(self, float_list):
+ return torch.tensor(float_list, dtype=torch.float32),
+
+class SigmasToFloat:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required":
+ {
+ "sigmas": ("SIGMAS",),
+ }
+ }
+ RETURN_TYPES = ("FLOAT",)
+ RETURN_NAMES = ("float",)
+ CATEGORY = "KJNodes/noise"
+ FUNCTION = "customsigmas"
+ DESCRIPTION = """
+Creates a float list from sigmas tensors.
+
+"""
+ def customsigmas(self, sigmas):
+ return sigmas.tolist(),
+
+class GLIGENTextBoxApplyBatchCoords:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {"conditioning_to": ("CONDITIONING", ),
+ "latents": ("LATENT", ),
+ "clip": ("CLIP", ),
+ "gligen_textbox_model": ("GLIGEN", ),
+ "coordinates": ("STRING", {"forceInput": True}),
+ "text": ("STRING", {"multiline": True}),
+ "width": ("INT", {"default": 128, "min": 8, "max": 4096, "step": 8}),
+ "height": ("INT", {"default": 128, "min": 8, "max": 4096, "step": 8}),
+ },
+ "optional": {"size_multiplier": ("FLOAT", {"default": [1.0], "forceInput": True})},
+ }
+ RETURN_TYPES = ("CONDITIONING", "IMAGE", )
+ RETURN_NAMES = ("conditioning", "coord_preview", )
+ FUNCTION = "append"
+ CATEGORY = "KJNodes/experimental"
+ DESCRIPTION = """
+This node allows scheduling GLIGEN text box positions in a batch,
+to be used with AnimateDiff-Evolved. Intended to pair with the
+Spline Editor node.
+
+The GLIGEN model can be downloaded through the Manager's "Install Models" menu.
+Or directly from here:
+https://huggingface.co/comfyanonymous/GLIGEN_pruned_safetensors/tree/main
+
+Inputs:
+- **latents** input is used to calculate batch size
+- **clip** is your standard text encoder, use same as for the main prompt
+- **gligen_textbox_model** connects to GLIGEN Loader
+- **coordinates** takes a json string of points, directly compatible
+with the spline editor node.
+- **text** is the part of the prompt to set position for
+- **width** and **height** are the size of the GLIGEN bounding box
+
+Outputs:
+- **conditioning** goes between the CLIP text encode and the sampler
+- **coord_preview** is an optional preview of the coordinates and
+bounding boxes.
+
+"""
+
+ def append(self, latents, coordinates, conditioning_to, clip, gligen_textbox_model, text, width, height, size_multiplier=[1.0]):
+ coordinates = json.loads(coordinates.replace("'", '"'))
+ coordinates = [(coord['x'], coord['y']) for coord in coordinates]
+
+ batch_size = sum(tensor.size(0) for tensor in latents.values())
+ if len(coordinates) != batch_size:
+ print("GLIGENTextBoxApplyBatchCoords WARNING: The number of coordinates does not match the number of latents")
+
+ c = []
+ _, cond_pooled = clip.encode_from_tokens(clip.tokenize(text), return_pooled=True)
+
+ for t in conditioning_to:
+ n = [t[0], t[1].copy()]
+
+ position_params_batch = [[] for _ in range(batch_size)] # Initialize a list of empty lists for each batch item
+ if len(size_multiplier) != batch_size:
+ size_multiplier = size_multiplier * (batch_size // len(size_multiplier)) + size_multiplier[:batch_size % len(size_multiplier)]
+
+ for i in range(batch_size):
+ x_position, y_position = coordinates[i]
+ position_param = (cond_pooled, int((height // 8) * size_multiplier[i]), int((width // 8) * size_multiplier[i]), (y_position - height // 2) // 8, (x_position - width // 2) // 8)
+ position_params_batch[i].append(position_param) # Append position_param to the correct sublist
+
+ prev = []
+ if "gligen" in n[1]:
+ prev = n[1]['gligen'][2]
+ else:
+ prev = [[] for _ in range(batch_size)]
+ # Concatenate prev and position_params_batch, ensuring both are lists of lists
+ # and each sublist corresponds to a batch item
+ combined_position_params = [prev_item + batch_item for prev_item, batch_item in zip(prev, position_params_batch)]
+ n[1]['gligen'] = ("position_batched", gligen_textbox_model, combined_position_params)
+ c.append(n)
+
+ image_height = latents['samples'].shape[-2] * 8
+ image_width = latents['samples'].shape[-1] * 8
+ plot_image_tensor = plot_coordinates_to_tensor(coordinates, image_height, image_width, height, width, size_multiplier, text)
+
+ return (c, plot_image_tensor,)
+
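+# Illustrative sketch of the per-frame GLIGEN position parameter built by
+# GLIGENTextBoxApplyBatchCoords above (hypothetical values, not executed):
+# with width = height = 128, a coordinate of (x=256, y=256) and a
+# size_multiplier of 1.0, each frame's tuple becomes
+#   (cond_pooled, 128 // 8, 128 // 8, (256 - 64) // 8, (256 - 64) // 8)
+#   == (cond_pooled, 16, 16, 24, 24)
+# i.e. a 16x16 latent-space box whose top-left corner sits at latent (24, 24).
+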
+class CreateInstanceDiffusionTracking:
+
+ RETURN_TYPES = ("TRACKING", "STRING", "INT", "INT", "INT", "INT",)
+ RETURN_NAMES = ("tracking", "prompt", "width", "height", "bbox_width", "bbox_height",)
+ FUNCTION = "tracking"
+ CATEGORY = "KJNodes/InstanceDiffusion"
+ DESCRIPTION = """
+Creates tracking data to be used with InstanceDiffusion:
+https://github.com/logtd/ComfyUI-InstanceDiffusion
+
+InstanceDiffusion prompt format:
+"class_id.class_name": "prompt",
+for example:
+"1.head": "((head))",
+"""
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "coordinates": ("STRING", {"forceInput": True}),
+ "width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}),
+ "height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}),
+ "bbox_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}),
+ "bbox_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}),
+ "class_name": ("STRING", {"default": "class_name"}),
+ "class_id": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}),
+ "prompt": ("STRING", {"default": "prompt", "multiline": True}),
+ },
+ "optional": {
+ "size_multiplier": ("FLOAT", {"default": [1.0], "forceInput": True}),
+ "fit_in_frame": ("BOOLEAN", {"default": True}),
+ }
+ }
+
+ def tracking(self, coordinates, class_name, class_id, width, height, bbox_width, bbox_height, prompt, size_multiplier=[1.0], fit_in_frame=True):
+ # Parse the coordinate list; its length defines the number of images in the batch
+ coordinates = coordinates.replace("'", '"')
+ coordinates = json.loads(coordinates)
+
+ tracked = {}
+ tracked[class_name] = {}
+ batch_size = len(coordinates)
+ # Initialize a list to hold the coordinates for the current ID
+ id_coordinates = []
+ if not size_multiplier or len(size_multiplier) != batch_size:
+ size_multiplier = [0] * batch_size
+ else:
+ size_multiplier = size_multiplier * (batch_size // len(size_multiplier)) + size_multiplier[:batch_size % len(size_multiplier)]
+ for i, coord in enumerate(coordinates):
+ x = coord['x']
+ y = coord['y']
+ adjusted_bbox_width = bbox_width * size_multiplier[i]
+ adjusted_bbox_height = bbox_height * size_multiplier[i]
+ # Calculate the top left and bottom right coordinates
+ top_left_x = x - adjusted_bbox_width // 2
+ top_left_y = y - adjusted_bbox_height // 2
+ bottom_right_x = x + adjusted_bbox_width // 2
+ bottom_right_y = y + adjusted_bbox_height // 2
+
+ if fit_in_frame:
+ # Clip the coordinates to the frame boundaries
+ top_left_x = max(0, top_left_x)
+ top_left_y = max(0, top_left_y)
+ bottom_right_x = min(width, bottom_right_x)
+ bottom_right_y = min(height, bottom_right_y)
+ # Ensure width and height are positive
+ adjusted_bbox_width = max(1, bottom_right_x - top_left_x)
+ adjusted_bbox_height = max(1, bottom_right_y - top_left_y)
+
+ # Update the coordinates with the new width and height
+ bottom_right_x = top_left_x + adjusted_bbox_width
+ bottom_right_y = top_left_y + adjusted_bbox_height
+
+ # Append the top left and bottom right coordinates to the list for the current ID
+ id_coordinates.append([top_left_x, top_left_y, bottom_right_x, bottom_right_y, width, height])
+
+ class_id = int(class_id)
+ # Assign the list of coordinates to the specified ID within the class_id dictionary
+ tracked[class_name][class_id] = id_coordinates
+
+ prompt_string = ""
+ for class_name, class_data in tracked.items():
+ for class_id in class_data.keys():
+ class_id_str = str(class_id)
+ # Use the incoming prompt for each class name and ID
+ prompt_string += f'"{class_id_str}.{class_name}": "({prompt})",\n'
+
+ # Remove the last comma and newline
+ prompt_string = prompt_string.rstrip(",\n")
+
+ return (tracked, prompt_string, width, height, bbox_width, bbox_height)
+
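+# Illustrative sketch of CreateInstanceDiffusionTracking's output (hypothetical
+# values, not executed): with class_name "head", class_id 1, a single
+# coordinate {x: 256, y: 256}, bbox 128x128, size_multiplier 1.0 and a
+# 512x512 frame, the node returns
+#   tracking == {"head": {1: [[192, 192, 320, 320, 512, 512]]}}
+#   prompt   == '"1.head": "(prompt)"'
+# alongside the width/height and bbox dimensions passed in.
+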
+class AppendInstanceDiffusionTracking:
+
+ RETURN_TYPES = ("TRACKING", "STRING",)
+ RETURN_NAMES = ("tracking", "prompt",)
+ FUNCTION = "append"
+ CATEGORY = "KJNodes/InstanceDiffusion"
+ DESCRIPTION = """
+Appends tracking data to be used with InstanceDiffusion:
+https://github.com/logtd/ComfyUI-InstanceDiffusion
+
+"""
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "tracking_1": ("TRACKING", {"forceInput": True}),
+ "tracking_2": ("TRACKING", {"forceInput": True}),
+ },
+ "optional": {
+ "prompt_1": ("STRING", {"default": "", "forceInput": True}),
+ "prompt_2": ("STRING", {"default": "", "forceInput": True}),
+ }
+ }
+
+ def append(self, tracking_1, tracking_2, prompt_1="", prompt_2=""):
+ tracking_copy = tracking_1.copy()
+ # Merge tracking_2 into a copy of tracking_1; existing class names have their IDs merged instead of raising an error
+ for class_name, class_data in tracking_2.items():
+ if class_name not in tracking_copy:
+ tracking_copy[class_name] = class_data
+ else:
+ # If the class name exists, merge the class data from tracking_2 into tracking_copy
+ # This will add new class IDs under the same class name without raising an error
+ tracking_copy[class_name].update(class_data)
+ prompt_string = prompt_1 + "," + prompt_2
+ return (tracking_copy, prompt_string)
+
+class InterpolateCoords:
+
+ RETURN_TYPES = ("STRING",)
+ RETURN_NAMES = ("coordinates",)
+ FUNCTION = "interpolate"
+ CATEGORY = "KJNodes/experimental"
+ DESCRIPTION = """
+Interpolates coordinates based on a curve.
+"""
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "coordinates": ("STRING", {"forceInput": True}),
+ "interpolation_curve": ("FLOAT", {"forceInput": True}),
+
+ },
+ }
+
+ def interpolate(self, coordinates, interpolation_curve):
+ # Parse the JSON string to get the list of coordinates
+ coordinates = json.loads(coordinates.replace("'", '"'))
+
+ # Convert the list of dictionaries to a list of (x, y) tuples for easier processing
+ coordinates = [(coord['x'], coord['y']) for coord in coordinates]
+
+ # Calculate the total length of the original path
+ path_length = sum(np.linalg.norm(np.array(coordinates[i]) - np.array(coordinates[i-1]))
+ for i in range(1, len(coordinates)))
+
+ # Initialize variables for interpolation
+ interpolated_coords = []
+ current_length = 0
+ current_index = 0
+
+ # Iterate over the normalized curve
+ for normalized_length in interpolation_curve:
+ target_length = normalized_length * path_length # Convert to the original scale
+ while current_index < len(coordinates) - 1:
+ segment_start, segment_end = np.array(coordinates[current_index]), np.array(coordinates[current_index + 1])
+ segment_length = np.linalg.norm(segment_end - segment_start)
+ if current_length + segment_length >= target_length:
+ break
+ current_length += segment_length
+ current_index += 1
+
+ # Interpolate between the last two points
+ if current_index < len(coordinates) - 1:
+ p1, p2 = np.array(coordinates[current_index]), np.array(coordinates[current_index + 1])
+ segment_length = np.linalg.norm(p2 - p1)
+ if segment_length > 0:
+ t = (target_length - current_length) / segment_length
+ interpolated_point = p1 + t * (p2 - p1)
+ interpolated_coords.append(interpolated_point.tolist())
+ else:
+ interpolated_coords.append(p1.tolist())
+ else:
+ # If the target_length is at or beyond the end of the path, add the last coordinate
+ interpolated_coords.append(coordinates[-1])
+
+ # Convert back to string format if necessary
+ interpolated_coords_str = "[" + ", ".join([f"{{'x': {round(coord[0])}, 'y': {round(coord[1])}}}" for coord in interpolated_coords]) + "]"
+ print(interpolated_coords_str)
+
+ return (interpolated_coords_str,)
+
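+# Illustrative sketch of InterpolateCoords (hypothetical values, not executed):
+# interpolating the two-point path "[{'x': 0, 'y': 0}, {'x': 100, 'y': 0}]"
+# with the curve [0.0, 0.5, 1.0] resamples the path by arc length and returns
+#   "[{'x': 0, 'y': 0}, {'x': 50, 'y': 0}, {'x': 100, 'y': 0}]"
+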
+class DrawInstanceDiffusionTracking:
+
+ RETURN_TYPES = ("IMAGE",)
+ RETURN_NAMES = ("image", )
+ FUNCTION = "draw"
+ CATEGORY = "KJNodes/InstanceDiffusion"
+ DESCRIPTION = """
+Draws the tracking data from
+CreateInstanceDiffusionTracking -node.
+
+"""
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "image": ("IMAGE", ),
+ "tracking": ("TRACKING", {"forceInput": True}),
+ "box_line_width": ("INT", {"default": 2, "min": 1, "max": 10, "step": 1}),
+ "draw_text": ("BOOLEAN", {"default": True}),
+ "font": (folder_paths.get_filename_list("kjnodes_fonts"), ),
+ "font_size": ("INT", {"default": 20}),
+ },
+ }
+
+ def draw(self, image, tracking, box_line_width, draw_text, font, font_size):
+ import matplotlib.cm as cm
+
+ modified_images = []
+
+ colormap = cm.get_cmap('rainbow', len(tracking))
+ if draw_text:
+ font_path = folder_paths.get_full_path("kjnodes_fonts", font)
+ font = ImageFont.truetype(font_path, font_size)
+
+ # Iterate over each image in the batch
+ for i in range(image.shape[0]):
+ # Extract the current image and convert it to a PIL image
+ current_image = image[i, :, :, :].permute(2, 0, 1)
+ pil_image = transforms.ToPILImage()(current_image)
+
+ draw = ImageDraw.Draw(pil_image)
+
+ # Iterate over the bounding boxes for the current image
+ for j, (class_name, class_data) in enumerate(tracking.items()):
+ for class_id, bbox_list in class_data.items():
+ # Check if the current index is within the bounds of the bbox_list
+ if i < len(bbox_list):
+ bbox = bbox_list[i]
+ # Ensure bbox is a list or tuple before unpacking
+ if isinstance(bbox, (list, tuple)):
+ x1, y1, x2, y2, _, _ = bbox
+ # Convert coordinates to integers
+ x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
+ # Generate a color from the rainbow colormap
+ color = tuple(int(255 * x) for x in colormap(j / len(tracking)))[:3]
+ # Draw the bounding box on the image with the generated color
+ draw.rectangle([x1, y1, x2, y2], outline=color, width=box_line_width)
+ if draw_text:
+ # Draw the class name and ID as text above the box with the generated color
+ text = f"{class_id}.{class_name}"
+ # Calculate the width and height of the text
+ _, _, text_width, text_height = draw.textbbox((0, 0), text=text, font=font)
+ # Position the text above the top-left corner of the box
+ text_position = (x1, y1 - text_height)
+ draw.text(text_position, text, fill=color, font=font)
+ else:
+ print(f"Unexpected data type for bbox: {type(bbox)}")
+
+ # Convert the drawn image back to a torch tensor and adjust back to (H, W, C)
+ modified_image_tensor = transforms.ToTensor()(pil_image).permute(1, 2, 0)
+ modified_images.append(modified_image_tensor)
+
+ # Stack the modified images back into a batch
+ image_tensor_batch = torch.stack(modified_images).cpu().float()
+
+ return image_tensor_batch,
+
+class PointsEditor:
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "points_store": ("STRING", {"multiline": False}),
+ "coordinates": ("STRING", {"multiline": False}),
+ "neg_coordinates": ("STRING", {"multiline": False}),
+ "bbox_store": ("STRING", {"multiline": False}),
+ "bboxes": ("STRING", {"multiline": False}),
+ "bbox_format": (
+ [
+ 'xyxy',
+ 'xywh',
+ ],
+ ),
+ "width": ("INT", {"default": 512, "min": 8, "max": 4096, "step": 8}),
+ "height": ("INT", {"default": 512, "min": 8, "max": 4096, "step": 8}),
+ "normalize": ("BOOLEAN", {"default": False}),
+ },
+ "optional": {
+ "bg_image": ("IMAGE", ),
+ },
+ }
+
+ RETURN_TYPES = ("STRING", "STRING", "BBOX", "MASK", "IMAGE")
+ RETURN_NAMES = ("positive_coords", "negative_coords", "bbox", "bbox_mask", "cropped_image")
+ FUNCTION = "pointdata"
+ CATEGORY = "KJNodes/experimental"
+ DESCRIPTION = """
+# WORK IN PROGRESS
+Do not rely on this as part of your workflow yet;
+it probably contains lots of bugs and stability is not
+guaranteed!
+
+## Graphical editor to create coordinates
+
+**Shift + click** to add a positive (green) point.
+**Shift + right click** to add a negative (red) point.
+**Ctrl + click** to draw a box.
+**Right click on a point** to delete it.
+Note that you can't delete from start/end of the points array.
+
+To add an image, select the node and copy/paste or drag in the image,
+or connect the bg_image input (the first frame of the batch is used on queue).
+
+**THE IMAGE IS SAVED TO THE NODE AND WORKFLOW METADATA**
+You can clear the image from the context menu by right-clicking on the canvas.
+
+"""
+
+ def pointdata(self, points_store, bbox_store, width, height, coordinates, neg_coordinates, normalize, bboxes, bbox_format="xyxy", bg_image=None):
+ coordinates = json.loads(coordinates)
+ pos_coordinates = []
+ for coord in coordinates:
+ coord['x'] = int(round(coord['x']))
+ coord['y'] = int(round(coord['y']))
+ if normalize:
+ norm_x = coord['x'] / width
+ norm_y = coord['y'] / height
+ pos_coordinates.append({'x': norm_x, 'y': norm_y})
+ else:
+ pos_coordinates.append({'x': coord['x'], 'y': coord['y']})
+
+ if neg_coordinates:
+ coordinates = json.loads(neg_coordinates)
+ neg_coordinates = []
+ for coord in coordinates:
+ coord['x'] = int(round(coord['x']))
+ coord['y'] = int(round(coord['y']))
+ if normalize:
+ norm_x = coord['x'] / width
+ norm_y = coord['y'] / height
+ neg_coordinates.append({'x': norm_x, 'y': norm_y})
+ else:
+ neg_coordinates.append({'x': coord['x'], 'y': coord['y']})
+
+ # Create a blank mask
+ mask = np.zeros((height, width), dtype=np.uint8)
+ bboxes = json.loads(bboxes)
+ print(bboxes)
+ valid_bboxes = []
+ for bbox in bboxes:
+ if (bbox.get("startX") is None or
+ bbox.get("startY") is None or
+ bbox.get("endX") is None or
+ bbox.get("endY") is None):
+ continue # Skip this bounding box if any value is None
+ else:
+ # Ensure that endX and endY are greater than startX and startY
+ x_min = min(int(bbox["startX"]), int(bbox["endX"]))
+ y_min = min(int(bbox["startY"]), int(bbox["endY"]))
+ x_max = max(int(bbox["startX"]), int(bbox["endX"]))
+ y_max = max(int(bbox["startY"]), int(bbox["endY"]))
+
+ valid_bboxes.append((x_min, y_min, x_max, y_max))
+
+ bboxes_xyxy = []
+ for bbox in valid_bboxes:
+ x_min, y_min, x_max, y_max = bbox
+ bboxes_xyxy.append((x_min, y_min, x_max, y_max))
+ mask[y_min:y_max, x_min:x_max] = 1 # Fill the bounding box area with 1s
+
+ if bbox_format == "xywh":
+ bboxes_xywh = []
+ for bbox in valid_bboxes:
+ x_min, y_min, x_max, y_max = bbox
+ width = x_max - x_min
+ height = y_max - y_min
+ bboxes_xywh.append((x_min, y_min, width, height))
+ bboxes = bboxes_xywh
+ else:
+ bboxes = bboxes_xyxy
+
+ mask_tensor = torch.from_numpy(mask)
+ mask_tensor = mask_tensor.unsqueeze(0).float().cpu()
+
+ if bg_image is not None and len(valid_bboxes) > 0:
+ x_min, y_min, x_max, y_max = bboxes[0]
+ cropped_image = bg_image[:, y_min:y_max, x_min:x_max, :]
+
+ elif bg_image is not None:
+ cropped_image = bg_image
+
+ if bg_image is None:
+ return (json.dumps(pos_coordinates), json.dumps(neg_coordinates), bboxes, mask_tensor)
+ else:
+ transform = transforms.ToPILImage()
+ image = transform(bg_image[0].permute(2, 0, 1))
+ buffered = io.BytesIO()
+ image.save(buffered, format="JPEG", quality=75)
+
+ # Encode the image bytes to a Base64 string for the UI preview
+ img_bytes = buffered.getvalue()
+ img_base64 = base64.b64encode(img_bytes).decode('utf-8')
+
+ return {
+ "ui": {"bg_image": [img_base64]},
+ "result": (json.dumps(pos_coordinates), json.dumps(neg_coordinates), bboxes, mask_tensor, cropped_image)
+ }
\ No newline at end of file
diff --git a/ComfyUI-KJNodes/nodes/image_nodes.py b/ComfyUI-KJNodes/nodes/image_nodes.py
new file mode 100644
index 0000000000000000000000000000000000000000..de562bfcc19ca94d5c6a0a9a373f4868fda08f38
--- /dev/null
+++ b/ComfyUI-KJNodes/nodes/image_nodes.py
@@ -0,0 +1,2601 @@
+import numpy as np
+import time
+import torch
+import torch.nn.functional as F
+import torchvision.transforms as T
+import io
+import base64
+import random
+import math
+import os
+import re
+import json
+from PIL.PngImagePlugin import PngInfo
+try:
+ import cv2
+except:
+ print("OpenCV not installed")
+ pass
+from PIL import ImageGrab, ImageDraw, ImageFont, Image, ImageSequence, ImageOps
+
+from nodes import MAX_RESOLUTION, SaveImage
+from comfy_extras.nodes_mask import ImageCompositeMasked
+from comfy.cli_args import args
+from comfy.utils import ProgressBar, common_upscale
+import folder_paths
+import model_management
+
+script_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+
+class ImagePass:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ },
+ "optional": {
+ "image": ("IMAGE",),
+ },
+ }
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "passthrough"
+ CATEGORY = "KJNodes/image"
+ DESCRIPTION = """
+Passes the image through without modifying it.
+"""
+
+ def passthrough(self, image=None):
+ return image,
+
+class ColorMatch:
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "image_ref": ("IMAGE",),
+ "image_target": ("IMAGE",),
+ "method": (
+ [
+ 'mkl',
+ 'hm',
+ 'reinhard',
+ 'mvgd',
+ 'hm-mvgd-hm',
+ 'hm-mkl-hm',
+ ], {
+ "default": 'mkl'
+ }),
+ },
+ "optional": {
+ "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
+ }
+ }
+
+ CATEGORY = "KJNodes/image"
+
+ RETURN_TYPES = ("IMAGE",)
+ RETURN_NAMES = ("image",)
+ FUNCTION = "colormatch"
+ DESCRIPTION = """
+color-matcher enables color transfer across images, which comes in handy for automatic
+color-grading of photographs, paintings and film sequences as well as light-field
+and stop-motion corrections.
+
+The methods behind the mappings are based on the approach from Reinhard et al.,
+the Monge-Kantorovich Linearization (MKL) as proposed by Pitie et al., and an analytical solution
+to a Multi-Variate Gaussian Distribution (MVGD) transfer in conjunction with classical histogram
+matching. According to the color-matcher authors, the HM-MVGD-HM compound outperforms existing methods.
+https://github.com/hahnec/color-matcher/
+
+"""
+
+ def colormatch(self, image_ref, image_target, method, strength=1.0):
+ try:
+ from color_matcher import ColorMatcher
+ except:
+ raise Exception("Can't import color-matcher, did you install requirements.txt? Manual install: pip install color-matcher")
+ cm = ColorMatcher()
+ image_ref = image_ref.cpu()
+ image_target = image_target.cpu()
+ batch_size = image_target.size(0)
+ out = []
+ images_target = image_target.squeeze()
+ images_ref = image_ref.squeeze()
+
+ image_ref_np = images_ref.numpy()
+ images_target_np = images_target.numpy()
+
+ if image_ref.size(0) > 1 and image_ref.size(0) != batch_size:
+ raise ValueError("ColorMatch: Use either single reference image or a matching batch of reference images.")
+
+ for i in range(batch_size):
+ image_target_np = images_target_np if batch_size == 1 else images_target[i].numpy()
+ image_ref_np_i = image_ref_np if image_ref.size(0) == 1 else images_ref[i].numpy()
+ try:
+ image_result = cm.transfer(src=image_target_np, ref=image_ref_np_i, method=method)
+ except BaseException as e:
+ print(f"Error occurred during transfer: {e}")
+ break
+ # Apply the strength multiplier
+ image_result = image_target_np + strength * (image_result - image_target_np)
+ out.append(torch.from_numpy(image_result))
+
+ out = torch.stack(out, dim=0).to(torch.float32)
+ out.clamp_(0, 1)
+ return (out,)
+
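+# Illustrative usage sketch for ColorMatch (hypothetical tensors, not executed):
+#   out, = ColorMatch().colormatch(image_ref, image_target, "mkl", strength=0.5)
+# blends halfway between the target frames and their fully color-matched
+# versions, since the node computes result = target + strength * (matched - target).
+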
+class SaveImageWithAlpha:
+ def __init__(self):
+ self.output_dir = folder_paths.get_output_directory()
+ self.type = "output"
+ self.prefix_append = ""
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required":
+ {"images": ("IMAGE", ),
+ "mask": ("MASK", ),
+ "filename_prefix": ("STRING", {"default": "ComfyUI"})},
+ "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
+ }
+
+ RETURN_TYPES = ()
+ FUNCTION = "save_images_alpha"
+ OUTPUT_NODE = True
+ CATEGORY = "KJNodes/image"
+ DESCRIPTION = """
+Saves an image and mask as .PNG with the mask as the alpha channel.
+"""
+
+ def save_images_alpha(self, images, mask, filename_prefix="ComfyUI_image_with_alpha", prompt=None, extra_pnginfo=None):
+ from PIL.PngImagePlugin import PngInfo
+ filename_prefix += self.prefix_append
+ full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])
+ results = list()
+ if mask.dtype == torch.float16:
+ mask = mask.to(torch.float32)
+ def file_counter():
+ max_counter = 0
+ # Loop through the existing files
+ for existing_file in os.listdir(full_output_folder):
+ # Check if the file matches the expected format
+ match = re.fullmatch(fr"{filename}_(\d+)_?\.[a-zA-Z0-9]+", existing_file)
+ if match:
+ # Extract the numeric portion of the filename
+ file_counter = int(match.group(1))
+ # Update the maximum counter value if necessary
+ if file_counter > max_counter:
+ max_counter = file_counter
+ return max_counter
+
+ for image, alpha in zip(images, mask):
+ i = 255. * image.cpu().numpy()
+ a = 255. * alpha.cpu().numpy()
+ img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
+
+ # Resize the mask to match the image size
+ a_resized = Image.fromarray(a).resize(img.size, Image.LANCZOS)
+ a_resized = np.clip(a_resized, 0, 255).astype(np.uint8)
+ img.putalpha(Image.fromarray(a_resized, mode='L'))
+ metadata = None
+ if not args.disable_metadata:
+ metadata = PngInfo()
+ if prompt is not None:
+ metadata.add_text("prompt", json.dumps(prompt))
+ if extra_pnginfo is not None:
+ for x in extra_pnginfo:
+ metadata.add_text(x, json.dumps(extra_pnginfo[x]))
+
+ # Increment the counter by 1 to get the next available value
+ counter = file_counter() + 1
+ file = f"{filename}_{counter:05}.png"
+ img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=4)
+ results.append({
+ "filename": file,
+ "subfolder": subfolder,
+ "type": self.type
+ })
+
+ return { "ui": { "images": results } }
+
+class ImageConcanate:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "image1": ("IMAGE",),
+ "image2": ("IMAGE",),
+ "direction": (
+ [ 'right',
+ 'down',
+ 'left',
+ 'up',
+ ],
+ {
+ "default": 'right'
+ }),
+ "match_image_size": ("BOOLEAN", {"default": True}),
+ }}
+
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "concanate"
+ CATEGORY = "KJNodes/image"
+ DESCRIPTION = """
+Concatenates image2 to image1 in the specified direction.
+"""
+
+ def concanate(self, image1, image2, direction, match_image_size, first_image_shape=None):
+ # Check if the batch sizes are different
+ batch_size1 = image1.shape[0]
+ batch_size2 = image2.shape[0]
+
+ if batch_size1 != batch_size2:
+ # Calculate the number of repetitions needed
+ max_batch_size = max(batch_size1, batch_size2)
+ repeats1 = max_batch_size // batch_size1
+ repeats2 = max_batch_size // batch_size2
+
+ # Repeat the images to match the largest batch size
+ image1 = image1.repeat(repeats1, 1, 1, 1)
+ image2 = image2.repeat(repeats2, 1, 1, 1)
+
+ if match_image_size:
+ # Use first_image_shape if provided; otherwise, default to image1's shape
+ target_shape = first_image_shape if first_image_shape is not None else image1.shape
+
+ original_height = image2.shape[1]
+ original_width = image2.shape[2]
+ original_aspect_ratio = original_width / original_height
+
+ if direction in ['left', 'right']:
+ # Match the height and adjust the width to preserve aspect ratio
+ target_height = target_shape[1] # B, H, W, C format
+ target_width = int(target_height * original_aspect_ratio)
+ elif direction in ['up', 'down']:
+ # Match the width and adjust the height to preserve aspect ratio
+ target_width = target_shape[2] # B, H, W, C format
+ target_height = int(target_width / original_aspect_ratio)
+
+ # Adjust image2 to the expected format for common_upscale
+ image2_for_upscale = image2.movedim(-1, 1) # Move C to the second position (B, C, H, W)
+
+ # Resize image2 to match the target size while preserving aspect ratio
+ image2_resized = common_upscale(image2_for_upscale, target_width, target_height, "lanczos", "disabled")
+
+ # Adjust image2 back to the original format (B, H, W, C) after resizing
+ image2_resized = image2_resized.movedim(1, -1)
+ else:
+ image2_resized = image2
+
+ # Ensure both images have the same number of channels
+ channels_image1 = image1.shape[-1]
+ channels_image2 = image2_resized.shape[-1]
+
+ if channels_image1 != channels_image2:
+ if channels_image1 < channels_image2:
+ # Add alpha channel to image1 if image2 has it
+ alpha_channel = torch.ones((*image1.shape[:-1], channels_image2 - channels_image1), device=image1.device)
+ image1 = torch.cat((image1, alpha_channel), dim=-1)
+ else:
+ # Add alpha channel to image2 if image1 has it
+ alpha_channel = torch.ones((*image2_resized.shape[:-1], channels_image1 - channels_image2), device=image2_resized.device)
+ image2_resized = torch.cat((image2_resized, alpha_channel), dim=-1)
+
+
+ # Concatenate based on the specified direction
+ if direction == 'right':
+ concatenated_image = torch.cat((image1, image2_resized), dim=2) # Concatenate along width
+ elif direction == 'down':
+ concatenated_image = torch.cat((image1, image2_resized), dim=1) # Concatenate along height
+ elif direction == 'left':
+ concatenated_image = torch.cat((image2_resized, image1), dim=2) # Concatenate along width
+ elif direction == 'up':
+ concatenated_image = torch.cat((image2_resized, image1), dim=1) # Concatenate along height
+ return concatenated_image,
+
+import torch # Make sure you have PyTorch installed
+
+class ImageConcatFromBatch:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "images": ("IMAGE",),
+ "num_columns": ("INT", {"default": 3, "min": 1, "max": 255, "step": 1}),
+ "match_image_size": ("BOOLEAN", {"default": False}),
+ "max_resolution": ("INT", {"default": 4096}),
+ },
+ }
+
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "concat"
+ CATEGORY = "KJNodes/image"
+ DESCRIPTION = """
+Concatenates images from a batch into a grid with the specified number of columns.
+ """
+
+ def concat(self, images, num_columns, match_image_size, max_resolution):
+ # Assuming images is a batch of images (B, H, W, C)
+ batch_size, height, width, channels = images.shape
+ num_rows = (batch_size + num_columns - 1) // num_columns # Calculate number of rows
+
+ print(f"Initial dimensions: batch_size={batch_size}, height={height}, width={width}, channels={channels}")
+ print(f"num_rows={num_rows}, num_columns={num_columns}")
+
+ if match_image_size:
+ target_shape = images[0].shape
+
+ resized_images = []
+ for image in images:
+ original_height = image.shape[0]
+ original_width = image.shape[1]
+ original_aspect_ratio = original_width / original_height
+
+ if original_aspect_ratio > 1:
+ target_height = target_shape[0]
+ target_width = int(target_height * original_aspect_ratio)
+ else:
+ target_width = target_shape[1]
+ target_height = int(target_width / original_aspect_ratio)
+
+ print(f"Resizing image from ({original_height}, {original_width}) to ({target_height}, {target_width})")
+
+ # Resize the image to match the target size while preserving aspect ratio
+ resized_image = common_upscale(image.movedim(-1, 0), target_width, target_height, "lanczos", "disabled")
+ resized_image = resized_image.movedim(0, -1) # Move channels back to the last dimension
+ resized_images.append(resized_image)
+
+ # Convert the list of resized images back to a tensor
+ images = torch.stack(resized_images)
+
+ height, width = target_shape[:2] # Update height and width
+
+ # Initialize an empty grid
+ grid_height = num_rows * height
+ grid_width = num_columns * width
+
+ print(f"Grid dimensions before scaling: grid_height={grid_height}, grid_width={grid_width}")
+
+ # Original scale factor calculation remains unchanged
+ scale_factor = min(max_resolution / grid_height, max_resolution / grid_width, 1.0)
+
+ # Apply scale factor to height and width
+ scaled_height = height * scale_factor
+ scaled_width = width * scale_factor
+
+ # Round scaled dimensions to the nearest number divisible by 8
+ height = max(1, int(round(scaled_height / 8) * 8))
+ width = max(1, int(round(scaled_width / 8) * 8))
+
+ if abs(scaled_height - height) > 4:
+ height = max(1, int(round((scaled_height + 4) / 8) * 8))
+ if abs(scaled_width - width) > 4:
+ width = max(1, int(round((scaled_width + 4) / 8) * 8))
+
+ # Recalculate grid dimensions with adjusted height and width
+ grid_height = num_rows * height
+ grid_width = num_columns * width
+ print(f"Grid dimensions after scaling: grid_height={grid_height}, grid_width={grid_width}")
+ print(f"Final image dimensions: height={height}, width={width}")
+
+ grid = torch.zeros((grid_height, grid_width, channels), dtype=images.dtype)
+
+ for idx, image in enumerate(images):
+ resized_image = torch.nn.functional.interpolate(image.unsqueeze(0).permute(0, 3, 1, 2), size=(height, width), mode="bilinear").squeeze().permute(1, 2, 0)
+ row = idx // num_columns
+ col = idx % num_columns
+ grid[row*height:(row+1)*height, col*width:(col+1)*width, :] = resized_image
+
+ return grid.unsqueeze(0),
+
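+# Illustrative sketch of ImageConcatFromBatch's grid sizing (hypothetical
+# values, not executed): 10 images of 512x512 with num_columns=3 give
+# num_rows=4 and a 2048x1536 grid; with max_resolution=1024 the scale factor
+# is min(1024/2048, 1024/1536, 1.0) = 0.5, each cell rounds to 256x256
+# (already divisible by 8), and the final grid is 1024 px tall by 768 px wide.
+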
+class ImageGridComposite2x2:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "image1": ("IMAGE",),
+ "image2": ("IMAGE",),
+ "image3": ("IMAGE",),
+ "image4": ("IMAGE",),
+ }}
+
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "compositegrid"
+ CATEGORY = "KJNodes/image"
+ DESCRIPTION = """
+Concatenates the 4 input images into a 2x2 grid.
+"""
+
+ def compositegrid(self, image1, image2, image3, image4):
+ top_row = torch.cat((image1, image2), dim=2)
+ bottom_row = torch.cat((image3, image4), dim=2)
+ grid = torch.cat((top_row, bottom_row), dim=1)
+ return (grid,)
+
+class ImageGridComposite3x3:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "image1": ("IMAGE",),
+ "image2": ("IMAGE",),
+ "image3": ("IMAGE",),
+ "image4": ("IMAGE",),
+ "image5": ("IMAGE",),
+ "image6": ("IMAGE",),
+ "image7": ("IMAGE",),
+ "image8": ("IMAGE",),
+ "image9": ("IMAGE",),
+ }}
+
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "compositegrid"
+ CATEGORY = "KJNodes/image"
+ DESCRIPTION = """
+Concatenates the 9 input images into a 3x3 grid.
+"""
+
+ def compositegrid(self, image1, image2, image3, image4, image5, image6, image7, image8, image9):
+ top_row = torch.cat((image1, image2, image3), dim=2)
+ mid_row = torch.cat((image4, image5, image6), dim=2)
+ bottom_row = torch.cat((image7, image8, image9), dim=2)
+ grid = torch.cat((top_row, mid_row, bottom_row), dim=1)
+ return (grid,)
+
+class ImageBatchTestPattern:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "batch_size": ("INT", {"default": 1,"min": 1, "max": 255, "step": 1}),
+ "start_from": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}),
+ "text_x": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}),
+ "text_y": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}),
+ "width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}),
+ "height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}),
+ "font": (folder_paths.get_filename_list("kjnodes_fonts"), ),
+ "font_size": ("INT", {"default": 255,"min": 8, "max": 4096, "step": 1}),
+ }}
+
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "generatetestpattern"
+ CATEGORY = "KJNodes/text"
+
+ def generatetestpattern(self, batch_size, font, font_size, start_from, width, height, text_x, text_y):
+ out = []
+ # Generate the sequential numbers for each image
+ numbers = np.arange(start_from, start_from + batch_size)
+ font_path = folder_paths.get_full_path("kjnodes_fonts", font)
+
+ for number in numbers:
+ # Create a black image with the number as a random color text
+ image = Image.new("RGB", (width, height), color='black')
+ draw = ImageDraw.Draw(image)
+
+ # Generate a random color for the text
+ font_color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
+
+ font = ImageFont.truetype(font_path, font_size)
+
+ # Get the size of the text and position it in the center
+ text = str(number)
+
+ try:
+ draw.text((text_x, text_y), text, font=font, fill=font_color, features=['-liga'])
+ except:
+ draw.text((text_x, text_y), text, font=font, fill=font_color,)
+
+ # Convert the image to a numpy array and normalize the pixel values
+ image_np = np.array(image).astype(np.float32) / 255.0
+ image_tensor = torch.from_numpy(image_np).unsqueeze(0)
+ out.append(image_tensor)
+ out_tensor = torch.cat(out, dim=0)
+
+ return (out_tensor,)
+
+class ImageGrabPIL:
+
+ @classmethod
+ def IS_CHANGED(cls):
+
+ return
+
+ RETURN_TYPES = ("IMAGE",)
+ RETURN_NAMES = ("image",)
+ FUNCTION = "screencap"
+ CATEGORY = "KJNodes/experimental"
+ DESCRIPTION = """
+Captures an area specified by screen coordinates.
+Can be used for realtime diffusion with autoqueue.
+"""
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "x": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}),
+ "y": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}),
+ "width": ("INT", {"default": 512,"min": 0, "max": 4096, "step": 1}),
+ "height": ("INT", {"default": 512,"min": 0, "max": 4096, "step": 1}),
+ "num_frames": ("INT", {"default": 1,"min": 1, "max": 255, "step": 1}),
+ "delay": ("FLOAT", {"default": 0.1,"min": 0.0, "max": 10.0, "step": 0.01}),
+ },
+ }
+
+ def screencap(self, x, y, width, height, num_frames, delay):
+ start_time = time.time()
+ captures = []
+ bbox = (x, y, x + width, y + height)
+
+ for _ in range(num_frames):
+ # Capture screen
+ screen_capture = ImageGrab.grab(bbox=bbox)
+ screen_capture_torch = torch.from_numpy(np.array(screen_capture, dtype=np.float32) / 255.0).unsqueeze(0)
+ captures.append(screen_capture_torch)
+
+ # Wait for a short delay if more than one frame is to be captured
+ if num_frames > 1:
+ time.sleep(delay)
+
+ elapsed_time = time.time() - start_time
+ print(f"screengrab took {elapsed_time} seconds.")
+
+ return (torch.cat(captures, dim=0),)
+
+class Screencap_mss:
+
+ @classmethod
+ def IS_CHANGED(s, **kwargs):
+ return float("NaN")
+
+ RETURN_TYPES = ("IMAGE",)
+ RETURN_NAMES = ("image",)
+ FUNCTION = "screencap"
+ CATEGORY = "KJNodes/experimental"
+ DESCRIPTION = """
+Captures an area specified by screen coordinates.
+Can be used for realtime diffusion with autoqueue.
+"""
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "x": ("INT", {"default": 0,"min": 0, "max": 10000, "step": 1}),
+ "y": ("INT", {"default": 0,"min": 0, "max": 10000, "step": 1}),
+ "width": ("INT", {"default": 512,"min": 0, "max": 10000, "step": 1}),
+ "height": ("INT", {"default": 512,"min": 0, "max": 10000, "step": 1}),
+ "num_frames": ("INT", {"default": 1,"min": 1, "max": 255, "step": 1}),
+ "delay": ("FLOAT", {"default": 0.1,"min": 0.0, "max": 10.0, "step": 0.01}),
+ },
+ }
+
+ def screencap(self, x, y, width, height, num_frames, delay):
+ from mss import mss
+ captures = []
+ with mss() as sct:
+ bbox = {'top': y, 'left': x, 'width': width, 'height': height}
+
+ for _ in range(num_frames):
+ sct_img = sct.grab(bbox)
+ img_np = np.array(sct_img)
+ img_torch = torch.from_numpy(img_np[..., [2, 1, 0]]).float() / 255.0
+ captures.append(img_torch)
+
+ if num_frames > 1:
+ time.sleep(delay)
+
+ return (torch.stack(captures, 0),)
+
+class WebcamCaptureCV2:
+
+ @classmethod
+ def IS_CHANGED(cls):
+ return
+
+ RETURN_TYPES = ("IMAGE",)
+ RETURN_NAMES = ("image",)
+ FUNCTION = "capture"
+ CATEGORY = "KJNodes/experimental"
+ DESCRIPTION = """
+Captures a frame from a webcam using CV2.
+Can be used for realtime diffusion with autoqueue.
+"""
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "x": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}),
+ "y": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}),
+ "width": ("INT", {"default": 512,"min": 0, "max": 4096, "step": 1}),
+ "height": ("INT", {"default": 512,"min": 0, "max": 4096, "step": 1}),
+ "cam_index": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}),
+ "release": ("BOOLEAN", {"default": False}),
+ },
+ }
+
+ def capture(self, x, y, cam_index, width, height, release):
+ # Check if the camera index has changed or the capture object doesn't exist
+ if not hasattr(self, "cap") or self.cap is None or self.current_cam_index != cam_index:
+ if hasattr(self, "cap") and self.cap is not None:
+ self.cap.release()
+ self.current_cam_index = cam_index
+ self.cap = cv2.VideoCapture(cam_index)
+ try:
+ self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
+ self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
+ except:
+ pass
+ if not self.cap.isOpened():
+ raise Exception("Could not open webcam")
+
+ ret, frame = self.cap.read()
+ if not ret:
+ raise Exception("Failed to capture image from webcam")
+
+ # Crop the frame to the specified bbox
+ frame = frame[y:y+height, x:x+width]
+ img_torch = torch.from_numpy(frame[..., [2, 1, 0]]).float() / 255.0
+
+ if release:
+ self.cap.release()
+ self.cap = None
+
+ return (img_torch.unsqueeze(0),)
+
+class AddLabel:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "image":("IMAGE",),
+ "text_x": ("INT", {"default": 10, "min": 0, "max": 4096, "step": 1}),
+ "text_y": ("INT", {"default": 2, "min": 0, "max": 4096, "step": 1}),
+ "height": ("INT", {"default": 48, "min": 0, "max": 4096, "step": 1}),
+ "font_size": ("INT", {"default": 32, "min": 0, "max": 4096, "step": 1}),
+ "font_color": ("STRING", {"default": "white"}),
+ "label_color": ("STRING", {"default": "black"}),
+ "font": (folder_paths.get_filename_list("kjnodes_fonts"), ),
+ "text": ("STRING", {"default": "Text"}),
+ "direction": (
+ [ 'up',
+ 'down',
+ 'left',
+ 'right',
+ 'overlay'
+ ],
+ {
+ "default": 'up'
+ }),
+ },
+ "optional":{
+ "caption": ("STRING", {"default": "", "forceInput": True}),
+ }
+ }
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "addlabel"
+ CATEGORY = "KJNodes/text"
+ DESCRIPTION = """
+Creates a new label image with the given text and concatenates it
+above, below, beside or over the input image.
+Note that, except with overlay, this changes the input image's dimensions!
+Fonts are loaded from this folder:
+ComfyUI/custom_nodes/ComfyUI-KJNodes/fonts
+"""
+
+ def addlabel(self, image, text_x, text_y, text, height, font_size, font_color, label_color, font, direction, caption=""):
+ batch_size = image.shape[0]
+ width = image.shape[2]
+
+ font_path = os.path.join(script_directory, "fonts", "TTNorms-Black.otf") if font == "TTNorms-Black.otf" else folder_paths.get_full_path("kjnodes_fonts", font)
+
+ def process_image(input_image, caption_text):
+ if direction == 'overlay':
+ pil_image = Image.fromarray((input_image.cpu().numpy() * 255).astype(np.uint8))
+ else:
+ label_image = Image.new("RGB", (width, height), label_color)
+ pil_image = label_image
+
+ draw = ImageDraw.Draw(pil_image)
+ font = ImageFont.truetype(font_path, font_size)
+
+ words = caption_text.split()
+
+ lines = []
+ current_line = []
+ current_line_width = 0
+ for word in words:
+ word_width = font.getbbox(word)[2]
+ if current_line_width + word_width <= width - 2 * text_x:
+ current_line.append(word)
+ current_line_width += word_width + font.getbbox(" ")[2] # Add space width
+ else:
+ lines.append(" ".join(current_line))
+ current_line = [word]
+ current_line_width = word_width
+
+ if current_line:
+ lines.append(" ".join(current_line))
+
+ y_offset = text_y
+ for line in lines:
+ try:
+ draw.text((text_x, y_offset), line, font=font, fill=font_color, features=['-liga'])
+ except:
+ draw.text((text_x, y_offset), line, font=font, fill=font_color)
+ y_offset += font_size # Move to the next line
+
+ processed_image = torch.from_numpy(np.array(pil_image).astype(np.float32) / 255.0).unsqueeze(0)
+ return processed_image
+
+ if caption == "":
+ processed_images = [process_image(img, text) for img in image]
+ else:
+ assert len(caption) == batch_size, f"Number of captions {(len(caption))} does not match number of images"
+ processed_images = [process_image(img, cap) for img, cap in zip(image, caption)]
+ processed_batch = torch.cat(processed_images, dim=0)
+
+ # Combine images based on direction
+ if direction == 'down':
+ combined_images = torch.cat((image, processed_batch), dim=1)
+ elif direction == 'up':
+ combined_images = torch.cat((processed_batch, image), dim=1)
+ elif direction == 'left':
+ processed_batch = torch.rot90(processed_batch, 3, (2, 3)).permute(0, 3, 1, 2)
+ combined_images = torch.cat((processed_batch, image), dim=2)
+ elif direction == 'right':
+ processed_batch = torch.rot90(processed_batch, 3, (2, 3)).permute(0, 3, 1, 2)
+ combined_images = torch.cat((image, processed_batch), dim=2)
+ else:
+ combined_images = processed_batch
+
+ return (combined_images,)
+
+class GetImageSizeAndCount:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "image": ("IMAGE",),
+ }}
+
+ RETURN_TYPES = ("IMAGE","INT", "INT", "INT",)
+ RETURN_NAMES = ("image", "width", "height", "count",)
+ FUNCTION = "getsize"
+ CATEGORY = "KJNodes/image"
+ DESCRIPTION = """
+Returns width, height and batch size of the image,
+and passes it through unchanged.
+
+"""
+
+ def getsize(self, image):
+ width = image.shape[2]
+ height = image.shape[1]
+ count = image.shape[0]
+ return {"ui": {
+ "text": [f"{count}x{width}x{height}"]},
+ "result": (image, width, height, count)
+ }
+
+class ImageBatchRepeatInterleaving:
+
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "repeat"
+ CATEGORY = "KJNodes/image"
+ DESCRIPTION = """
+Repeats each image in a batch by the specified number of times.
+Example: a batch of 5 images 0, 1, 2, 3, 4
+with repeats 2 becomes a batch of 10 images: 0, 0, 1, 1, 2, 2, 3, 3, 4, 4
+"""
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "images": ("IMAGE",),
+ "repeats": ("INT", {"default": 1, "min": 1, "max": 4096}),
+ },
+ }
+
+ def repeat(self, images, repeats):
+
+ repeated_images = torch.repeat_interleave(images, repeats=repeats, dim=0)
+ return (repeated_images, )
+
+class ImageUpscaleWithModelBatched:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "upscale_model": ("UPSCALE_MODEL",),
+ "images": ("IMAGE",),
+ "per_batch": ("INT", {"default": 16, "min": 1, "max": 4096, "step": 1}),
+ }}
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "upscale"
+ CATEGORY = "KJNodes/image"
+ DESCRIPTION = """
+Same as ComfyUI native model upscaling node,
+but allows setting sub-batches for reduced VRAM usage.
+"""
+ def upscale(self, upscale_model, images, per_batch):
+
+ device = model_management.get_torch_device()
+ upscale_model.to(device)
+ in_img = images.movedim(-1,-3)
+
+ steps = in_img.shape[0]
+ pbar = ProgressBar(steps)
+ t = []
+
+ for start_idx in range(0, in_img.shape[0], per_batch):
+ sub_images = upscale_model(in_img[start_idx:start_idx+per_batch].to(device))
+ t.append(sub_images.cpu())
+ # Calculate the number of images processed in this batch
+ batch_count = sub_images.shape[0]
+ # Update the progress bar by the number of images processed in this batch
+ pbar.update(batch_count)
+ upscale_model.cpu()
+
+ t = torch.cat(t, dim=0).permute(0, 2, 3, 1).cpu()
+
+ return (t,)
+
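+# Illustrative sketch of the sub-batching in ImageUpscaleWithModelBatched
+# (hypothetical values, not executed): with 40 input frames and per_batch=16
+# the upscale model is run on slices [0:16], [16:32] and [32:40]; each slice
+# is moved to the GPU, upscaled and returned to the CPU, so peak VRAM scales
+# with per_batch rather than with the full batch size.
+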
+class ImageNormalize_Neg1_To_1:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "images": ("IMAGE",),
+
+ }}
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "normalize"
+ CATEGORY = "KJNodes/image"
+ DESCRIPTION = """
+Normalizes the images to be in the range [-1, 1]
+"""
+
+ def normalize(self,images):
+ images = images * 2.0 - 1.0
+ return (images,)
+
+class RemapImageRange:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "image": ("IMAGE",),
+ "min": ("FLOAT", {"default": 0.0,"min": -10.0, "max": 1.0, "step": 0.01}),
+ "max": ("FLOAT", {"default": 1.0,"min": 0.0, "max": 10.0, "step": 0.01}),
+ "clamp": ("BOOLEAN", {"default": True}),
+ },
+ }
+
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "remap"
+ CATEGORY = "KJNodes/image"
+ DESCRIPTION = """
+Remaps the image values to the specified range.
+"""
+
+ def remap(self, image, min, max, clamp):
+ if image.dtype == torch.float16:
+ image = image.to(torch.float32)
+ image = min + image * (max - min)
+ if clamp:
+ image = torch.clamp(image, min=0.0, max=1.0)
+ return (image, )
+
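+# Illustrative sketch of RemapImageRange (hypothetical values, not executed):
+#   out = min + image * (max - min)
+# so with min=-0.5 and max=1.5 a pixel value of 0.25 maps to 0.0 and 0.75
+# maps to 1.0 (a contrast stretch); with clamp=True anything outside [0, 1]
+# is clipped afterwards.
+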
+class SplitImageChannels:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "image": ("IMAGE",),
+ },
+ }
+
+ RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "MASK")
+ RETURN_NAMES = ("red", "green", "blue", "mask")
+ FUNCTION = "split"
+ CATEGORY = "KJNodes/image"
+ DESCRIPTION = """
+Splits the image channels into images where the selected channel
+is repeated across all channels, and returns the alpha channel as a mask.
+"""
+
+ def split(self, image):
+ red = image[:, :, :, 0:1] # Red channel
+ green = image[:, :, :, 1:2] # Green channel
+ blue = image[:, :, :, 2:3] # Blue channel
+ alpha = image[:, :, :, 3:4] # Alpha channel
+ alpha = alpha.squeeze(-1)
+
+ # Repeat the selected channel for all channels
+ red = torch.cat([red, red, red], dim=3)
+ green = torch.cat([green, green, green], dim=3)
+ blue = torch.cat([blue, blue, blue], dim=3)
+ return (red, green, blue, alpha)
+
+class MergeImageChannels:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "red": ("IMAGE",),
+ "green": ("IMAGE",),
+ "blue": ("IMAGE",),
+
+ },
+ "optional": {
+ "alpha": ("MASK", {"default": None}),
+ },
+ }
+
+ RETURN_TYPES = ("IMAGE",)
+ RETURN_NAMES = ("image",)
+ FUNCTION = "merge"
+ CATEGORY = "KJNodes/image"
+ DESCRIPTION = """
+Merges channel data into an image.
+"""
+
+ def merge(self, red, green, blue, alpha=None):
+ image = torch.stack([
+ red[..., 0, None], # Red channel
+ green[..., 1, None], # Green channel
+ blue[..., 2, None] # Blue channel
+ ], dim=-1)
+ image = image.squeeze(-2)
+ if alpha is not None:
+ image = torch.cat([image, alpha.unsqueeze(-1)], dim=-1)
+ return (image,)
+
+class ImagePadForOutpaintMasked:
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "image": ("IMAGE",),
+ "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
+ "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
+ "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
+ "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
+ "feathering": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
+ },
+ "optional": {
+ "mask": ("MASK",),
+ }
+ }
+
+ RETURN_TYPES = ("IMAGE", "MASK")
+ FUNCTION = "expand_image"
+
+ CATEGORY = "image"
+
+ def expand_image(self, image, left, top, right, bottom, feathering, mask=None):
+ if mask is not None:
+ if torch.allclose(mask, torch.zeros_like(mask)):
+ print("Warning: The incoming mask is fully black. Handling it as None.")
+ mask = None
+ B, H, W, C = image.size()
+
+ new_image = torch.ones(
+ (B, H + top + bottom, W + left + right, C),
+ dtype=torch.float32,
+ ) * 0.5
+
+ new_image[:, top:top + H, left:left + W, :] = image
+
+ if mask is None:
+ new_mask = torch.ones(
+ (B, H + top + bottom, W + left + right),
+ dtype=torch.float32,
+ )
+
+ t = torch.zeros(
+ (B, H, W),
+ dtype=torch.float32
+ )
+ else:
+ # If a mask is provided, pad it to fit the new image size
+ mask = F.pad(mask, (left, right, top, bottom), mode='constant', value=0)
+ mask = 1 - mask
+ t = torch.zeros_like(mask)
+
+ if feathering > 0 and feathering * 2 < H and feathering * 2 < W:
+
+ for i in range(H):
+ for j in range(W):
+ dt = i if top != 0 else H
+ db = H - i if bottom != 0 else H
+
+ dl = j if left != 0 else W
+ dr = W - j if right != 0 else W
+
+ d = min(dt, db, dl, dr)
+
+ if d >= feathering:
+ continue
+
+ v = (feathering - d) / feathering
+
+ if mask is None:
+ t[:, i, j] = v * v
+ else:
+ t[:, top + i, left + j] = v * v
+
+ if mask is None:
+ new_mask[:, top:top + H, left:left + W] = t
+ return (new_image, new_mask,)
+ else:
+ return (new_image, mask,)
+
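+# Illustrative sketch of the feathering in ImagePadForOutpaintMasked
+# (hypothetical values, not executed): with feathering=64, a pixel 16 px away
+# from a padded edge gets
+#   v = (64 - 16) / 64 = 0.75  ->  mask weight v * v = 0.5625
+# so the inner mask ramps quadratically from 0 (fully kept) at 64 px or more
+# inside the original image up to ~1 right at the padded border.
+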
+class ImagePadForOutpaintTargetSize:
+ upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "image": ("IMAGE",),
+ "target_width": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
+ "target_height": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
+ "feathering": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
+ "upscale_method": (s.upscale_methods,),
+ },
+ "optional": {
+ "mask": ("MASK",),
+ }
+ }
+
+ RETURN_TYPES = ("IMAGE", "MASK")
+ FUNCTION = "expand_image"
+
+ CATEGORY = "image"
+
+ def expand_image(self, image, target_width, target_height, feathering, upscale_method, mask=None):
+ B, H, W, C = image.size()
+ new_height = H
+ new_width = W
+ # Calculate the scaling factor while maintaining aspect ratio
+ scaling_factor = min(target_width / W, target_height / H)
+
+ # Check if the image needs to be downscaled
+ if scaling_factor < 1:
+ image = image.movedim(-1,1)
+ # Calculate the new width and height after downscaling
+ new_width = int(W * scaling_factor)
+ new_height = int(H * scaling_factor)
+
+ # Downscale the image
+ image_scaled = common_upscale(image, new_width, new_height, upscale_method, "disabled").movedim(1,-1)
+ if mask is not None:
+ mask_scaled = mask.unsqueeze(0) # Add an extra dimension for batch size
+ mask_scaled = F.interpolate(mask_scaled, size=(new_height, new_width), mode="nearest")
+ mask_scaled = mask_scaled.squeeze(0) # Remove the extra dimension after interpolation
+ else:
+ mask_scaled = mask
+ else:
+ # If downscaling is not needed, use the original image dimensions
+ image_scaled = image
+ mask_scaled = mask
+
+ # Calculate how much padding is needed to reach the target dimensions
+ pad_top = max(0, (target_height - new_height) // 2)
+ pad_bottom = max(0, target_height - new_height - pad_top)
+ pad_left = max(0, (target_width - new_width) // 2)
+ pad_right = max(0, target_width - new_width - pad_left)
+
+ # Now call the original expand_image with the calculated padding
+ return ImagePadForOutpaintMasked.expand_image(self, image_scaled, pad_left, pad_top, pad_right, pad_bottom, feathering, mask_scaled)
+
+class ImageAndMaskPreview(SaveImage):
+ def __init__(self):
+ self.output_dir = folder_paths.get_temp_directory()
+ self.type = "temp"
+ self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstupvxyz") for x in range(5))
+ self.compress_level = 4
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "mask_opacity": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
+ "mask_color": ("STRING", {"default": "255, 255, 255"}),
+ "pass_through": ("BOOLEAN", {"default": False}),
+ },
+ "optional": {
+ "image": ("IMAGE",),
+ "mask": ("MASK",),
+ },
+ "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
+ }
+ RETURN_TYPES = ("IMAGE",)
+ RETURN_NAMES = ("composite",)
+ FUNCTION = "execute"
+ CATEGORY = "KJNodes"
+ DESCRIPTION = """
+Previews an image or a mask; when both inputs are used,
+the mask is composited on top of the image.
+With pass_through enabled the preview is disabled and the
+composite is returned from the composite slot instead;
+this allows the result to be passed on to video combine
+nodes, for example.
+"""
+
+ def execute(self, mask_opacity, mask_color, pass_through, filename_prefix="ComfyUI", image=None, mask=None, prompt=None, extra_pnginfo=None):
+ if mask is not None and image is None:
+ preview = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3)
+ elif mask is None and image is not None:
+ preview = image
+ elif mask is not None and image is not None:
+ mask_adjusted = mask * mask_opacity
+ mask_image = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3).clone()
+
+ if ',' in mask_color:
+ color_list = np.clip([int(channel) for channel in mask_color.split(',')], 0, 255) # RGB format
+ else:
+ mask_color = mask_color.lstrip('#')
+ color_list = [int(mask_color[i:i+2], 16) for i in (0, 2, 4)] # Hex format
+ mask_image[:, :, :, 0] = color_list[0] / 255 # Red channel
+ mask_image[:, :, :, 1] = color_list[1] / 255 # Green channel
+ mask_image[:, :, :, 2] = color_list[2] / 255 # Blue channel
+
+ preview, = ImageCompositeMasked.composite(self, image, mask_image, 0, 0, True, mask_adjusted)
+ if pass_through:
+ return (preview, )
+ return(self.save_images(preview, filename_prefix, prompt, extra_pnginfo))
+
+class CrossFadeImages:
+
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "crossfadeimages"
+ CATEGORY = "KJNodes/image"
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "images_1": ("IMAGE",),
+ "images_2": ("IMAGE",),
+ "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out", "bounce", "elastic", "glitchy", "exponential_ease_out"],),
+ "transition_start_index": ("INT", {"default": 1,"min": 0, "max": 4096, "step": 1}),
+ "transitioning_frames": ("INT", {"default": 1,"min": 0, "max": 4096, "step": 1}),
+ "start_level": ("FLOAT", {"default": 0.0,"min": 0.0, "max": 1.0, "step": 0.01}),
+ "end_level": ("FLOAT", {"default": 1.0,"min": 0.0, "max": 1.0, "step": 0.01}),
+ },
+ }
+
+ def crossfadeimages(self, images_1, images_2, transition_start_index, transitioning_frames, interpolation, start_level, end_level):
+
+ def crossfade(images_1, images_2, alpha):
+ crossfade = (1 - alpha) * images_1 + alpha * images_2
+ return crossfade
+ def ease_in(t):
+ return t * t
+ def ease_out(t):
+ return 1 - (1 - t) * (1 - t)
+ def ease_in_out(t):
+ return 3 * t * t - 2 * t * t * t
+ def bounce(t):
+ if t < 0.5:
+ return ease_out(t * 2) * 0.5
+ else:
+ return ease_in((t - 0.5) * 2) * 0.5 + 0.5
+ def elastic(t):
+ return math.sin(13 * math.pi / 2 * t) * math.pow(2, 10 * (t - 1))
+ def glitchy(t):
+ return t + 0.1 * math.sin(40 * t)
+ def exponential_ease_out(t):
+ return 1 - (1 - t) ** 4
+
+ easing_functions = {
+ "linear": lambda t: t,
+ "ease_in": ease_in,
+ "ease_out": ease_out,
+ "ease_in_out": ease_in_out,
+ "bounce": bounce,
+ "elastic": elastic,
+ "glitchy": glitchy,
+ "exponential_ease_out": exponential_ease_out,
+ }
+
+ crossfade_images = []
+
+ alphas = torch.linspace(start_level, end_level, transitioning_frames)
+ for i in range(transitioning_frames):
+ alpha = alphas[i]
+ image1 = images_1[i + transition_start_index]
+ image2 = images_2[i + transition_start_index]
+ easing_function = easing_functions.get(interpolation)
+ alpha = easing_function(alpha) # Apply the easing function to the alpha value
+
+ crossfade_image = crossfade(image1, image2, alpha)
+ crossfade_images.append(crossfade_image)
+
+ # Convert crossfade_images to tensor
+ crossfade_images = torch.stack(crossfade_images, dim=0)
+ # Get the last frame result of the interpolation
+ last_frame = crossfade_images[-1]
+ # Calculate the number of remaining frames from images_2
+ remaining_frames = len(images_2) - (transition_start_index + transitioning_frames)
+ # Crossfade the remaining frames with the last used alpha value
+ for i in range(remaining_frames):
+ alpha = alphas[-1]
+ image1 = images_1[i + transition_start_index + transitioning_frames]
+ image2 = images_2[i + transition_start_index + transitioning_frames]
+ easing_function = easing_functions.get(interpolation)
+ alpha = easing_function(alpha) # Apply the easing function to the alpha value
+
+ crossfade_image = crossfade(image1, image2, alpha)
+ crossfade_images = torch.cat([crossfade_images, crossfade_image.unsqueeze(0)], dim=0)
+ # Append the beginning of images_1
+ beginning_images_1 = images_1[:transition_start_index]
+ crossfade_images = torch.cat([beginning_images_1, crossfade_images], dim=0)
+ return (crossfade_images, )
+
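+# Illustrative sketch of CrossFadeImages (hypothetical values, not executed):
+# with two 16-frame batches, transition_start_index=4 and
+# transitioning_frames=8, the result keeps frames 0-3 straight from images_1,
+# crossfades frames 4-11 with an eased alpha running from start_level to
+# end_level, and blends frames 12-15 at the final alpha value, 16 frames in total.
+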
+class CrossFadeImagesMulti:
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "crossfadeimages"
+ CATEGORY = "KJNodes/image"
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "inputcount": ("INT", {"default": 2, "min": 2, "max": 1000, "step": 1}),
+ "image_1": ("IMAGE",),
+ "image_2": ("IMAGE",),
+ "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out", "bounce", "elastic", "glitchy", "exponential_ease_out"],),
+ "transitioning_frames": ("INT", {"default": 1,"min": 0, "max": 4096, "step": 1}),
+ },
+ }
+
+ def crossfadeimages(self, inputcount, transitioning_frames, interpolation, **kwargs):
+
+ def crossfade(images_1, images_2, alpha):
+ crossfade = (1 - alpha) * images_1 + alpha * images_2
+ return crossfade
+ def ease_in(t):
+ return t * t
+ def ease_out(t):
+ return 1 - (1 - t) * (1 - t)
+ def ease_in_out(t):
+ return 3 * t * t - 2 * t * t * t
+ def bounce(t):
+ if t < 0.5:
+ return ease_out(t * 2) * 0.5
+ else:
+ return ease_in((t - 0.5) * 2) * 0.5 + 0.5
+ def elastic(t):
+ return math.sin(13 * math.pi / 2 * t) * math.pow(2, 10 * (t - 1))
+ def glitchy(t):
+ return t + 0.1 * math.sin(40 * t)
+ def exponential_ease_out(t):
+ return 1 - (1 - t) ** 4
+
+ easing_functions = {
+ "linear": lambda t: t,
+ "ease_in": ease_in,
+ "ease_out": ease_out,
+ "ease_in_out": ease_in_out,
+ "bounce": bounce,
+ "elastic": elastic,
+ "glitchy": glitchy,
+ "exponential_ease_out": exponential_ease_out,
+ }
+
+ image_1 = kwargs["image_1"]
+ height = image_1.shape[1]
+ width = image_1.shape[2]
+
+ easing_function = easing_functions[interpolation]
+
+ for c in range(1, inputcount):
+ frames = []
+ new_image = kwargs[f"image_{c + 1}"]
+ new_image_height = new_image.shape[1]
+ new_image_width = new_image.shape[2]
+
+ if new_image_height != height or new_image_width != width:
+ new_image = common_upscale(new_image.movedim(-1, 1), width, height, "lanczos", "disabled")
+ new_image = new_image.movedim(1, -1) # Move channels back to the last dimension
+
+ last_frame_image_1 = image_1[-1]
+ first_frame_image_2 = new_image[0]
+
+ for frame in range(transitioning_frames):
+                t = frame / max(transitioning_frames - 1, 1)
+ alpha = easing_function(t)
+ alpha_tensor = torch.tensor(alpha, dtype=last_frame_image_1.dtype, device=last_frame_image_1.device)
+ frame_image = crossfade(last_frame_image_1, first_frame_image_2, alpha_tensor)
+ frames.append(frame_image)
+
+ frames = torch.stack(frames)
+ image_1 = torch.cat((image_1, frames, new_image), dim=0)
+
+ return image_1,
+
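+# Worked example for CrossFadeImagesMulti above (illustrative, derived from the loop):
+# with inputcount=3, three 10-frame inputs and transitioning_frames=5, each pass appends
+# 5 blended frames plus the next input, so the output batch is 10 + 5 + 10 + 5 + 10 = 40 frames.
+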
+def transition_images(images_1, images_2, alpha, transition_type, blur_radius, reverse):
+ width = images_1.shape[1]
+ height = images_1.shape[0]
+
+ mask = torch.zeros_like(images_1, device=images_1.device)
+
+ alpha = alpha.item()
+ if reverse:
+ alpha = 1 - alpha
+
+ #transitions from matteo's essential nodes
+ if "horizontal slide" in transition_type:
+ pos = round(width * alpha)
+ mask[:, :pos, :] = 1.0
+ elif "vertical slide" in transition_type:
+ pos = round(height * alpha)
+ mask[:pos, :, :] = 1.0
+ elif "box" in transition_type:
+ box_w = round(width * alpha)
+ box_h = round(height * alpha)
+ x1 = (width - box_w) // 2
+ y1 = (height - box_h) // 2
+ x2 = x1 + box_w
+ y2 = y1 + box_h
+ mask[y1:y2, x1:x2, :] = 1.0
+ elif "circle" in transition_type:
+ radius = math.ceil(math.sqrt(pow(width, 2) + pow(height, 2)) * alpha / 2)
+ c_x = width // 2
+ c_y = height // 2
+ x = torch.arange(0, width, dtype=torch.float32, device="cpu")
+ y = torch.arange(0, height, dtype=torch.float32, device="cpu")
+ y, x = torch.meshgrid((y, x), indexing="ij")
+ circle = ((x - c_x) ** 2 + (y - c_y) ** 2) <= (radius ** 2)
+ mask[circle] = 1.0
+ elif "horizontal door" in transition_type:
+ bar = math.ceil(height * alpha / 2)
+ if bar > 0:
+ mask[:bar, :, :] = 1.0
+ mask[-bar:,:, :] = 1.0
+ elif "vertical door" in transition_type:
+ bar = math.ceil(width * alpha / 2)
+ if bar > 0:
+ mask[:, :bar,:] = 1.0
+ mask[:, -bar:,:] = 1.0
+ elif "fade" in transition_type:
+ mask[:, :, :] = alpha
+
+ mask = gaussian_blur(mask, blur_radius)
+
+ return images_1 * (1 - mask) + images_2 * mask
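+
+# Illustrative masks produced above at alpha=0.5 on a 512x512 frame (derived from the branches):
+# "horizontal slide" -> left 256 columns set to 1, "vertical slide" -> top 256 rows,
+# "box" -> centered 256x256 box, "circle" -> disc of radius ceil(sqrt(2)*512/4) = 182 px,
+# "horizontal door"/"vertical door" -> 128 px bars on opposite edges, "fade" -> uniform 0.5.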
+
+def ease_in(t):
+ return t * t
+def ease_out(t):
+ return 1 - (1 - t) * (1 - t)
+def ease_in_out(t):
+ return 3 * t * t - 2 * t * t * t
+def bounce(t):
+ if t < 0.5:
+ return ease_out(t * 2) * 0.5
+ else:
+ return ease_in((t - 0.5) * 2) * 0.5 + 0.5
+def elastic(t):
+ return math.sin(13 * math.pi / 2 * t) * math.pow(2, 10 * (t - 1))
+def glitchy(t):
+ return t + 0.1 * math.sin(40 * t)
+def exponential_ease_out(t):
+ return 1 - (1 - t) ** 4
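+
+# Sanity-check values for the easing curves at t = 0.5 (computed from the formulas above):
+# >>> [round(f(0.5), 4) for f in (ease_in, ease_out, ease_in_out, exponential_ease_out)]
+# [0.25, 0.75, 0.5, 0.9375]
+# bounce(0.5) = 0.5, elastic(0.5) ~ -0.022, glitchy(0.5) ~ 0.591.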
+
+def gaussian_blur(mask, blur_radius):
+ if blur_radius > 0:
+ kernel_size = int(blur_radius * 2) + 1
+ if kernel_size % 2 == 0:
+ kernel_size += 1 # Ensure kernel size is odd
+ sigma = blur_radius / 3
+ x = torch.arange(-kernel_size // 2 + 1, kernel_size // 2 + 1, dtype=torch.float32)
+ x = torch.exp(-0.5 * (x / sigma) ** 2)
+ kernel1d = x / x.sum()
+ kernel2d = kernel1d[:, None] * kernel1d[None, :]
+ kernel2d = kernel2d.to(mask.device)
+ kernel2d = kernel2d.expand(mask.shape[2], 1, kernel2d.shape[0], kernel2d.shape[1])
+ mask = mask.permute(2, 0, 1).unsqueeze(0) # Change to [C, H, W] and add batch dimension
+ mask = F.conv2d(mask, kernel2d, padding=kernel_size // 2, groups=mask.shape[1])
+ mask = mask.squeeze(0).permute(1, 2, 0) # Change back to [H, W, C]
+ return mask
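+
+# Example of the kernel sizing above (illustrative): blur_radius=5 gives kernel_size=11
+# and sigma ~ 1.67; blur_radius=4.5 gives int(9)+1=10, bumped to 11 to keep it odd.
+# The outer product of the 1-D Gaussian forms a 2-D kernel applied per channel via grouped conv2d.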
+
+easing_functions = {
+ "linear": lambda t: t,
+ "ease_in": ease_in,
+ "ease_out": ease_out,
+ "ease_in_out": ease_in_out,
+ "bounce": bounce,
+ "elastic": elastic,
+ "glitchy": glitchy,
+ "exponential_ease_out": exponential_ease_out,
+}
+
+class TransitionImagesMulti:
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "transition"
+ CATEGORY = "KJNodes/image"
+ DESCRIPTION = """
+Creates transitions between images.
+"""
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "inputcount": ("INT", {"default": 2, "min": 2, "max": 1000, "step": 1}),
+ "image_1": ("IMAGE",),
+ "image_2": ("IMAGE",),
+ "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out", "bounce", "elastic", "glitchy", "exponential_ease_out"],),
+ "transition_type": (["horizontal slide", "vertical slide", "box", "circle", "horizontal door", "vertical door", "fade"],),
+ "transitioning_frames": ("INT", {"default": 1,"min": 0, "max": 4096, "step": 1}),
+ "blur_radius": ("FLOAT", {"default": 0.0,"min": 0.0, "max": 100.0, "step": 0.1}),
+ "reverse": ("BOOLEAN", {"default": False}),
+ "device": (["CPU", "GPU"], {"default": "CPU"}),
+ },
+ }
+
+ def transition(self, inputcount, transitioning_frames, transition_type, interpolation, device, blur_radius, reverse, **kwargs):
+
+ gpu = model_management.get_torch_device()
+
+ image_1 = kwargs["image_1"]
+ height = image_1.shape[1]
+ width = image_1.shape[2]
+
+ easing_function = easing_functions[interpolation]
+
+ for c in range(1, inputcount):
+ frames = []
+ new_image = kwargs[f"image_{c + 1}"]
+ new_image_height = new_image.shape[1]
+ new_image_width = new_image.shape[2]
+
+ if new_image_height != height or new_image_width != width:
+ new_image = common_upscale(new_image.movedim(-1, 1), width, height, "lanczos", "disabled")
+ new_image = new_image.movedim(1, -1) # Move channels back to the last dimension
+
+ last_frame_image_1 = image_1[-1]
+ first_frame_image_2 = new_image[0]
+ if device == "GPU":
+ last_frame_image_1 = last_frame_image_1.to(gpu)
+ first_frame_image_2 = first_frame_image_2.to(gpu)
+
+ if reverse:
+ last_frame_image_1, first_frame_image_2 = first_frame_image_2, last_frame_image_1
+
+ for frame in range(transitioning_frames):
+                t = frame / max(transitioning_frames - 1, 1)
+ alpha = easing_function(t)
+ alpha_tensor = torch.tensor(alpha, dtype=last_frame_image_1.dtype, device=last_frame_image_1.device)
+ frame_image = transition_images(last_frame_image_1, first_frame_image_2, alpha_tensor, transition_type, blur_radius, reverse)
+ frames.append(frame_image)
+
+ frames = torch.stack(frames).cpu()
+ image_1 = torch.cat((image_1, frames, new_image), dim=0)
+
+ return image_1.cpu(),
+
+class TransitionImagesInBatch:
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "transition"
+ CATEGORY = "KJNodes/image"
+ DESCRIPTION = """
+Creates transitions between images in a batch.
+"""
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "images": ("IMAGE",),
+ "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out", "bounce", "elastic", "glitchy", "exponential_ease_out"],),
+ "transition_type": (["horizontal slide", "vertical slide", "box", "circle", "horizontal door", "vertical door", "fade"],),
+ "transitioning_frames": ("INT", {"default": 1,"min": 0, "max": 4096, "step": 1}),
+ "blur_radius": ("FLOAT", {"default": 0.0,"min": 0.0, "max": 100.0, "step": 0.1}),
+ "reverse": ("BOOLEAN", {"default": False}),
+ "device": (["CPU", "GPU"], {"default": "CPU"}),
+ },
+ }
+
+ #transitions from matteo's essential nodes
+ def transition(self, images, transitioning_frames, transition_type, interpolation, device, blur_radius, reverse):
+ if images.shape[0] == 1:
+ return images,
+
+ gpu = model_management.get_torch_device()
+
+ easing_function = easing_functions[interpolation]
+
+ images_list = []
+ for i in range(images.shape[0] - 1):
+ frames = []
+ image_1 = images[i]
+ image_2 = images[i + 1]
+
+ if device == "GPU":
+ image_1 = image_1.to(gpu)
+ image_2 = image_2.to(gpu)
+
+ if reverse:
+ image_1, image_2 = image_2, image_1
+
+ for frame in range(transitioning_frames):
+                t = frame / max(transitioning_frames - 1, 1)
+ alpha = easing_function(t)
+ alpha_tensor = torch.tensor(alpha, dtype=image_1.dtype, device=image_1.device)
+ frame_image = transition_images(image_1, image_2, alpha_tensor, transition_type, blur_radius, reverse)
+ frames.append(frame_image)
+
+ frames = torch.stack(frames).cpu()
+ images_list.append(frames)
+ images = torch.cat(images_list, dim=0)
+
+ return images.cpu(),
+
+class ShuffleImageBatch:
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "shuffle"
+ CATEGORY = "KJNodes/image"
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "images": ("IMAGE",),
+ "seed": ("INT", {"default": 123,"min": 0, "max": 0xffffffffffffffff, "step": 1}),
+ },
+ }
+
+ def shuffle(self, images, seed):
+ torch.manual_seed(seed)
+ B, H, W, C = images.shape
+ indices = torch.randperm(B)
+ shuffled_images = images[indices]
+
+ return shuffled_images,
+
+class GetImageRangeFromBatch:
+
+ RETURN_TYPES = ("IMAGE", "MASK", )
+ FUNCTION = "imagesfrombatch"
+ CATEGORY = "KJNodes/image"
+    DESCRIPTION = """
+Selects a range of images (and optionally masks) from a batch,
+starting at start_index and spanning num_frames.
+Set start_index to -1 to select the last num_frames frames.
+"""
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "start_index": ("INT", {"default": 0,"min": -1, "max": 4096, "step": 1}),
+ "num_frames": ("INT", {"default": 1,"min": 1, "max": 4096, "step": 1}),
+ },
+ "optional": {
+ "images": ("IMAGE",),
+ "masks": ("MASK",),
+ }
+ }
+
+ def imagesfrombatch(self, start_index, num_frames, images=None, masks=None):
+
+ chosen_images = None
+ chosen_masks = None
+
+ # Process images if provided
+ if images is not None:
+ if start_index == -1:
+ start_index = len(images) - num_frames
+ if start_index < 0 or start_index >= len(images):
+ raise ValueError("Start index is out of range")
+ end_index = start_index + num_frames
+ if end_index > len(images):
+ raise ValueError("End index is out of range")
+ chosen_images = images[start_index:end_index]
+
+ # Process masks if provided
+ if masks is not None:
+ if start_index == -1:
+ start_index = len(masks) - num_frames
+ if start_index < 0 or start_index >= len(masks):
+ raise ValueError("Start index is out of range for masks")
+ end_index = start_index + num_frames
+ if end_index > len(masks):
+ raise ValueError("End index is out of range for masks")
+ chosen_masks = masks[start_index:end_index]
+
+ return (chosen_images, chosen_masks,)
+
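+# Usage note for GetImageRangeFromBatch (illustrative): with a 32-frame batch,
+# start_index=-1 and num_frames=8 return the last 8 frames, since start_index is
+# rewritten to 32 - 8 = 24 above.
+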
+class GetImagesFromBatchIndexed:
+
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "indexedimagesfrombatch"
+ CATEGORY = "KJNodes/image"
+ DESCRIPTION = """
+Selects and returns the images at the specified indices as an image batch.
+"""
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "images": ("IMAGE",),
+ "indexes": ("STRING", {"default": "0, 1, 2", "multiline": True}),
+ },
+ }
+
+ def indexedimagesfrombatch(self, images, indexes):
+
+ # Parse the indexes string into a list of integers
+ index_list = [int(index.strip()) for index in indexes.split(',')]
+
+ # Convert list of indices to a PyTorch tensor
+ indices_tensor = torch.tensor(index_list, dtype=torch.long)
+
+ # Select the images at the specified indices
+ chosen_images = images[indices_tensor]
+
+ return (chosen_images,)
+
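+# Usage note for GetImagesFromBatchIndexed (illustrative): indexes "0, 5, 10" on a
+# 16-frame batch returns a new 3-frame batch containing frames 0, 5 and 10, in that order.
+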
+class InsertImagesToBatchIndexed:
+
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "insertimagesfrombatch"
+ CATEGORY = "KJNodes/image"
+ DESCRIPTION = """
+Inserts images at the specified indices into the original image batch.
+"""
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "original_images": ("IMAGE",),
+ "images_to_insert": ("IMAGE",),
+ "indexes": ("STRING", {"default": "0, 1, 2", "multiline": True}),
+ },
+ }
+
+ def insertimagesfrombatch(self, original_images, images_to_insert, indexes):
+
+ # Parse the indexes string into a list of integers
+ index_list = [int(index.strip()) for index in indexes.split(',')]
+
+ # Convert list of indices to a PyTorch tensor
+ indices_tensor = torch.tensor(index_list, dtype=torch.long)
+
+ # Ensure the images_to_insert is a tensor
+ if not isinstance(images_to_insert, torch.Tensor):
+ images_to_insert = torch.tensor(images_to_insert)
+
+ # Insert the images at the specified indices
+ for index, image in zip(indices_tensor, images_to_insert):
+ original_images[index] = image
+
+ return (original_images,)
+
+class ReplaceImagesInBatch:
+
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "replace"
+ CATEGORY = "KJNodes/image"
+ DESCRIPTION = """
+Replaces the images in a batch, starting from the specified start index,
+with the replacement images.
+"""
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "original_images": ("IMAGE",),
+ "replacement_images": ("IMAGE",),
+ "start_index": ("INT", {"default": 1,"min": 0, "max": 4096, "step": 1}),
+ },
+ }
+
+ def replace(self, original_images, replacement_images, start_index):
+ images = None
+ if start_index >= len(original_images):
+            raise ValueError("ReplaceImagesInBatch: Start index is out of range")
+ end_index = start_index + len(replacement_images)
+ if end_index > len(original_images):
+            raise ValueError("ReplaceImagesInBatch: End index is out of range")
+ # Create a copy of the original_images tensor
+ original_images_copy = original_images.clone()
+ original_images_copy[start_index:end_index] = replacement_images
+ images = original_images_copy
+ return (images, )
+
+
+class ReverseImageBatch:
+
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "reverseimagebatch"
+ CATEGORY = "KJNodes/image"
+ DESCRIPTION = """
+Reverses the order of the images in a batch.
+"""
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "images": ("IMAGE",),
+ },
+ }
+
+ def reverseimagebatch(self, images):
+ reversed_images = torch.flip(images, [0])
+ return (reversed_images, )
+
+class ImageBatchMulti:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "inputcount": ("INT", {"default": 2, "min": 2, "max": 1000, "step": 1}),
+ "image_1": ("IMAGE", ),
+ "image_2": ("IMAGE", ),
+ },
+ }
+
+ RETURN_TYPES = ("IMAGE",)
+ RETURN_NAMES = ("images",)
+ FUNCTION = "combine"
+ CATEGORY = "KJNodes/image"
+ DESCRIPTION = """
+Creates an image batch from multiple images.
+You can set how many inputs the node has
+with the **inputcount** value and by clicking update.
+"""
+
+ def combine(self, inputcount, **kwargs):
+ from nodes import ImageBatch
+ image_batch_node = ImageBatch()
+ image = kwargs["image_1"]
+ for c in range(1, inputcount):
+ new_image = kwargs[f"image_{c + 1}"]
+ image, = image_batch_node.batch(image, new_image)
+ return (image,)
+
+class ImageAddMulti:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "inputcount": ("INT", {"default": 2, "min": 2, "max": 1000, "step": 1}),
+ "image_1": ("IMAGE", ),
+ "image_2": ("IMAGE", ),
+ "blending": (
+ [ 'add',
+ 'subtract',
+ 'multiply',
+ 'difference',
+ ],
+ {
+ "default": 'add'
+ }),
+ "blend_amount": ("FLOAT", {"default": 0.5, "min": 0, "max": 1, "step": 0.01}),
+ },
+ }
+
+ RETURN_TYPES = ("IMAGE",)
+ RETURN_NAMES = ("images",)
+ FUNCTION = "add"
+ CATEGORY = "KJNodes/image"
+ DESCRIPTION = """
+Blends multiple images together using the selected blending mode.
+You can set how many inputs the node has
+with the **inputcount** value and by clicking update.
+"""
+
+ def add(self, inputcount, blending, blend_amount, **kwargs):
+ image = kwargs["image_1"]
+ for c in range(1, inputcount):
+ new_image = kwargs[f"image_{c + 1}"]
+ if blending == "add":
+ image = torch.add(image * blend_amount, new_image * blend_amount)
+ elif blending == "subtract":
+ image = torch.sub(image * blend_amount, new_image * blend_amount)
+ elif blending == "multiply":
+ image = torch.mul(image * blend_amount, new_image * blend_amount)
+ elif blending == "difference":
+ image = torch.sub(image, new_image)
+ return (image,)
+
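+# Note on the chained blending above (illustrative): with blending="add" and blend_amount=0.5,
+# two inputs give their average; a third input is then blended against that result,
+# i.e. ((i1*0.5 + i2*0.5)*0.5 + i3*0.5), so earlier inputs contribute progressively less.
+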
+class ImageConcatMulti:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "inputcount": ("INT", {"default": 2, "min": 2, "max": 1000, "step": 1}),
+ "image_1": ("IMAGE", ),
+ "image_2": ("IMAGE", ),
+ "direction": (
+ [ 'right',
+ 'down',
+ 'left',
+ 'up',
+ ],
+ {
+ "default": 'right'
+ }),
+ "match_image_size": ("BOOLEAN", {"default": False}),
+ },
+ }
+
+ RETURN_TYPES = ("IMAGE",)
+ RETURN_NAMES = ("images",)
+ FUNCTION = "combine"
+ CATEGORY = "KJNodes/image"
+ DESCRIPTION = """
+Concatenates multiple images into one image.
+You can set how many inputs the node has
+with the **inputcount** value and by clicking update.
+"""
+
+ def combine(self, inputcount, direction, match_image_size, **kwargs):
+ image = kwargs["image_1"]
+        first_image_shape = image.shape
+ for c in range(1, inputcount):
+ new_image = kwargs[f"image_{c + 1}"]
+ image, = ImageConcanate.concanate(self, image, new_image, direction, match_image_size, first_image_shape=first_image_shape)
+ first_image_shape = None
+ return (image,)
+
+class PreviewAnimation:
+ def __init__(self):
+ self.output_dir = folder_paths.get_temp_directory()
+ self.type = "temp"
+        self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstuvwxyz") for x in range(5))
+ self.compress_level = 1
+
+ methods = {"default": 4, "fastest": 0, "slowest": 6}
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required":
+ {
+ "fps": ("FLOAT", {"default": 8.0, "min": 0.01, "max": 1000.0, "step": 0.01}),
+ },
+ "optional": {
+ "images": ("IMAGE", ),
+ "masks": ("MASK", ),
+ },
+ }
+
+ RETURN_TYPES = ()
+ FUNCTION = "preview"
+ OUTPUT_NODE = True
+ CATEGORY = "KJNodes/image"
+
+ def preview(self, fps, images=None, masks=None):
+ filename_prefix = "AnimPreview"
+ full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)
+ results = list()
+
+ pil_images = []
+
+ if images is not None and masks is not None:
+ for image in images:
+ i = 255. * image.cpu().numpy()
+ img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
+ pil_images.append(img)
+ for mask in masks:
+ if pil_images:
+ mask_np = mask.cpu().numpy()
+ mask_np = np.clip(mask_np * 255, 0, 255).astype(np.uint8) # Convert to values between 0 and 255
+ mask_img = Image.fromarray(mask_np, mode='L')
+ img = pil_images.pop(0) # Remove and get the first image
+ img = img.convert("RGBA") # Convert base image to RGBA
+
+ # Create a new RGBA image based on the grayscale mask
+ rgba_mask_img = Image.new("RGBA", img.size, (255, 255, 255, 255))
+ rgba_mask_img.putalpha(mask_img) # Use the mask image as the alpha channel
+
+ # Composite the RGBA mask onto the base image
+ composited_img = Image.alpha_composite(img, rgba_mask_img)
+ pil_images.append(composited_img) # Add the composited image back
+
+ elif images is not None and masks is None:
+ for image in images:
+ i = 255. * image.cpu().numpy()
+ img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
+ pil_images.append(img)
+
+ elif masks is not None and images is None:
+ for mask in masks:
+ mask_np = 255. * mask.cpu().numpy()
+ mask_img = Image.fromarray(np.clip(mask_np, 0, 255).astype(np.uint8))
+ pil_images.append(mask_img)
+ else:
+ print("PreviewAnimation: No images or masks provided")
+ return { "ui": { "images": results, "animated": (None,), "text": "empty" }}
+
+ num_frames = len(pil_images)
+
+ c = len(pil_images)
+ for i in range(0, c, num_frames):
+ file = f"{filename}_{counter:05}_.webp"
+ pil_images[i].save(os.path.join(full_output_folder, file), save_all=True, duration=int(1000.0/fps), append_images=pil_images[i + 1:i + num_frames], lossless=False, quality=80, method=4)
+ results.append({
+ "filename": file,
+ "subfolder": subfolder,
+ "type": self.type
+ })
+ counter += 1
+
+ animated = num_frames != 1
+ return { "ui": { "images": results, "animated": (animated,), "text": [f"{num_frames}x{pil_images[0].size[0]}x{pil_images[0].size[1]}"] } }
+
+class ImageResizeKJ:
+ upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "image": ("IMAGE",),
+ "width": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8, }),
+ "height": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8, }),
+ "upscale_method": (s.upscale_methods,),
+ "keep_proportion": ("BOOLEAN", { "default": False }),
+ "divisible_by": ("INT", { "default": 2, "min": 0, "max": 512, "step": 1, }),
+ },
+ "optional" : {
+ "width_input": ("INT", { "forceInput": True}),
+ "height_input": ("INT", { "forceInput": True}),
+ "get_image_size": ("IMAGE",),
+ "crop": (["disabled","center"],),
+ }
+ }
+
+ RETURN_TYPES = ("IMAGE", "INT", "INT",)
+ RETURN_NAMES = ("IMAGE", "width", "height",)
+ FUNCTION = "resize"
+ CATEGORY = "KJNodes/image"
+ DESCRIPTION = """
+Resizes the image to the specified width and height.
+Size can be retrieved from the inputs, and the final scale
+is determined in this order of importance:
+- get_image_size
+- width_input and height_input
+- width and height widgets
+
+Keep proportion preserves the aspect ratio of the image,
+scaling it to fit within the target width and height.
+"""
+
+ def resize(self, image, width, height, keep_proportion, upscale_method, divisible_by,
+ width_input=None, height_input=None, get_image_size=None, crop="disabled"):
+ B, H, W, C = image.shape
+
+ if width_input:
+ width = width_input
+ if height_input:
+ height = height_input
+ if get_image_size is not None:
+ _, height, width, _ = get_image_size.shape
+
+ if keep_proportion and get_image_size is None:
+ # If one of the dimensions is zero, calculate it to maintain the aspect ratio
+ if width == 0 and height != 0:
+ ratio = height / H
+ width = round(W * ratio)
+ elif height == 0 and width != 0:
+ ratio = width / W
+ height = round(H * ratio)
+ elif width != 0 and height != 0:
+ # Scale based on which dimension is smaller in proportion to the desired dimensions
+ ratio = min(width / W, height / H)
+ width = round(W * ratio)
+ height = round(H * ratio)
+ else:
+ if width == 0:
+ width = W
+ if height == 0:
+ height = H
+
+ if divisible_by > 1 and get_image_size is None:
+ width = width - (width % divisible_by)
+ height = height - (height % divisible_by)
+
+ image = image.movedim(-1,1)
+ image = common_upscale(image, width, height, upscale_method, crop)
+ image = image.movedim(1,-1)
+
+ return(image, image.shape[2], image.shape[1],)
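+
+# Worked example of keep_proportion above (illustrative): a 1920x1080 input with
+# width=512, height=512 uses ratio = min(512/1920, 512/1080) ~ 0.2667, giving 512x288,
+# which already satisfies divisible_by=2.
+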
+import pathlib
+class LoadAndResizeImage:
+ _color_channels = ["alpha", "red", "green", "blue"]
+ @classmethod
+ def INPUT_TYPES(s):
+ input_dir = folder_paths.get_input_directory()
+ files = [f.name for f in pathlib.Path(input_dir).iterdir() if f.is_file()]
+ return {"required":
+ {
+ "image": (sorted(files), {"image_upload": True}),
+ "resize": ("BOOLEAN", { "default": False }),
+ "width": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8, }),
+ "height": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8, }),
+ "repeat": ("INT", { "default": 1, "min": 1, "max": 4096, "step": 1, }),
+ "keep_proportion": ("BOOLEAN", { "default": False }),
+ "divisible_by": ("INT", { "default": 2, "min": 0, "max": 512, "step": 1, }),
+ "mask_channel": (s._color_channels, {"tooltip": "Channel to use for the mask output"}),
+ "background_color": ("STRING", { "default": "", "tooltip": "Fills the alpha channel with the specified color."}),
+ },
+ }
+
+ CATEGORY = "KJNodes/image"
+ RETURN_TYPES = ("IMAGE", "MASK", "INT", "INT", "STRING",)
+ RETURN_NAMES = ("image", "mask", "width", "height","image_path",)
+ FUNCTION = "load_image"
+
+ def load_image(self, image, resize, width, height, repeat, keep_proportion, divisible_by, mask_channel, background_color):
+ from PIL import ImageColor, Image, ImageOps, ImageSequence
+ import numpy as np
+ import torch
+ image_path = folder_paths.get_annotated_filepath(image)
+
+ import node_helpers
+ img = node_helpers.pillow(Image.open, image_path)
+
+ # Process the background_color
+ if background_color:
+ try:
+ # Try to parse as RGB tuple
+ bg_color_rgba = tuple(int(x.strip()) for x in background_color.split(','))
+ except ValueError:
+ # If parsing fails, it might be a hex color or named color
+ if background_color.startswith('#') or background_color.lower() in ImageColor.colormap:
+ bg_color_rgba = ImageColor.getrgb(background_color)
+ else:
+ raise ValueError(f"Invalid background color: {background_color}")
+
+ bg_color_rgba += (255,) # Add alpha channel
+ else:
+ bg_color_rgba = None # No background color specified
+
+ output_images = []
+ output_masks = []
+ w, h = None, None
+
+ excluded_formats = ['MPO']
+
+ W, H = img.size
+ if resize:
+ if keep_proportion:
+ ratio = min(width / W, height / H)
+ width = round(W * ratio)
+ height = round(H * ratio)
+ else:
+ if width == 0:
+ width = W
+ if height == 0:
+ height = H
+
+ if divisible_by > 1:
+ width = width - (width % divisible_by)
+ height = height - (height % divisible_by)
+ else:
+ width, height = W, H
+
+ for frame in ImageSequence.Iterator(img):
+ frame = node_helpers.pillow(ImageOps.exif_transpose, frame)
+
+ if frame.mode == 'I':
+ frame = frame.point(lambda i: i * (1 / 255))
+
+ if frame.mode == 'P':
+ frame = frame.convert("RGBA")
+ elif 'A' in frame.getbands():
+ frame = frame.convert("RGBA")
+
+ # Extract alpha channel if it exists
+ if 'A' in frame.getbands() and bg_color_rgba:
+ alpha_mask = np.array(frame.getchannel('A')).astype(np.float32) / 255.0
+ alpha_mask = 1. - torch.from_numpy(alpha_mask)
+ bg_image = Image.new("RGBA", frame.size, bg_color_rgba)
+ # Composite the frame onto the background
+ frame = Image.alpha_composite(bg_image, frame)
+ else:
+ alpha_mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
+
+ image = frame.convert("RGB")
+
+ if len(output_images) == 0:
+ w = image.size[0]
+ h = image.size[1]
+
+ if image.size[0] != w or image.size[1] != h:
+ continue
+ if resize:
+ image = image.resize((width, height), Image.Resampling.BILINEAR)
+
+ image = np.array(image).astype(np.float32) / 255.0
+ image = torch.from_numpy(image)[None,]
+
+ c = mask_channel[0].upper()
+ if c in frame.getbands():
+ if resize:
+ frame = frame.resize((width, height), Image.Resampling.BILINEAR)
+ mask = np.array(frame.getchannel(c)).astype(np.float32) / 255.0
+ mask = torch.from_numpy(mask)
+ if c == 'A' and bg_color_rgba:
+ mask = alpha_mask
+ elif c == 'A':
+ mask = 1. - mask
+ else:
+ mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
+
+ output_images.append(image)
+ output_masks.append(mask.unsqueeze(0))
+
+ if len(output_images) > 1 and img.format not in excluded_formats:
+ output_image = torch.cat(output_images, dim=0)
+ output_mask = torch.cat(output_masks, dim=0)
+ else:
+ output_image = output_images[0]
+ output_mask = output_masks[0]
+ if repeat > 1:
+ output_image = output_image.repeat(repeat, 1, 1, 1)
+ output_mask = output_mask.repeat(repeat, 1, 1)
+
+ return (output_image, output_mask, width, height, image_path)
+
+
+ # @classmethod
+ # def IS_CHANGED(s, image, **kwargs):
+ # image_path = folder_paths.get_annotated_filepath(image)
+ # m = hashlib.sha256()
+ # with open(image_path, 'rb') as f:
+ # m.update(f.read())
+ # return m.digest().hex()
+
+ @classmethod
+ def VALIDATE_INPUTS(s, image):
+ if not folder_paths.exists_annotated_filepath(image):
+ return "Invalid image file: {}".format(image)
+
+ return True
+
+class LoadImagesFromFolderKJ:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "folder": ("STRING", {"default": ""}),
+ },
+ "optional": {
+ "image_load_cap": ("INT", {"default": 0, "min": 0, "step": 1}),
+ "start_index": ("INT", {"default": 0, "min": 0, "step": 1}),
+ }
+ }
+
+ RETURN_TYPES = ("IMAGE", "MASK", "INT", "STRING",)
+ RETURN_NAMES = ("image", "mask", "count", "image_path",)
+ FUNCTION = "load_images"
+
+ CATEGORY = "image"
+
+    def load_images(self, folder, image_load_cap=0, start_index=0):
+ if not os.path.isdir(folder):
+            raise FileNotFoundError(f"Folder '{folder}' cannot be found.")
+ dir_files = os.listdir(folder)
+ if len(dir_files) == 0:
+ raise FileNotFoundError(f"No files in directory '{folder}'.")
+
+ # Filter files by extension
+ valid_extensions = ['.jpg', '.jpeg', '.png', '.webp']
+ dir_files = [f for f in dir_files if any(f.lower().endswith(ext) for ext in valid_extensions)]
+
+ dir_files = sorted(dir_files)
+ dir_files = [os.path.join(folder, x) for x in dir_files]
+
+ # start at start_index
+ dir_files = dir_files[start_index:]
+
+ images = []
+ masks = []
+ image_path_list = []
+
+ limit_images = False
+ if image_load_cap > 0:
+ limit_images = True
+ image_count = 0
+
+ has_non_empty_mask = False
+
+ for image_path in dir_files:
+            if os.path.isdir(image_path):
+ continue
+ if limit_images and image_count >= image_load_cap:
+ break
+ i = Image.open(image_path)
+ i = ImageOps.exif_transpose(i)
+ image = i.convert("RGB")
+ image = np.array(image).astype(np.float32) / 255.0
+ image = torch.from_numpy(image)[None,]
+ if 'A' in i.getbands():
+ mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
+ mask = 1. - torch.from_numpy(mask)
+ has_non_empty_mask = True
+ else:
+ mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
+ images.append(image)
+ masks.append(mask)
+ image_path_list.append(image_path)
+ image_count += 1
+
+ if len(images) == 1:
+            return (images[0], masks[0], 1, image_path_list)
+
+ elif len(images) > 1:
+ image1 = images[0]
+ mask1 = None
+
+ for image2 in images[1:]:
+ if image1.shape[1:] != image2.shape[1:]:
+ image2 = common_upscale(image2.movedim(-1, 1), image1.shape[2], image1.shape[1], "bilinear", "center").movedim(1, -1)
+ image1 = torch.cat((image1, image2), dim=0)
+
+ for mask2 in masks[1:]:
+ if has_non_empty_mask:
+ if image1.shape[1:3] != mask2.shape:
+                        mask2 = torch.nn.functional.interpolate(mask2.unsqueeze(0).unsqueeze(0), size=(image1.shape[1], image1.shape[2]), mode='bilinear', align_corners=False)
+ mask2 = mask2.squeeze(0)
+ else:
+ mask2 = mask2.unsqueeze(0)
+ else:
+ mask2 = mask2.unsqueeze(0)
+
+ if mask1 is None:
+ mask1 = mask2
+ else:
+ mask1 = torch.cat((mask1, mask2), dim=0)
+
+ return (image1, mask1, len(images), image_path_list)
+
+class ImageGridtoBatch:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "image": ("IMAGE", ),
+ "columns": ("INT", {"default": 3, "min": 1, "max": 8, "tooltip": "The number of columns in the grid."}),
+            "rows": ("INT", {"default": 0, "min": 0, "max": 8, "tooltip": "The number of rows in the grid. Set to 0 for automatic calculation."}),
+ }
+ }
+
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "decompose"
+ CATEGORY = "KJNodes/image"
+ DESCRIPTION = "Converts a grid of images to a batch of images."
+
+ def decompose(self, image, columns, rows):
+ B, H, W, C = image.shape
+ print("input size: ", image.shape)
+
+ # Calculate cell width, rounding down
+ cell_width = W // columns
+
+        if rows == 0:
+            # If rows is 0, infer the number of full rows (assuming square cells)
+            rows = max(1, H // cell_width)
+        # Compute cell height from the given or inferred row count
+        cell_height = H // rows
+
+ # Crop the image to fit full cells
+ image = image[:, :rows*cell_height, :columns*cell_width, :]
+
+ # Reshape and permute the image to get the grid
+ image = image.view(B, rows, cell_height, columns, cell_width, C)
+ image = image.permute(0, 1, 3, 2, 4, 5).contiguous()
+ image = image.view(B, rows * columns, cell_height, cell_width, C)
+
+ # Reshape to the final batch tensor
+ img_tensor = image.view(-1, cell_height, cell_width, C)
+
+ return (img_tensor,)
+
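+# Worked example for ImageGridtoBatch (illustrative): a 1536x1024 grid image with
+# columns=3 and rows=0 yields 512x512 cells (rows inferred as 2) and returns a batch
+# of B*6 images ordered left-to-right, top-to-bottom.
+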
+class SaveImageKJ:
+ def __init__(self):
+ self.output_dir = folder_paths.get_output_directory()
+ self.type = "output"
+ self.prefix_append = ""
+ self.compress_level = 4
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "images": ("IMAGE", {"tooltip": "The images to save."}),
+ "filename_prefix": ("STRING", {"default": "ComfyUI", "tooltip": "The prefix for the file to save. This may include formatting information such as %date:yyyy-MM-dd% or %Empty Latent Image.width% to include values from nodes."}),
+ "output_folder": ("STRING", {"default": "output", "tooltip": "The folder to save the images to."}),
+ },
+ "optional": {
+ "caption_file_extension": ("STRING", {"default": ".txt", "tooltip": "The extension for the caption file."}),
+ "caption": ("STRING", {"forceInput": True, "tooltip": "string to save as .txt file"}),
+ },
+ "hidden": {
+ "prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"
+ },
+ }
+
+ RETURN_TYPES = ("STRING",)
+ RETURN_NAMES = ("filename",)
+ FUNCTION = "save_images"
+
+ OUTPUT_NODE = True
+
+ CATEGORY = "image"
+ DESCRIPTION = "Saves the input images to your ComfyUI output directory."
+
+ def save_images(self, images, output_folder, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None, caption=None, caption_file_extension=".txt"):
+ filename_prefix += self.prefix_append
+
+ full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])
+ if output_folder != "output":
+ if not os.path.exists(output_folder):
+ os.makedirs(output_folder, exist_ok=True)
+ full_output_folder = output_folder
+ results = list()
+ for (batch_number, image) in enumerate(images):
+ i = 255. * image.cpu().numpy()
+ img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
+ metadata = None
+ if not args.disable_metadata:
+ metadata = PngInfo()
+ if prompt is not None:
+ metadata.add_text("prompt", json.dumps(prompt))
+ if extra_pnginfo is not None:
+ for x in extra_pnginfo:
+ metadata.add_text(x, json.dumps(extra_pnginfo[x]))
+
+ filename_with_batch_num = filename.replace("%batch_num%", str(batch_number))
+ base_file_name = f"{filename_with_batch_num}_{counter:05}_"
+ file = f"{base_file_name}.png"
+ img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=self.compress_level)
+ results.append({
+ "filename": file,
+ "subfolder": subfolder,
+ "type": self.type
+ })
+ if caption is not None:
+ txt_file = base_file_name + caption_file_extension
+ file_path = os.path.join(full_output_folder, txt_file)
+ with open(file_path, 'w') as f:
+ f.write(caption)
+
+ counter += 1
+
+
+
+ return { "ui": {
+ "images": results },
+ "result": (file,) }
+
+to_pil_image = T.ToPILImage()
+
+class FastPreview:
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "image": ("IMAGE", ),
+ "format": (["JPEG", "PNG", "WEBP"], {"default": "JPEG"}),
+ "quality" : ("INT", {"default": 75, "min": 1, "max": 100, "step": 1}),
+ },
+ }
+
+ RETURN_TYPES = ()
+ FUNCTION = "preview"
+ CATEGORY = "KJNodes/experimental"
+ OUTPUT_NODE = True
+
+ def preview(self, image, format, quality):
+ pil_image = to_pil_image(image[0].permute(2, 0, 1))
+
+ with io.BytesIO() as buffered:
+ pil_image.save(buffered, format=format, quality=quality)
+ img_bytes = buffered.getvalue()
+
+ img_base64 = base64.b64encode(img_bytes).decode('utf-8')
+
+ return {
+ "ui": {"bg_image": [img_base64]},
+ "result": ()
+ }
+
+class ImageCropByMaskAndResize:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "image": ("IMAGE", ),
+ "mask": ("MASK", ),
+ "base_resolution": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8, }),
+ "padding": ("INT", { "default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1, }),
+ "min_crop_resolution": ("INT", { "default": 128, "min": 0, "max": MAX_RESOLUTION, "step": 8, }),
+ "max_crop_resolution": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8, }),
+
+ },
+ }
+
+ RETURN_TYPES = ("IMAGE", "MASK", "BBOX", )
+ RETURN_NAMES = ("images", "masks", "bbox",)
+ FUNCTION = "crop"
+ CATEGORY = "KJNodes/image"
+
+ def crop_by_mask(self, mask, padding=0, min_crop_resolution=None, max_crop_resolution=None):
+ iy, ix = (mask == 1).nonzero(as_tuple=True)
+ h0, w0 = mask.shape
+
+ if iy.numel() == 0:
+ x_c = w0 / 2.0
+ y_c = h0 / 2.0
+ width = 0
+ height = 0
+ else:
+ x_min = ix.min().item()
+ x_max = ix.max().item()
+ y_min = iy.min().item()
+ y_max = iy.max().item()
+
+ width = x_max - x_min
+ height = y_max - y_min
+
+ if width > w0 or height > h0:
+ raise Exception("Masked area out of bounds")
+
+ x_c = (x_min + x_max) / 2.0
+ y_c = (y_min + y_max) / 2.0
+
+ if min_crop_resolution:
+ width = max(width, min_crop_resolution)
+ height = max(height, min_crop_resolution)
+
+ if max_crop_resolution:
+ width = min(width, max_crop_resolution)
+ height = min(height, max_crop_resolution)
+
+ if w0 <= width:
+ x0 = 0
+ w = w0
+ else:
+ x0 = max(0, x_c - width / 2 - padding)
+ w = width + 2 * padding
+ if x0 + w > w0:
+ x0 = w0 - w
+
+ if h0 <= height:
+ y0 = 0
+ h = h0
+ else:
+ y0 = max(0, y_c - height / 2 - padding)
+ h = height + 2 * padding
+ if y0 + h > h0:
+ y0 = h0 - h
+
+ return (int(x0), int(y0), int(w), int(h))
+
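+    # Worked example for crop_by_mask above (illustrative): a 512x512 mask whose nonzero
+    # region spans x 100..300 and y 50..250, with padding=16 and min_crop_resolution=128,
+    # has width=height=200 (already above the minimum), so the crop becomes
+    # x0 = max(0, 200 - 100 - 16) = 84 and w = 200 + 2*16 = 232 (likewise y0 = 34, h = 232).
+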
+ def crop(self, image, mask, base_resolution, padding=0, min_crop_resolution=128, max_crop_resolution=512):
+ mask = mask.round()
+ image_list = []
+ mask_list = []
+ bbox_list = []
+
+ # First, collect all bounding boxes
+ bbox_params = []
+ aspect_ratios = []
+ for i in range(image.shape[0]):
+ x0, y0, w, h = self.crop_by_mask(mask[i], padding, min_crop_resolution, max_crop_resolution)
+ bbox_params.append((x0, y0, w, h))
+ aspect_ratios.append(w / h)
+
+ # Find maximum width and height
+ max_w = max([w for x0, y0, w, h in bbox_params])
+ max_h = max([h for x0, y0, w, h in bbox_params])
+ max_aspect_ratio = max(aspect_ratios)
+
+ # Ensure dimensions are divisible by 16
+ max_w = (max_w + 15) // 16 * 16
+ max_h = (max_h + 15) // 16 * 16
+ # Calculate common target dimensions
+ if max_aspect_ratio > 1:
+ target_width = base_resolution
+ target_height = int(base_resolution / max_aspect_ratio)
+ else:
+ target_height = base_resolution
+ target_width = int(base_resolution * max_aspect_ratio)
+
+ for i in range(image.shape[0]):
+ x0, y0, w, h = bbox_params[i]
+
+ # Adjust cropping to use maximum width and height
+ x_center = x0 + w / 2
+ y_center = y0 + h / 2
+
+ x0_new = int(max(0, x_center - max_w / 2))
+ y0_new = int(max(0, y_center - max_h / 2))
+ x1_new = int(min(x0_new + max_w, image.shape[2]))
+ y1_new = int(min(y0_new + max_h, image.shape[1]))
+ x0_new = x1_new - max_w
+ y0_new = y1_new - max_h
+
+ cropped_image = image[i][y0_new:y1_new, x0_new:x1_new, :]
+ cropped_mask = mask[i][y0_new:y1_new, x0_new:x1_new]
+
+ # Ensure dimensions are divisible by 16
+ target_width = (target_width + 15) // 16 * 16
+ target_height = (target_height + 15) // 16 * 16
+
+ cropped_image = cropped_image.unsqueeze(0).movedim(-1, 1) # Move C to the second position (B, C, H, W)
+ cropped_image = common_upscale(cropped_image, target_width, target_height, "lanczos", "disabled")
+ cropped_image = cropped_image.movedim(1, -1).squeeze(0)
+
+ cropped_mask = cropped_mask.unsqueeze(0).unsqueeze(0)
+ cropped_mask = common_upscale(cropped_mask, target_width, target_height, 'bilinear', "disabled")
+ cropped_mask = cropped_mask.squeeze(0).squeeze(0)
+
+ image_list.append(cropped_image)
+ mask_list.append(cropped_mask)
+ bbox_list.append((x0_new, y0_new, x1_new, y1_new))
+
+
+ return (torch.stack(image_list), torch.stack(mask_list), bbox_list)
+
+class ImageUncropByMask:
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required":
+ {
+ "destination": ("IMAGE",),
+ "source": ("IMAGE",),
+ "mask": ("MASK",),
+ "bbox": ("BBOX",),
+ },
+ }
+
+ CATEGORY = "KJNodes/image"
+ RETURN_TYPES = ("IMAGE",)
+ RETURN_NAMES = ("image",)
+ FUNCTION = "uncrop"
+
+ def uncrop(self, destination, source, mask, bbox=None):
+
+ output_list = []
+
+ B, H, W, C = destination.shape
+
+ for i in range(source.shape[0]):
+ x0, y0, x1, y1 = bbox[i]
+ bbox_height = y1 - y0
+ bbox_width = x1 - x0
+
+ # Resize source image to match the bounding box dimensions
+ #resized_source = F.interpolate(source[i].unsqueeze(0).movedim(-1, 1), size=(bbox_height, bbox_width), mode='bilinear', align_corners=False)
+ resized_source = common_upscale(source[i].unsqueeze(0).movedim(-1, 1), bbox_width, bbox_height, "lanczos", "disabled")
+ resized_source = resized_source.movedim(1, -1).squeeze(0)
+
+ # Resize mask to match the bounding box dimensions
+ resized_mask = common_upscale(mask[i].unsqueeze(0).unsqueeze(0), bbox_width, bbox_height, "bilinear", "disabled")
+ resized_mask = resized_mask.squeeze(0).squeeze(0)
+
+ # Calculate padding values
+ pad_left = x0
+ pad_right = W - x1
+ pad_top = y0
+ pad_bottom = H - y1
+
+ # Pad the resized source image and mask to fit the destination dimensions
+ padded_source = F.pad(resized_source, pad=(0, 0, pad_left, pad_right, pad_top, pad_bottom), mode='constant', value=0)
+ padded_mask = F.pad(resized_mask, pad=(pad_left, pad_right, pad_top, pad_bottom), mode='constant', value=0)
+
+ # Ensure the padded mask has the correct shape
+ padded_mask = padded_mask.unsqueeze(2).expand(-1, -1, destination[i].shape[2])
+ # Ensure the padded source has the correct shape
+ padded_source = padded_source.unsqueeze(2).expand(-1, -1, -1, destination[i].shape[2]).squeeze(2)
+
+ # Combine the destination and padded source images using the mask
+ result = destination[i] * (1.0 - padded_mask) + padded_source * padded_mask
+
+ output_list.append(result)
+
+
+ return (torch.stack(output_list),)
\ No newline at end of file
diff --git a/ComfyUI-KJNodes/nodes/intrinsic_lora_nodes.py b/ComfyUI-KJNodes/nodes/intrinsic_lora_nodes.py
new file mode 100644
index 0000000000000000000000000000000000000000..613ce2609edacdd55726e1c0d83f9ca027677844
--- /dev/null
+++ b/ComfyUI-KJNodes/nodes/intrinsic_lora_nodes.py
@@ -0,0 +1,115 @@
+import folder_paths
+import os
+import torch
+import torch.nn.functional as F
+from comfy.utils import ProgressBar, load_torch_file
+import comfy.sample
+from nodes import CLIPTextEncode
+
+script_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+folder_paths.add_model_folder_path("intrinsic_loras", os.path.join(script_directory, "intrinsic_loras"))
+
+class Intrinsic_lora_sampling:
+ def __init__(self):
+ self.loaded_lora = None
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "model": ("MODEL",),
+ "lora_name": (folder_paths.get_filename_list("intrinsic_loras"), ),
+ "task": (
+ [
+ 'depth map',
+ 'surface normals',
+ 'albedo',
+ 'shading',
+ ],
+ {
+ "default": 'depth map'
+ }),
+ "text": ("STRING", {"multiline": True, "default": ""}),
+ "clip": ("CLIP", ),
+ "vae": ("VAE", ),
+ "per_batch": ("INT", {"default": 16, "min": 1, "max": 4096, "step": 1}),
+ },
+ "optional": {
+ "image": ("IMAGE",),
+ "optional_latent": ("LATENT",),
+ },
+ }
+
+ RETURN_TYPES = ("IMAGE", "LATENT",)
+ FUNCTION = "onestepsample"
+ CATEGORY = "KJNodes"
+ DESCRIPTION = """
+Sampler to use the intrinsic loras:
+https://github.com/duxiaodan/intrinsic-lora
+These LoRAs are tiny and thus included
+with this node pack.
+"""
+
+ def onestepsample(self, model, lora_name, clip, vae, text, task, per_batch, image=None, optional_latent=None):
+ pbar = ProgressBar(3)
+
+ if optional_latent is None:
+ image_list = []
+ for start_idx in range(0, image.shape[0], per_batch):
+ sub_pixels = vae.vae_encode_crop_pixels(image[start_idx:start_idx+per_batch])
+ image_list.append(vae.encode(sub_pixels[:,:,:,:3]))
+ sample = torch.cat(image_list, dim=0)
+ else:
+ sample = optional_latent["samples"]
+ noise = torch.zeros(sample.size(), dtype=sample.dtype, layout=sample.layout, device="cpu")
+ prompt = task + "," + text
+ positive, = CLIPTextEncode.encode(self, clip, prompt)
+ negative = positive #negative shouldn't do anything in this scenario
+
+ pbar.update(1)
+
+ #custom model sampling to pass latent through as it is
+ class X0_PassThrough(comfy.model_sampling.EPS):
+ def calculate_denoised(self, sigma, model_output, model_input):
+ return model_output
+ def calculate_input(self, sigma, noise):
+ return noise
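+        # Note: with calculate_denoised returning model_output unchanged, the single euler
+        # step below just returns the raw model prediction, which is the intrinsic map
+        # (depth/normals/albedo/shading) these LoRAs are trained to produce.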
+ sampling_base = comfy.model_sampling.ModelSamplingDiscrete
+ sampling_type = X0_PassThrough
+
+ class ModelSamplingAdvanced(sampling_base, sampling_type):
+ pass
+ model_sampling = ModelSamplingAdvanced(model.model.model_config)
+
+ #load lora
+ model_clone = model.clone()
+ lora_path = folder_paths.get_full_path("intrinsic_loras", lora_name)
+ lora = load_torch_file(lora_path, safe_load=True)
+ self.loaded_lora = (lora_path, lora)
+
+ model_clone_with_lora = comfy.sd.load_lora_for_models(model_clone, None, lora, 1.0, 0)[0]
+
+ model_clone_with_lora.add_object_patch("model_sampling", model_sampling)
+
+ samples = {"samples": comfy.sample.sample(model_clone_with_lora, noise, 1, 1.0, "euler", "simple", positive, negative, sample,
+ denoise=1.0, disable_noise=True, start_step=0, last_step=1,
+ force_full_denoise=True, noise_mask=None, callback=None, disable_pbar=True, seed=None)}
+ pbar.update(1)
+
+ decoded = []
+ for start_idx in range(0, samples["samples"].shape[0], per_batch):
+ decoded.append(vae.decode(samples["samples"][start_idx:start_idx+per_batch]))
+ image_out = torch.cat(decoded, dim=0)
+
+ pbar.update(1)
+
+ if task == 'depth map':
+ imax = image_out.max()
+ imin = image_out.min()
+ image_out = (image_out-imin)/(imax-imin)
+ image_out = torch.max(image_out, dim=3, keepdim=True)[0].repeat(1, 1, 1, 3)
+ elif task == 'surface normals':
+ image_out = F.normalize(image_out * 2 - 1, dim=3) / 2 + 0.5
+ image_out = 1.0 - image_out
+ else:
+ image_out = image_out.clamp(-1.,1.)
+
+ return (image_out, samples,)
\ No newline at end of file
diff --git a/ComfyUI-KJNodes/nodes/mask_nodes.py b/ComfyUI-KJNodes/nodes/mask_nodes.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a132c7ee0a44ccc0e3ad35a6882ebc21998683a
--- /dev/null
+++ b/ComfyUI-KJNodes/nodes/mask_nodes.py
@@ -0,0 +1,1253 @@
+import torch
+import torch.nn.functional as F
+from torchvision.transforms import functional as TF
+from PIL import Image, ImageDraw, ImageFilter, ImageFont
+import scipy.ndimage
+import numpy as np
+from contextlib import nullcontext
+import os
+
+import model_management
+from comfy.utils import ProgressBar
+from comfy.utils import common_upscale
+from nodes import MAX_RESOLUTION
+
+import folder_paths
+
+from ..utility.utility import tensor2pil, pil2tensor
+
+script_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+
+class BatchCLIPSeg:
+
+ def __init__(self):
+ pass
+
+ @classmethod
+ def INPUT_TYPES(s):
+
+ return {"required":
+ {
+ "images": ("IMAGE",),
+ "text": ("STRING", {"multiline": False}),
+ "threshold": ("FLOAT", {"default": 0.5,"min": 0.0, "max": 10.0, "step": 0.001}),
+ "binary_mask": ("BOOLEAN", {"default": True}),
+ "combine_mask": ("BOOLEAN", {"default": False}),
+ "use_cuda": ("BOOLEAN", {"default": True}),
+ },
+ "optional":
+ {
+ "blur_sigma": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 100.0, "step": 0.1}),
+ "opt_model": ("CLIPSEGMODEL", ),
+ "prev_mask": ("MASK", {"default": None}),
+ "image_bg_level": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
+ "invert": ("BOOLEAN", {"default": False}),
+ }
+ }
+
+ CATEGORY = "KJNodes/masking"
+ RETURN_TYPES = ("MASK", "IMAGE", )
+ RETURN_NAMES = ("Mask", "Image", )
+ FUNCTION = "segment_image"
+ DESCRIPTION = """
+Segments an image or batch of images using CLIPSeg.
+"""
+
+ def segment_image(self, images, text, threshold, binary_mask, combine_mask, use_cuda, blur_sigma=0.0, opt_model=None, prev_mask=None, invert= False, image_bg_level=0.5):
+ from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
+ import torchvision.transforms as transforms
+ offload_device = model_management.unet_offload_device()
+ device = model_management.get_torch_device()
+ if not use_cuda:
+ device = torch.device("cpu")
+ dtype = model_management.unet_dtype()
+
+ if opt_model is None:
+ checkpoint_path = os.path.join(folder_paths.models_dir,'clip_seg', 'clipseg-rd64-refined-fp16')
+ if not hasattr(self, "model"):
+ try:
+ if not os.path.exists(checkpoint_path):
+ from huggingface_hub import snapshot_download
+ snapshot_download(repo_id="Kijai/clipseg-rd64-refined-fp16", local_dir=checkpoint_path, local_dir_use_symlinks=False)
+ self.model = CLIPSegForImageSegmentation.from_pretrained(checkpoint_path)
+ except:
+ checkpoint_path = "CIDAS/clipseg-rd64-refined"
+ self.model = CLIPSegForImageSegmentation.from_pretrained(checkpoint_path)
+ processor = CLIPSegProcessor.from_pretrained(checkpoint_path)
+
+ else:
+ self.model = opt_model['model']
+ processor = opt_model['processor']
+
+ self.model.to(dtype).to(device)
+
+ B, H, W, C = images.shape
+ images = images.to(device)
+
+ autocast_condition = (dtype != torch.float32) and not model_management.is_device_mps(device)
+ with torch.autocast(model_management.get_autocast_device(device), dtype=dtype) if autocast_condition else nullcontext():
+
+ PIL_images = [Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)) for image in images ]
+ prompt = [text] * len(images)
+ input_prc = processor(text=prompt, images=PIL_images, return_tensors="pt")
+
+ for key in input_prc:
+ input_prc[key] = input_prc[key].to(device)
+ outputs = self.model(**input_prc)
+
+ mask_tensor = torch.sigmoid(outputs.logits)
+ mask_tensor = (mask_tensor - mask_tensor.min()) / (mask_tensor.max() - mask_tensor.min())
+ mask_tensor = torch.where(mask_tensor > (threshold), mask_tensor, torch.tensor(0, dtype=torch.float))
+ print(mask_tensor.shape)
+ if len(mask_tensor.shape) == 2:
+ mask_tensor = mask_tensor.unsqueeze(0)
+ mask_tensor = F.interpolate(mask_tensor.unsqueeze(1), size=(H, W), mode='nearest')
+ mask_tensor = mask_tensor.squeeze(1)
+
+ self.model.to(offload_device)
+
+ if binary_mask:
+ mask_tensor = (mask_tensor > 0).float()
+ if blur_sigma > 0:
+ kernel_size = int(6 * int(blur_sigma) + 1)
+ blur = transforms.GaussianBlur(kernel_size=(kernel_size, kernel_size), sigma=(blur_sigma, blur_sigma))
+ mask_tensor = blur(mask_tensor)
+
+ if combine_mask:
+ mask_tensor = torch.max(mask_tensor, dim=0)[0]
+ mask_tensor = mask_tensor.unsqueeze(0).repeat(len(images),1,1)
+
+ del outputs
+ model_management.soft_empty_cache()
+
+ if prev_mask is not None:
+ if prev_mask.shape != mask_tensor.shape:
+ prev_mask = F.interpolate(prev_mask.unsqueeze(1), size=(H, W), mode='nearest')
+ mask_tensor = mask_tensor + prev_mask.to(device)
+            mask_tensor = torch.clamp(mask_tensor, min=0.0, max=1.0)
+
+ if invert:
+ mask_tensor = 1 - mask_tensor
+
+ image_tensor = images * mask_tensor.unsqueeze(-1) + (1 - mask_tensor.unsqueeze(-1)) * image_bg_level
+ image_tensor = torch.clamp(image_tensor, min=0.0, max=1.0).cpu().float()
+
+ mask_tensor = mask_tensor.cpu().float()
+
+ return mask_tensor, image_tensor,
+
+class DownloadAndLoadCLIPSeg:
+
+ def __init__(self):
+ pass
+
+ @classmethod
+ def INPUT_TYPES(s):
+
+ return {"required":
+ {
+ "model": (
+ [ 'Kijai/clipseg-rd64-refined-fp16',
+ 'CIDAS/clipseg-rd64-refined',
+ ],
+ ),
+ },
+ }
+
+ CATEGORY = "KJNodes/masking"
+ RETURN_TYPES = ("CLIPSEGMODEL",)
+ RETURN_NAMES = ("clipseg_model",)
+ FUNCTION = "segment_image"
+ DESCRIPTION = """
+Downloads and loads CLIPSeg model with huggingface_hub,
+to ComfyUI/models/clip_seg
+"""
+
+ def segment_image(self, model):
+ from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
+ checkpoint_path = os.path.join(folder_paths.models_dir,'clip_seg', os.path.basename(model))
+ if not hasattr(self, "model"):
+ if not os.path.exists(checkpoint_path):
+ from huggingface_hub import snapshot_download
+ snapshot_download(repo_id=model, local_dir=checkpoint_path, local_dir_use_symlinks=False)
+ self.model = CLIPSegForImageSegmentation.from_pretrained(checkpoint_path)
+
+ processor = CLIPSegProcessor.from_pretrained(checkpoint_path)
+
+ clipseg_model = {}
+ clipseg_model['model'] = self.model
+ clipseg_model['processor'] = processor
+
+ return clipseg_model,
+
+class CreateTextMask:
+
+ RETURN_TYPES = ("IMAGE", "MASK",)
+ FUNCTION = "createtextmask"
+ CATEGORY = "KJNodes/text"
+ DESCRIPTION = """
+Creates a text image and mask.
+Looks for fonts from this folder:
+ComfyUI/custom_nodes/ComfyUI-KJNodes/fonts
+
+If start_rotation and/or end_rotation are different values,
+creates animation between them.
+"""
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "invert": ("BOOLEAN", {"default": False}),
+ "frames": ("INT", {"default": 1,"min": 1, "max": 4096, "step": 1}),
+ "text_x": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}),
+ "text_y": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}),
+ "font_size": ("INT", {"default": 32,"min": 8, "max": 4096, "step": 1}),
+ "font_color": ("STRING", {"default": "white"}),
+ "text": ("STRING", {"default": "HELLO!", "multiline": True}),
+ "font": (folder_paths.get_filename_list("kjnodes_fonts"), ),
+ "width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}),
+ "height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}),
+ "start_rotation": ("INT", {"default": 0,"min": 0, "max": 359, "step": 1}),
+ "end_rotation": ("INT", {"default": 0,"min": -359, "max": 359, "step": 1}),
+ },
+ }
+
+ def createtextmask(self, frames, width, height, invert, text_x, text_y, text, font_size, font_color, font, start_rotation, end_rotation):
+ # Define the number of images in the batch
+ batch_size = frames
+ out = []
+ masks = []
+ rotation = start_rotation
+ if start_rotation != end_rotation:
+ rotation_increment = (end_rotation - start_rotation) / (batch_size - 1)
+
+ font_path = folder_paths.get_full_path("kjnodes_fonts", font)
+ # Generate the text
+ for i in range(batch_size):
+ image = Image.new("RGB", (width, height), "black")
+ draw = ImageDraw.Draw(image)
+ font = ImageFont.truetype(font_path, font_size)
+
+ # Split the text into words
+ words = text.split()
+
+ # Initialize variables for line creation
+ lines = []
+ current_line = []
+ current_line_width = 0
+ try: #new pillow
+ # Iterate through words to create lines
+ for word in words:
+ word_width = font.getbbox(word)[2]
+ if current_line_width + word_width <= width - 2 * text_x:
+ current_line.append(word)
+ current_line_width += word_width + font.getbbox(" ")[2] # Add space width
+ else:
+ lines.append(" ".join(current_line))
+ current_line = [word]
+ current_line_width = word_width
+ except: #old pillow
+ for word in words:
+ word_width = font.getsize(word)[0]
+ if current_line_width + word_width <= width - 2 * text_x:
+ current_line.append(word)
+ current_line_width += word_width + font.getsize(" ")[0] # Add space width
+ else:
+ lines.append(" ".join(current_line))
+ current_line = [word]
+ current_line_width = word_width
+
+ # Add the last line if it's not empty
+ if current_line:
+ lines.append(" ".join(current_line))
+
+ # Draw each line of text separately
+ y_offset = text_y
+ for line in lines:
+ text_width = font.getlength(line)
+ text_height = font_size
+ text_center_x = text_x + text_width / 2
+ text_center_y = y_offset + text_height / 2
+ try:
+ draw.text((text_x, y_offset), line, font=font, fill=font_color, features=['-liga'])
+ except:
+ draw.text((text_x, y_offset), line, font=font, fill=font_color)
+ y_offset += text_height # Move to the next line
+
+ if start_rotation != end_rotation:
+ image = image.rotate(rotation, center=(text_center_x, text_center_y))
+ rotation += rotation_increment
+
+ image = np.array(image).astype(np.float32) / 255.0
+ image = torch.from_numpy(image)[None,]
+ mask = image[:, :, :, 0]
+ masks.append(mask)
+ out.append(image)
+
+ if invert:
+ return (1.0 - torch.cat(out, dim=0), 1.0 - torch.cat(masks, dim=0),)
+ return (torch.cat(out, dim=0),torch.cat(masks, dim=0),)
+
+class ColorToMask:
+
+ RETURN_TYPES = ("MASK",)
+ FUNCTION = "clip"
+ CATEGORY = "KJNodes/masking"
+ DESCRIPTION = """
+Converts chosen RGB value to a mask.
+With batch inputs, the **per_batch** value
+controls the number of images processed at once.
+"""
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "images": ("IMAGE",),
+ "invert": ("BOOLEAN", {"default": False}),
+ "red": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}),
+ "green": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}),
+ "blue": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}),
+ "threshold": ("INT", {"default": 10,"min": 0, "max": 255, "step": 1}),
+ "per_batch": ("INT", {"default": 16, "min": 1, "max": 4096, "step": 1}),
+ },
+ }
+
+ def clip(self, images, red, green, blue, threshold, invert, per_batch):
+
+ color = torch.tensor([red, green, blue], dtype=torch.uint8)
+ black = torch.tensor([0, 0, 0], dtype=torch.uint8)
+ white = torch.tensor([255, 255, 255], dtype=torch.uint8)
+
+ if invert:
+ black, white = white, black
+
+ steps = images.shape[0]
+ pbar = ProgressBar(steps)
+ tensors_out = []
+
+ for start_idx in range(0, images.shape[0], per_batch):
+
+ # Calculate color distances
+ color_distances = torch.norm(images[start_idx:start_idx+per_batch] * 255 - color, dim=-1)
+
+ # Create a mask based on the threshold
+ mask = color_distances <= threshold
+
+ # Apply the mask to create new images
+ mask_out = torch.where(mask.unsqueeze(-1), white, black).float()
+ mask_out = mask_out.mean(dim=-1)
+
+ tensors_out.append(mask_out.cpu())
+ batch_count = mask_out.shape[0]
+ pbar.update(batch_count)
+
+ tensors_out = torch.cat(tensors_out, dim=0)
+ tensors_out = torch.clamp(tensors_out, min=0.0, max=1.0)
+ return tensors_out,
+
+class CreateFluidMask:
+
+ RETURN_TYPES = ("IMAGE", "MASK")
+ FUNCTION = "createfluidmask"
+ CATEGORY = "KJNodes/masking/generate"
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "invert": ("BOOLEAN", {"default": False}),
+ "frames": ("INT", {"default": 1,"min": 1, "max": 4096, "step": 1}),
+ "width": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}),
+ "height": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}),
+ "inflow_count": ("INT", {"default": 3,"min": 0, "max": 255, "step": 1}),
+ "inflow_velocity": ("INT", {"default": 1,"min": 0, "max": 255, "step": 1}),
+ "inflow_radius": ("INT", {"default": 8,"min": 0, "max": 255, "step": 1}),
+ "inflow_padding": ("INT", {"default": 50,"min": 0, "max": 255, "step": 1}),
+ "inflow_duration": ("INT", {"default": 60,"min": 0, "max": 255, "step": 1}),
+ },
+ }
+ #using code from https://github.com/GregTJ/stable-fluids
+ def createfluidmask(self, frames, width, height, invert, inflow_count, inflow_velocity, inflow_radius, inflow_padding, inflow_duration):
+ from ..utility.fluid import Fluid
+        try:
+            from scipy.special import erf
+        except ImportError:
+            raise Exception("Cannot import erf from scipy. Install scipy with 'pip install scipy'")
+ out = []
+ masks = []
+ RESOLUTION = width, height
+ DURATION = frames
+
+ INFLOW_PADDING = inflow_padding
+ INFLOW_DURATION = inflow_duration
+ INFLOW_RADIUS = inflow_radius
+ INFLOW_VELOCITY = inflow_velocity
+ INFLOW_COUNT = inflow_count
+
+ print('Generating fluid solver, this may take some time.')
+ fluid = Fluid(RESOLUTION, 'dye')
+
+ center = np.floor_divide(RESOLUTION, 2)
+ r = np.min(center) - INFLOW_PADDING
+
+ points = np.linspace(-np.pi, np.pi, INFLOW_COUNT, endpoint=False)
+ points = tuple(np.array((np.cos(p), np.sin(p))) for p in points)
+ normals = tuple(-p for p in points)
+ points = tuple(r * p + center for p in points)
+
+ inflow_velocity = np.zeros_like(fluid.velocity)
+ inflow_dye = np.zeros(fluid.shape)
+ for p, n in zip(points, normals):
+ mask = np.linalg.norm(fluid.indices - p[:, None, None], axis=0) <= INFLOW_RADIUS
+ inflow_velocity[:, mask] += n[:, None] * INFLOW_VELOCITY
+ inflow_dye[mask] = 1
+
+
+ for f in range(DURATION):
+ print(f'Computing frame {f + 1} of {DURATION}.')
+ if f <= INFLOW_DURATION:
+ fluid.velocity += inflow_velocity
+ fluid.dye += inflow_dye
+
+ curl = fluid.step()[1]
+ # Using the error function to make the contrast a bit higher.
+ # Any other sigmoid function e.g. smoothstep would work.
+ curl = (erf(curl * 2) + 1) / 4
+
+ color = np.dstack((curl, np.ones(fluid.shape), fluid.dye))
+ color = (np.clip(color, 0, 1) * 255).astype('uint8')
+ image = np.array(color).astype(np.float32) / 255.0
+ image = torch.from_numpy(image)[None,]
+ mask = image[:, :, :, 0]
+ masks.append(mask)
+ out.append(image)
+
+ if invert:
+ return (1.0 - torch.cat(out, dim=0),1.0 - torch.cat(masks, dim=0),)
+ return (torch.cat(out, dim=0),torch.cat(masks, dim=0),)
+
+class CreateAudioMask:
+
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "createaudiomask"
+ CATEGORY = "KJNodes/deprecated"
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "invert": ("BOOLEAN", {"default": False}),
+ "frames": ("INT", {"default": 16,"min": 1, "max": 255, "step": 1}),
+ "scale": ("FLOAT", {"default": 0.5,"min": 0.0, "max": 2.0, "step": 0.01}),
+ "audio_path": ("STRING", {"default": "audio.wav"}),
+ "width": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}),
+ "height": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}),
+ },
+ }
+
+ def createaudiomask(self, frames, width, height, invert, audio_path, scale):
+ try:
+ import librosa
+ except ImportError:
+            raise Exception("Cannot import librosa. Install it with 'pip install librosa'")
+ batch_size = frames
+ out = []
+ masks = []
+ if audio_path == "audio.wav": #I don't know why relative path won't work otherwise...
+ audio_path = os.path.join(script_directory, audio_path)
+ audio, sr = librosa.load(audio_path)
+ spectrogram = np.abs(librosa.stft(audio))
+
+ for i in range(batch_size):
+ image = Image.new("RGB", (width, height), "black")
+ draw = ImageDraw.Draw(image)
+ frame = spectrogram[:, i]
+ circle_radius = int(height * np.mean(frame))
+ circle_radius *= scale
+ circle_center = (width // 2, height // 2) # Calculate the center of the image
+
+ draw.ellipse([(circle_center[0] - circle_radius, circle_center[1] - circle_radius),
+ (circle_center[0] + circle_radius, circle_center[1] + circle_radius)],
+ fill='white')
+
+ image = np.array(image).astype(np.float32) / 255.0
+ image = torch.from_numpy(image)[None,]
+ mask = image[:, :, :, 0]
+ masks.append(mask)
+ out.append(image)
+
+ if invert:
+ return (1.0 - torch.cat(out, dim=0),)
+ return (torch.cat(out, dim=0),torch.cat(masks, dim=0),)
+
+class CreateGradientMask:
+
+ RETURN_TYPES = ("MASK",)
+ FUNCTION = "createmask"
+ CATEGORY = "KJNodes/masking/generate"
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "invert": ("BOOLEAN", {"default": False}),
+ "frames": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}),
+ "width": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}),
+ "height": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}),
+ },
+ }
+ def createmask(self, frames, width, height, invert):
+ # Define the number of images in the batch
+ batch_size = frames
+ out = []
+ # Create an empty array to store the image batch
+ image_batch = np.zeros((batch_size, height, width), dtype=np.float32)
+ # Generate the black to white gradient for each image
+ for i in range(batch_size):
+ gradient = np.linspace(1.0, 0.0, width, dtype=np.float32)
+ time = i / frames # Calculate the time variable
+ offset_gradient = gradient - time # Offset the gradient values based on time
+ image_batch[i] = offset_gradient.reshape(1, -1)
+ output = torch.from_numpy(image_batch)
+ mask = output
+ out.append(mask)
+ if invert:
+ return (1.0 - torch.cat(out, dim=0),)
+ return (torch.cat(out, dim=0),)
+
+class CreateFadeMask:
+
+ RETURN_TYPES = ("MASK",)
+ FUNCTION = "createfademask"
+ CATEGORY = "KJNodes/deprecated"
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "invert": ("BOOLEAN", {"default": False}),
+ "frames": ("INT", {"default": 2,"min": 2, "max": 255, "step": 1}),
+ "width": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}),
+ "height": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}),
+ "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out"],),
+ "start_level": ("FLOAT", {"default": 1.0,"min": 0.0, "max": 1.0, "step": 0.01}),
+ "midpoint_level": ("FLOAT", {"default": 0.5,"min": 0.0, "max": 1.0, "step": 0.01}),
+ "end_level": ("FLOAT", {"default": 0.0,"min": 0.0, "max": 1.0, "step": 0.01}),
+ "midpoint_frame": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}),
+ },
+ }
+
+ def createfademask(self, frames, width, height, invert, interpolation, start_level, midpoint_level, end_level, midpoint_frame):
+ def ease_in(t):
+ return t * t
+
+ def ease_out(t):
+ return 1 - (1 - t) * (1 - t)
+
+ def ease_in_out(t):
+ return 3 * t * t - 2 * t * t * t
+
+ batch_size = frames
+ out = []
+ image_batch = np.zeros((batch_size, height, width), dtype=np.float32)
+
+ if midpoint_frame == 0:
+ midpoint_frame = batch_size // 2
+
+ for i in range(batch_size):
+ if i <= midpoint_frame:
+ t = i / midpoint_frame
+ if interpolation == "ease_in":
+ t = ease_in(t)
+ elif interpolation == "ease_out":
+ t = ease_out(t)
+ elif interpolation == "ease_in_out":
+ t = ease_in_out(t)
+ color = start_level - t * (start_level - midpoint_level)
+ else:
+ t = (i - midpoint_frame) / (batch_size - midpoint_frame)
+ if interpolation == "ease_in":
+ t = ease_in(t)
+ elif interpolation == "ease_out":
+ t = ease_out(t)
+ elif interpolation == "ease_in_out":
+ t = ease_in_out(t)
+ color = midpoint_level - t * (midpoint_level - end_level)
+
+ color = np.clip(color, 0, 255)
+ image = np.full((height, width), color, dtype=np.float32)
+ image_batch[i] = image
+
+ output = torch.from_numpy(image_batch)
+ mask = output
+ out.append(mask)
+
+ if invert:
+ return (1.0 - torch.cat(out, dim=0),)
+ return (torch.cat(out, dim=0),)
+
+class CreateFadeMaskAdvanced:
+
+ RETURN_TYPES = ("MASK",)
+ FUNCTION = "createfademask"
+ CATEGORY = "KJNodes/masking/generate"
+ DESCRIPTION = """
+Create a batch of masks interpolated between given frames and values.
+Uses the same syntax as Fizz' BatchValueSchedule.
+The first value is the frame index (note that this starts from 0, not 1)
+and the second value inside the parentheses is the float value of the mask in the range 0.0 - 1.0.
+
+For example the default values:
+0:(0.0)
+7:(1.0)
+15:(0.0)
+
+Would create a mask batch of 16 frames, starting from black,
+interpolating with the chosen curve to fully white at the 8th frame,
+and interpolating from that to fully black at the 16th frame.
+"""
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "points_string": ("STRING", {"default": "0:(0.0),\n7:(1.0),\n15:(0.0)\n", "multiline": True}),
+ "invert": ("BOOLEAN", {"default": False}),
+ "frames": ("INT", {"default": 16,"min": 2, "max": 255, "step": 1}),
+ "width": ("INT", {"default": 512,"min": 1, "max": 4096, "step": 1}),
+ "height": ("INT", {"default": 512,"min": 1, "max": 4096, "step": 1}),
+ "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out"],),
+ },
+ }
+
+ def createfademask(self, frames, width, height, invert, points_string, interpolation):
+ def ease_in(t):
+ return t * t
+
+ def ease_out(t):
+ return 1 - (1 - t) * (1 - t)
+
+ def ease_in_out(t):
+ return 3 * t * t - 2 * t * t * t
+
+ # Parse the input string into a list of tuples
+ points = []
+ points_string = points_string.rstrip(',\n')
+ for point_str in points_string.split(','):
+ frame_str, color_str = point_str.split(':')
+ frame = int(frame_str.strip())
+ color = float(color_str.strip()[1:-1]) # Remove parentheses around color
+ points.append((frame, color))
+
+ # Check if the last frame is already in the points
+ if len(points) == 0 or points[-1][0] != frames - 1:
+ # If not, add it with the color of the last specified frame
+ points.append((frames - 1, points[-1][1] if points else 0))
+
+ # Sort the points by frame number
+ points.sort(key=lambda x: x[0])
+
+ batch_size = frames
+ out = []
+ image_batch = np.zeros((batch_size, height, width), dtype=np.float32)
+
+ # Index of the next point to interpolate towards
+ next_point = 1
+
+ for i in range(batch_size):
+ while next_point < len(points) and i > points[next_point][0]:
+ next_point += 1
+
+ # Interpolate between the previous point and the next point
+ prev_point = next_point - 1
+ t = (i - points[prev_point][0]) / (points[next_point][0] - points[prev_point][0])
+ if interpolation == "ease_in":
+ t = ease_in(t)
+ elif interpolation == "ease_out":
+ t = ease_out(t)
+ elif interpolation == "ease_in_out":
+ t = ease_in_out(t)
+ elif interpolation == "linear":
+ pass # No need to modify `t` for linear interpolation
+
+ color = points[prev_point][1] - t * (points[prev_point][1] - points[next_point][1])
+ color = np.clip(color, 0, 255)
+ image = np.full((height, width), color, dtype=np.float32)
+ image_batch[i] = image
+
+ output = torch.from_numpy(image_batch)
+ mask = output
+ out.append(mask)
+
+ if invert:
+ return (1.0 - torch.cat(out, dim=0),)
+ return (torch.cat(out, dim=0),)
+
+class CreateMagicMask:
+
+ RETURN_TYPES = ("MASK", "MASK",)
+ RETURN_NAMES = ("mask", "mask_inverted",)
+ FUNCTION = "createmagicmask"
+ CATEGORY = "KJNodes/masking/generate"
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "frames": ("INT", {"default": 16,"min": 2, "max": 4096, "step": 1}),
+ "depth": ("INT", {"default": 12,"min": 1, "max": 500, "step": 1}),
+ "distortion": ("FLOAT", {"default": 1.5,"min": 0.0, "max": 100.0, "step": 0.01}),
+ "seed": ("INT", {"default": 123,"min": 0, "max": 99999999, "step": 1}),
+ "transitions": ("INT", {"default": 1,"min": 1, "max": 20, "step": 1}),
+ "frame_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}),
+ "frame_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}),
+ },
+ }
+
+ def createmagicmask(self, frames, transitions, depth, distortion, seed, frame_width, frame_height):
+ from ..utility.magictex import coordinate_grid, random_transform, magic
+ import matplotlib.pyplot as plt
+ rng = np.random.default_rng(seed)
+ out = []
+ coords = coordinate_grid((frame_width, frame_height))
+
+ # Calculate the number of frames for each transition
+ frames_per_transition = frames // transitions
+
+ # Generate a base set of parameters
+ base_params = {
+ "coords": random_transform(coords, rng),
+ "depth": depth,
+ "distortion": distortion,
+ }
+ for t in range(transitions):
+ # Generate a second set of parameters that is at most max_diff away from the base parameters
+ params1 = base_params.copy()
+ params2 = base_params.copy()
+
+ params1['coords'] = random_transform(coords, rng)
+ params2['coords'] = random_transform(coords, rng)
+
+ for i in range(frames_per_transition):
+ # Compute the interpolation factor
+ alpha = i / frames_per_transition
+
+ # Interpolate between the two sets of parameters
+ params = params1.copy()
+ params['coords'] = (1 - alpha) * params1['coords'] + alpha * params2['coords']
+
+ tex = magic(**params)
+
+ dpi = frame_width / 10
+ fig = plt.figure(figsize=(10, 10), dpi=dpi)
+
+ ax = fig.add_subplot(111)
+ plt.subplots_adjust(left=0, right=1, bottom=0, top=1)
+
+ ax.get_yaxis().set_ticks([])
+ ax.get_xaxis().set_ticks([])
+ ax.imshow(tex, aspect='auto')
+
+ fig.canvas.draw()
+ img = np.array(fig.canvas.renderer._renderer)
+
+ plt.close(fig)
+
+ pil_img = Image.fromarray(img).convert("L")
+ mask = torch.tensor(np.array(pil_img)) / 255.0
+
+ out.append(mask)
+
+ return (torch.stack(out, dim=0), 1.0 - torch.stack(out, dim=0),)
+
+class CreateShapeMask:
+
+ RETURN_TYPES = ("MASK", "MASK",)
+ RETURN_NAMES = ("mask", "mask_inverted",)
+ FUNCTION = "createshapemask"
+ CATEGORY = "KJNodes/masking/generate"
+ DESCRIPTION = """
+Creates a mask or batch of masks with the specified shape.
+Locations are center locations.
+Grow value is the amount to grow the shape on each frame, creating animated masks.
+"""
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "shape": (
+ [ 'circle',
+ 'square',
+ 'triangle',
+ ],
+ {
+ "default": 'circle'
+ }),
+ "frames": ("INT", {"default": 1,"min": 1, "max": 4096, "step": 1}),
+ "location_x": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}),
+ "location_y": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}),
+ "grow": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}),
+ "frame_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}),
+ "frame_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}),
+ "shape_width": ("INT", {"default": 128,"min": 8, "max": 4096, "step": 1}),
+ "shape_height": ("INT", {"default": 128,"min": 8, "max": 4096, "step": 1}),
+ },
+ }
+
+ def createshapemask(self, frames, frame_width, frame_height, location_x, location_y, shape_width, shape_height, grow, shape):
+ # Define the number of images in the batch
+ batch_size = frames
+ out = []
+ color = "white"
+ for i in range(batch_size):
+ image = Image.new("RGB", (frame_width, frame_height), "black")
+ draw = ImageDraw.Draw(image)
+
+ # Calculate the size for this frame and ensure it's not less than 0
+ current_width = max(0, shape_width + i*grow)
+ current_height = max(0, shape_height + i*grow)
+
+ if shape == 'circle' or shape == 'square':
+ # Define the bounding box for the shape
+ left_up_point = (location_x - current_width // 2, location_y - current_height // 2)
+ right_down_point = (location_x + current_width // 2, location_y + current_height // 2)
+ two_points = [left_up_point, right_down_point]
+
+ if shape == 'circle':
+ draw.ellipse(two_points, fill=color)
+ elif shape == 'square':
+ draw.rectangle(two_points, fill=color)
+
+ elif shape == 'triangle':
+ # Define the points for the triangle
+ left_up_point = (location_x - current_width // 2, location_y + current_height // 2) # bottom left
+ right_down_point = (location_x + current_width // 2, location_y + current_height // 2) # bottom right
+ top_point = (location_x, location_y - current_height // 2) # top point
+ draw.polygon([top_point, left_up_point, right_down_point], fill=color)
+
+ image = pil2tensor(image)
+ mask = image[:, :, :, 0]
+ out.append(mask)
+ outstack = torch.cat(out, dim=0)
+ return (outstack, 1.0 - outstack,)
+
+class CreateVoronoiMask:
+
+ RETURN_TYPES = ("MASK", "MASK",)
+ RETURN_NAMES = ("mask", "mask_inverted",)
+ FUNCTION = "createvoronoi"
+ CATEGORY = "KJNodes/masking/generate"
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "frames": ("INT", {"default": 16,"min": 2, "max": 4096, "step": 1}),
+ "num_points": ("INT", {"default": 15,"min": 1, "max": 4096, "step": 1}),
+ "line_width": ("INT", {"default": 4,"min": 1, "max": 4096, "step": 1}),
+ "speed": ("FLOAT", {"default": 0.5,"min": 0.0, "max": 1.0, "step": 0.01}),
+ "frame_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}),
+ "frame_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}),
+ },
+ }
+
+ def createvoronoi(self, frames, num_points, line_width, speed, frame_width, frame_height):
+        from scipy.spatial import Voronoi
+        import matplotlib.pyplot as plt
+ # Define the number of images in the batch
+ batch_size = frames
+ out = []
+
+ # Calculate aspect ratio
+ aspect_ratio = frame_width / frame_height
+
+ # Create start and end points for each point, considering the aspect ratio
+ start_points = np.random.rand(num_points, 2)
+ start_points[:, 0] *= aspect_ratio
+
+ end_points = np.random.rand(num_points, 2)
+ end_points[:, 0] *= aspect_ratio
+
+ for i in range(batch_size):
+ # Interpolate the points' positions based on the current frame
+ t = (i * speed) / (batch_size - 1) # normalize to [0, 1] over the frames
+ t = np.clip(t, 0, 1) # ensure t is in [0, 1]
+ points = (1 - t) * start_points + t * end_points # lerp
+
+            # The x coordinates are already scaled for the aspect ratio above
+
+ vor = Voronoi(points)
+
+ # Create a blank image with a white background
+ fig, ax = plt.subplots()
+ plt.subplots_adjust(left=0, right=1, bottom=0, top=1)
+ ax.set_xlim([0, aspect_ratio]); ax.set_ylim([0, 1]) # adjust x limits
+ ax.axis('off')
+ ax.margins(0, 0)
+ fig.set_size_inches(aspect_ratio * frame_height/100, frame_height/100) # adjust figure size
+ ax.fill_between([0, 1], [0, 1], color='white')
+
+ # Plot each Voronoi ridge
+ for simplex in vor.ridge_vertices:
+ simplex = np.asarray(simplex)
+ if np.all(simplex >= 0):
+ plt.plot(vor.vertices[simplex, 0], vor.vertices[simplex, 1], 'k-', linewidth=line_width)
+
+ fig.canvas.draw()
+ img = np.array(fig.canvas.renderer._renderer)
+
+ plt.close(fig)
+
+ pil_img = Image.fromarray(img).convert("L")
+ mask = torch.tensor(np.array(pil_img)) / 255.0
+
+ out.append(mask)
+
+ return (torch.stack(out, dim=0), 1.0 - torch.stack(out, dim=0),)
+
+class GetMaskSizeAndCount:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "mask": ("MASK",),
+ }}
+
+ RETURN_TYPES = ("MASK","INT", "INT", "INT",)
+ RETURN_NAMES = ("mask", "width", "height", "count",)
+ FUNCTION = "getsize"
+ CATEGORY = "KJNodes/masking"
+ DESCRIPTION = """
+Returns the width, height and batch size of the mask,
+and passes it through unchanged.
+
+"""
+
+ def getsize(self, mask):
+ width = mask.shape[2]
+ height = mask.shape[1]
+ count = mask.shape[0]
+ return {"ui": {
+ "text": [f"{count}x{width}x{height}"]},
+ "result": (mask, width, height, count)
+ }
+
+class GrowMaskWithBlur:
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "mask": ("MASK",),
+ "expand": ("INT", {"default": 0, "min": -MAX_RESOLUTION, "max": MAX_RESOLUTION, "step": 1}),
+ "incremental_expandrate": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 100.0, "step": 0.1}),
+ "tapered_corners": ("BOOLEAN", {"default": True}),
+ "flip_input": ("BOOLEAN", {"default": False}),
+ "blur_radius": ("FLOAT", {
+ "default": 0.0,
+ "min": 0.0,
+ "max": 100,
+ "step": 0.1
+ }),
+ "lerp_alpha": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
+ "decay_factor": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
+ },
+ "optional": {
+ "fill_holes": ("BOOLEAN", {"default": False}),
+ },
+ }
+
+ CATEGORY = "KJNodes/masking"
+ RETURN_TYPES = ("MASK", "MASK",)
+ RETURN_NAMES = ("mask", "mask_inverted",)
+ FUNCTION = "expand_mask"
+ DESCRIPTION = """
+# GrowMaskWithBlur
+- mask: Input mask or mask batch
+- expand: Expand or contract mask or mask batch by a given amount
+- incremental_expandrate: increase expand rate by a given amount per frame
+- tapered_corners: use tapered corners
+- flip_input: flip input mask
+- blur_radius: value higher than 0 will blur the mask
+- lerp_alpha: alpha value for interpolation between frames
+- decay_factor: decay value for interpolation between frames
+- fill_holes: fill holes in the mask (slow)"""
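+    # Example of the incremental expansion (illustrative numbers): with expand=4 and
+    # incremental_expandrate=2.0, the first mask in the batch is dilated 4 times, the
+    # second 6, the third 8, and so on, since current_expand grows by the rate after
+    # every frame.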
+
+ def expand_mask(self, mask, expand, tapered_corners, flip_input, blur_radius, incremental_expandrate, lerp_alpha, decay_factor, fill_holes=False):
+ alpha = lerp_alpha
+ decay = decay_factor
+ if flip_input:
+ mask = 1.0 - mask
+ c = 0 if tapered_corners else 1
+ kernel = np.array([[c, 1, c],
+ [1, 1, 1],
+ [c, 1, c]])
+ growmask = mask.reshape((-1, mask.shape[-2], mask.shape[-1])).cpu()
+ out = []
+ previous_output = None
+ current_expand = expand
+ for m in growmask:
+ output = m.numpy().astype(np.float32)
+ for _ in range(abs(round(current_expand))):
+ if current_expand < 0:
+ output = scipy.ndimage.grey_erosion(output, footprint=kernel)
+ else:
+ output = scipy.ndimage.grey_dilation(output, footprint=kernel)
+ if current_expand < 0:
+ current_expand -= abs(incremental_expandrate)
+ else:
+ current_expand += abs(incremental_expandrate)
+ if fill_holes:
+ binary_mask = output > 0
+ output = scipy.ndimage.binary_fill_holes(binary_mask)
+                output = output.astype(np.float32)
+ output = torch.from_numpy(output)
+ if alpha < 1.0 and previous_output is not None:
+ # Interpolate between the previous and current frame
+ output = alpha * output + (1 - alpha) * previous_output
+ if decay < 1.0 and previous_output is not None:
+ # Add the decayed previous output to the current frame
+ output += decay * previous_output
+ output = output / output.max()
+ previous_output = output
+ out.append(output)
+
+ if blur_radius != 0:
+ # Convert the tensor list to PIL images, apply blur, and convert back
+ for idx, tensor in enumerate(out):
+ # Convert tensor to PIL image
+ pil_image = tensor2pil(tensor.cpu().detach())[0]
+ # Apply Gaussian blur
+ pil_image = pil_image.filter(ImageFilter.GaussianBlur(blur_radius))
+ # Convert back to tensor
+ out[idx] = pil2tensor(pil_image)
+ blurred = torch.cat(out, dim=0)
+ return (blurred, 1.0 - blurred)
+ else:
+ return (torch.stack(out, dim=0), 1.0 - torch.stack(out, dim=0),)
+
+class MaskBatchMulti:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "inputcount": ("INT", {"default": 2, "min": 2, "max": 1000, "step": 1}),
+ "mask_1": ("MASK", ),
+ "mask_2": ("MASK", ),
+ },
+ }
+
+ RETURN_TYPES = ("MASK",)
+ RETURN_NAMES = ("masks",)
+ FUNCTION = "combine"
+ CATEGORY = "KJNodes/masking"
+ DESCRIPTION = """
+Creates a mask batch from multiple masks.
+You can set how many inputs the node has
+with the **inputcount** value and clicking update.
+"""
+
+ def combine(self, inputcount, **kwargs):
+ mask = kwargs["mask_1"]
+ for c in range(1, inputcount):
+ new_mask = kwargs[f"mask_{c + 1}"]
+ if mask.shape[1:] != new_mask.shape[1:]:
+ new_mask = F.interpolate(new_mask.unsqueeze(1), size=(mask.shape[1], mask.shape[2]), mode="bicubic").squeeze(1)
+ mask = torch.cat((mask, new_mask), dim=0)
+ return (mask,)
+
+class OffsetMask:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "mask": ("MASK",),
+ "x": ("INT", { "default": 0, "min": -4096, "max": MAX_RESOLUTION, "step": 1, "display": "number" }),
+ "y": ("INT", { "default": 0, "min": -4096, "max": MAX_RESOLUTION, "step": 1, "display": "number" }),
+ "angle": ("INT", { "default": 0, "min": -360, "max": 360, "step": 1, "display": "number" }),
+ "duplication_factor": ("INT", { "default": 1, "min": 1, "max": 1000, "step": 1, "display": "number" }),
+ "roll": ("BOOLEAN", { "default": False }),
+ "incremental": ("BOOLEAN", { "default": False }),
+ "padding_mode": (
+ [
+ 'empty',
+ 'border',
+ 'reflection',
+
+ ], {
+ "default": 'empty'
+ }),
+ }
+ }
+
+ RETURN_TYPES = ("MASK",)
+ RETURN_NAMES = ("mask",)
+ FUNCTION = "offset"
+ CATEGORY = "KJNodes/masking"
+ DESCRIPTION = """
+Offsets the mask by the specified amount.
+ - mask: Input mask or mask batch
+ - x: Horizontal offset
+ - y: Vertical offset
+ - angle: Angle in degrees
+ - roll: wrap the mask around the edges instead of padding
+ - incremental: increase the offset per frame across the batch
+ - duplication_factor: Number of times to duplicate the mask to form a batch
+ - padding_mode: Padding mode for the areas uncovered by the offset
+"""
+
+ def offset(self, mask, x, y, angle, roll=False, incremental=False, duplication_factor=1, padding_mode="empty"):
+ # Create duplicates of the mask batch
+ mask = mask.repeat(duplication_factor, 1, 1).clone()
+
+ batch_size, height, width = mask.shape
+
+ if angle != 0 and incremental:
+ for i in range(batch_size):
+ rotation_angle = angle * (i+1)
+ mask[i] = TF.rotate(mask[i].unsqueeze(0), rotation_angle).squeeze(0)
+        elif angle != 0:
+ for i in range(batch_size):
+ mask[i] = TF.rotate(mask[i].unsqueeze(0), angle).squeeze(0)
+
+ if roll:
+ if incremental:
+ for i in range(batch_size):
+ shift_x = min(x*(i+1), width-1)
+ shift_y = min(y*(i+1), height-1)
+ if shift_x != 0:
+ mask[i] = torch.roll(mask[i], shifts=shift_x, dims=1)
+ if shift_y != 0:
+ mask[i] = torch.roll(mask[i], shifts=shift_y, dims=0)
+ else:
+ shift_x = min(x, width-1)
+ shift_y = min(y, height-1)
+ if shift_x != 0:
+ mask = torch.roll(mask, shifts=shift_x, dims=2)
+ if shift_y != 0:
+ mask = torch.roll(mask, shifts=shift_y, dims=1)
+        else:
+            # Map the widget choices to the padding modes torch.nn.functional.pad understands
+            pad_mode = {'border': 'replicate', 'reflection': 'reflect'}.get(padding_mode, padding_mode)
+            for i in range(batch_size):
+                if incremental:
+                    temp_x = min(x * (i+1), width-1)
+                    temp_y = min(y * (i+1), height-1)
+                else:
+                    temp_x = min(x, width-1)
+                    temp_y = min(y, height-1)
+                if temp_x > 0:
+                    if pad_mode == 'empty':
+                        mask[i] = torch.cat([torch.zeros((height, temp_x)), mask[i, :, :-temp_x]], dim=1)
+                    elif pad_mode in ['replicate', 'reflect']:
+                        mask[i] = F.pad(mask[i, :, :-temp_x], (temp_x, 0), mode=pad_mode)
+                elif temp_x < 0:
+                    if pad_mode == 'empty':
+                        mask[i] = torch.cat([mask[i, :, :temp_x], torch.zeros((height, -temp_x))], dim=1)
+                    elif pad_mode in ['replicate', 'reflect']:
+                        mask[i] = F.pad(mask[i, :, :temp_x], (0, -temp_x), mode=pad_mode)
+
+                if temp_y > 0:
+                    if pad_mode == 'empty':
+                        mask[i] = torch.cat([torch.zeros((temp_y, width)), mask[i, :-temp_y, :]], dim=0)
+                    elif pad_mode in ['replicate', 'reflect']:
+                        # 2D padding with these modes needs a leading channel dimension
+                        mask[i] = F.pad(mask[i, :-temp_y, :].unsqueeze(0), (0, 0, temp_y, 0), mode=pad_mode).squeeze(0)
+                elif temp_y < 0:
+                    if pad_mode == 'empty':
+                        mask[i] = torch.cat([mask[i, :temp_y, :], torch.zeros((-temp_y, width))], dim=0)
+                    elif pad_mode in ['replicate', 'reflect']:
+                        mask[i] = F.pad(mask[i, :temp_y, :].unsqueeze(0), (0, 0, 0, -temp_y), mode=pad_mode).squeeze(0)
+
+ return mask,
+
+class RoundMask:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "mask": ("MASK",),
+ }}
+
+ RETURN_TYPES = ("MASK",)
+ FUNCTION = "round"
+ CATEGORY = "KJNodes/masking"
+ DESCRIPTION = """
+Rounds the mask or batch of masks to a binary mask.
+
+
+"""
+
+ def round(self, mask):
+ mask = mask.round()
+ return (mask,)
+
+class ResizeMask:
+ upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "mask": ("MASK",),
+ "width": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8, "display": "number" }),
+ "height": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8, "display": "number" }),
+ "keep_proportions": ("BOOLEAN", { "default": False }),
+ "upscale_method": (s.upscale_methods,),
+ "crop": (["disabled","center"],),
+ }
+ }
+
+ RETURN_TYPES = ("MASK", "INT", "INT",)
+ RETURN_NAMES = ("mask", "width", "height",)
+ FUNCTION = "resize"
+ CATEGORY = "KJNodes/masking"
+ DESCRIPTION = """
+Resizes the mask or batch of masks to the specified width and height.
+"""
+
+    def resize(self, mask, width, height, keep_proportions, upscale_method, crop):
+ if keep_proportions:
+ _, oh, ow = mask.shape
+ width = ow if width == 0 else width
+ height = oh if height == 0 else height
+ ratio = min(width / ow, height / oh)
+ width = round(ow*ratio)
+ height = round(oh*ratio)
+ outputs = mask.unsqueeze(1)
+ outputs = common_upscale(outputs, width, height, upscale_method, crop)
+ outputs = outputs.squeeze(1)
+
+ return(outputs, outputs.shape[2], outputs.shape[1],)
+
+class RemapMaskRange:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "mask": ("MASK",),
+ "min": ("FLOAT", {"default": 0.0,"min": -10.0, "max": 1.0, "step": 0.01}),
+ "max": ("FLOAT", {"default": 1.0,"min": 0.0, "max": 10.0, "step": 0.01}),
+ }
+ }
+
+ RETURN_TYPES = ("MASK",)
+ RETURN_NAMES = ("mask",)
+ FUNCTION = "remap"
+ CATEGORY = "KJNodes/masking"
+ DESCRIPTION = """
+Sets new min and max values for the mask.
+"""
+
+ def remap(self, mask, min, max):
+
+ # Find the maximum value in the mask
+ mask_max = torch.max(mask)
+
+ # If the maximum mask value is zero, avoid division by zero by setting it to 1
+ mask_max = mask_max if mask_max > 0 else 1
+
+ # Scale the mask values to the new range defined by min and max
+ # The highest pixel value in the mask will be scaled to max
+ scaled_mask = (mask / mask_max) * (max - min) + min
+
+ # Clamp the values to ensure they are within [0.0, 1.0]
+ scaled_mask = torch.clamp(scaled_mask, min=0.0, max=1.0)
+
+ return (scaled_mask, )
diff --git a/ComfyUI-KJNodes/nodes/nodes.py b/ComfyUI-KJNodes/nodes/nodes.py
new file mode 100644
index 0000000000000000000000000000000000000000..e63d18c3cc3188aeeb592e6862a890b2a6ffecd9
--- /dev/null
+++ b/ComfyUI-KJNodes/nodes/nodes.py
@@ -0,0 +1,2516 @@
+import torch
+import numpy as np
+from PIL import Image
+from typing import Union
+import json, re, os, io, time, platform
+import re
+import importlib
+
+import model_management
+import folder_paths
+from nodes import MAX_RESOLUTION
+from comfy.utils import common_upscale, ProgressBar
+
+script_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+folder_paths.add_model_folder_path("kjnodes_fonts", os.path.join(script_directory, "fonts"))
+
+class AnyType(str):
+ """A special class that is always equal in not equal comparisons. Credit to pythongosssss"""
+
+ def __ne__(self, __value: object) -> bool:
+ return False
+any = AnyType("*")
+
+class BOOLConstant:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "value": ("BOOLEAN", {"default": True}),
+ },
+ }
+ RETURN_TYPES = ("BOOLEAN",)
+ RETURN_NAMES = ("value",)
+ FUNCTION = "get_value"
+ CATEGORY = "KJNodes/constants"
+
+ def get_value(self, value):
+ return (value,)
+
+class INTConstant:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "value": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
+ },
+ }
+ RETURN_TYPES = ("INT",)
+ RETURN_NAMES = ("value",)
+ FUNCTION = "get_value"
+ CATEGORY = "KJNodes/constants"
+
+ def get_value(self, value):
+ return (value,)
+
+class FloatConstant:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "value": ("FLOAT", {"default": 0.0, "min": -0xffffffffffffffff, "max": 0xffffffffffffffff, "step": 0.001}),
+ },
+ }
+
+ RETURN_TYPES = ("FLOAT",)
+ RETURN_NAMES = ("value",)
+ FUNCTION = "get_value"
+ CATEGORY = "KJNodes/constants"
+
+ def get_value(self, value):
+ return (value,)
+
+class StringConstant:
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "string": ("STRING", {"default": '', "multiline": False}),
+ }
+ }
+ RETURN_TYPES = ("STRING",)
+ FUNCTION = "passtring"
+ CATEGORY = "KJNodes/constants"
+
+ def passtring(self, string):
+ return (string, )
+
+class StringConstantMultiline:
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "string": ("STRING", {"default": "", "multiline": True}),
+ "strip_newlines": ("BOOLEAN", {"default": True}),
+ }
+ }
+ RETURN_TYPES = ("STRING",)
+ FUNCTION = "stringify"
+ CATEGORY = "KJNodes/constants"
+
+ def stringify(self, string, strip_newlines):
+ new_string = []
+ for line in io.StringIO(string):
+ if not line.strip().startswith("\n") and strip_newlines:
+ line = line.replace("\n", '')
+ new_string.append(line)
+ new_string = "\n".join(new_string)
+
+ return (new_string, )
+
+
+
+class ScaleBatchPromptSchedule:
+
+ RETURN_TYPES = ("STRING",)
+ FUNCTION = "scaleschedule"
+ CATEGORY = "KJNodes"
+ DESCRIPTION = """
+Scales a batch schedule from Fizz' nodes BatchPromptSchedule
+to a different frame count.
+"""
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "input_str": ("STRING", {"forceInput": True,"default": "0:(0.0),\n7:(1.0),\n15:(0.0)\n"}),
+ "old_frame_count": ("INT", {"forceInput": True,"default": 1,"min": 1, "max": 4096, "step": 1}),
+ "new_frame_count": ("INT", {"forceInput": True,"default": 1,"min": 1, "max": 4096, "step": 1}),
+
+ },
+ }
+
+ def scaleschedule(self, old_frame_count, input_str, new_frame_count):
+ pattern = r'"(\d+)"\s*:\s*"(.*?)"(?:,|\Z)'
+ frame_strings = dict(re.findall(pattern, input_str))
+
+ # Calculate the scaling factor
+ scaling_factor = (new_frame_count - 1) / (old_frame_count - 1)
+
+ # Initialize a dictionary to store the new frame numbers and strings
+ new_frame_strings = {}
+
+ # Iterate over the frame numbers and strings
+ for old_frame, string in frame_strings.items():
+ # Calculate the new frame number
+ new_frame = int(round(int(old_frame) * scaling_factor))
+
+ # Store the new frame number and corresponding string
+ new_frame_strings[new_frame] = string
+
+ # Format the output string
+ output_str = ', '.join([f'"{k}":"{v}"' for k, v in sorted(new_frame_strings.items())])
+ return (output_str,)
+
+
+class GetLatentsFromBatchIndexed:
+
+ RETURN_TYPES = ("LATENT",)
+ FUNCTION = "indexedlatentsfrombatch"
+ CATEGORY = "KJNodes"
+ DESCRIPTION = """
+Selects and returns the latents at the specified indices as a latent batch.
+"""
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "latents": ("LATENT",),
+ "indexes": ("STRING", {"default": "0, 1, 2", "multiline": True}),
+ },
+ }
+
+ def indexedlatentsfrombatch(self, latents, indexes):
+
+ samples = latents.copy()
+ latent_samples = samples["samples"]
+
+ # Parse the indexes string into a list of integers
+ index_list = [int(index.strip()) for index in indexes.split(',')]
+
+ # Convert list of indices to a PyTorch tensor
+ indices_tensor = torch.tensor(index_list, dtype=torch.long)
+
+ # Select the latents at the specified indices
+ chosen_latents = latent_samples[indices_tensor]
+
+ samples["samples"] = chosen_latents
+ return (samples,)
+
+
+class ConditioningMultiCombine:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "inputcount": ("INT", {"default": 2, "min": 2, "max": 20, "step": 1}),
+ "operation": (["combine", "concat"], {"default": "combine"}),
+ "conditioning_1": ("CONDITIONING", ),
+ "conditioning_2": ("CONDITIONING", ),
+ },
+ }
+
+ RETURN_TYPES = ("CONDITIONING", "INT")
+ RETURN_NAMES = ("combined", "inputcount")
+ FUNCTION = "combine"
+ CATEGORY = "KJNodes/masking/conditioning"
+ DESCRIPTION = """
+Combines multiple conditioning nodes into one
+"""
+
+ def combine(self, inputcount, operation, **kwargs):
+ from nodes import ConditioningCombine
+ from nodes import ConditioningConcat
+ cond_combine_node = ConditioningCombine()
+ cond_concat_node = ConditioningConcat()
+ cond = kwargs["conditioning_1"]
+ for c in range(1, inputcount):
+ new_cond = kwargs[f"conditioning_{c + 1}"]
+ if operation == "combine":
+ cond = cond_combine_node.combine(new_cond, cond)[0]
+ elif operation == "concat":
+ cond = cond_concat_node.concat(cond, new_cond)[0]
+ return (cond, inputcount,)
+
+class AppendStringsToList:
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "string1": ("STRING", {"default": '', "forceInput": True}),
+ "string2": ("STRING", {"default": '', "forceInput": True}),
+ }
+ }
+ RETURN_TYPES = ("STRING",)
+ FUNCTION = "joinstring"
+ CATEGORY = "KJNodes/constants"
+
+ def joinstring(self, string1, string2):
+ if not isinstance(string1, list):
+ string1 = [string1]
+ if not isinstance(string2, list):
+ string2 = [string2]
+
+ joined_string = string1 + string2
+ return (joined_string, )
+
+class JoinStrings:
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "string1": ("STRING", {"default": '', "forceInput": True}),
+ "string2": ("STRING", {"default": '', "forceInput": True}),
+ "delimiter": ("STRING", {"default": ' ', "multiline": False}),
+ }
+ }
+ RETURN_TYPES = ("STRING",)
+ FUNCTION = "joinstring"
+ CATEGORY = "KJNodes/constants"
+
+ def joinstring(self, string1, string2, delimiter):
+ joined_string = string1 + delimiter + string2
+ return (joined_string, )
+
+class JoinStringMulti:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "inputcount": ("INT", {"default": 2, "min": 2, "max": 1000, "step": 1}),
+ "string_1": ("STRING", {"default": '', "forceInput": True}),
+ "string_2": ("STRING", {"default": '', "forceInput": True}),
+ "delimiter": ("STRING", {"default": ' ', "multiline": False}),
+ "return_list": ("BOOLEAN", {"default": False}),
+ },
+ }
+
+ RETURN_TYPES = ("STRING",)
+ RETURN_NAMES = ("string",)
+ FUNCTION = "combine"
+ CATEGORY = "KJNodes"
+ DESCRIPTION = """
+Creates a single string, or a list of strings, from
+multiple input strings.
+You can set how many inputs the node has
+with the **inputcount** value and clicking update.
+"""
+
+ def combine(self, inputcount, delimiter, **kwargs):
+ string = kwargs["string_1"]
+ return_list = kwargs["return_list"]
+ strings = [string] # Initialize a list with the first string
+ for c in range(1, inputcount):
+ new_string = kwargs[f"string_{c + 1}"]
+ if return_list:
+ strings.append(new_string) # Add new string to the list
+ else:
+ string = string + delimiter + new_string
+ if return_list:
+ return (strings,) # Return the list of strings
+ else:
+ return (string,) # Return the combined string
+
+class CondPassThrough:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ },
+ "optional": {
+ "positive": ("CONDITIONING", ),
+ "negative": ("CONDITIONING", ),
+ },
+ }
+
+ RETURN_TYPES = ("CONDITIONING", "CONDITIONING",)
+ RETURN_NAMES = ("positive", "negative")
+ FUNCTION = "passthrough"
+ CATEGORY = "KJNodes/misc"
+ DESCRIPTION = """
+    Simply passes through the positive and negative conditioning;
+    a workaround for the Set node not allowing bypassed inputs.
+"""
+
+ def passthrough(self, positive=None, negative=None):
+ return (positive, negative,)
+
+class ModelPassThrough:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ },
+ "optional": {
+ "model": ("MODEL", ),
+ },
+ }
+
+ RETURN_TYPES = ("MODEL", )
+ RETURN_NAMES = ("model",)
+ FUNCTION = "passthrough"
+ CATEGORY = "KJNodes/misc"
+ DESCRIPTION = """
+    Simply passes through the model;
+    a workaround for the Set node not allowing bypassed inputs.
+"""
+
+ def passthrough(self, model=None):
+ return (model,)
+
+def append_helper(t, mask, c, set_area_to_bounds, strength):
+ n = [t[0], t[1].copy()]
+ _, h, w = mask.shape
+ n[1]['mask'] = mask
+ n[1]['set_area_to_bounds'] = set_area_to_bounds
+ n[1]['mask_strength'] = strength
+ c.append(n)
+
+class ConditioningSetMaskAndCombine:
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "positive_1": ("CONDITIONING", ),
+ "negative_1": ("CONDITIONING", ),
+ "positive_2": ("CONDITIONING", ),
+ "negative_2": ("CONDITIONING", ),
+ "mask_1": ("MASK", ),
+ "mask_2": ("MASK", ),
+ "mask_1_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
+ "mask_2_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
+ "set_cond_area": (["default", "mask bounds"],),
+ }
+ }
+
+ RETURN_TYPES = ("CONDITIONING","CONDITIONING",)
+ RETURN_NAMES = ("combined_positive", "combined_negative",)
+ FUNCTION = "append"
+ CATEGORY = "KJNodes/masking/conditioning"
+ DESCRIPTION = """
+Bundles multiple conditioning mask and combine nodes into one; functionality is identical to the ComfyUI native nodes.
+"""
+
+ def append(self, positive_1, negative_1, positive_2, negative_2, mask_1, mask_2, set_cond_area, mask_1_strength, mask_2_strength):
+ c = []
+ c2 = []
+ set_area_to_bounds = False
+ if set_cond_area != "default":
+ set_area_to_bounds = True
+ if len(mask_1.shape) < 3:
+ mask_1 = mask_1.unsqueeze(0)
+ if len(mask_2.shape) < 3:
+ mask_2 = mask_2.unsqueeze(0)
+ for t in positive_1:
+ append_helper(t, mask_1, c, set_area_to_bounds, mask_1_strength)
+ for t in positive_2:
+ append_helper(t, mask_2, c, set_area_to_bounds, mask_2_strength)
+ for t in negative_1:
+ append_helper(t, mask_1, c2, set_area_to_bounds, mask_1_strength)
+ for t in negative_2:
+ append_helper(t, mask_2, c2, set_area_to_bounds, mask_2_strength)
+ return (c, c2)
+
+class ConditioningSetMaskAndCombine3:
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "positive_1": ("CONDITIONING", ),
+ "negative_1": ("CONDITIONING", ),
+ "positive_2": ("CONDITIONING", ),
+ "negative_2": ("CONDITIONING", ),
+ "positive_3": ("CONDITIONING", ),
+ "negative_3": ("CONDITIONING", ),
+ "mask_1": ("MASK", ),
+ "mask_2": ("MASK", ),
+ "mask_3": ("MASK", ),
+ "mask_1_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
+ "mask_2_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
+ "mask_3_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
+ "set_cond_area": (["default", "mask bounds"],),
+ }
+ }
+
+ RETURN_TYPES = ("CONDITIONING","CONDITIONING",)
+ RETURN_NAMES = ("combined_positive", "combined_negative",)
+ FUNCTION = "append"
+ CATEGORY = "KJNodes/masking/conditioning"
+ DESCRIPTION = """
+Bundles multiple conditioning mask and combine nodes into one; functionality is identical to the ComfyUI native nodes.
+"""
+
+ def append(self, positive_1, negative_1, positive_2, positive_3, negative_2, negative_3, mask_1, mask_2, mask_3, set_cond_area, mask_1_strength, mask_2_strength, mask_3_strength):
+ c = []
+ c2 = []
+ set_area_to_bounds = False
+ if set_cond_area != "default":
+ set_area_to_bounds = True
+ if len(mask_1.shape) < 3:
+ mask_1 = mask_1.unsqueeze(0)
+ if len(mask_2.shape) < 3:
+ mask_2 = mask_2.unsqueeze(0)
+ if len(mask_3.shape) < 3:
+ mask_3 = mask_3.unsqueeze(0)
+ for t in positive_1:
+ append_helper(t, mask_1, c, set_area_to_bounds, mask_1_strength)
+ for t in positive_2:
+ append_helper(t, mask_2, c, set_area_to_bounds, mask_2_strength)
+ for t in positive_3:
+ append_helper(t, mask_3, c, set_area_to_bounds, mask_3_strength)
+ for t in negative_1:
+ append_helper(t, mask_1, c2, set_area_to_bounds, mask_1_strength)
+ for t in negative_2:
+ append_helper(t, mask_2, c2, set_area_to_bounds, mask_2_strength)
+ for t in negative_3:
+ append_helper(t, mask_3, c2, set_area_to_bounds, mask_3_strength)
+ return (c, c2)
+
+class ConditioningSetMaskAndCombine4:
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "positive_1": ("CONDITIONING", ),
+ "negative_1": ("CONDITIONING", ),
+ "positive_2": ("CONDITIONING", ),
+ "negative_2": ("CONDITIONING", ),
+ "positive_3": ("CONDITIONING", ),
+ "negative_3": ("CONDITIONING", ),
+ "positive_4": ("CONDITIONING", ),
+ "negative_4": ("CONDITIONING", ),
+ "mask_1": ("MASK", ),
+ "mask_2": ("MASK", ),
+ "mask_3": ("MASK", ),
+ "mask_4": ("MASK", ),
+ "mask_1_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
+ "mask_2_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
+ "mask_3_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
+ "mask_4_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
+ "set_cond_area": (["default", "mask bounds"],),
+ }
+ }
+
+ RETURN_TYPES = ("CONDITIONING","CONDITIONING",)
+ RETURN_NAMES = ("combined_positive", "combined_negative",)
+ FUNCTION = "append"
+ CATEGORY = "KJNodes/masking/conditioning"
+ DESCRIPTION = """
+Bundles multiple conditioning mask and combine nodes into one; functionality is identical to the ComfyUI native nodes.
+"""
+
+ def append(self, positive_1, negative_1, positive_2, positive_3, positive_4, negative_2, negative_3, negative_4, mask_1, mask_2, mask_3, mask_4, set_cond_area, mask_1_strength, mask_2_strength, mask_3_strength, mask_4_strength):
+ c = []
+ c2 = []
+ set_area_to_bounds = False
+ if set_cond_area != "default":
+ set_area_to_bounds = True
+ if len(mask_1.shape) < 3:
+ mask_1 = mask_1.unsqueeze(0)
+ if len(mask_2.shape) < 3:
+ mask_2 = mask_2.unsqueeze(0)
+ if len(mask_3.shape) < 3:
+ mask_3 = mask_3.unsqueeze(0)
+ if len(mask_4.shape) < 3:
+ mask_4 = mask_4.unsqueeze(0)
+ for t in positive_1:
+ append_helper(t, mask_1, c, set_area_to_bounds, mask_1_strength)
+ for t in positive_2:
+ append_helper(t, mask_2, c, set_area_to_bounds, mask_2_strength)
+ for t in positive_3:
+ append_helper(t, mask_3, c, set_area_to_bounds, mask_3_strength)
+ for t in positive_4:
+ append_helper(t, mask_4, c, set_area_to_bounds, mask_4_strength)
+ for t in negative_1:
+ append_helper(t, mask_1, c2, set_area_to_bounds, mask_1_strength)
+ for t in negative_2:
+ append_helper(t, mask_2, c2, set_area_to_bounds, mask_2_strength)
+ for t in negative_3:
+ append_helper(t, mask_3, c2, set_area_to_bounds, mask_3_strength)
+ for t in negative_4:
+ append_helper(t, mask_4, c2, set_area_to_bounds, mask_4_strength)
+ return (c, c2)
+
+class ConditioningSetMaskAndCombine5:
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "positive_1": ("CONDITIONING", ),
+ "negative_1": ("CONDITIONING", ),
+ "positive_2": ("CONDITIONING", ),
+ "negative_2": ("CONDITIONING", ),
+ "positive_3": ("CONDITIONING", ),
+ "negative_3": ("CONDITIONING", ),
+ "positive_4": ("CONDITIONING", ),
+ "negative_4": ("CONDITIONING", ),
+ "positive_5": ("CONDITIONING", ),
+ "negative_5": ("CONDITIONING", ),
+ "mask_1": ("MASK", ),
+ "mask_2": ("MASK", ),
+ "mask_3": ("MASK", ),
+ "mask_4": ("MASK", ),
+ "mask_5": ("MASK", ),
+ "mask_1_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
+ "mask_2_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
+ "mask_3_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
+ "mask_4_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
+ "mask_5_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
+ "set_cond_area": (["default", "mask bounds"],),
+ }
+ }
+
+ RETURN_TYPES = ("CONDITIONING","CONDITIONING",)
+ RETURN_NAMES = ("combined_positive", "combined_negative",)
+ FUNCTION = "append"
+ CATEGORY = "KJNodes/masking/conditioning"
+ DESCRIPTION = """
+Bundles multiple conditioning mask and combine nodes into one; functionality is identical to the ComfyUI native nodes.
+"""
+
+ def append(self, positive_1, negative_1, positive_2, positive_3, positive_4, positive_5, negative_2, negative_3, negative_4, negative_5, mask_1, mask_2, mask_3, mask_4, mask_5, set_cond_area, mask_1_strength, mask_2_strength, mask_3_strength, mask_4_strength, mask_5_strength):
+ c = []
+ c2 = []
+ set_area_to_bounds = False
+ if set_cond_area != "default":
+ set_area_to_bounds = True
+ if len(mask_1.shape) < 3:
+ mask_1 = mask_1.unsqueeze(0)
+ if len(mask_2.shape) < 3:
+ mask_2 = mask_2.unsqueeze(0)
+ if len(mask_3.shape) < 3:
+ mask_3 = mask_3.unsqueeze(0)
+ if len(mask_4.shape) < 3:
+ mask_4 = mask_4.unsqueeze(0)
+ if len(mask_5.shape) < 3:
+ mask_5 = mask_5.unsqueeze(0)
+ for t in positive_1:
+ append_helper(t, mask_1, c, set_area_to_bounds, mask_1_strength)
+ for t in positive_2:
+ append_helper(t, mask_2, c, set_area_to_bounds, mask_2_strength)
+ for t in positive_3:
+ append_helper(t, mask_3, c, set_area_to_bounds, mask_3_strength)
+ for t in positive_4:
+ append_helper(t, mask_4, c, set_area_to_bounds, mask_4_strength)
+ for t in positive_5:
+ append_helper(t, mask_5, c, set_area_to_bounds, mask_5_strength)
+ for t in negative_1:
+ append_helper(t, mask_1, c2, set_area_to_bounds, mask_1_strength)
+ for t in negative_2:
+ append_helper(t, mask_2, c2, set_area_to_bounds, mask_2_strength)
+ for t in negative_3:
+ append_helper(t, mask_3, c2, set_area_to_bounds, mask_3_strength)
+ for t in negative_4:
+ append_helper(t, mask_4, c2, set_area_to_bounds, mask_4_strength)
+ for t in negative_5:
+ append_helper(t, mask_5, c2, set_area_to_bounds, mask_5_strength)
+ return (c, c2)
+
+class VRAM_Debug:
+
+ @classmethod
+
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+
+ "empty_cache": ("BOOLEAN", {"default": True}),
+ "gc_collect": ("BOOLEAN", {"default": True}),
+ "unload_all_models": ("BOOLEAN", {"default": False}),
+ },
+ "optional": {
+ "any_input": (any, {}),
+ "image_pass": ("IMAGE",),
+ "model_pass": ("MODEL",),
+ }
+ }
+
+ RETURN_TYPES = (any, "IMAGE","MODEL","INT", "INT",)
+ RETURN_NAMES = ("any_output", "image_pass", "model_pass", "freemem_before", "freemem_after")
+ FUNCTION = "VRAMdebug"
+ CATEGORY = "KJNodes/misc"
+ DESCRIPTION = """
+Returns the inputs unchanged; they are only used as triggers.
+Performs ComfyUI model management functions and garbage collection,
+and reports free VRAM before and after the operations.
+"""
+
+ def VRAMdebug(self, gc_collect, empty_cache, unload_all_models, image_pass=None, model_pass=None, any_input=None):
+ freemem_before = model_management.get_free_memory()
+ print("VRAMdebug: free memory before: ", f"{freemem_before:,.0f}")
+ if empty_cache:
+ model_management.soft_empty_cache()
+ if unload_all_models:
+ model_management.unload_all_models()
+ if gc_collect:
+ import gc
+ gc.collect()
+ freemem_after = model_management.get_free_memory()
+ print("VRAMdebug: free memory after: ", f"{freemem_after:,.0f}")
+ print("VRAMdebug: freed memory: ", f"{freemem_after - freemem_before:,.0f}")
+ return {"ui": {
+ "text": [f"{freemem_before:,.0f}x{freemem_after:,.0f}"]},
+ "result": (any_input, image_pass, model_pass, freemem_before, freemem_after)
+ }
+
+class SomethingToString:
+ @classmethod
+
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "input": (any, {}),
+ },
+ "optional": {
+ "prefix": ("STRING", {"default": ""}),
+ "suffix": ("STRING", {"default": ""}),
+ }
+ }
+ RETURN_TYPES = ("STRING",)
+ FUNCTION = "stringify"
+ CATEGORY = "KJNodes/text"
+ DESCRIPTION = """
+Converts any type to a string.
+"""
+
+ def stringify(self, input, prefix="", suffix=""):
+ if isinstance(input, (int, float, bool)):
+ stringified = str(input)
+ elif isinstance(input, list):
+ stringified = ', '.join(str(item) for item in input)
+        else:
+            stringified = str(input)
+ if prefix: # Check if prefix is not empty
+ stringified = prefix + stringified # Add the prefix
+ if suffix: # Check if suffix is not empty
+ stringified = stringified + suffix # Add the suffix
+
+ return (stringified,)
+
+class Sleep:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "input": (any, {}),
+ "minutes": ("INT", {"default": 0, "min": 0, "max": 1439}),
+ "seconds": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 59.99, "step": 0.01}),
+ },
+ }
+ RETURN_TYPES = (any,)
+ FUNCTION = "sleepdelay"
+ CATEGORY = "KJNodes/misc"
+ DESCRIPTION = """
+Delays the execution for the input amount of time.
+"""
+
+ def sleepdelay(self, input, minutes, seconds):
+ total_seconds = minutes * 60 + seconds
+ time.sleep(total_seconds)
+ return input,
+
+class EmptyLatentImagePresets:
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "dimensions": (
+ [
+ '512 x 512 (1:1)',
+ '768 x 512 (1.5:1)',
+ '960 x 512 (1.875:1)',
+ '1024 x 512 (2:1)',
+ '1024 x 576 (1.778:1)',
+ '1536 x 640 (2.4:1)',
+ '1344 x 768 (1.75:1)',
+ '1216 x 832 (1.46:1)',
+ '1152 x 896 (1.286:1)',
+ '1024 x 1024 (1:1)',
+ ],
+ {
+ "default": '512 x 512 (1:1)'
+ }),
+
+ "invert": ("BOOLEAN", {"default": False}),
+ "batch_size": ("INT", {
+ "default": 1,
+ "min": 1,
+ "max": 4096
+ }),
+ },
+ }
+
+ RETURN_TYPES = ("LATENT", "INT", "INT")
+ RETURN_NAMES = ("Latent", "Width", "Height")
+ FUNCTION = "generate"
+ CATEGORY = "KJNodes"
+
+ def generate(self, dimensions, invert, batch_size):
+ from nodes import EmptyLatentImage
+ result = [x.strip() for x in dimensions.split('x')]
+
+ # Remove the aspect ratio part
+ result[0] = result[0].split('(')[0].strip()
+ result[1] = result[1].split('(')[0].strip()
+
+ if invert:
+ width = int(result[1].split(' ')[0])
+ height = int(result[0])
+ else:
+ width = int(result[0])
+ height = int(result[1].split(' ')[0])
+ latent = EmptyLatentImage().generate(width, height, batch_size)[0]
+
+ return (latent, int(width), int(height),)
+
+class EmptyLatentImageCustomPresets:
+ @classmethod
+ def INPUT_TYPES(cls):
+ with open(os.path.join(script_directory, 'custom_dimensions.json')) as f:
+ dimensions_dict = json.load(f)
+ return {
+ "required": {
+ "dimensions": (
+ [f"{d['label']} - {d['value']}" for d in dimensions_dict],
+ ),
+
+ "invert": ("BOOLEAN", {"default": False}),
+ "batch_size": ("INT", {
+ "default": 1,
+ "min": 1,
+ "max": 4096
+ }),
+ },
+ }
+
+ RETURN_TYPES = ("LATENT", "INT", "INT")
+ RETURN_NAMES = ("Latent", "Width", "Height")
+ FUNCTION = "generate"
+ CATEGORY = "KJNodes"
+ DESCRIPTION = """
+Generates an empty latent image with the specified dimensions.
+The choices are loaded from 'custom_dimensions.json' in the nodes folder.
+"""
+
+ def generate(self, dimensions, invert, batch_size):
+ from nodes import EmptyLatentImage
+ # Split the string into label and value
+ label, value = dimensions.split(' - ')
+ # Split the value into width and height
+ width, height = [x.strip() for x in value.split('x')]
+
+ if invert:
+ width, height = height, width
+
+ latent = EmptyLatentImage().generate(int(width), int(height), batch_size)[0]
+
+ return (latent, int(width), int(height),)
+
+class WidgetToString:
+ @classmethod
+ def IS_CHANGED(cls, **kwargs):
+ return float("NaN")
+
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "id": ("INT", {"default": 0}),
+ "widget_name": ("STRING", {"multiline": False}),
+ "return_all": ("BOOLEAN", {"default": False}),
+ },
+ "optional": {
+ "any_input": (any, {}),
+ "node_title": ("STRING", {"multiline": False}),
+ },
+ "hidden": {"extra_pnginfo": "EXTRA_PNGINFO",
+ "prompt": "PROMPT",
+ "unique_id": "UNIQUE_ID",},
+ }
+
+ RETURN_TYPES = ("STRING", )
+ FUNCTION = "get_widget_value"
+ CATEGORY = "KJNodes/text"
+ DESCRIPTION = """
+Selects a node and its specified widget and outputs the value as a string.
+If no node id or title is provided, the node connected to 'any_input' is used instead.
+To see node ids, enable node id display from the Manager badge menu.
+Alternatively you can search by the node title. Node titles ONLY exist if they
+have been manually edited!
+The 'any_input' is required to make sure the node you want the value from exists in the workflow.
+"""
+
+ def get_widget_value(self, id, widget_name, extra_pnginfo, prompt, unique_id, return_all=False, any_input=None, node_title=""):
+ workflow = extra_pnginfo["workflow"]
+ #print(json.dumps(workflow, indent=4))
+ results = []
+ node_id = None # Initialize node_id to handle cases where no match is found
+ link_id = None
+ link_to_node_map = {}
+
+ for node in workflow["nodes"]:
+ if node_title:
+ if "title" in node:
+ if node["title"] == node_title:
+ node_id = node["id"]
+ break
+ else:
+ print("Node title not found.")
+ elif id != 0:
+ if node["id"] == id:
+ node_id = id
+ break
+ elif any_input is not None:
+ if node["type"] == "WidgetToString" and node["id"] == int(unique_id) and not link_id:
+ for node_input in node["inputs"]:
+ if node_input["name"] == "any_input":
+ link_id = node_input["link"]
+
+ # Construct a map of links to node IDs for future reference
+ node_outputs = node.get("outputs", None)
+ if not node_outputs:
+ continue
+ for output in node_outputs:
+ node_links = output.get("links", None)
+ if not node_links:
+ continue
+ for link in node_links:
+ link_to_node_map[link] = node["id"]
+ if link_id and link == link_id:
+ break
+
+ if link_id:
+ node_id = link_to_node_map.get(link_id, None)
+
+ if node_id is None:
+ raise ValueError("No matching node found for the given title or id")
+
+ values = prompt[str(node_id)]
+ if "inputs" in values:
+ if return_all:
+ results.append(', '.join(f'{k}: {str(v)}' for k, v in values["inputs"].items()))
+ elif widget_name in values["inputs"]:
+ v = str(values["inputs"][widget_name]) # Convert to string here
+ return (v, )
+ else:
+ raise NameError(f"Widget not found: {node_id}.{widget_name}")
+ if not results:
+ raise NameError(f"Node not found: {node_id}")
+ return (', '.join(results).strip(', '), )
+
+class DummyOut:
+
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "any_input": (any, {}),
+ }
+ }
+
+ RETURN_TYPES = (any,)
+ FUNCTION = "dummy"
+ CATEGORY = "KJNodes/misc"
+ OUTPUT_NODE = True
+ DESCRIPTION = """
+Does nothing, used to trigger generic workflow output.
+A way to get previews in the UI without saving anything to disk.
+"""
+
+ def dummy(self, any_input):
+ return (any_input,)
+
+class FlipSigmasAdjusted:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required":
+ {"sigmas": ("SIGMAS", ),
+ "divide_by_last_sigma": ("BOOLEAN", {"default": False}),
+ "divide_by": ("FLOAT", {"default": 1,"min": 1, "max": 255, "step": 0.01}),
+ "offset_by": ("INT", {"default": 1,"min": -100, "max": 100, "step": 1}),
+ }
+ }
+ RETURN_TYPES = ("SIGMAS", "STRING",)
+ RETURN_NAMES = ("SIGMAS", "sigmas_string",)
+ CATEGORY = "KJNodes/noise"
+ FUNCTION = "get_sigmas_adjusted"
+
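+ # Reverses the incoming sigma schedule, shifts it by offset_by steps (padding
+ # out-of-range entries with 0.0001), optionally divides by the last sigma,
+ # and finally divides the whole schedule by divide_by.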
+ def get_sigmas_adjusted(self, sigmas, divide_by_last_sigma, divide_by, offset_by):
+
+ sigmas = sigmas.flip(0)
+ if sigmas[0] == 0:
+ sigmas[0] = 0.0001
+ adjusted_sigmas = sigmas.clone()
+ #offset sigma
+ for i in range(1, len(sigmas)):
+ offset_index = i - offset_by
+ if 0 <= offset_index < len(sigmas):
+ adjusted_sigmas[i] = sigmas[offset_index]
+ else:
+ adjusted_sigmas[i] = 0.0001
+ if adjusted_sigmas[0] == 0:
+ adjusted_sigmas[0] = 0.0001
+ if divide_by_last_sigma:
+ adjusted_sigmas = adjusted_sigmas / adjusted_sigmas[-1]
+
+ sigma_np_array = adjusted_sigmas.numpy()
+ array_string = np.array2string(sigma_np_array, precision=2, separator=', ', threshold=np.inf)
+ adjusted_sigmas = adjusted_sigmas / divide_by
+ return (adjusted_sigmas, array_string,)
+
+class CustomSigmas:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required":
+ {
+ "sigmas_string" :("STRING", {"default": "14.615, 6.475, 3.861, 2.697, 1.886, 1.396, 0.963, 0.652, 0.399, 0.152, 0.029","multiline": True}),
+ "interpolate_to_steps": ("INT", {"default": 10,"min": 0, "max": 255, "step": 1}),
+ }
+ }
+ RETURN_TYPES = ("SIGMAS",)
+ RETURN_NAMES = ("SIGMAS",)
+ CATEGORY = "KJNodes/noise"
+ FUNCTION = "customsigmas"
+ DESCRIPTION = """
+Creates a sigmas tensor from a string of comma separated values.
+Examples:
+
+Nvidia's optimized AYS 10 step schedule for SD 1.5:
+14.615, 6.475, 3.861, 2.697, 1.886, 1.396, 0.963, 0.652, 0.399, 0.152, 0.029
+SDXL:
+14.615, 6.315, 3.771, 2.181, 1.342, 0.862, 0.555, 0.380, 0.234, 0.113, 0.029
+SVD:
+700.00, 54.5, 15.886, 7.977, 4.248, 1.789, 0.981, 0.403, 0.173, 0.034, 0.002
+"""
+ def customsigmas(self, sigmas_string, interpolate_to_steps):
+ sigmas_list = sigmas_string.split(', ')
+ sigmas_float_list = [float(sigma) for sigma in sigmas_list]
+ sigmas_tensor = torch.FloatTensor(sigmas_float_list)
+ if len(sigmas_tensor) != interpolate_to_steps + 1:
+ sigmas_tensor = self.loglinear_interp(sigmas_tensor, interpolate_to_steps + 1)
+ sigmas_tensor[-1] = 0
+ return (sigmas_tensor.float(),)
+
+ def loglinear_interp(self, t_steps, num_steps):
+ """
+ Performs log-linear interpolation of a given array of decreasing numbers.
+ """
+ t_steps_np = t_steps.numpy()
+
+ xs = np.linspace(0, 1, len(t_steps_np))
+ ys = np.log(t_steps_np[::-1])
+
+ new_xs = np.linspace(0, 1, num_steps)
+ new_ys = np.interp(new_xs, xs, ys)
+
+ interped_ys = np.exp(new_ys)[::-1].copy()
+ interped_ys_tensor = torch.tensor(interped_ys)
+ return interped_ys_tensor
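+ # Worked example (values computed by hand, shown for illustration only):
+ # loglinear_interp(torch.FloatTensor([16.0, 4.0, 1.0]), 5) interpolates evenly
+ # in log-space and returns approximately [16.0, 8.0, 4.0, 2.0, 1.0].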
+
+
+class InjectNoiseToLatent:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "latents":("LATENT",),
+ "strength": ("FLOAT", {"default": 0.1, "min": 0.0, "max": 200.0, "step": 0.0001}),
+ "noise": ("LATENT",),
+ "normalize": ("BOOLEAN", {"default": False}),
+ "average": ("BOOLEAN", {"default": False}),
+ },
+ "optional":{
+ "mask": ("MASK", ),
+ "mix_randn_amount": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.001}),
+ "seed": ("INT", {"default": 123,"min": 0, "max": 0xffffffffffffffff, "step": 1}),
+ }
+ }
+
+ RETURN_TYPES = ("LATENT",)
+ FUNCTION = "injectnoise"
+ CATEGORY = "KJNodes/noise"
+
+ def injectnoise(self, latents, strength, noise, normalize, average, mix_randn_amount=0, seed=None, mask=None):
+ samples = latents.copy()
+ if latents["samples"].shape != noise["samples"].shape:
+ raise ValueError("InjectNoiseToLatent: Latent and noise must have the same shape")
+ if average:
+ noised = (samples["samples"].clone() + noise["samples"].clone()) / 2
+ else:
+ noised = samples["samples"].clone() + noise["samples"].clone() * strength
+ if normalize:
+ noised = noised / noised.std()
+ if mask is not None:
+ mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(noised.shape[2], noised.shape[3]), mode="bilinear")
+ mask = mask.expand((-1,noised.shape[1],-1,-1))
+ if mask.shape[0] < noised.shape[0]:
+ mask = mask.repeat((noised.shape[0] -1) // mask.shape[0] + 1, 1, 1, 1)[:noised.shape[0]]
+ noised = mask * noised + (1-mask) * latents["samples"]
+ if mix_randn_amount > 0:
+ generator = torch.manual_seed(seed) if seed is not None else None
+ rand_noise = torch.randn(noised.size(), dtype=noised.dtype, layout=noised.layout, generator=generator, device="cpu")
+ noised = noised + (mix_randn_amount * rand_noise)
+ samples["samples"] = noised
+ return (samples,)
+
+class SoundReactive:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "sound_level": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 99999, "step": 0.01}),
+ "start_range_hz": ("INT", {"default": 150, "min": 0, "max": 9999, "step": 1}),
+ "end_range_hz": ("INT", {"default": 2000, "min": 0, "max": 9999, "step": 1}),
+ "multiplier": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 99999, "step": 0.01}),
+ "smoothing_factor": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
+ "normalize": ("BOOLEAN", {"default": False}),
+ },
+ }
+
+ RETURN_TYPES = ("FLOAT","INT",)
+ RETURN_NAMES =("sound_level", "sound_level_int",)
+ FUNCTION = "react"
+ CATEGORY = "KJNodes/audio"
+ DESCRIPTION = """
+Reacts to the sound level of the input.
+Uses your browser's sound input options and requires microphone access.
+Meant to be used with realtime diffusion with autoqueue enabled.
+"""
+
+ def react(self, sound_level, start_range_hz, end_range_hz, smoothing_factor, multiplier, normalize):
+
+ sound_level *= multiplier
+
+ if normalize:
+ sound_level /= 255
+
+ sound_level_int = int(sound_level)
+ return (sound_level, sound_level_int, )
+
+class GenerateNoise:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}),
+ "height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}),
+ "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
+ "seed": ("INT", {"default": 123,"min": 0, "max": 0xffffffffffffffff, "step": 1}),
+ "multiplier": ("FLOAT", {"default": 1.0,"min": 0.0, "max": 4096, "step": 0.01}),
+ "constant_batch_noise": ("BOOLEAN", {"default": False}),
+ "normalize": ("BOOLEAN", {"default": False}),
+ },
+ "optional": {
+ "model": ("MODEL", ),
+ "sigmas": ("SIGMAS", ),
+ "latent_channels": (
+ [ '4',
+ '16',
+ ],
+ ),
+ }
+ }
+
+ RETURN_TYPES = ("LATENT",)
+ FUNCTION = "generatenoise"
+ CATEGORY = "KJNodes/noise"
+ DESCRIPTION = """
+Generates noise for injection or to be used as empty latents on samplers with add_noise off.
+"""
+
+ def generatenoise(self, batch_size, width, height, seed, multiplier, constant_batch_noise, normalize, sigmas=None, model=None, latent_channels=4):
+
+ generator = torch.manual_seed(seed)
+ noise = torch.randn([batch_size, int(latent_channels), height // 8, width // 8], dtype=torch.float32, layout=torch.strided, generator=generator, device="cpu")
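+ # If a sigma schedule and model are supplied, the block below scales the noise by
+ # the schedule's total span (sigmas[0] - sigmas[-1]) divided by the model's
+ # latent_format.scale_factor, presumably so it matches the amount of noise a
+ # sampler with add_noise enabled would have applied.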
+ if sigmas is not None:
+ sigma = sigmas[0] - sigmas[-1]
+ sigma /= model.model.latent_format.scale_factor
+ noise *= sigma
+
+ noise *= multiplier
+
+ if normalize:
+ noise = noise / noise.std()
+ if constant_batch_noise:
+ noise = noise[0].repeat(batch_size, 1, 1, 1)
+
+
+ return ({"samples":noise}, )
+
+def camera_embeddings(elevation, azimuth):
+ elevation = torch.as_tensor([elevation])
+ azimuth = torch.as_tensor([azimuth])
+ embeddings = torch.stack(
+ [
+ torch.deg2rad(
+ (90 - elevation) - (90)
+ ), # Zero123 polar is 90-elevation
+ torch.sin(torch.deg2rad(azimuth)),
+ torch.cos(torch.deg2rad(azimuth)),
+ torch.deg2rad(
+ 90 - torch.full_like(elevation, 0)
+ ),
+ ], dim=-1).unsqueeze(1)
+
+ return embeddings
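+ # Illustrative check (computed by hand, not from the original source):
+ # camera_embeddings(10, 90) returns a (1, 1, 4) tensor of approximately
+ # [-0.1745, 1.0, 0.0, 1.5708], i.e. [deg2rad(-10), sin(90 deg), cos(90 deg), deg2rad(90)].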
+
+def interpolate_angle(start, end, fraction):
+ # Calculate the difference in angles and adjust for wraparound if necessary
+ diff = (end - start + 540) % 360 - 180
+ # Apply fraction to the difference
+ interpolated = start + fraction * diff
+ # Normalize the result to be within the range of -180 to 180
+ return (interpolated + 180) % 360 - 180
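+ # Example (computed by hand): interpolate_angle(170, -170, 0.5) first finds the
+ # wrapped difference of +20 degrees, then returns -180.0, i.e. it crosses the
+ # +/-180 boundary instead of sweeping 340 degrees the long way around.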
+
+
+class StableZero123_BatchSchedule:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "clip_vision": ("CLIP_VISION",),
+ "init_image": ("IMAGE",),
+ "vae": ("VAE",),
+ "width": ("INT", {"default": 256, "min": 16, "max": MAX_RESOLUTION, "step": 8}),
+ "height": ("INT", {"default": 256, "min": 16, "max": MAX_RESOLUTION, "step": 8}),
+ "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
+ "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out"],),
+ "azimuth_points_string": ("STRING", {"default": "0:(0.0),\n7:(1.0),\n15:(0.0)\n", "multiline": True}),
+ "elevation_points_string": ("STRING", {"default": "0:(0.0),\n7:(0.0),\n15:(0.0)\n", "multiline": True}),
+ }}
+
+ RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT")
+ RETURN_NAMES = ("positive", "negative", "latent")
+ FUNCTION = "encode"
+ CATEGORY = "KJNodes/experimental"
+
+ def encode(self, clip_vision, init_image, vae, width, height, batch_size, azimuth_points_string, elevation_points_string, interpolation):
+ output = clip_vision.encode_image(init_image)
+ pooled = output.image_embeds.unsqueeze(0)
+ pixels = common_upscale(init_image.movedim(-1,1), width, height, "bilinear", "center").movedim(1,-1)
+ encode_pixels = pixels[:,:,:,:3]
+ t = vae.encode(encode_pixels)
+
+ def ease_in(t):
+ return t * t
+ def ease_out(t):
+ return 1 - (1 - t) * (1 - t)
+ def ease_in_out(t):
+ return 3 * t * t - 2 * t * t * t
+
+ # Parse the azimuth input string into a list of tuples
+ azimuth_points = []
+ azimuth_points_string = azimuth_points_string.rstrip(',\n')
+ for point_str in azimuth_points_string.split(','):
+ frame_str, azimuth_str = point_str.split(':')
+ frame = int(frame_str.strip())
+ azimuth = float(azimuth_str.strip()[1:-1])
+ azimuth_points.append((frame, azimuth))
+ # Sort the points by frame number
+ azimuth_points.sort(key=lambda x: x[0])
+
+ # Parse the elevation input string into a list of tuples
+ elevation_points = []
+ elevation_points_string = elevation_points_string.rstrip(',\n')
+ for point_str in elevation_points_string.split(','):
+ frame_str, elevation_str = point_str.split(':')
+ frame = int(frame_str.strip())
+ elevation_val = float(elevation_str.strip()[1:-1])
+ elevation_points.append((frame, elevation_val))
+ # Sort the points by frame number
+ elevation_points.sort(key=lambda x: x[0])
+
+ # Index of the next point to interpolate towards
+ next_point = 1
+ next_elevation_point = 1
+
+ positive_cond_out = []
+ positive_pooled_out = []
+ negative_cond_out = []
+ negative_pooled_out = []
+
+ #azimuth interpolation
+ for i in range(batch_size):
+ # Find the interpolated azimuth for the current frame
+ while next_point < len(azimuth_points) and i >= azimuth_points[next_point][0]:
+ next_point += 1
+ # If next_point is equal to the length of points, we've gone past the last point
+ if next_point == len(azimuth_points):
+ next_point -= 1 # Set next_point to the last index of points
+ prev_point = max(next_point - 1, 0) # Ensure prev_point is not less than 0
+
+ # Calculate fraction
+ if azimuth_points[next_point][0] != azimuth_points[prev_point][0]: # Prevent division by zero
+ fraction = (i - azimuth_points[prev_point][0]) / (azimuth_points[next_point][0] - azimuth_points[prev_point][0])
+ if interpolation == "ease_in":
+ fraction = ease_in(fraction)
+ elif interpolation == "ease_out":
+ fraction = ease_out(fraction)
+ elif interpolation == "ease_in_out":
+ fraction = ease_in_out(fraction)
+
+ # Use the new interpolate_angle function
+ interpolated_azimuth = interpolate_angle(azimuth_points[prev_point][1], azimuth_points[next_point][1], fraction)
+ else:
+ interpolated_azimuth = azimuth_points[prev_point][1]
+ # Interpolate the elevation
+ next_elevation_point = 1
+ while next_elevation_point < len(elevation_points) and i >= elevation_points[next_elevation_point][0]:
+ next_elevation_point += 1
+ if next_elevation_point == len(elevation_points):
+ next_elevation_point -= 1
+ prev_elevation_point = max(next_elevation_point - 1, 0)
+
+ if elevation_points[next_elevation_point][0] != elevation_points[prev_elevation_point][0]:
+ fraction = (i - elevation_points[prev_elevation_point][0]) / (elevation_points[next_elevation_point][0] - elevation_points[prev_elevation_point][0])
+ if interpolation == "ease_in":
+ fraction = ease_in(fraction)
+ elif interpolation == "ease_out":
+ fraction = ease_out(fraction)
+ elif interpolation == "ease_in_out":
+ fraction = ease_in_out(fraction)
+
+ interpolated_elevation = interpolate_angle(elevation_points[prev_elevation_point][1], elevation_points[next_elevation_point][1], fraction)
+ else:
+ interpolated_elevation = elevation_points[prev_elevation_point][1]
+
+ cam_embeds = camera_embeddings(interpolated_elevation, interpolated_azimuth)
+ cond = torch.cat([pooled, cam_embeds.repeat((pooled.shape[0], 1, 1))], dim=-1)
+
+ positive_pooled_out.append(t)
+ positive_cond_out.append(cond)
+ negative_pooled_out.append(torch.zeros_like(t))
+ negative_cond_out.append(torch.zeros_like(pooled))
+
+ # Concatenate the conditions and pooled outputs
+ final_positive_cond = torch.cat(positive_cond_out, dim=0)
+ final_positive_pooled = torch.cat(positive_pooled_out, dim=0)
+ final_negative_cond = torch.cat(negative_cond_out, dim=0)
+ final_negative_pooled = torch.cat(negative_pooled_out, dim=0)
+
+ # Structure the final output
+ final_positive = [[final_positive_cond, {"concat_latent_image": final_positive_pooled}]]
+ final_negative = [[final_negative_cond, {"concat_latent_image": final_negative_pooled}]]
+
+ latent = torch.zeros([batch_size, 4, height // 8, width // 8])
+ return (final_positive, final_negative, {"samples": latent})
+
+def linear_interpolate(start, end, fraction):
+ return start + (end - start) * fraction
+
+class SV3D_BatchSchedule:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "clip_vision": ("CLIP_VISION",),
+ "init_image": ("IMAGE",),
+ "vae": ("VAE",),
+ "width": ("INT", {"default": 576, "min": 16, "max": MAX_RESOLUTION, "step": 8}),
+ "height": ("INT", {"default": 576, "min": 16, "max": MAX_RESOLUTION, "step": 8}),
+ "batch_size": ("INT", {"default": 21, "min": 1, "max": 4096}),
+ "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out"],),
+ "azimuth_points_string": ("STRING", {"default": "0:(0.0),\n9:(180.0),\n20:(360.0)\n", "multiline": True}),
+ "elevation_points_string": ("STRING", {"default": "0:(0.0),\n9:(0.0),\n20:(0.0)\n", "multiline": True}),
+ }}
+
+ RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT")
+ RETURN_NAMES = ("positive", "negative", "latent")
+ FUNCTION = "encode"
+ CATEGORY = "KJNodes/experimental"
+ DESCRIPTION = """
+Allows scheduling of the azimuth and elevation conditions for SV3D.
+Note that SV3D is still a video model, so the schedule always needs to move forward.
+https://huggingface.co/stabilityai/sv3d
+"""
+
+ def encode(self, clip_vision, init_image, vae, width, height, batch_size, azimuth_points_string, elevation_points_string, interpolation):
+ output = clip_vision.encode_image(init_image)
+ pooled = output.image_embeds.unsqueeze(0)
+ pixels = common_upscale(init_image.movedim(-1,1), width, height, "bilinear", "center").movedim(1,-1)
+ encode_pixels = pixels[:,:,:,:3]
+ t = vae.encode(encode_pixels)
+
+ def ease_in(t):
+ return t * t
+ def ease_out(t):
+ return 1 - (1 - t) * (1 - t)
+ def ease_in_out(t):
+ return 3 * t * t - 2 * t * t * t
+
+ # Parse the azimuth input string into a list of tuples
+ azimuth_points = []
+ azimuth_points_string = azimuth_points_string.rstrip(',\n')
+ for point_str in azimuth_points_string.split(','):
+ frame_str, azimuth_str = point_str.split(':')
+ frame = int(frame_str.strip())
+ azimuth = float(azimuth_str.strip()[1:-1])
+ azimuth_points.append((frame, azimuth))
+ # Sort the points by frame number
+ azimuth_points.sort(key=lambda x: x[0])
+
+ # Parse the elevation input string into a list of tuples
+ elevation_points = []
+ elevation_points_string = elevation_points_string.rstrip(',\n')
+ for point_str in elevation_points_string.split(','):
+ frame_str, elevation_str = point_str.split(':')
+ frame = int(frame_str.strip())
+ elevation_val = float(elevation_str.strip()[1:-1])
+ elevation_points.append((frame, elevation_val))
+ # Sort the points by frame number
+ elevation_points.sort(key=lambda x: x[0])
+
+ # Index of the next point to interpolate towards
+ next_point = 1
+ next_elevation_point = 1
+ elevations = []
+ azimuths = []
+ # For azimuth interpolation
+ for i in range(batch_size):
+ # Find the interpolated azimuth for the current frame
+ while next_point < len(azimuth_points) and i >= azimuth_points[next_point][0]:
+ next_point += 1
+ if next_point == len(azimuth_points):
+ next_point -= 1
+ prev_point = max(next_point - 1, 0)
+
+ if azimuth_points[next_point][0] != azimuth_points[prev_point][0]:
+ fraction = (i - azimuth_points[prev_point][0]) / (azimuth_points[next_point][0] - azimuth_points[prev_point][0])
+ # Apply the ease function to the fraction
+ if interpolation == "ease_in":
+ fraction = ease_in(fraction)
+ elif interpolation == "ease_out":
+ fraction = ease_out(fraction)
+ elif interpolation == "ease_in_out":
+ fraction = ease_in_out(fraction)
+
+ interpolated_azimuth = linear_interpolate(azimuth_points[prev_point][1], azimuth_points[next_point][1], fraction)
+ else:
+ interpolated_azimuth = azimuth_points[prev_point][1]
+
+ # Interpolate the elevation
+ next_elevation_point = 1
+ while next_elevation_point < len(elevation_points) and i >= elevation_points[next_elevation_point][0]:
+ next_elevation_point += 1
+ if next_elevation_point == len(elevation_points):
+ next_elevation_point -= 1
+ prev_elevation_point = max(next_elevation_point - 1, 0)
+
+ if elevation_points[next_elevation_point][0] != elevation_points[prev_elevation_point][0]:
+ fraction = (i - elevation_points[prev_elevation_point][0]) / (elevation_points[next_elevation_point][0] - elevation_points[prev_elevation_point][0])
+ # Apply the ease function to the fraction
+ if interpolation == "ease_in":
+ fraction = ease_in(fraction)
+ elif interpolation == "ease_out":
+ fraction = ease_out(fraction)
+ elif interpolation == "ease_in_out":
+ fraction = ease_in_out(fraction)
+
+ interpolated_elevation = linear_interpolate(elevation_points[prev_elevation_point][1], elevation_points[next_elevation_point][1], fraction)
+ else:
+ interpolated_elevation = elevation_points[prev_elevation_point][1]
+
+ azimuths.append(interpolated_azimuth)
+ elevations.append(interpolated_elevation)
+
+ #print("azimuths", azimuths)
+ #print("elevations", elevations)
+
+ # Structure the final output
+ final_positive = [[pooled, {"concat_latent_image": t, "elevation": elevations, "azimuth": azimuths}]]
+ final_negative = [[torch.zeros_like(pooled), {"concat_latent_image": torch.zeros_like(t),"elevation": elevations, "azimuth": azimuths}]]
+
+ latent = torch.zeros([batch_size, 4, height // 8, width // 8])
+ return (final_positive, final_negative, {"samples": latent})
+
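+ # Worked example for the default SV3D schedule (computed by hand): with
+ # batch_size=21 and azimuth points 0:(0.0), 9:(180.0), 20:(360.0) using linear
+ # interpolation, frame 5 falls between frames 0 and 9, so its azimuth is
+ # 0.0 + (180.0 - 0.0) * 5/9 = 100.0 degrees; elevation stays 0.0 throughout.
+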
+class LoadResAdapterNormalization:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "model": ("MODEL",),
+ "resadapter_path": (folder_paths.get_filename_list("checkpoints"), )
+ }
+ }
+
+ RETURN_TYPES = ("MODEL",)
+ FUNCTION = "load_res_adapter"
+ CATEGORY = "KJNodes/experimental"
+
+ def load_res_adapter(self, model, resadapter_path):
+ print("ResAdapter: Checking ResAdapter path")
+ resadapter_full_path = folder_paths.get_full_path("checkpoints", resadapter_path)
+ if not os.path.exists(resadapter_full_path):
+ raise Exception("Invalid model path")
+ else:
+ print("ResAdapter: Loading ResAdapter normalization weights")
+ from comfy.utils import load_torch_file
+ prefix_to_remove = 'diffusion_model.'
+ model_clone = model.clone()
+ norm_state_dict = load_torch_file(resadapter_full_path)
+ new_values = {key[len(prefix_to_remove):]: value for key, value in norm_state_dict.items() if key.startswith(prefix_to_remove)}
+ print("ResAdapter: Attempting to add patches with ResAdapter weights")
+ try:
+ for key in model.model.diffusion_model.state_dict().keys():
+ if key in new_values:
+ original_tensor = model.model.diffusion_model.state_dict()[key]
+ new_tensor = new_values[key].to(model.model.diffusion_model.dtype)
+ if original_tensor.shape == new_tensor.shape:
+ model_clone.add_object_patch(f"diffusion_model.{key}.data", new_tensor)
+ else:
+ print("ResAdapter: No match for key: ",key)
+ except:
+ raise Exception("Could not patch model, this way of patching was added to ComfyUI on March 3rd 2024, is your ComfyUI up to date?")
+ print("ResAdapter: Added resnet normalization patches")
+ return (model_clone, )
+
+class Superprompt:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "instruction_prompt": ("STRING", {"default": 'Expand the following prompt to add more detail', "multiline": True}),
+ "prompt": ("STRING", {"default": '', "multiline": True, "forceInput": True}),
+ "max_new_tokens": ("INT", {"default": 128, "min": 1, "max": 4096, "step": 1}),
+ }
+ }
+
+ RETURN_TYPES = ("STRING",)
+ FUNCTION = "process"
+ CATEGORY = "KJNodes/text"
+ DESCRIPTION = """
+# SuperPrompt
+A T5 model fine-tuned on the SuperPrompt dataset for
+upsampling text prompts to more detailed descriptions.
+Meant to be used as a pre-generation step for text-to-image
+models that benefit from more detailed prompts.
+https://huggingface.co/roborovski/superprompt-v1
+"""
+
+ def process(self, instruction_prompt, prompt, max_new_tokens):
+ device = model_management.get_torch_device()
+ from transformers import T5Tokenizer, T5ForConditionalGeneration
+
+ checkpoint_path = os.path.join(script_directory, "models","superprompt-v1")
+ if not os.path.exists(checkpoint_path):
+ print(f"Downloading model to: {checkpoint_path}")
+ from huggingface_hub import snapshot_download
+ snapshot_download(repo_id="roborovski/superprompt-v1",
+ local_dir=checkpoint_path,
+ local_dir_use_symlinks=False)
+ tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-small", legacy=False)
+
+ model = T5ForConditionalGeneration.from_pretrained(checkpoint_path, device_map=device)
+ model.to(device)
+ input_text = instruction_prompt + ": " + prompt
+
+ input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)
+ outputs = model.generate(input_ids, max_new_tokens=max_new_tokens)
+ out = tokenizer.decode(outputs[0])
+ out = out.replace('<pad>', '')
+ out = out.replace('</s>', '')
+
+ return (out, )
+
+
+class CameraPoseVisualizer:
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "pose_file_path": ("STRING", {"default": '', "multiline": False}),
+ "base_xval": ("FLOAT", {"default": 0.2,"min": 0, "max": 100, "step": 0.01}),
+ "zval": ("FLOAT", {"default": 0.3,"min": 0, "max": 100, "step": 0.01}),
+ "scale": ("FLOAT", {"default": 1.0,"min": 0.01, "max": 10.0, "step": 0.01}),
+ "use_exact_fx": ("BOOLEAN", {"default": False}),
+ "relative_c2w": ("BOOLEAN", {"default": True}),
+ "use_viewer": ("BOOLEAN", {"default": False}),
+ },
+ "optional": {
+ "cameractrl_poses": ("CAMERACTRL_POSES", {"default": None}),
+ }
+ }
+
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "plot"
+ CATEGORY = "KJNodes/misc"
+ DESCRIPTION = """
+Visualizes the camera poses, from Animatediff-Evolved CameraCtrl Pose
+or a .txt file with RealEstate camera intrinsics and coordinates, in a 3D plot.
+"""
+
+ def plot(self, pose_file_path, scale, base_xval, zval, use_exact_fx, relative_c2w, use_viewer, cameractrl_poses=None):
+ import matplotlib as mpl
+ import matplotlib.pyplot as plt
+ from torchvision.transforms import ToTensor
+
+ x_min = -2.0 * scale
+ x_max = 2.0 * scale
+ y_min = -2.0 * scale
+ y_max = 2.0 * scale
+ z_min = -2.0 * scale
+ z_max = 2.0 * scale
+ plt.rcParams['text.color'] = '#999999'
+ self.fig = plt.figure(figsize=(18, 7))
+ self.fig.patch.set_facecolor('#353535')
+ self.ax = self.fig.add_subplot(projection='3d')
+ self.ax.set_facecolor('#353535') # Set the background color here
+ self.ax.grid(color='#999999', linestyle='-', linewidth=0.5)
+ self.plotly_data = None # plotly data traces
+ self.ax.set_aspect("auto")
+ self.ax.set_xlim(x_min, x_max)
+ self.ax.set_ylim(y_min, y_max)
+ self.ax.set_zlim(z_min, z_max)
+ self.ax.set_xlabel('x', color='#999999')
+ self.ax.set_ylabel('y', color='#999999')
+ self.ax.set_zlabel('z', color='#999999')
+ for text in self.ax.get_xticklabels() + self.ax.get_yticklabels() + self.ax.get_zticklabels():
+ text.set_color('#999999')
+ print('initialize camera pose visualizer')
+
+ if pose_file_path != "":
+ with open(pose_file_path, 'r') as f:
+ poses = f.readlines()
+ w2cs = [np.asarray([float(p) for p in pose.strip().split(' ')[7:]]).reshape(3, 4) for pose in poses[1:]]
+ fxs = [float(pose.strip().split(' ')[1]) for pose in poses[1:]]
+ #print(poses)
+ elif cameractrl_poses is not None:
+ poses = cameractrl_poses
+ w2cs = [np.array(pose[7:]).reshape(3, 4) for pose in cameractrl_poses]
+ fxs = [pose[1] for pose in cameractrl_poses]
+ else:
+ raise ValueError("Please provide either pose_file_path or cameractrl_poses")
+
+ total_frames = len(w2cs)
+ transform_matrix = np.asarray([[1, 0, 0, 0], [0, 0, 1, 0], [0, -1, 0, 0], [0, 0, 0, 1]]).reshape(4, 4)
+ last_row = np.zeros((1, 4))
+ last_row[0, -1] = 1.0
+
+ w2cs = [np.concatenate((w2c, last_row), axis=0) for w2c in w2cs]
+ c2ws = self.get_c2w(w2cs, transform_matrix, relative_c2w)
+
+ for frame_idx, c2w in enumerate(c2ws):
+ self.extrinsic2pyramid(c2w, frame_idx / total_frames, hw_ratio=1/1, base_xval=base_xval,
+ zval=(fxs[frame_idx] if use_exact_fx else zval))
+
+ # Create the colorbar
+ cmap = mpl.cm.rainbow
+ norm = mpl.colors.Normalize(vmin=0, vmax=total_frames)
+ colorbar = self.fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), ax=self.ax, orientation='vertical')
+
+ # Change the colorbar label
+ colorbar.set_label('Frame', color='#999999') # Change the label and its color
+
+ # Change the tick colors
+ colorbar.ax.yaxis.set_tick_params(colors='#999999') # Change the tick color
+
+ # Change the tick frequency
+ # Assuming you want to set the ticks at every 10th frame
+ ticks = np.arange(0, total_frames, 10)
+ colorbar.ax.yaxis.set_ticks(ticks)
+
+ plt.title('')
+ plt.draw()
+ buf = io.BytesIO()
+ plt.savefig(buf, format='png', bbox_inches='tight', pad_inches=0)
+ buf.seek(0)
+ img = Image.open(buf)
+ tensor_img = ToTensor()(img)
+ buf.close()
+ tensor_img = tensor_img.permute(1, 2, 0).unsqueeze(0)
+ if use_viewer:
+ time.sleep(1)
+ plt.show()
+ return (tensor_img,)
+
+ def extrinsic2pyramid(self, extrinsic, color_map='red', hw_ratio=1/1, base_xval=1, zval=3):
+ import matplotlib.pyplot as plt
+ from mpl_toolkits.mplot3d.art3d import Poly3DCollection
+ vertex_std = np.array([[0, 0, 0, 1],
+ [base_xval, -base_xval * hw_ratio, zval, 1],
+ [base_xval, base_xval * hw_ratio, zval, 1],
+ [-base_xval, base_xval * hw_ratio, zval, 1],
+ [-base_xval, -base_xval * hw_ratio, zval, 1]])
+ vertex_transformed = vertex_std @ extrinsic.T
+ meshes = [[vertex_transformed[0, :-1], vertex_transformed[1, :-1], vertex_transformed[2, :-1]],
+ [vertex_transformed[0, :-1], vertex_transformed[2, :-1], vertex_transformed[3, :-1]],
+ [vertex_transformed[0, :-1], vertex_transformed[3, :-1], vertex_transformed[4, :-1]],
+ [vertex_transformed[0, :-1], vertex_transformed[4, :-1], vertex_transformed[1, :-1]],
+ [vertex_transformed[1, :-1], vertex_transformed[2, :-1], vertex_transformed[3, :-1], vertex_transformed[4, :-1]]]
+
+ color = color_map if isinstance(color_map, str) else plt.cm.rainbow(color_map)
+
+ self.ax.add_collection3d(
+ Poly3DCollection(meshes, facecolors=color, linewidths=0.3, edgecolors=color, alpha=0.25))
+
+ def customize_legend(self, list_label):
+ from matplotlib.patches import Patch
+ import matplotlib.pyplot as plt
+ list_handle = []
+ for idx, label in enumerate(list_label):
+ color = plt.cm.rainbow(idx / len(list_label))
+ patch = Patch(color=color, label=label)
+ list_handle.append(patch)
+ plt.legend(loc='right', bbox_to_anchor=(1.8, 0.5), handles=list_handle)
+
+ def get_c2w(self, w2cs, transform_matrix, relative_c2w):
+ if relative_c2w:
+ target_cam_c2w = np.array([
+ [1, 0, 0, 0],
+ [0, 1, 0, 0],
+ [0, 0, 1, 0],
+ [0, 0, 0, 1]
+ ])
+ abs2rel = target_cam_c2w @ w2cs[0]
+ ret_poses = [target_cam_c2w, ] + [abs2rel @ np.linalg.inv(w2c) for w2c in w2cs[1:]]
+ else:
+ ret_poses = [np.linalg.inv(w2c) for w2c in w2cs]
+ ret_poses = [transform_matrix @ x for x in ret_poses]
+ return np.array(ret_poses, dtype=np.float32)
+
+
+
+class StabilityAPI_SD3:
+
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "prompt": ("STRING", {"multiline": True}),
+ "n_prompt": ("STRING", {"multiline": True}),
+ "seed": ("INT", {"default": 123,"min": 0, "max": 4294967294, "step": 1}),
+ "model": (
+ [
+ 'sd3',
+ 'sd3-turbo',
+ ],
+ {
+ "default": 'sd3'
+ }),
+ "aspect_ratio": (
+ [
+ '1:1',
+ '16:9',
+ '21:9',
+ '2:3',
+ '3:2',
+ '4:5',
+ '5:4',
+ '9:16',
+ '9:21',
+ ],
+ {
+ "default": '1:1'
+ }),
+ "output_format": (
+ [
+ 'png',
+ 'jpeg',
+ ],
+ {
+ "default": 'jpeg'
+ }),
+ },
+ "optional": {
+ "api_key": ("STRING", {"multiline": True}),
+ "image": ("IMAGE",),
+ "img2img_strength": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
+ "disable_metadata": ("BOOLEAN", {"default": True}),
+ },
+ }
+
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "apicall"
+
+ CATEGORY = "KJNodes/experimental"
+ DESCRIPTION = """
+## Calls StabilityAI API
+
+Although you may have multiple keys in your account,
+you should use the same key for all requests to this API.
+
+Get your API key here: https://platform.stability.ai/account/keys
+It is recommended to set the key in the config.json file under this
+node pack's folder.
+# WARNING:
+If the key is entered on the node instead, it may get saved in the image
+metadata even with "disable_metadata" on, if the workflow includes save
+nodes separate from this node.
+
+sd3 requires 6.5 credits per generation
+sd3-turbo requires 4 credits per generation
+
+If no image is provided, mode is set to text-to-image
+
+"""
+
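+ # The config.json mentioned above is read from this node pack's folder and is
+ # assumed to look like the following (the key name comes from the code below,
+ # the value is a placeholder):
+ # {
+ # "sai_api_key": "sk-your-key-here"
+ # }
+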
+ def apicall(self, prompt, n_prompt, model, seed, aspect_ratio, output_format,
+ img2img_strength=0.5, image=None, disable_metadata=True, api_key=""):
+ from comfy.cli_args import args
+ if disable_metadata:
+ args.disable_metadata = True
+ else:
+ args.disable_metadata = False
+
+ import requests
+ from torchvision import transforms
+
+ data = {
+ "mode": "text-to-image",
+ "prompt": prompt,
+ "model": model,
+ "seed": seed,
+ "output_format": output_format
+ }
+
+ if image is not None:
+ image = image.permute(0, 3, 1, 2).squeeze(0)
+ to_pil = transforms.ToPILImage()
+ pil_image = to_pil(image)
+ # Save the PIL Image to a BytesIO object
+ buffer = io.BytesIO()
+ pil_image.save(buffer, format='PNG')
+ buffer.seek(0)
+ files = {"image": ("image.png", buffer, "image/png")}
+
+ data["mode"] = "image-to-image"
+ data["image"] = pil_image
+ data["strength"] = img2img_strength
+ else:
+ data["aspect_ratio"] = aspect_ratio,
+ files = {"none": ''}
+
+ if model != "sd3-turbo":
+ data["negative_prompt"] = n_prompt
+
+ headers={
+ "accept": "image/*"
+ }
+
+ if api_key != "":
+ headers["authorization"] = api_key
+ else:
+ config_file_path = os.path.join(script_directory,"config.json")
+ with open(config_file_path, 'r') as file:
+ config = json.load(file)
+ api_key_from_config = config.get("sai_api_key")
+ headers["authorization"] = api_key_from_config
+
+ response = requests.post(
+ "https://api.stability.ai/v2beta/stable-image/generate/sd3",
+ headers=headers,
+ files=files,
+ data=data,
+ )
+
+ if response.status_code == 200:
+ # Convert the response content to a PIL Image
+ image = Image.open(io.BytesIO(response.content))
+ # Convert the PIL Image to a PyTorch tensor
+ transform = transforms.ToTensor()
+ tensor_image = transform(image)
+ tensor_image = tensor_image.unsqueeze(0)
+ tensor_image = tensor_image.permute(0, 2, 3, 1).cpu().float()
+ return (tensor_image,)
+ else:
+ try:
+ # Attempt to parse the response as JSON
+ error_data = response.json()
+ raise Exception(f"Server error: {error_data}")
+ except json.JSONDecodeError:
+ # If the response is not valid JSON, raise a different exception
+ raise Exception(f"Server error: {response.text}")
+
+class CheckpointPerturbWeights:
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "model": ("MODEL",),
+ "joint_blocks": ("FLOAT", {"default": 0.02, "min": 0.001, "max": 10.0, "step": 0.001}),
+ "final_layer": ("FLOAT", {"default": 0.02, "min": 0.001, "max": 10.0, "step": 0.001}),
+ "rest_of_the_blocks": ("FLOAT", {"default": 0.02, "min": 0.001, "max": 10.0, "step": 0.001}),
+ "seed": ("INT", {"default": 123,"min": 0, "max": 0xffffffffffffffff, "step": 1}),
+ }
+ }
+ RETURN_TYPES = ("MODEL",)
+ FUNCTION = "mod"
+ OUTPUT_NODE = True
+
+ CATEGORY = "KJNodes/experimental"
+
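+ # The mod function below adds zero-mean gaussian noise to every diffusion model
+ # weight, scaled by that tensor's own standard deviation times a per-group
+ # multiplier (joint_blocks, final_layer, or rest_of_the_blocks).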
+ def mod(self, seed, model, joint_blocks, final_layer, rest_of_the_blocks):
+ import copy
+ torch.manual_seed(seed)
+ torch.cuda.manual_seed_all(seed)
+ device = model_management.get_torch_device()
+ model_copy = copy.deepcopy(model)
+ model_copy.model.to(device)
+ keys = model_copy.model.diffusion_model.state_dict().keys()
+
+ perturbed_sd = {}
+ for key in keys:
+ perturbed_sd[key] = model_copy.model.diffusion_model.state_dict()[key]
+
+ pbar = ProgressBar(len(keys))
+ for k in keys:
+ v = perturbed_sd[k]
+ print(f'{k}: {v.std()}')
+ if k.startswith('joint_blocks'):
+ multiplier = joint_blocks
+ elif k.startswith('final_layer'):
+ multiplier = final_layer
+ else:
+ multiplier = rest_of_the_blocks
+ perturbed_sd[k] += torch.normal(torch.zeros_like(v) * v.mean(), torch.ones_like(v) * v.std() * multiplier).to(device)
+ pbar.update(1)
+ model_copy.model.diffusion_model.load_state_dict(perturbed_sd)
+ return model_copy,
+
+class DifferentialDiffusionAdvanced():
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "model": ("MODEL", ),
+ "samples": ("LATENT",),
+ "mask": ("MASK",),
+ "multiplier": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.001}),
+ }}
+ RETURN_TYPES = ("MODEL", "LATENT")
+ FUNCTION = "apply"
+ CATEGORY = "_for_testing"
+ INIT = False
+
+ def apply(self, model, samples, mask, multiplier):
+ self.multiplier = multiplier
+ model = model.clone()
+ model.set_model_denoise_mask_function(self.forward)
+ s = samples.copy()
+ s["noise_mask"] = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1]))
+ return (model, s)
+
+ def forward(self, sigma: torch.Tensor, denoise_mask: torch.Tensor, extra_options: dict):
+ model = extra_options["model"]
+ step_sigmas = extra_options["sigmas"]
+ sigma_to = model.inner_model.model_sampling.sigma_min
+ if step_sigmas[-1] > sigma_to:
+ sigma_to = step_sigmas[-1]
+ sigma_from = step_sigmas[0]
+
+ ts_from = model.inner_model.model_sampling.timestep(sigma_from)
+ ts_to = model.inner_model.model_sampling.timestep(sigma_to)
+ current_ts = model.inner_model.model_sampling.timestep(sigma[0])
+
+ threshold = (current_ts - ts_to) / (ts_from - ts_to) / self.multiplier
+
+ return (denoise_mask >= threshold).to(denoise_mask.dtype)
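+ # Note on the multiplier (reading of the code above): the threshold sweeps from
+ # 1/multiplier down to 0 over the sampling range, so multiplier=1 should match the
+ # stock DifferentialDiffusion behaviour, while larger values lower the starting
+ # threshold and let lower mask values begin denoising earlier.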
+
+class FluxBlockLoraSelect:
+ def __init__(self):
+ self.loaded_lora = None
+
+ @classmethod
+ def INPUT_TYPES(s):
+ arg_dict = {}
+ argument = ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01})
+
+ for i in range(19):
+ arg_dict["double_blocks.{}.".format(i)] = argument
+
+ for i in range(38):
+ arg_dict["single_blocks.{}.".format(i)] = argument
+
+ return {"required": arg_dict}
+
+ RETURN_TYPES = ("SELECTEDBLOCKS", )
+ RETURN_NAMES = ("blocks", )
+ OUTPUT_TOOLTIPS = ("The modified diffusion model.",)
+ FUNCTION = "load_lora"
+
+ CATEGORY = "KJNodes/experimental"
+ DESCRIPTION = "Select individual block alpha values, value of 0 removes the block altogether"
+
+ def load_lora(self, **kwargs):
+ return (kwargs,)
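+ # The returned SELECTEDBLOCKS value is simply the widget dict, e.g. (shortened,
+ # illustrative): {"double_blocks.0.": 1.0, ..., "single_blocks.37.": 0.0}, which
+ # FluxBlockLoraLoader below matches against LoRA key names.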
+
+class FluxBlockLoraLoader:
+ def __init__(self):
+ self.loaded_lora = None
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "model": ("MODEL", {"tooltip": "The diffusion model the LoRA will be applied to."}),
+ "strength_model": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01, "tooltip": "How strongly to modify the diffusion model. This value can be negative."}),
+
+ },
+ "optional": {
+ "lora_name": (folder_paths.get_filename_list("loras"), {"tooltip": "The name of the LoRA."}),
+ "opt_lora_path": ("STRING", {"forceInput": True, "tooltip": "Absolute path of the LoRA."}),
+ "blocks": ("SELECTEDBLOCKS",),
+ }
+ }
+
+ RETURN_TYPES = ("MODEL", "STRING", )
+ RETURN_NAMES = ("model", "rank", )
+ OUTPUT_TOOLTIPS = ("The modified diffusion model.", "possible rank of the LoRA.")
+ FUNCTION = "load_lora"
+ CATEGORY = "KJNodes/experimental"
+
+ def load_lora(self, model, strength_model, lora_name=None, opt_lora_path=None, blocks=None):
+ from comfy.utils import load_torch_file
+ import comfy.lora
+
+ if opt_lora_path:
+ lora_path = opt_lora_path
+ else:
+ lora_path = folder_paths.get_full_path("loras", lora_name)
+
+ lora = None
+ if self.loaded_lora is not None:
+ if self.loaded_lora[0] == lora_path:
+ lora = self.loaded_lora[1]
+ else:
+ temp = self.loaded_lora
+ self.loaded_lora = None
+ del temp
+
+ if lora is None:
+ lora = load_torch_file(lora_path, safe_load=True)
+ # Find the first key that ends with "weight"
+ rank = "unknown"
+ weight_key = next((key for key in lora.keys() if key.endswith('weight')), None)
+ # Print the shape of the value corresponding to the key
+ if weight_key:
+ print(f"Shape of the first 'weight' key ({weight_key}): {lora[weight_key].shape}")
+ rank = str(lora[weight_key].shape[0])
+ else:
+ print("No key ending with 'weight' found.")
+ rank = "Couldn't find rank"
+ self.loaded_lora = (lora_path, lora)
+
+ key_map = {}
+ if model is not None:
+ key_map = comfy.lora.model_lora_keys_unet(model.model, key_map)
+
+ loaded = comfy.lora.load_lora(lora, key_map)
+
+ if blocks is not None:
+ keys_to_delete = []
+
+ for block in blocks:
+ for key in list(loaded.keys()): # Convert keys to a list to avoid runtime error due to size change
+ match = False
+ if isinstance(key, str) and block in key:
+ match = True
+ elif isinstance(key, tuple):
+ for k in key:
+ if block in k:
+ match = True
+ break
+
+ if match:
+ ratio = blocks[block]
+ if ratio == 0:
+ keys_to_delete.append(key) # Collect keys to delete
+ else:
+ value = loaded[key]
+ if isinstance(value, tuple) and len(value) > 1 and isinstance(value[1], tuple):
+ # Handle the tuple format
+ if len(value[1]) > 3:
+ loaded[key] = (value[0], value[1][:-3] + (ratio, value[1][-2], value[1][-1]))
+ else:
+ loaded[key] = (value[0], value[1][:-2] + (ratio, value[1][-1]))
+ else:
+ # Handle the simpler format directly
+ loaded[key] = (value[0], ratio)
+
+ # Now perform the deletion of keys
+ for key in keys_to_delete:
+ del loaded[key]
+
+ print("loading lora keys:")
+ for key, value in loaded.items():
+ if isinstance(value, tuple) and len(value) > 1 and isinstance(value[1], tuple):
+ # Handle the tuple format
+ if len(value[1]) > 2:
+ alpha = value[1][-3] # Assuming the alpha value is the third last element in the tuple
+ else:
+ alpha = value[1][-2] # Adjust according to the second format's structure
+ else:
+ # Handle the simpler format directly
+ alpha = value[1] if len(value) > 1 else None
+ print(f"Key: {key}, Alpha: {alpha}")
+
+
+ if model is not None:
+ new_modelpatcher = model.clone()
+ k = new_modelpatcher.add_patches(loaded, strength_model)
+
+ k = set(k)
+ for x in loaded:
+ if (x not in k):
+ print("NOT LOADED {}".format(x))
+
+ return (new_modelpatcher, rank)
+
+class CustomControlNetWeightsFluxFromList:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "list_of_floats": ("FLOAT", {"forceInput": True}, ),
+ },
+ "optional": {
+ "uncond_multiplier": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}, ),
+ "cn_extras": ("CN_WEIGHTS_EXTRAS",),
+ "autosize": ("ACNAUTOSIZE", {"padding": 0}),
+ }
+ }
+
+ RETURN_TYPES = ("CONTROL_NET_WEIGHTS", "TIMESTEP_KEYFRAME",)
+ RETURN_NAMES = ("CN_WEIGHTS", "TK_SHORTCUT")
+ FUNCTION = "load_weights"
+ DESCRIPTION = "Creates controlnet weights from a list of floats for Advanced-ControlNet"
+
+ CATEGORY = "KJNodes/controlnet"
+
+ def load_weights(self, list_of_floats: list[float],
+ uncond_multiplier: float=1.0, cn_extras: dict[str, float]={}):
+
+ acn_nodes = importlib.import_module("ComfyUI-Advanced-ControlNet")
+ ControlWeights = acn_nodes.adv_control.utils.ControlWeights
+ TimestepKeyframeGroup = acn_nodes.adv_control.utils.TimestepKeyframeGroup
+ TimestepKeyframe = acn_nodes.adv_control.utils.TimestepKeyframe
+
+ weights = ControlWeights.controlnet(weights_input=list_of_floats, uncond_multiplier=uncond_multiplier, extras=cn_extras)
+ print(weights.weights_input)
+ return (weights, TimestepKeyframeGroup.default(TimestepKeyframe(control_weights=weights)))
+
+SHAKKERLABS_UNION_CONTROLNET_TYPES = {
+ "canny": 0,
+ "tile": 1,
+ "depth": 2,
+ "blur": 3,
+ "pose": 4,
+ "gray": 5,
+ "low quality": 6,
+}
+
+class SetShakkerLabsUnionControlNetType:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {"control_net": ("CONTROL_NET", ),
+ "type": (["auto"] + list(SHAKKERLABS_UNION_CONTROLNET_TYPES.keys()),)
+ }}
+
+ CATEGORY = "conditioning/controlnet"
+ RETURN_TYPES = ("CONTROL_NET",)
+
+ FUNCTION = "set_controlnet_type"
+
+ def set_controlnet_type(self, control_net, type):
+ control_net = control_net.copy()
+ type_number = SHAKKERLABS_UNION_CONTROLNET_TYPES.get(type, -1)
+ if type_number >= 0:
+ control_net.set_extra_arg("control_type", [type_number])
+ else:
+ control_net.set_extra_arg("control_type", [])
+
+ return (control_net,)
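+ # Example (based on the mapping above): type="depth" sets
+ # control_net.set_extra_arg("control_type", [2]), while "auto" (absent from the
+ # table) clears it with an empty list, presumably letting the union controlnet
+ # decide the type itself.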
+
+class ModelSaveKJ:
+ def __init__(self):
+ self.output_dir = folder_paths.get_output_directory()
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "model": ("MODEL",),
+ "filename_prefix": ("STRING", {"default": "diffusion_models/ComfyUI"}),
+ "model_key_prefix": ("STRING", {"default": "model.diffusion_model."}),
+ },
+ "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},}
+ RETURN_TYPES = ()
+ FUNCTION = "save"
+ OUTPUT_NODE = True
+
+ CATEGORY = "advanced/model_merging"
+
+ def save(self, model, filename_prefix, model_key_prefix, prompt=None, extra_pnginfo=None):
+ from comfy.utils import save_torch_file
+ full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)
+
+ output_checkpoint = f"{filename}_{counter:05}_.safetensors"
+ output_checkpoint = os.path.join(full_output_folder, output_checkpoint)
+
+ load_models = [model]
+
+ model_management.load_models_gpu(load_models, force_patch_weights=True)
+ default_prefix = "model.diffusion_model."
+
+ sd = model.model.state_dict_for_saving(None, None, None)
+
+ new_sd = {}
+ for k in sd:
+ if k.startswith(default_prefix):
+ new_key = model_key_prefix + k[len(default_prefix):]
+ else:
+ new_key = k # In case the key doesn't start with the default prefix, keep it unchanged
+ t = sd[k]
+ if not t.is_contiguous():
+ t = t.contiguous()
+ new_sd[new_key] = t
+ print(full_output_folder)
+ if not os.path.exists(full_output_folder):
+ os.makedirs(full_output_folder)
+ save_torch_file(new_sd, output_checkpoint)
+ return {}
+
+
+from comfy.ldm.modules import attention as comfy_attention
+orig_attention = comfy_attention.optimized_attention
+
+class CheckpointLoaderKJ:
+ original_linear = None
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "ckpt_name": (folder_paths.get_filename_list("checkpoints"), {"tooltip": "The name of the checkpoint (model) to load."}),
+ "patch_cublaslinear": ("BOOLEAN", {"default": True, "tooltip": "Enable or disable the patching, won't take effect on already loaded models!"}),
+ "sage_attention": ("BOOLEAN", {"default": False, "tooltip": "Patch comfy attention to use sageattn."}),
+ },
+ }
+ RETURN_TYPES = ("MODEL", "CLIP", "VAE")
+ FUNCTION = "patch"
+ OUTPUT_NODE = True
+ DESCRIPTION = "Exemplar node for patching torch.nn.Linear with CublasLinear: https://github.com/aredden/torch-cublas-hgemm"
+
+ CATEGORY = "KJNodes/experimental"
+
+ def patch(self, ckpt_name, patch_cublaslinear, sage_attention):
+ from comfy.ops import disable_weight_init, CastWeightBiasOp, cast_bias_weight
+ from nodes import CheckpointLoaderSimple
+ try:
+ from cublas_ops import CublasLinear
+ except ImportError:
+ raise Exception("Can't import 'torch-cublas-hgemm', install it from here https://github.com/aredden/torch-cublas-hgemm")
+
+ if sage_attention:
+ from sageattention import sageattn
+
+ def attention_sage(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False):
+ if skip_reshape:
+ b, _, _, dim_head = q.shape
+ else:
+ b, _, dim_head = q.shape
+ dim_head //= heads
+ if dim_head not in (64, 96, 128) or not (k.shape == q.shape and v.shape == q.shape):
+ return orig_attention(q, k, v, heads, mask=mask, attn_precision=attn_precision, skip_reshape=skip_reshape)
+ if not skip_reshape:
+ q, k, v = map(
+ lambda t: t.view(b, -1, heads, dim_head).transpose(1, 2),
+ (q, k, v),
+ )
+ return (
+ sageattn(q, k, v, is_causal=False, attn_mask=mask, dropout_p=0.0, smooth_k=True)
+ .transpose(1, 2)
+ .reshape(b, -1, heads * dim_head)
+ )
+
+ class OriginalLinear(torch.nn.Linear, CastWeightBiasOp):
+ def reset_parameters(self):
+ return None
+
+ def forward_comfy_cast_weights(self, input):
+ weight, bias = cast_bias_weight(self, input)
+ return torch.nn.functional.linear(input, weight, bias)
+
+ def forward(self, *args, **kwargs):
+ if self.comfy_cast_weights:
+ return self.forward_comfy_cast_weights(*args, **kwargs)
+ else:
+ return super().forward(*args, **kwargs)
+
+ class PatchedLinear(CublasLinear, CastWeightBiasOp):
+ def reset_parameters(self):
+ return None
+
+ def forward_comfy_cast_weights(self, input):
+ weight, bias = cast_bias_weight(self, input)
+ return torch.nn.functional.linear(input, weight, bias)
+
+ def forward(self, *args, **kwargs):
+ if self.comfy_cast_weights:
+ return self.forward_comfy_cast_weights(*args, **kwargs)
+ else:
+ return super().forward(*args, **kwargs)
+
+ if patch_cublaslinear:
+ disable_weight_init.Linear = PatchedLinear
+ else:
+ disable_weight_init.Linear = OriginalLinear
+ if sage_attention:
+ comfy_attention.optimized_attention = attention_sage
+ else:
+ comfy_attention.optimized_attention = orig_attention
+
+ model, clip, vae = CheckpointLoaderSimple.load_checkpoint(self, ckpt_name)
+
+
+ return model, clip, vae
+
+import comfy.model_patcher
+import comfy.utils
+import comfy.sd
+original_patch_model = comfy.model_patcher.ModelPatcher.patch_model
+original_load_lora_for_models = comfy.sd.load_lora_for_models
+
+def patched_patch_model(self, device_to=None, lowvram_model_memory=0, load_weights=True, force_patch_weights=False):
+
+ if lowvram_model_memory == 0:
+ full_load = True
+ else:
+ full_load = False
+
+ if load_weights:
+ self.load(device_to, lowvram_model_memory=lowvram_model_memory, force_patch_weights=force_patch_weights, full_load=full_load)
+ for k in self.object_patches:
+ old = comfy.utils.set_attr(self.model, k, self.object_patches[k])
+ if k not in self.object_patches_backup:
+ self.object_patches_backup[k] = old
+
+ return self.model
+
+def patched_load_lora_for_models(model, clip, lora, strength_model, strength_clip):
+
+ patch_keys = list(model.object_patches_backup.keys())
+ for k in patch_keys:
+ #print("backing up object patch: ", k)
+ comfy.utils.set_attr(model.model, k, model.object_patches_backup[k])
+
+ key_map = {}
+ if model is not None:
+ key_map = comfy.lora.model_lora_keys_unet(model.model, key_map)
+ if clip is not None:
+ key_map = comfy.lora.model_lora_keys_clip(clip.cond_stage_model, key_map)
+
+ loaded = comfy.lora.load_lora(lora, key_map)
+ #print(temp_object_patches_backup)
+
+ if model is not None:
+ new_modelpatcher = model.clone()
+ k = new_modelpatcher.add_patches(loaded, strength_model)
+ else:
+ k = ()
+ new_modelpatcher = None
+
+ if clip is not None:
+ new_clip = clip.clone()
+ k1 = new_clip.add_patches(loaded, strength_clip)
+ else:
+ k1 = ()
+ new_clip = None
+ k = set(k)
+ k1 = set(k1)
+ for x in loaded:
+ if (x not in k) and (x not in k1):
+ print("NOT LOADED {}".format(x))
+
+ if patch_keys:
+ if hasattr(model.model, "compile_settings"):
+ compile_settings = getattr(model.model, "compile_settings")
+ print("compile_settings: ", compile_settings)
+ for k in patch_keys:
+ if "diffusion_model." in k:
+ # Remove the prefix to get the attribute path
+ key = k.replace('diffusion_model.', '')
+ attributes = key.split('.')
+ # Start with the diffusion_model object
+ block = model.get_model_object("diffusion_model")
+ # Navigate through the attributes to get to the block
+ for attr in attributes:
+ if attr.isdigit():
+ block = block[int(attr)]
+ else:
+ block = getattr(block, attr)
+ # Compile the block
+ compiled_block = torch.compile(block, mode=compile_settings["mode"], dynamic=compile_settings["dynamic"], fullgraph=compile_settings["fullgraph"], backend=compile_settings["backend"])
+ # Add the compiled block back as an object patch
+ model.add_object_patch(k, compiled_block)
+ return (new_modelpatcher, new_clip)
+
+def patched_write_atomic(
+ path_: str,
+ content: Union[str, bytes],
+ make_dirs: bool = False,
+ encode_utf_8: bool = False,
+) -> None:
+ # Write into temporary file first to avoid conflicts between threads
+ # Avoid using a named temporary file, as those have restricted permissions
+ from pathlib import Path
+ import os
+ import shutil
+ import threading
+ assert isinstance(
+ content, (str, bytes)
+ ), "Only strings and byte arrays can be saved in the cache"
+ path = Path(path_)
+ if make_dirs:
+ path.parent.mkdir(parents=True, exist_ok=True)
+ tmp_path = path.parent / f".{os.getpid()}.{threading.get_ident()}.tmp"
+ write_mode = "w" if isinstance(content, str) else "wb"
+ with tmp_path.open(write_mode, encoding="utf-8" if encode_utf_8 else None) as f:
+ f.write(content)
+ shutil.copy2(src=tmp_path, dst=path) #changed to allow overwriting cache files
+ os.remove(tmp_path)
+
+class PatchModelPatcherOrder:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "model": ("MODEL",),
+ "patch_order": (["object_patch_first", "weight_patch_first"], {"default": "weight_patch_first", "tooltip": "Patch the comfy patch_model function to load weight patches (LoRAs) before compiling the model"}),
+ }}
+ RETURN_TYPES = ("MODEL",)
+ FUNCTION = "patch"
+ CATEGORY = "KJNodes/experimental"
+ DESCTIPTION = "Patch the comfy patch_model function patching order, useful for torch.compile (used as object_patch) as it should come last if you want to use LoRAs with compile"
+ EXPERIMENTAL = True
+
+ def patch(self, model, patch_order):
+ comfy.model_patcher.ModelPatcher.temp_object_patches_backup = {}
+ if patch_order == "weight_patch_first":
+ comfy.model_patcher.ModelPatcher.patch_model = patched_patch_model
+ comfy.sd.load_lora_for_models = patched_load_lora_for_models
+ else:
+ comfy.model_patcher.ModelPatcher.patch_model = original_patch_model
+ comfy.sd.load_lora_for_models = original_load_lora_for_models
+
+ return model,
+
+class TorchCompileModelFluxAdvanced:
+ def __init__(self):
+ self._compiled = False
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "model": ("MODEL",),
+ "backend": (["inductor", "cudagraphs"],),
+ "fullgraph": ("BOOLEAN", {"default": False, "tooltip": "Enable full graph mode"}),
+ "mode": (["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}),
+ "double_blocks": ("STRING", {"default": "0-18", "multiline": True}),
+ "single_blocks": ("STRING", {"default": "0-37", "multiline": True}),
+ "dynamic": ("BOOLEAN", {"default": False, "tooltip": "Enable dynamic mode"}),
+ }}
+ RETURN_TYPES = ("MODEL",)
+ FUNCTION = "patch"
+
+ CATEGORY = "KJNodes/experimental"
+ EXPERIMENTAL = True
+
+ def parse_blocks(self, blocks_str):
+ blocks = []
+ for part in blocks_str.split(','):
+ part = part.strip()
+ if '-' in part:
+ start, end = map(int, part.split('-'))
+ blocks.extend(range(start, end + 1))
+ else:
+ blocks.append(int(part))
+ return blocks
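+ # Example: parse_blocks("0-2, 5") -> [0, 1, 2, 5]; the defaults "0-18" and "0-37"
+ # therefore select every double and single block for compilation.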
+
+ def patch(self, model, backend, mode, fullgraph, single_blocks, double_blocks, dynamic):
+ if platform.system() == 'Windows':
+ try:
+ import torch._inductor.codecache
+ torch._inductor.codecache.write_atomic = patched_write_atomic #temporary workaround for the cache write bug on Windows
+ except Exception:
+ pass
+ single_block_list = self.parse_blocks(single_blocks)
+ double_block_list = self.parse_blocks(double_blocks)
+ m = model.clone()
+ diffusion_model = m.get_model_object("diffusion_model")
+
+ if not self._compiled:
+ try:
+ for i, block in enumerate(diffusion_model.double_blocks):
+ if i in double_block_list:
+ #print("Compiling double_block", i)
+ m.add_object_patch(f"diffusion_model.double_blocks.{i}", torch.compile(block, mode=mode, dynamic=dynamic, fullgraph=fullgraph, backend=backend))
+ for i, block in enumerate(diffusion_model.single_blocks):
+ if i in single_block_list:
+ #print("Compiling single block", i)
+ m.add_object_patch(f"diffusion_model.single_blocks.{i}", torch.compile(block, mode=mode, dynamic=dynamic, fullgraph=fullgraph, backend=backend))
+ self._compiled = True
+ compile_settings = {
+ "backend": backend,
+ "mode": mode,
+ "fullgraph": fullgraph,
+ "dynamic": dynamic,
+ }
+ setattr(m.model, "compile_settings", compile_settings)
+ except Exception as e:
+ raise RuntimeError(f"Failed to compile model: {e}")
+
+ return (m, )
+ # rest of the layers that are not patched
+ # diffusion_model.final_layer = torch.compile(diffusion_model.final_layer, mode=mode, fullgraph=fullgraph, backend=backend)
+ # diffusion_model.guidance_in = torch.compile(diffusion_model.guidance_in, mode=mode, fullgraph=fullgraph, backend=backend)
+ # diffusion_model.img_in = torch.compile(diffusion_model.img_in, mode=mode, fullgraph=fullgraph, backend=backend)
+ # diffusion_model.time_in = torch.compile(diffusion_model.time_in, mode=mode, fullgraph=fullgraph, backend=backend)
+ # diffusion_model.txt_in = torch.compile(diffusion_model.txt_in, mode=mode, fullgraph=fullgraph, backend=backend)
+ # diffusion_model.vector_in = torch.compile(diffusion_model.vector_in, mode=mode, fullgraph=fullgraph, backend=backend)
+
+class TorchCompileVAE:
+ def __init__(self):
+ self._compiled_encoder = False
+ self._compiled_decoder = False
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "vae": ("VAE",),
+ "backend": (["inductor", "cudagraphs"],),
+ "fullgraph": ("BOOLEAN", {"default": False, "tooltip": "Enable full graph mode"}),
+ "mode": (["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}),
+ "compile_encoder": ("BOOLEAN", {"default": True, "tooltip": "Compile encoder"}),
+ "compile_decoder": ("BOOLEAN", {"default": True, "tooltip": "Compile decoder"}),
+ }}
+ RETURN_TYPES = ("VAE",)
+ FUNCTION = "compile"
+
+ CATEGORY = "KJNodes/experimental"
+ EXPERIMENTAL = True
+
+ def compile(self, vae, backend, mode, fullgraph, compile_encoder, compile_decoder):
+ if compile_encoder:
+ if not self._compiled_encoder:
+ try:
+ vae.first_stage_model.encoder = torch.compile(vae.first_stage_model.encoder, mode=mode, fullgraph=fullgraph, backend=backend)
+ self._compiled_encoder = True
+ except Exception as e:
+ raise RuntimeError(f"Failed to compile VAE encoder: {e}")
+ if compile_decoder:
+ if not self._compiled_decoder:
+ try:
+ vae.first_stage_model.decoder = torch.compile(vae.first_stage_model.decoder, mode=mode, fullgraph=fullgraph, backend=backend)
+ self._compiled_decoder = True
+ except Exception as e:
+ raise RuntimeError(f"Failed to compile VAE decoder: {e}")
+ return (vae, )
+
+class TorchCompileControlNet:
+ def __init__(self):
+ self._compiled= False
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "controlnet": ("CONTROL_NET",),
+ "backend": (["inductor", "cudagraphs"],),
+ "fullgraph": ("BOOLEAN", {"default": False, "tooltip": "Enable full graph mode"}),
+ "mode": (["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}),
+ }}
+ RETURN_TYPES = ("CONTROL_NET",)
+ FUNCTION = "compile"
+
+ CATEGORY = "KJNodes/experimental"
+ EXPERIMENTAL = True
+
+ def compile(self, controlnet, backend, mode, fullgraph):
+ if not self._compiled:
+ try:
+ # for i, block in enumerate(controlnet.control_model.double_blocks):
+ # print("Compiling controlnet double_block", i)
+ # controlnet.control_model.double_blocks[i] = torch.compile(block, mode=mode, fullgraph=fullgraph, backend=backend)
+ controlnet.control_model = torch.compile(controlnet.control_model, mode=mode, fullgraph=fullgraph, backend=backend)
+ self._compiled = True
+ except Exception as e:
+ self._compiled = False
+ raise RuntimeError(f"Failed to compile controlnet: {e}")
+
+ return (controlnet, )
diff --git a/ComfyUI-KJNodes/pyproject.toml b/ComfyUI-KJNodes/pyproject.toml
new file mode 100644
index 0000000000000000000000000000000000000000..c8359246ff3bc17cb8e9c9de6a5fc4b0935b6f5e
--- /dev/null
+++ b/ComfyUI-KJNodes/pyproject.toml
@@ -0,0 +1,15 @@
+[project]
+name = "comfyui-kjnodes"
+description = "Various quality of life -nodes for ComfyUI, mostly just visual stuff to improve usability."
+version = "1.0.1"
+license = {file = "LICENSE"}
+dependencies = ["librosa", "numpy", "pillow>=10.3.0", "scipy", "color-matcher", "matplotlib", "huggingface_hub"]
+
+[project.urls]
+Repository = "https://github.com/kijai/ComfyUI-KJNodes"
+# Used by Comfy Registry https://comfyregistry.org
+
+[tool.comfy]
+PublisherId = "kijai"
+DisplayName = "ComfyUI-KJNodes"
+Icon = ""
diff --git a/ComfyUI-KJNodes/requirements.txt b/ComfyUI-KJNodes/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7d84a6dcb5ee7d9f17565410210579962d5713fb
--- /dev/null
+++ b/ComfyUI-KJNodes/requirements.txt
@@ -0,0 +1,7 @@
+pillow>=10.3.0
+scipy
+color-matcher
+matplotlib
+huggingface_hub
+mss
+opencv-python
\ No newline at end of file
diff --git a/ComfyUI-KJNodes/utility/__pycache__/utility.cpython-312.pyc b/ComfyUI-KJNodes/utility/__pycache__/utility.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..92aa81480e3f18d7f5e0a658a662363bc7343347
Binary files /dev/null and b/ComfyUI-KJNodes/utility/__pycache__/utility.cpython-312.pyc differ
diff --git a/ComfyUI-KJNodes/utility/fluid.py b/ComfyUI-KJNodes/utility/fluid.py
new file mode 100644
index 0000000000000000000000000000000000000000..f6387b8c27831c685537877e39e38323adc82c1e
--- /dev/null
+++ b/ComfyUI-KJNodes/utility/fluid.py
@@ -0,0 +1,67 @@
+import numpy as np
+from scipy.ndimage import map_coordinates, spline_filter
+from scipy.sparse.linalg import factorized
+
+from .numerical import difference, operator
+
+
+class Fluid:
+ def __init__(self, shape, *quantities, pressure_order=1, advect_order=3):
+ self.shape = shape
+ self.dimensions = len(shape)
+
+ # Prototyping is simplified by dynamically
+ # creating advected quantities as needed.
+ self.quantities = quantities
+ for q in quantities:
+ setattr(self, q, np.zeros(shape))
+
+ self.indices = np.indices(shape)
+ self.velocity = np.zeros((self.dimensions, *shape))
+
+ laplacian = operator(shape, difference(2, pressure_order))
+ self.pressure_solver = factorized(laplacian)
+
+ self.advect_order = advect_order
+
+ def step(self):
+ # Advection is computed backwards in time as described in Stable Fluids.
+ advection_map = self.indices - self.velocity
+
+ # SciPy's spline filter introduces checkerboard divergence.
+ # A linear blend of the filtered and unfiltered fields based
+ # on some value epsilon eliminates this error.
+ def advect(field, filter_epsilon=10e-2, mode='constant'):
+ filtered = spline_filter(field, order=self.advect_order, mode=mode)
+ field = filtered * (1 - filter_epsilon) + field * filter_epsilon
+ return map_coordinates(field, advection_map, prefilter=False, order=self.advect_order, mode=mode)
+
+ # Apply advection to each axis of the
+ # velocity field and each user-defined quantity.
+ for d in range(self.dimensions):
+ self.velocity[d] = advect(self.velocity[d])
+
+ for q in self.quantities:
+ setattr(self, q, advect(getattr(self, q)))
+
+ # Compute the jacobian at each point in the
+ # velocity field to extract curl and divergence.
+ jacobian_shape = (self.dimensions,) * 2
+ partials = tuple(np.gradient(d) for d in self.velocity)
+ jacobian = np.stack(partials).reshape(*jacobian_shape, *self.shape)
+
+ divergence = jacobian.trace()
+
+ # If this curl calculation is extended to 3D, the y-axis value must be negated.
+ # This corresponds to the coefficients of the levi-civita symbol in that dimension.
+ # Higher dimensions do not have a vector -> scalar, or vector -> vector,
+ # correspondence between velocity and curl due to differing isomorphisms
+ # between exterior powers in dimensions != 2 or 3 respectively.
+ curl_mask = np.triu(np.ones(jacobian_shape, dtype=bool), k=1)
+ curl = (jacobian[curl_mask] - jacobian[curl_mask.T]).squeeze()
+
+ # Apply the pressure correction to the fluid's velocity field.
+ pressure = self.pressure_solver(divergence.flatten()).reshape(self.shape)
+ self.velocity -= np.gradient(pressure)
+
+ return divergence, curl, pressure
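+
+# Minimal usage sketch (assumption: not part of the original module) showing how the solver
+# is typically driven. A user-defined "dye" quantity is advected with the velocity field:
+#
+#   fluid = Fluid((128, 128), "dye")
+#   fluid.dye[60:68, 60:68] = 1.0          # seed some dye near the centre
+#   fluid.velocity[0] += 0.5               # initial push along the first axis
+#   for _ in range(100):
+#       divergence, curl, pressure = fluid.step()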
\ No newline at end of file
diff --git a/ComfyUI-KJNodes/utility/magictex.py b/ComfyUI-KJNodes/utility/magictex.py
new file mode 100644
index 0000000000000000000000000000000000000000..44986bf4b8e74b9533896a34a2fb2aa24a5d00fd
--- /dev/null
+++ b/ComfyUI-KJNodes/utility/magictex.py
@@ -0,0 +1,95 @@
+"""Generates psychedelic color textures in the spirit of Blender's magic texture shader using Python/Numpy
+
+https://github.com/cheind/magic-texture
+"""
+from typing import Tuple, Optional
+import numpy as np
+
+
+def coordinate_grid(shape: Tuple[int, int], dtype=np.float32):
+ """Returns a three-dimensional coordinate grid of given shape for use in `magic`."""
+ x = np.linspace(-1, 1, shape[1], endpoint=True, dtype=dtype)
+ y = np.linspace(-1, 1, shape[0], endpoint=True, dtype=dtype)
+ X, Y = np.meshgrid(x, y)
+ XYZ = np.stack((X, Y, np.ones_like(X)), -1)
+ return XYZ
+
+
+def random_transform(coords: np.ndarray, rng: np.random.Generator = None):
+ """Returns randomly transformed coordinates"""
+ H, W = coords.shape[:2]
+ rng = rng or np.random.default_rng()
+ m = rng.uniform(-1.0, 1.0, size=(3, 3)).astype(coords.dtype)
+ return (coords.reshape(-1, 3) @ m.T).reshape(H, W, 3)
+
+
+def magic(
+ coords: np.ndarray,
+ depth: Optional[int] = None,
+    distortion: Optional[float] = None,
+ rng: np.random.Generator = None,
+):
+ """Returns color magic color texture.
+
+ The implementation is based on Blender's (https://www.blender.org/) magic
+ texture shader. The following adaptions have been made:
+ - we exchange the nested if-cascade by a probabilistic iterative approach
+
+ Kwargs
+ ------
+ coords: HxWx3 array
+ Coordinates transformed into colors by this method. See
+ `magictex.coordinate_grid` to generate the default.
+ depth: int (optional)
+ Number of transformations applied. Higher numbers lead to more
+ nested patterns. If not specified, randomly sampled.
+ distortion: float (optional)
+ Distortion of patterns. Larger values indicate more distortion,
+ lower values tend to generate smoother patterns. If not specified,
+ randomly sampled.
+ rng: np.random.Generator
+ Optional random generator to draw samples from.
+
+ Returns
+ -------
+ colors: HxWx3 array
+ Three channel color image in range [0,1]
+ """
+ rng = rng or np.random.default_rng()
+ if distortion is None:
+ distortion = rng.uniform(1, 4)
+ if depth is None:
+ depth = rng.integers(1, 5)
+
+ H, W = coords.shape[:2]
+ XYZ = coords
+ x = np.sin((XYZ[..., 0] + XYZ[..., 1] + XYZ[..., 2]) * distortion)
+ y = np.cos((-XYZ[..., 0] + XYZ[..., 1] - XYZ[..., 2]) * distortion)
+ z = -np.cos((-XYZ[..., 0] - XYZ[..., 1] + XYZ[..., 2]) * distortion)
+
+ if depth > 0:
+ x *= distortion
+ y *= distortion
+ z *= distortion
+ y = -np.cos(x - y + z)
+ y *= distortion
+
+ xyz = [x, y, z]
+ fns = [np.cos, np.sin]
+ for _ in range(1, depth):
+ axis = rng.choice(3)
+ fn = fns[rng.choice(2)]
+ signs = rng.binomial(n=1, p=0.5, size=4) * 2 - 1
+
+ xyz[axis] = signs[-1] * fn(
+ signs[0] * xyz[0] + signs[1] * xyz[1] + signs[2] * xyz[2]
+ )
+ xyz[axis] *= distortion
+
+ x, y, z = xyz
+ x /= 2 * distortion
+ y /= 2 * distortion
+ z /= 2 * distortion
+ c = 0.5 - np.stack((x, y, z), -1)
+    c = np.clip(c, 0.0, 1.0)
+ return c
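+
+# Minimal usage sketch (assumption: not part of the original module):
+#
+#   rng = np.random.default_rng(0)
+#   coords = random_transform(coordinate_grid((256, 256)), rng)
+#   tex = magic(coords, depth=3, distortion=2.5, rng=rng)   # HxWx3 float array in [0, 1]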
\ No newline at end of file
diff --git a/ComfyUI-KJNodes/utility/numerical.py b/ComfyUI-KJNodes/utility/numerical.py
new file mode 100644
index 0000000000000000000000000000000000000000..96dd7fe1e6f51260f7e9545f7114141973ded6e0
--- /dev/null
+++ b/ComfyUI-KJNodes/utility/numerical.py
@@ -0,0 +1,25 @@
+from functools import reduce
+from itertools import cycle
+from math import factorial
+
+import numpy as np
+import scipy.sparse as sp
+
+
+def difference(derivative, accuracy=1):
+ # Central differences implemented based on the article here:
+ # http://web.media.mit.edu/~crtaylor/calculator.html
+ derivative += 1
+ radius = accuracy + derivative // 2 - 1
+ points = range(-radius, radius + 1)
+ coefficients = np.linalg.inv(np.vander(points))
+ return coefficients[-derivative] * factorial(derivative - 1), points
+
+
+def operator(shape, *differences):
+ # Credit to Philip Zucker for figuring out
+ # that kronsum's argument order is reversed.
+ # Without that bit of wisdom I'd have lost it.
+ differences = zip(shape, cycle(differences))
+ factors = (sp.diags(*diff, shape=(dim,) * 2) for dim, diff in differences)
+ return reduce(lambda a, f: sp.kronsum(f, a, format='csc'), factors)
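+
+# Illustrative sketch (assumption: not part of the original module): a 1D Laplacian on a
+# 5-point grid, i.e. the lowest-order central second difference. Interior rows of the
+# resulting sparse matrix carry the familiar [1, -2, 1] stencil; this is what fluid.Fluid
+# builds (in N dimensions) for its pressure solve.
+#
+#   lap = operator((5,), difference(2, accuracy=1))
+#   print(lap.toarray())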
\ No newline at end of file
diff --git a/ComfyUI-KJNodes/utility/utility.py b/ComfyUI-KJNodes/utility/utility.py
new file mode 100644
index 0000000000000000000000000000000000000000..c50b214c54e417f8f5eddc587a7380a7d7098f85
--- /dev/null
+++ b/ComfyUI-KJNodes/utility/utility.py
@@ -0,0 +1,39 @@
+import torch
+import numpy as np
+from PIL import Image
+from typing import Union, List
+
+# Utility functions from mtb nodes: https://github.com/melMass/comfy_mtb
+def pil2tensor(image: Union[Image.Image, List[Image.Image]]) -> torch.Tensor:
+ if isinstance(image, list):
+ return torch.cat([pil2tensor(img) for img in image], dim=0)
+
+ return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)
+
+
+def np2tensor(img_np: Union[np.ndarray, List[np.ndarray]]) -> torch.Tensor:
+ if isinstance(img_np, list):
+ return torch.cat([np2tensor(img) for img in img_np], dim=0)
+
+ return torch.from_numpy(img_np.astype(np.float32) / 255.0).unsqueeze(0)
+
+
+def tensor2np(tensor: torch.Tensor):
+ if len(tensor.shape) == 3: # Single image
+ return np.clip(255.0 * tensor.cpu().numpy(), 0, 255).astype(np.uint8)
+ else: # Batch of images
+ return [np.clip(255.0 * t.cpu().numpy(), 0, 255).astype(np.uint8) for t in tensor]
+
+def tensor2pil(image: torch.Tensor) -> List[Image.Image]:
+ batch_count = image.size(0) if len(image.shape) > 3 else 1
+ if batch_count > 1:
+ out = []
+ for i in range(batch_count):
+ out.extend(tensor2pil(image[i]))
+ return out
+
+ return [
+ Image.fromarray(
+ np.clip(255.0 * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)
+ )
+ ]
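+
+# Minimal round-trip sketch (assumption: not part of the original module). ComfyUI image
+# tensors are float32, shaped [batch, height, width, channels] and scaled to [0, 1]:
+#
+#   pil_img = Image.new("RGB", (64, 64), "red")
+#   t = pil2tensor(pil_img)       # torch.Size([1, 64, 64, 3])
+#   back = tensor2pil(t)[0]       # a 64x64 PIL.Image.Image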
\ No newline at end of file
diff --git a/ComfyUI-KJNodes/web/green.png b/ComfyUI-KJNodes/web/green.png
new file mode 100644
index 0000000000000000000000000000000000000000..900964e4b3907145fe1e75a5b58473567450e16d
Binary files /dev/null and b/ComfyUI-KJNodes/web/green.png differ
diff --git a/ComfyUI-KJNodes/web/js/appearance.js b/ComfyUI-KJNodes/web/js/appearance.js
new file mode 100644
index 0000000000000000000000000000000000000000..f560920e6243d378959a3f774525c18db34d82ea
--- /dev/null
+++ b/ComfyUI-KJNodes/web/js/appearance.js
@@ -0,0 +1,23 @@
+import { app } from "../../../scripts/app.js";
+
+app.registerExtension({
+ name: "KJNodes.appearance",
+ nodeCreated(node) {
+ switch (node.comfyClass) {
+ case "INTConstant":
+ node.setSize([200, 58]);
+ node.color = "#1b4669";
+ node.bgcolor = "#29699c";
+ break;
+ case "FloatConstant":
+ node.setSize([200, 58]);
+ node.color = LGraphCanvas.node_colors.green.color;
+ node.bgcolor = LGraphCanvas.node_colors.green.bgcolor;
+ break;
+ case "ConditioningMultiCombine":
+ node.color = LGraphCanvas.node_colors.brown.color;
+ node.bgcolor = LGraphCanvas.node_colors.brown.bgcolor;
+ break;
+ }
+ }
+});
diff --git a/ComfyUI-KJNodes/web/js/browserstatus.js b/ComfyUI-KJNodes/web/js/browserstatus.js
new file mode 100644
index 0000000000000000000000000000000000000000..d541c3c51487238e6b9022749b4fc75e8c95b9f0
--- /dev/null
+++ b/ComfyUI-KJNodes/web/js/browserstatus.js
@@ -0,0 +1,53 @@
+import { api } from "../../../scripts/api.js";
+import { app } from "../../../scripts/app.js";
+
+app.registerExtension({
+ name: "KJNodes.browserstatus",
+ setup() {
+ if (!app.ui.settings.getSettingValue("KJNodes.browserStatus")) {
+ return;
+ }
+ api.addEventListener("status", ({ detail }) => {
+ let title = "ComfyUI";
+ let favicon = "green";
+ let queueRemaining = detail && detail.exec_info.queue_remaining;
+
+ if (queueRemaining) {
+ favicon = "red";
+ title = `00% - ${queueRemaining} | ${title}`;
+ }
+ let link = document.querySelector("link[rel~='icon']");
+ if (!link) {
+ link = document.createElement("link");
+ link.rel = "icon";
+ document.head.appendChild(link);
+ }
+ link.href = new URL(`../${favicon}.png`, import.meta.url);
+ document.title = title;
+ });
+ //add progress to the title
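+    // e.g. with 3 prompts queued and generation at 47%, the tab title reads "47% - 3 | ComfyUI"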
+ api.addEventListener("progress", ({ detail }) => {
+ const { value, max } = detail;
+ const progress = Math.floor((value / max) * 100);
+ let title = document.title;
+
+ if (!isNaN(progress) && progress >= 0 && progress <= 100) {
+ const paddedProgress = String(progress).padStart(2, '0');
+ title = `${paddedProgress}% ${title.replace(/^\d+%\s/, '')}`;
+ }
+ document.title = title;
+ });
+ },
+ init() {
+ if (!app.ui.settings.getSettingValue("KJNodes.browserStatus")) {
+ return;
+ }
+ const pythongossFeed = app.extensions.find(
+ (e) => e.name === 'pysssss.FaviconStatus',
+ )
+ if (pythongossFeed) {
+ console.warn("KJNodes - Overriding pysssss.FaviconStatus")
+ app.extensions = app.extensions.filter(item => item !== pythongossFeed);
+ }
+ },
+});
\ No newline at end of file
diff --git a/ComfyUI-KJNodes/web/js/contextmenu.js b/ComfyUI-KJNodes/web/js/contextmenu.js
new file mode 100644
index 0000000000000000000000000000000000000000..e6e549a988c68a2f437d1efbb2b9f800fd912a2c
--- /dev/null
+++ b/ComfyUI-KJNodes/web/js/contextmenu.js
@@ -0,0 +1,152 @@
+import { app } from "../../../scripts/app.js";
+
+// Adds context menu entries, code partly from pysssss custom-scripts
+
+function addMenuHandler(nodeType, cb) {
+ const getOpts = nodeType.prototype.getExtraMenuOptions;
+ nodeType.prototype.getExtraMenuOptions = function () {
+ const r = getOpts.apply(this, arguments);
+ cb.apply(this, arguments);
+ return r;
+ };
+}
+
+function addNode(name, nextTo, options) {
+ console.log("name:", name);
+ console.log("nextTo:", nextTo);
+  // default "offset" to 0 so the horizontal gap is well-defined even when callers omit it
+  options = { side: "left", select: true, shiftY: 0, shiftX: 0, offset: 0, ...(options || {}) };
+  const node = LiteGraph.createNode(name);
+  app.graph.add(node);
+
+  node.pos = [
+    options.side === "left" ? nextTo.pos[0] - (node.size[0] + options.offset) : nextTo.pos[0] + nextTo.size[0] + options.offset,
+    nextTo.pos[1] + options.shiftY,
+  ];
+ if (options.select) {
+ app.canvas.selectNode(node, false);
+ }
+ return node;
+}
+
+app.registerExtension({
+ name: "KJNodesContextmenu",
+ async beforeRegisterNodeDef(nodeType, nodeData, app) {
+ if (nodeData.input && nodeData.input.required) {
+      addMenuHandler(nodeType, function (_, options) {
+        options.unshift(
+          {
+            content: "Add GetNode",
+            callback: () => { addNode("GetNode", this, { side: "left", offset: 30 }); }
+          },
+          {
+            content: "Add SetNode",
+            callback: () => { addNode("SetNode", this, { side: "right", offset: 30 }); }
+          },
+        );
+      });
+ }
+ },
+ async setup(app) {
+ const onChange = (value) => {
+ if (value) {
+ const valuesToAddToIn = ["GetNode"];
+ const valuesToAddToOut = ["SetNode"];
+
+        for (const arr of Object.values(LiteGraph.slot_types_default_in)) {
+          for (const valueToAdd of valuesToAddToIn) {
+            const idx = arr.indexOf(valueToAdd);
+            // remove an existing entry (if any) so the value ends up exactly once, at the front
+            if (idx > 0) {
+              arr.splice(idx, 1);
+            }
+            if (idx !== 0) {
+              arr.unshift(valueToAdd);
+            }
+          }
+        }
+
+        for (const arr of Object.values(LiteGraph.slot_types_default_out)) {
+          for (const valueToAdd of valuesToAddToOut) {
+            const idx = arr.indexOf(valueToAdd);
+            if (idx > 0) {
+              arr.splice(idx, 1);
+            }
+            if (idx !== 0) {
+              arr.unshift(valueToAdd);
+            }
+          }
+        }
+ }
+ };
+
+ app.ui.settings.addSetting({
+ id: "KJNodes.SetGetMenu",
+ name: "KJNodes: Make Set/Get -nodes defaults (turn off and reload to disable)",
+ defaultValue: false,
+ type: "boolean",
+ options: (value) => [
+ {
+ value: true,
+ text: "On",
+ selected: value === true,
+ },
+ {
+ value: false,
+ text: "Off",
+ selected: value === false,
+ },
+ ],
+ onChange: onChange,
+
+ });
+ app.ui.settings.addSetting({
+ id: "KJNodes.DisableMiddleClickDefault",
+ name: "KJNodes: Middle click default node adding",
+ defaultValue: false,
+ type: "boolean",
+ options: (value) => [
+ { value: true, text: "On", selected: value === true },
+ { value: false, text: "Off", selected: value === false },
+ ],
+ onChange: (value) => {
+ LiteGraph.middle_click_slot_add_default_node = value;
+ },
+ });
+ app.ui.settings.addSetting({
+ id: "KJNodes.nodeAutoColor",
+ name: "KJNodes: Automatically set node colors",
+ defaultValue: true,
+ type: "boolean",
+ options: (value) => [
+ { value: true, text: "On", selected: value === true },
+ { value: false, text: "Off", selected: value === false },
+ ],
+ });
+ app.ui.settings.addSetting({
+ id: "KJNodes.helpPopup",
+ name: "KJNodes: Help popups",
+ defaultValue: true,
+ type: "boolean",
+ options: (value) => [
+ { value: true, text: "On", selected: value === true },
+ { value: false, text: "Off", selected: value === false },
+ ],
+ });
+ app.ui.settings.addSetting({
+ id: "KJNodes.disablePrefix",
+ name: "KJNodes: Disable automatic Set_ and Get_ prefix",
+ defaultValue: false,
+ type: "boolean",
+ options: (value) => [
+ { value: true, text: "On", selected: value === true },
+ { value: false, text: "Off", selected: value === false },
+ ],
+ });
+ app.ui.settings.addSetting({
+ id: "KJNodes.browserStatus",
+ name: "KJNodes: 🟢 Stoplight browser status icon 🔴",
+ defaultValue: false,
+ type: "boolean",
+ options: (value) => [
+ { value: true, text: "On", selected: value === true },
+ { value: false, text: "Off", selected: value === false },
+ ],
+ });
+  }
+});
diff --git a/ComfyUI-KJNodes/web/js/fast_preview.js b/ComfyUI-KJNodes/web/js/fast_preview.js
new file mode 100644
index 0000000000000000000000000000000000000000..c99dc40f8aacd7a35d0890721a4df78210588119
--- /dev/null
+++ b/ComfyUI-KJNodes/web/js/fast_preview.js
@@ -0,0 +1,95 @@
+import { app } from '../../../scripts/app.js'
+
+//from melmass
+export function makeUUID() {
+ let dt = new Date().getTime()
+ const uuid = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, (c) => {
+ const r = ((dt + Math.random() * 16) % 16) | 0
+ dt = Math.floor(dt / 16)
+ return (c === 'x' ? r : (r & 0x3) | 0x8).toString(16)
+ })
+ return uuid
+}
+
+function chainCallback(object, property, callback) {
+ if (object == undefined) {
+ //This should not happen.
+ console.error("Tried to add callback to non-existant object")
+ return;
+ }
+ if (property in object) {
+ const callback_orig = object[property]
+ object[property] = function () {
+ const r = callback_orig.apply(this, arguments);
+ callback.apply(this, arguments);
+ return r
+ };
+ } else {
+ object[property] = callback;
+ }
+}
+app.registerExtension({
+ name: 'KJNodes.FastPreview',
+
+ async beforeRegisterNodeDef(nodeType, nodeData) {
+ if (nodeData?.name === 'FastPreview') {
+ chainCallback(nodeType.prototype, "onNodeCreated", function () {
+
+ var element = document.createElement("div");
+ this.uuid = makeUUID()
+ element.id = `fast-preview-${this.uuid}`
+
+ this.previewWidget = this.addDOMWidget(nodeData.name, "FastPreviewWidget", element, {
+ serialize: false,
+ hideOnZoom: false,
+ });
+
+ this.previewer = new Previewer(this);
+
+ this.setSize([550, 550]);
+ this.resizable = false;
+ this.previewWidget.parentEl = document.createElement("div");
+ this.previewWidget.parentEl.className = "fast-preview";
+ this.previewWidget.parentEl.id = `fast-preview-${this.uuid}`
+ element.appendChild(this.previewWidget.parentEl);
+
+ chainCallback(this, "onExecuted", function (message) {
+ let bg_image = message["bg_image"];
+ this.properties.imgData = {
+ name: "bg_image",
+ base64: bg_image
+ };
+ this.previewer.refreshBackgroundImage(this);
+ });
+
+
+      }); // onNodeCreated
+ }//node created
+ } //before register
+})//register
+
+class Previewer {
+ constructor(context) {
+ this.node = context;
+ this.previousWidth = null;
+ this.previousHeight = null;
+ }
+ refreshBackgroundImage = () => {
+ const imgData = this.node?.properties?.imgData;
+ if (imgData?.base64) {
+ const base64String = imgData.base64;
+ const imageUrl = `data:${imgData.type};base64,${base64String}`;
+ const img = new Image();
+ img.src = imageUrl;
+ img.onload = () => {
+ const { width, height } = img;
+ if (width !== this.previousWidth || height !== this.previousHeight) {
+ this.node.setSize([width, height]);
+ this.previousWidth = width;
+ this.previousHeight = height;
+ }
+ this.node.previewWidget.element.style.backgroundImage = `url(${imageUrl})`;
+ };
+ }
+ };
+ }
\ No newline at end of file
diff --git a/ComfyUI-KJNodes/web/js/help_popup.js b/ComfyUI-KJNodes/web/js/help_popup.js
new file mode 100644
index 0000000000000000000000000000000000000000..4eb6b22c9c494ddf069b12c8cf745a440599631b
--- /dev/null
+++ b/ComfyUI-KJNodes/web/js/help_popup.js
@@ -0,0 +1,326 @@
+import { app } from "../../../scripts/app.js";
+
+// code based on mtb nodes by Mel Massadian https://github.com/melMass/comfy_mtb/
+export const loadScript = (
+ FILE_URL,
+ async = true,
+ type = 'text/javascript',
+) => {
+ return new Promise((resolve, reject) => {
+ try {
+ // Check if the script already exists
+ const existingScript = document.querySelector(`script[src="${FILE_URL}"]`)
+ if (existingScript) {
+ resolve({ status: true, message: 'Script already loaded' })
+ return
+ }
+
+ const scriptEle = document.createElement('script')
+ scriptEle.type = type
+ scriptEle.async = async
+ scriptEle.src = FILE_URL
+
+ scriptEle.addEventListener('load', (ev) => {
+ resolve({ status: true })
+ })
+
+ scriptEle.addEventListener('error', (ev) => {
+ reject({
+ status: false,
+ message: `Failed to load the script ${FILE_URL}`,
+ })
+ })
+
+ document.body.appendChild(scriptEle)
+ } catch (error) {
+ reject(error)
+ }
+ })
+}
+
+loadScript('/kjweb_async/marked.min.js').catch((e) => {
+ console.log(e)
+})
+loadScript('/kjweb_async/purify.min.js').catch((e) => {
+ console.log(e)
+})
+
+const categories = ["KJNodes", "SUPIR", "VoiceCraft", "Marigold", "IC-Light"];
+app.registerExtension({
+ name: "KJNodes.HelpPopup",
+ async beforeRegisterNodeDef(nodeType, nodeData) {
+
+ if (app.ui.settings.getSettingValue("KJNodes.helpPopup") === false) {
+ return;
+ }
+ try {
+      categories.forEach(category => {
+        if (nodeData?.category?.startsWith(category)) {
+          addDocumentation(nodeData, nodeType);
+        }
+      });
+ } catch (error) {
+ console.error("Error in registering KJNodes.HelpPopup", error);
+ }
+ },
+});
+
+const create_documentation_stylesheet = () => {
+ const tag = 'kj-documentation-stylesheet'
+
+  let styleTag = document.head.querySelector(`#${tag}`)
+
+ if (!styleTag) {
+ styleTag = document.createElement('style')
+ styleTag.type = 'text/css'
+ styleTag.id = tag
+ styleTag.innerHTML = `
+ .kj-documentation-popup {
+ background: var(--comfy-menu-bg);
+ position: absolute;
+ color: var(--fg-color);
+ font: 12px monospace;
+ line-height: 1.5em;
+ padding: 10px;
+ border-radius: 10px;
+ border-style: solid;
+ border-width: medium;
+ border-color: var(--border-color);
+ z-index: 5;
+ overflow: hidden;
+ }
+ .content-wrapper {
+ overflow: auto;
+ max-height: 100%;
+ /* Scrollbar styling for Chrome */
+ &::-webkit-scrollbar {
+ width: 6px;
+ }
+ &::-webkit-scrollbar-track {
+ background: var(--bg-color);
+ }
+ &::-webkit-scrollbar-thumb {
+ background-color: var(--fg-color);
+ border-radius: 6px;
+ border: 3px solid var(--bg-color);
+ }
+
+ /* Scrollbar styling for Firefox */
+ scrollbar-width: thin;
+ scrollbar-color: var(--fg-color) var(--bg-color);
+ a {
+ color: yellow;
+ }
+ a:visited {
+ color: orange;
+ }
+ a:hover {
+ color: red;
+ }
+ }
+ `
+ document.head.appendChild(styleTag)
+ }
+ }
+
+ /** Add documentation widget to the selected node */
+ export const addDocumentation = (
+ nodeData,
+ nodeType,
+ opts = { icon_size: 14, icon_margin: 4 },) => {
+
+ opts = opts || {}
+ const iconSize = opts.icon_size ? opts.icon_size : 14
+ const iconMargin = opts.icon_margin ? opts.icon_margin : 4
+ let docElement = null
+ let contentWrapper = null
+ //if no description in the node python code, don't do anything
+ if (!nodeData.description) {
+ return
+ }
+
+ const drawFg = nodeType.prototype.onDrawForeground
+ nodeType.prototype.onDrawForeground = function (ctx) {
+ const r = drawFg ? drawFg.apply(this, arguments) : undefined
+ if (this.flags.collapsed) return r
+
+ // icon position
+ const x = this.size[0] - iconSize - iconMargin
+
+ // create the popup
+ if (this.show_doc && docElement === null) {
+ docElement = document.createElement('div')
+ contentWrapper = document.createElement('div');
+ docElement.appendChild(contentWrapper);
+
+ create_documentation_stylesheet()
+ contentWrapper.classList.add('content-wrapper');
+ docElement.classList.add('kj-documentation-popup')
+
+ //parse the string from the python node code to html with marked, and sanitize the html with DOMPurify
+ contentWrapper.innerHTML = DOMPurify.sanitize(marked.parse(nodeData.description,))
+
+ // resize handle
+ const resizeHandle = document.createElement('div');
+ resizeHandle.style.width = '0';
+ resizeHandle.style.height = '0';
+ resizeHandle.style.position = 'absolute';
+ resizeHandle.style.bottom = '0';
+ resizeHandle.style.right = '0';
+ resizeHandle.style.cursor = 'se-resize';
+
+ // Add pseudo-elements to create a triangle shape
+ const borderColor = getComputedStyle(document.documentElement).getPropertyValue('--border-color').trim();
+ resizeHandle.style.borderTop = '10px solid transparent';
+ resizeHandle.style.borderLeft = '10px solid transparent';
+ resizeHandle.style.borderBottom = `10px solid ${borderColor}`;
+ resizeHandle.style.borderRight = `10px solid ${borderColor}`;
+
+ docElement.appendChild(resizeHandle)
+ let isResizing = false
+ let startX, startY, startWidth, startHeight
+
+ resizeHandle.addEventListener('mousedown', function (e) {
+ e.preventDefault();
+ e.stopPropagation();
+ isResizing = true;
+ startX = e.clientX;
+ startY = e.clientY;
+ startWidth = parseInt(document.defaultView.getComputedStyle(docElement).width, 10);
+ startHeight = parseInt(document.defaultView.getComputedStyle(docElement).height, 10);
+ },
+ { signal: this.docCtrl.signal },
+ );
+
+ // close button
+ const closeButton = document.createElement('div');
+ closeButton.textContent = '❌';
+ closeButton.style.position = 'absolute';
+ closeButton.style.top = '0';
+ closeButton.style.right = '0';
+ closeButton.style.cursor = 'pointer';
+ closeButton.style.padding = '5px';
+ closeButton.style.color = 'red';
+ closeButton.style.fontSize = '12px';
+
+ docElement.appendChild(closeButton)
+
+ closeButton.addEventListener('mousedown', (e) => {
+ e.stopPropagation();
+ this.show_doc = !this.show_doc
+ docElement.parentNode.removeChild(docElement)
+ docElement = null
+ if (contentWrapper) {
+ contentWrapper.remove()
+ contentWrapper = null
+ }
+ },
+ { signal: this.docCtrl.signal },
+ );
+
+ document.addEventListener('mousemove', function (e) {
+ if (!isResizing) return;
+ const scale = app.canvas.ds.scale;
+ const newWidth = startWidth + (e.clientX - startX) / scale;
+        const newHeight = startHeight + (e.clientY - startY) / scale;
+ docElement.style.width = `${newWidth}px`;
+ docElement.style.height = `${newHeight}px`;
+ },
+ { signal: this.docCtrl.signal },
+ );
+
+ document.addEventListener('mouseup', function () {
+ isResizing = false
+ },
+ { signal: this.docCtrl.signal },
+ )
+
+ document.body.appendChild(docElement)
+ }
+ // close the popup
+ else if (!this.show_doc && docElement !== null) {
+ docElement.parentNode.removeChild(docElement)
+ docElement = null
+ }
+ // update position of the popup
+ if (this.show_doc && docElement !== null) {
+ const rect = ctx.canvas.getBoundingClientRect()
+ const scaleX = rect.width / ctx.canvas.width
+ const scaleY = rect.height / ctx.canvas.height
+
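+      // Map the node's canvas-space position into page coordinates so the popup stays
+      // anchored next to the node while the graph is panned or zoomed.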
+ const transform = new DOMMatrix()
+ .scaleSelf(scaleX, scaleY)
+ .multiplySelf(ctx.getTransform())
+ .translateSelf(this.size[0] * scaleX * Math.max(1.0,window.devicePixelRatio) , 0)
+ .translateSelf(10, -32)
+
+ const scale = new DOMMatrix()
+ .scaleSelf(transform.a, transform.d);
+ const bcr = app.canvas.canvas.getBoundingClientRect()
+
+ const styleObject = {
+ transformOrigin: '0 0',
+ transform: scale,
+ left: `${transform.a + bcr.x + transform.e}px`,
+ top: `${transform.d + bcr.y + transform.f}px`,
+ };
+ Object.assign(docElement.style, styleObject);
+ }
+
+ ctx.save()
+ ctx.translate(x - 2, iconSize - 34)
+ ctx.scale(iconSize / 32, iconSize / 32)
+ ctx.strokeStyle = 'rgba(255,255,255,0.3)'
+ ctx.lineCap = 'round'
+ ctx.lineJoin = 'round'
+ ctx.lineWidth = 2.4
+ ctx.font = 'bold 36px monospace'
+ ctx.fillStyle = 'orange';
+ ctx.fillText('?', 0, 24)
+ ctx.restore()
+ return r
+ }
+ // handle clicking of the icon
+ const mouseDown = nodeType.prototype.onMouseDown
+ nodeType.prototype.onMouseDown = function (e, localPos, canvas) {
+ const r = mouseDown ? mouseDown.apply(this, arguments) : undefined
+ const iconX = this.size[0] - iconSize - iconMargin
+ const iconY = iconSize - 34
+ if (
+ localPos[0] > iconX &&
+ localPos[0] < iconX + iconSize &&
+ localPos[1] > iconY &&
+ localPos[1] < iconY + iconSize
+ ) {
+ if (this.show_doc === undefined) {
+ this.show_doc = true
+ } else {
+ this.show_doc = !this.show_doc
+ }
+ if (this.show_doc) {
+ this.docCtrl = new AbortController()
+ } else {
+ this.docCtrl.abort()
+ }
+ return true;
+ }
+ return r;
+ }
+ const onRem = nodeType.prototype.onRemoved
+
+ nodeType.prototype.onRemoved = function () {
+ const r = onRem ? onRem.apply(this, []) : undefined
+
+ if (docElement) {
+ docElement.remove()
+ docElement = null
+ }
+
+ if (contentWrapper) {
+ contentWrapper.remove()
+ contentWrapper = null
+ }
+ return r
+ }
+}
\ No newline at end of file
diff --git a/ComfyUI-KJNodes/web/js/jsnodes.js b/ComfyUI-KJNodes/web/js/jsnodes.js
new file mode 100644
index 0000000000000000000000000000000000000000..c1798c74891192a3f4272e8629ae10decaf4c958
--- /dev/null
+++ b/ComfyUI-KJNodes/web/js/jsnodes.js
@@ -0,0 +1,356 @@
+import { app } from "../../../scripts/app.js";
+
+app.registerExtension({
+ name: "KJNodes.jsnodes",
+ async beforeRegisterNodeDef(nodeType, nodeData, app) {
+ if(!nodeData?.category?.startsWith("KJNodes")) {
+ return;
+ }
+ switch (nodeData.name) {
+ case "ConditioningMultiCombine":
+ nodeType.prototype.onNodeCreated = function () {
+ this.cond_type = "CONDITIONING"
+ this.inputs_offset = nodeData.name.includes("selective")?1:0
+ this.addWidget("button", "Update inputs", null, () => {
+ if (!this.inputs) {
+ this.inputs = [];
+ }
+ const target_number_of_inputs = this.widgets.find(w => w.name === "inputcount")["value"];
+ if(target_number_of_inputs===this.inputs.length)return; // already set, do nothing
+
+ if(target_number_of_inputs < this.inputs.length){
+ for(let i = this.inputs.length; i>=this.inputs_offset+target_number_of_inputs; i--)
+ this.removeInput(i)
+ }
+ else{
+ for(let i = this.inputs.length+1-this.inputs_offset; i <= target_number_of_inputs; ++i)
+ this.addInput(`conditioning_${i}`, this.cond_type)
+ }
+ });
+ }
+ break;
+ case "ImageBatchMulti":
+ case "ImageAddMulti":
+ case "ImageConcatMulti":
+ case "CrossFadeImagesMulti":
+ case "TransitionImagesMulti":
+ nodeType.prototype.onNodeCreated = function () {
+ this._type = "IMAGE"
+ this.inputs_offset = nodeData.name.includes("selective")?1:0
+ this.addWidget("button", "Update inputs", null, () => {
+ if (!this.inputs) {
+ this.inputs = [];
+ }
+ const target_number_of_inputs = this.widgets.find(w => w.name === "inputcount")["value"];
+ if(target_number_of_inputs===this.inputs.length)return; // already set, do nothing
+
+ if(target_number_of_inputs < this.inputs.length){
+ for(let i = this.inputs.length; i>=this.inputs_offset+target_number_of_inputs; i--)
+ this.removeInput(i)
+ }
+ else{
+ for(let i = this.inputs.length+1-this.inputs_offset; i <= target_number_of_inputs; ++i)
+ this.addInput(`image_${i}`, this._type)
+ }
+ });
+ }
+ break;
+ case "MaskBatchMulti":
+ nodeType.prototype.onNodeCreated = function () {
+ this._type = "MASK"
+ this.inputs_offset = nodeData.name.includes("selective")?1:0
+ this.addWidget("button", "Update inputs", null, () => {
+ if (!this.inputs) {
+ this.inputs = [];
+ }
+ const target_number_of_inputs = this.widgets.find(w => w.name === "inputcount")["value"];
+ if(target_number_of_inputs===this.inputs.length)return; // already set, do nothing
+
+ if(target_number_of_inputs < this.inputs.length){
+ for(let i = this.inputs.length; i>=this.inputs_offset+target_number_of_inputs; i--)
+ this.removeInput(i)
+ }
+ else{
+ for(let i = this.inputs.length+1-this.inputs_offset; i <= target_number_of_inputs; ++i)
+ this.addInput(`mask_${i}`, this._type)
+ }
+ });
+ }
+ break;
+
+ case "FluxBlockLoraSelect":
+ nodeType.prototype.onNodeCreated = function () {
+ this.addWidget("button", "Set all", null, () => {
+ const userInput = prompt("Enter the values to set for widgets (e.g., s0,1,2-7=2.0, d0,1,2-7=2.0, or 1.0):", "");
+ if (userInput) {
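+            // Accepted formats: "s<indices>=<value>" targets "single" block widgets, "d<indices>=<value>"
+            // targets "double" block widgets, indices may mix commas and ranges (e.g. 0,1,2-7),
+            // and a bare number applies the value to every widget.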
+ const regex = /([sd])?(\d+(?:,\d+|-?\d+)*?)?=(\d+(\.\d+)?)/;
+ const match = userInput.match(regex);
+ if (match) {
+ const type = match[1];
+ const indicesPart = match[2];
+ const value = parseFloat(match[3]);
+
+ let targetWidgets = [];
+ if (type === 's') {
+ targetWidgets = this.widgets.filter(widget => widget.name.includes("single"));
+ } else if (type === 'd') {
+ targetWidgets = this.widgets.filter(widget => widget.name.includes("double"));
+ } else {
+ targetWidgets = this.widgets; // No type specified, all widgets
+ }
+
+ if (indicesPart) {
+ const indices = indicesPart.split(',').flatMap(part => {
+ if (part.includes('-')) {
+ const [start, end] = part.split('-').map(Number);
+ return Array.from({ length: end - start + 1 }, (_, i) => start + i);
+ }
+ return Number(part);
+ });
+
+ for (const index of indices) {
+ if (index < targetWidgets.length) {
+ targetWidgets[index].value = value;
+ }
+ }
+ } else {
+ // No indices provided, set value for all target widgets
+ for (const widget of targetWidgets) {
+ widget.value = value;
+ }
+ }
+ } else if (!isNaN(parseFloat(userInput))) {
+ // Single value provided, set it for all widgets
+ const value = parseFloat(userInput);
+ for (const widget of this.widgets) {
+ widget.value = value;
+ }
+ } else {
+ alert("Invalid input format. Please use the format s0,1,2-7=2.0, d0,1,2-7=2.0, or 1.0");
+ }
+ } else {
+ alert("Invalid input. Please enter a value.");
+ }
+ });
+ };
+ break;
+
+ case "GetMaskSizeAndCount":
+ const onGetMaskSizeConnectInput = nodeType.prototype.onConnectInput;
+ nodeType.prototype.onConnectInput = function (targetSlot, type, output, originNode, originSlot) {
+ const v = onGetMaskSizeConnectInput? onGetMaskSizeConnectInput.apply(this, arguments): undefined
+ this.outputs[1]["name"] = "width"
+ this.outputs[2]["name"] = "height"
+ this.outputs[3]["name"] = "count"
+ return v;
+ }
+ const onGetMaskSizeExecuted = nodeType.prototype.onExecuted;
+ nodeType.prototype.onExecuted = function(message) {
+ const r = onGetMaskSizeExecuted? onGetMaskSizeExecuted.apply(this,arguments): undefined
+ let values = message["text"].toString().split('x').map(Number);
+ this.outputs[1]["name"] = values[1] + " width"
+ this.outputs[2]["name"] = values[2] + " height"
+ this.outputs[3]["name"] = values[0] + " count"
+ return r
+ }
+ break;
+
+ case "GetImageSizeAndCount":
+ const onGetImageSizeConnectInput = nodeType.prototype.onConnectInput;
+ nodeType.prototype.onConnectInput = function (targetSlot, type, output, originNode, originSlot) {
+ const v = onGetImageSizeConnectInput? onGetImageSizeConnectInput.apply(this, arguments): undefined
+ this.outputs[1]["name"] = "width"
+ this.outputs[2]["name"] = "height"
+ this.outputs[3]["name"] = "count"
+ return v;
+ }
+ const onGetImageSizeExecuted = nodeType.prototype.onExecuted;
+ nodeType.prototype.onExecuted = function(message) {
+ const r = onGetImageSizeExecuted? onGetImageSizeExecuted.apply(this,arguments): undefined
+ let values = message["text"].toString().split('x').map(Number);
+ this.outputs[1]["name"] = values[1] + " width"
+ this.outputs[2]["name"] = values[2] + " height"
+ this.outputs[3]["name"] = values[0] + " count"
+ return r
+ }
+ break;
+
+ case "PreviewAnimation":
+ const onPreviewAnimationConnectInput = nodeType.prototype.onConnectInput;
+ nodeType.prototype.onConnectInput = function (targetSlot, type, output, originNode, originSlot) {
+ const v = onPreviewAnimationConnectInput? onPreviewAnimationConnectInput.apply(this, arguments): undefined
+ this.title = "Preview Animation"
+ return v;
+ }
+ const onPreviewAnimationExecuted = nodeType.prototype.onExecuted;
+ nodeType.prototype.onExecuted = function(message) {
+ const r = onPreviewAnimationExecuted? onPreviewAnimationExecuted.apply(this,arguments): undefined
+ let values = message["text"].toString();
+ this.title = "Preview Animation " + values
+ return r
+ }
+ break;
+
+ case "VRAM_Debug":
+ const onVRAM_DebugConnectInput = nodeType.prototype.onConnectInput;
+ nodeType.prototype.onConnectInput = function (targetSlot, type, output, originNode, originSlot) {
+ const v = onVRAM_DebugConnectInput? onVRAM_DebugConnectInput.apply(this, arguments): undefined
+ this.outputs[3]["name"] = "freemem_before"
+ this.outputs[4]["name"] = "freemem_after"
+ return v;
+ }
+ const onVRAM_DebugExecuted = nodeType.prototype.onExecuted;
+ nodeType.prototype.onExecuted = function(message) {
+ const r = onVRAM_DebugExecuted? onVRAM_DebugExecuted.apply(this,arguments): undefined
+ let values = message["text"].toString().split('x');
+ this.outputs[3]["name"] = values[0] + " freemem_before"
+ this.outputs[4]["name"] = values[1] + " freemem_after"
+ return r
+ }
+ break;
+
+ case "JoinStringMulti":
+ const originalOnNodeCreated = nodeType.prototype.onNodeCreated || function() {};
+ nodeType.prototype.onNodeCreated = function () {
+ originalOnNodeCreated.apply(this, arguments);
+
+ this._type = "STRING";
+ this.inputs_offset = nodeData.name.includes("selective") ? 1 : 0;
+ this.addWidget("button", "Update inputs", null, () => {
+ if (!this.inputs) {
+ this.inputs = [];
+ }
+ const target_number_of_inputs = this.widgets.find(w => w.name === "inputcount")["value"];
+ if (target_number_of_inputs === this.inputs.length) return; // already set, do nothing
+
+ if (target_number_of_inputs < this.inputs.length) {
+ for (let i = this.inputs.length; i >= this.inputs_offset + target_number_of_inputs; i--)
+ this.removeInput(i);
+ } else {
+ for (let i = this.inputs.length + 1 - this.inputs_offset; i <= target_number_of_inputs; ++i)
+ this.addInput(`string_${i}`, this._type);
+ }
+ });
+ }
+ break;
+ case "SoundReactive":
+ nodeType.prototype.onNodeCreated = function () {
+ let audioContext;
+ let microphoneStream;
+ let animationFrameId;
+ let analyser;
+ let dataArray;
+ let startRangeHz;
+ let endRangeHz;
+ let smoothingFactor = 0.5;
+ let smoothedSoundLevel = 0;
+
+ // Function to update the widget value in real-time
+ const updateWidgetValueInRealTime = () => {
+ // Ensure analyser and dataArray are defined before using them
+ if (analyser && dataArray) {
+ analyser.getByteFrequencyData(dataArray);
+
+ const startRangeHzWidget = this.widgets.find(w => w.name === "start_range_hz");
+ if (startRangeHzWidget) startRangeHz = startRangeHzWidget.value;
+ const endRangeHzWidget = this.widgets.find(w => w.name === "end_range_hz");
+ if (endRangeHzWidget) endRangeHz = endRangeHzWidget.value;
+ const smoothingFactorWidget = this.widgets.find(w => w.name === "smoothing_factor");
+ if (smoothingFactorWidget) smoothingFactor = smoothingFactorWidget.value;
+
+ // Calculate frequency bin width (frequency resolution)
+ const frequencyBinWidth = audioContext.sampleRate / analyser.fftSize;
+ // Convert the widget values from Hz to indices
+ const startRangeIndex = Math.floor(startRangeHz / frequencyBinWidth);
+ const endRangeIndex = Math.floor(endRangeHz / frequencyBinWidth);
+
+ // Function to calculate the average value for a frequency range
+ const calculateAverage = (start, end) => {
+ const sum = dataArray.slice(start, end).reduce((acc, val) => acc + val, 0);
+ const average = sum / (end - start);
+
+ // Apply exponential moving average smoothing
+ smoothedSoundLevel = (average * (1 - smoothingFactor)) + (smoothedSoundLevel * smoothingFactor);
+ return smoothedSoundLevel;
+ };
+ // Calculate the average levels for each frequency range
+ const soundLevel = calculateAverage(startRangeIndex, endRangeIndex);
+
+ // Update the widget values
+
+ const lowLevelWidget = this.widgets.find(w => w.name === "sound_level");
+ if (lowLevelWidget) lowLevelWidget.value = soundLevel;
+
+ animationFrameId = requestAnimationFrame(updateWidgetValueInRealTime);
+ }
+ };
+
+ // Function to start capturing audio from the microphone
+ const startMicrophoneCapture = () => {
+ // Only create the audio context and analyser once
+ if (!audioContext) {
+ audioContext = new (window.AudioContext || window.webkitAudioContext)();
+ // Access the sample rate of the audio context
+ console.log(`Sample rate: ${audioContext.sampleRate}Hz`);
+ analyser = audioContext.createAnalyser();
+ analyser.fftSize = 2048;
+ dataArray = new Uint8Array(analyser.frequencyBinCount);
+ // Get the range values from widgets (assumed to be in Hz)
+ const lowRangeWidget = this.widgets.find(w => w.name === "low_range_hz");
+ if (lowRangeWidget) startRangeHz = lowRangeWidget.value;
+
+ const midRangeWidget = this.widgets.find(w => w.name === "mid_range_hz");
+ if (midRangeWidget) endRangeHz = midRangeWidget.value;
+ }
+
+ navigator.mediaDevices.getUserMedia({ audio: true }).then(stream => {
+ microphoneStream = stream;
+ const microphone = audioContext.createMediaStreamSource(stream);
+ microphone.connect(analyser);
+ updateWidgetValueInRealTime();
+ }).catch(error => {
+ console.error('Access to microphone was denied or an error occurred:', error);
+ });
+ };
+
+ // Function to stop capturing audio from the microphone
+ const stopMicrophoneCapture = () => {
+ if (animationFrameId) {
+ cancelAnimationFrame(animationFrameId);
+ }
+ if (microphoneStream) {
+ microphoneStream.getTracks().forEach(track => track.stop());
+ }
+ if (audioContext) {
+ audioContext.close();
+ // Reset audioContext to ensure it can be created again when starting
+ audioContext = null;
+ }
+ };
+
+ // Add start button
+ this.addWidget("button", "Start mic capture", null, startMicrophoneCapture);
+
+ // Add stop button
+ this.addWidget("button", "Stop mic capture", null, stopMicrophoneCapture);
+ };
+ break;
+
+ }
+
+ },
+ async setup() {
+ // to keep Set/Get node virtual connections visible when offscreen
+ const originalComputeVisibleNodes = LGraphCanvas.prototype.computeVisibleNodes;
+ LGraphCanvas.prototype.computeVisibleNodes = function () {
+ const visibleNodesSet = new Set(originalComputeVisibleNodes.apply(this, arguments));
+ for (const node of this.graph._nodes) {
+ if ((node.type === "SetNode" || node.type === "GetNode") && node.drawConnection) {
+ visibleNodesSet.add(node);
+ }
+ }
+ return Array.from(visibleNodesSet);
+ };
+
+ }
+});
\ No newline at end of file
diff --git a/ComfyUI-KJNodes/web/js/point_editor.js b/ComfyUI-KJNodes/web/js/point_editor.js
new file mode 100644
index 0000000000000000000000000000000000000000..e8c5665d46363759e4bec59d6093f75d96533fd0
--- /dev/null
+++ b/ComfyUI-KJNodes/web/js/point_editor.js
@@ -0,0 +1,736 @@
+import { app } from '../../../scripts/app.js'
+
+//from melmass
+export function makeUUID() {
+ let dt = new Date().getTime()
+ const uuid = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, (c) => {
+ const r = ((dt + Math.random() * 16) % 16) | 0
+ dt = Math.floor(dt / 16)
+ return (c === 'x' ? r : (r & 0x3) | 0x8).toString(16)
+ })
+ return uuid
+}
+
+export const loadScript = (
+ FILE_URL,
+ async = true,
+ type = 'text/javascript',
+) => {
+ return new Promise((resolve, reject) => {
+ try {
+ // Check if the script already exists
+ const existingScript = document.querySelector(`script[src="${FILE_URL}"]`)
+ if (existingScript) {
+ resolve({ status: true, message: 'Script already loaded' })
+ return
+ }
+
+ const scriptEle = document.createElement('script')
+ scriptEle.type = type
+ scriptEle.async = async
+ scriptEle.src = FILE_URL
+
+ scriptEle.addEventListener('load', (ev) => {
+ resolve({ status: true })
+ })
+
+ scriptEle.addEventListener('error', (ev) => {
+ reject({
+ status: false,
+ message: `Failed to load the script ${FILE_URL}`,
+ })
+ })
+
+ document.body.appendChild(scriptEle)
+ } catch (error) {
+ reject(error)
+ }
+ })
+}
+const create_documentation_stylesheet = () => {
+ const tag = 'kj-pointseditor-stylesheet'
+
+  let styleTag = document.head.querySelector(`#${tag}`)
+
+ if (!styleTag) {
+ styleTag = document.createElement('style')
+ styleTag.type = 'text/css'
+ styleTag.id = tag
+ styleTag.innerHTML = `
+ .points-editor {
+
+ position: absolute;
+
+ font: 12px monospace;
+ line-height: 1.5em;
+ padding: 10px;
+ z-index: 0;
+ overflow: hidden;
+ }
+ `
+ document.head.appendChild(styleTag)
+ }
+}
+
+loadScript('/kjweb_async/svg-path-properties.min.js').catch((e) => {
+ console.log(e)
+})
+loadScript('/kjweb_async/protovis.min.js').catch((e) => {
+ console.log(e)
+})
+create_documentation_stylesheet()
+
+function chainCallback(object, property, callback) {
+ if (object == undefined) {
+ //This should not happen.
+ console.error("Tried to add callback to non-existant object")
+ return;
+ }
+ if (property in object) {
+ const callback_orig = object[property]
+ object[property] = function () {
+ const r = callback_orig.apply(this, arguments);
+ callback.apply(this, arguments);
+ return r
+ };
+ } else {
+ object[property] = callback;
+ }
+}
+app.registerExtension({
+ name: 'KJNodes.PointEditor',
+
+ async beforeRegisterNodeDef(nodeType, nodeData) {
+ if (nodeData?.name === 'PointsEditor') {
+ chainCallback(nodeType.prototype, "onNodeCreated", function () {
+
+ hideWidgetForGood(this, this.widgets.find(w => w.name === "coordinates"))
+ hideWidgetForGood(this, this.widgets.find(w => w.name === "neg_coordinates"))
+ hideWidgetForGood(this, this.widgets.find(w => w.name === "bboxes"))
+
+ var element = document.createElement("div");
+ this.uuid = makeUUID()
+ element.id = `points-editor-${this.uuid}`
+
+ // fake image widget to allow copy/paste
+ const fakeimagewidget = this.addWidget("COMBO", "image", null, () => { }, {});
+ hideWidgetForGood(this, fakeimagewidget)
+
+ this.pointsEditor = this.addDOMWidget(nodeData.name, "PointsEditorWidget", element, {
+ serialize: false,
+ hideOnZoom: false,
+ });
+
+ // context menu
+ this.contextMenu = document.createElement("div");
+ this.contextMenu.id = "context-menu";
+ this.contextMenu.style.display = "none";
+ this.contextMenu.style.position = "absolute";
+ this.contextMenu.style.backgroundColor = "#202020";
+ this.contextMenu.style.minWidth = "100px";
+ this.contextMenu.style.boxShadow = "0px 8px 16px 0px rgba(0,0,0,0.2)";
+ this.contextMenu.style.zIndex = "100";
+ this.contextMenu.style.padding = "5px";
+
+ function styleMenuItem(menuItem) {
+ menuItem.style.display = "block";
+ menuItem.style.padding = "5px";
+ menuItem.style.color = "#FFF";
+ menuItem.style.fontFamily = "Arial, sans-serif";
+ menuItem.style.fontSize = "16px";
+ menuItem.style.textDecoration = "none";
+ menuItem.style.marginBottom = "5px";
+ }
+ function createMenuItem(id, textContent) {
+ let menuItem = document.createElement("a");
+ menuItem.href = "#";
+ menuItem.id = `menu-item-${id}`;
+ menuItem.textContent = textContent;
+ styleMenuItem(menuItem);
+ return menuItem;
+ }
+
+ // Create an array of menu items using the createMenuItem function
+ this.menuItems = [
+ createMenuItem(0, "Load Image"),
+ createMenuItem(1, "Clear Image"),
+ ];
+
+ // Add mouseover and mouseout event listeners to each menu item for styling
+ this.menuItems.forEach(menuItem => {
+ menuItem.addEventListener('mouseover', function () {
+ this.style.backgroundColor = "gray";
+ });
+
+ menuItem.addEventListener('mouseout', function () {
+ this.style.backgroundColor = "#202020";
+ });
+ });
+
+ // Append each menu item to the context menu
+ this.menuItems.forEach(menuItem => {
+ this.contextMenu.appendChild(menuItem);
+ });
+
+ document.body.appendChild(this.contextMenu);
+
+ this.addWidget("button", "New canvas", null, () => {
+ if (!this.properties || !("points" in this.properties)) {
+ this.editor = new PointsEditor(this);
+ this.addProperty("points", this.constructor.type, "string");
+ this.addProperty("neg_points", this.constructor.type, "string");
+
+ }
+ else {
+ this.editor = new PointsEditor(this, true);
+ }
+ });
+
+ this.setSize([550, 550]);
+ this.resizable = false;
+ this.pointsEditor.parentEl = document.createElement("div");
+ this.pointsEditor.parentEl.className = "points-editor";
+ this.pointsEditor.parentEl.id = `points-editor-${this.uuid}`
+ element.appendChild(this.pointsEditor.parentEl);
+
+ chainCallback(this, "onConfigure", function () {
+ try {
+ this.editor = new PointsEditor(this);
+ } catch (error) {
+ console.error("An error occurred while configuring the editor:", error);
+ }
+ });
+ chainCallback(this, "onExecuted", function (message) {
+ let bg_image = message["bg_image"];
+ this.properties.imgData = {
+ name: "bg_image",
+ base64: bg_image
+ };
+ this.editor.refreshBackgroundImage(this);
+ });
+
+      }); // onNodeCreated
+ }//node created
+ } //before register
+})//register
+
+class PointsEditor {
+ constructor(context, reset = false) {
+ this.node = context;
+ this.reset = reset;
+ const self = this; // Keep a reference to the main class context
+
+ console.log("creatingPointEditor")
+
+ this.node.pasteFile = (file) => {
+ if (file.type.startsWith("image/")) {
+ this.handleImageFile(file);
+ return true;
+ }
+ return false;
+ };
+
+ this.node.onDragOver = function (e) {
+ if (e.dataTransfer && e.dataTransfer.items) {
+ return [...e.dataTransfer.items].some(f => f.kind === "file" && f.type.startsWith("image/"));
+ }
+ return false;
+ };
+
+ // On drop upload files
+ this.node.onDragDrop = (e) => {
+ console.log("onDragDrop called");
+ let handled = false;
+ for (const file of e.dataTransfer.files) {
+ if (file.type.startsWith("image/")) {
+ this.handleImageFile(file);
+ handled = true;
+ }
+ }
+ return handled;
+ };
+
+ // context menu
+ this.createContextMenu();
+
+ if (reset && context.pointsEditor.element) {
+ context.pointsEditor.element.innerHTML = ''; // Clear the container
+ }
+ this.pos_coordWidget = context.widgets.find(w => w.name === "coordinates");
+ this.neg_coordWidget = context.widgets.find(w => w.name === "neg_coordinates");
+ this.pointsStoreWidget = context.widgets.find(w => w.name === "points_store");
+ this.widthWidget = context.widgets.find(w => w.name === "width");
+ this.heightWidget = context.widgets.find(w => w.name === "height");
+ this.bboxStoreWidget = context.widgets.find(w => w.name === "bbox_store");
+ this.bboxWidget = context.widgets.find(w => w.name === "bboxes");
+
+ //widget callbacks
+ this.widthWidget.callback = () => {
+ this.width = this.widthWidget.value;
+ if (this.width > 256) {
+ context.setSize([this.width + 45, context.size[1]]);
+ }
+ this.vis.width(this.width);
+ this.updateData();
+ }
+ this.heightWidget.callback = () => {
+ this.height = this.heightWidget.value
+ this.vis.height(this.height)
+ context.setSize([context.size[0], this.height + 300]);
+ this.updateData();
+ }
+    this.pointsStoreWidget.callback = () => {
+      this.points = JSON.parse(this.pointsStoreWidget.value).positive;
+      this.neg_points = JSON.parse(this.pointsStoreWidget.value).negative;
+      this.updateData();
+    }
+    this.bboxStoreWidget.callback = () => {
+      this.bbox = JSON.parse(this.bboxStoreWidget.value);
+      this.updateData();
+    }
+
+ this.width = this.widthWidget.value;
+ this.height = this.heightWidget.value;
+ var i = 3;
+ this.points = [];
+ this.neg_points = [];
+ this.bbox = [{}];
+ var drawing = false;
+
+ // Initialize or reset points array
+ if (!reset && this.pointsStoreWidget.value != "") {
+ this.points = JSON.parse(this.pointsStoreWidget.value).positive;
+ this.neg_points = JSON.parse(this.pointsStoreWidget.value).negative;
+ this.bbox = JSON.parse(this.bboxStoreWidget.value);
+ console.log(this.bbox)
+ } else {
+ this.points = [
+ {
+ x: this.width / 2, // Middle point horizontally centered
+ y: this.height / 2 // Middle point vertically centered
+ }
+ ];
+      this.neg_points = [
+        {
+          x: 0, // Default negative point placed at the top-left corner
+          y: 0
+        }
+      ];
+ const combinedPoints = {
+ positive: this.points,
+ negative: this.neg_points,
+ };
+ this.pointsStoreWidget.value = JSON.stringify(combinedPoints);
+ this.bboxStoreWidget.value = JSON.stringify(this.bbox);
+ }
+
+ //create main canvas panel
+ this.vis = new pv.Panel()
+ .width(this.width)
+ .height(this.height)
+ .fillStyle("#222")
+ .strokeStyle("gray")
+ .lineWidth(2)
+ .antialias(false)
+ .margin(10)
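+      // Interaction scheme: Shift+click adds a positive point, Shift+right-click adds a negative
+      // point, Ctrl+drag draws the bounding box, and a plain right-click opens the context menu.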
+ .event("mousedown", function () {
+ if (pv.event.shiftKey && pv.event.button === 2) { // Use pv.event to access the event object
+ let scaledMouse = {
+ x: this.mouse().x / app.canvas.ds.scale,
+ y: this.mouse().y / app.canvas.ds.scale
+ };
+ i = self.neg_points.push(scaledMouse) - 1;
+ self.updateData();
+ return this;
+ }
+ else if (pv.event.shiftKey) {
+ let scaledMouse = {
+ x: this.mouse().x / app.canvas.ds.scale,
+ y: this.mouse().y / app.canvas.ds.scale
+ };
+ i = self.points.push(scaledMouse) - 1;
+ self.updateData();
+ return this;
+ }
+ else if (pv.event.ctrlKey) {
+ console.log("start drawing at " + this.mouse().x / app.canvas.ds.scale + ", " + this.mouse().y / app.canvas.ds.scale);
+ drawing = true;
+ self.bbox[0].startX = this.mouse().x / app.canvas.ds.scale;
+ self.bbox[0].startY = this.mouse().y / app.canvas.ds.scale;
+ }
+ else if (pv.event.button === 2) {
+ self.node.contextMenu.style.display = 'block';
+ self.node.contextMenu.style.left = `${pv.event.clientX}px`;
+ self.node.contextMenu.style.top = `${pv.event.clientY}px`;
+ }
+ })
+ .event("mousemove", function () {
+ if (drawing) {
+ self.bbox[0].endX = this.mouse().x / app.canvas.ds.scale;
+ self.bbox[0].endY = this.mouse().y / app.canvas.ds.scale;
+ self.vis.render();
+ }
+ })
+ .event("mouseup", function () {
+ console.log("end drawing at " + this.mouse().x / app.canvas.ds.scale + ", " + this.mouse().y / app.canvas.ds.scale);
+ drawing = false;
+ self.updateData();
+ });
+
+ this.backgroundImage = this.vis.add(pv.Image).visible(false)
+
+ //create bounding box
+ this.bounding_box = this.vis.add(pv.Area)
+ .data(function () {
+ if (drawing || (self.bbox && self.bbox[0] && Object.keys(self.bbox[0]).length > 0)) {
+ return [self.bbox[0].startX, self.bbox[0].endX];
+ } else {
+ return [];
+ }
+ })
+ .bottom(function () {return self.height - Math.max(self.bbox[0].startY, self.bbox[0].endY); })
+ .left(function (d) {return d; })
+ .height(function () {return Math.abs(self.bbox[0].startY - self.bbox[0].endY);})
+ .fillStyle("rgba(70, 130, 180, 0.5)")
+ .strokeStyle("steelblue")
+ .visible(function () {return drawing || Object.keys(self.bbox[0]).length > 0; })
+ .add(pv.Dot)
+ .visible(function () {return drawing || Object.keys(self.bbox[0]).length > 0; })
+ .data(() => {
+ if (self.bbox && Object.keys(self.bbox[0]).length > 0) {
+ return [{
+ x: self.bbox[0].endX,
+ y: self.bbox[0].endY
+ }];
+ } else {
+ return [];
+ }
+ })
+ .left(d => d.x)
+ .top(d => d.y)
+ .radius(Math.log(Math.min(self.width, self.height)) * 1)
+ .shape("square")
+ .cursor("move")
+ .strokeStyle("steelblue")
+ .lineWidth(2)
+ .fillStyle(function () { return "rgba(100, 100, 100, 0.6)"; })
+ .event("mousedown", pv.Behavior.drag())
+ .event("drag", function () {
+ let adjustedX = this.mouse().x / app.canvas.ds.scale; // Adjust the new position by the inverse of the scale factor
+ let adjustedY = this.mouse().y / app.canvas.ds.scale;
+
+ // Adjust the new position if it would place the dot outside the bounds of the vis.Panel
+ adjustedX = Math.max(0, Math.min(self.vis.width(), adjustedX));
+ adjustedY = Math.max(0, Math.min(self.vis.height(), adjustedY));
+ self.bbox[0].endX = this.mouse().x / app.canvas.ds.scale;
+ self.bbox[0].endY = this.mouse().y / app.canvas.ds.scale;
+ self.vis.render();
+ })
+ .event("dragend", function () {
+ self.updateData();
+ });
+
+ //create positive points
+ this.vis.add(pv.Dot)
+ .data(() => this.points)
+ .left(d => d.x)
+ .top(d => d.y)
+ .radius(Math.log(Math.min(self.width, self.height)) * 4)
+ .shape("circle")
+ .cursor("move")
+ .strokeStyle(function () { return i == this.index ? "#07f907" : "#139613"; })
+ .lineWidth(4)
+ .fillStyle(function () { return "rgba(100, 100, 100, 0.6)"; })
+ .event("mousedown", pv.Behavior.drag())
+ .event("dragstart", function () {
+ i = this.index;
+ })
+ .event("dragend", function () {
+ if (pv.event.button === 2 && i !== 0 && i !== self.points.length - 1) {
+ this.index = i;
+ self.points.splice(i--, 1);
+ }
+ self.updateData();
+
+ })
+ .event("drag", function () {
+ let adjustedX = this.mouse().x / app.canvas.ds.scale; // Adjust the new X position by the inverse of the scale factor
+ let adjustedY = this.mouse().y / app.canvas.ds.scale; // Adjust the new Y position by the inverse of the scale factor
+ // Determine the bounds of the vis.Panel
+ const panelWidth = self.vis.width();
+ const panelHeight = self.vis.height();
+
+ // Adjust the new position if it would place the dot outside the bounds of the vis.Panel
+ adjustedX = Math.max(0, Math.min(panelWidth, adjustedX));
+ adjustedY = Math.max(0, Math.min(panelHeight, adjustedY));
+ self.points[this.index] = { x: adjustedX, y: adjustedY }; // Update the point's position
+ self.vis.render(); // Re-render the visualization to reflect the new position
+ })
+
+ .anchor("center")
+ .add(pv.Label)
+ .left(d => d.x < this.width / 2 ? d.x + 30 : d.x - 35) // Shift label to right if on left half, otherwise shift to left
+ .top(d => d.y < this.height / 2 ? d.y + 25 : d.y - 25) // Shift label down if on top half, otherwise shift up
+ .font("25px sans-serif")
+ .text(d => {return this.points.indexOf(d); })
+ .textStyle("#139613")
+ .textShadow("2px 2px 2px black")
+ .add(pv.Dot) // Add smaller point in the center
+ .data(() => this.points)
+ .left(d => d.x)
+ .top(d => d.y)
+ .radius(2) // Smaller radius for the center point
+ .shape("circle")
+ .fillStyle("red") // Color for the center point
+ .lineWidth(1); // Stroke thickness for the center point
+
+ //create negative points
+ this.vis.add(pv.Dot)
+ .data(() => this.neg_points)
+ .left(d => d.x)
+ .top(d => d.y)
+ .radius(Math.log(Math.min(self.width, self.height)) * 4)
+ .shape("circle")
+ .cursor("move")
+ .strokeStyle(function () { return i == this.index ? "#f91111" : "#891616"; })
+ .lineWidth(4)
+ .fillStyle(function () { return "rgba(100, 100, 100, 0.6)"; })
+ .event("mousedown", pv.Behavior.drag())
+ .event("dragstart", function () {
+ i = this.index;
+ })
+ .event("dragend", function () {
+ if (pv.event.button === 2 && i !== 0 && i !== self.neg_points.length - 1) {
+ this.index = i;
+ self.neg_points.splice(i--, 1);
+ }
+ self.updateData();
+
+ })
+ .event("drag", function () {
+ let adjustedX = this.mouse().x / app.canvas.ds.scale; // Adjust the new X position by the inverse of the scale factor
+ let adjustedY = this.mouse().y / app.canvas.ds.scale; // Adjust the new Y position by the inverse of the scale factor
+ // Determine the bounds of the vis.Panel
+ const panelWidth = self.vis.width();
+ const panelHeight = self.vis.height();
+
+ // Adjust the new position if it would place the dot outside the bounds of the vis.Panel
+ adjustedX = Math.max(0, Math.min(panelWidth, adjustedX));
+ adjustedY = Math.max(0, Math.min(panelHeight, adjustedY));
+ self.neg_points[this.index] = { x: adjustedX, y: adjustedY }; // Update the point's position
+ self.vis.render(); // Re-render the visualization to reflect the new position
+ })
+ .anchor("center")
+ .add(pv.Label)
+ .left(d => d.x < this.width / 2 ? d.x + 30 : d.x - 35) // Shift label to right if on left half, otherwise shift to left
+ .top(d => d.y < this.height / 2 ? d.y + 25 : d.y - 25) // Shift label down if on top half, otherwise shift up
+ .font("25px sans-serif")
+ .text(d => {return this.neg_points.indexOf(d); })
+ .textStyle("red")
+ .textShadow("2px 2px 2px black")
+ .add(pv.Dot) // Add smaller point in the center
+ .data(() => this.neg_points)
+ .left(d => d.x)
+ .top(d => d.y)
+ .radius(2) // Smaller radius for the center point
+ .shape("circle")
+ .fillStyle("red") // Color for the center point
+ .lineWidth(1); // Stroke thickness for the center point
+
+ if (this.points.length != 0) {
+ this.vis.render();
+ }
+
+ var svgElement = this.vis.canvas();
+ svgElement.style['zIndex'] = "2"
+ svgElement.style['position'] = "relative"
+ this.node.pointsEditor.element.appendChild(svgElement);
+
+ if (this.width > 256) {
+ this.node.setSize([this.width + 45, this.node.size[1]]);
+ }
+ this.node.setSize([this.node.size[0], this.height + 300]);
+ this.updateData();
+ this.refreshBackgroundImage();
+
+ }//end constructor
+
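+ // Serialize the current positive/negative points and bounding box back into the node's hidden widgets as JSON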
+ updateData = () => {
+ if (!this.points || this.points.length === 0) {
+ console.log("no points");
+ return;
+ }
+ const combinedPoints = {
+ positive: this.points,
+ negative: this.neg_points,
+ };
+ this.pointsStoreWidget.value = JSON.stringify(combinedPoints);
+ this.pos_coordWidget.value = JSON.stringify(this.points);
+ this.neg_coordWidget.value = JSON.stringify(this.neg_points);
+
+ if (this.bbox.length != 0) {
+ let bboxString = JSON.stringify(this.bbox);
+ this.bboxStoreWidget.value = bboxString;
+ this.bboxWidget.value = bboxString;
+ }
+
+ this.vis.render();
+ };
+
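+ // Resize the node and visualization panel to the loaded image and set it as the editor background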
+ handleImageLoad = (img, file, base64String) => {
+ console.log(img.width, img.height); // Access width and height here
+ this.widthWidget.value = img.width;
+ this.heightWidget.value = img.height;
+
+ if (img.width != this.vis.width() || img.height != this.vis.height()) {
+ if (img.width > 256) {
+ this.node.setSize([img.width + 45, this.node.size[1]]);
+ }
+ this.node.setSize([this.node.size[0], img.height + 300]);
+ this.vis.width(img.width);
+ this.vis.height(img.height);
+ this.height = img.height;
+ this.width = img.width;
+ this.updateData();
+ }
+ this.backgroundImage.url(file ? URL.createObjectURL(file) : `data:${this.node.properties.imgData.type};base64,${base64String}`).visible(true).root.render();
+ };
+
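+ // Downscale the image to at most 800x600 on an offscreen canvas and store it as a base64 JPEG in the node properties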
+ processImage = (img, file) => {
+ const canvas = document.createElement('canvas');
+ const ctx = canvas.getContext('2d');
+
+ const maxWidth = 800; // maximum width
+ const maxHeight = 600; // maximum height
+ let width = img.width;
+ let height = img.height;
+
+ // Calculate the new dimensions while preserving the aspect ratio
+ if (width > height) {
+ if (width > maxWidth) {
+ height *= maxWidth / width;
+ width = maxWidth;
+ }
+ } else {
+ if (height > maxHeight) {
+ width *= maxHeight / height;
+ height = maxHeight;
+ }
+ }
+
+ canvas.width = width;
+ canvas.height = height;
+ ctx.drawImage(img, 0, 0, width, height);
+
+ // Get the compressed image data as a Base64 string
+ const base64String = canvas.toDataURL('image/jpeg', 0.5).replace('data:', '').replace(/^.+,/, ''); // 0.5 is the quality from 0 to 1
+
+ this.node.properties.imgData = {
+ name: file.name,
+ lastModified: file.lastModified,
+ size: file.size,
+ type: file.type,
+ base64: base64String
+ };
+ this.handleImageLoad(img, file, base64String);
+ };
+
+ handleImageFile = (file) => {
+ const reader = new FileReader();
+ reader.onloadend = () => {
+ const img = new Image();
+ img.src = reader.result;
+ img.onload = () => this.processImage(img, file);
+ };
+ reader.readAsDataURL(file);
+
+ const imageUrl = URL.createObjectURL(file);
+ const img = new Image();
+ img.src = imageUrl;
+ img.onload = () => this.handleImageLoad(img, file, null);
+ };
+
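+ // Restore the background image from the base64 data saved in the node properties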
+ refreshBackgroundImage = () => {
+ if (this.node.properties.imgData && this.node.properties.imgData.base64) {
+ const base64String = this.node.properties.imgData.base64;
+ const imageUrl = `data:${this.node.properties.imgData.type};base64,${base64String}`;
+ const img = new Image();
+ img.src = imageUrl;
+ img.onload = () => this.handleImageLoad(img, null, base64String);
+ }
+ };
+
+ createContextMenu = () => {
+ const self = this;
+ document.addEventListener('contextmenu', function (e) {
+ e.preventDefault();
+ });
+
+ document.addEventListener('click', function (e) {
+ if (!self.node.contextMenu.contains(e.target)) {
+ self.node.contextMenu.style.display = 'none';
+ }
+ });
+
+ this.node.menuItems.forEach((menuItem, index) => {
+ menuItem.addEventListener('click', function (e) {
+ e.preventDefault();
+ switch (index) {
+ case 0:
+ // Create file input element
+ const fileInput = document.createElement('input');
+ fileInput.type = 'file';
+ fileInput.accept = 'image/*'; // Accept only image files
+
+ // Listen for file selection
+ fileInput.addEventListener('change', function (event) {
+ const file = event.target.files[0]; // Get the selected file
+
+ if (file) {
+ const imageUrl = URL.createObjectURL(file);
+ let img = new Image();
+ img.src = imageUrl;
+ img.onload = () => self.handleImageLoad(img, file, null);
+ }
+ });
+
+ fileInput.click();
+
+ self.node.contextMenu.style.display = 'none';
+ break;
+ case 1:
+ self.backgroundImage.visible(false).root.render();
+ self.node.properties.imgData = null;
+ self.node.contextMenu.style.display = 'none';
+ break;
+ }
+ });
+ });
+ }//end createContextMenu
+}//end class
+
+
+//from melmass
+export function hideWidgetForGood(node, widget, suffix = '') {
+ widget.origType = widget.type
+ widget.origComputeSize = widget.computeSize
+ widget.origSerializeValue = widget.serializeValue
+ widget.computeSize = () => [0, -4] // -4 is due to the gap litegraph adds between widgets automatically
+ widget.type = "converted-widget" + suffix
+ // widget.serializeValue = () => {
+ // // Prevent serializing the widget if we have no input linked
+ // const w = node.inputs?.find((i) => i.widget?.name === widget.name);
+ // if (w?.link == null) {
+ // return undefined;
+ // }
+ // return widget.origSerializeValue ? widget.origSerializeValue() : widget.value;
+ // };
+
+ // Hide any linked widgets, e.g. seed+seedControl
+ if (widget.linkedWidgets) {
+ for (const w of widget.linkedWidgets) {
+ hideWidgetForGood(node, w, ':' + widget.name)
+ }
+ }
+}
\ No newline at end of file
diff --git a/ComfyUI-KJNodes/web/js/setgetnodes.js b/ComfyUI-KJNodes/web/js/setgetnodes.js
new file mode 100644
index 0000000000000000000000000000000000000000..3051a3dbe4b48998513503293eccd63663aff370
--- /dev/null
+++ b/ComfyUI-KJNodes/web/js/setgetnodes.js
@@ -0,0 +1,559 @@
+import { app } from "../../../scripts/app.js";
+
+//based on diffus3's SetGet: https://github.com/diffus3/ComfyUI-extensions
+
+// Nodes that allow you to tunnel connections for cleaner graphs
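+// Color a Set/Get node to match the data type it carries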
+function setColorAndBgColor(type) {
+ const colorMap = {
+ "MODEL": LGraphCanvas.node_colors.blue,
+ "LATENT": LGraphCanvas.node_colors.purple,
+ "VAE": LGraphCanvas.node_colors.red,
+ "CONDITIONING": LGraphCanvas.node_colors.brown,
+ "IMAGE": LGraphCanvas.node_colors.pale_blue,
+ "CLIP": LGraphCanvas.node_colors.yellow,
+ "FLOAT": LGraphCanvas.node_colors.green,
+ "MASK": { color: "#1c5715", bgcolor: "#1f401b"},
+ "INT": { color: "#1b4669", bgcolor: "#29699c"},
+ "CONTROL_NET": { color: "#156653", bgcolor: "#1c453b"},
+ "NOISE": { color: "#2e2e2e", bgcolor: "#242121"},
+ "GUIDER": { color: "#3c7878", bgcolor: "#1c453b"},
+ "SAMPLER": { color: "#614a4a", bgcolor: "#3b2c2c"},
+ "SIGMAS": { color: "#485248", bgcolor: "#272e27"},
+
+ };
+
+ const colors = colorMap[type];
+ if (colors) {
+ this.color = colors.color;
+ this.bgcolor = colors.bgcolor;
+ }
+}
+let isAlertShown = false;
+let disablePrefix = app.ui.settings.getSettingValue("KJNodes.disablePrefix")
+const LGraphNode = LiteGraph.LGraphNode
+
+function showAlertWithThrottle(message, delay) {
+ if (!isAlertShown) {
+ isAlertShown = true;
+ alert(message);
+ setTimeout(() => isAlertShown = false, delay);
+ }
+}
+app.registerExtension({
+ name: "SetNode",
+ registerCustomNodes() {
+ class SetNode extends LGraphNode {
+ defaultVisibility = true;
+ serialize_widgets = true;
+ drawConnection = false;
+ currentGetters = null;
+ slotColor = "#FFF";
+ canvas = app.canvas;
+ menuEntry = "Show connections";
+
+ constructor(title) {
+ super(title)
+ if (!this.properties) {
+ this.properties = {
+ "previousName": ""
+ };
+ }
+ this.properties.showOutputText = SetNode.defaultVisibility;
+
+ const node = this;
+
+ this.addWidget(
+ "text",
+ "Constant",
+ '',
+ (s, t, u, v, x) => {
+ node.validateName(node.graph);
+ if(this.widgets[0].value !== ''){
+ this.title = (!disablePrefix ? "Set_" : "") + this.widgets[0].value;
+ }
+ this.update();
+ this.properties.previousName = this.widgets[0].value;
+ },
+ {}
+ )
+
+ this.addInput("*", "*");
+ this.addOutput("*", '*');
+
+ this.onConnectionsChange = function(
+ slotType, //1 = input, 2 = output
+ slot,
+ isChangeConnect,
+ link_info,
+ output
+ ) {
+ //On Disconnect
+ if (slotType == 1 && !isChangeConnect) {
+ if(this.inputs[slot].name === ''){
+ this.inputs[slot].type = '*';
+ this.inputs[slot].name = '*';
+ this.title = "Set"
+ }
+ }
+ if (slotType == 2 && !isChangeConnect) {
+ this.outputs[slot].type = '*';
+ this.outputs[slot].name = '*';
+
+ }
+ //On Connect
+ if (link_info && node.graph && slotType == 1 && isChangeConnect) {
+ const fromNode = node.graph._nodes.find((otherNode) => otherNode.id == link_info.origin_id);
+
+ if (fromNode && fromNode.outputs && fromNode.outputs[link_info.origin_slot]) {
+ const type = fromNode.outputs[link_info.origin_slot].type;
+
+ if (this.title === "Set"){
+ this.title = (!disablePrefix ? "Set_" : "") + type;
+ }
+ if (this.widgets[0].value === '*'){
+ this.widgets[0].value = type
+ }
+
+ this.validateName(node.graph);
+ this.inputs[0].type = type;
+ this.inputs[0].name = type;
+
+ if (app.ui.settings.getSettingValue("KJNodes.nodeAutoColor")){
+ setColorAndBgColor.call(this, type);
+ }
+ } else {
+ alert("Error: Set node input undefined. Most likely you're missing custom nodes");
+ }
+ }
+ if (link_info && node.graph && slotType == 2 && isChangeConnect) {
+ const fromNode = node.graph._nodes.find((otherNode) => otherNode.id == link_info.origin_id);
+
+ if (fromNode && fromNode.inputs && fromNode.inputs[link_info.origin_slot]) {
+ const type = fromNode.inputs[link_info.origin_slot].type;
+
+ this.outputs[0].type = type;
+ this.outputs[0].name = type;
+ } else {
+ alert("Error: Get Set node output undefined. Most likely you're missing custom nodes");
+ }
+ }
+
+
+ //Update either way
+ this.update();
+ }
+
+ this.validateName = function(graph) {
+ let widgetValue = node.widgets[0].value;
+
+ if (widgetValue !== '') {
+ let tries = 0;
+ const existingValues = new Set();
+
+ graph._nodes.forEach(otherNode => {
+ if (otherNode !== this && otherNode.type === 'SetNode') {
+ existingValues.add(otherNode.widgets[0].value);
+ }
+ });
+
+ while (existingValues.has(widgetValue)) {
+ widgetValue = node.widgets[0].value + "_" + tries;
+ tries++;
+ }
+
+ node.widgets[0].value = widgetValue;
+ this.update();
+ }
+ }
+
+ this.clone = function () {
+ const cloned = SetNode.prototype.clone.apply(this);
+ cloned.inputs[0].name = '*';
+ cloned.inputs[0].type = '*';
+ cloned.value = '';
+ cloned.properties.previousName = '';
+ cloned.size = cloned.computeSize();
+ return cloned;
+ };
+
+ this.onAdded = function(graph) {
+ this.validateName(graph);
+ }
+
+
+ this.update = function() {
+ if (!node.graph) {
+ return;
+ }
+
+ const getters = this.findGetters(node.graph);
+ getters.forEach(getter => {
+ getter.setType(this.inputs[0].type);
+ });
+
+ if (this.widgets[0].value) {
+ const gettersWithPreviousName = this.findGetters(node.graph, true);
+ gettersWithPreviousName.forEach(getter => {
+ getter.setName(this.widgets[0].value);
+ });
+ }
+
+ const allGetters = node.graph._nodes.filter(otherNode => otherNode.type === "GetNode");
+ allGetters.forEach(otherNode => {
+ if (otherNode.setComboValues) {
+ otherNode.setComboValues();
+ }
+ });
+ }
+
+
+ this.findGetters = function(graph, checkForPreviousName) {
+ const name = checkForPreviousName ? this.properties.previousName : this.widgets[0].value;
+ return graph._nodes.filter(otherNode => otherNode.type === 'GetNode' && otherNode.widgets[0].value === name && name !== '');
+ }
+
+
+ // This node is purely frontend and does not impact the resulting prompt so should not be serialized
+ this.isVirtualNode = true;
+ }
+
+
+ onRemoved() {
+ const allGetters = this.graph._nodes.filter((otherNode) => otherNode.type == "GetNode");
+ allGetters.forEach((otherNode) => {
+ if (otherNode.setComboValues) {
+ otherNode.setComboValues([this]);
+ }
+ })
+ }
+ getExtraMenuOptions(_, options) {
+ this.menuEntry = this.drawConnection ? "Hide connections" : "Show connections";
+ options.unshift(
+ {
+ content: this.menuEntry,
+ callback: () => {
+ this.currentGetters = this.findGetters(this.graph);
+ if (this.currentGetters.length == 0) return;
+ let linkType = (this.currentGetters[0].outputs[0].type);
+ this.slotColor = this.canvas.default_connection_color_byType[linkType]
+ this.menuEntry = this.drawConnection ? "Hide connections" : "Show connections";
+ this.drawConnection = !this.drawConnection;
+ this.canvas.setDirty(true, true);
+
+ },
+ has_submenu: true,
+ submenu: {
+ title: "Color",
+ options: [
+ {
+ content: "Highlight",
+ callback: () => {
+ this.slotColor = "orange"
+ this.canvas.setDirty(true, true);
+ }
+ }
+ ],
+ },
+ },
+ {
+ content: "Hide all connections",
+ callback: () => {
+ const allGetters = this.graph._nodes.filter(otherNode => otherNode.type === "GetNode" || otherNode.type === "SetNode");
+ allGetters.forEach(otherNode => {
+ otherNode.drawConnection = false;
+ console.log(otherNode);
+ });
+
+ this.menuEntry = "Show connections";
+ this.drawConnection = false
+ this.canvas.setDirty(true, true);
+
+ },
+
+ },
+ );
+ // Dynamically add a submenu for all getters
+ this.currentGetters = this.findGetters(this.graph);
+ if (this.currentGetters.length) {
+
+ let gettersSubmenu = this.currentGetters.map(getter => ({
+
+ content: `${getter.title} id: ${getter.id}`,
+ callback: () => {
+ this.canvas.centerOnNode(getter);
+ this.canvas.selectNode(getter, false);
+ this.canvas.setDirty(true, true);
+
+ },
+ }));
+
+ options.unshift({
+ content: "Getters",
+ has_submenu: true,
+ submenu: {
+ title: "GetNodes",
+ options: gettersSubmenu,
+ }
+ });
+ }
+ }
+
+
+ onDrawForeground(ctx, lGraphCanvas) {
+ if (this.drawConnection) {
+ this._drawVirtualLinks(lGraphCanvas, ctx);
+ }
+ }
+ // onDrawCollapsed(ctx, lGraphCanvas) {
+ // if (this.drawConnection) {
+ // this._drawVirtualLinks(lGraphCanvas, ctx);
+ // }
+ // }
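+ // Render ghost links from this Set node to each of its Get nodes while "Show connections" is enabled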
+ _drawVirtualLinks(lGraphCanvas, ctx) {
+ if (!this.currentGetters?.length) return;
+ var title = this.getTitle ? this.getTitle() : this.title;
+ var title_width = ctx.measureText(title).width;
+ if (!this.flags.collapsed) {
+ var start_node_slotpos = [
+ this.size[0],
+ LiteGraph.NODE_TITLE_HEIGHT * 0.5,
+ ];
+ }
+ else {
+
+ var start_node_slotpos = [
+ title_width + 55,
+ -15,
+
+ ];
+ }
+
+ for (const getter of this.currentGetters) {
+ if (!this.flags.collapsed) {
+ var end_node_slotpos = this.getConnectionPos(false, 0);
+ end_node_slotpos = [
+ getter.pos[0] - end_node_slotpos[0] + this.size[0],
+ getter.pos[1] - end_node_slotpos[1]
+ ];
+ }
+ else {
+ var end_node_slotpos = this.getConnectionPos(false, 0);
+ end_node_slotpos = [
+ getter.pos[0] - end_node_slotpos[0] + title_width + 50,
+ getter.pos[1] - end_node_slotpos[1] - 30
+ ];
+ }
+ lGraphCanvas.renderLink(
+ ctx,
+ start_node_slotpos,
+ end_node_slotpos,
+ null,
+ false,
+ null,
+ this.slotColor,
+ LiteGraph.RIGHT,
+ LiteGraph.LEFT
+ );
+ }
+ }
+ }
+
+ LiteGraph.registerNodeType(
+ "SetNode",
+ Object.assign(SetNode, {
+ title: "Set",
+ })
+ );
+
+ SetNode.category = "KJNodes";
+ },
+});
+
+app.registerExtension({
+ name: "GetNode",
+ registerCustomNodes() {
+ class GetNode extends LGraphNode {
+
+ defaultVisibility = true;
+ serialize_widgets = true;
+ drawConnection = false;
+ slotColor = "#FFF";
+ currentSetter = null;
+ canvas = app.canvas;
+
+ constructor(title) {
+ super(title)
+ if (!this.properties) {
+ this.properties = {};
+ }
+ this.properties.showOutputText = GetNode.defaultVisibility;
+ const node = this;
+ this.addWidget(
+ "combo",
+ "Constant",
+ "",
+ (e) => {
+ this.onRename();
+ },
+ {
+ values: () => {
+ const setterNodes = node.graph._nodes.filter((otherNode) => otherNode.type == 'SetNode');
+ return setterNodes.map((otherNode) => otherNode.widgets[0].value).sort();
+ }
+ }
+ )
+
+ this.addOutput("*", '*');
+ this.onConnectionsChange = function(
+ slotType, //1 = input, 2 = output
+ slot, //self-explanatory
+ isChangeConnect,
+ link_info,
+ output
+ ) {
+ this.validateLinks();
+ }
+
+ this.setName = function(name) {
+ node.widgets[0].value = name;
+ node.onRename();
+ node.serialize();
+ }
+
+ this.onRename = function() {
+ const setter = this.findSetter(node.graph);
+ if (setter) {
+ let linkType = (setter.inputs[0].type);
+
+ this.setType(linkType);
+ this.title = (!disablePrefix ? "Get_" : "") + setter.widgets[0].value;
+
+ if (app.ui.settings.getSettingValue("KJNodes.nodeAutoColor")){
+ setColorAndBgColor.call(this, linkType);
+ }
+
+ } else {
+ this.setType('*');
+ }
+ }
+
+ this.clone = function () {
+ const cloned = GetNode.prototype.clone.apply(this);
+ cloned.size = cloned.computeSize();
+ return cloned;
+ };
+
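+ // Remove any outgoing links whose type no longer matches this node's output type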
+ this.validateLinks = function() {
+ if (this.outputs[0].type !== '*' && this.outputs[0].links) {
+ this.outputs[0].links.filter(linkId => {
+ const link = node.graph.links[linkId];
+ return link && (link.type !== this.outputs[0].type && link.type !== '*');
+ }).forEach(linkId => {
+ node.graph.removeLink(linkId);
+ });
+ }
+ };
+
+ this.setType = function(type) {
+ this.outputs[0].name = type;
+ this.outputs[0].type = type;
+ this.validateLinks();
+ }
+
+ this.findSetter = function(graph) {
+ const name = this.widgets[0].value;
+ const foundNode = graph._nodes.find(otherNode => otherNode.type === 'SetNode' && otherNode.widgets[0].value === name && name !== '');
+ return foundNode;
+ };
+
+ this.goToSetter = function() {
+ const setter = this.findSetter(this.graph);
+ this.canvas.centerOnNode(setter);
+ this.canvas.selectNode(setter, false);
+ };
+
+ // This node is purely frontend and does not impact the resulting prompt so should not be serialized
+ this.isVirtualNode = true;
+ }
+
+ getInputLink(slot) {
+ const setter = this.findSetter(this.graph);
+
+ if (setter) {
+ const slotInfo = setter.inputs[slot];
+ const link = this.graph.links[slotInfo.link];
+ return link;
+ } else {
+ const errorMessage = "No SetNode found for " + this.widgets[0].value + "(" + this.type + ")";
+ showAlertWithThrottle(errorMessage, 5000);
+ //throw new Error(errorMessage);
+ }
+ }
+ onAdded(graph) {
+ }
+ getExtraMenuOptions(_, options) {
+ let menuEntry = this.drawConnection ? "Hide connections" : "Show connections";
+
+ options.unshift(
+ {
+ content: "Go to setter",
+ callback: () => {
+ this.goToSetter();
+ },
+ },
+ {
+ content: menuEntry,
+ callback: () => {
+ this.currentSetter = this.findSetter(this.graph);
+ if (!this.currentSetter) return;
+ let linkType = (this.currentSetter.inputs[0].type);
+ this.drawConnection = !this.drawConnection;
+ this.slotColor = this.canvas.default_connection_color_byType[linkType]
+ menuEntry = this.drawConnection ? "Hide connections" : "Show connections";
+ this.canvas.setDirty(true, true);
+ },
+ },
+ );
+ }
+
+ onDrawForeground(ctx, lGraphCanvas) {
+ if (this.drawConnection) {
+ this._drawVirtualLink(lGraphCanvas, ctx);
+ }
+ }
+ // onDrawCollapsed(ctx, lGraphCanvas) {
+ // if (this.drawConnection) {
+ // this._drawVirtualLink(lGraphCanvas, ctx);
+ // }
+ // }
+ _drawVirtualLink(lGraphCanvas, ctx) {
+ if (!this.currentSetter) return;
+
+ let start_node_slotpos = this.currentSetter.getConnectionPos(false, 0);
+ start_node_slotpos = [
+ start_node_slotpos[0] - this.pos[0],
+ start_node_slotpos[1] - this.pos[1],
+ ];
+ let end_node_slotpos = [0, -LiteGraph.NODE_TITLE_HEIGHT * 0.5];
+ lGraphCanvas.renderLink(
+ ctx,
+ start_node_slotpos,
+ end_node_slotpos,
+ null,
+ false,
+ null,
+ this.slotColor
+ );
+ }
+ }
+
+ LiteGraph.registerNodeType(
+ "GetNode",
+ Object.assign(GetNode, {
+ title: "Get",
+ })
+ );
+
+ GetNode.category = "KJNodes";
+ },
+});
diff --git a/ComfyUI-KJNodes/web/js/spline_editor.js b/ComfyUI-KJNodes/web/js/spline_editor.js
new file mode 100644
index 0000000000000000000000000000000000000000..2d9ec62d0a0028ff41ea17e655683144be97c02c
--- /dev/null
+++ b/ComfyUI-KJNodes/web/js/spline_editor.js
@@ -0,0 +1,866 @@
+import { app } from '../../../scripts/app.js'
+
+//from melmass
+export function makeUUID() {
+ let dt = new Date().getTime()
+ const uuid = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, (c) => {
+ const r = ((dt + Math.random() * 16) % 16) | 0
+ dt = Math.floor(dt / 16)
+ return (c === 'x' ? r : (r & 0x3) | 0x8).toString(16)
+ })
+ return uuid
+}
+
+export const loadScript = (
+ FILE_URL,
+ async = true,
+ type = 'text/javascript',
+ ) => {
+ return new Promise((resolve, reject) => {
+ try {
+ // Check if the script already exists
+ const existingScript = document.querySelector(`script[src="${FILE_URL}"]`)
+ if (existingScript) {
+ resolve({ status: true, message: 'Script already loaded' })
+ return
+ }
+
+ const scriptEle = document.createElement('script')
+ scriptEle.type = type
+ scriptEle.async = async
+ scriptEle.src = FILE_URL
+
+ scriptEle.addEventListener('load', (ev) => {
+ resolve({ status: true })
+ })
+
+ scriptEle.addEventListener('error', (ev) => {
+ reject({
+ status: false,
+ message: `Failed to load the script ${FILE_URL}`,
+ })
+ })
+
+ document.body.appendChild(scriptEle)
+ } catch (error) {
+ reject(error)
+ }
+ })
+ }
+ const create_documentation_stylesheet = () => {
+ const tag = 'kj-splineditor-stylesheet'
+
+ let styleTag = document.head.querySelector('#' + tag)
+
+ if (!styleTag) {
+ styleTag = document.createElement('style')
+ styleTag.type = 'text/css'
+ styleTag.id = tag
+ styleTag.innerHTML = `
+ .spline-editor {
+
+ position: absolute;
+
+ font: 12px monospace;
+ line-height: 1.5em;
+ padding: 10px;
+ z-index: 0;
+ overflow: hidden;
+ }
+ `
+ document.head.appendChild(styleTag)
+ }
+ }
+
+loadScript('/kjweb_async/svg-path-properties.min.js').catch((e) => {
+ console.log(e)
+})
+loadScript('/kjweb_async/protovis.min.js').catch((e) => {
+ console.log(e)
+})
+create_documentation_stylesheet()
+
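+// Wrap an existing callback on an object so both the original and the new callback run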
+function chainCallback(object, property, callback) {
+ if (object == undefined) {
+ //This should not happen.
+ console.error("Tried to add callback to non-existent object")
+ return;
+ }
+ if (property in object) {
+ const callback_orig = object[property]
+ object[property] = function () {
+ const r = callback_orig.apply(this, arguments);
+ callback.apply(this, arguments);
+ return r
+ };
+ } else {
+ object[property] = callback;
+ }
+}
+app.registerExtension({
+ name: 'KJNodes.SplineEditor',
+
+ async beforeRegisterNodeDef(nodeType, nodeData) {
+ if (nodeData?.name === 'SplineEditor') {
+ chainCallback(nodeType.prototype, "onNodeCreated", function () {
+
+ hideWidgetForGood(this, this.widgets.find(w => w.name === "coordinates"))
+
+ var element = document.createElement("div");
+ this.uuid = makeUUID()
+ element.id = `spline-editor-${this.uuid}`
+
+ // fake image widget to allow copy/paste
+ const fakeimagewidget = this.addWidget("COMBO", "image", null, () => { }, {});
+ hideWidgetForGood(this, fakeimagewidget)
+
+ this.splineEditor = this.addDOMWidget(nodeData.name, "SplineEditorWidget", element, {
+ serialize: false,
+ hideOnZoom: false,
+ });
+
+ // context menu
+ this.contextMenu = document.createElement("div");
+ this.contextMenu.className = 'spline-editor-context-menu';
+ this.contextMenu.id = "context-menu";
+ this.contextMenu.style.display = "none";
+ this.contextMenu.style.position = "absolute";
+ this.contextMenu.style.backgroundColor = "#202020";
+ this.contextMenu.style.minWidth = "100px";
+ this.contextMenu.style.boxShadow = "0px 8px 16px 0px rgba(0,0,0,0.2)";
+ this.contextMenu.style.zIndex = "100";
+ this.contextMenu.style.padding = "5px";
+
+ function styleMenuItem(menuItem) {
+ menuItem.style.display = "block";
+ menuItem.style.padding = "5px";
+ menuItem.style.color = "#FFF";
+ menuItem.style.fontFamily = "Arial, sans-serif";
+ menuItem.style.fontSize = "16px";
+ menuItem.style.textDecoration = "none";
+ menuItem.style.marginBottom = "5px";
+ }
+ function createMenuItem(id, textContent) {
+ let menuItem = document.createElement("a");
+ menuItem.href = "#";
+ menuItem.id = `menu-item-${id}`;
+ menuItem.textContent = textContent;
+ styleMenuItem(menuItem);
+ return menuItem;
+ }
+
+ // Create an array of menu items using the createMenuItem function
+ this.menuItems = [
+ createMenuItem(0, "Toggle handles"),
+ createMenuItem(1, "Display sample points"),
+ createMenuItem(2, "Switch point shape"),
+ createMenuItem(3, "Background image"),
+ createMenuItem(4, "Invert point order"),
+ createMenuItem(5, "Clear Image"),
+ ];
+
+ // Add mouseover and mouseout event listeners to each menu item for styling
+ this.menuItems.forEach(menuItem => {
+ menuItem.addEventListener('mouseover', function() {
+ this.style.backgroundColor = "gray";
+ });
+
+ menuItem.addEventListener('mouseout', function() {
+ this.style.backgroundColor = "#202020";
+ });
+ });
+
+ // Append each menu item to the context menu
+ this.menuItems.forEach(menuItem => {
+ this.contextMenu.appendChild(menuItem);
+ });
+
+ document.body.appendChild(this.contextMenu);
+
+ this.addWidget("button", "New spline", null, () => {
+ if (!this.properties || !("points" in this.properties)) {
+ this.editor = new SplineEditor(this);
+ this.addProperty("points", this.constructor.type, "string");
+ }
+ else {
+ this.editor = new SplineEditor(this, true);
+ }
+ });
+
+ this.setSize([550, 950]);
+ this.resizable = false;
+ this.splineEditor.parentEl = document.createElement("div");
+ this.splineEditor.parentEl.className = "spline-editor";
+ this.splineEditor.parentEl.id = `spline-editor-${this.uuid}`
+ element.appendChild(this.splineEditor.parentEl);
+
+ chainCallback(this, "onConfigure", function () {
+ try {
+ this.editor = new SplineEditor(this);
+ } catch (error) {
+ console.error("An error occurred while configuring the editor:", error);
+ }
+ });
+ chainCallback(this, "onExecuted", function (message) {
+ let bg_image = message["bg_image"];
+ this.properties.imgData = {
+ name: "bg_image",
+ base64: bg_image
+ };
+ this.editor.refreshBackgroundImage(this);
+ });
+
+ }); // end onNodeCreated
+ }//node created
+ } //before register
+})//register
+
+
+class SplineEditor {
+ constructor(context, reset = false) {
+ this.node = context;
+ this.reset = reset;
+ const self = this;
+ console.log("creating SplineEditor")
+
+ this.node.pasteFile = (file) => {
+ if (file.type.startsWith("image/")) {
+ this.handleImageFile(file);
+ return true;
+ }
+ return false;
+ };
+
+ this.node.onDragOver = function (e) {
+ if (e.dataTransfer && e.dataTransfer.items) {
+ return [...e.dataTransfer.items].some(f => f.kind === "file" && f.type.startsWith("image/"));
+ }
+ return false;
+ };
+
+ // On drop upload files
+ this.node.onDragDrop = (e) => {
+ console.log("onDragDrop called");
+ let handled = false;
+ for (const file of e.dataTransfer.files) {
+ if (file.type.startsWith("image/")) {
+ this.handleImageFile(file);
+ handled = true;
+ }
+ }
+ return handled;
+ };
+
+ // context menu
+ this.createContextMenu();
+
+
+ this.dotShape = "circle";
+ this.drawSamplePoints = false;
+
+ if (reset && context.splineEditor.element) {
+ context.splineEditor.element.innerHTML = ''; // Clear the container
+ }
+ this.coordWidget = context.widgets.find(w => w.name === "coordinates");
+ this.interpolationWidget = context.widgets.find(w => w.name === "interpolation");
+ this.pointsWidget = context.widgets.find(w => w.name === "points_to_sample");
+ this.pointsStoreWidget = context.widgets.find(w => w.name === "points_store");
+ this.tensionWidget = context.widgets.find(w => w.name === "tension");
+ this.minValueWidget = context.widgets.find(w => w.name === "min_value");
+ this.maxValueWidget = context.widgets.find(w => w.name === "max_value");
+ this.samplingMethodWidget = context.widgets.find(w => w.name === "sampling_method");
+ this.widthWidget = context.widgets.find(w => w.name === "mask_width");
+ this.heightWidget = context.widgets.find(w => w.name === "mask_height");
+
+ this.interpolation = this.interpolationWidget.value
+ this.tension = this.tensionWidget.value
+ this.points_to_sample = this.pointsWidget.value
+ this.rangeMin = this.minValueWidget.value
+ this.rangeMax = this.maxValueWidget.value
+ this.pointsLayer = null;
+ this.samplingMethod = this.samplingMethodWidget.value
+
+ if (this.samplingMethod == "path") {
+ this.dotShape = "triangle"
+ }
+
+
+ this.interpolationWidget.callback = () => {
+ this.interpolation = this.interpolationWidget.value
+ this.updatePath();
+ }
+ this.samplingMethodWidget.callback = () => {
+ this.samplingMethod = this.samplingMethodWidget.value
+ if (this.samplingMethod == "path") {
+ this.dotShape = "triangle"
+ }
+ else if (this.samplingMethod == "controlpoints") {
+ this.dotShape = "circle"
+ this.drawSamplePoints = true;
+ }
+ this.updatePath();
+ }
+ this.tensionWidget.callback = () => {
+ this.tension = this.tensionWidget.value
+ this.updatePath();
+ }
+ this.pointsWidget.callback = () => {
+ this.points_to_sample = this.pointsWidget.value
+ this.updatePath();
+ }
+ this.minValueWidget.callback = () => {
+ this.rangeMin = this.minValueWidget.value
+ this.updatePath();
+ }
+ this.maxValueWidget.callback = () => {
+ this.rangeMax = this.maxValueWidget.value
+ this.updatePath();
+ }
+ this.widthWidget.callback = () => {
+ this.width = this.widthWidget.value;
+ if (this.width > 256) {
+ context.setSize([this.width + 45, context.size[1]]);
+ }
+ this.vis.width(this.width);
+ this.updatePath();
+ }
+ this.heightWidget.callback = () => {
+ this.height = this.heightWidget.value
+ this.vis.height(this.height)
+ context.setSize([context.size[0], this.height + 430]);
+ this.updatePath();
+ }
+ this.pointsStoreWidget.callback = () => {
+ this.points = JSON.parse(this.pointsStoreWidget.value);
+ this.updatePath();
+ }
+
+ // Initialize or reset points array
+ this.drawHandles = false;
+ this.drawRuler = true;
+ var hoverIndex = -1;
+ var isDragging = false;
+ this.width = this.widthWidget.value;
+ this.height = this.heightWidget.value;
+ var i = 3;
+ this.points = [];
+
+ if (!reset && this.pointsStoreWidget.value != "") {
+ this.points = JSON.parse(this.pointsStoreWidget.value);
+ } else {
+ this.points = pv.range(1, 4).map((i, index) => {
+ if (index === 0) {
+ // First point at the bottom-left corner
+ return { x: 0, y: this.height };
+ } else if (index === 2) {
+ // Last point at the top-right corner
+ return { x: this.width, y: 0 };
+ } else {
+ // Other points remain as they were
+ return {
+ x: i * this.width / 5,
+ y: 50 + Math.random() * (this.height - 100)
+ };
+ }
+ });
+ this.pointsStoreWidget.value = JSON.stringify(this.points);
+ }
+
+ this.vis = new pv.Panel()
+ .width(this.width)
+ .height(this.height)
+ .fillStyle("#222")
+ .strokeStyle("gray")
+ .lineWidth(2)
+ .antialias(false)
+ .margin(10)
+ .event("mousedown", function () {
+ if (pv.event.shiftKey) { // Use pv.event to access the event object
+ let scaledMouse = {
+ x: this.mouse().x / app.canvas.ds.scale,
+ y: this.mouse().y / app.canvas.ds.scale
+ };
+ i = self.points.push(scaledMouse) - 1;
+ self.updatePath();
+ return this;
+ }
+ else if (pv.event.ctrlKey) {
+ // Capture the clicked location
+ let clickedPoint = {
+ x: this.mouse().x / app.canvas.ds.scale,
+ y: this.mouse().y / app.canvas.ds.scale
+ };
+
+ // Find the two closest points to the clicked location
+ let { point1Index, point2Index } = self.findClosestPoints(self.points, clickedPoint);
+
+ // Calculate the midpoint between the two closest points
+ let midpoint = {
+ x: (self.points[point1Index].x + self.points[point2Index].x) / 2,
+ y: (self.points[point1Index].y + self.points[point2Index].y) / 2
+ };
+
+ // Insert the midpoint into the array
+ self.points.splice(point2Index, 0, midpoint);
+ i = point2Index;
+ self.updatePath();
+ }
+ else if (pv.event.button === 2) {
+ self.node.contextMenu.style.display = 'block';
+ self.node.contextMenu.style.left = `${pv.event.clientX}px`;
+ self.node.contextMenu.style.top = `${pv.event.clientY}px`;
+ }
+ })
+ this.backgroundImage = this.vis.add(pv.Image).visible(false)
+
+ this.vis.add(pv.Rule)
+ .data(pv.range(0, this.height, 64))
+ .bottom(d => d)
+ .strokeStyle("gray")
+ .lineWidth(3)
+ .visible(() => self.drawRuler)
+
+ // vis.add(pv.Rule)
+ // .data(pv.range(0, points_to_sample, 1))
+ // .left(d => d * 512 / (points_to_sample - 1))
+ // .strokeStyle("gray")
+ // .lineWidth(2)
+
+ this.vis.add(pv.Line)
+ .data(() => this.points)
+ .left(d => d.x)
+ .top(d => d.y)
+ .interpolate(() => this.interpolation)
+ .tension(() => this.tension)
+ .segmented(() => false)
+ .strokeStyle(pv.Colors.category10().by(pv.index))
+ .lineWidth(3)
+
+ this.vis.add(pv.Dot)
+ .data(() => this.points)
+ .left(d => d.x)
+ .top(d => d.y)
+ .radius(10)
+ .shape(function() {
+ return self.dotShape;
+ })
+ .angle(function() {
+ const index = this.index;
+ let angle = 0;
+
+ if (self.dotShape === "triangle") {
+ let dxNext = 0, dyNext = 0;
+ if (index < self.points.length - 1) {
+ dxNext = self.points[index + 1].x - self.points[index].x;
+ dyNext = self.points[index + 1].y - self.points[index].y;
+ }
+
+ let dxPrev = 0, dyPrev = 0;
+ if (index > 0) {
+ dxPrev = self.points[index].x - self.points[index - 1].x;
+ dyPrev = self.points[index].y - self.points[index - 1].y;
+ }
+
+ const dx = (dxNext + dxPrev) / 2;
+ const dy = (dyNext + dyPrev) / 2;
+
+ angle = Math.atan2(dy, dx);
+ angle -= Math.PI / 2;
+ angle = (angle + 2 * Math.PI) % (2 * Math.PI);
+ }
+
+ return angle;
+ })
+ .cursor("move")
+ .strokeStyle(function () { return i == this.index ? "#ff7f0e" : "#1f77b4"; })
+ .fillStyle(function () { return "rgba(100, 100, 100, 0.3)"; })
+ .event("mousedown", pv.Behavior.drag())
+ .event("dragstart", function () {
+ i = this.index;
+ hoverIndex = this.index;
+ isDragging = true;
+ if (pv.event.button === 2 && i !== 0 && i !== self.points.length - 1) {
+ self.points.splice(i--, 1);
+ self.vis.render();
+ }
+ return this;
+ })
+ .event("dragend", function() {
+ if (self.pathElements !== null) {
+ self.updatePath();
+ }
+ isDragging = false;
+ })
+ .event("drag", function () {
+ let adjustedX = this.mouse().x / app.canvas.ds.scale; // Adjust the new X position by the inverse of the scale factor
+ let adjustedY = this.mouse().y / app.canvas.ds.scale; // Adjust the new Y position by the inverse of the scale factor
+ // Determine the bounds of the vis.Panel
+ const panelWidth = self.vis.width();
+ const panelHeight = self.vis.height();
+
+ // Adjust the new position if it would place the dot outside the bounds of the vis.Panel
+ adjustedX = Math.max(0, Math.min(panelWidth, adjustedX));
+ adjustedY = Math.max(0, Math.min(panelHeight, adjustedY));
+ self.points[this.index] = { x: adjustedX, y: adjustedY }; // Update the point's position
+ self.vis.render(); // Re-render the visualization to reflect the new position
+ })
+ .event("mouseover", function() {
+ hoverIndex = this.index; // Set the hover index to the index of the hovered dot
+ self.vis.render(); // Re-render the visualization
+ })
+ .event("mouseout", function() {
+ !isDragging && (hoverIndex = -1); // Reset the hover index when the mouse leaves the dot
+ self.vis.render(); // Re-render the visualization
+ })
+ .anchor("center")
+ .add(pv.Label)
+ .visible(function() {
+ return hoverIndex === this.index; // Only show the label for the hovered dot
+ })
+ .left(d => d.x < this.width / 2 ? d.x + 80 : d.x - 70) // Shift label to right if on left half, otherwise shift to left
+ .top(d => d.y < this.height / 2 ? d.y + 20 : d.y - 20) // Shift label down if on top half, otherwise shift up
+ .font("12px sans-serif")
+ .text(d => {
+ if (this.samplingMethod == "path") {
+ return `X: ${Math.round(d.x)}, Y: ${Math.round(d.y)}`;
+ } else {
+ let frame = Math.round((d.x / self.width) * self.points_to_sample);
+ let normalizedY = (1.0 - (d.y / self.height) - 0.0) * (self.rangeMax - self.rangeMin) + self.rangeMin;
+ let normalizedX = (d.x / self.width);
+ return `F: ${frame}, X: ${normalizedX.toFixed(2)}, Y: ${normalizedY.toFixed(2)}`;
+ }
+ })
+ .textStyle("orange")
+
+ if (this.points.length != 0) {
+ this.vis.render();
+ }
+ var svgElement = this.vis.canvas();
+ svgElement.style['zIndex'] = "2"
+ svgElement.style['position'] = "relative"
+ this.node.splineEditor.element.appendChild(svgElement);
+ this.pathElements = svgElement.getElementsByTagName('path'); // Get all path elements
+
+ if (this.width > 256) {
+ this.node.setSize([this.width + 45, this.node.size[1]]);
+ }
+ this.node.setSize([this.node.size[0], this.height + 430]);
+ this.updatePath();
+ this.refreshBackgroundImage();
+ }
+
+ updatePath = () => {
+ if (!this.points || this.points.length === 0) {
+ console.log("no points");
+ return;
+ }
+ if (this.samplingMethod != "controlpoints") {
+ var coords = this.samplePoints(this.pathElements[0], this.points_to_sample, this.samplingMethod, this.width);
+ }
+ else {
+ var coords = this.points
+ }
+
+ if (this.drawSamplePoints) {
+ if (this.pointsLayer) {
+ // Update the data of the existing points layer
+ this.pointsLayer.data(coords);
+ } else {
+ // Create the points layer if it doesn't exist
+ this.pointsLayer = this.vis.add(pv.Dot)
+ .data(coords)
+ .left(function(d) { return d.x; })
+ .top(function(d) { return d.y; })
+ .radius(5) // Adjust the radius as needed
+ .fillStyle("red") // Change the color as needed
+ .strokeStyle("black") // Change the stroke color as needed
+ .lineWidth(1); // Adjust the line width as needed
+ }
+ } else {
+ if (this.pointsLayer) {
+ // Remove the points layer
+ this.pointsLayer.data([]);
+ this.vis.render();
+ }
+ }
+ let coordsString = JSON.stringify(coords);
+ this.pointsStoreWidget.value = JSON.stringify(this.points);
+ if (this.coordWidget) {
+ this.coordWidget.value = coordsString;
+ }
+ this.vis.render();
+ };
+ handleImageLoad = (img, file, base64String) => {
+ console.log(img.width, img.height); // Access width and height here
+ this.widthWidget.value = img.width;
+ this.heightWidget.value = img.height;
+ this.drawRuler = false;
+
+ if (img.width != this.vis.width() || img.height != this.vis.height()) {
+ if (img.width > 256) {
+ this.node.setSize([img.width + 45, this.node.size[1]]);
+ }
+ this.node.setSize([this.node.size[0], img.height + 500]);
+ this.vis.width(img.width);
+ this.vis.height(img.height);
+ this.height = img.height;
+ this.width = img.width;
+
+ this.updatePath();
+ }
+ this.backgroundImage.url(file ? URL.createObjectURL(file) : `data:${this.node.properties.imgData.type};base64,${base64String}`).visible(true).root.render();
+ };
+
+ processImage = (img, file) => {
+ const canvas = document.createElement('canvas');
+ const ctx = canvas.getContext('2d');
+
+ const maxWidth = 800; // maximum width
+ const maxHeight = 600; // maximum height
+ let width = img.width;
+ let height = img.height;
+
+ // Calculate the new dimensions while preserving the aspect ratio
+ if (width > height) {
+ if (width > maxWidth) {
+ height *= maxWidth / width;
+ width = maxWidth;
+ }
+ } else {
+ if (height > maxHeight) {
+ width *= maxHeight / height;
+ height = maxHeight;
+ }
+ }
+
+ canvas.width = width;
+ canvas.height = height;
+ ctx.drawImage(img, 0, 0, width, height);
+
+ // Get the compressed image data as a Base64 string
+ const base64String = canvas.toDataURL('image/jpeg', 0.5).replace('data:', '').replace(/^.+,/, ''); // 0.5 is the quality from 0 to 1
+
+ this.node.properties.imgData = {
+ name: file.name,
+ lastModified: file.lastModified,
+ size: file.size,
+ type: file.type,
+ base64: base64String
+ };
+ this.handleImageLoad(img, file, base64String);
+ };
+
+ handleImageFile = (file) => {
+ const reader = new FileReader();
+ reader.onloadend = () => {
+ const img = new Image();
+ img.src = reader.result;
+ img.onload = () => this.processImage(img, file);
+ };
+ reader.readAsDataURL(file);
+
+ const imageUrl = URL.createObjectURL(file);
+ const img = new Image();
+ img.src = imageUrl;
+ img.onload = () => this.handleImageLoad(img, file, null);
+ };
+
+ refreshBackgroundImage = () => {
+ if (this.node.properties.imgData && this.node.properties.imgData.base64) {
+ const base64String = this.node.properties.imgData.base64;
+ const imageUrl = `data:${this.node.properties.imgData.type};base64,${base64String}`;
+ const img = new Image();
+ img.src = imageUrl;
+ img.onload = () => this.handleImageLoad(img, null, base64String);
+ }
+ };
+
+ createContextMenu = () => {
+ const self = this;
+ document.addEventListener('contextmenu', function (e) {
+ e.preventDefault();
+
+ });
+
+ document.addEventListener('click', function (e) {
+ document.querySelectorAll('.spline-editor-context-menu').forEach(menu => {
+ menu.style.display = 'none';
+ });
+ });
+
+ this.node.menuItems.forEach((menuItem, index) => {
+ menuItem.addEventListener('click', function (e) {
+ e.preventDefault();
+ switch (index) {
+ case 0:
+ e.preventDefault();
+ if (!self.drawHandles) {
+ self.drawHandles = true
+ self.vis.add(pv.Line)
+ .data(() => self.points.map((point, index) => ({
+ start: point,
+ end: [index]
+ })))
+ .left(d => d.start.x)
+ .top(d => d.start.y)
+ .interpolate("linear")
+ .tension(0) // Straight lines
+ .strokeStyle("#ff7f0e") // Same color as control points
+ .lineWidth(1)
+ .visible(() => self.drawHandles);
+ self.vis.render();
+ } else {
+ self.drawHandles = false
+ self.vis.render();
+ }
+ self.node.contextMenu.style.display = 'none';
+ break;
+ case 1:
+ e.preventDefault();
+ self.drawSamplePoints = !self.drawSamplePoints;
+ self.updatePath();
+ break;
+ case 2:
+ e.preventDefault();
+ if (self.dotShape == "circle"){
+ self.dotShape = "triangle"
+ }
+ else {
+ self.dotShape = "circle"
+ }
+ console.log(self.dotShape)
+ self.updatePath();
+ break;
+ case 3:
+ // Create file input element
+ const fileInput = document.createElement('input');
+ fileInput.type = 'file';
+ fileInput.accept = 'image/*'; // Accept only image files
+
+ // Listen for file selection
+ fileInput.addEventListener('change', function (event) {
+ const file = event.target.files[0]; // Get the selected file
+
+ if (file) {
+ const imageUrl = URL.createObjectURL(file);
+ let img = new Image();
+ img.src = imageUrl;
+ img.onload = () => self.handleImageLoad(img, file, null);
+ }
+ });
+
+ fileInput.click();
+
+ self.node.contextMenu.style.display = 'none';
+ break;
+ case 4:
+ e.preventDefault();
+ self.points.reverse();
+ self.updatePath();
+ break;
+ case 5:
+ self.backgroundImage.visible(false).root.render();
+ self.node.properties.imgData = null;
+ self.node.contextMenu.style.display = 'none';
+ break;
+ }
+ });
+ });
+ }
+
+ samplePoints(svgPathElement, numSamples, samplingMethod, width) {
+ var svgWidth = width; // Fixed width of the SVG element
+ var pathLength = svgPathElement.getTotalLength();
+ var points = [];
+
+ for (var i = 0; i < numSamples; i++) {
+ if (samplingMethod === "time") {
+ // Calculate the x-coordinate for the current sample based on the SVG's width
+ var x = (svgWidth / (numSamples - 1)) * i;
+ // Find the point on the path that intersects the vertical line at the calculated x-coordinate
+ var point = this.findPointAtX(svgPathElement, x, pathLength);
+ }
+ else if (samplingMethod === "path") {
+ // Calculate the distance along the path for the current sample
+ var distance = (pathLength / (numSamples - 1)) * i;
+ // Get the point at the current distance
+ var point = svgPathElement.getPointAtLength(distance);
+ }
+
+ // Add the point to the array of points
+ points.push({ x: point.x, y: point.y });
+ }
+ return points;
+ }
+
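+ // Return the indices of the two points nearest to the clicked location, smaller index first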
+ findClosestPoints(points, clickedPoint) {
+ // Calculate distances from clickedPoint to each point in the array
+ let distances = points.map(point => {
+ let dx = clickedPoint.x - point.x;
+ let dy = clickedPoint.y - point.y;
+ return { index: points.indexOf(point), distance: Math.sqrt(dx * dx + dy * dy) };
+ });
+ // Sort distances and get the indices of the two closest points
+ let sortedDistances = distances.sort((a, b) => a.distance - b.distance);
+ let closestPoint1Index = sortedDistances[0].index;
+ let closestPoint2Index = sortedDistances[1].index;
+ // Ensure point1Index is always the smaller index
+ if (closestPoint1Index > closestPoint2Index) {
+ [closestPoint1Index, closestPoint2Index] = [closestPoint2Index, closestPoint1Index];
+ }
+ return { point1Index: closestPoint1Index, point2Index: closestPoint2Index };
+ }
+
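+ // Binary-search along the path length for the point whose x coordinate is closest to targetX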
+ findPointAtX(svgPathElement, targetX, pathLength) {
+ let low = 0;
+ let high = pathLength;
+ let bestPoint = svgPathElement.getPointAtLength(0);
+
+ while (low <= high) {
+ let mid = low + (high - low) / 2;
+ let point = svgPathElement.getPointAtLength(mid);
+
+ if (Math.abs(point.x - targetX) < 1) {
+ return point; // The point is close enough to the target
+ }
+
+ if (point.x < targetX) {
+ low = mid + 1;
+ } else {
+ high = mid - 1;
+ }
+
+ // Keep track of the closest point found so far
+ if (Math.abs(point.x - targetX) < Math.abs(bestPoint.x - targetX)) {
+ bestPoint = point;
+ }
+ }
+
+ // Return the closest point found
+ return bestPoint;
+ }
+}
+//from melmass
+export function hideWidgetForGood(node, widget, suffix = '') {
+ widget.origType = widget.type
+ widget.origComputeSize = widget.computeSize
+ widget.origSerializeValue = widget.serializeValue
+ widget.computeSize = () => [0, -4] // -4 is due to the gap litegraph adds between widgets automatically
+ widget.type = "converted-widget" + suffix
+ // widget.serializeValue = () => {
+ // // Prevent serializing the widget if we have no input linked
+ // const w = node.inputs?.find((i) => i.widget?.name === widget.name);
+ // if (w?.link == null) {
+ // return undefined;
+ // }
+ // return widget.origSerializeValue ? widget.origSerializeValue() : widget.value;
+ // };
+
+ // Hide any linked widgets, e.g. seed+seedControl
+ if (widget.linkedWidgets) {
+ for (const w of widget.linkedWidgets) {
+ hideWidgetForGood(node, w, ':' + widget.name)
+ }
+ }
+}
\ No newline at end of file
diff --git a/ComfyUI-KJNodes/web/red.png b/ComfyUI-KJNodes/web/red.png
new file mode 100644
index 0000000000000000000000000000000000000000..4352c118b2c5fa6f33edc4d99a5e4d22649ff827
Binary files /dev/null and b/ComfyUI-KJNodes/web/red.png differ